author    Thomas Gleixner <tglx@linutronix.de>  2017-05-16 20:42:32 +0200
committer Ingo Molnar <mingo@kernel.org>        2017-05-23 10:01:34 +0200
commit    8fb12156b8db61af3d49f3e5e104568494581d1f (patch)
tree      7a0bbfa7ff1a094ac918e8630f0993faec9c1173 /init/main.c
parent    8655d5497735b288f8a9b458bd22e7d1bf95bb61 (diff)
init: Pin init task to the boot CPU, initially
Some of the boot code in kernel_init_freeable() which runs before SMP
bringup assumes (rightfully) that it runs on the boot CPU and therefore
can use smp_processor_id() in preemptible context.

That has worked so far because the smp_processor_id() check only becomes
effective after SMP bringup. That's just wrong: starting with SMP bringup
and the ability to move threads around, smp_processor_id() in preemptible
context is broken.

Aside from that, it does not make sense to allow init to run on all CPUs
before sched_init_smp() has been run.

Pin init to the boot CPU so the existing code can continue to use
smp_processor_id() without triggering the checks when the enabling of
those checks is moved earlier.

Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170516184734.943149935@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
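Why does pinning help? The debug check behind smp_processor_id() lives in lib/smp_processor_id.c and tolerates callers that cannot migrate. The following is a simplified, illustrative sketch of that logic, not the kernel's actual code; the helper name and the cpus_allowed field access are assumptions for the v4.12-era task_struct layout:

	#include <linux/cpumask.h>
	#include <linux/irqflags.h>
	#include <linux/preempt.h>
	#include <linux/sched.h>

	/*
	 * Simplified sketch of the check behind smp_processor_id().
	 * The real check is in lib/smp_processor_id.c; names here are
	 * illustrative only.
	 */
	static bool smp_processor_id_is_stable(struct task_struct *tsk, int this_cpu)
	{
		/* Safe: the caller cannot be preempted or migrated right now. */
		if (preempt_count() || irqs_disabled())
			return true;

		/*
		 * Also safe: the task may run on exactly one CPU, so even if it
		 * is preempted it resumes on that CPU. Pinning init to the boot
		 * CPU puts it into this category before sched_init_smp() runs.
		 */
		if (cpumask_equal(&tsk->cpus_allowed, cpumask_of(this_cpu)))
			return true;

		/* Otherwise smp_processor_id() in preemptible context is unreliable. */
		return false;
	}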
Diffstat (limited to 'init/main.c')
-rw-r--r--  init/main.c  |  17 ++++++++++++-----
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/init/main.c b/init/main.c
index f866510472d7..badae3bf08f1 100644
--- a/init/main.c
+++ b/init/main.c
@@ -389,6 +389,7 @@ static __initdata DECLARE_COMPLETION(kthreadd_done);
static noinline void __ref rest_init(void)
{
+ struct task_struct *tsk;
int pid;
rcu_scheduler_starting();
@@ -397,7 +398,17 @@ static noinline void __ref rest_init(void)
* the init task will end up wanting to create kthreads, which, if
* we schedule it before we create kthreadd, will OOPS.
*/
- kernel_thread(kernel_init, NULL, CLONE_FS);
+ pid = kernel_thread(kernel_init, NULL, CLONE_FS);
+ /*
+ * Pin init on the boot CPU. Task migration is not properly working
+ * until sched_init_smp() has been run. It will set the allowed
+ * CPUs for init to the non isolated CPUs.
+ */
+ rcu_read_lock();
+ tsk = find_task_by_pid_ns(pid, &init_pid_ns);
+ set_cpus_allowed_ptr(tsk, cpumask_of(smp_processor_id()));
+ rcu_read_unlock();
+
numa_default_policy();
pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
rcu_read_lock();
@@ -1015,10 +1026,6 @@ static noinline void __init kernel_init_freeable(void)
* init can allocate pages on any node
*/
set_mems_allowed(node_states[N_MEMORY]);
- /*
- * init can run on any cpu.
- */
- set_cpus_allowed_ptr(current, cpu_all_mask);
cad_pid = task_pid(current);
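The pin is temporary: as the commit message notes, sched_init_smp() later sets init's allowed CPUs to the non-isolated CPUs. A rough sketch of that later step, with the mask computation omitted and the helper name invented for illustration:

	#include <linux/bug.h>
	#include <linux/cpumask.h>
	#include <linux/sched.h>

	/*
	 * Illustrative sketch only. In the real kernel this happens inside
	 * sched_init_smp(), which runs in the init task's context; the helper
	 * name and how the non-isolated mask is obtained are assumptions.
	 */
	static void __init widen_init_affinity(const struct cpumask *non_isolated_cpus)
	{
		/* "current" is the init task here; undo the boot-CPU pin. */
		if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
			BUG();
	}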