summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2009-07-03 08:30:27 -0500
committerThomas Gleixner <tglx@linutronix.de>2009-07-27 15:14:51 +0200
commitf9d9cfe4fa444b93238d7c3cf07af2dd85b151e8 (patch)
tree8d35fdf49c61d518993f89593b38a20df62dc82f
parent0dc6c39e33b193bdf9bfab265c24a03108526bff (diff)
downloadlwn-f9d9cfe4fa444b93238d7c3cf07af2dd85b151e8.tar.gz
lwn-f9d9cfe4fa444b93238d7c3cf07af2dd85b151e8.zip
stop_machine: convert stop_machine_run() to PREEMPT_RT
Instead of playing with non-preemption, introduce explicit startup serialization. This is more robust and cleaner as well. Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--kernel/stop_machine.c26
1 file changed, 21 insertions, 5 deletions
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 912823e2a11b..22d1d77f9a62 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -40,6 +40,8 @@ static atomic_t thread_ack;
static DEFINE_MUTEX(lock);
/* setup_lock protects refcount, stop_machine_wq and stop_machine_work. */
static DEFINE_MUTEX(setup_lock);
+/* do not start up until all worklets have been placed: */
+static DEFINE_MUTEX(startup_lock);
/* Users of stop_machine. */
static int refcount;
static struct workqueue_struct *stop_machine_wq;
@@ -71,6 +73,15 @@ static void stop_cpu(struct work_struct *unused)
int cpu = smp_processor_id();
int err;
+ /*
+ * Wait for the startup loop to finish:
+ */
+ mutex_lock(&startup_lock);
+ /*
+ * Let other threads continue too:
+ */
+ mutex_unlock(&startup_lock);
+
if (!active_cpus) {
if (cpu == cpumask_first(cpu_online_mask))
smdata = &active;
@@ -166,16 +177,21 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
set_state(STOPMACHINE_PREPARE);
- /* Schedule the stop_cpu work on all cpus: hold this CPU so one
- * doesn't hit this CPU until we're ready. */
- get_cpu();
+ /*
+ * Schedule the stop_cpu work on all cpus before allowing any
+ * of the CPUs to execute it:
+ */
+ mutex_lock(&startup_lock);
+
for_each_online_cpu(i) {
sm_work = per_cpu_ptr(stop_machine_work, i);
INIT_WORK(sm_work, stop_cpu);
queue_work_on(i, stop_machine_wq, sm_work);
}
- /* This will release the thread on our CPU. */
- put_cpu();
+
+ /* This will release the thread on all CPUs: */
+ mutex_unlock(&startup_lock);
+
flush_workqueue(stop_machine_wq);
ret = active.fnret;
mutex_unlock(&lock);