| author | Gautham R Shenoy <ego@in.ibm.com> | 2008-01-25 21:08:02 +0100 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-01-25 21:08:02 +0100 |
| commit | 95402b3829010fe1e208f44e4a158ccade88969a (patch) | |
| tree | 3b9895b47623b4673e3c11121980e5171af76bbe /kernel/sched.c | |
| parent | 86ef5c9a8edd78e6bf92879f32329d89b2d55b5a (diff) | |
cpu-hotplug: replace per-subsystem mutexes with get_online_cpus()
This patch converts the known per-subsystem hotplug mutexes to
get_online_cpus()/put_online_cpus(). It also eliminates the CPU_LOCK_ACQUIRE
and CPU_LOCK_RELEASE hotplug notification events.
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
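The caller-side pattern this commit establishes is mechanical: code that needs the set of online CPUs to stay stable no longer takes a subsystem-private mutex, but brackets its critical section with the global reference-counted pair from <linux/cpu.h>. A minimal sketch of the idiom (the walk_online_cpus() helper and its per-CPU work are hypothetical, for illustration only):

```c
#include <linux/cpu.h>		/* get_online_cpus(), put_online_cpus() */
#include <linux/cpumask.h>	/* for_each_online_cpu() */

/* Hypothetical reader: needs a stable view of the online CPU map. */
static void walk_online_cpus(void)
{
	int cpu;

	get_online_cpus();	/* pin the online map; hotplug waits for us */
	for_each_online_cpu(cpu) {
		/* per-CPU work: no CPU can be added or removed here */
	}
	put_online_cpus();	/* drop the reference; hotplug may proceed */
}
```

Unlike the old per-subsystem mutexes, any number of readers can hold this reference concurrently; only an actual cpu_up()/cpu_down() has to wait for them.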
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 25
1 file changed, 9 insertions(+), 16 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 672aa68bfeac..c0e2db683e29 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -439,7 +439,6 @@ struct rq {
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
 {
@@ -4546,13 +4545,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 	struct task_struct *p;
 	int retval;
 
-	mutex_lock(&sched_hotcpu_mutex);
+	get_online_cpus();
 	read_lock(&tasklist_lock);
 
 	p = find_process_by_pid(pid);
 	if (!p) {
 		read_unlock(&tasklist_lock);
-		mutex_unlock(&sched_hotcpu_mutex);
+		put_online_cpus();
 		return -ESRCH;
 	}
@@ -4592,7 +4591,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 	}
 out_unlock:
 	put_task_struct(p);
-	mutex_unlock(&sched_hotcpu_mutex);
+	put_online_cpus();
 	return retval;
 }
 
@@ -4649,7 +4648,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 	struct task_struct *p;
 	int retval;
 
-	mutex_lock(&sched_hotcpu_mutex);
+	get_online_cpus();
 	read_lock(&tasklist_lock);
 
 	retval = -ESRCH;
@@ -4665,7 +4664,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 
 out_unlock:
 	read_unlock(&tasklist_lock);
-	mutex_unlock(&sched_hotcpu_mutex);
+	put_online_cpus();
 
 	return retval;
 }
@@ -5625,9 +5624,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	struct rq *rq;
 
 	switch (action) {
-	case CPU_LOCK_ACQUIRE:
-		mutex_lock(&sched_hotcpu_mutex);
-		break;
 
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
@@ -5697,9 +5693,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		spin_unlock_irq(&rq->lock);
 		break;
 #endif
-	case CPU_LOCK_RELEASE:
-		mutex_unlock(&sched_hotcpu_mutex);
-		break;
 	}
 	return NOTIFY_OK;
 }
@@ -6655,10 +6648,10 @@ static int arch_reinit_sched_domains(void)
 {
 	int err;
 
-	mutex_lock(&sched_hotcpu_mutex);
+	get_online_cpus();
 	detach_destroy_domains(&cpu_online_map);
 	err = arch_init_sched_domains(&cpu_online_map);
-	mutex_unlock(&sched_hotcpu_mutex);
+	put_online_cpus();
 
 	return err;
 }
@@ -6769,12 +6762,12 @@ void __init sched_init_smp(void)
 {
 	cpumask_t non_isolated_cpus;
 
-	mutex_lock(&sched_hotcpu_mutex);
+	get_online_cpus();
 	arch_init_sched_domains(&cpu_online_map);
 	cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
 	if (cpus_empty(non_isolated_cpus))
 		cpu_set(smp_processor_id(), non_isolated_cpus);
-	mutex_unlock(&sched_hotcpu_mutex);
+	put_online_cpus();
 
 	/* XXX: Theoretical race here - CPU may be hotplugged now */
 	hotcpu_notifier(update_sched_domains, 0);
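The deleted CPU_LOCK_ACQUIRE/CPU_LOCK_RELEASE cases show why the notifier gets simpler: hotplug exclusion is no longer pushed into every subsystem's callback, but enforced once, centrally, by a reference count that cpu_up()/cpu_down() wait on. Roughly, the reader side introduced by the parent commit behaves like the simplified model below; this is a sketch from memory, not a verbatim copy of kernel/cpu.c of that era, and the real code additionally lets the active hotplug writer re-enter and makes the writer sleep until the refcount drops to zero.

```c
#include <linux/mutex.h>

/* Simplified model of the central hotplug reader refcount. */
static struct {
	struct mutex lock;	/* protects refcount */
	int refcount;		/* number of active readers */
} cpu_hotplug = {
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
};

void get_online_cpus(void)
{
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;	/* many readers may hold this at once */
	mutex_unlock(&cpu_hotplug.lock);
}

void put_online_cpus(void)
{
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount--;	/* real code also wakes a waiting writer */
	mutex_unlock(&cpu_hotplug.lock);
}
```

With exclusion handled this way, a mutex_lock() hidden inside a notifier callback (and a matching unlock in a *different* callback invocation) is no longer needed, which is exactly the asymmetric locking pattern the old CPU_LOCK_ACQUIRE/CPU_LOCK_RELEASE events forced on every subsystem.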