diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2009-10-13 21:18:56 +0200 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2009-10-13 21:18:56 +0200 |
commit | c2171f666fbd581bb18ed0b5d7a5270a4c931364 (patch) | |
tree | 5089d709a138614f42d1f65778a8aeddf6e49349 /kernel | |
parent | 476ced368096ca8aac49509405c4b0fbe4717596 (diff) | |
parent | d99f9884403747e8668442b9ce61b28a9d6bcd4a (diff) | |
download | lwn-c2171f666fbd581bb18ed0b5d7a5270a4c931364.tar.gz lwn-c2171f666fbd581bb18ed0b5d7a5270a4c931364.zip |
Merge branch 'rt/head' into rt/2.6.31
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/fork.c | 1 | ||||
-rw-r--r-- | kernel/futex.c | 84 | ||||
-rw-r--r-- | kernel/softirq.c | 19 |
3 files changed, 27 insertions, 77 deletions
diff --git a/kernel/fork.c b/kernel/fork.c index 604f84972eb4..a9652601a6d0 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1190,7 +1190,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, #endif INIT_LIST_HEAD(&p->pi_state_list); p->pi_state_cache = NULL; - p->futex_wakeup = NULL; #endif /* * sigaltstack should be cleared when sharing the same VM diff --git a/kernel/futex.c b/kernel/futex.c index 3dab2f5c9e2a..7c4a6ac9e710 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -713,7 +713,7 @@ retry: * The hash bucket lock must be held when this is called. * Afterwards, the futex_q must not be accessed. */ -static void wake_futex(struct task_struct **wake_list, struct futex_q *q) +static void wake_futex(struct futex_q *q) { struct task_struct *p = q->task; @@ -736,51 +736,8 @@ static void wake_futex(struct task_struct **wake_list, struct futex_q *q) smp_wmb(); q->lock_ptr = NULL; - /* - * Atomically grab the task, if ->futex_wakeup is !0 already it means - * its already queued (either by us or someone else) and will get the - * wakeup due to that. - * - * This cmpxchg() implies a full barrier, which pairs with the write - * barrier implied by the wakeup in wake_futex_list(). - */ - if (cmpxchg(&p->futex_wakeup, 0, p) != 0) { - /* - * It was already queued, drop the extra ref and we're done. - */ - put_task_struct(p); - return; - } - - /* - * Put the task on our wakeup list by atomically switching it with - * the list head. (XXX its a local list, no possible concurrency, - * this could be written without cmpxchg). - */ - do { - p->futex_wakeup = *wake_list; - } while (cmpxchg(wake_list, p->futex_wakeup, p) != p->futex_wakeup); -} - -/* - * For each task on the list, deliver the pending wakeup and release the - * task reference obtained in wake_futex(). 
- */ -static void wake_futex_list(struct task_struct *head) -{ - while (head != &init_task) { - struct task_struct *next = head->futex_wakeup; - - head->futex_wakeup = NULL; - /* - * wake_up_state() implies a wmb() to pair with the queueing - * in wake_futex() so as to not miss wakeups. - */ - wake_up_state(head, TASK_NORMAL); - put_task_struct(head); - - head = next; - } + wake_up_state(p, TASK_NORMAL); + put_task_struct(p); } static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) @@ -894,7 +851,6 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset) struct futex_q *this, *next; struct plist_head *head; union futex_key key = FUTEX_KEY_INIT; - struct task_struct *wake_list = &init_task; int ret; if (!bitset) @@ -919,7 +875,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset) if (!(this->bitset & bitset)) continue; - wake_futex(&wake_list, this); + wake_futex(this); if (++ret >= nr_wake) break; } @@ -927,8 +883,6 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset) spin_unlock(&hb->lock); put_futex_key(fshared, &key); - - wake_futex_list(wake_list); out: return ret; } @@ -945,7 +899,6 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2, struct futex_hash_bucket *hb1, *hb2; struct plist_head *head; struct futex_q *this, *next; - struct task_struct *wake_list = &init_task; int ret, op_ret; retry: @@ -996,7 +949,7 @@ retry_private: plist_for_each_entry_safe(this, next, head, list) { if (match_futex (&this->key, &key1)) { - wake_futex(&wake_list, this); + wake_futex(this); if (++ret >= nr_wake) break; } @@ -1008,7 +961,7 @@ retry_private: op_ret = 0; plist_for_each_entry_safe(this, next, head, list) { if (match_futex (&this->key, &key2)) { - wake_futex(&wake_list, this); + wake_futex(this); if (++op_ret >= nr_wake2) break; } @@ -1021,8 +974,6 @@ out_put_keys: put_futex_key(fshared, &key2); out_put_key1: put_futex_key(fshared, &key1); - - 
wake_futex_list(wake_list); out: return ret; } @@ -1177,7 +1128,6 @@ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2, struct futex_hash_bucket *hb1, *hb2; struct plist_head *head1; struct futex_q *this, *next; - struct task_struct *wake_list = &init_task; u32 curval2; if (requeue_pi) { @@ -1322,7 +1272,7 @@ retry_private: * woken by futex_unlock_pi(). */ if (++task_count <= nr_wake && !requeue_pi) { - wake_futex(&wake_list, this); + wake_futex(this); continue; } @@ -1368,8 +1318,6 @@ out_put_keys: put_futex_key(fshared, &key2); out_put_key1: put_futex_key(fshared, &key1); - - wake_futex_list(wake_list); out: if (pi_state != NULL) free_pi_state(pi_state); @@ -1805,6 +1753,7 @@ static int futex_wait(u32 __user *uaddr, int fshared, current->timer_slack_ns); } +retry: /* Prepare to wait on uaddr. */ ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); if (ret) @@ -1822,9 +1771,14 @@ static int futex_wait(u32 __user *uaddr, int fshared, goto out_put_key; /* - * We expect signal_pending(current), but another thread may - * have handled it for us already. + * We expect signal_pending(current), but we might be the + * victim of a spurious wakeup as well. 
*/ + if (!signal_pending(current)) { + put_futex_key(fshared, &q.key); + goto retry; + } + ret = -ERESTARTSYS; if (!abs_time) goto out_put_key; @@ -2131,9 +2085,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, */ plist_del(&q->list, &q->list.plist); + /* Handle spurious wakeups gracefully */ + ret = -EAGAIN; if (timeout && !timeout->task) ret = -ETIMEDOUT; - else + else if (signal_pending(current)) ret = -ERESTARTNOINTR; } return ret; @@ -2215,6 +2171,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, q.bitset = bitset; q.rt_waiter = &rt_waiter; +retry: key2 = FUTEX_KEY_INIT; ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE); if (unlikely(ret != 0)) @@ -2307,6 +2264,9 @@ out_put_keys: out_key2: put_futex_key(fshared, &key2); + /* Spurious wakeup ? */ + if (ret == -EAGAIN) + goto retry; out: if (to) { hrtimer_cancel(&to->timer); diff --git a/kernel/softirq.c b/kernel/softirq.c index 352697636f0a..590049c17dc8 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -1139,9 +1139,7 @@ static const char *softirq_names [] = [NET_RX_SOFTIRQ] = "net-rx", [BLOCK_SOFTIRQ] = "block", [TASKLET_SOFTIRQ] = "tasklet", -#ifdef CONFIG_HIGH_RES_TIMERS [HRTIMER_SOFTIRQ] = "hrtimer", -#endif [RCU_SOFTIRQ] = "rcu", }; @@ -1161,8 +1159,6 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb, per_cpu(ksoftirqd, hotcpu)[i].tsk = NULL; } for (i = 0; i < NR_SOFTIRQS; i++) { - if (!softirq_names[i]) - continue; p = kthread_create(ksoftirqd, &per_cpu(ksoftirqd, hotcpu)[i], "sirq-%s/%d", softirq_names[i], @@ -1179,11 +1175,8 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb, break; case CPU_ONLINE: case CPU_ONLINE_FROZEN: - for (i = 0; i < NR_SOFTIRQS; i++) { - p = per_cpu(ksoftirqd, hotcpu)[i].tsk; - if (p) - wake_up_process(p); - } + for (i = 0; i < NR_SOFTIRQS; i++) + wake_up_process(per_cpu(ksoftirqd, hotcpu)[i].tsk); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_UP_CANCELED: @@ -1197,11 +1190,9 @@ static int __cpuinit 
cpu_callback(struct notifier_block *nfb, for (i = 0; i < NR_SOFTIRQS; i++) { param.sched_priority = MAX_RT_PRIO-1; p = per_cpu(ksoftirqd, hotcpu)[i].tsk; - if (p) { - sched_setscheduler(p, SCHED_FIFO, &param); - per_cpu(ksoftirqd, hotcpu)[i].tsk = NULL; - kthread_stop(p); - } + sched_setscheduler(p, SCHED_FIFO, &param); + per_cpu(ksoftirqd, hotcpu)[i].tsk = NULL; + kthread_stop(p); } takeover_tasklets(hotcpu); break; |