author	Nick Piggin <npiggin@suse.de>	2009-12-15 16:47:30 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-16 07:20:09 -0800
commit	d4212093dca95c1f52197017d969cc66d5d962aa (patch)
tree	f52484ae3c7b3034715b5745ae2b7db3906ab022 /ipc
parent	9cad200c7686708b326520a45dd680a4147568a6 (diff)
ipc/sem.c: sem preempt improve
The strange sysv semaphore wakeup scheme has a kind of busy-wait lock involved, which could deadlock if preemption is enabled during the "lock".

That preemption happens to be disabled at that point is only an implementation detail (a spinlock is held). If spinlocks are made preemptible, or if the sem lock is ever changed to a sleeping lock, the wakeup becomes buggy, so this might be a bugfix for -rt kernels.

Imagine the waker being preempted by the wakee and never clearing IN_WAKEUP: if the wakee has higher RT priority, there is a priority inversion deadlock. Even without a priority inversion to cause a deadlock, there is still time wasted spinning.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: Pierre Peiffer <peifferp@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
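For context, the busy-wait the message refers to is on the sleeper's side of the handshake: the woken task polls q.status until the waker replaces IN_WAKEUP with the real result. Paraphrased from the semtimedop() path of ipc/sem.c of this era (not part of this patch), it looks roughly like this:

	error = q.status;
	while (unlikely(error == IN_WAKEUP)) {
		cpu_relax();	/* waker has marked us but not yet published the result */
		error = q.status;
	}

If the waker is preempted between its two writes to q->status, this loop spins for the whole preemption; with a higher-priority RT wakee on the same CPU it never exits at all, which is the inversion described above.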
Diffstat (limited to 'ipc')
-rw-r--r--	ipc/sem.c	38
1 file changed, 23 insertions(+), 15 deletions(-)
diff --git a/ipc/sem.c b/ipc/sem.c
index d377b3adfc3d..2705fbbc4374 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -398,6 +398,27 @@ undo:
return result;
}
+/*
+ * Wake up a process waiting on the sem queue with a given error.
+ * The queue is invalid (may not be accessed) after the function returns.
+ */
+static void wake_up_sem_queue(struct sem_queue *q, int error)
+{
+ /*
+ * Hold preempt off so that we don't get preempted and have the
+ * wakee busy-wait until we're scheduled back on. We're holding
+ * locks here so it may not strictly be needed, however if the
+ * locks become preemptible then this prevents such a problem.
+ */
+ preempt_disable();
+ q->status = IN_WAKEUP;
+ wake_up_process(q->sleeper);
+ /* hands-off: q can disappear immediately after writing q->status. */
+ smp_wmb();
+ q->status = error;
+ preempt_enable();
+}
+
/* Go through the pending queue for the indicated semaphore
* looking for tasks that can be completed.
*/
@@ -429,17 +450,7 @@ again:
* continue.
*/
alter = q->alter;
-
- /* wake up the waiting thread */
- q->status = IN_WAKEUP;
-
- wake_up_process(q->sleeper);
- /* hands-off: q will disappear immediately after
- * writing q->status.
- */
- smp_wmb();
- q->status = error;
-
+ wake_up_sem_queue(q, error);
if (alter)
goto again;
}
@@ -523,10 +534,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
list_del(&q->list);
- q->status = IN_WAKEUP;
- wake_up_process(q->sleeper); /* doesn't sleep */
- smp_wmb();
- q->status = -EIDRM; /* hands-off q */
+ wake_up_sem_queue(q, -EIDRM);
}
/* Remove the semaphore set from the IDR */
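As a self-contained illustration of why the window between the two status writes matters, here is a small user-space model of the handshake. It is a toy sketch, not kernel code; the file name, the pthread setup, and the sentinel value are purely illustrative. The longer the "waker" is delayed between marking IN_WAKEUP and storing the final status, the longer the "waiter" spins -- that delay is exactly what preempt_disable() bounds in wake_up_sem_queue().

/* toy_in_wakeup.c - user-space sketch of the IN_WAKEUP handshake.
 * Build with: cc -pthread toy_in_wakeup.c
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define IN_WAKEUP	1

static atomic_int status = -2;		/* -2: still "sleeping" on the queue */

static void *waiter(void *arg)
{
	int err;

	/* Stand-in for schedule(): wait until the waker marks the queue. */
	while ((err = atomic_load(&status)) == -2)
		sched_yield();

	/* The busy-wait described in the commit message. */
	while (err == IN_WAKEUP) {
		sched_yield();		/* cpu_relax() in the kernel */
		err = atomic_load(&status);
	}
	printf("waiter saw status %d\n", err);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);

	/* Waker side: mark the queue, then publish the result.  The usleep()
	 * models the waker being preempted in between; the waiter spins for
	 * that entire time. */
	atomic_store(&status, IN_WAKEUP);
	usleep(10 * 1000);
	atomic_store(&status, 0);

	pthread_join(t, NULL);
	return 0;
}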