author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2009-01-14 15:36:26 +0100
committer  Ingo Molnar <mingo@elte.hu>               2009-01-14 18:09:00 +0100
commit     41719b03091911028116155deddc5eedf8c45e37 (patch)
tree       20a699807d78bc0af86b19443dc751415c0cc6f7 /kernel/mutex.c
parent     93d81d1aca26e64a75d06a85f7e128b5f49053e7 (diff)
download   lwn-41719b03091911028116155deddc5eedf8c45e37.tar.gz
           lwn-41719b03091911028116155deddc5eedf8c45e37.zip
mutex: preemption fixes
The problem is that dropping the spinlock right before schedule() is a
voluntary preemption point and can cause a schedule, right after which we
schedule again.

Fix this inefficiency by keeping preemption disabled until we schedule; do
this by explicitly disabling preemption and providing a schedule() variant
that assumes preemption is already disabled.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/mutex.c')
-rw-r--r--  kernel/mutex.c  5
1 file changed, 4 insertions(+), 1 deletion(-)
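For context on the "voluntary preemption point" mentioned in the commit
message: on CONFIG_PREEMPT kernels, the preempt_enable() performed while
releasing the wait_lock re-checks whether a reschedule is pending and may
schedule right there, immediately before the explicit schedule() call in
the mutex slowpath. The following is a simplified sketch of that mechanism,
not the exact macro bodies from this tree:

/*
 * Simplified sketch (abbreviated, not verbatim kernel source): enabling
 * preemption while TIF_NEED_RESCHED is set immediately invokes the
 * scheduler, because preempt_enable() re-checks the resched flag once
 * the preempt count drops back to zero.
 */
#define preempt_enable()					\
do {								\
	preempt_enable_no_resched();				\
	barrier();						\
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))	\
		preempt_schedule();	/* schedules inside the unlock */ \
} while (0)

The mutex slowpath then calls schedule() again right afterwards; that
back-to-back scheduling is what the patch avoids by keeping the preempt
count above zero across the unlock.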
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 357c6d221efe..524ffc33dc05 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -131,6 +131,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
struct mutex_waiter waiter;
unsigned long flags;
+ preempt_disable();
spin_lock_mutex(&lock->wait_lock, flags);
debug_mutex_lock_common(lock, &waiter);
@@ -170,13 +171,14 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
spin_unlock_mutex(&lock->wait_lock, flags);
debug_mutex_free_waiter(&waiter);
+ preempt_enable();
return -EINTR;
}
__set_task_state(task, state);
/* didnt get the lock, go to sleep: */
spin_unlock_mutex(&lock->wait_lock, flags);
- schedule();
+ __schedule();
spin_lock_mutex(&lock->wait_lock, flags);
}
@@ -193,6 +195,7 @@ done:
spin_unlock_mutex(&lock->wait_lock, flags);
debug_mutex_free_waiter(&waiter);
+ preempt_enable();
return 0;
}
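The hunks above only cover kernel/mutex.c; the __schedule() variant they
call comes from a companion change to the scheduler that is not part of
this diff. A hedged sketch of what such a split could look like, following
the commit message's description of "a schedule() variant that assumes
preemption is already disabled" (the details here are an assumption, not
the actual sched.c hunk):

/*
 * Sketch only: schedule() keeps its old external behaviour by disabling
 * preemption around a core __schedule() that expects the caller to have
 * already done so, which is what the mutex slowpath above relies on.
 */
asmlinkage void __sched __schedule(void)
{
	/* core scheduler work: pick the next task, context switch, ... */
}

asmlinkage void __sched schedule(void)
{
	preempt_disable();		/* callers with preemption enabled */
	__schedule();			/* core expects preemption off */
	preempt_enable_no_resched();	/* avoid recursing into schedule() */
}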