summaryrefslogtreecommitdiff
path: root/lib/kernel_lock.c
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2009-07-24 20:48:53 +0200
committerThomas Gleixner <tglx@linutronix.de>2009-07-27 21:07:15 +0200
commit34ca9f9b51cddfcea87ba7aef37e59b55c95f444 (patch)
tree3b83f8668db962f2c0b7577c71a89565ffb6c609 /lib/kernel_lock.c
parentf7c0160ac66e32cbf383e3443657e03e046353f0 (diff)
downloadlwn-34ca9f9b51cddfcea87ba7aef37e59b55c95f444.tar.gz
lwn-34ca9f9b51cddfcea87ba7aef37e59b55c95f444.zip
spinlocks: Create atomic_spinlock and convert rq->lock
atomic_spinlock_t will be used to annotate locks which are not converted to sleeping spinlocks on preempt-rt. rq->lock must be converted right away, as rq->lock is handled by a few raw lock operations. Also fix the plist implementation so debugging can handle both spinlocks and atomic_spinlocks. Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'lib/kernel_lock.c')
-rw-r--r-- lib/kernel_lock.c | 4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 39f1029e3525..67b7217d14a0 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -20,7 +20,7 @@
*
* Don't use in new code.
*/
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
+static __cacheline_aligned_in_smp DEFINE_ATOMIC_SPINLOCK(kernel_flag);
/*
@@ -79,7 +79,7 @@ static inline void __lock_kernel(void)
*/
do {
preempt_enable();
- while (spin_is_locked(&kernel_flag))
+ while (atomic_spin_is_locked(&kernel_flag))
cpu_relax();
preempt_disable();
} while (!_raw_spin_trylock(&kernel_flag));