path: root/kernel/locking/spinlock.c
author	Will Deacon <will.deacon@arm.com>	2017-11-28 18:42:18 +0000
committer	Ingo Molnar <mingo@kernel.org>	2017-12-12 11:24:01 +0100
commit	f87f3a328dbbb3e79dd53e7e889ced9222512649 (patch)
tree	19436c896d80425dff360c8e97d4d7d8286643c4 /kernel/locking/spinlock.c
parent	50c4c4e268a2d7a3e58ebb698ac74da0de40ae36 (diff)
locking/core: Fix deadlock during boot on systems with GENERIC_LOCKBREAK
Commit a8a217c22116 ("locking/core: Remove {read,spin,write}_can_lock()")
removed the definition of raw_spin_can_lock(), causing the GENERIC_LOCKBREAK
spin_lock() routines to poll the ->break_lock field when waiting on a lock.

This has been reported to cause a deadlock during boot on s390, because
the ->break_lock field is also set by the waiters, and can potentially
remain set indefinitely if no other CPUs come in to take the lock after
it has been released.

This patch removes the explicit spinning on ->break_lock from the waiters,
instead relying on the outer trylock() operation to determine when the
lock is available.

Reported-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Tested-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: a8a217c22116 ("locking/core: Remove {read,spin,write}_can_lock()")
Link: http://lkml.kernel.org/r/1511894539-7988-2-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
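To make the failure mode concrete, here is a hand-expanded sketch of the
pre-patch GENERIC_LOCKBREAK slow path for raw spinlocks. It is an illustration
only: the real routines are generated by the BUILD_LOCK_OPS() macro shown in
the diff below, and this expansion approximates what that macro produces.

/*
 * Illustrative hand expansion (not the literal generated source) of the
 * pre-patch GENERIC_LOCKBREAK slow path for raw spinlocks.
 */
void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
{
	for (;;) {
		preempt_disable();
		if (likely(do_raw_spin_trylock(lock)))
			break;				/* lock acquired */
		preempt_enable();

		if (!(lock)->break_lock)
			(lock)->break_lock = 1;		/* set by the waiter itself */
		while ((lock)->break_lock)		/* polls a flag that only a */
			arch_spin_relax(&lock->raw_lock); /* successful acquirer clears */
	}
	(lock)->break_lock = 0;				/* cleared only after acquisition */
}

If the owner drops the lock and no other CPU subsequently acquires it, nothing
ever clears ->break_lock, so the waiter stays in the inner while loop and never
re-attempts the trylock: the boot-time hang observed on s390.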
Diffstat (limited to 'kernel/locking/spinlock.c')
-rw-r--r--	kernel/locking/spinlock.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 1fd1a7543cdd..0ebb253e2199 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -68,8 +68,8 @@ void __lockfunc __raw_##op##_lock(locktype##_t *lock)		\
 									\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		while ((lock)->break_lock)				\
-			arch_##op##_relax(&lock->raw_lock);		\
+									\
+		arch_##op##_relax(&lock->raw_lock);			\
 	}								\
 	(lock)->break_lock = 0;						\
 }									\
@@ -88,8 +88,8 @@ unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)	\
 									\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		while ((lock)->break_lock)				\
-			arch_##op##_relax(&lock->raw_lock);		\
+									\
+		arch_##op##_relax(&lock->raw_lock);			\
 	}								\
 	(lock)->break_lock = 0;						\
 	return flags;							\
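For comparison, the same hand-expanded sketch after this patch (again an
approximation of the macro expansion, not the literal source): the waiter
relaxes once per iteration and then loops straight back to the trylock, so it
observes a released lock regardless of the ->break_lock value.

/*
 * Illustrative hand expansion of the post-patch slow path: ->break_lock is
 * still set to signal contention, but no longer polled by the waiter.
 */
void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
{
	for (;;) {
		preempt_disable();
		if (likely(do_raw_spin_trylock(lock)))
			break;				/* lock acquired */
		preempt_enable();

		if (!(lock)->break_lock)
			(lock)->break_lock = 1;		/* signal contention */

		arch_spin_relax(&lock->raw_lock);	/* back off, then retry the trylock */
	}
	(lock)->break_lock = 0;
}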