author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2017-07-25 09:59:08 -0700
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2017-07-25 09:59:08 -0700
commit    931ab4a5ce2d93cb434dc7fc067a702117453b13 (patch)
tree      7135ba1e9b24886af8b42f7d20297507b33f7155 /include/linux/spinlock.h
parent    520eccdfe187591a51ea9ab4c1a024ae4d0f68d9 (diff)
atomics: Revert addition of comment header to spin_unlock_wait()
There is still considerable confusion as to the semantics of spin_unlock_wait(), but there seems to be universal agreement that it is not that of a lock/unlock pair. This commit therefore removes the comment added by 6016ffc3874d ("atomics: Add header comment so spin_unlock_wait()") in order to prevent at least that flavor of confusion.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
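For reference, the removed header described spin_unlock_wait() as semantically equivalent to an immediate lock/unlock pair, which is precisely the reading this commit disavows. A minimal sketch of that disavowed reading, assuming only <linux/spinlock.h>; the helper name is hypothetical and appears nowhere in this commit:

	/* The lock/unlock-pair reading that, per this commit, is NOT
	 * the agreed semantics of spin_unlock_wait(). */
	static inline void spin_unlock_wait_as_pair(spinlock_t *lock)
	{
		spin_lock(lock);	/* wait until any current holder releases */
		spin_unlock(lock);	/* then release, writing to the lock word */
	}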
Diffstat (limited to 'include/linux/spinlock.h')
 include/linux/spinlock.h | 20 --------------------
 1 file changed, 0 insertions(+), 20 deletions(-)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index d9510e8522d4..59248dcc6ef3 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -369,26 +369,6 @@ static __always_inline int spin_trylock_irq(spinlock_t *lock)
 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
 })
 
-/**
- * spin_unlock_wait - Interpose between successive critical sections
- * @lock: the spinlock whose critical sections are to be interposed.
- *
- * Semantically this is equivalent to a spin_lock() immediately
- * followed by a spin_unlock(). However, most architectures have
- * more efficient implementations in which the spin_unlock_wait()
- * cannot block concurrent lock acquisition, and in some cases
- * where spin_unlock_wait() does not write to the lock variable.
- * Nevertheless, spin_unlock_wait() can have high overhead, so if
- * you feel the need to use it, please check to see if there is
- * a better way to get your job done.
- *
- * The ordering guarantees provided by spin_unlock_wait() are:
- *
- * 1. All accesses preceding the spin_unlock_wait() happen before
- * any accesses in later critical sections for this same lock.
- * 2. All accesses following the spin_unlock_wait() happen after
- * any accesses in earlier critical sections for this same lock.
- */
 static __always_inline void spin_unlock_wait(spinlock_t *lock)
 {
 	raw_spin_unlock_wait(&lock->rlock);
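The two numbered guarantees in the removed header are what the classic flag-then-wait handoff pattern relied on. A hedged sketch of that pattern, not taken from this commit; all names (my_flag, my_lock, my_obj, do_cleanup) are hypothetical:

	static bool my_flag;
	static DEFINE_SPINLOCK(my_lock);

	static void retire_obj(struct my_obj *obj)
	{
		WRITE_ONCE(my_flag, true);	/* guarantee 1: visible to any
						 * later critical section */
		spin_unlock_wait(&my_lock);	/* wait out the current holder */
		do_cleanup(obj);		/* guarantee 2: ordered after all
						 * earlier critical sections */
	}

Given the confusion over semantics that the log describes, and the removed comment's own warning about overhead, converting such code to a plain spin_lock()/spin_unlock() pair was often the safer choice.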