author | Arnd Bergmann <arnd@arndb.de> | 2020-10-19 09:09:21 +0200
---|---|---
committer | Arnd Bergmann <arnd@arndb.de> | 2020-10-26 20:19:48 +0100
commit | f44ca0871b7a98b075560711d48849914a102221 |
tree | 40dbcc672bdb5d9248133cd637653ff812a6563b | /include/asm-generic
parent | 6f6573a4044adefbd07f1bd951a2041150e888d7 |
qspinlock: use signed temporaries for cmpxchg
When building with W=2, the build log is flooded with
include/asm-generic/qrwlock.h:65:56: warning: pointer targets in passing argument 2 of 'atomic_try_cmpxchg_acquire' differ in signedness [-Wpointer-sign]
include/asm-generic/qrwlock.h:92:53: warning: pointer targets in passing argument 2 of 'atomic_try_cmpxchg_acquire' differ in signedness [-Wpointer-sign]
include/asm-generic/qspinlock.h:68:55: warning: pointer targets in passing argument 2 of 'atomic_try_cmpxchg_acquire' differ in signedness [-Wpointer-sign]
include/asm-generic/qspinlock.h:82:52: warning: pointer targets in passing argument 2 of 'atomic_try_cmpxchg_acquire' differ in signedness [-Wpointer-sign]
The atomics are built on top of signed integers: atomic_t wraps a plain int, so atomic_try_cmpxchg_acquire() takes its expected value through an 'int *'. The callers here don't actually care about the signedness of the temporary, so just use signed types as well.
Fixes: 27df89689e25 ("locking/spinlocks: Remove an instruction from spin and write locks")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
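For context on where the 'int *' comes from: the generic fallback behind the try_cmpxchg helpers is, paraphrased and simplified (not the verbatim in-tree code, which is generated into the atomic headers), roughly the following. atomic_t wraps a plain int, and on failure the helper writes the observed value back through the old pointer.

static inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
	int r, o = *old;	/* the caller's expected value arrives via an 'int *' */

	r = atomic_cmpxchg_acquire(v, o, new);
	if (r != o)
		*old = r;	/* tell the caller what value was actually seen */
	return r == o;
}

A caller that declares 'u32 val' therefore passes an 'unsigned int *' where the prototype wants an 'int *', which is exactly what -Wpointer-sign (enabled at W=2) reports.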
Diffstat (limited to 'include/asm-generic')
-rw-r--r-- | include/asm-generic/qrwlock.h | 8
-rw-r--r-- | include/asm-generic/qspinlock.h | 4
2 files changed, 6 insertions, 6 deletions
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 3aefde23dcea..84ce841ce735 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -37,7 +37,7 @@ extern void queued_write_lock_slowpath(struct qrwlock *lock);
  */
 static inline int queued_read_trylock(struct qrwlock *lock)
 {
-	u32 cnts;
+	int cnts;
 
 	cnts = atomic_read(&lock->cnts);
 	if (likely(!(cnts & _QW_WMASK))) {
@@ -56,7 +56,7 @@ static inline int queued_read_trylock(struct qrwlock *lock)
  */
 static inline int queued_write_trylock(struct qrwlock *lock)
 {
-	u32 cnts;
+	int cnts;
 
 	cnts = atomic_read(&lock->cnts);
 	if (unlikely(cnts))
@@ -71,7 +71,7 @@ static inline int queued_write_trylock(struct qrwlock *lock)
  */
 static inline void queued_read_lock(struct qrwlock *lock)
 {
-	u32 cnts;
+	int cnts;
 
 	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 	if (likely(!(cnts & _QW_WMASK)))
@@ -87,7 +87,7 @@ static inline void queued_read_lock(struct qrwlock *lock)
  */
 static inline void queued_write_lock(struct qrwlock *lock)
 {
-	u32 cnts = 0;
+	int cnts = 0;
 	/* Optimize for the unfair lock case where the fair flag is 0. */
 	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
 		return;
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 4fe7fd0fe834..d74b13825501 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -60,7 +60,7 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
  */
 static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 {
-	u32 val = atomic_read(&lock->val);
+	int val = atomic_read(&lock->val);
 
 	if (unlikely(val))
 		return 0;
@@ -77,7 +77,7 @@ extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
  */
 static __always_inline void queued_spin_lock(struct qspinlock *lock)
 {
-	u32 val = 0;
+	int val = 0;
 
 	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
 		return;
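The mismatch can also be reproduced outside a kernel tree. The sketch below is purely hypothetical demo code (the atomic_t stand-in, the simplified helper, and trylock() are approximations, not the kernel's implementation), but it triggers the same diagnostic:

#include <stdatomic.h>
#include <stdbool.h>

typedef unsigned int u32;				/* like the kernel typedef */
typedef struct { _Atomic int counter; } atomic_t;	/* stand-in for the kernel type */

/* Simplified helper with the same parameter types as the kernel's. */
static bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
	return atomic_compare_exchange_strong_explicit(&v->counter, old, new,
			memory_order_acquire, memory_order_relaxed);
}

int trylock(atomic_t *lock)
{
	u32 val = 0;	/* declaring 'int val = 0;' instead makes the warning go away */

	/* '&val' is an 'unsigned int *' but the parameter is 'int *';
	 * gcc -Wpointer-sign warns here, matching the W=2 build log. */
	return atomic_try_cmpxchg_acquire(lock, &val, 1);
}

Compiling this with 'gcc -Wpointer-sign -c' produces the same "pointer targets ... differ in signedness" warning; switching the temporary to int, as this patch does, makes the call match the prototype with no change in behavior, since u32 and int have the same size and representation on all architectures Linux supports.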