author		Mark Rutland <mark.rutland@arm.com>	2021-05-25 15:02:12 +0100
committer	Peter Zijlstra <peterz@infradead.org>	2021-05-26 13:20:50 +0200
commit		96d330aff7060f0882a5440ddb281cc3ab232d96 (patch)
tree		63643bd5a4e11de8086b500d67fde65beeb43c6f /arch/alpha/include/asm/cmpxchg.h
parent		82b993e8249ae3cb29c1b6eb8f6548f5748508b7 (diff)
locking/atomic: alpha: move to ARCH_ATOMIC
We'd like all architectures to convert to ARCH_ATOMIC, as once all
architectures are converted it will be possible to make significant
cleanups to the atomics headers, and this will make it much easier to
generically enable atomic functionality (e.g. debug logic in the
instrumented wrappers).

As a step towards that, this patch migrates alpha to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.

Note: xchg_local() is NOT currently part of the generic atomic API, and
is not instrumented.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-14-mark.rutland@arm.com
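For context, under ARCH_ATOMIC the architecture supplies only the
arch_*() operations, and a generated common header layers sanitizer
instrumentation on top of them to provide the familiar un-prefixed
names. A rough sketch of that wrapping, simplified from the generated
include/linux/atomic-instrumented.h (the real header covers many more
operations and ordering variants):

	/*
	 * Simplified sketch of the common-code wrapper: report the
	 * access to KASAN/KCSAN, then defer to the arch implementation.
	 */
	#define cmpxchg(ptr, old, new)					\
	({								\
		typeof(ptr) __ai_ptr = (ptr);				\
		instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
		arch_cmpxchg(__ai_ptr, (old), (new));			\
	})

This is why the patch below is a pure rename on the arch side: once
alpha defines arch_cmpxchg() and friends, the generic layer takes over
the un-prefixed names.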
Diffstat (limited to 'arch/alpha/include/asm/cmpxchg.h')
-rw-r--r--	arch/alpha/include/asm/cmpxchg.h | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 6c7c39452471..6e0a850aa9d3 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -17,7 +17,7 @@
 			       sizeof(*(ptr)));			\
 })
 
-#define cmpxchg_local(ptr, o, n)					\
+#define arch_cmpxchg_local(ptr, o, n)					\
 ({									\
 	__typeof__(*(ptr)) _o_ = (o);					\
 	__typeof__(*(ptr)) _n_ = (n);					\
@@ -26,7 +26,7 @@
 			  sizeof(*(ptr)));				\
 })
 
-#define cmpxchg64_local(ptr, o, n)					\
+#define arch_cmpxchg64_local(ptr, o, n)					\
 ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
 	cmpxchg_local((ptr), (o), (n));					\
@@ -42,7 +42,7 @@
  * The leading and the trailing memory barriers guarantee that these
  * operations are fully ordered.
  */
-#define xchg(ptr, x)							\
+#define arch_xchg(ptr, x)						\
 ({									\
 	__typeof__(*(ptr)) __ret;					\
 	__typeof__(*(ptr)) _x_ = (x);					\
@@ -53,7 +53,7 @@
 	__ret;								\
 })
 
-#define cmpxchg(ptr, o, n)						\
+#define arch_cmpxchg(ptr, o, n)						\
 ({									\
 	__typeof__(*(ptr)) __ret;					\
 	__typeof__(*(ptr)) _o_ = (o);					\
@@ -65,10 +65,10 @@
 	__ret;								\
 })
 
-#define cmpxchg64(ptr, o, n)						\
+#define arch_cmpxchg64(ptr, o, n)					\
 ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	cmpxchg((ptr), (o), (n));					\
+	arch_cmpxchg((ptr), (o), (n));					\
 })
 
 #undef ____cmpxchg
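Callers elsewhere in the tree are unaffected by the rename: they keep
using the un-prefixed names, which the common layer now resolves to
these arch_*() macros. A hypothetical caller, for illustration only:

	/*
	 * Hypothetical example: generic code still writes cmpxchg();
	 * on alpha this now expands through the common wrapper to the
	 * arch_cmpxchg() defined above.
	 */
	static bool claim_slot(unsigned long *slot, unsigned long id)
	{
		/* succeed only if we atomically moved the slot from 0 to id */
		return cmpxchg(slot, 0UL, id) == 0UL;
	}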