Diffstat (limited to 'arch/s390/include/asm/cmpxchg.h')
-rw-r--r-- | arch/s390/include/asm/cmpxchg.h | 374
1 file changed, 220 insertions, 154 deletions
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index aae0315374de..a9e2006033b7 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -11,185 +11,231 @@
 #include <linux/mmdebug.h>
 #include <linux/types.h>
 #include <linux/bug.h>
+#include <asm/asm.h>
 
-void __xchg_called_with_bad_pointer(void);
+void __cmpxchg_called_with_bad_pointer(void);
+
+static __always_inline u32 __cs_asm(u64 ptr, u32 old, u32 new)
+{
+	asm volatile(
+		"	cs	%[old],%[new],%[ptr]\n"
+		: [old] "+d" (old), [ptr] "+Q" (*(u32 *)ptr)
+		: [new] "d" (new)
+		: "memory", "cc");
+	return old;
+}
+
+static __always_inline u64 __csg_asm(u64 ptr, u64 old, u64 new)
+{
+	asm volatile(
+		"	csg	%[old],%[new],%[ptr]\n"
+		: [old] "+d" (old), [ptr] "+QS" (*(u64 *)ptr)
+		: [new] "d" (new)
+		: "memory", "cc");
+	return old;
+}
 
-static __always_inline unsigned long
-__arch_xchg(unsigned long x, unsigned long address, int size)
+static inline u8 __arch_cmpxchg1(u64 ptr, u8 old, u8 new)
 {
-	unsigned long old;
-	int shift;
+	union {
+		u8 b[4];
+		u32 w;
+	} old32, new32;
+	u32 prev;
+	int i;
+
+	i = ptr & 3;
+	ptr &= ~0x3;
+	prev = READ_ONCE(*(u32 *)ptr);
+	do {
+		old32.w = prev;
+		if (old32.b[i] != old)
+			return old32.b[i];
+		new32.w = old32.w;
+		new32.b[i] = new;
+		prev = __cs_asm(ptr, old32.w, new32.w);
+	} while (prev != old32.w);
+	return old;
+}
+
+static inline u16 __arch_cmpxchg2(u64 ptr, u16 old, u16 new)
+{
+	union {
+		u16 b[2];
+		u32 w;
+	} old32, new32;
+	u32 prev;
+	int i;
+
+	i = (ptr & 3) >> 1;
+	ptr &= ~0x3;
+	prev = READ_ONCE(*(u32 *)ptr);
+	do {
+		old32.w = prev;
+		if (old32.b[i] != old)
+			return old32.b[i];
+		new32.w = old32.w;
+		new32.b[i] = new;
+		prev = __cs_asm(ptr, old32.w, new32.w);
+	} while (prev != old32.w);
+	return old;
+}
 
+static __always_inline u64 __arch_cmpxchg(u64 ptr, u64 old, u64 new, int size)
+{
 	switch (size) {
-	case 1:
-		shift = (3 ^ (address & 3)) << 3;
-		address ^= address & 3;
-		asm volatile(
-			"	l	%0,%1\n"
-			"0:	lr	0,%0\n"
-			"	nr	0,%3\n"
-			"	or	0,%2\n"
-			"	cs	%0,0,%1\n"
-			"	jl	0b\n"
-			: "=&d" (old), "+Q" (*(int *) address)
-			: "d" ((x & 0xff) << shift), "d" (~(0xff << shift))
-			: "memory", "cc", "0");
-		return old >> shift;
-	case 2:
-		shift = (2 ^ (address & 2)) << 3;
-		address ^= address & 2;
-		asm volatile(
-			"	l	%0,%1\n"
-			"0:	lr	0,%0\n"
-			"	nr	0,%3\n"
-			"	or	0,%2\n"
-			"	cs	%0,0,%1\n"
-			"	jl	0b\n"
-			: "=&d" (old), "+Q" (*(int *) address)
-			: "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift))
-			: "memory", "cc", "0");
-		return old >> shift;
-	case 4:
-		asm volatile(
-			"	l	%0,%1\n"
-			"0:	cs	%0,%2,%1\n"
-			"	jl	0b\n"
-			: "=&d" (old), "+Q" (*(int *) address)
-			: "d" (x)
-			: "memory", "cc");
-		return old;
-	case 8:
-		asm volatile(
-			"	lg	%0,%1\n"
-			"0:	csg	%0,%2,%1\n"
-			"	jl	0b\n"
-			: "=&d" (old), "+QS" (*(long *) address)
-			: "d" (x)
-			: "memory", "cc");
-		return old;
+	case 1:	 return __arch_cmpxchg1(ptr, old & 0xff, new & 0xff);
+	case 2:	 return __arch_cmpxchg2(ptr, old & 0xffff, new & 0xffff);
+	case 4:	 return __cs_asm(ptr, old & 0xffffffff, new & 0xffffffff);
+	case 8:	 return __csg_asm(ptr, old, new);
+	default: __cmpxchg_called_with_bad_pointer();
 	}
-	__xchg_called_with_bad_pointer();
-	return x;
+	return old;
 }
 
-#define arch_xchg(ptr, x)						\
+#define arch_cmpxchg(ptr, o, n)						\
 ({									\
-	__typeof__(*(ptr)) __ret;					\
+	(__typeof__(*(ptr)))__arch_cmpxchg((unsigned long)(ptr),	\
+					   (unsigned long)(o),		\
+					   (unsigned long)(n),		\
+					   sizeof(*(ptr)));		\
+})
+
+#define arch_cmpxchg64		arch_cmpxchg
+#define arch_cmpxchg_local	arch_cmpxchg
+#define arch_cmpxchg64_local	arch_cmpxchg
+
+#ifdef __HAVE_ASM_FLAG_OUTPUTS__
+
+#define arch_try_cmpxchg(ptr, oldp, new)				\
+({									\
+	__typeof__(ptr) __oldp = (__typeof__(ptr))(oldp);		\
+	__typeof__(*(ptr)) __old = *__oldp;				\
+	__typeof__(*(ptr)) __new = (new);				\
+	__typeof__(*(ptr)) __prev;					\
+	int __cc;							\
 									\
-	__ret = (__typeof__(*(ptr)))					\
-		__arch_xchg((unsigned long)(x), (unsigned long)(ptr),	\
-			    sizeof(*(ptr)));				\
-	__ret;								\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+	case 2: {							\
+		__prev = arch_cmpxchg((ptr), (__old), (__new));		\
+		__cc = (__prev != __old);				\
+		if (unlikely(__cc))					\
+			*__oldp = __prev;				\
+		break;							\
+	}								\
+	case 4: {							\
+		asm volatile(						\
+			"	cs	%[__old],%[__new],%[__ptr]\n"	\
+			: [__old] "+d" (*__oldp),			\
+			  [__ptr] "+Q" (*(ptr)),			\
+			  "=@cc" (__cc)					\
+			: [__new] "d" (__new)				\
+			: "memory");					\
+		break;							\
+	}								\
+	case 8: {							\
+		asm volatile(						\
+			"	csg	%[__old],%[__new],%[__ptr]\n"	\
+			: [__old] "+d" (*__oldp),			\
+			  [__ptr] "+QS" (*(ptr)),			\
+			  "=@cc" (__cc)					\
+			: [__new] "d" (__new)				\
+			: "memory");					\
+		break;							\
+	}								\
+	default:							\
+		__cmpxchg_called_with_bad_pointer();			\
+	}								\
+	likely(__cc == 0);						\
 })
 
-void __cmpxchg_called_with_bad_pointer(void);
+#else /* __HAVE_ASM_FLAG_OUTPUTS__ */
 
-static __always_inline unsigned long __cmpxchg(unsigned long address,
-					       unsigned long old,
-					       unsigned long new, int size)
+#define arch_try_cmpxchg(ptr, oldp, new)				\
+({									\
+	__typeof__((ptr)) __oldp = (__typeof__(ptr))(oldp);		\
+	__typeof__(*(ptr)) __old = *__oldp;				\
+	__typeof__(*(ptr)) __new = (new);				\
+	__typeof__(*(ptr)) __prev;					\
+									\
+	__prev = arch_cmpxchg((ptr), (__old), (__new));			\
+	if (unlikely(__prev != __old))					\
+		*__oldp = __prev;					\
+	likely(__prev == __old);					\
+})
+
+#endif /* __HAVE_ASM_FLAG_OUTPUTS__ */
+
+#define arch_try_cmpxchg64		arch_try_cmpxchg
+#define arch_try_cmpxchg_local		arch_try_cmpxchg
+#define arch_try_cmpxchg64_local	arch_try_cmpxchg
+
+void __xchg_called_with_bad_pointer(void);
+
+static inline u8 __arch_xchg1(u64 ptr, u8 x)
+{
+	int shift = (3 ^ (ptr & 3)) << 3;
+	u32 mask, old, new;
+
+	ptr &= ~0x3;
+	mask = ~(0xff << shift);
+	old = READ_ONCE(*(u32 *)ptr);
+	do {
+		new = old & mask;
+		new |= x << shift;
+	} while (!arch_try_cmpxchg((u32 *)ptr, &old, new));
+	return old >> shift;
+}
+
+static inline u16 __arch_xchg2(u64 ptr, u16 x)
+{
+	int shift = (2 ^ (ptr & 2)) << 3;
+	u32 mask, old, new;
+
+	ptr &= ~0x3;
+	mask = ~(0xffff << shift);
+	old = READ_ONCE(*(u32 *)ptr);
+	do {
+		new = old & mask;
+		new |= x << shift;
+	} while (!arch_try_cmpxchg((u32 *)ptr, &old, new));
+	return old >> shift;
+}
+
+static __always_inline u64 __arch_xchg(u64 ptr, u64 x, int size)
 {
 	switch (size) {
-	case 1: {
-		unsigned int prev, shift, mask;
-
-		shift = (3 ^ (address & 3)) << 3;
-		address ^= address & 3;
-		old = (old & 0xff) << shift;
-		new = (new & 0xff) << shift;
-		mask = ~(0xff << shift);
-		asm volatile(
-			"	l	%[prev],%[address]\n"
-			"	nr	%[prev],%[mask]\n"
-			"	xilf	%[mask],0xffffffff\n"
-			"	or	%[new],%[prev]\n"
-			"	or	%[prev],%[tmp]\n"
-			"0:	lr	%[tmp],%[prev]\n"
-			"	cs	%[prev],%[new],%[address]\n"
-			"	jnl	1f\n"
-			"	xr	%[tmp],%[prev]\n"
-			"	xr	%[new],%[tmp]\n"
-			"	nr	%[tmp],%[mask]\n"
-			"	jz	0b\n"
-			"1:"
-			: [prev] "=&d" (prev),
-			  [address] "+Q" (*(int *)address),
-			  [tmp] "+&d" (old),
-			  [new] "+&d" (new),
-			  [mask] "+&d" (mask)
-			:: "memory", "cc");
-		return prev >> shift;
-	}
-	case 2: {
-		unsigned int prev, shift, mask;
-
-		shift = (2 ^ (address & 2)) << 3;
-		address ^= address & 2;
-		old = (old & 0xffff) << shift;
-		new = (new & 0xffff) << shift;
-		mask = ~(0xffff << shift);
-		asm volatile(
-			"	l	%[prev],%[address]\n"
-			"	nr	%[prev],%[mask]\n"
-			"	xilf	%[mask],0xffffffff\n"
-			"	or	%[new],%[prev]\n"
-			"	or	%[prev],%[tmp]\n"
-			"0:	lr	%[tmp],%[prev]\n"
-			"	cs	%[prev],%[new],%[address]\n"
-			"	jnl	1f\n"
-			"	xr	%[tmp],%[prev]\n"
-			"	xr	%[new],%[tmp]\n"
-			"	nr	%[tmp],%[mask]\n"
-			"	jz	0b\n"
-			"1:"
-			: [prev] "=&d" (prev),
-			  [address] "+Q" (*(int *)address),
-			  [tmp] "+&d" (old),
-			  [new] "+&d" (new),
-			  [mask] "+&d" (mask)
-			:: "memory", "cc");
-		return prev >> shift;
-	}
+	case 1:
+		return __arch_xchg1(ptr, x & 0xff);
+	case 2:
+		return __arch_xchg2(ptr, x & 0xffff);
 	case 4: {
-		unsigned int prev = old;
-
-		asm volatile(
-			"	cs	%[prev],%[new],%[address]\n"
-			: [prev] "+&d" (prev),
-			  [address] "+Q" (*(int *)address)
-			: [new] "d" (new)
-			: "memory", "cc");
-		return prev;
+		u32 old = READ_ONCE(*(u32 *)ptr);
+
+		do {
+		} while (!arch_try_cmpxchg((u32 *)ptr, &old, x & 0xffffffff));
+		return old;
 	}
 	case 8: {
-		unsigned long prev = old;
-
-		asm volatile(
-			"	csg	%[prev],%[new],%[address]\n"
-			: [prev] "+&d" (prev),
-			  [address] "+QS" (*(long *)address)
-			: [new] "d" (new)
-			: "memory", "cc");
-		return prev;
+		u64 old = READ_ONCE(*(u64 *)ptr);
+
+		do {
+		} while (!arch_try_cmpxchg((u64 *)ptr, &old, x));
+		return old;
 	}
 	}
-	__cmpxchg_called_with_bad_pointer();
-	return old;
+	__xchg_called_with_bad_pointer();
+	return x;
 }
 
-#define arch_cmpxchg(ptr, o, n)						\
+#define arch_xchg(ptr, x)						\
 ({									\
-	__typeof__(*(ptr)) __ret;					\
-									\
-	__ret = (__typeof__(*(ptr)))					\
-		__cmpxchg((unsigned long)(ptr), (unsigned long)(o),	\
-			  (unsigned long)(n), sizeof(*(ptr)));		\
-	__ret;								\
+	(__typeof__(*(ptr)))__arch_xchg((unsigned long)(ptr),		\
+					(unsigned long)(x),		\
+					sizeof(*(ptr)));		\
 })
 
-#define arch_cmpxchg64		arch_cmpxchg
-#define arch_cmpxchg_local	arch_cmpxchg
-#define arch_cmpxchg64_local	arch_cmpxchg
-
 #define system_has_cmpxchg128()		1
 
 static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 new)
@@ -203,5 +249,25 @@ static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 n
 }
 
 #define arch_cmpxchg128		arch_cmpxchg128
+#define arch_cmpxchg128_local	arch_cmpxchg128
+
+#ifdef __HAVE_ASM_FLAG_OUTPUTS__
+
+static __always_inline bool arch_try_cmpxchg128(volatile u128 *ptr, u128 *oldp, u128 new)
+{
+	int cc;
+
+	asm volatile(
+		"	cdsg	%[old],%[new],%[ptr]\n"
+		: [old] "+d" (*oldp), [ptr] "+QS" (*ptr), "=@cc" (cc)
+		: [new] "d" (new)
+		: "memory");
+	return likely(cc == 0);
+}
+
+#define arch_try_cmpxchg128		arch_try_cmpxchg128
+#define arch_try_cmpxchg128_local	arch_try_cmpxchg128
+
+#endif /* __HAVE_ASM_FLAG_OUTPUTS__ */
 
 #endif /* __ASM_CMPXCHG_H */