author     Linus Torvalds <torvalds@linux-foundation.org>   2024-11-18 17:45:41 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2024-11-18 17:45:41 -0800
commit     0338cd9c22d1bce7dc4a6641d4215a50f476f429 (patch)
tree       22d500d5d8bd2c04fe6ae2be3d6cd7e12a755f09 /arch/s390/include/asm/cmpxchg.h
parent     5591fd5e034819a89ac93c0ccc6be2a930042f71 (diff)
parent     e200565d434b66e5b2bfc3b143b66b8ca29666ad (diff)
Merge tag 's390-6.13-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Heiko Carstens:
- Add a firmware sysfs interface which allows user space to retrieve the
dump area size of the machine
- Add 'measurement_chars_full' CHPID sysfs attribute to make the
complete associated Channel-Measurements Characteristics Block
available
- Add virtio-mem support
- Move gmap (aka KVM) page fault handling from the main fault handler
to KVM code. This is the first step toward making s390 KVM page fault
handling similar to other architectures. After this step the main
fault handler no longer contains any special handling, so convert it
to support LOCK_MM_AND_FIND_VMA
- gcc 14 added support for flag output operands in inline assemblies
on s390. This allows for several optimizations (see the sketches
following this list):
    - Provide a cmpxchg inline assembly which makes use of this, and
      provide all variants of arch_try_cmpxchg() so that the compiler
      can generate slightly better code
    - Convert a few cmpxchg() loops to try_cmpxchg() loops
    - Similar to x86, add a CC_OUT() helper macro (and other macros),
      and convert all inline assemblies to make use of them, so that
      better code can be generated depending on the compiler version
- List installed host-key hashes in sysfs if the machine supports the
Query Ultravisor Keys UVC
- Add 'Retrieve Secret' ioctl which allows user space in protected
execution guests to retrieve previously stored secrets from the
Ultravisor
- Add pkey-uv module which supports the conversion of Ultravisor
retrievable secrets to protected keys
- Extend the existing paes cipher to exploit the full AES-XTS hardware
acceleration introduced with message-security assist extension 10
- Convert (hopefully) all sysfs show functions to use sysfs_emit() so
that the constant flow of such patches stops (a minimal sysfs_emit()
sketch also follows this list)
- For PCI devices make use of the newly added Topology ID attribute to
enable whole card multi-function support despite the change to PCHID
per port. Additionally improve the overall robustness and usability
of the multifunction support
- Various other small improvements, fixes, and cleanups
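Two of the items above lend themselves to short illustrations. First, the
cmpxchg()-to-try_cmpxchg() conversion: a classic cmpxchg() loop must
re-read the location and compare the returned value itself, while
try_cmpxchg() refreshes the expected value on failure and returns a
boolean, which the flag output operand support lets the compiler turn into
a direct branch on the condition code. A hedged sketch (the 'counter'
variable and both functions are invented for illustration, not taken from
this merge):

        static unsigned long counter;

        /* before: cmpxchg() loop, compares the returned value itself */
        static void counter_add_old(unsigned long v)
        {
                unsigned long old, new;

                do {
                        old = READ_ONCE(counter);       /* re-read on every retry */
                        new = old + v;
                } while (cmpxchg(&counter, old, new) != old);
        }

        /* after: a failed try_cmpxchg() updates 'old' in place */
        static void counter_add_new(unsigned long v)
        {
                unsigned long old = READ_ONCE(counter);
                unsigned long new;

                do {
                        new = old + v;
                } while (!try_cmpxchg(&counter, &old, new));
        }

Second, the sysfs_emit() item refers to the usual show-function
conversion; a minimal sketch (the attribute and 'example_value' are
invented):

        static ssize_t example_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
        {
                /* sysfs_emit() bounds-checks against PAGE_SIZE, unlike sprintf() */
                return sysfs_emit(buf, "%u\n", example_value);
        }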
* tag 's390-6.13-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (133 commits)
s390/cio/ioasm: Convert to use flag output macros
s390/cio/qdio: Convert to use flag output macros
s390/sclp: Convert to use flag output macros
s390/dasd: Convert to use flag output macros
s390/boot/physmem: Convert to use flag output macros
s390/pci: Convert to use flag output macros
s390/kvm: Convert to use flag output macros
s390/extmem: Convert to use flag output macros
s390/string: Convert to use flag output macros
s390/diag: Convert to use flag output macros
s390/irq: Convert to use flag output macros
s390/smp: Convert to use flag output macros
s390/uv: Convert to use flag output macros
s390/pai: Convert to use flag output macros
s390/mm: Convert to use flag output macros
s390/cpu_mf: Convert to use flag output macros
s390/cpcmd: Convert to use flag output macros
s390/topology: Convert to use flag output macros
s390/time: Convert to use flag output macros
s390/pageattr: Convert to use flag output macros
...
Diffstat (limited to 'arch/s390/include/asm/cmpxchg.h')
-rw-r--r--   arch/s390/include/asm/cmpxchg.h   374
1 file changed, 220 insertions, 154 deletions
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index aae0315374de..a9e2006033b7 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -11,185 +11,231 @@
 #include <linux/mmdebug.h>
 #include <linux/types.h>
 #include <linux/bug.h>
+#include <asm/asm.h>
 
-void __xchg_called_with_bad_pointer(void);
+void __cmpxchg_called_with_bad_pointer(void);
+
+static __always_inline u32 __cs_asm(u64 ptr, u32 old, u32 new)
+{
+        asm volatile(
+                " cs %[old],%[new],%[ptr]\n"
+                : [old] "+d" (old), [ptr] "+Q" (*(u32 *)ptr)
+                : [new] "d" (new)
+                : "memory", "cc");
+        return old;
+}
+
+static __always_inline u64 __csg_asm(u64 ptr, u64 old, u64 new)
+{
+        asm volatile(
+                " csg %[old],%[new],%[ptr]\n"
+                : [old] "+d" (old), [ptr] "+QS" (*(u64 *)ptr)
+                : [new] "d" (new)
+                : "memory", "cc");
+        return old;
+}
 
-static __always_inline unsigned long
-__arch_xchg(unsigned long x, unsigned long address, int size)
+static inline u8 __arch_cmpxchg1(u64 ptr, u8 old, u8 new)
 {
-        unsigned long old;
-        int shift;
+        union {
+                u8 b[4];
+                u32 w;
+        } old32, new32;
+        u32 prev;
+        int i;
+
+        i = ptr & 3;
+        ptr &= ~0x3;
+        prev = READ_ONCE(*(u32 *)ptr);
+        do {
+                old32.w = prev;
+                if (old32.b[i] != old)
+                        return old32.b[i];
+                new32.w = old32.w;
+                new32.b[i] = new;
+                prev = __cs_asm(ptr, old32.w, new32.w);
+        } while (prev != old32.w);
+        return old;
+}
+
+static inline u16 __arch_cmpxchg2(u64 ptr, u16 old, u16 new)
+{
+        union {
+                u16 b[2];
+                u32 w;
+        } old32, new32;
+        u32 prev;
+        int i;
+
+        i = (ptr & 3) >> 1;
+        ptr &= ~0x3;
+        prev = READ_ONCE(*(u32 *)ptr);
+        do {
+                old32.w = prev;
+                if (old32.b[i] != old)
+                        return old32.b[i];
+                new32.w = old32.w;
+                new32.b[i] = new;
+                prev = __cs_asm(ptr, old32.w, new32.w);
+        } while (prev != old32.w);
+        return old;
+}
 
+static __always_inline u64 __arch_cmpxchg(u64 ptr, u64 old, u64 new, int size)
+{
         switch (size) {
-        case 1:
-                shift = (3 ^ (address & 3)) << 3;
-                address ^= address & 3;
-                asm volatile(
-                        " l %0,%1\n"
-                        "0: lr 0,%0\n"
-                        " nr 0,%3\n"
-                        " or 0,%2\n"
-                        " cs %0,0,%1\n"
-                        " jl 0b\n"
-                        : "=&d" (old), "+Q" (*(int *) address)
-                        : "d" ((x & 0xff) << shift), "d" (~(0xff << shift))
-                        : "memory", "cc", "0");
-                return old >> shift;
-        case 2:
-                shift = (2 ^ (address & 2)) << 3;
-                address ^= address & 2;
-                asm volatile(
-                        " l %0,%1\n"
-                        "0: lr 0,%0\n"
-                        " nr 0,%3\n"
-                        " or 0,%2\n"
-                        " cs %0,0,%1\n"
-                        " jl 0b\n"
-                        : "=&d" (old), "+Q" (*(int *) address)
-                        : "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift))
-                        : "memory", "cc", "0");
-                return old >> shift;
-        case 4:
-                asm volatile(
-                        " l %0,%1\n"
-                        "0: cs %0,%2,%1\n"
-                        " jl 0b\n"
-                        : "=&d" (old), "+Q" (*(int *) address)
-                        : "d" (x)
-                        : "memory", "cc");
-                return old;
-        case 8:
-                asm volatile(
-                        " lg %0,%1\n"
-                        "0: csg %0,%2,%1\n"
-                        " jl 0b\n"
-                        : "=&d" (old), "+QS" (*(long *) address)
-                        : "d" (x)
-                        : "memory", "cc");
-                return old;
+        case 1:  return __arch_cmpxchg1(ptr, old & 0xff, new & 0xff);
+        case 2:  return __arch_cmpxchg2(ptr, old & 0xffff, new & 0xffff);
+        case 4:  return __cs_asm(ptr, old & 0xffffffff, new & 0xffffffff);
+        case 8:  return __csg_asm(ptr, old, new);
+        default: __cmpxchg_called_with_bad_pointer();
         }
-        __xchg_called_with_bad_pointer();
-        return x;
+        return old;
 }
 
-#define arch_xchg(ptr, x) \
+#define arch_cmpxchg(ptr, o, n) \
 ({ \
-        __typeof__(*(ptr)) __ret; \
+        (__typeof__(*(ptr)))__arch_cmpxchg((unsigned long)(ptr), \
+                                           (unsigned long)(o), \
+                                           (unsigned long)(n), \
+                                           sizeof(*(ptr))); \
+})
+
+#define arch_cmpxchg64          arch_cmpxchg
+#define arch_cmpxchg_local      arch_cmpxchg
+#define arch_cmpxchg64_local    arch_cmpxchg
+
+#ifdef __HAVE_ASM_FLAG_OUTPUTS__
+
+#define arch_try_cmpxchg(ptr, oldp, new) \
+({ \
+        __typeof__(ptr) __oldp = (__typeof__(ptr))(oldp); \
+        __typeof__(*(ptr)) __old = *__oldp; \
+        __typeof__(*(ptr)) __new = (new); \
+        __typeof__(*(ptr)) __prev; \
+        int __cc; \
 \
-        __ret = (__typeof__(*(ptr))) \
-                __arch_xchg((unsigned long)(x), (unsigned long)(ptr), \
-                            sizeof(*(ptr))); \
-        __ret; \
+        switch (sizeof(*(ptr))) { \
+        case 1: \
+        case 2: { \
+                __prev = arch_cmpxchg((ptr), (__old), (__new)); \
+                __cc = (__prev != __old); \
+                if (unlikely(__cc)) \
+                        *__oldp = __prev; \
+                break; \
+        } \
+        case 4: { \
+                asm volatile( \
+                        " cs %[__old],%[__new],%[__ptr]\n" \
+                        : [__old] "+d" (*__oldp), \
+                          [__ptr] "+Q" (*(ptr)), \
+                          "=@cc" (__cc) \
+                        : [__new] "d" (__new) \
+                        : "memory"); \
+                break; \
+        } \
+        case 8: { \
+                asm volatile( \
+                        " csg %[__old],%[__new],%[__ptr]\n" \
+                        : [__old] "+d" (*__oldp), \
+                          [__ptr] "+QS" (*(ptr)), \
+                          "=@cc" (__cc) \
+                        : [__new] "d" (__new) \
+                        : "memory"); \
+                break; \
+        } \
+        default: \
+                __cmpxchg_called_with_bad_pointer(); \
+        } \
+        likely(__cc == 0); \
 })
 
-void __cmpxchg_called_with_bad_pointer(void);
+#else /* __HAVE_ASM_FLAG_OUTPUTS__ */
 
-static __always_inline unsigned long __cmpxchg(unsigned long address,
-                                               unsigned long old,
-                                               unsigned long new, int size)
+#define arch_try_cmpxchg(ptr, oldp, new) \
+({ \
+        __typeof__((ptr)) __oldp = (__typeof__(ptr))(oldp); \
+        __typeof__(*(ptr)) __old = *__oldp; \
+        __typeof__(*(ptr)) __new = (new); \
+        __typeof__(*(ptr)) __prev; \
+\
+        __prev = arch_cmpxchg((ptr), (__old), (__new)); \
+        if (unlikely(__prev != __old)) \
+                *__oldp = __prev; \
+        likely(__prev == __old); \
+})
+
+#endif /* __HAVE_ASM_FLAG_OUTPUTS__ */
+
+#define arch_try_cmpxchg64              arch_try_cmpxchg
+#define arch_try_cmpxchg_local          arch_try_cmpxchg
+#define arch_try_cmpxchg64_local        arch_try_cmpxchg
+
+void __xchg_called_with_bad_pointer(void);
+
+static inline u8 __arch_xchg1(u64 ptr, u8 x)
+{
+        int shift = (3 ^ (ptr & 3)) << 3;
+        u32 mask, old, new;
+
+        ptr &= ~0x3;
+        mask = ~(0xff << shift);
+        old = READ_ONCE(*(u32 *)ptr);
+        do {
+                new = old & mask;
+                new |= x << shift;
+        } while (!arch_try_cmpxchg((u32 *)ptr, &old, new));
+        return old >> shift;
+}
+
+static inline u16 __arch_xchg2(u64 ptr, u16 x)
+{
+        int shift = (2 ^ (ptr & 2)) << 3;
+        u32 mask, old, new;
+
+        ptr &= ~0x3;
+        mask = ~(0xffff << shift);
+        old = READ_ONCE(*(u32 *)ptr);
+        do {
+                new = old & mask;
+                new |= x << shift;
+        } while (!arch_try_cmpxchg((u32 *)ptr, &old, new));
+        return old >> shift;
+}
+
+static __always_inline u64 __arch_xchg(u64 ptr, u64 x, int size)
 {
         switch (size) {
-        case 1: {
-                unsigned int prev, shift, mask;
-
-                shift = (3 ^ (address & 3)) << 3;
-                address ^= address & 3;
-                old = (old & 0xff) << shift;
-                new = (new & 0xff) << shift;
-                mask = ~(0xff << shift);
-                asm volatile(
-                        " l %[prev],%[address]\n"
-                        " nr %[prev],%[mask]\n"
-                        " xilf %[mask],0xffffffff\n"
-                        " or %[new],%[prev]\n"
-                        " or %[prev],%[tmp]\n"
-                        "0: lr %[tmp],%[prev]\n"
-                        " cs %[prev],%[new],%[address]\n"
-                        " jnl 1f\n"
-                        " xr %[tmp],%[prev]\n"
-                        " xr %[new],%[tmp]\n"
-                        " nr %[tmp],%[mask]\n"
-                        " jz 0b\n"
-                        "1:"
-                        : [prev] "=&d" (prev),
-                          [address] "+Q" (*(int *)address),
-                          [tmp] "+&d" (old),
-                          [new] "+&d" (new),
-                          [mask] "+&d" (mask)
-                        :: "memory", "cc");
-                return prev >> shift;
-        }
-        case 2: {
-                unsigned int prev, shift, mask;
-
-                shift = (2 ^ (address & 2)) << 3;
-                address ^= address & 2;
-                old = (old & 0xffff) << shift;
-                new = (new & 0xffff) << shift;
-                mask = ~(0xffff << shift);
-                asm volatile(
-                        " l %[prev],%[address]\n"
-                        " nr %[prev],%[mask]\n"
-                        " xilf %[mask],0xffffffff\n"
-                        " or %[new],%[prev]\n"
-                        " or %[prev],%[tmp]\n"
-                        "0: lr %[tmp],%[prev]\n"
-                        " cs %[prev],%[new],%[address]\n"
-                        " jnl 1f\n"
-                        " xr %[tmp],%[prev]\n"
-                        " xr %[new],%[tmp]\n"
-                        " nr %[tmp],%[mask]\n"
-                        " jz 0b\n"
-                        "1:"
-                        : [prev] "=&d" (prev),
-                          [address] "+Q" (*(int *)address),
-                          [tmp] "+&d" (old),
-                          [new] "+&d" (new),
-                          [mask] "+&d" (mask)
-                        :: "memory", "cc");
-                return prev >> shift;
-        }
+        case 1:
+                return __arch_xchg1(ptr, x & 0xff);
+        case 2:
+                return __arch_xchg2(ptr, x & 0xffff);
         case 4: {
-                unsigned int prev = old;
-
-                asm volatile(
-                        " cs %[prev],%[new],%[address]\n"
-                        : [prev] "+&d" (prev),
-                          [address] "+Q" (*(int *)address)
-                        : [new] "d" (new)
-                        : "memory", "cc");
-                return prev;
+                u32 old = READ_ONCE(*(u32 *)ptr);
+
+                do {
+                } while (!arch_try_cmpxchg((u32 *)ptr, &old, x & 0xffffffff));
+                return old;
         }
         case 8: {
-                unsigned long prev = old;
-
-                asm volatile(
-                        " csg %[prev],%[new],%[address]\n"
-                        : [prev] "+&d" (prev),
-                          [address] "+QS" (*(long *)address)
-                        : [new] "d" (new)
-                        : "memory", "cc");
-                return prev;
+                u64 old = READ_ONCE(*(u64 *)ptr);
+
+                do {
+                } while (!arch_try_cmpxchg((u64 *)ptr, &old, x));
+                return old;
         }
         }
-        __cmpxchg_called_with_bad_pointer();
-        return old;
+        __xchg_called_with_bad_pointer();
+        return x;
 }
 
-#define arch_cmpxchg(ptr, o, n) \
+#define arch_xchg(ptr, x) \
 ({ \
-        __typeof__(*(ptr)) __ret; \
-\
-        __ret = (__typeof__(*(ptr))) \
-                __cmpxchg((unsigned long)(ptr), (unsigned long)(o), \
-                          (unsigned long)(n), sizeof(*(ptr))); \
-        __ret; \
+        (__typeof__(*(ptr)))__arch_xchg((unsigned long)(ptr), \
+                                        (unsigned long)(x), \
+                                        sizeof(*(ptr))); \
 })
 
-#define arch_cmpxchg64          arch_cmpxchg
-#define arch_cmpxchg_local      arch_cmpxchg
-#define arch_cmpxchg64_local    arch_cmpxchg
-
 #define system_has_cmpxchg128()         1
 
 static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 new)
@@ -203,5 +249,25 @@ static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 n
 }
 
 #define arch_cmpxchg128         arch_cmpxchg128
+#define arch_cmpxchg128_local   arch_cmpxchg128
+
+#ifdef __HAVE_ASM_FLAG_OUTPUTS__
+
+static __always_inline bool arch_try_cmpxchg128(volatile u128 *ptr, u128 *oldp, u128 new)
+{
+        int cc;
+
+        asm volatile(
+                " cdsg %[old],%[new],%[ptr]\n"
+                : [old] "+d" (*oldp), [ptr] "+QS" (*ptr), "=@cc" (cc)
+                : [new] "d" (new)
+                : "memory");
+        return likely(cc == 0);
+}
+
+#define arch_try_cmpxchg128             arch_try_cmpxchg128
+#define arch_try_cmpxchg128_local       arch_try_cmpxchg128
+
+#endif /* __HAVE_ASM_FLAG_OUTPUTS__ */
 
 #endif /* __ASM_CMPXCHG_H */
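The most delicate part of the rewritten header is the 1- and 2-byte
cmpxchg emulation: the byte (or halfword) is compare-and-swapped by
running a 32-bit CS on the aligned word that contains it, failing early
when the target byte no longer holds the expected value. Below is a
hedged user-space model of the same technique, substituting GCC's
__atomic builtins for the s390 CS instruction; it is illustrative code,
not part of the kernel patch:

        #include <stdint.h>
        #include <stdio.h>

        /* Model of __arch_cmpxchg1(): a 1-byte CAS built from a 4-byte CAS
         * on the containing aligned word. Returns the previous byte value. */
        static uint8_t cmpxchg1_model(uint8_t *ptr, uint8_t old, uint8_t new_val)
        {
                uintptr_t addr = (uintptr_t)ptr;
                unsigned int i = addr & 3;      /* byte index within its word */
                uint32_t *word = (uint32_t *)(addr & ~(uintptr_t)3);
                union { uint8_t b[4]; uint32_t w; } old32, new32;
                uint32_t prev = __atomic_load_n(word, __ATOMIC_RELAXED);

                do {
                        old32.w = prev;
                        if (old32.b[i] != old)  /* byte differs: fail early */
                                return old32.b[i];
                        new32.w = old32.w;
                        new32.b[i] = new_val;   /* splice new byte into the word */
                } while (!__atomic_compare_exchange_n(word, &prev, new32.w, 0,
                                                      __ATOMIC_SEQ_CST,
                                                      __ATOMIC_SEQ_CST));
                return old;
        }

        int main(void)
        {
                uint8_t buf[4] __attribute__((aligned(4))) = { 1, 2, 3, 4 };

                printf("%u\n", cmpxchg1_model(&buf[2], 3, 9)); /* 3: swap succeeded */
                printf("%u\n", cmpxchg1_model(&buf[2], 3, 7)); /* 9: swap failed */
                return 0;
        }

The union-of-bytes indexing works on either byte order because b[] maps
directly onto the word's storage; the kernel version additionally relies
on the serialization provided by CS, which the SEQ_CST builtins model
conservatively here.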