| author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-01-18 16:44:24 -0800 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-01-18 16:44:24 -0800 |
| commit | a200dcb34693084e56496960d855afdeaaf9578f (patch) | |
| tree | bf65e4350460b7f98247278469f7600d1808c3fc /include/asm-generic/barrier.h | |
| parent | d05d82f7110b08fd36178a641b69a1f206e1142b (diff) | |
| parent | 43e361f23c49dbddf74f56ddf6cdd85c5dbff6da (diff) | |
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull virtio barrier rework+fixes from Michael Tsirkin:
"This adds a new kind of barrier, and reworks virtio and xen to use it.
Plus some fixes here and there"
* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (44 commits)
checkpatch: add virt barriers
checkpatch: check for __smp outside barrier.h
checkpatch.pl: add missing memory barriers
virtio: make find_vqs() checkpatch.pl-friendly
virtio_balloon: fix race between migration and ballooning
virtio_balloon: fix race by fill and leak
s390: more efficient smp barriers
s390: use generic memory barriers
xen/events: use virt_xxx barriers
xen/io: use virt_xxx barriers
xenbus: use virt_xxx barriers
virtio_ring: use virt_store_mb
sh: move xchg_cmpxchg to a header by itself
sh: support 1 and 2 byte xchg
virtio_ring: update weak barriers to use virt_xxx
Revert "virtio_ring: Update weak barriers to use dma_wmb/rmb"
asm-generic: implement virt_xxx memory barriers
x86: define __smp_xxx
xtensa: define __smp_xxx
tile: define __smp_xxx
...
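The point of the new virt_xxx barriers is that a guest kernel built without CONFIG_SMP still shares memory with an SMP host: on UP builds the smp_*() macros collapse to a compiler-only barrier(), which is too weak when the other end of a virtio ring runs on another physical CPU, so virt_*() always expands to the __smp_*() form. As a rough illustration, here is a standalone userspace analogue (not kernel code) of the store-release/load-acquire pairing the patch implements, using C11 atomics as stand-ins for __smp_store_release()/__smp_load_acquire():

```c
/* Userspace sketch of the release/acquire pattern that
 * __smp_store_release()/__smp_load_acquire() provide in the diff
 * below: publish a payload, then a flag, so a reader that observes
 * the flag also observes the payload.
 * Build: cc -O2 -pthread demo.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;          /* data protected by the flag           */
static atomic_int ready;     /* stand-in for a virtio ring index     */

static void *producer(void *arg)
{
	(void)arg;
	payload = 42;                                /* plain store */
	atomic_store_explicit(&ready, 1,
			      memory_order_release); /* smp_store_release() analogue */
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, producer, NULL);

	/* smp_load_acquire() analogue: once we observe ready == 1,
	 * the payload store is guaranteed to be visible too. */
	while (atomic_load_explicit(&ready, memory_order_acquire) == 0)
		;
	printf("payload = %d\n", payload);           /* always prints 42 */

	pthread_join(t, NULL);
	return 0;
}
```

The release store keeps the payload write from sinking below the flag write; the acquire load keeps the payload read from hoisting above the flag read — the ordering contract virtio relies on for its ring indices.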
Diffstat (limited to 'include/asm-generic/barrier.h')
-rw-r--r-- | include/asm-generic/barrier.h | 106 |
1 file changed, 97 insertions, 9 deletions
```diff
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 0f45f93ef692..1cceca146905 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -54,22 +54,38 @@
 #define read_barrier_depends()	do { } while (0)
 #endif
 
+#ifndef __smp_mb
+#define __smp_mb()	mb()
+#endif
+
+#ifndef __smp_rmb
+#define __smp_rmb()	rmb()
+#endif
+
+#ifndef __smp_wmb
+#define __smp_wmb()	wmb()
+#endif
+
+#ifndef __smp_read_barrier_depends
+#define __smp_read_barrier_depends()	read_barrier_depends()
+#endif
+
 #ifdef CONFIG_SMP
 
 #ifndef smp_mb
-#define smp_mb()	mb()
+#define smp_mb()	__smp_mb()
 #endif
 
 #ifndef smp_rmb
-#define smp_rmb()	rmb()
+#define smp_rmb()	__smp_rmb()
 #endif
 
 #ifndef smp_wmb
-#define smp_wmb()	wmb()
+#define smp_wmb()	__smp_wmb()
 #endif
 
 #ifndef smp_read_barrier_depends
-#define smp_read_barrier_depends()	read_barrier_depends()
+#define smp_read_barrier_depends()	__smp_read_barrier_depends()
 #endif
 
 #else	/* !CONFIG_SMP */
@@ -92,32 +108,104 @@
 #endif	/* CONFIG_SMP */
 
+#ifndef __smp_store_mb
+#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
+#endif
+
+#ifndef __smp_mb__before_atomic
+#define __smp_mb__before_atomic()	__smp_mb()
+#endif
+
+#ifndef __smp_mb__after_atomic
+#define __smp_mb__after_atomic()	__smp_mb()
+#endif
+
+#ifndef __smp_store_release
+#define __smp_store_release(p, v)					\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	__smp_mb();							\
+	WRITE_ONCE(*p, v);						\
+} while (0)
+#endif
+
+#ifndef __smp_load_acquire
+#define __smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	__smp_mb();							\
+	___p1;								\
+})
+#endif
+
+#ifdef CONFIG_SMP
+
+#ifndef smp_store_mb
+#define smp_store_mb(var, value)  __smp_store_mb(var, value)
+#endif
+
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic()	__smp_mb__before_atomic()
+#endif
+
+#ifndef smp_mb__after_atomic
+#define smp_mb__after_atomic()	__smp_mb__after_atomic()
+#endif
+
+#ifndef smp_store_release
+#define smp_store_release(p, v) __smp_store_release(p, v)
+#endif
+
+#ifndef smp_load_acquire
+#define smp_load_acquire(p) __smp_load_acquire(p)
+#endif
+
+#else	/* !CONFIG_SMP */
+
 #ifndef smp_store_mb
-#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); smp_mb(); } while (0)
+#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
 #endif
 
 #ifndef smp_mb__before_atomic
-#define smp_mb__before_atomic()	smp_mb()
+#define smp_mb__before_atomic()	barrier()
 #endif
 
 #ifndef smp_mb__after_atomic
-#define smp_mb__after_atomic()	smp_mb()
+#define smp_mb__after_atomic()	barrier()
 #endif
 
+#ifndef smp_store_release
 #define smp_store_release(p, v)						\
 do {									\
 	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
+	barrier();							\
 	WRITE_ONCE(*p, v);						\
 } while (0)
+#endif
 
+#ifndef smp_load_acquire
 #define smp_load_acquire(p)						\
 ({									\
 	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
+	barrier();							\
 	___p1;								\
 })
+#endif
+
+#endif
+
+/* Barriers for virtual machine guests when talking to an SMP host */
+#define virt_mb() __smp_mb()
+#define virt_rmb() __smp_rmb()
+#define virt_wmb() __smp_wmb()
+#define virt_read_barrier_depends() __smp_read_barrier_depends()
+#define virt_store_mb(var, value) __smp_store_mb(var, value)
+#define virt_mb__before_atomic() __smp_mb__before_atomic()
+#define virt_mb__after_atomic() __smp_mb__after_atomic()
+#define virt_store_release(p, v) __smp_store_release(p, v)
+#define virt_load_acquire(p) __smp_load_acquire(p)
 
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_GENERIC_BARRIER_H */
```
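For a sense of how the #ifndef layering above composes, here is a compilable toy model (plain GCC/Clang C, not the kernel's headers; CONFIG_SMP, barrier() and the fence choice are illustrative stand-ins): an architecture may pre-define __smp_mb() before the generic layer, the generic layer fills in only what is missing, smp_mb() is SMP-strength only under CONFIG_SMP, while virt_mb() uses the strong form unconditionally:

```c
/* Toy model of the override layering in asm-generic/barrier.h.
 * Standalone C (GCC/Clang), not kernel code. Build: cc -O2 toy.c */
#include <stdio.h>

#define CONFIG_SMP 1	/* pretend this is an SMP kernel build */
#define barrier()	__asm__ __volatile__("" ::: "memory") /* compiler-only */

/* "arch" layer: the architecture provides its own building block
 * (here a real CPU fence via a compiler builtin)... */
#define __smp_mb()	__atomic_thread_fence(__ATOMIC_SEQ_CST)

/* "generic" layer: supply a default only if the arch did not */
#ifndef __smp_mb
#define __smp_mb()	barrier()
#endif

/* smp_mb() is SMP-strength only when the kernel itself is SMP... */
#ifdef CONFIG_SMP
#define smp_mb()	__smp_mb()
#else
#define smp_mb()	barrier()
#endif

/* ...but a virtio/xen guest always talks to an SMP host, so
 * virt_mb() bypasses CONFIG_SMP and uses the strong form. */
#define virt_mb()	__smp_mb()

int main(void)
{
	smp_mb();	/* strong here because CONFIG_SMP is set   */
	virt_mb();	/* strong regardless of CONFIG_SMP         */
	puts("both fences issued");
	return 0;
}
```

This also shows why the s390 and x86 patches in the series only needed to define the __smp_xxx building blocks: the generic header wires up both the smp_*() and virt_*() front ends on top of them.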