author		Peter Zijlstra <peterz@infradead.org>	2015-03-26 17:45:37 +0100
committer	Ingo Molnar <mingo@kernel.org>	2015-03-27 09:42:01 +0100
commit		7bd3e239d6c6d1cad276e8f130b386df4234dcd7 (patch)
tree		260eb35894afb72ab32e860f37960b2dd6114e12 /include/linux/compiler.h
parent		e6beaa363d56d7fc2f8cd6f7291e4d93911a428a (diff)
locking: Remove atomicity checks from {READ,WRITE}_ONCE
The fact that volatile allows atomic loads/stores is a special case,
not a requirement for {READ,WRITE}_ONCE(). Their primary purpose is to
force the compiler to emit each load/store _once_.
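As a minimal, hypothetical illustration of that purpose (shared_flag and
wait_for_start() are invented names, not part of this patch): without
READ_ONCE() the compiler is allowed to load shared_flag once, keep it in a
register and spin forever; the volatile access inside READ_ONCE() forces a
fresh load on every loop iteration.

/*
 * Hypothetical sketch, not part of this patch: shared_flag and
 * wait_for_start() are invented names for illustration only.
 */
static int shared_flag;

static void wait_for_start(void)
{
	/* READ_ONCE() forces one load per iteration; a plain read could be hoisted. */
	while (!READ_ONCE(shared_flag))
		cpu_relax();
}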
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/compiler.h')
-rw-r--r--	include/linux/compiler.h	16
1 file changed, 0 insertions, 16 deletions
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 1b45e4a0519b..0e41ca0e5927 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -192,29 +192,16 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 
 #include <uapi/linux/types.h>
 
-static __always_inline void data_access_exceeds_word_size(void)
-#ifdef __compiletime_warning
-__compiletime_warning("data access exceeds word size and won't be atomic")
-#endif
-;
-
-static __always_inline void data_access_exceeds_word_size(void)
-{
-}
-
 static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
 {
 	switch (size) {
 	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
 	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
 	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
-#ifdef CONFIG_64BIT
 	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
-#endif
 	default:
 		barrier();
 		__builtin_memcpy((void *)res, (const void *)p, size);
-		data_access_exceeds_word_size();
 		barrier();
 	}
 }
@@ -225,13 +212,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
 	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
 	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
-#ifdef CONFIG_64BIT
 	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
-#endif
 	default:
 		barrier();
 		__builtin_memcpy((void *)p, (const void *)res, size);
-		data_access_exceeds_word_size();
 		barrier();
 	}
 }
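For reference, here is a standalone user-space approximation of what
__read_once_size() looks like after this change (read_once_size() and the
barrier() macro below are local stand-ins for this sketch, not the kernel's
definitions): sizes 1, 2, 4 and 8 are handled with a single volatile access
regardless of CONFIG_64BIT, and any other size falls back to a memcpy()
bracketed by compiler barriers, now without emitting a warning.

#include <stdint.h>
#include <string.h>

/* Local stand-in for the kernel's barrier(): a GCC/Clang compiler barrier. */
#define barrier() __asm__ __volatile__("" ::: "memory")

/* User-space approximation of __read_once_size() after this patch. */
static inline void read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(uint8_t  *)res = *(const volatile uint8_t  *)p; break;
	case 2: *(uint16_t *)res = *(const volatile uint16_t *)p; break;
	case 4: *(uint32_t *)res = *(const volatile uint32_t *)p; break;
	case 8: *(uint64_t *)res = *(const volatile uint64_t *)p; break;
	default:
		/* Oddly sized objects: copy once, with no atomicity guarantee. */
		barrier();
		memcpy(res, (const void *)p, size);
		barrier();
	}
}

Note that on a 32-bit build an 8-byte access now goes through the single
volatile 64-bit case (which the compiler may still split into two loads)
instead of falling into the memcpy() path; only sizes outside {1,2,4,8} take
the fallback, and they no longer trigger a warning, consistent with the point
that atomicity is not what {READ,WRITE}_ONCE() promise.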