author    | Linus Torvalds <torvalds@linux-foundation.org> | 2019-07-18 11:51:00 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-07-18 11:51:00 -0700
commit    | 818e95c768c6607a1df4cf022c00c3c58e2f203e (patch)
tree      | 453a2b891097c379f1f133f4821a46628d86002b /arch
parent    | d4df33b0e9925c158b313a586fb1557cf29cfdf4 (diff)
parent    | 0aeb1def44169cbe7119f26cf10b974a2046142e (diff)
Merge tag 'trace-v5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
"The main changes in this release include:
- Add user space specific memory reading for kprobes
- Allow kprobes to be executed earlier in boot
The rest are mostly just various clean ups and small fixes"
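The "user space specific memory reading" item above refers to non-faulting user-memory accessors that probe handlers can call without risking a sleeping page fault. Below is a minimal sketch of how a kprobe module might use one, assuming the probe_user_read() helper added in this cycle; the probed symbol, the register carrying the user pointer, and the buffer size are illustrative, not part of the series.

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>

/*
 * Sketch only: peek at user memory from a kprobe pre-handler.
 * probe_user_read() returns 0 on success and -EFAULT (instead of
 * faulting) if the user page is not present.
 */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	char buf[64];
	/* On x86-64 the second argument lives in %rsi (illustrative). */
	const char __user *uptr = (const char __user *)regs->si;

	if (probe_user_read(buf, uptr, sizeof(buf)))
		return 0;	/* user page not resident; just skip */

	buf[sizeof(buf) - 1] = '\0';	/* best-effort string for tracing */
	return 0;
}

static struct kprobe kp = {
	.symbol_name	= "do_sys_open",	/* illustrative probe point */
	.pre_handler	= handler_pre,
};

static int __init peek_init(void)
{
	return register_kprobe(&kp);
}

static void __exit peek_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(peek_init);
module_exit(peek_exit);
MODULE_LICENSE("GPL");

The helper never sleeps: it disables page faults around the copy and reports -EFAULT rather than faulting in the page, which is what makes it safe to call from a kprobe handler.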
* tag 'trace-v5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (33 commits)
tracing: Make trace_get_fields() global
tracing: Let filter_assign_type() detect FILTER_PTR_STRING
tracing: Pass type into tracing_generic_entry_update()
ftrace/selftest: Test if set_event/ftrace_pid exists before writing
ftrace/selftests: Return the skip code when tracing directory not configured in kernel
tracing/kprobe: Check registered state using kprobe
tracing/probe: Add trace_event_call accesses APIs
tracing/probe: Add probe event name and group name accesses APIs
tracing/probe: Add trace flag access APIs for trace_probe
tracing/probe: Add trace_event_file access APIs for trace_probe
tracing/probe: Add trace_event_call register API for trace_probe
tracing/probe: Add trace_probe init and free functions
tracing/uprobe: Set print format when parsing command
tracing/kprobe: Set print format right after parsed command
kprobes: Fix to init kprobes in subsys_initcall
tracepoint: Use struct_size() in kmalloc()
ring-buffer: Remove HAVE_64BIT_ALIGNED_ACCESS
ftrace: Enable trampoline when rec count returns back to one
tracing/kprobe: Do not run kprobe boot tests if kprobe_event is on cmdline
tracing: Make a separate config for trace event self tests
...
Diffstat (limited to 'arch')
-rw-r--r-- | arch/Kconfig                   | 16
-rw-r--r-- | arch/x86/include/asm/uaccess.h |  4
-rw-r--r-- | arch/x86/kernel/ftrace.c       |  6
3 files changed, 6 insertions(+), 20 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index e8d19c3cb91f..6dd1faab6ccb 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -128,22 +128,6 @@ config UPROBES
 	  managed by the kernel and kept transparent to the probed
 	  application. )
 
-config HAVE_64BIT_ALIGNED_ACCESS
-	def_bool 64BIT && !HAVE_EFFICIENT_UNALIGNED_ACCESS
-	help
-	  Some architectures require 64 bit accesses to be 64 bit
-	  aligned, which also requires structs containing 64 bit values
-	  to be 64 bit aligned too. This includes some 32 bit
-	  architectures which can do 64 bit accesses, as well as 64 bit
-	  architectures without unaligned access.
-
-	  This symbol should be selected by an architecture if 64 bit
-	  accesses are required to be 64 bit aligned in this way even
-	  though it is not a 64 bit architecture.
-
-	  See Documentation/unaligned-memory-access.txt for more
-	  information on the topic of unaligned memory accesses.
-
 config HAVE_EFFICIENT_UNALIGNED_ACCESS
 	bool
 	help
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index c82abd6e4ca3..9c4435307ff8 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -66,7 +66,9 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
 })
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-# define WARN_ON_IN_IRQ()	WARN_ON_ONCE(!in_task())
+static inline bool pagefault_disabled(void);
+# define WARN_ON_IN_IRQ()	\
+	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
 #else
 # define WARN_ON_IN_IRQ()
 #endif
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 4b73f5937f41..024c3053dbba 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -373,7 +373,7 @@ static int add_brk_on_nop(struct dyn_ftrace *rec)
 	return add_break(rec->ip, old);
 }
 
-static int add_breakpoints(struct dyn_ftrace *rec, int enable)
+static int add_breakpoints(struct dyn_ftrace *rec, bool enable)
 {
 	unsigned long ftrace_addr;
 	int ret;
@@ -481,7 +481,7 @@ static int add_update_nop(struct dyn_ftrace *rec)
 	return add_update_code(ip, new);
 }
 
-static int add_update(struct dyn_ftrace *rec, int enable)
+static int add_update(struct dyn_ftrace *rec, bool enable)
 {
 	unsigned long ftrace_addr;
 	int ret;
@@ -527,7 +527,7 @@ static int finish_update_nop(struct dyn_ftrace *rec)
 	return ftrace_write(ip, new, 1);
 }
 
-static int finish_update(struct dyn_ftrace *rec, int enable)
+static int finish_update(struct dyn_ftrace *rec, bool enable)
 {
 	unsigned long ftrace_addr;
 	int ret;
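The uaccess.h hunk above is what lets such non-faulting reads pass the access_ok() sanity check: with CONFIG_DEBUG_ATOMIC_SLEEP, WARN_ON_IN_IRQ() now also tolerates callers that have disabled page faults instead of warning for anything outside task context. A minimal sketch of the pattern being accommodated, using only standard uaccess helpers; the wrapper function name is illustrative.

#include <linux/uaccess.h>

/*
 * Sketch of the non-faulting access pattern the relaxed check
 * accommodates: page faults are disabled around the copy, so the
 * access cannot sleep even when it runs outside ordinary task
 * context.
 */
static long peek_user(void *dst, const void __user *src, size_t size)
{
	unsigned long left;

	if (!access_ok(src, size))	/* exercises WARN_ON_IN_IRQ() on debug builds */
		return -EFAULT;

	pagefault_disable();
	left = __copy_from_user_inatomic(dst, src, size);
	pagefault_enable();

	return left ? -EFAULT : 0;
}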