Diffstat (limited to 'arch')

 arch/Kconfig                            |  6
 arch/powerpc/include/asm/thread_info.h  |  4
 arch/powerpc/kernel/signal.c            |  4
 arch/s390/include/asm/thread_info.h     | 24
 arch/s390/kernel/entry.S                | 31
 arch/x86/Kconfig                        |  1
 arch/x86/entry/common.c                 |  9
 arch/x86/include/asm/thread_info.h      | 13
 arch/x86/include/asm/unwind.h           |  6
 arch/x86/kernel/stacktrace.c            | 96
 arch/x86/kernel/unwind_frame.c          |  2
 11 files changed, 176 insertions(+), 20 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index cd211a14a88f..6ad00ad73459 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -713,6 +713,12 @@ config HAVE_STACK_VALIDATION
 	  Architecture supports the 'objtool check' host tool command, which
 	  performs compile-time stack metadata validation.
 
+config HAVE_RELIABLE_STACKTRACE
+	bool
+	help
+	  Architecture has a save_stack_trace_tsk_reliable() function which
+	  only returns a stack trace if it can guarantee the trace is reliable.
+
 config HAVE_ARCH_HASH
 	bool
 	default n
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 87e4b2d8dcd4..6fc6464f7421 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -92,6 +92,7 @@ static inline struct thread_info *current_thread_info(void)
 					  TIF_NEED_RESCHED */
 #define TIF_32BIT		4	/* 32 bit binary */
 #define TIF_RESTORE_TM		5	/* need to restore TM FP/VEC/VSX */
+#define TIF_PATCH_PENDING	6	/* pending live patching update */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SINGLESTEP		8	/* singlestepping active */
 #define TIF_NOHZ		9	/* in adaptive nohz mode */
@@ -115,6 +116,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_32BIT		(1<<TIF_32BIT)
 #define _TIF_RESTORE_TM		(1<<TIF_RESTORE_TM)
+#define _TIF_PATCH_PENDING	(1<<TIF_PATCH_PENDING)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
@@ -131,7 +133,7 @@ static inline struct thread_info *current_thread_info(void)
 
 #define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
 				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
-				 _TIF_RESTORE_TM)
+				 _TIF_RESTORE_TM | _TIF_PATCH_PENDING)
 #define _TIF_PERSYSCALL_MASK	(_TIF_RESTOREALL|_TIF_NOERROR)
 
 /* Bits in local_flags */
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 3a3671172436..e9436c5e1e09 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -14,6 +14,7 @@
 #include <linux/uprobes.h>
 #include <linux/key.h>
 #include <linux/context_tracking.h>
+#include <linux/livepatch.h>
 #include <asm/hw_breakpoint.h>
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
@@ -162,6 +163,9 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
 		tracehook_notify_resume(regs);
 	}
 
+	if (thread_info_flags & _TIF_PATCH_PENDING)
+		klp_update_patch_state(current);
+
 	user_enter();
 }
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index a5b54a445eb8..646845edf148 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -51,14 +51,13 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 /*
  * thread information flags bit numbers
  */
+/* _TIF_WORK bits */
 #define TIF_NOTIFY_RESUME	0	/* callback before returning to user */
 #define TIF_SIGPENDING		1	/* signal pending */
 #define TIF_NEED_RESCHED	2	/* rescheduling necessary */
-#define TIF_SYSCALL_TRACE	3	/* syscall trace active */
-#define TIF_SYSCALL_AUDIT	4	/* syscall auditing active */
-#define TIF_SECCOMP		5	/* secure computing */
-#define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
-#define TIF_UPROBE		7	/* breakpointed or single-stepping */
+#define TIF_UPROBE		3	/* breakpointed or single-stepping */
+#define TIF_PATCH_PENDING	4	/* pending live patching update */
+
 #define TIF_31BIT		16	/* 32bit process */
 #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	18	/* restore signal mask in do_signal() */
@@ -66,15 +65,24 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 #define TIF_BLOCK_STEP		20	/* This task is block stepped */
 #define TIF_UPROBE_SINGLESTEP	21	/* This task is uprobe single stepped */
 
+/* _TIF_TRACE bits */
+#define TIF_SYSCALL_TRACE	24	/* syscall trace active */
+#define TIF_SYSCALL_AUDIT	25	/* syscall auditing active */
+#define TIF_SECCOMP		26	/* secure computing */
+#define TIF_SYSCALL_TRACEPOINT	27	/* syscall tracepoint instrumentation */
+
 #define _TIF_NOTIFY_RESUME	_BITUL(TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		_BITUL(TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	_BITUL(TIF_NEED_RESCHED)
+#define _TIF_UPROBE		_BITUL(TIF_UPROBE)
+#define _TIF_PATCH_PENDING	_BITUL(TIF_PATCH_PENDING)
+
+#define _TIF_31BIT		_BITUL(TIF_31BIT)
+#define _TIF_SINGLE_STEP	_BITUL(TIF_SINGLE_STEP)
+
 #define _TIF_SYSCALL_TRACE	_BITUL(TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	_BITUL(TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		_BITUL(TIF_SECCOMP)
 #define _TIF_SYSCALL_TRACEPOINT	_BITUL(TIF_SYSCALL_TRACEPOINT)
-#define _TIF_UPROBE		_BITUL(TIF_UPROBE)
-#define _TIF_31BIT		_BITUL(TIF_31BIT)
-#define _TIF_SINGLE_STEP	_BITUL(TIF_SINGLE_STEP)
 
 #endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index dff2152350a7..a08b5eea5567 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -47,7 +47,7 @@ STACK_SIZE  = 1 << STACK_SHIFT
 STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
 
 _TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-		   _TIF_UPROBE)
+		   _TIF_UPROBE | _TIF_PATCH_PENDING)
 _TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
 		   _TIF_SYSCALL_TRACEPOINT)
 _CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
@@ -334,6 +334,11 @@ ENTRY(system_call)
 #endif
 	TSTMSK	__PT_FLAGS(%r11),_PIF_PER_TRAP
 	jo	.Lsysc_singlestep
+#ifdef CONFIG_LIVEPATCH
+	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
+	jo	.Lsysc_patch_pending	# handle live patching just before
+					# signals and possible syscall restart
+#endif
 	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
 	jo	.Lsysc_sigpending
 	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
@@ -409,6 +414,16 @@ ENTRY(system_call)
 #endif
 
 #
+# _TIF_PATCH_PENDING is set, call klp_update_patch_state
+#
+#ifdef CONFIG_LIVEPATCH
+.Lsysc_patch_pending:
+	lg	%r2,__LC_CURRENT	# pass pointer to task struct
+	larl	%r14,.Lsysc_return
+	jg	klp_update_patch_state
+#endif
+
+#
 # _PIF_PER_TRAP is set, call do_per_trap
 #
 .Lsysc_singlestep:
@@ -659,6 +674,10 @@ ENTRY(io_int_handler)
 	jo	.Lio_mcck_pending
 	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
 	jo	.Lio_reschedule
+#ifdef CONFIG_LIVEPATCH
+	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
+	jo	.Lio_patch_pending
+#endif
 	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
 	jo	.Lio_sigpending
 	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
@@ -708,6 +727,16 @@ ENTRY(io_int_handler)
 	j	.Lio_return
 
 #
+# _TIF_PATCH_PENDING is set, call klp_update_patch_state
+#
+#ifdef CONFIG_LIVEPATCH
+.Lio_patch_pending:
+	lg	%r2,__LC_CURRENT	# pass pointer to task struct
+	larl	%r14,.Lio_return
+	jg	klp_update_patch_state
+#endif
+
+#
 # _TIF_SIGPENDING or is set, call do_signal
 #
 .Lio_sigpending:
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cc98d5a294ee..2a26852c11b6 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -160,6 +160,7 @@ config X86
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_REGS_AND_STACK_ACCESS_API
+	select HAVE_RELIABLE_STACKTRACE		if X86_64 && FRAME_POINTER && STACK_VALIDATION
 	select HAVE_STACK_VALIDATION		if X86_64
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UNSTABLE_SCHED_CLOCK
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 370c42c7f046..cdefcfdd9e63 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -22,6 +22,7 @@
 #include <linux/context_tracking.h>
 #include <linux/user-return-notifier.h>
 #include <linux/uprobes.h>
+#include <linux/livepatch.h>
 
 #include <asm/desc.h>
 #include <asm/traps.h>
@@ -130,14 +131,13 @@ static long syscall_trace_enter(struct pt_regs *regs)
 
 #define EXIT_TO_USERMODE_LOOP_FLAGS				\
 	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
-	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)
+	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)
 
 static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
 {
 	/*
 	 * In order to return to user mode, we need to have IRQs off with
-	 * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY,
-	 * _TIF_UPROBE, or _TIF_NEED_RESCHED set.  Several of these flags
+	 * none of EXIT_TO_USERMODE_LOOP_FLAGS set.  Several of these flags
 	 * can be set at any time on preemptable kernels if we have IRQs on,
 	 * so we need to loop.  Disabling preemption wouldn't help: doing the
 	 * work to clear some of the flags can sleep.
@@ -164,6 +164,9 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
 		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
 			fire_user_return_notifiers();
 
+		if (cached_flags & _TIF_PATCH_PENDING)
+			klp_update_patch_state(current);
+
 		/* Disable IRQs and retry */
 		local_irq_disable();
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index ad6f5eb07a95..83372dc43943 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -73,9 +73,6 @@ struct thread_info {
  * thread information flags
  * - these are process state flags that various assembly files
  *   may need to access
- * - pending work-to-be-done flags are in LSW
- * - other flags in MSW
- * Warning: layout of LSW is hardcoded in entry.S
  */
 #define TIF_SYSCALL_TRACE	0	/* syscall trace active */
 #define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
@@ -87,6 +84,7 @@ struct thread_info {
 #define TIF_SECCOMP		8	/* secure computing */
 #define TIF_USER_RETURN_NOTIFY	11	/* notify kernel of userspace return */
 #define TIF_UPROBE		12	/* breakpointed or singlestepping */
+#define TIF_PATCH_PENDING	13	/* pending live patching update */
 #define TIF_NOTSC		16	/* TSC is not accessible in userland */
 #define TIF_IA32		17	/* IA32 compatibility process */
 #define TIF_NOHZ		19	/* in adaptive nohz mode */
@@ -103,13 +101,14 @@ struct thread_info {
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
-#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 #define _TIF_USER_RETURN_NOTIFY	(1 << TIF_USER_RETURN_NOTIFY)
 #define _TIF_UPROBE		(1 << TIF_UPROBE)
+#define _TIF_PATCH_PENDING	(1 << TIF_PATCH_PENDING)
 #define _TIF_NOTSC		(1 << TIF_NOTSC)
 #define _TIF_IA32		(1 << TIF_IA32)
 #define _TIF_NOHZ		(1 << TIF_NOHZ)
@@ -133,8 +132,10 @@ struct thread_info {
 
 /* work to do on any return to user space */
 #define _TIF_ALLWORK_MASK						\
-	((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT |	\
-	 _TIF_NOHZ)
+	(_TIF_SYSCALL_TRACE | _TIF_NOTIFY_RESUME | _TIF_SIGPENDING |	\
+	 _TIF_NEED_RESCHED | _TIF_SINGLESTEP | _TIF_SYSCALL_EMU |	\
+	 _TIF_SYSCALL_AUDIT | _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE |	\
+	 _TIF_PATCH_PENDING | _TIF_NOHZ | _TIF_SYSCALL_TRACEPOINT)
 
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW							\
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index 6fa75b17aec3..137e9cce2ab4 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -11,6 +11,7 @@ struct unwind_state {
 	unsigned long stack_mask;
 	struct task_struct *task;
 	int graph_idx;
+	bool error;
 #ifdef CONFIG_FRAME_POINTER
 	unsigned long *bp, *orig_sp;
 	struct pt_regs *regs;
@@ -40,6 +41,11 @@ void unwind_start(struct unwind_state *state, struct task_struct *task,
 	__unwind_start(state, task, regs, first_frame);
 }
 
+static inline bool unwind_error(struct unwind_state *state)
+{
+	return state->error;
+}
+
 #ifdef CONFIG_FRAME_POINTER
 
 static inline
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 8e2b79b88e51..8dabd7bf1673 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -76,6 +76,101 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
 
+#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
+
+#define STACKTRACE_DUMP_ONCE(task) ({				\
+	static bool __section(.data.unlikely) __dumped;		\
+								\
+	if (!__dumped) {					\
+		__dumped = true;				\
+		WARN_ON(1);					\
+		show_stack(task, NULL);				\
+	}							\
+})
+
+static int __save_stack_trace_reliable(struct stack_trace *trace,
+				       struct task_struct *task)
+{
+	struct unwind_state state;
+	struct pt_regs *regs;
+	unsigned long addr;
+
+	for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
+	     unwind_next_frame(&state)) {
+
+		regs = unwind_get_entry_regs(&state);
+		if (regs) {
+			/*
+			 * Kernel mode registers on the stack indicate an
+			 * in-kernel interrupt or exception (e.g., preemption
+			 * or a page fault), which can make frame pointers
+			 * unreliable.
+			 */
+			if (!user_mode(regs))
+				return -EINVAL;
+
+			/*
+			 * The last frame contains the user mode syscall
+			 * pt_regs.  Skip it and finish the unwind.
+			 */
+			unwind_next_frame(&state);
+			if (!unwind_done(&state)) {
+				STACKTRACE_DUMP_ONCE(task);
+				return -EINVAL;
+			}
+			break;
+		}
+
+		addr = unwind_get_return_address(&state);
+
+		/*
+		 * A NULL or invalid return address probably means there's some
+		 * generated code which __kernel_text_address() doesn't know
+		 * about.
+		 */
+		if (!addr) {
+			STACKTRACE_DUMP_ONCE(task);
+			return -EINVAL;
+		}
+
+		if (save_stack_address(trace, addr, false))
+			return -EINVAL;
+	}
+
+	/* Check for stack corruption */
+	if (unwind_error(&state)) {
+		STACKTRACE_DUMP_ONCE(task);
+		return -EINVAL;
+	}
+
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+
+	return 0;
+}
+
+/*
+ * This function returns an error if it detects any unreliable features of the
+ * stack.  Otherwise it guarantees that the stack trace is reliable.
+ *
+ * If the task is not 'current', the caller *must* ensure the task is inactive.
+ */
+int save_stack_trace_tsk_reliable(struct task_struct *tsk,
+				  struct stack_trace *trace)
+{
+	int ret;
+
+	if (!try_get_task_stack(tsk))
+		return -EINVAL;
+
+	ret = __save_stack_trace_reliable(trace, tsk);
+
+	put_task_stack(tsk);
+
+	return ret;
+}
+#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
+
 /* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
 
 struct stack_frame_user {
@@ -138,4 +233,3 @@ void save_stack_trace_user(struct stack_trace *trace)
 	if (trace->nr_entries < trace->max_entries)
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
-
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index 478d15dbaee4..5ed43910e04b 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -225,6 +225,8 @@ bool unwind_next_frame(struct unwind_state *state)
 	return true;
 
 bad_address:
+	state->error = true;
+
 	/*
 	 * When unwinding a non-current task, the task might actually be
 	 * running on another CPU, in which case it could be modifying its
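For context, the sketch below shows how a caller might use the save_stack_trace_tsk_reliable() interface introduced above, for example a live-patching transition check. It is illustrative only and not part of this diff: MAX_STACK_ENTRIES, the name example_check_task_stack(), the assumption that callers are serialized, that a non-current task stays inactive while it runs, and that CONFIG_HAVE_RELIABLE_STACKTRACE is enabled are all assumptions made for the example.

/* Sketch only -- not part of this patch. */
#include <linux/sched.h>
#include <linux/stacktrace.h>

#define MAX_STACK_ENTRIES	64	/* assumed bound for this example */

static int example_check_task_stack(struct task_struct *task)
{
	/*
	 * static: assumes callers are serialized, as a livepatch-style
	 * consumer walking tasks under a lock would be.
	 */
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= MAX_STACK_ENTRIES,
		.entries	= entries,
		.skip		= 0,
	};
	int ret;

	/*
	 * Fails with -EINVAL if any frame looks unreliable: kernel-mode
	 * pt_regs found on the stack, an unknown return address, or
	 * detected stack corruption.  If task != current, the caller must
	 * guarantee the task remains inactive for the duration of the call.
	 */
	ret = save_stack_trace_tsk_reliable(task, &trace);
	if (ret)
		return ret;

	/* On success, entries[0 .. trace.nr_entries - 1] can be trusted. */
	return 0;
}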