author     Mark Rutland <mark.rutland@arm.com>   2023-04-11 17:29:43 +0100
committer  Will Deacon <will@kernel.org>         2023-04-11 18:34:59 +0100
commit     b5ecc19e684eee1df32a035b7d981932f344fc67
tree       2262790a549a3128c322cb16a103e71c0e4a1dbf
parent     ead6122c289eec06bc1bd167442dfab358fafefb
arm64: stacktrace: always inline core stacktrace functions
The arm64 stacktrace code can be used in kprobe context, and so cannot
be safely probed. Some (but not all) of the unwind functions are
annotated with `NOKPROBE_SYMBOL()` to ensure this, while others are marked as
`__always_inline`, relying on the top-level unwind function being marked
as `noinstr`.
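As a minimal sketch (the function names are the real stacktrace.c symbols, but
the bodies here are elided stand-ins), the two coexisting patterns look like
this:

	/*
	 * Pattern 1: an out-of-line function, explicitly excluded from
	 * kprobes with NOKPROBE_SYMBOL().
	 */
	static int notrace unwind_next(struct unwind_state *state)
	{
		/* ... advance state to the next frame record ... */
		return 0;
	}
	NOKPROBE_SYMBOL(unwind_next);

	/*
	 * Pattern 2: forced inline. The code is absorbed into its caller,
	 * so it only ever executes inside the noinstr top-level function
	 * and cannot be probed on its own.
	 */
	static __always_inline void unwind_init_from_caller(struct unwind_state *state)
	{
		/* ... initialise *state from the caller's frame ... */
	}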
This patch has stacktrace.c consistently mark the internal stacktrace
functions as `__always_inline`, removing the need for NOKPROBE_SYMBOL(), since
the top-level unwind function (arch_stack_walk()) is marked as `noinstr`. This
is a simpler and more consistent pattern to follow for future additions to
stacktrace.c.
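Concretely, the resulting shape is (again a sketch with bodies elided; the
arch_stack_walk() prototype is the generic one from <linux/stacktrace.h>):

	static __always_inline int
	unwind_next(struct unwind_state *state)
	{
		/* ... */
	}

	static __always_inline void
	unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry,
	       void *cookie)
	{
		/* ... */
	}

	/*
	 * The only out-of-line entry point: the helpers above are inlined
	 * into it, so marking it noinstr covers the entire unwind.
	 */
	noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
				     void *cookie, struct task_struct *task,
				     struct pt_regs *regs)
	{
		/* ... pick an unwind_init_from_*() variant, then unwind() ... */
	}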
There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Kalesh Singh <kaleshsingh@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Madhavan T. Venkataraman <madvenka@linux.microsoft.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20230411162943.203199-4-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 23 +++++++++++++----------
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index ee398cf5141f..f527fadcb33b 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -25,8 +25,9 @@
  *
  * The regs must be on a stack currently owned by the calling task.
  */
-static __always_inline void unwind_init_from_regs(struct unwind_state *state,
-						  struct pt_regs *regs)
+static __always_inline void
+unwind_init_from_regs(struct unwind_state *state,
+		      struct pt_regs *regs)
 {
 	unwind_init_common(state, current);
 
@@ -42,7 +43,8 @@ static __always_inline void unwind_init_from_regs(struct unwind_state *state,
  *
  * The function which invokes this must be noinline.
  */
-static __always_inline void unwind_init_from_caller(struct unwind_state *state)
+static __always_inline void
+unwind_init_from_caller(struct unwind_state *state)
 {
 	unwind_init_common(state, current);
 
@@ -60,8 +62,9 @@ static __always_inline void unwind_init_from_caller(struct unwind_state *state)
  * duration of the unwind, or the unwind will be bogus. It is never valid to
  * call this for the current task.
  */
-static __always_inline void unwind_init_from_task(struct unwind_state *state,
-						  struct task_struct *task)
+static __always_inline void
+unwind_init_from_task(struct unwind_state *state,
+		      struct task_struct *task)
 {
 	unwind_init_common(state, task);
 
@@ -102,7 +105,8 @@ unwind_recover_return_address(struct unwind_state *state)
  * records (e.g. a cycle), determined based on the location and fp value of A
  * and the location (but not the fp value) of B.
  */
-static int notrace unwind_next(struct unwind_state *state)
+static __always_inline int
+unwind_next(struct unwind_state *state)
 {
 	struct task_struct *tsk = state->task;
 	unsigned long fp = state->fp;
@@ -120,10 +124,10 @@ static int notrace unwind_next(struct unwind_state *state)
 
 	return unwind_recover_return_address(state);
 }
-NOKPROBE_SYMBOL(unwind_next);
 
-static void notrace unwind(struct unwind_state *state,
-			   stack_trace_consume_fn consume_entry, void *cookie)
+static __always_inline void
+unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry,
+       void *cookie)
 {
 	if (unwind_recover_return_address(state))
 		return;
@@ -138,7 +142,6 @@ static void notrace unwind(struct unwind_state *state,
 			break;
 		}
 	}
-NOKPROBE_SYMBOL(unwind);
 
 /*
  * Per-cpu stacks are only accessible when unwinding the current task in a