author    Masami Hiramatsu <mhiramat@kernel.org>    2021-10-21 09:55:09 +0900
committer Steven Rostedt (VMware) <rostedt@goodmis.org>    2021-10-22 12:16:53 -0400
commit    cd9bc2c9258816dc934b300705076519d7375b81 (patch)
tree      3be8beecf0c48ccece7a6f36a2c10ca19117753a
parent    fc6d647638a8412800dfd10ad687709cb4aee373 (diff)
arm64: Recover kretprobe modified return address in stacktrace
Since a kretprobe replaces the function's return address on the stack with the
address of kretprobe_trampoline, the stack unwinder shows the trampoline
instead of the correct return address.

Check whether the next return address is __kretprobe_trampoline(), and if so,
try to find the correct return address from the kretprobe instance list. For
this purpose, add a 'kr_cur' loop cursor to struct stackframe to remember the
current kretprobe instance.

With this fix, arm64 can now enable CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
and pass the kprobe self tests.

Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
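For readers not familiar with the kprobes side, the unwind_frame() hook below
leans on two helpers from the generic kprobes code, is_kretprobe_trampoline()
and kretprobe_find_ret_addr(), both reachable through <linux/kprobes.h> as of
this series. A rough sketch of their shape (simplified here, not copied
verbatim from include/linux/kprobes.h and kernel/kprobes.c):

/*
 * True if 'addr' is the kretprobe return trampoline that was written over
 * the real return address when the probed function was entered.
 */
static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
{
	return addr == kretprobe_trampoline_addr();
}

/*
 * Given the frame pointer 'fp' of a frame whose saved return address is the
 * trampoline, return the real return address recorded in one of tsk's
 * kretprobe instances.  '*cur' is a cursor into that per-task instance list,
 * so successive calls during a single unwind resolve nested kretprobes in
 * order; an unwind starts with *cur == NULL.
 */
unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
				      struct llist_node **cur);

That cursor argument is what the new 'kr_cur' member of struct stackframe
carries from one unwind_frame() call to the next, which is why
start_backtrace() has to reset it to NULL.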
-rw-r--r--	arch/arm64/Kconfig	1
-rw-r--r--	arch/arm64/include/asm/stacktrace.h	4
-rw-r--r--	arch/arm64/kernel/stacktrace.c	7
3 files changed, 12 insertions, 0 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5c7ae4c3954b..edde5171ffb2 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -11,6 +11,7 @@ config ARM64
 	select ACPI_PPTT if ACPI
 	select ARCH_HAS_DEBUG_WX
 	select ARCH_BINFMT_ELF_STATE
+	select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
 	select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
 	select ARCH_ENABLE_MEMORY_HOTPLUG
 	select ARCH_ENABLE_MEMORY_HOTREMOVE
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 8aebc00c1718..a4e046ef4568 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -9,6 +9,7 @@
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
 #include <linux/types.h>
+#include <linux/llist.h>
 
 #include <asm/memory.h>
 #include <asm/ptrace.h>
@@ -59,6 +60,9 @@ struct stackframe {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	int graph;
 #endif
+#ifdef CONFIG_KRETPROBES
+	struct llist_node *kr_cur;
+#endif
 };
 
 extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 8982a2b78acf..c30624fff6ac 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -41,6 +41,9 @@ void start_backtrace(struct stackframe *frame, unsigned long fp,
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	frame->graph = 0;
 #endif
+#ifdef CONFIG_KRETPROBES
+	frame->kr_cur = NULL;
+#endif
 
 	/*
 	 * Prime the first unwind.
@@ -129,6 +132,10 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 		frame->pc = ret_stack->ret;
 	}
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#ifdef CONFIG_KRETPROBES
+	if (is_kretprobe_trampoline(frame->pc))
+		frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur);
+#endif
 
 	frame->pc = ptrauth_strip_insn_pac(frame->pc);