author	Mark Rutland <mark.rutland@arm.com>	2020-11-30 11:59:49 +0000
committer	Will Deacon <will@kernel.org>	2020-11-30 12:11:38 +0000
commit	f0cd5ac1e4c53cb691b3ed3cda1031e1c42153e2 (patch)
tree	8bfa5ac25e88a12048d27746f568f0d3c7b6bf5e /arch/arm64/kernel/entry-common.c
parent	7cd1ea1010acbede7eb87b6abb6198921fb36957 (diff)
download	lwn-f0cd5ac1e4c53cb691b3ed3cda1031e1c42153e2.tar.gz
	lwn-f0cd5ac1e4c53cb691b3ed3cda1031e1c42153e2.zip
arm64: entry: fix NMI {user, kernel}->kernel transitions
Exceptions which can be taken at (almost) any time are considered to be NMIs. On arm64 that includes:

* SDEI events
* GICv3 Pseudo-NMIs
* Kernel stack overflows
* Unexpected/unhandled exceptions

... but currently debug exceptions (BRKs, breakpoints, watchpoints, single-step) are not considered NMIs.

As these can be taken at any time, kernel features (lockdep, RCU, ftrace) may not be in a consistent kernel state. For example, we may take an NMI from the idle code or partway through an entry/exit path.

While nmi_enter() and nmi_exit() handle most of this state, notably they don't save/restore the lockdep state across an NMI being taken and handled. When interrupts are enabled and an NMI is taken, lockdep may see interrupts become disabled within the NMI code, but not see interrupts become enabled when returning from the NMI, leaving lockdep believing interrupts are disabled when they are actually enabled.

The x86 code handles this in idtentry_{enter,exit}_nmi(), which will shortly be moved to the generic entry code. As we can't use either yet, we copy the x86 approach in arm64-specific helpers. All the NMI entrypoints are marked as noinstr to prevent any instrumentation handling code being invoked before the state has been corrected.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20201130115950.22492-11-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
Diffstat (limited to 'arch/arm64/kernel/entry-common.c')
-rw-r--r--	arch/arm64/kernel/entry-common.c	34
1 file changed, 32 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 86622d8dd0d8..6d70be0d3e8f 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -63,10 +63,40 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
}
}
+void noinstr arm64_enter_nmi(struct pt_regs *regs)
+{
+ regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
+
+ __nmi_enter();
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ lockdep_hardirq_enter();
+ rcu_nmi_enter();
+
+ trace_hardirqs_off_finish();
+ ftrace_nmi_enter();
+}
+
+void noinstr arm64_exit_nmi(struct pt_regs *regs)
+{
+ bool restore = regs->lockdep_hardirqs;
+
+ ftrace_nmi_exit();
+ if (restore) {
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ }
+
+ rcu_nmi_exit();
+ lockdep_hardirq_exit();
+ if (restore)
+ lockdep_hardirqs_on(CALLER_ADDR0);
+ __nmi_exit();
+}
+
asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
- nmi_enter();
+ arm64_enter_nmi(regs);
else
enter_from_kernel_mode(regs);
}
@@ -74,7 +104,7 @@ asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
- nmi_exit();
+ arm64_exit_nmi(regs);
else
exit_to_kernel_mode(regs);
}
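
For illustration only (not part of this patch): a minimal sketch of how an exception handler that is treated as an NMI would use the new helpers, mirroring the enter_el1_irq_or_nmi()/exit_el1_irq_or_nmi() callers in the diff above. The handler name el1_hypothetical_nmi and its body are assumptions made for this example.

/*
 * Hypothetical sketch, not from this commit: bracket the handler body
 * with the new helpers so that lockdep/RCU/ftrace state is corrected
 * before any instrumentable code runs, and restored on the way out.
 */
asmlinkage void noinstr el1_hypothetical_nmi(struct pt_regs *regs)
{
	arm64_enter_nmi(regs);	/* save lockdep state, __nmi_enter(), rcu_nmi_enter() */

	/* ... handle the exception; instrumentation is safe from here ... */

	arm64_exit_nmi(regs);	/* rcu_nmi_exit(), restore lockdep state, __nmi_exit() */
}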