path: root/arch/powerpc/mm/fault.c
author     Nicholas Piggin <npiggin@gmail.com>      2021-03-16 20:42:02 +1000
committer  Michael Ellerman <mpe@ellerman.id.au>    2021-04-14 23:04:43 +1000
commit     d738ee8d56de38c91610741f672ec5c1ffae76fc (patch)
tree       79fa84d930a84cea7357e51700a3fa1695033f01 /arch/powerpc/mm/fault.c
parent     ceff77efa4f8d9f02d8442171b325d3b7068fe5e (diff)
download   lwn-d738ee8d56de38c91610741f672ec5c1ffae76fc.tar.gz
           lwn-d738ee8d56de38c91610741f672ec5c1ffae76fc.zip
powerpc/64e/interrupt: handle bad_page_fault in C
With non-volatile registers saved on interrupt, bad_page_fault can now be
called by do_page_fault.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210316104206.407354-9-npiggin@gmail.com
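For context, the C-side handler at this point in the series is a thin wrapper
around __do_page_fault(); the sketch below is a simplified reconstruction, not
part of this diff, and the wrapper macro shape shown is an assumption. Its long
return value is what the low-level assembly previously inspected in order to
run bad_page_fault itself; after this change that is no longer necessary,
because __bad_page_fault() is called directly from C.

/*
 * Simplified sketch (not copied from the tree) of the interrupt wrapper
 * around __do_page_fault().  DEFINE_INTERRUPT_HANDLER_RET hands the
 * handler's return value back to the low-level entry code; with
 * __bad_page_fault() now called from C, the 64e assembly no longer needs
 * to act on that value.
 */
DEFINE_INTERRUPT_HANDLER_RET(do_page_fault)
{
	return __do_page_fault(regs);
}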
Diffstat (limited to 'arch/powerpc/mm/fault.c')
-rw-r--r--  arch/powerpc/mm/fault.c  5
1 file changed, 1 insertion, 4 deletions
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 0c0b1c2cfb49..18e588fda43d 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -552,12 +552,9 @@ static long __do_page_fault(struct pt_regs *regs)
 	if (likely(entry)) {
 		instruction_pointer_set(regs, extable_fixup(entry));
 		return 0;
-	} else if (!IS_ENABLED(CONFIG_PPC_BOOK3E_64)) {
+	} else {
 		__bad_page_fault(regs, err);
 		return 0;
-	} else {
-		/* 32 and 64e handle the bad page fault in asm */
-		return err;
 	}
 }
 NOKPROBE_SYMBOL(__do_page_fault);
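Read as a whole, the exception-table fixup tail of __do_page_fault() after this
patch looks as follows (reconstructed from the hunk above, kernel indentation
approximated):

	if (likely(entry)) {
		instruction_pointer_set(regs, extable_fixup(entry));
		return 0;
	} else {
		/* No fixup entry: report the bad fault from C on all platforms. */
		__bad_page_fault(regs, err);
		return 0;
	}

With the special case gone, every platform takes the same path here; the
corresponding 64e assembly is handled by the parts of this commit outside
arch/powerpc/mm/fault.c, which this file-limited diff does not show.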