author | Benjamin Gray <bgray@linux.ibm.com> | 2024-03-25 16:28:15 +1100 |
---|---|---|
committer | Michael Ellerman <mpe@ellerman.id.au> | 2024-05-08 00:35:42 +1000 |
commit | c3710ee7cd695dc1b0b4b8cfbf464e313467f970 (patch) | |
tree | 28ec95cd53817160e1a24fa032da55b2bd71fe6b /arch/powerpc/lib/code-patching.c | |
parent | c5ef5e35844ad30503c49802b9d6a6c818fca886 (diff) | |
powerpc/code-patching: Use dedicated memory routines for patching
The patching page set up as a writable alias may be in quadrant 0
(userspace) when the temporary mm path is used; when it is, sanitiser
failures result. Sanitiser failures also occur on the non-mm path,
because the plain memset family is instrumented and KASAN treats the
patching window as poisoned.
Introduce locally defined patch_* variants of memset that perform an
uninstrumented, lower-level set and detect write errors, as the
original single-instruction patch variant does.
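For context on the mechanism these helpers rely on: __put_kernel_nofault(dst, src, type, err_label) from <linux/uaccess.h> performs a raw kernel store with an exception-table fixup and no sanitiser instrumentation, branching to err_label if the store faults. A minimal sketch of the pattern; the function name is illustrative, not part of the patch:

```c
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

/*
 * Illustrative only: store one u64 through a kernel alias that KASAN
 * may treat as poisoned.  A plain assignment or memset64() would be
 * instrumented and falsely reported; the nofault store is not.
 */
static int example_poke_u64(u64 *addr, u64 val)
{
	/* arguments: destination, source pointer, access type, fault label */
	__put_kernel_nofault(addr, &val, u64, failed);

	return 0;

failed:
	return -EPERM;	/* the write faulted */
}
```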
copy_to_user() is not correct here, as the PTE makes this a proper
kernel page (the EAA is privileged access only, RW). The page just
happens to be in quadrant 0 because that is the hardware's mechanism
for selecting between the current PID and PID 0 in translations.
Importantly, it would be incorrect to allow user page accesses here.
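The practical upshot for the bulk-copy path: copy_to_user() would access the destination as a user page, which this PTE forbids, while copy_to_kernel_nofault() (also from <linux/uaccess.h>) performs a kernel-to-kernel copy that merely survives a fault. A hedged sketch; the wrapper name is hypothetical:

```c
#include <linux/types.h>
#include <linux/uaccess.h>

/*
 * Hypothetical wrapper: the patching window is a kernel page that only
 * carries a quadrant-0 (user-range) address, so the kernel copy
 * primitive is the right one.  copy_to_kernel_nofault() returns 0 on
 * success or -EFAULT if the write faults, rather than oopsing.
 */
static int example_write_window(u32 *patch_addr, const u32 *code, size_t len)
{
	return copy_to_kernel_nofault(patch_addr, code, len);
}
```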
Now that the patching memsets are used, a failure is also propagated up
to the caller, as the single-instruction patch variant does.
Signed-off-by: Benjamin Gray <bgray@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20240325052815.854044-2-bgray@linux.ibm.com
Diffstat (limited to 'arch/powerpc/lib/code-patching.c')
-rw-r--r-- | arch/powerpc/lib/code-patching.c | 31 |
1 file changed, 27 insertions, 4 deletions
```diff
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index c6ab46156cda..df64343b9214 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -372,9 +372,32 @@ int patch_instruction(u32 *addr, ppc_inst_t instr)
 }
 NOKPROBE_SYMBOL(patch_instruction);
 
+static int patch_memset64(u64 *addr, u64 val, size_t count)
+{
+	for (u64 *end = addr + count; addr < end; addr++)
+		__put_kernel_nofault(addr, &val, u64, failed);
+
+	return 0;
+
+failed:
+	return -EPERM;
+}
+
+static int patch_memset32(u32 *addr, u32 val, size_t count)
+{
+	for (u32 *end = addr + count; addr < end; addr++)
+		__put_kernel_nofault(addr, &val, u32, failed);
+
+	return 0;
+
+failed:
+	return -EPERM;
+}
+
 static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool repeat_instr)
 {
 	unsigned long start = (unsigned long)patch_addr;
+	int err;
 
 	/* Repeat instruction */
 	if (repeat_instr) {
@@ -383,19 +406,19 @@ static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool rep
 		if (ppc_inst_prefixed(instr)) {
 			u64 val = ppc_inst_as_ulong(instr);
 
-			memset64((u64 *)patch_addr, val, len / 8);
+			err = patch_memset64((u64 *)patch_addr, val, len / 8);
 		} else {
 			u32 val = ppc_inst_val(instr);
 
-			memset32(patch_addr, val, len / 4);
+			err = patch_memset32(patch_addr, val, len / 4);
 		}
 	} else {
-		memcpy(patch_addr, code, len);
+		err = copy_to_kernel_nofault(patch_addr, code, len);
 	}
 
 	smp_wmb();	/* smp write barrier */
 	flush_icache_range(start, start + len);
 
-	return 0;
+	return err;
 }
 
 /*
```
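With __patch_instructions() now returning the write status, a caller can abandon a failed patch instead of flushing and trusting a window that was never written. A hypothetical caller shape, not taken from the patch:

```c
#include <linux/types.h>
#include <linux/printk.h>

/* Hypothetical: act on the propagated error before trusting the patch. */
static int example_apply(u32 *patch_addr, u32 *code, size_t len, bool repeat_instr)
{
	int err = __patch_instructions(patch_addr, code, len, repeat_instr);

	if (err)
		pr_warn("code patching failed at %px: %d\n", patch_addr, err);

	return err;
}
```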