Diffstat (limited to 'arch/x86/lib')
-rw-r--r--  arch/x86/lib/Makefile           |   1
-rw-r--r--  arch/x86/lib/checksum_32.S      | 117
-rw-r--r--  arch/x86/lib/copy_mc.c          |  96
-rw-r--r--  arch/x86/lib/copy_mc_64.S       | 163
-rw-r--r--  arch/x86/lib/copy_user_64.S     | 111
-rw-r--r--  arch/x86/lib/csum-copy_64.S     | 140
-rw-r--r--  arch/x86/lib/csum-wrappers_64.c |  86
-rw-r--r--  arch/x86/lib/getuser.S          | 107
-rw-r--r--  arch/x86/lib/insn-eval.c        | 130
-rw-r--r--  arch/x86/lib/memcpy_64.S        | 115
-rw-r--r--  arch/x86/lib/putuser.S          |  47
-rw-r--r--  arch/x86/lib/usercopy_64.c      |  21
12 files changed, 704 insertions(+), 430 deletions(-)
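
Note on the main change below: the series replaces __memcpy_mcsafe() with copy_mc_fragile()/copy_mc_enhanced_fast_string(), reached through copy_mc_to_kernel() and copy_mc_to_user(), and drops the src_err_ptr/dst_err_ptr arguments from the checksum-copy helpers. As a rough sketch of the new calling convention — the example_read_from_pmem() wrapper and its -EIO handling are made up for illustration and are not part of this series — a caller only inspects the "bytes not copied" return value:

/*
 * Hypothetical caller sketch (not from this series): consuming
 * copy_mc_to_kernel() after the rename. The return value is the
 * number of bytes NOT copied; 0 means the whole buffer was read
 * without hitting poison.
 */
#include <linux/uaccess.h>	/* copy_mc_to_kernel() */
#include <linux/errno.h>

static int example_read_from_pmem(void *dst, const void *pmem_src, size_t len)
{
	unsigned long rem;

	/*
	 * Dispatches to copy_mc_fragile(), the ERMS fast-string copy,
	 * or plain memcpy() depending on CPU quirks and features.
	 */
	rem = copy_mc_to_kernel(dst, pmem_src, len);
	if (rem)
		return -EIO;	/* 'rem' trailing bytes were not copied */

	return 0;
}
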
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index aa067859a70b..bad4dee4f0e4 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -44,6 +44,7 @@ obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o lib-y := delay.o misc.o cmdline.o cpu.o lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o lib-y += memcpy_$(BITS).o +lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc.o copy_mc_64.o lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S index d1d768912368..4304320e51f4 100644 --- a/arch/x86/lib/checksum_32.S +++ b/arch/x86/lib/checksum_32.S @@ -253,28 +253,17 @@ EXPORT_SYMBOL(csum_partial) /* unsigned int csum_partial_copy_generic (const char *src, char *dst, - int len, int sum, int *src_err_ptr, int *dst_err_ptr) + int len) */ /* * Copy from ds while checksumming, otherwise like csum_partial - * - * The macros SRC and DST specify the type of access for the instruction. - * thus we can call a custom exception handler for all access types. - * - * FIXME: could someone double-check whether I haven't mixed up some SRC and - * DST definitions? It's damn hard to trigger all cases. I hope I got - * them all but there's no guarantee. */ -#define SRC(y...) \ +#define EXC(y...) \ 9999: y; \ _ASM_EXTABLE_UA(9999b, 6001f) -#define DST(y...) \ - 9999: y; \ - _ASM_EXTABLE_UA(9999b, 6002f) - #ifndef CONFIG_X86_USE_PPRO_CHECKSUM #define ARGBASE 16 @@ -285,20 +274,20 @@ SYM_FUNC_START(csum_partial_copy_generic) pushl %edi pushl %esi pushl %ebx - movl ARGBASE+16(%esp),%eax # sum movl ARGBASE+12(%esp),%ecx # len movl ARGBASE+4(%esp),%esi # src movl ARGBASE+8(%esp),%edi # dst + movl $-1, %eax # sum testl $2, %edi # Check alignment. jz 2f # Jump if alignment is ok. subl $2, %ecx # Alignment uses up two bytes. jae 1f # Jump if we had at least two bytes. addl $2, %ecx # ecx was < 2. Deal with it. jmp 4f -SRC(1: movw (%esi), %bx ) +EXC(1: movw (%esi), %bx ) addl $2, %esi -DST( movw %bx, (%edi) ) +EXC( movw %bx, (%edi) ) addl $2, %edi addw %bx, %ax adcl $0, %eax @@ -306,34 +295,34 @@ DST( movw %bx, (%edi) ) movl %ecx, FP(%esp) shrl $5, %ecx jz 2f - testl %esi, %esi -SRC(1: movl (%esi), %ebx ) -SRC( movl 4(%esi), %edx ) + testl %esi, %esi # what's wrong with clc? 
+EXC(1: movl (%esi), %ebx ) +EXC( movl 4(%esi), %edx ) adcl %ebx, %eax -DST( movl %ebx, (%edi) ) +EXC( movl %ebx, (%edi) ) adcl %edx, %eax -DST( movl %edx, 4(%edi) ) +EXC( movl %edx, 4(%edi) ) -SRC( movl 8(%esi), %ebx ) -SRC( movl 12(%esi), %edx ) +EXC( movl 8(%esi), %ebx ) +EXC( movl 12(%esi), %edx ) adcl %ebx, %eax -DST( movl %ebx, 8(%edi) ) +EXC( movl %ebx, 8(%edi) ) adcl %edx, %eax -DST( movl %edx, 12(%edi) ) +EXC( movl %edx, 12(%edi) ) -SRC( movl 16(%esi), %ebx ) -SRC( movl 20(%esi), %edx ) +EXC( movl 16(%esi), %ebx ) +EXC( movl 20(%esi), %edx ) adcl %ebx, %eax -DST( movl %ebx, 16(%edi) ) +EXC( movl %ebx, 16(%edi) ) adcl %edx, %eax -DST( movl %edx, 20(%edi) ) +EXC( movl %edx, 20(%edi) ) -SRC( movl 24(%esi), %ebx ) -SRC( movl 28(%esi), %edx ) +EXC( movl 24(%esi), %ebx ) +EXC( movl 28(%esi), %edx ) adcl %ebx, %eax -DST( movl %ebx, 24(%edi) ) +EXC( movl %ebx, 24(%edi) ) adcl %edx, %eax -DST( movl %edx, 28(%edi) ) +EXC( movl %edx, 28(%edi) ) lea 32(%esi), %esi lea 32(%edi), %edi @@ -345,9 +334,9 @@ DST( movl %edx, 28(%edi) ) andl $0x1c, %edx je 4f shrl $2, %edx # This clears CF -SRC(3: movl (%esi), %ebx ) +EXC(3: movl (%esi), %ebx ) adcl %ebx, %eax -DST( movl %ebx, (%edi) ) +EXC( movl %ebx, (%edi) ) lea 4(%esi), %esi lea 4(%edi), %edi dec %edx @@ -357,39 +346,24 @@ DST( movl %ebx, (%edi) ) jz 7f cmpl $2, %ecx jb 5f -SRC( movw (%esi), %cx ) +EXC( movw (%esi), %cx ) leal 2(%esi), %esi -DST( movw %cx, (%edi) ) +EXC( movw %cx, (%edi) ) leal 2(%edi), %edi je 6f shll $16,%ecx -SRC(5: movb (%esi), %cl ) -DST( movb %cl, (%edi) ) +EXC(5: movb (%esi), %cl ) +EXC( movb %cl, (%edi) ) 6: addl %ecx, %eax adcl $0, %eax 7: -5000: # Exception handler: .section .fixup, "ax" 6001: - movl ARGBASE+20(%esp), %ebx # src_err_ptr - movl $-EFAULT, (%ebx) - - # zero the complete destination - computing the rest - # is too much work - movl ARGBASE+8(%esp), %edi # dst - movl ARGBASE+12(%esp), %ecx # len - xorl %eax,%eax - rep ; stosb - - jmp 5000b - -6002: - movl ARGBASE+24(%esp), %ebx # dst_err_ptr - movl $-EFAULT,(%ebx) - jmp 5000b + xorl %eax, %eax + jmp 7b .previous @@ -405,14 +379,14 @@ SYM_FUNC_END(csum_partial_copy_generic) /* Version for PentiumII/PPro */ #define ROUND1(x) \ - SRC(movl x(%esi), %ebx ) ; \ + EXC(movl x(%esi), %ebx ) ; \ addl %ebx, %eax ; \ - DST(movl %ebx, x(%edi) ) ; + EXC(movl %ebx, x(%edi) ) ; #define ROUND(x) \ - SRC(movl x(%esi), %ebx ) ; \ + EXC(movl x(%esi), %ebx ) ; \ adcl %ebx, %eax ; \ - DST(movl %ebx, x(%edi) ) ; + EXC(movl %ebx, x(%edi) ) ; #define ARGBASE 12 @@ -423,7 +397,7 @@ SYM_FUNC_START(csum_partial_copy_generic) movl ARGBASE+4(%esp),%esi #src movl ARGBASE+8(%esp),%edi #dst movl ARGBASE+12(%esp),%ecx #len - movl ARGBASE+16(%esp),%eax #sum + movl $-1, %eax #sum # movl %ecx, %edx movl %ecx, %ebx movl %esi, %edx @@ -439,7 +413,7 @@ SYM_FUNC_START(csum_partial_copy_generic) JMP_NOSPEC ebx 1: addl $64,%esi addl $64,%edi - SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl) + EXC(movb -32(%edx),%bl) ; EXC(movb (%edx),%bl) ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52) ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36) ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20) @@ -453,29 +427,20 @@ SYM_FUNC_START(csum_partial_copy_generic) jz 7f cmpl $2, %edx jb 5f -SRC( movw (%esi), %dx ) +EXC( movw (%esi), %dx ) leal 2(%esi), %esi -DST( movw %dx, (%edi) ) +EXC( movw %dx, (%edi) ) leal 2(%edi), %edi je 6f shll $16,%edx 5: -SRC( movb (%esi), %dl ) -DST( movb %dl, (%edi) ) +EXC( movb (%esi), %dl ) +EXC( movb %dl, (%edi) ) 6: addl %edx, %eax adcl $0, %eax 7: .section .fixup, "ax" -6001: movl ARGBASE+20(%esp), 
%ebx # src_err_ptr - movl $-EFAULT, (%ebx) - # zero the complete destination (computing the rest is too much work) - movl ARGBASE+8(%esp),%edi # dst - movl ARGBASE+12(%esp),%ecx # len - xorl %eax,%eax - rep; stosb - jmp 7b -6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr - movl $-EFAULT, (%ebx) +6001: xorl %eax, %eax jmp 7b .previous diff --git a/arch/x86/lib/copy_mc.c b/arch/x86/lib/copy_mc.c new file mode 100644 index 000000000000..c13e8c9ee926 --- /dev/null +++ b/arch/x86/lib/copy_mc.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. */ + +#include <linux/jump_label.h> +#include <linux/uaccess.h> +#include <linux/export.h> +#include <linux/string.h> +#include <linux/types.h> + +#include <asm/mce.h> + +#ifdef CONFIG_X86_MCE +/* + * See COPY_MC_TEST for self-test of the copy_mc_fragile() + * implementation. + */ +static DEFINE_STATIC_KEY_FALSE(copy_mc_fragile_key); + +void enable_copy_mc_fragile(void) +{ + static_branch_inc(©_mc_fragile_key); +} +#define copy_mc_fragile_enabled (static_branch_unlikely(©_mc_fragile_key)) + +/* + * Similar to copy_user_handle_tail, probe for the write fault point, or + * source exception point. + */ +__visible notrace unsigned long +copy_mc_fragile_handle_tail(char *to, char *from, unsigned len) +{ + for (; len; --len, to++, from++) + if (copy_mc_fragile(to, from, 1)) + break; + return len; +} +#else +/* + * No point in doing careful copying, or consulting a static key when + * there is no #MC handler in the CONFIG_X86_MCE=n case. + */ +void enable_copy_mc_fragile(void) +{ +} +#define copy_mc_fragile_enabled (0) +#endif + +unsigned long copy_mc_enhanced_fast_string(void *dst, const void *src, unsigned len); + +/** + * copy_mc_to_kernel - memory copy that handles source exceptions + * + * @dst: destination address + * @src: source address + * @len: number of bytes to copy + * + * Call into the 'fragile' version on systems that benefit from avoiding + * corner case poison consumption scenarios, For example, accessing + * poison across 2 cachelines with a single instruction. Almost all + * other uses case can use copy_mc_enhanced_fast_string() for a fast + * recoverable copy, or fallback to plain memcpy. + * + * Return 0 for success, or number of bytes not copied if there was an + * exception. + */ +unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigned len) +{ + if (copy_mc_fragile_enabled) + return copy_mc_fragile(dst, src, len); + if (static_cpu_has(X86_FEATURE_ERMS)) + return copy_mc_enhanced_fast_string(dst, src, len); + memcpy(dst, src, len); + return 0; +} +EXPORT_SYMBOL_GPL(copy_mc_to_kernel); + +unsigned long __must_check copy_mc_to_user(void *dst, const void *src, unsigned len) +{ + unsigned long ret; + + if (copy_mc_fragile_enabled) { + __uaccess_begin(); + ret = copy_mc_fragile(dst, src, len); + __uaccess_end(); + return ret; + } + + if (static_cpu_has(X86_FEATURE_ERMS)) { + __uaccess_begin(); + ret = copy_mc_enhanced_fast_string(dst, src, len); + __uaccess_end(); + return ret; + } + + return copy_user_generic(dst, src, len); +} diff --git a/arch/x86/lib/copy_mc_64.S b/arch/x86/lib/copy_mc_64.S new file mode 100644 index 000000000000..892d8915f609 --- /dev/null +++ b/arch/x86/lib/copy_mc_64.S @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. 
*/ + +#include <linux/linkage.h> +#include <asm/copy_mc_test.h> +#include <asm/export.h> +#include <asm/asm.h> + +#ifndef CONFIG_UML + +#ifdef CONFIG_X86_MCE +COPY_MC_TEST_CTL + +/* + * copy_mc_fragile - copy memory with indication if an exception / fault happened + * + * The 'fragile' version is opted into by platform quirks and takes + * pains to avoid unrecoverable corner cases like 'fast-string' + * instruction sequences, and consuming poison across a cacheline + * boundary. The non-fragile version is equivalent to memcpy() + * regardless of CPU machine-check-recovery capability. + */ +SYM_FUNC_START(copy_mc_fragile) + cmpl $8, %edx + /* Less than 8 bytes? Go to byte copy loop */ + jb .L_no_whole_words + + /* Check for bad alignment of source */ + testl $7, %esi + /* Already aligned */ + jz .L_8byte_aligned + + /* Copy one byte at a time until source is 8-byte aligned */ + movl %esi, %ecx + andl $7, %ecx + subl $8, %ecx + negl %ecx + subl %ecx, %edx +.L_read_leading_bytes: + movb (%rsi), %al + COPY_MC_TEST_SRC %rsi 1 .E_leading_bytes + COPY_MC_TEST_DST %rdi 1 .E_leading_bytes +.L_write_leading_bytes: + movb %al, (%rdi) + incq %rsi + incq %rdi + decl %ecx + jnz .L_read_leading_bytes + +.L_8byte_aligned: + movl %edx, %ecx + andl $7, %edx + shrl $3, %ecx + jz .L_no_whole_words + +.L_read_words: + movq (%rsi), %r8 + COPY_MC_TEST_SRC %rsi 8 .E_read_words + COPY_MC_TEST_DST %rdi 8 .E_write_words +.L_write_words: + movq %r8, (%rdi) + addq $8, %rsi + addq $8, %rdi + decl %ecx + jnz .L_read_words + + /* Any trailing bytes? */ +.L_no_whole_words: + andl %edx, %edx + jz .L_done_memcpy_trap + + /* Copy trailing bytes */ + movl %edx, %ecx +.L_read_trailing_bytes: + movb (%rsi), %al + COPY_MC_TEST_SRC %rsi 1 .E_trailing_bytes + COPY_MC_TEST_DST %rdi 1 .E_trailing_bytes +.L_write_trailing_bytes: + movb %al, (%rdi) + incq %rsi + incq %rdi + decl %ecx + jnz .L_read_trailing_bytes + + /* Copy successful. Return zero */ +.L_done_memcpy_trap: + xorl %eax, %eax +.L_done: + ret +SYM_FUNC_END(copy_mc_fragile) +EXPORT_SYMBOL_GPL(copy_mc_fragile) + + .section .fixup, "ax" + /* + * Return number of bytes not copied for any failure. Note that + * there is no "tail" handling since the source buffer is 8-byte + * aligned and poison is cacheline aligned. + */ +.E_read_words: + shll $3, %ecx +.E_leading_bytes: + addl %edx, %ecx +.E_trailing_bytes: + mov %ecx, %eax + jmp .L_done + + /* + * For write fault handling, given the destination is unaligned, + * we handle faults on multi-byte writes with a byte-by-byte + * copy up to the write-protected page. + */ +.E_write_words: + shll $3, %ecx + addl %edx, %ecx + movl %ecx, %edx + jmp copy_mc_fragile_handle_tail + + .previous + + _ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes) + _ASM_EXTABLE_FAULT(.L_read_words, .E_read_words) + _ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes) + _ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes) + _ASM_EXTABLE(.L_write_words, .E_write_words) + _ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes) +#endif /* CONFIG_X86_MCE */ + +/* + * copy_mc_enhanced_fast_string - memory copy with exception handling + * + * Fast string copy + fault / exception handling. If the CPU does + * support machine check exception recovery, but does not support + * recovering from fast-string exceptions then this CPU needs to be + * added to the copy_mc_fragile_key set of quirks. Otherwise, absent any + * machine check recovery support this version should be no slower than + * standard memcpy. 
+ */ +SYM_FUNC_START(copy_mc_enhanced_fast_string) + movq %rdi, %rax + movq %rdx, %rcx +.L_copy: + rep movsb + /* Copy successful. Return zero */ + xorl %eax, %eax + ret +SYM_FUNC_END(copy_mc_enhanced_fast_string) + + .section .fixup, "ax" +.E_copy: + /* + * On fault %rcx is updated such that the copy instruction could + * optionally be restarted at the fault position, i.e. it + * contains 'bytes remaining'. A non-zero return indicates error + * to copy_mc_generic() users, or indicate short transfers to + * user-copy routines. + */ + movq %rcx, %rax + ret + + .previous + + _ASM_EXTABLE_FAULT(.L_copy, .E_copy) +#endif /* !CONFIG_UML */ diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 816f128a6d52..77b9b2a3b5c8 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -15,6 +15,7 @@ #include <asm/asm.h> #include <asm/smap.h> #include <asm/export.h> +#include <asm/trapnr.h> .macro ALIGN_DESTINATION /* check for bad alignment of destination */ @@ -36,8 +37,8 @@ jmp .Lcopy_user_handle_tail .previous - _ASM_EXTABLE_UA(100b, 103b) - _ASM_EXTABLE_UA(101b, 103b) + _ASM_EXTABLE_CPY(100b, 103b) + _ASM_EXTABLE_CPY(101b, 103b) .endm /* @@ -116,26 +117,26 @@ SYM_FUNC_START(copy_user_generic_unrolled) 60: jmp .Lcopy_user_handle_tail /* ecx is zerorest also */ .previous - _ASM_EXTABLE_UA(1b, 30b) - _ASM_EXTABLE_UA(2b, 30b) - _ASM_EXTABLE_UA(3b, 30b) - _ASM_EXTABLE_UA(4b, 30b) - _ASM_EXTABLE_UA(5b, 30b) - _ASM_EXTABLE_UA(6b, 30b) - _ASM_EXTABLE_UA(7b, 30b) - _ASM_EXTABLE_UA(8b, 30b) - _ASM_EXTABLE_UA(9b, 30b) - _ASM_EXTABLE_UA(10b, 30b) - _ASM_EXTABLE_UA(11b, 30b) - _ASM_EXTABLE_UA(12b, 30b) - _ASM_EXTABLE_UA(13b, 30b) - _ASM_EXTABLE_UA(14b, 30b) - _ASM_EXTABLE_UA(15b, 30b) - _ASM_EXTABLE_UA(16b, 30b) - _ASM_EXTABLE_UA(18b, 40b) - _ASM_EXTABLE_UA(19b, 40b) - _ASM_EXTABLE_UA(21b, 50b) - _ASM_EXTABLE_UA(22b, 50b) + _ASM_EXTABLE_CPY(1b, 30b) + _ASM_EXTABLE_CPY(2b, 30b) + _ASM_EXTABLE_CPY(3b, 30b) + _ASM_EXTABLE_CPY(4b, 30b) + _ASM_EXTABLE_CPY(5b, 30b) + _ASM_EXTABLE_CPY(6b, 30b) + _ASM_EXTABLE_CPY(7b, 30b) + _ASM_EXTABLE_CPY(8b, 30b) + _ASM_EXTABLE_CPY(9b, 30b) + _ASM_EXTABLE_CPY(10b, 30b) + _ASM_EXTABLE_CPY(11b, 30b) + _ASM_EXTABLE_CPY(12b, 30b) + _ASM_EXTABLE_CPY(13b, 30b) + _ASM_EXTABLE_CPY(14b, 30b) + _ASM_EXTABLE_CPY(15b, 30b) + _ASM_EXTABLE_CPY(16b, 30b) + _ASM_EXTABLE_CPY(18b, 40b) + _ASM_EXTABLE_CPY(19b, 40b) + _ASM_EXTABLE_CPY(21b, 50b) + _ASM_EXTABLE_CPY(22b, 50b) SYM_FUNC_END(copy_user_generic_unrolled) EXPORT_SYMBOL(copy_user_generic_unrolled) @@ -180,8 +181,8 @@ SYM_FUNC_START(copy_user_generic_string) jmp .Lcopy_user_handle_tail .previous - _ASM_EXTABLE_UA(1b, 11b) - _ASM_EXTABLE_UA(3b, 12b) + _ASM_EXTABLE_CPY(1b, 11b) + _ASM_EXTABLE_CPY(3b, 12b) SYM_FUNC_END(copy_user_generic_string) EXPORT_SYMBOL(copy_user_generic_string) @@ -213,7 +214,7 @@ SYM_FUNC_START(copy_user_enhanced_fast_string) jmp .Lcopy_user_handle_tail .previous - _ASM_EXTABLE_UA(1b, 12b) + _ASM_EXTABLE_CPY(1b, 12b) SYM_FUNC_END(copy_user_enhanced_fast_string) EXPORT_SYMBOL(copy_user_enhanced_fast_string) @@ -221,6 +222,7 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string) * Try to copy last bytes and clear the rest if needed. * Since protection fault in copy_from/to_user is not a normal situation, * it is not necessary to optimize tail handling. 
+ * Don't try to copy the tail if machine check happened * * Input: * rdi destination @@ -232,12 +234,25 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string) */ SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail) movl %edx,%ecx + cmp $X86_TRAP_MC,%eax /* check if X86_TRAP_MC */ + je 3f 1: rep movsb 2: mov %ecx,%eax ASM_CLAC ret - _ASM_EXTABLE_UA(1b, 2b) + /* + * Return zero to pretend that this copy succeeded. This + * is counter-intuitive, but needed to prevent the code + * in lib/iov_iter.c from retrying and running back into + * the poison cache line again. The machine check handler + * will ensure that a SIGBUS is sent to the task. + */ +3: xorl %eax,%eax + ASM_CLAC + ret + + _ASM_EXTABLE_CPY(1b, 2b) SYM_CODE_END(.Lcopy_user_handle_tail) /* @@ -366,27 +381,27 @@ SYM_FUNC_START(__copy_user_nocache) jmp .Lcopy_user_handle_tail .previous - _ASM_EXTABLE_UA(1b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(2b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(3b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(4b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(5b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(6b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(7b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(8b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(9b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(10b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(11b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(12b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(13b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(14b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(15b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(16b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(20b, .L_fixup_8b_copy) - _ASM_EXTABLE_UA(21b, .L_fixup_8b_copy) - _ASM_EXTABLE_UA(30b, .L_fixup_4b_copy) - _ASM_EXTABLE_UA(31b, .L_fixup_4b_copy) - _ASM_EXTABLE_UA(40b, .L_fixup_1b_copy) - _ASM_EXTABLE_UA(41b, .L_fixup_1b_copy) + _ASM_EXTABLE_CPY(1b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(2b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(3b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(4b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(5b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(6b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(7b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(8b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(9b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(10b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(11b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(12b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(13b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(14b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(15b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(16b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(20b, .L_fixup_8b_copy) + _ASM_EXTABLE_CPY(21b, .L_fixup_8b_copy) + _ASM_EXTABLE_CPY(30b, .L_fixup_4b_copy) + _ASM_EXTABLE_CPY(31b, .L_fixup_4b_copy) + _ASM_EXTABLE_CPY(40b, .L_fixup_1b_copy) + _ASM_EXTABLE_CPY(41b, .L_fixup_1b_copy) SYM_FUNC_END(__copy_user_nocache) EXPORT_SYMBOL(__copy_user_nocache) diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S index 3394a8ff7fd0..1fbd8ee9642d 100644 --- a/arch/x86/lib/csum-copy_64.S +++ b/arch/x86/lib/csum-copy_64.S @@ -18,9 +18,6 @@ * rdi source * rsi destination * edx len (32bit) - * ecx sum (32bit) - * r8 src_err_ptr (int) - * r9 dst_err_ptr (int) * * Output * eax 64bit sum. undefined in case of exception. @@ -31,44 +28,32 @@ .macro source 10: - _ASM_EXTABLE_UA(10b, .Lbad_source) + _ASM_EXTABLE_UA(10b, .Lfault) .endm .macro dest 20: - _ASM_EXTABLE_UA(20b, .Lbad_dest) + _ASM_EXTABLE_UA(20b, .Lfault) .endm - /* - * No _ASM_EXTABLE_UA; this is used for intentional prefetch on a - * potentially unmapped kernel address. 
- */ - .macro ignore L=.Lignore -30: - _ASM_EXTABLE(30b, \L) - .endm - - SYM_FUNC_START(csum_partial_copy_generic) - cmpl $3*64, %edx - jle .Lignore - -.Lignore: - subq $7*8, %rsp - movq %rbx, 2*8(%rsp) - movq %r12, 3*8(%rsp) - movq %r14, 4*8(%rsp) - movq %r13, 5*8(%rsp) - movq %r15, 6*8(%rsp) + subq $5*8, %rsp + movq %rbx, 0*8(%rsp) + movq %r12, 1*8(%rsp) + movq %r14, 2*8(%rsp) + movq %r13, 3*8(%rsp) + movq %r15, 4*8(%rsp) - movq %r8, (%rsp) - movq %r9, 1*8(%rsp) - - movl %ecx, %eax + movl $-1, %eax + xorl %r9d, %r9d movl %edx, %ecx + cmpl $8, %ecx + jb .Lshort - xorl %r9d, %r9d - movq %rcx, %r12 + testb $7, %sil + jne .Lunaligned +.Laligned: + movl %ecx, %r12d shrq $6, %r12 jz .Lhandle_tail /* < 64 */ @@ -99,7 +84,12 @@ SYM_FUNC_START(csum_partial_copy_generic) source movq 56(%rdi), %r13 - ignore 2f +30: + /* + * No _ASM_EXTABLE_UA; this is used for intentional prefetch on a + * potentially unmapped kernel address. + */ + _ASM_EXTABLE(30b, 2f) prefetcht0 5*64(%rdi) 2: adcq %rbx, %rax @@ -131,8 +121,6 @@ SYM_FUNC_START(csum_partial_copy_generic) dest movq %r13, 56(%rsi) -3: - leaq 64(%rdi), %rdi leaq 64(%rsi), %rsi @@ -142,8 +130,8 @@ SYM_FUNC_START(csum_partial_copy_generic) /* do last up to 56 bytes */ .Lhandle_tail: - /* ecx: count */ - movl %ecx, %r10d + /* ecx: count, rcx.63: the end result needs to be rol8 */ + movq %rcx, %r10 andl $63, %ecx shrl $3, %ecx jz .Lfold @@ -172,6 +160,7 @@ SYM_FUNC_START(csum_partial_copy_generic) .Lhandle_7: movl %r10d, %ecx andl $7, %ecx +.L1: /* .Lshort rejoins the common path here */ shrl $1, %ecx jz .Lhandle_1 movl $2, %edx @@ -203,26 +192,65 @@ SYM_FUNC_START(csum_partial_copy_generic) adcl %r9d, %eax /* carry */ .Lende: - movq 2*8(%rsp), %rbx - movq 3*8(%rsp), %r12 - movq 4*8(%rsp), %r14 - movq 5*8(%rsp), %r13 - movq 6*8(%rsp), %r15 - addq $7*8, %rsp + testq %r10, %r10 + js .Lwas_odd +.Lout: + movq 0*8(%rsp), %rbx + movq 1*8(%rsp), %r12 + movq 2*8(%rsp), %r14 + movq 3*8(%rsp), %r13 + movq 4*8(%rsp), %r15 + addq $5*8, %rsp ret +.Lshort: + movl %ecx, %r10d + jmp .L1 +.Lunaligned: + xorl %ebx, %ebx + testb $1, %sil + jne .Lodd +1: testb $2, %sil + je 2f + source + movw (%rdi), %bx + dest + movw %bx, (%rsi) + leaq 2(%rdi), %rdi + subq $2, %rcx + leaq 2(%rsi), %rsi + addq %rbx, %rax +2: testb $4, %sil + je .Laligned + source + movl (%rdi), %ebx + dest + movl %ebx, (%rsi) + leaq 4(%rdi), %rdi + subq $4, %rcx + leaq 4(%rsi), %rsi + addq %rbx, %rax + jmp .Laligned + +.Lodd: + source + movb (%rdi), %bl + dest + movb %bl, (%rsi) + leaq 1(%rdi), %rdi + leaq 1(%rsi), %rsi + /* decrement, set MSB */ + leaq -1(%rcx, %rcx), %rcx + rorq $1, %rcx + shll $8, %ebx + addq %rbx, %rax + jmp 1b + +.Lwas_odd: + roll $8, %eax + jmp .Lout - /* Exception handlers. Very simple, zeroing is done in the wrappers */ -.Lbad_source: - movq (%rsp), %rax - testq %rax, %rax - jz .Lende - movl $-EFAULT, (%rax) - jmp .Lende - -.Lbad_dest: - movq 8(%rsp), %rax - testq %rax, %rax - jz .Lende - movl $-EFAULT, (%rax) - jmp .Lende + /* Exception: just return 0 */ +.Lfault: + xorl %eax, %eax + jmp .Lout SYM_FUNC_END(csum_partial_copy_generic) diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c index ee63d7576fd2..189344924a2b 100644 --- a/arch/x86/lib/csum-wrappers_64.c +++ b/arch/x86/lib/csum-wrappers_64.c @@ -21,52 +21,16 @@ * src and dst are best aligned to 64bits. 
*/ __wsum -csum_and_copy_from_user(const void __user *src, void *dst, - int len, __wsum isum, int *errp) +csum_and_copy_from_user(const void __user *src, void *dst, int len) { - might_sleep(); - *errp = 0; + __wsum sum; + might_sleep(); if (!user_access_begin(src, len)) - goto out_err; - - /* - * Why 6, not 7? To handle odd addresses aligned we - * would need to do considerable complications to fix the - * checksum which is defined as an 16bit accumulator. The - * fix alignment code is primarily for performance - * compatibility with 32bit and that will handle odd - * addresses slowly too. - */ - if (unlikely((unsigned long)src & 6)) { - while (((unsigned long)src & 6) && len >= 2) { - __u16 val16; - - unsafe_get_user(val16, (const __u16 __user *)src, out); - - *(__u16 *)dst = val16; - isum = (__force __wsum)add32_with_carry( - (__force unsigned)isum, val16); - src += 2; - dst += 2; - len -= 2; - } - } - isum = csum_partial_copy_generic((__force const void *)src, - dst, len, isum, errp, NULL); - user_access_end(); - if (unlikely(*errp)) - goto out_err; - - return isum; - -out: + return 0; + sum = csum_partial_copy_generic((__force const void *)src, dst, len); user_access_end(); -out_err: - *errp = -EFAULT; - memset(dst, 0, len); - - return isum; + return sum; } EXPORT_SYMBOL(csum_and_copy_from_user); @@ -82,40 +46,16 @@ EXPORT_SYMBOL(csum_and_copy_from_user); * src and dst are best aligned to 64bits. */ __wsum -csum_and_copy_to_user(const void *src, void __user *dst, - int len, __wsum isum, int *errp) +csum_and_copy_to_user(const void *src, void __user *dst, int len) { - __wsum ret; + __wsum sum; might_sleep(); - - if (!user_access_begin(dst, len)) { - *errp = -EFAULT; + if (!user_access_begin(dst, len)) return 0; - } - - if (unlikely((unsigned long)dst & 6)) { - while (((unsigned long)dst & 6) && len >= 2) { - __u16 val16 = *(__u16 *)src; - - isum = (__force __wsum)add32_with_carry( - (__force unsigned)isum, val16); - unsafe_put_user(val16, (__u16 __user *)dst, out); - src += 2; - dst += 2; - len -= 2; - } - } - - *errp = 0; - ret = csum_partial_copy_generic(src, (void __force *)dst, - len, isum, NULL, errp); - user_access_end(); - return ret; -out: + sum = csum_partial_copy_generic(src, (void __force *)dst, len); user_access_end(); - *errp = -EFAULT; - return isum; + return sum; } EXPORT_SYMBOL(csum_and_copy_to_user); @@ -129,9 +69,9 @@ EXPORT_SYMBOL(csum_and_copy_to_user); * Returns an 32bit unfolded checksum of the buffer. 
*/ __wsum -csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) +csum_partial_copy_nocheck(const void *src, void *dst, int len) { - return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL); + return csum_partial_copy_generic(src, dst, len); } EXPORT_SYMBOL(csum_partial_copy_nocheck); diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index c8a85b512796..fa1bc2104b32 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -35,10 +35,21 @@ #include <asm/smap.h> #include <asm/export.h> +#define ASM_BARRIER_NOSPEC ALTERNATIVE "", "lfence", X86_FEATURE_LFENCE_RDTSC + +#ifdef CONFIG_X86_5LEVEL +#define LOAD_TASK_SIZE_MINUS_N(n) \ + ALTERNATIVE __stringify(mov $((1 << 47) - 4096 - (n)),%rdx), \ + __stringify(mov $((1 << 56) - 4096 - (n)),%rdx), X86_FEATURE_LA57 +#else +#define LOAD_TASK_SIZE_MINUS_N(n) \ + mov $(TASK_SIZE_MAX - (n)),%_ASM_DX +#endif + .text SYM_FUNC_START(__get_user_1) - mov PER_CPU_VAR(current_task), %_ASM_DX - cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX + LOAD_TASK_SIZE_MINUS_N(0) + cmp %_ASM_DX,%_ASM_AX jae bad_get_user sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ and %_ASM_DX, %_ASM_AX @@ -51,15 +62,13 @@ SYM_FUNC_END(__get_user_1) EXPORT_SYMBOL(__get_user_1) SYM_FUNC_START(__get_user_2) - add $1,%_ASM_AX - jc bad_get_user - mov PER_CPU_VAR(current_task), %_ASM_DX - cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX + LOAD_TASK_SIZE_MINUS_N(1) + cmp %_ASM_DX,%_ASM_AX jae bad_get_user sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ and %_ASM_DX, %_ASM_AX ASM_STAC -2: movzwl -1(%_ASM_AX),%edx +2: movzwl (%_ASM_AX),%edx xor %eax,%eax ASM_CLAC ret @@ -67,15 +76,13 @@ SYM_FUNC_END(__get_user_2) EXPORT_SYMBOL(__get_user_2) SYM_FUNC_START(__get_user_4) - add $3,%_ASM_AX - jc bad_get_user - mov PER_CPU_VAR(current_task), %_ASM_DX - cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX + LOAD_TASK_SIZE_MINUS_N(3) + cmp %_ASM_DX,%_ASM_AX jae bad_get_user sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ and %_ASM_DX, %_ASM_AX ASM_STAC -3: movl -3(%_ASM_AX),%edx +3: movl (%_ASM_AX),%edx xor %eax,%eax ASM_CLAC ret @@ -84,29 +91,25 @@ EXPORT_SYMBOL(__get_user_4) SYM_FUNC_START(__get_user_8) #ifdef CONFIG_X86_64 - add $7,%_ASM_AX - jc bad_get_user - mov PER_CPU_VAR(current_task), %_ASM_DX - cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX + LOAD_TASK_SIZE_MINUS_N(7) + cmp %_ASM_DX,%_ASM_AX jae bad_get_user sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ and %_ASM_DX, %_ASM_AX ASM_STAC -4: movq -7(%_ASM_AX),%rdx +4: movq (%_ASM_AX),%rdx xor %eax,%eax ASM_CLAC ret #else - add $7,%_ASM_AX - jc bad_get_user_8 - mov PER_CPU_VAR(current_task), %_ASM_DX - cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX + LOAD_TASK_SIZE_MINUS_N(7) + cmp %_ASM_DX,%_ASM_AX jae bad_get_user_8 sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ and %_ASM_DX, %_ASM_AX ASM_STAC -4: movl -7(%_ASM_AX),%edx -5: movl -3(%_ASM_AX),%ecx +4: movl (%_ASM_AX),%edx +5: movl 4(%_ASM_AX),%ecx xor %eax,%eax ASM_CLAC ret @@ -114,6 +117,52 @@ SYM_FUNC_START(__get_user_8) SYM_FUNC_END(__get_user_8) EXPORT_SYMBOL(__get_user_8) +/* .. 
and the same for __get_user, just without the range checks */ +SYM_FUNC_START(__get_user_nocheck_1) + ASM_STAC + ASM_BARRIER_NOSPEC +6: movzbl (%_ASM_AX),%edx + xor %eax,%eax + ASM_CLAC + ret +SYM_FUNC_END(__get_user_nocheck_1) +EXPORT_SYMBOL(__get_user_nocheck_1) + +SYM_FUNC_START(__get_user_nocheck_2) + ASM_STAC + ASM_BARRIER_NOSPEC +7: movzwl (%_ASM_AX),%edx + xor %eax,%eax + ASM_CLAC + ret +SYM_FUNC_END(__get_user_nocheck_2) +EXPORT_SYMBOL(__get_user_nocheck_2) + +SYM_FUNC_START(__get_user_nocheck_4) + ASM_STAC + ASM_BARRIER_NOSPEC +8: movl (%_ASM_AX),%edx + xor %eax,%eax + ASM_CLAC + ret +SYM_FUNC_END(__get_user_nocheck_4) +EXPORT_SYMBOL(__get_user_nocheck_4) + +SYM_FUNC_START(__get_user_nocheck_8) + ASM_STAC + ASM_BARRIER_NOSPEC +#ifdef CONFIG_X86_64 +9: movq (%_ASM_AX),%rdx +#else +9: movl (%_ASM_AX),%edx +10: movl 4(%_ASM_AX),%ecx +#endif + xor %eax,%eax + ASM_CLAC + ret +SYM_FUNC_END(__get_user_nocheck_8) +EXPORT_SYMBOL(__get_user_nocheck_8) + SYM_CODE_START_LOCAL(.Lbad_get_user_clac) ASM_CLAC @@ -134,6 +183,7 @@ bad_get_user_8: SYM_CODE_END(.Lbad_get_user_8_clac) #endif +/* get_user */ _ASM_EXTABLE_UA(1b, .Lbad_get_user_clac) _ASM_EXTABLE_UA(2b, .Lbad_get_user_clac) _ASM_EXTABLE_UA(3b, .Lbad_get_user_clac) @@ -143,3 +193,14 @@ SYM_CODE_END(.Lbad_get_user_8_clac) _ASM_EXTABLE_UA(4b, .Lbad_get_user_8_clac) _ASM_EXTABLE_UA(5b, .Lbad_get_user_8_clac) #endif + +/* __get_user */ + _ASM_EXTABLE_UA(6b, .Lbad_get_user_clac) + _ASM_EXTABLE_UA(7b, .Lbad_get_user_clac) + _ASM_EXTABLE_UA(8b, .Lbad_get_user_clac) +#ifdef CONFIG_X86_64 + _ASM_EXTABLE_UA(9b, .Lbad_get_user_clac) +#else + _ASM_EXTABLE_UA(9b, .Lbad_get_user_8_clac) + _ASM_EXTABLE_UA(10b, .Lbad_get_user_8_clac) +#endif diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c index 5e69603ff63f..58f7fb95c7f4 100644 --- a/arch/x86/lib/insn-eval.c +++ b/arch/x86/lib/insn-eval.c @@ -20,6 +20,7 @@ enum reg_type { REG_TYPE_RM = 0, + REG_TYPE_REG, REG_TYPE_INDEX, REG_TYPE_BASE, }; @@ -53,6 +54,30 @@ static bool is_string_insn(struct insn *insn) } /** + * insn_has_rep_prefix() - Determine if instruction has a REP prefix + * @insn: Instruction containing the prefix to inspect + * + * Returns: + * + * true if the instruction has a REP prefix, false if not. + */ +bool insn_has_rep_prefix(struct insn *insn) +{ + int i; + + insn_get_prefixes(insn); + + for (i = 0; i < insn->prefixes.nbytes; i++) { + insn_byte_t p = insn->prefixes.bytes[i]; + + if (p == 0xf2 || p == 0xf3) + return true; + } + + return false; +} + +/** * get_seg_reg_override_idx() - obtain segment register override index * @insn: Valid instruction with segment override prefixes * @@ -439,6 +464,13 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs, regno += 8; break; + case REG_TYPE_REG: + regno = X86_MODRM_REG(insn->modrm.value); + + if (X86_REX_R(insn->rex_prefix.value)) + regno += 8; + break; + case REG_TYPE_INDEX: regno = X86_SIB_INDEX(insn->sib.value); if (X86_REX_X(insn->rex_prefix.value)) @@ -808,6 +840,21 @@ int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs) } /** + * insn_get_modrm_reg_off() - Obtain register in reg part of the ModRM byte + * @insn: Instruction containing the ModRM byte + * @regs: Register values as seen when entering kernel mode + * + * Returns: + * + * The register indicated by the reg part of the ModRM byte. The + * register is obtained as an offset from the base of pt_regs. 
+ */ +int insn_get_modrm_reg_off(struct insn *insn, struct pt_regs *regs) +{ + return get_reg_offset(insn, regs, REG_TYPE_REG); +} + +/** * get_seg_base_limit() - obtain base address and limit of a segment * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode @@ -1367,3 +1414,86 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs) return (void __user *)-1L; } } + +/** + * insn_fetch_from_user() - Copy instruction bytes from user-space memory + * @regs: Structure with register values as seen when entering kernel mode + * @buf: Array to store the fetched instruction + * + * Gets the linear address of the instruction and copies the instruction bytes + * to the buf. + * + * Returns: + * + * Number of instruction bytes copied. + * + * 0 if nothing was copied. + */ +int insn_fetch_from_user(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE]) +{ + unsigned long seg_base = 0; + int not_copied; + + /* + * If not in user-space long mode, a custom code segment could be in + * use. This is true in protected mode (if the process defined a local + * descriptor table), or virtual-8086 mode. In most of the cases + * seg_base will be zero as in USER_CS. + */ + if (!user_64bit_mode(regs)) { + seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS); + if (seg_base == -1L) + return 0; + } + + + not_copied = copy_from_user(buf, (void __user *)(seg_base + regs->ip), + MAX_INSN_SIZE); + + return MAX_INSN_SIZE - not_copied; +} + +/** + * insn_decode() - Decode an instruction + * @insn: Structure to store decoded instruction + * @regs: Structure with register values as seen when entering kernel mode + * @buf: Buffer containing the instruction bytes + * @buf_size: Number of instruction bytes available in buf + * + * Decodes the instruction provided in buf and stores the decoding results in + * insn. Also determines the correct address and operand sizes. + * + * Returns: + * + * True if instruction was decoded, False otherwise. + */ +bool insn_decode(struct insn *insn, struct pt_regs *regs, + unsigned char buf[MAX_INSN_SIZE], int buf_size) +{ + int seg_defs; + + insn_init(insn, buf, buf_size, user_64bit_mode(regs)); + + /* + * Override the default operand and address sizes with what is specified + * in the code segment descriptor. The instruction decoder only sets + * the address size it to either 4 or 8 address bytes and does nothing + * for the operand bytes. This OK for most of the cases, but we could + * have special cases where, for instance, a 16-bit code segment + * descriptor is used. + * If there is an address override prefix, the instruction decoder + * correctly updates these values, even for 16-bit defaults. 
+ */ + seg_defs = insn_get_code_seg_params(regs); + if (seg_defs == -EINVAL) + return false; + + insn->addr_bytes = INSN_CODE_SEG_ADDR_SZ(seg_defs); + insn->opnd_bytes = INSN_CODE_SEG_OPND_SZ(seg_defs); + + insn_get_length(insn); + if (buf_size < insn->length) + return false; + + return true; +} diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index bbcc05bcefad..037faac46b0c 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -4,7 +4,6 @@ #include <linux/linkage.h> #include <asm/errno.h> #include <asm/cpufeatures.h> -#include <asm/mcsafe_test.h> #include <asm/alternative-asm.h> #include <asm/export.h> @@ -187,117 +186,3 @@ SYM_FUNC_START_LOCAL(memcpy_orig) SYM_FUNC_END(memcpy_orig) .popsection - -#ifndef CONFIG_UML - -MCSAFE_TEST_CTL - -/* - * __memcpy_mcsafe - memory copy with machine check exception handling - * Note that we only catch machine checks when reading the source addresses. - * Writes to target are posted and don't generate machine checks. - */ -SYM_FUNC_START(__memcpy_mcsafe) - cmpl $8, %edx - /* Less than 8 bytes? Go to byte copy loop */ - jb .L_no_whole_words - - /* Check for bad alignment of source */ - testl $7, %esi - /* Already aligned */ - jz .L_8byte_aligned - - /* Copy one byte at a time until source is 8-byte aligned */ - movl %esi, %ecx - andl $7, %ecx - subl $8, %ecx - negl %ecx - subl %ecx, %edx -.L_read_leading_bytes: - movb (%rsi), %al - MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes - MCSAFE_TEST_DST %rdi 1 .E_leading_bytes -.L_write_leading_bytes: - movb %al, (%rdi) - incq %rsi - incq %rdi - decl %ecx - jnz .L_read_leading_bytes - -.L_8byte_aligned: - movl %edx, %ecx - andl $7, %edx - shrl $3, %ecx - jz .L_no_whole_words - -.L_read_words: - movq (%rsi), %r8 - MCSAFE_TEST_SRC %rsi 8 .E_read_words - MCSAFE_TEST_DST %rdi 8 .E_write_words -.L_write_words: - movq %r8, (%rdi) - addq $8, %rsi - addq $8, %rdi - decl %ecx - jnz .L_read_words - - /* Any trailing bytes? */ -.L_no_whole_words: - andl %edx, %edx - jz .L_done_memcpy_trap - - /* Copy trailing bytes */ - movl %edx, %ecx -.L_read_trailing_bytes: - movb (%rsi), %al - MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes - MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes -.L_write_trailing_bytes: - movb %al, (%rdi) - incq %rsi - incq %rdi - decl %ecx - jnz .L_read_trailing_bytes - - /* Copy successful. Return zero */ -.L_done_memcpy_trap: - xorl %eax, %eax -.L_done: - ret -SYM_FUNC_END(__memcpy_mcsafe) -EXPORT_SYMBOL_GPL(__memcpy_mcsafe) - - .section .fixup, "ax" - /* - * Return number of bytes not copied for any failure. Note that - * there is no "tail" handling since the source buffer is 8-byte - * aligned and poison is cacheline aligned. - */ -.E_read_words: - shll $3, %ecx -.E_leading_bytes: - addl %edx, %ecx -.E_trailing_bytes: - mov %ecx, %eax - jmp .L_done - - /* - * For write fault handling, given the destination is unaligned, - * we handle faults on multi-byte writes with a byte-by-byte - * copy up to the write-protected page. 
- */ -.E_write_words: - shll $3, %ecx - addl %edx, %ecx - movl %ecx, %edx - jmp mcsafe_handle_tail - - .previous - - _ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes) - _ASM_EXTABLE_FAULT(.L_read_words, .E_read_words) - _ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes) - _ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes) - _ASM_EXTABLE(.L_write_words, .E_write_words) - _ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes) -#endif diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S index 7c7c92db8497..0ea344c5ea43 100644 --- a/arch/x86/lib/putuser.S +++ b/arch/x86/lib/putuser.S @@ -25,76 +25,87 @@ * Inputs: %eax[:%edx] contains the data * %ecx contains the address * - * Outputs: %eax is error code (0 or -EFAULT) + * Outputs: %ecx is error code (0 or -EFAULT) + * + * Clobbers: %ebx needed for task pointer * * These functions should not modify any other registers, * as they get called from within inline assembly. */ -#define ENTER mov PER_CPU_VAR(current_task), %_ASM_BX +#ifdef CONFIG_X86_5LEVEL +#define LOAD_TASK_SIZE_MINUS_N(n) \ + ALTERNATIVE __stringify(mov $((1 << 47) - 4096 - (n)),%rbx), \ + __stringify(mov $((1 << 56) - 4096 - (n)),%rbx), X86_FEATURE_LA57 +#else +#define LOAD_TASK_SIZE_MINUS_N(n) \ + mov $(TASK_SIZE_MAX - (n)),%_ASM_BX +#endif .text SYM_FUNC_START(__put_user_1) - ENTER - cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX + LOAD_TASK_SIZE_MINUS_N(0) + cmp %_ASM_BX,%_ASM_CX jae .Lbad_put_user +SYM_INNER_LABEL(__put_user_nocheck_1, SYM_L_GLOBAL) ASM_STAC 1: movb %al,(%_ASM_CX) - xor %eax,%eax + xor %ecx,%ecx ASM_CLAC ret SYM_FUNC_END(__put_user_1) EXPORT_SYMBOL(__put_user_1) +EXPORT_SYMBOL(__put_user_nocheck_1) SYM_FUNC_START(__put_user_2) - ENTER - mov TASK_addr_limit(%_ASM_BX),%_ASM_BX - sub $1,%_ASM_BX + LOAD_TASK_SIZE_MINUS_N(1) cmp %_ASM_BX,%_ASM_CX jae .Lbad_put_user +SYM_INNER_LABEL(__put_user_nocheck_2, SYM_L_GLOBAL) ASM_STAC 2: movw %ax,(%_ASM_CX) - xor %eax,%eax + xor %ecx,%ecx ASM_CLAC ret SYM_FUNC_END(__put_user_2) EXPORT_SYMBOL(__put_user_2) +EXPORT_SYMBOL(__put_user_nocheck_2) SYM_FUNC_START(__put_user_4) - ENTER - mov TASK_addr_limit(%_ASM_BX),%_ASM_BX - sub $3,%_ASM_BX + LOAD_TASK_SIZE_MINUS_N(3) cmp %_ASM_BX,%_ASM_CX jae .Lbad_put_user +SYM_INNER_LABEL(__put_user_nocheck_4, SYM_L_GLOBAL) ASM_STAC 3: movl %eax,(%_ASM_CX) - xor %eax,%eax + xor %ecx,%ecx ASM_CLAC ret SYM_FUNC_END(__put_user_4) EXPORT_SYMBOL(__put_user_4) +EXPORT_SYMBOL(__put_user_nocheck_4) SYM_FUNC_START(__put_user_8) - ENTER - mov TASK_addr_limit(%_ASM_BX),%_ASM_BX - sub $7,%_ASM_BX + LOAD_TASK_SIZE_MINUS_N(7) cmp %_ASM_BX,%_ASM_CX jae .Lbad_put_user +SYM_INNER_LABEL(__put_user_nocheck_8, SYM_L_GLOBAL) ASM_STAC 4: mov %_ASM_AX,(%_ASM_CX) #ifdef CONFIG_X86_32 5: movl %edx,4(%_ASM_CX) #endif - xor %eax,%eax + xor %ecx,%ecx ASM_CLAC RET SYM_FUNC_END(__put_user_8) EXPORT_SYMBOL(__put_user_8) +EXPORT_SYMBOL(__put_user_nocheck_8) SYM_CODE_START_LOCAL(.Lbad_put_user_clac) ASM_CLAC .Lbad_put_user: - movl $-EFAULT,%eax + movl $-EFAULT,%ecx RET SYM_CODE_END(.Lbad_put_user_clac) diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index 1847e993ac63..508c81e97ab1 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -56,27 +56,6 @@ unsigned long clear_user(void __user *to, unsigned long n) } EXPORT_SYMBOL(clear_user); -/* - * Similar to copy_user_handle_tail, probe for the write fault point, - * but reuse __memcpy_mcsafe in case a new read error is encountered. - * clac() is handled in _copy_to_iter_mcsafe(). 
- */ -__visible notrace unsigned long -mcsafe_handle_tail(char *to, char *from, unsigned len) -{ - for (; len; --len, to++, from++) { - /* - * Call the assembly routine back directly since - * memcpy_mcsafe() may silently fallback to memcpy. - */ - unsigned long rem = __memcpy_mcsafe(to, from, 1); - - if (rem) - break; - } - return len; -} - #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE /** * clean_cache_range - write back a cache range with CLWB |
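
A note on the instruction-decoding helpers added in arch/x86/lib/insn-eval.c: insn_fetch_from_user(), insn_decode() and insn_get_modrm_reg_off() are exported for exception handlers that need to inspect or emulate the instruction at the faulting instruction pointer. The sketch below shows how a caller would chain the first two; the decode_faulting_user_insn() wrapper is a hypothetical illustration, not code from this series.

/*
 * Illustrative sketch only: fetch and decode the user-space
 * instruction that caused a fault, using the helpers added in
 * arch/x86/lib/insn-eval.c.
 */
#include <asm/ptrace.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>

static bool decode_faulting_user_insn(struct pt_regs *regs, struct insn *insn)
{
	unsigned char buf[MAX_INSN_SIZE];
	int nbytes;

	/*
	 * Copy up to MAX_INSN_SIZE bytes from the user instruction
	 * pointer; 0 means nothing could be copied (e.g. a bad CS
	 * base or an unmapped page).
	 */
	nbytes = insn_fetch_from_user(regs, buf);
	if (!nbytes)
		return false;

	/*
	 * Decode with operand/address sizes taken from the code
	 * segment; fails if fewer bytes were fetched than the
	 * instruction needs.
	 */
	return insn_decode(insn, regs, buf, nbytes);
}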