Diffstat (limited to 'arch/x86/um')
-rw-r--r--  arch/x86/um/asm/barrier.h                 |  6
-rw-r--r--  arch/x86/um/asm/module.h                  | 24
-rw-r--r--  arch/x86/um/os-Linux/mcontext.c           | 15
-rw-r--r--  arch/x86/um/os-Linux/registers.c          | 21
-rw-r--r--  arch/x86/um/shared/sysdep/faultinfo_32.h  | 12
-rw-r--r--  arch/x86/um/shared/sysdep/faultinfo_64.h  | 12
-rw-r--r--  arch/x86/um/signal.c                      | 13
-rw-r--r--  arch/x86/um/vdso/vma.c                    | 17
8 files changed, 71 insertions, 49 deletions
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 4da336965698..b51aefd6ec2b 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -12,9 +12,9 @@
  */
 #ifdef CONFIG_X86_32
 
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#define mb() alternative("lock addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
 
 #else /* CONFIG_X86_32 */
 
diff --git a/arch/x86/um/asm/module.h b/arch/x86/um/asm/module.h
deleted file mode 100644
index a3b061d66082..000000000000
--- a/arch/x86/um/asm/module.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __UM_MODULE_H
-#define __UM_MODULE_H
-
-/* UML is simple */
-struct mod_arch_specific
-{
-};
-
-#ifdef CONFIG_X86_32
-
-#define Elf_Shdr Elf32_Shdr
-#define Elf_Sym Elf32_Sym
-#define Elf_Ehdr Elf32_Ehdr
-
-#else
-
-#define Elf_Shdr Elf64_Shdr
-#define Elf_Sym Elf64_Sym
-#define Elf_Ehdr Elf64_Ehdr
-
-#endif
-
-#endif
diff --git a/arch/x86/um/os-Linux/mcontext.c b/arch/x86/um/os-Linux/mcontext.c
index e80ab7d28117..37decaa74761 100644
--- a/arch/x86/um/os-Linux/mcontext.c
+++ b/arch/x86/um/os-Linux/mcontext.c
@@ -4,6 +4,7 @@
 #include <asm/ptrace.h>
 #include <sysdep/ptrace.h>
 #include <sysdep/mcontext.h>
+#include <arch.h>
 
 void get_regs_from_mc(struct uml_pt_regs *regs, mcontext_t *mc)
 {
@@ -27,7 +28,17 @@ void get_regs_from_mc(struct uml_pt_regs *regs, mcontext_t *mc)
         COPY(RIP);
         COPY2(EFLAGS, EFL);
         COPY2(CS, CSGSFS);
-        regs->gp[CS / sizeof(unsigned long)] &= 0xffff;
-        regs->gp[CS / sizeof(unsigned long)] |= 3;
+        regs->gp[SS / sizeof(unsigned long)] = mc->gregs[REG_CSGSFS] >> 48;
+#endif
+}
+
+void mc_set_rip(void *_mc, void *target)
+{
+        mcontext_t *mc = _mc;
+
+#ifdef __i386__
+        mc->gregs[REG_EIP] = (unsigned long)target;
+#else
+        mc->gregs[REG_RIP] = (unsigned long)target;
 #endif
 }
diff --git a/arch/x86/um/os-Linux/registers.c b/arch/x86/um/os-Linux/registers.c
index 76eaeb93928c..eb1cdadc8a61 100644
--- a/arch/x86/um/os-Linux/registers.c
+++ b/arch/x86/um/os-Linux/registers.c
@@ -18,6 +18,7 @@
 #include <registers.h>
 #include <sys/mman.h>
 
+static unsigned long ptrace_regset;
 unsigned long host_fp_size;
 
 int get_fp_registers(int pid, unsigned long *regs)
@@ -27,7 +28,7 @@ int get_fp_registers(int pid, unsigned long *regs)
                 .iov_len = host_fp_size,
         };
 
-        if (ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) < 0)
+        if (ptrace(PTRACE_GETREGSET, pid, ptrace_regset, &iov) < 0)
                 return -errno;
         return 0;
 }
@@ -39,7 +40,7 @@ int put_fp_registers(int pid, unsigned long *regs)
                 .iov_len = host_fp_size,
         };
 
-        if (ptrace(PTRACE_SETREGSET, pid, NT_X86_XSTATE, &iov) < 0)
+        if (ptrace(PTRACE_SETREGSET, pid, ptrace_regset, &iov) < 0)
                 return -errno;
         return 0;
 }
@@ -58,9 +59,23 @@ int arch_init_registers(int pid)
                 return -ENOMEM;
 
         /* GDB has x86_xsave_length, which uses x86_cpuid_count */
-        ret = ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov);
+        ptrace_regset = NT_X86_XSTATE;
+        ret = ptrace(PTRACE_GETREGSET, pid, ptrace_regset, &iov);
         if (ret)
                 ret = -errno;
+
+        if (ret == -ENODEV) {
+#ifdef CONFIG_X86_32
+                ptrace_regset = NT_PRXFPREG;
+#else
+                ptrace_regset = NT_PRFPREG;
+#endif
+                iov.iov_len = 2 * 1024 * 1024;
+                ret = ptrace(PTRACE_GETREGSET, pid, ptrace_regset, &iov);
+                if (ret)
+                        ret = -errno;
+        }
+
         munmap(iov.iov_base, 2 * 1024 * 1024);
 
         host_fp_size = iov.iov_len;
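The registers.c change above probes the host's NT_X86_XSTATE regset and falls back to the legacy FP regset when the host kernel reports ENODEV. The standalone userspace sketch below (not part of the patch; it assumes an x86-64 host and abbreviates error handling) shows the same probe-and-fallback pattern against a traced child. As in the hunk, the kernel trims iov_len to the number of bytes it actually wrote, which is how the caller learns the real FP state size.

/* regset_probe.c - sketch of the NT_X86_XSTATE -> NT_PRFPREG fallback */
#include <elf.h>
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        static unsigned char buf[2 * 1024 * 1024];      /* generous bound, as in the patch */
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        unsigned long regset = NT_X86_XSTATE;
        pid_t pid = fork();

        if (pid == 0) {                 /* tracee: stop so the parent can inspect it */
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                raise(SIGSTOP);
                _exit(0);
        }
        waitpid(pid, NULL, 0);          /* wait for the SIGSTOP */

        if (ptrace(PTRACE_GETREGSET, pid, regset, &iov) < 0 && errno == ENODEV) {
                /* no XSTATE regset on this host: fall back to the classic FP regset */
                regset = NT_PRFPREG;
                iov.iov_len = sizeof(buf);
                if (ptrace(PTRACE_GETREGSET, pid, regset, &iov) < 0) {
                        perror("PTRACE_GETREGSET");
                        exit(1);
                }
        }

        /* the kernel shrinks iov_len to the size it actually filled in */
        printf("FP state via regset %#lx, %zu bytes\n", regset, iov.iov_len);

        kill(pid, SIGKILL);
        return 0;
}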
diff --git a/arch/x86/um/shared/sysdep/faultinfo_32.h b/arch/x86/um/shared/sysdep/faultinfo_32.h
index b6f2437ec29c..ab5c8e47049c 100644
--- a/arch/x86/um/shared/sysdep/faultinfo_32.h
+++ b/arch/x86/um/shared/sysdep/faultinfo_32.h
@@ -29,4 +29,16 @@ struct faultinfo {
 
 #define PTRACE_FULL_FAULTINFO 0
 
+#define ___backtrack_faulted(_faulted)				\
+	asm volatile (						\
+		"mov $0, %0\n"					\
+		"movl $__get_kernel_nofault_faulted_%=,%1\n"	\
+		"jmp _end_%=\n"					\
+		"__get_kernel_nofault_faulted_%=:\n"		\
+		"mov $1, %0;"					\
+		"_end_%=:"					\
+		: "=r" (_faulted),				\
+		  "=m" (current->thread.segv_continue) ::	\
+	)
+
 #endif
diff --git a/arch/x86/um/shared/sysdep/faultinfo_64.h b/arch/x86/um/shared/sysdep/faultinfo_64.h
index ee88f88974ea..26fb4835d3e9 100644
--- a/arch/x86/um/shared/sysdep/faultinfo_64.h
+++ b/arch/x86/um/shared/sysdep/faultinfo_64.h
@@ -29,4 +29,16 @@ struct faultinfo {
 
 #define PTRACE_FULL_FAULTINFO 1
 
+#define ___backtrack_faulted(_faulted)				\
+	asm volatile (						\
+		"mov $0, %0\n"					\
+		"movq $__get_kernel_nofault_faulted_%=,%1\n"	\
+		"jmp _end_%=\n"					\
+		"__get_kernel_nofault_faulted_%=:\n"		\
+		"mov $1, %0;"					\
+		"_end_%=:"					\
+		: "=r" (_faulted),				\
+		  "=m" (current->thread.segv_continue) ::	\
+	)
+
 #endif
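The ___backtrack_faulted() macro added to both faultinfo headers clears _faulted and records the address of a local recovery label in current->thread.segv_continue; the evident intent (the handler side is not part of this diff) is that UML's SEGV handler can use the new mc_set_rip() helper above to move the saved program counter to that label, where _faulted is set to 1. Below is a rough userspace analogue of that guarded-access pattern, using sigsetjmp()/siglongjmp() instead of patching the mcontext. It is a sketch, not UML code; the names read_nofault and recover are made up for illustration.

/* nofault_read.c - guarded read that reports a fault instead of crashing */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf recover;              /* plays the role of thread.segv_continue */

static void segv_handler(int sig)
{
        (void)sig;
        siglongjmp(recover, 1);         /* "backtrack" to the recovery point */
}

/* Returns 0 and fills *val on success, 1 if reading *addr faulted. */
static int read_nofault(const int *addr, int *val)
{
        if (sigsetjmp(recover, 1))      /* re-entered with 1 after a fault */
                return 1;
        *val = *addr;                   /* may fault */
        return 0;
}

int main(void)
{
        struct sigaction sa = { .sa_handler = segv_handler };
        int ok = 42, out = 0, faulted;

        sigemptyset(&sa.sa_mask);
        sigaction(SIGSEGV, &sa, NULL);

        faulted = read_nofault(&ok, &out);
        printf("valid pointer: faulted=%d val=%d\n", faulted, out);
        printf("NULL pointer:  faulted=%d\n", read_nofault(NULL, &out));
        return 0;
}

The in-kernel variant avoids the setjmp bookkeeping: the recovery address lives in a per-thread field and the fault handler simply rewrites the saved instruction pointer.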
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index 75087e85b6fd..2934e170b0fe 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -187,7 +187,12 @@ static int copy_sc_to_user(struct sigcontext __user *to,
          * Put magic/size values for userspace. We do not bother to verify them
          * later on, however, userspace needs them should it try to read the
          * XSTATE data. And ptrace does not fill in these parts.
+         *
+         * Skip this if we do not have an XSTATE frame.
          */
+        if (host_fp_size <= sizeof(to_fp64->fpstate))
+                return 0;
+
         BUILD_BUG_ON(sizeof(int) != FP_XSTATE_MAGIC2_SIZE);
 #ifdef CONFIG_X86_32
         __put_user(offsetof(struct _fpstate_32, _fxsr_env) +
@@ -367,11 +372,13 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
         int err = 0, sig = ksig->sig;
         unsigned long fp_to;
 
-        frame = (struct rt_sigframe __user *)
-                round_down(stack_top - sizeof(struct rt_sigframe), 16);
+        frame = (void __user *)stack_top - sizeof(struct rt_sigframe);
 
         /* Add required space for math frame */
-        frame = (struct rt_sigframe __user *)((unsigned long)frame - math_size);
+        frame = (void __user *)((unsigned long)frame - math_size);
+
+        /* ABI requires 16 byte boundary alignment */
+        frame = (void __user *)round_down((unsigned long)frame, 16);
 
         /* Subtract 128 for a red zone and 8 for proper alignment */
         frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128 - 8);
diff --git a/arch/x86/um/vdso/vma.c b/arch/x86/um/vdso/vma.c
index f238f7b33cdd..dc8dfb2abd80 100644
--- a/arch/x86/um/vdso/vma.c
+++ b/arch/x86/um/vdso/vma.c
@@ -12,33 +12,22 @@
 
 static unsigned int __read_mostly vdso_enabled = 1;
 unsigned long um_vdso_addr;
+static struct page *um_vdso;
 
 extern unsigned long task_size;
 extern char vdso_start[], vdso_end[];
 
-static struct page **vdsop;
-
 static int __init init_vdso(void)
 {
-        struct page *um_vdso;
-
         BUG_ON(vdso_end - vdso_start > PAGE_SIZE);
 
         um_vdso_addr = task_size - PAGE_SIZE;
 
-        vdsop = kmalloc(sizeof(struct page *), GFP_KERNEL);
-        if (!vdsop)
-                goto oom;
-
         um_vdso = alloc_page(GFP_KERNEL);
-        if (!um_vdso) {
-                kfree(vdsop);
-
+        if (!um_vdso)
                 goto oom;
-        }
 
         copy_page(page_address(um_vdso), vdso_start);
-        *vdsop = um_vdso;
 
         return 0;
 
@@ -56,6 +45,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
         struct mm_struct *mm = current->mm;
         static struct vm_special_mapping vdso_mapping = {
                 .name = "[vdso]",
+                .pages = &um_vdso,
         };
 
         if (!vdso_enabled)
@@ -64,7 +54,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
         if (mmap_write_lock_killable(mm))
                 return -EINTR;
 
-        vdso_mapping.pages = vdsop;
         vma = _install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
                 VM_READ|VM_EXEC|
                 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
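The vma.c change keeps the vDSO page pointer in a file-scope static (um_vdso) and lets vdso_mapping's .pages initializer reference it, instead of kmalloc'ing a one-element page array and assigning it at mmap time. The page installed at um_vdso_addr is what a guest process sees through the AT_SYSINFO_EHDR auxiliary-vector entry; the short sketch below (hypothetical test code, not part of the patch, assuming a 64-bit process) checks from userspace that the mapping really starts with an ELF header.

/* vdso_peek.c - inspect the [vdso] mapping advertised via the auxiliary vector */
#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
        /* Elf64_Ehdr assumes a 64-bit process; a 32-bit UML guest would use Elf32_Ehdr */
        const Elf64_Ehdr *vdso = (const Elf64_Ehdr *)getauxval(AT_SYSINFO_EHDR);

        if (!vdso) {
                puts("no vDSO provided (e.g. vdso_enabled = 0)");
                return 0;
        }

        /* the page copied from vdso_start begins with an ELF header */
        printf("vDSO at %p, e_ident[1..3] = \"%.3s\"\n",
               (const void *)vdso, (const char *)&vdso->e_ident[1]);
        return 0;
}

Making um_vdso file-scope is also what allows the compile-time .pages initializer, which is why the runtime "vdso_mapping.pages = vdsop;" assignment disappears from arch_setup_additional_pages().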