| author | Paolo Bonzini <pbonzini@redhat.com> | 2020-09-11 13:12:11 -0400 |
|---|---|---|
| committer | Paolo Bonzini <pbonzini@redhat.com> | 2020-09-11 13:12:11 -0400 |
| commit | 1b67fd086dd7be076f190dfe4b52403d0cf58375 (patch) | |
| tree | b7ea623c62bd8b1a1310c5e9d24dbf155a9d04aa /arch | |
| parent | b5331379bc62611d1026173a09c73573384201d9 (diff) | |
| parent | 7b75cd5128421c673153efb1236705696a1a9812 (diff) | |
| download | lwn-1b67fd086dd7be076f190dfe4b52403d0cf58375.tar.gz lwn-1b67fd086dd7be076f190dfe4b52403d0cf58375.zip | |
Merge tag 'kvmarm-fixes-5.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 fixes for Linux 5.9, take #1
- Multiple stolen time fixes, with a new capability to match x86
  (a userspace probe of it is sketched below)
- Fix for hugetlbfs mappings when PUD and PMD are the same level
- Fix for hugetlbfs mappings when PTE mappings are enforced
  (dirty logging, for example)
- Fix tracing output of 64-bit values
Diffstat (limited to 'arch')
329 files changed, 1498 insertions, 3201 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index a1124481d910..af14a567b493 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -481,9 +481,6 @@ config HAVE_STACKPROTECTOR
 	  An arch should select this symbol if:
 	  - it has implemented a stack canary (e.g. __stack_chk_guard)
 
-config CC_HAS_STACKPROTECTOR_NONE
-	def_bool $(cc-option,-fno-stack-protector)
-
 config STACKPROTECTOR
 	bool "Stack Protector buffer overflow detection"
 	depends on HAVE_STACKPROTECTOR
@@ -975,6 +972,9 @@ config HAVE_SPARSE_SYSCALL_NR
 	  entries at 4000, 5000 and 6000 locations. This option turns on syscall
 	  related optimizations for a given architecture.
 
+config ARCH_HAS_VDSO_DATA
+	bool
+
 source "kernel/gcov/Kconfig"
 
 source "scripts/gcc-plugins/Kconfig"
diff --git a/arch/alpha/include/asm/core_apecs.h b/arch/alpha/include/asm/core_apecs.h
index 0a07055bc0fe..2d9726fc02ef 100644
--- a/arch/alpha/include/asm/core_apecs.h
+++ b/arch/alpha/include/asm/core_apecs.h
@@ -384,7 +384,7 @@ struct el_apecs_procdata
 		}					\
 	} while (0)
 
-__EXTERN_INLINE unsigned int apecs_ioread8(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int apecs_ioread8(const void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr;
 	unsigned long result, base_and_type;
@@ -420,7 +420,7 @@ __EXTERN_INLINE void apecs_iowrite8(u8 b, void __iomem *xaddr)
 	*(vuip) ((addr << 5) + base_and_type) = w;
 }
 
-__EXTERN_INLINE unsigned int apecs_ioread16(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int apecs_ioread16(const void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr;
 	unsigned long result, base_and_type;
@@ -456,7 +456,7 @@ __EXTERN_INLINE void apecs_iowrite16(u16 b, void __iomem *xaddr)
 	*(vuip) ((addr << 5) + base_and_type) = w;
 }
 
-__EXTERN_INLINE unsigned int apecs_ioread32(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int apecs_ioread32(const void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr;
 	if (addr < APECS_DENSE_MEM)
diff --git a/arch/alpha/include/asm/core_cia.h b/arch/alpha/include/asm/core_cia.h
index c706a7f2b061..cb22991f6761 100644
--- a/arch/alpha/include/asm/core_cia.h
+++ b/arch/alpha/include/asm/core_cia.h
@@ -342,7 +342,7 @@ struct el_CIA_sysdata_mcheck {
 #define vuip	volatile unsigned int __force *
 #define vulp	volatile unsigned long __force *
 
-__EXTERN_INLINE unsigned int cia_ioread8(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int cia_ioread8(const void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr;
 	unsigned long result, base_and_type;
@@ -374,7 +374,7 @@ __EXTERN_INLINE void cia_iowrite8(u8 b, void __iomem *xaddr)
 	*(vuip) ((addr << 5) + base_and_type) = w;
 }
 
-__EXTERN_INLINE unsigned int cia_ioread16(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int cia_ioread16(const void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr;
 	unsigned long result, base_and_type;
@@ -404,7 +404,7 @@ __EXTERN_INLINE void cia_iowrite16(u16 b, void __iomem *xaddr)
 	*(vuip) ((addr << 5) + base_and_type) = w;
 }
 
-__EXTERN_INLINE unsigned int cia_ioread32(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int cia_ioread32(const void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr;
 	if (addr < CIA_DENSE_MEM)
diff --git a/arch/alpha/include/asm/core_lca.h b/arch/alpha/include/asm/core_lca.h
index 84d5e5b84f4f..ec86314418cb 100644
--- a/arch/alpha/include/asm/core_lca.h
+++ b/arch/alpha/include/asm/core_lca.h
@@ -230,7 +230,7 @@ union el_lca {
 	} while (0)
 
 
-__EXTERN_INLINE unsigned int lca_ioread8(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int lca_ioread8(const void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr;
 	unsigned long result, base_and_type;
@@ -266,7 +266,7 @@ __EXTERN_INLINE void lca_iowrite8(u8 b, void __iomem *xaddr)
 	*(vuip) ((addr << 5) + base_and_type) = w;
 }
 
-__EXTERN_INLINE unsigned int lca_ioread16(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int lca_ioread16(const void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr;
 	unsigned long result, base_and_type;
@@ -302,7 +302,7 @@ __EXTERN_INLINE void lca_iowrite16(u16 b, void __iomem *xaddr)
 	*(vuip) ((addr << 5) + base_and_type) = w;
 }
 
-__EXTERN_INLINE unsigned int lca_ioread32(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int lca_ioread32(const void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr;
 	if (addr < LCA_DENSE_MEM)
diff --git a/arch/alpha/include/asm/core_marvel.h b/arch/alpha/include/asm/core_marvel.h
index cc6fd92d5fa9..b266e02e284b 100644
--- a/arch/alpha/include/asm/core_marvel.h
+++ b/arch/alpha/include/asm/core_marvel.h
@@ -332,10 +332,10 @@ struct io7 {
 #define vucp	volatile unsigned char __force *
 #define vusp	volatile unsigned short __force *
 
-extern unsigned int marvel_ioread8(void __iomem *);
+extern unsigned int marvel_ioread8(const void __iomem *);
 extern void marvel_iowrite8(u8 b, void __iomem *);
 
-__EXTERN_INLINE unsigned int marvel_ioread16(void __iomem *addr)
+__EXTERN_INLINE unsigned int marvel_ioread16(const void __iomem *addr)
 {
 	return __kernel_ldwu(*(vusp)addr);
 }
diff --git a/arch/alpha/include/asm/core_mcpcia.h b/arch/alpha/include/asm/core_mcpcia.h
index b30dc128210d..cb24d1bd6141 100644
--- a/arch/alpha/include/asm/core_mcpcia.h
+++ b/arch/alpha/include/asm/core_mcpcia.h
@@ -267,7 +267,7 @@ extern inline int __mcpcia_is_mmio(unsigned long addr)
 	return (addr & 0x80000000UL) == 0;
 }
 
-__EXTERN_INLINE unsigned int mcpcia_ioread8(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int mcpcia_ioread8(const void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK;
 	unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK;
@@ -291,7 +291,7 @@ __EXTERN_INLINE void mcpcia_iowrite8(u8 b, void __iomem *xaddr)
 	*(vuip) ((addr << 5) + hose + 0x00) = w;
 }
 
-__EXTERN_INLINE unsigned int mcpcia_ioread16(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int mcpcia_ioread16(const void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK;
 	unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK;
@@ -315,7 +315,7 @@ __EXTERN_INLINE void mcpcia_iowrite16(u16 b, void __iomem *xaddr)
 	*(vuip) ((addr << 5) + hose + 0x08) = w;
 }
 
-__EXTERN_INLINE unsigned int mcpcia_ioread32(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int mcpcia_ioread32(const void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long)xaddr;
diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h
index e0b33d09e93a..12bb7addc789 100644
--- a/arch/alpha/include/asm/core_t2.h
+++ b/arch/alpha/include/asm/core_t2.h
@@ -572,7 +572,7 @@ __EXTERN_INLINE int t2_is_mmio(const volatile void __iomem *addr)
    it doesn't make sense to merge the pio and mmio routines.  */
 
 #define IOPORT(OS, NS)						\
-__EXTERN_INLINE unsigned int t2_ioread##NS(void __iomem *xaddr)	\
+__EXTERN_INLINE unsigned int t2_ioread##NS(const void __iomem *xaddr) \
 {								\
 	if (t2_is_mmio(xaddr))					\
 		return t2_read##OS(xaddr);			\
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index a4d0c19f1e79..1f6a909d1fa5 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -150,9 +150,9 @@ static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)	\
 	alpha_mv.mv_##NAME(b, addr);					\
 }
 
-REMAP1(unsigned int, ioread8, /**/)
-REMAP1(unsigned int, ioread16, /**/)
-REMAP1(unsigned int, ioread32, /**/)
+REMAP1(unsigned int, ioread8, const)
+REMAP1(unsigned int, ioread16, const)
+REMAP1(unsigned int, ioread32, const)
 REMAP1(u8, readb, const volatile)
 REMAP1(u16, readw, const volatile)
 REMAP1(u32, readl, const volatile)
@@ -307,7 +307,7 @@ static inline int __is_mmio(const volatile void __iomem *addr)
  */
 
 #if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
-extern inline unsigned int ioread8(void __iomem *addr)
+extern inline unsigned int ioread8(const void __iomem *addr)
 {
 	unsigned int ret;
 	mb();
@@ -316,7 +316,7 @@ extern inline unsigned int ioread8(void __iomem *addr)
 	return ret;
 }
 
-extern inline unsigned int ioread16(void __iomem *addr)
+extern inline unsigned int ioread16(const void __iomem *addr)
 {
 	unsigned int ret;
 	mb();
@@ -359,7 +359,7 @@ extern inline void outw(u16 b, unsigned long port)
 #endif
 
 #if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
-extern inline unsigned int ioread32(void __iomem *addr)
+extern inline unsigned int ioread32(const void __iomem *addr)
 {
 	unsigned int ret;
 	mb();
@@ -489,10 +489,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
 }
 #endif
 
-#define ioread16be(p) be16_to_cpu(ioread16(p))
-#define ioread32be(p) be32_to_cpu(ioread32(p))
-#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
-#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
+#define ioread16be(p) swab16(ioread16(p))
+#define ioread32be(p) swab32(ioread32(p))
+#define iowrite16be(v,p) iowrite16(swab16(v), (p))
+#define iowrite32be(v,p) iowrite32(swab32(v), (p))
 
 #define inb_p		inb
 #define inw_p		inw
diff --git a/arch/alpha/include/asm/io_trivial.h b/arch/alpha/include/asm/io_trivial.h
index ba3d8f0cfe0c..a1a29cbe02fa 100644
--- a/arch/alpha/include/asm/io_trivial.h
+++ b/arch/alpha/include/asm/io_trivial.h
@@ -7,15 +7,15 @@
 
 #if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
 __EXTERN_INLINE unsigned int
-IO_CONCAT(__IO_PREFIX,ioread8)(void __iomem *a)
+IO_CONCAT(__IO_PREFIX,ioread8)(const void __iomem *a)
 {
-	return __kernel_ldbu(*(volatile u8 __force *)a);
+	return __kernel_ldbu(*(const volatile u8 __force *)a);
 }
 
 __EXTERN_INLINE unsigned int
-IO_CONCAT(__IO_PREFIX,ioread16)(void __iomem *a)
+IO_CONCAT(__IO_PREFIX,ioread16)(const void __iomem *a)
 {
-	return __kernel_ldwu(*(volatile u16 __force *)a);
+	return __kernel_ldwu(*(const volatile u16 __force *)a);
 }
 
 __EXTERN_INLINE void
@@ -33,9 +33,9 @@ IO_CONCAT(__IO_PREFIX,iowrite16)(u16 b, void __iomem *a)
 
 #if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
 __EXTERN_INLINE unsigned int
-IO_CONCAT(__IO_PREFIX,ioread32)(void __iomem *a)
+IO_CONCAT(__IO_PREFIX,ioread32)(const void __iomem *a)
 {
-	return *(volatile u32 __force *)a;
+	return *(const volatile u32 __force *)a;
 }
 
 __EXTERN_INLINE void
@@ -73,14 +73,14 @@ IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a)
 __EXTERN_INLINE u8
 IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a)
 {
-	void __iomem *addr = (void __iomem *)a;
+	const void __iomem *addr = (const void __iomem *)a;
 	return IO_CONCAT(__IO_PREFIX,ioread8)(addr);
 }
 
 __EXTERN_INLINE u16
 IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a)
 {
-	void __iomem *addr = (void __iomem *)a;
+	const void __iomem *addr = (const void __iomem *)a;
 	return IO_CONCAT(__IO_PREFIX,ioread16)(addr);
 }
diff --git a/arch/alpha/include/asm/jensen.h b/arch/alpha/include/asm/jensen.h
index 436dc905b6ad..916895155a88 100644
--- a/arch/alpha/include/asm/jensen.h
+++ b/arch/alpha/include/asm/jensen.h
@@ -305,7 +305,7 @@ __EXTERN_INLINE int jensen_is_mmio(const volatile void __iomem *addr)
    that it doesn't make sense to merge them.  */
 
 #define IOPORT(OS, NS)						\
-__EXTERN_INLINE unsigned int jensen_ioread##NS(void __iomem *xaddr)	\
+__EXTERN_INLINE unsigned int jensen_ioread##NS(const void __iomem *xaddr) \
 {								\
 	if (jensen_is_mmio(xaddr))				\
 		return jensen_read##OS(xaddr - 0x100000000ul);	\
diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h
index a6b73c6d10ee..a4e96e2bec74 100644
--- a/arch/alpha/include/asm/machvec.h
+++ b/arch/alpha/include/asm/machvec.h
@@ -46,9 +46,9 @@ struct alpha_machine_vector
 	void (*mv_pci_tbi)(struct pci_controller *hose,
 			   dma_addr_t start, dma_addr_t end);
 
-	unsigned int (*mv_ioread8)(void __iomem *);
-	unsigned int (*mv_ioread16)(void __iomem *);
-	unsigned int (*mv_ioread32)(void __iomem *);
+	unsigned int (*mv_ioread8)(const void __iomem *);
+	unsigned int (*mv_ioread16)(const void __iomem *);
+	unsigned int (*mv_ioread32)(const void __iomem *);
 
 	void (*mv_iowrite8)(u8, void __iomem *);
 	void (*mv_iowrite16)(u16, void __iomem *);
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index 1fe2b56cb861..1b6f25efa247 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -20,7 +20,7 @@
 #define get_fs()  (current_thread_info()->addr_limit)
 #define set_fs(x) (current_thread_info()->addr_limit = (x))
 
-#define segment_eq(a, b)	((a).seg == (b).seg)
+#define uaccess_kernel()	(get_fs().seg == KERNEL_DS.seg)
 
 /*
  * Is a address valid? This does a straightforward calculation rather
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index 4c80d992a659..4485b77f8658 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -806,7 +806,7 @@ void __iomem *marvel_ioportmap (unsigned long addr)
 }
 
 unsigned int
-marvel_ioread8(void __iomem *xaddr)
+marvel_ioread8(const void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr;
 	if (__marvel_is_port_kbd(addr))
diff --git a/arch/alpha/kernel/io.c b/arch/alpha/kernel/io.c
index 938de13adfbf..838586abb1e0 100644
--- a/arch/alpha/kernel/io.c
+++ b/arch/alpha/kernel/io.c
@@ -14,7 +14,7 @@
    "generic", which bumps through the machine vector.  */
 
 unsigned int
-ioread8(void __iomem *addr)
+ioread8(const void __iomem *addr)
 {
 	unsigned int ret;
 	mb();
@@ -23,7 +23,7 @@ ioread8(void __iomem *addr)
 	return ret;
 }
 
-unsigned int ioread16(void __iomem *addr)
+unsigned int ioread16(const void __iomem *addr)
 {
 	unsigned int ret;
 	mb();
@@ -32,7 +32,7 @@ unsigned int ioread16(void __iomem *addr)
 	return ret;
 }
 
-unsigned int ioread32(void __iomem *addr)
+unsigned int ioread32(const void __iomem *addr)
 {
 	unsigned int ret;
 	mb();
@@ -257,7 +257,7 @@ EXPORT_SYMBOL(readq_relaxed);
 /*
 * Read COUNT 8-bit bytes from port PORT into memory starting at SRC.
 */
-void ioread8_rep(void __iomem *port, void *dst, unsigned long count)
+void ioread8_rep(const void __iomem *port, void *dst, unsigned long count)
 {
 	while ((unsigned long)dst & 0x3) {
 		if (!count)
@@ -300,7 +300,7 @@ EXPORT_SYMBOL(insb);
 * the interfaces seems to be slow: just using the inlined version
 * of the inw() breaks things.
 */
-void ioread16_rep(void __iomem *port, void *dst, unsigned long count)
+void ioread16_rep(const void __iomem *port, void *dst, unsigned long count)
 {
 	if (unlikely((unsigned long)dst & 0x3)) {
 		if (!count)
@@ -340,7 +340,7 @@ EXPORT_SYMBOL(insw);
 * but the interfaces seems to be slow: just using the inlined version
 * of the inl() breaks things.
 */
-void ioread32_rep(void __iomem *port, void *dst, unsigned long count)
+void ioread32_rep(const void __iomem *port, void *dst, unsigned long count)
 {
 	if (unlikely((unsigned long)dst & 0x3)) {
 		while (count--) {
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index a28fb211881d..ec8bed9e7b75 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -249,7 +249,7 @@
 316	common	mlockall		sys_mlockall
 317	common	munlockall		sys_munlockall
 318	common	sysinfo			sys_sysinfo
-319	common	_sysctl			sys_sysctl
+319	common	_sysctl			sys_ni_syscall
 # 320 was sys_idle
 321	common	oldumount		sys_oldumount
 322	common	swapon			sys_swapon
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index c2303a8c2b9f..09172f017efc 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/extable.h>
 #include <linux/uaccess.h>
+#include <linux/perf_event.h>
 
 extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *);
 
@@ -116,6 +117,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 #endif
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
 	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
@@ -148,7 +150,7 @@ retry:
 	/* If for any reason at all we couldn't handle the fault,
 	   make sure we exit gracefully rather than endlessly redo
 	   the fault.  */
-	fault = handle_mm_fault(vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags, regs);
 
 	if (fault_signal_pending(fault, regs))
 		return;
@@ -164,10 +166,6 @@ retry:
 	}
 
 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (fault & VM_FAULT_MAJOR)
-			current->maj_flt++;
-		else
-			current->min_flt++;
 		if (fault & VM_FAULT_RETRY) {
 			flags |= FAULT_FLAG_TRIED;
diff --git a/arch/arc/include/asm/segment.h b/arch/arc/include/asm/segment.h
index 6a2a5be5026d..871f8ab11bfd 100644
--- a/arch/arc/include/asm/segment.h
+++ b/arch/arc/include/asm/segment.h
@@ -14,8 +14,7 @@ typedef unsigned long mm_segment_t;
 
 #define KERNEL_DS		MAKE_MM_SEG(0)
 #define USER_DS			MAKE_MM_SEG(TASK_SIZE)
-
-#define segment_eq(a, b)	((a) == (b))
+#define uaccess_kernel()	(get_fs() == KERNEL_DS)
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASMARC_SEGMENT_H */
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 105420c23c8b..efeba1fe7252 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -91,7 +91,7 @@ fault:
 		goto fail;
 
 	mmap_read_lock(current->mm);
-	ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
+	ret = fixup_user_fault(current->mm, (unsigned long) uaddr,
 			       FAULT_FLAG_WRITE, NULL);
 	mmap_read_unlock(current->mm);
 
@@ -296,11 +296,6 @@ void flush_thread(void)
 {
 }
 
-int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
-{
-	return 0;
-}
-
 int elf_check_arch(const struct elf32_hdr *x)
 {
 	unsigned int eflags;
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 7287c793d1c9..f5657cb68e4f 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -105,6 +105,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 	if (write)
 		flags |= FAULT_FLAG_WRITE;
 
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
 	mmap_read_lock(mm);
 
@@ -130,7 +131,7 @@ retry:
 		goto bad_area;
 	}
 
-	fault = handle_mm_fault(vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags, regs);
 
 	/* Quick path to respond to signals */
 	if (fault_signal_pending(fault, regs)) {
@@ -155,22 +156,9 @@ bad_area:
 	 * Major/minor page fault accounting
 	 * (in case of retry we only land here once)
 	 */
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-
-	if (likely(!(fault & VM_FAULT_ERROR))) {
-		if (fault & VM_FAULT_MAJOR) {
-			tsk->maj_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-				      regs, address);
-		} else {
-			tsk->min_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-				      regs, address);
-		}
-
+	if (likely(!(fault & VM_FAULT_ERROR)))
 		/* Normal return path: fault Handled Gracefully */
 		return;
-	}
 
 	if (!user_mode(regs))
 		goto no_context;
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 00602a6fba04..b1147b7f2c8d 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -84,9 +84,8 @@ endif
 # -fstack-protector-strong triggers protection checks in this code,
 # but it is being used too early to link to meaningful stack_chk logic.
-nossp-flags-$(CONFIG_CC_HAS_STACKPROTECTOR_NONE) := -fno-stack-protector
 $(foreach o, $(libfdt_objs) atags_to_fdt.o, \
-	$(eval CFLAGS_$(o) := -I $(srctree)/scripts/dtc/libfdt $(nossp-flags-y)))
+	$(eval CFLAGS_$(o) := -I $(srctree)/scripts/dtc/libfdt -fno-stack-protector))
 
 # These were previously generated C files. When you are building the kernel
 # with O=, make sure to remove the stale files in the output tree. Otherwise,
@@ -103,13 +102,9 @@ clean-files += piggy_data lib1funcs.S ashldi3.S bswapsdi2.S hyp-stub.S
 
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 
-ifeq ($(CONFIG_FUNCTION_TRACER),y)
-ORIG_CFLAGS := $(KBUILD_CFLAGS)
-KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
-endif
-
 ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin \
 	     -I$(obj) $(DISABLE_ARM_SSP_PER_TASK_PLUGIN)
+ccflags-remove-$(CONFIG_FUNCTION_TRACER) += -pg
 asflags-y := -DZIMAGE
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
index 45de2ff6a777..b88d0caa4b2d 100644
--- a/arch/arm/boot/dts/am33xx-l4.dtsi
+++ b/arch/arm/boot/dts/am33xx-l4.dtsi
@@ -290,7 +290,7 @@
 			am33xx_pinmux: pinmux@800 {
 				compatible = "pinctrl-single";
 				reg = <0x800 0x238>;
-				#pinctrl-cells = <1>;
+				#pinctrl-cells = <2>;
 				pinctrl-single,register-width = <32>;
 				pinctrl-single,function-mask = <0x7f>;
 			};
diff --git a/arch/arm/configs/am200epdkit_defconfig b/arch/arm/configs/am200epdkit_defconfig
index f56ac394caf1..4e49d6cb2f62 100644
--- a/arch/arm/configs/am200epdkit_defconfig
+++ b/arch/arm/configs/am200epdkit_defconfig
@@ -3,7 +3,6 @@ CONFIG_LOCALVERSION="gum"
 CONFIG_SYSVIPC=y
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_EPOLL is not set
 # CONFIG_SHMEM is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index c675bc0d5aa8..be666f58bf7a 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -9,9 +9,6 @@ struct dev_archdata {
 #ifdef CONFIG_DMABOUNCE
 	struct dmabounce_device_info *dmabounce;
 #endif
-#ifdef CONFIG_IOMMU_API
-	void *iommu; /* private IOMMU data */
-#endif
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
 	struct dma_iommu_mapping	*mapping;
 #endif
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index b5fdd30252f8..a13d90206472 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -76,7 +76,7 @@ static inline void set_fs(mm_segment_t fs)
 	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
 }
 
-#define segment_eq(a, b)	((a) == (b))
+#define uaccess_kernel()	(get_fs() == KERNEL_DS)
 
 /*
  * We use 33-bit arithmetic here. Success returns zero, failure returns
@@ -267,7 +267,7 @@ extern int __put_user_8(void *, unsigned long long);
  */
 
 #define USER_DS			KERNEL_DS
-#define segment_eq(a, b)	(1)
+#define uaccess_kernel()	(true)
 #define __addr_ok(addr)		((void)(addr), 1)
 #define __range_ok(addr, size)	((void)(addr), 0)
 #define get_fs()		(KERNEL_DS)
diff --git a/arch/arm/include/asm/vdso/gettimeofday.h b/arch/arm/include/asm/vdso/gettimeofday.h
index 1b207cf07697..2134cbd5469f 100644
--- a/arch/arm/include/asm/vdso/gettimeofday.h
+++ b/arch/arm/include/asm/vdso/gettimeofday.h
@@ -113,7 +113,8 @@ static inline bool arm_vdso_hres_capable(void)
 }
 #define __arch_vdso_hres_capable arm_vdso_hres_capable
 
-static __always_inline u64 __arch_get_hw_counter(int clock_mode)
+static __always_inline u64 __arch_get_hw_counter(int clock_mode,
+						 const struct vdso_data *vd)
 {
 #ifdef CONFIG_ARM_ARCH_TIMER
 	u64 cycle_now;
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 3395be186c7d..8e6ace03e960 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -281,21 +281,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
 	return 1;
 }
 
-/*
- * fill in the fpe structure for a core dump...
- */
-int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
-{
-	struct thread_info *thread = current_thread_info();
-	int used_math = thread->used_cp[1] | thread->used_cp[2];
-
-	if (used_math)
-		memcpy(fp, &thread->fpstate.soft, sizeof (*fp));
-
-	return used_math != 0;
-}
-EXPORT_SYMBOL(dump_fpu);
-
 unsigned long get_wchan(struct task_struct *p)
 {
 	struct stackframe frame;
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index ab2568996ddb..c9dc912b83f0 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -713,7 +713,9 @@ struct page *get_signal_page(void)
 /* Defer to generic check */
 asmlinkage void addr_limit_check_failed(void)
 {
+#ifdef CONFIG_MMU
 	addr_limit_user_check();
+#endif
 }
 
 #ifdef CONFIG_DEBUG_RSEQ
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index c6550eddfce1..efa402025031 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -202,7 +202,8 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 
 static vm_fault_t __kprobes
 __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
-		unsigned int flags, struct task_struct *tsk)
+		unsigned int flags, struct task_struct *tsk,
+		struct pt_regs *regs)
 {
 	struct vm_area_struct *vma;
 	vm_fault_t fault;
@@ -224,7 +225,7 @@ good_area:
 		goto out;
 	}
 
-	return handle_mm_fault(vma, addr & PAGE_MASK, flags);
+	return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
 
 check_stack:
 	/* Don't allow expansion below FIRST_USER_ADDRESS */
@@ -266,6 +267,8 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
 		flags |= FAULT_FLAG_WRITE;
 
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
+
 	/*
 	 * As per x86, we may deadlock here. However, since the kernel only
 	 * validly references user space from well defined areas of the code,
@@ -290,7 +293,7 @@ retry:
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, fsr, flags, tsk);
+	fault = __do_page_fault(mm, addr, fsr, flags, tsk, regs);
 
 	/* If we need to retry but a fatal signal is pending, handle the
 	 * signal first. We do not need to release the mmap_lock because
@@ -302,23 +305,7 @@ retry:
 		return 0;
 	}
 
-	/*
-	 * Major/minor page fault accounting is only done on the
-	 * initial attempt. If we go through a retry, it is extremely
-	 * likely that the page will be found in page cache at that point.
-	 */
-
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 	if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (fault & VM_FAULT_MAJOR) {
-			tsk->maj_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-					regs, addr);
-		} else {
-			tsk->min_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-					regs, addr);
-		}
 		if (fault & VM_FAULT_RETRY) {
 			flags |= FAULT_FLAG_TRIED;
 			goto retry;
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 7e8ee4adf269..171077cbf419 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -162,7 +162,7 @@
 146	common	writev			sys_writev
 147	common	getsid			sys_getsid
 148	common	fdatasync		sys_fdatasync
-149	common	_sysctl			sys_sysctl
+149	common	_sysctl			sys_ni_syscall
 150	common	mlock			sys_mlock
 151	common	munlock			sys_munlock
 152	common	mlockall		sys_mlockall
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 12b778d55342..996498751318 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -6,9 +6,6 @@
 #define __ASM_DEVICE_H
 
 struct dev_archdata {
-#ifdef CONFIG_IOMMU_API
-	void *iommu;			/* private IOMMU data */
-#endif
 };
 
 struct pdev_archdata {
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e52c927aade5..905c2b87e05a 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -368,7 +368,6 @@ struct kvm_vcpu_arch {
 
 	/* Guest PV state */
 	struct {
-		u64 steal;
 		u64 last_steal;
 		gpa_t base;
 	} steal;
@@ -544,6 +543,7 @@ long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
 gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
 void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
 
+bool kvm_arm_pvtime_supported(void);
 int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
 			    struct kvm_device_attr *attr);
 int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 8d7c466f809b..991dd5f031e4 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -50,7 +50,7 @@ static inline void set_fs(mm_segment_t fs)
 				CONFIG_ARM64_UAO));
 }
 
-#define segment_eq(a, b)	((a) == (b))
+#define uaccess_kernel()	(get_fs() == KERNEL_DS)
 
 /*
  * Test whether a block of memory is a valid user space address.
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 17e81bd9a2d3..734860ac7cf9 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -308,8 +308,8 @@ __SYSCALL(__NR_writev, compat_sys_writev) __SYSCALL(__NR_getsid, sys_getsid) #define __NR_fdatasync 148 __SYSCALL(__NR_fdatasync, sys_fdatasync) -#define __NR__sysctl 149 -__SYSCALL(__NR__sysctl, compat_sys_sysctl) + /* 149 was sys_sysctl */ +__SYSCALL(149, sys_ni_syscall) #define __NR_mlock 150 __SYSCALL(__NR_mlock, sys_mlock) #define __NR_munlock 151 diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h index 75cbae60455b..7508b0ac1d21 100644 --- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h +++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h @@ -103,7 +103,8 @@ int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) return ret; } -static __always_inline u64 __arch_get_hw_counter(s32 clock_mode) +static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, + const struct vdso_data *vd) { u64 res; diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h index 9c29ad3049f8..631ab1281633 100644 --- a/arch/arm64/include/asm/vdso/gettimeofday.h +++ b/arch/arm64/include/asm/vdso/gettimeofday.h @@ -64,7 +64,8 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) return ret; } -static __always_inline u64 __arch_get_hw_counter(s32 clock_mode) +static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, + const struct vdso_data *vd) { u64 res; diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index dab88260b137..7689f2031c0c 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c @@ -180,7 +180,7 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs, /* * We didn't take an exception to get here, set PAN. UAO will be cleared - * by sdei_event_handler()s set_fs(USER_DS) call. + * by sdei_event_handler()s force_uaccess_begin() call. */ __uaccess_enable_hw_pan(); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 691d21e4c717..57876b0b870b 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -206,6 +206,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) */ r = 1; break; + case KVM_CAP_STEAL_TIME: + r = kvm_arm_pvtime_supported(); + break; default: r = kvm_arch_vm_ioctl_check_extension(kvm, ext); break; diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index ba00bcc0c884..9a636b8064f1 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1877,6 +1877,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) { force_pte = true; vma_pagesize = PAGE_SIZE; + vma_shift = PAGE_SHIFT; } /* @@ -1970,7 +1971,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, (fault_status == FSC_PERM && stage2_is_exec(mmu, fault_ipa, vma_pagesize)); - if (vma_pagesize == PUD_SIZE) { + /* + * If PUD_SIZE == PMD_SIZE, there is no real PUD level, and + * all we have is a 2-level page table. Trying to map a PUD in + * this case would be fatally wrong. 
+ */ + if (PUD_SIZE != PMD_SIZE && vma_pagesize == PUD_SIZE) { pud_t new_pud = kvm_pfn_pud(pfn, mem_type); new_pud = kvm_pud_mkhuge(new_pud); diff --git a/arch/arm64/kvm/pvtime.c b/arch/arm64/kvm/pvtime.c index f7b52ce1557e..920ac43077ad 100644 --- a/arch/arm64/kvm/pvtime.c +++ b/arch/arm64/kvm/pvtime.c @@ -13,25 +13,22 @@ void kvm_update_stolen_time(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; - u64 steal; - __le64 steal_le; - u64 offset; - int idx; u64 base = vcpu->arch.steal.base; + u64 last_steal = vcpu->arch.steal.last_steal; + u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time); + u64 steal = 0; + int idx; if (base == GPA_INVALID) return; - /* Let's do the local bookkeeping */ - steal = vcpu->arch.steal.steal; - steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal; - vcpu->arch.steal.last_steal = current->sched_info.run_delay; - vcpu->arch.steal.steal = steal; - - steal_le = cpu_to_le64(steal); idx = srcu_read_lock(&kvm->srcu); - offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time); - kvm_put_guest(kvm, base + offset, steal_le, u64); + if (!kvm_get_guest(kvm, base + offset, steal)) { + steal = le64_to_cpu(steal); + vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay); + steal += vcpu->arch.steal.last_steal - last_steal; + kvm_put_guest(kvm, base + offset, cpu_to_le64(steal)); + } srcu_read_unlock(&kvm->srcu, idx); } @@ -43,7 +40,8 @@ long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu) switch (feature) { case ARM_SMCCC_HV_PV_TIME_FEATURES: case ARM_SMCCC_HV_PV_TIME_ST: - val = SMCCC_RET_SUCCESS; + if (vcpu->arch.steal.base != GPA_INVALID) + val = SMCCC_RET_SUCCESS; break; } @@ -64,7 +62,6 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu) * Start counting stolen time from the time the guest requests * the feature enabled. 
*/ - vcpu->arch.steal.steal = 0; vcpu->arch.steal.last_steal = current->sched_info.run_delay; idx = srcu_read_lock(&kvm->srcu); @@ -74,7 +71,7 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu) return base; } -static bool kvm_arm_pvtime_supported(void) +bool kvm_arm_pvtime_supported(void) { return !!sched_info_on(); } diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h index 4691053c5ee4..ff0444352bba 100644 --- a/arch/arm64/kvm/trace_arm.h +++ b/arch/arm64/kvm/trace_arm.h @@ -23,7 +23,7 @@ TRACE_EVENT(kvm_entry, __entry->vcpu_pc = vcpu_pc; ), - TP_printk("PC: 0x%08lx", __entry->vcpu_pc) + TP_printk("PC: 0x%016lx", __entry->vcpu_pc) ); TRACE_EVENT(kvm_exit, @@ -42,7 +42,7 @@ TRACE_EVENT(kvm_exit, __entry->vcpu_pc = vcpu_pc; ), - TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx", + TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%016lx", __print_symbolic(__entry->ret, kvm_arm_exception_type), __entry->esr_ec, __print_symbolic(__entry->esr_ec, kvm_arm_exception_class), @@ -69,7 +69,7 @@ TRACE_EVENT(kvm_guest_fault, __entry->ipa = ipa; ), - TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx", + TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#016lx", __entry->ipa, __entry->hsr, __entry->hxfar, __entry->vcpu_pc) ); @@ -131,7 +131,7 @@ TRACE_EVENT(kvm_mmio_emulate, __entry->cpsr = cpsr; ), - TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)", + TP_printk("Emulate MMIO at: 0x%016lx (instr: %08lx, cpsr: %08lx)", __entry->vcpu_pc, __entry->instr, __entry->cpsr) ); @@ -149,7 +149,7 @@ TRACE_EVENT(kvm_unmap_hva_range, __entry->end = end; ), - TP_printk("mmu notifier unmap range: %#08lx -- %#08lx", + TP_printk("mmu notifier unmap range: %#016lx -- %#016lx", __entry->start, __entry->end) ); @@ -165,7 +165,7 @@ TRACE_EVENT(kvm_set_spte_hva, __entry->hva = hva; ), - TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva) + TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva) ); TRACE_EVENT(kvm_age_hva, @@ -182,7 +182,7 @@ TRACE_EVENT(kvm_age_hva, __entry->end = end; ), - TP_printk("mmu notifier age hva: %#08lx -- %#08lx", + TP_printk("mmu notifier age hva: %#016lx -- %#016lx", __entry->start, __entry->end) ); @@ -198,7 +198,7 @@ TRACE_EVENT(kvm_test_age_hva, __entry->hva = hva; ), - TP_printk("mmu notifier test age hva: %#08lx", __entry->hva) + TP_printk("mmu notifier test age hva: %#016lx", __entry->hva) ); TRACE_EVENT(kvm_set_way_flush, diff --git a/arch/arm64/kvm/trace_handle_exit.h b/arch/arm64/kvm/trace_handle_exit.h index 2c56d1e0f5bd..8d78acc4fba7 100644 --- a/arch/arm64/kvm/trace_handle_exit.h +++ b/arch/arm64/kvm/trace_handle_exit.h @@ -22,7 +22,7 @@ TRACE_EVENT(kvm_wfx_arm64, __entry->is_wfe = is_wfe; ), - TP_printk("guest executed wf%c at: 0x%08lx", + TP_printk("guest executed wf%c at: 0x%016lx", __entry->is_wfe ? 
'e' : 'i', __entry->vcpu_pc) ); @@ -42,7 +42,7 @@ TRACE_EVENT(kvm_hvc_arm64, __entry->imm = imm; ), - TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx)", + TP_printk("HVC at 0x%016lx (r0: 0x%016lx, imm: 0x%lx)", __entry->vcpu_pc, __entry->r0, __entry->imm) ); @@ -135,7 +135,7 @@ TRACE_EVENT(trap_reg, __entry->write_value = write_value; ), - TP_printk("%s %s reg %d (0x%08llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value) + TP_printk("%s %s reg %d (0x%016llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value) ); TRACE_EVENT(kvm_handle_sys_reg, diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 8afb238ff335..f07333e86c2f 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -404,7 +404,8 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re #define VM_FAULT_BADACCESS 0x020000 static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr, - unsigned int mm_flags, unsigned long vm_flags) + unsigned int mm_flags, unsigned long vm_flags, + struct pt_regs *regs) { struct vm_area_struct *vma = find_vma(mm, addr); @@ -428,7 +429,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr, */ if (!(vma->vm_flags & vm_flags)) return VM_FAULT_BADACCESS; - return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags); + return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, regs); } static bool is_el0_instruction_abort(unsigned int esr) @@ -450,7 +451,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, { const struct fault_info *inf; struct mm_struct *mm = current->mm; - vm_fault_t fault, major = 0; + vm_fault_t fault; unsigned long vm_flags = VM_ACCESS_FLAGS; unsigned int mm_flags = FAULT_FLAG_DEFAULT; @@ -516,8 +517,7 @@ retry: #endif } - fault = __do_page_fault(mm, addr, mm_flags, vm_flags); - major |= fault & VM_FAULT_MAJOR; + fault = __do_page_fault(mm, addr, mm_flags, vm_flags, regs); /* Quick path to respond to signals */ if (fault_signal_pending(fault, regs)) { @@ -538,25 +538,8 @@ retry: * Handle the "normal" (no error) case first. */ if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | - VM_FAULT_BADACCESS)))) { - /* - * Major/minor page fault accounting is only done - * once. If we go through a retry, it is extremely - * likely that the page will be found in page cache at - * that point. - */ - if (major) { - current->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, - addr); - } else { - current->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, - addr); - } - + VM_FAULT_BADACCESS)))) return 0; - } /* * If we are in kernel mode at this point, we have no context to diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c index aafcee3e3f7e..73f8b49d485c 100644 --- a/arch/arm64/mm/numa.c +++ b/arch/arm64/mm/numa.c @@ -461,13 +461,3 @@ void __init arm64_numa_init(void) numa_init(dummy_numa_init); } - -/* - * We hope that we will be hotplugging memory on nodes we already know about, - * such that acpi_get_node() succeeds and we never fall back to this... 
- */ -int memory_add_physaddr_to_nid(u64 addr) -{ - pr_warn("Unknown node for memory at 0x%llx, assuming node 0\n", addr); - return 0; -} diff --git a/arch/csky/include/asm/segment.h b/arch/csky/include/asm/segment.h index db2640d5f575..79ede9b1a646 100644 --- a/arch/csky/include/asm/segment.h +++ b/arch/csky/include/asm/segment.h @@ -13,6 +13,6 @@ typedef struct { #define USER_DS ((mm_segment_t) { 0x80000000UL }) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #endif /* __ASM_CSKY_SEGMENT_H */ diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c index b1dce9f2f04d..081b178b41b1 100644 --- a/arch/csky/mm/fault.c +++ b/arch/csky/mm/fault.c @@ -150,7 +150,8 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0); + fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0, + regs); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; @@ -160,16 +161,6 @@ good_area: goto bad_area; BUG(); } - if (fault & VM_FAULT_MAJOR) { - tsk->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, - address); - } else { - tsk->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, - address); - } - mmap_read_unlock(mm); return; diff --git a/arch/h8300/include/asm/segment.h b/arch/h8300/include/asm/segment.h index a407978f9f9f..37950725d9b9 100644 --- a/arch/h8300/include/asm/segment.h +++ b/arch/h8300/include/asm/segment.h @@ -33,7 +33,7 @@ static inline mm_segment_t get_fs(void) return USER_DS; } -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #endif /* __ASSEMBLY__ */ diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c index d294e71d11d8..dfd322c5ce83 100644 --- a/arch/hexagon/kernel/process.c +++ b/arch/hexagon/kernel/process.c @@ -154,15 +154,6 @@ unsigned long get_wchan(struct task_struct *p) } /* - * Required placeholder. - */ -int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) -{ - return 0; -} - - -/* * Called on the exit path of event entry; see vm_entry.S * * Interrupts will already be disabled. diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c index cd3808f96b93..ef32c5a84ff3 100644 --- a/arch/hexagon/mm/vm_fault.c +++ b/arch/hexagon/mm/vm_fault.c @@ -18,6 +18,7 @@ #include <linux/signal.h> #include <linux/extable.h> #include <linux/hardirq.h> +#include <linux/perf_event.h> /* * Decode of hardware exception sends us to one of several @@ -53,6 +54,8 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs) if (user_mode(regs)) flags |= FAULT_FLAG_USER; + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); retry: mmap_read_lock(mm); vma = find_vma(mm, address); @@ -88,7 +91,7 @@ good_area: break; } - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, regs); if (fault_signal_pending(fault, regs)) return; @@ -96,10 +99,6 @@ good_area: /* The most common case -- we are done. 
*/ if (likely(!(fault & VM_FAULT_ERROR))) { if (flags & FAULT_FLAG_ALLOW_RETRY) { - if (fault & VM_FAULT_MAJOR) - current->maj_flt++; - else - current->min_flt++; if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; goto retry; diff --git a/arch/ia64/include/asm/device.h b/arch/ia64/include/asm/device.h index 3eb397415381..918b198cd5bb 100644 --- a/arch/ia64/include/asm/device.h +++ b/arch/ia64/include/asm/device.h @@ -6,9 +6,6 @@ #define _ASM_IA64_DEVICE_H struct dev_archdata { -#ifdef CONFIG_IOMMU_API - void *iommu; /* hook for IOMMU specific extension */ -#endif }; struct pdev_archdata { diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h index 7847ae40a181..aa92234c0142 100644 --- a/arch/ia64/include/asm/smp.h +++ b/arch/ia64/include/asm/smp.h @@ -18,7 +18,6 @@ #include <linux/bitops.h> #include <linux/irqreturn.h> -#include <asm/io.h> #include <asm/param.h> #include <asm/processor.h> #include <asm/ptrace.h> @@ -44,11 +43,6 @@ ia64_get_lid (void) #ifdef CONFIG_SMP -#define XTP_OFFSET 0x1e0008 - -#define SMP_IRQ_REDIRECTION (1 << 0) -#define SMP_IPI_REDIRECTION (1 << 1) - #define raw_smp_processor_id() (current_thread_info()->cpu) extern struct smp_boot_data { @@ -62,7 +56,6 @@ extern cpumask_t cpu_core_map[NR_CPUS]; DECLARE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map); extern int smp_num_siblings; extern void __iomem *ipi_base_addr; -extern unsigned char smp_int_redirect; extern volatile int ia64_cpu_to_sapicid[]; #define cpu_physical_id(i) ia64_cpu_to_sapicid[i] @@ -84,34 +77,6 @@ cpu_logical_id (int cpuid) return i; } -/* - * XTP control functions: - * min_xtp : route all interrupts to this CPU - * normal_xtp: nominal XTP value - * max_xtp : never deliver interrupts to this CPU. - */ - -static inline void -min_xtp (void) -{ - if (smp_int_redirect & SMP_IRQ_REDIRECTION) - writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */ -} - -static inline void -normal_xtp (void) -{ - if (smp_int_redirect & SMP_IRQ_REDIRECTION) - writeb(0x08, ipi_base_addr + XTP_OFFSET); /* XTP normal */ -} - -static inline void -max_xtp (void) -{ - if (smp_int_redirect & SMP_IRQ_REDIRECTION) - writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */ -} - /* Upping and downing of CPUs */ extern int __cpu_disable (void); extern void __cpu_die (unsigned int cpu); diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 8aa473a4b0f4..179243c3dfc7 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -50,7 +50,7 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) /* * When accessing user memory, we need to make sure the entire area really is in diff --git a/arch/ia64/include/asm/xtp.h b/arch/ia64/include/asm/xtp.h new file mode 100644 index 000000000000..5bf1d70ad860 --- /dev/null +++ b/arch/ia64/include/asm/xtp.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_IA64_XTP_H +#define _ASM_IA64_XTP_H + +#include <asm/io.h> + +#ifdef CONFIG_SMP + +#define XTP_OFFSET 0x1e0008 + +#define SMP_IRQ_REDIRECTION (1 << 0) +#define SMP_IPI_REDIRECTION (1 << 1) + +extern unsigned char smp_int_redirect; + +/* + * XTP control functions: + * min_xtp : route all interrupts to this CPU + * normal_xtp: nominal XTP value + * max_xtp : never deliver interrupts to this CPU. 
+ */ + +static inline void +min_xtp (void) +{ + if (smp_int_redirect & SMP_IRQ_REDIRECTION) + writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */ +} + +static inline void +normal_xtp (void) +{ + if (smp_int_redirect & SMP_IRQ_REDIRECTION) + writeb(0x08, ipi_base_addr + XTP_OFFSET); /* XTP normal */ +} + +static inline void +max_xtp (void) +{ + if (smp_int_redirect & SMP_IRQ_REDIRECTION) + writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */ +} + +#endif /* CONFIG_SMP */ + +#endif /* _ASM_IA64_XTP_Hy */ diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index fad4db20ce65..35adcf89035a 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -95,6 +95,7 @@ #include <asm/iosapic.h> #include <asm/processor.h> #include <asm/ptrace.h> +#include <asm/xtp.h> #undef DEBUG_INTERRUPT_ROUTING diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index 0a8e5e585edc..ecef17c7c35b 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c @@ -25,6 +25,7 @@ #include <linux/kernel_stat.h> #include <asm/mca.h> +#include <asm/xtp.h> /* * 'what should we do if we get a hw irq event on an illegal vector'. diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index ec0b40f6e9c6..f19cb97c0098 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -47,6 +47,7 @@ #include <linux/uaccess.h> #include <asm/unwind.h> #include <asm/user.h> +#include <asm/xtp.h> #include "entry.h" @@ -531,51 +532,17 @@ do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void * } void -do_dump_task_fpu (struct task_struct *task, struct unw_frame_info *info, void *arg) -{ - elf_fpreg_t *dst = arg; - int i; - - memset(dst, 0, sizeof(elf_fpregset_t)); /* don't leak any "random" bits */ - - if (unw_unwind_to_user(info) < 0) - return; - - /* f0 is 0.0, f1 is 1.0 */ - - for (i = 2; i < 32; ++i) - unw_get_fr(info, i, dst + i); - - ia64_flush_fph(task); - if ((task->thread.flags & IA64_THREAD_FPH_VALID) != 0) - memcpy(dst + 32, task->thread.fph, 96*16); -} - -void do_copy_regs (struct unw_frame_info *info, void *arg) { do_copy_task_regs(current, info, arg); } void -do_dump_fpu (struct unw_frame_info *info, void *arg) -{ - do_dump_task_fpu(current, info, arg); -} - -void ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst) { unw_init_running(do_copy_regs, dst); } -int -dump_fpu (struct pt_regs *pt, elf_fpregset_t dst) -{ - unw_init_running(do_dump_fpu, dst); - return 1; /* f0-f31 are always valid so we always return 1 */ -} - /* * Flush thread state. This is called when a thread does an execve(). */ diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c index c455ece977ad..e4f0705c0282 100644 --- a/arch/ia64/kernel/sal.c +++ b/arch/ia64/kernel/sal.c @@ -18,6 +18,7 @@ #include <asm/page.h> #include <asm/sal.h> #include <asm/pal.h> +#include <asm/xtp.h> __cacheline_aligned DEFINE_SPINLOCK(sal_lock); unsigned long sal_platform_features; diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index d2d440fe855b..dd595fbd8006 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -65,6 +65,7 @@ #include <asm/tlbflush.h> #include <asm/unistd.h> #include <asm/uv/uv.h> +#include <asm/xtp.h> #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE) # error "struct cpuinfo_ia64 too big!" 
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index 0e2742003121..7b7b64eb3129 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c @@ -45,6 +45,7 @@ #include <asm/tlbflush.h> #include <asm/unistd.h> #include <asm/mca.h> +#include <asm/xtp.h> /* * Note: alignment of 4 entries/cacheline was empirically determined diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl index ced9c83e47c9..f52a41f4c340 100644 --- a/arch/ia64/kernel/syscalls/syscall.tbl +++ b/arch/ia64/kernel/syscalls/syscall.tbl @@ -135,7 +135,7 @@ 123 common writev sys_writev 124 common pread64 sys_pread64 125 common pwrite64 sys_pwrite64 -126 common _sysctl sys_sysctl +126 common _sysctl sys_ni_syscall 127 common mmap sys_mmap 128 common munmap sys_munmap 129 common mlock sys_mlock diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 3a4dec334cc5..cd9766d2b6e0 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -14,6 +14,7 @@ #include <linux/kdebug.h> #include <linux/prefetch.h> #include <linux/uaccess.h> +#include <linux/perf_event.h> #include <asm/processor.h> #include <asm/exception.h> @@ -105,6 +106,8 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re flags |= FAULT_FLAG_USER; if (mask & VM_WRITE) flags |= FAULT_FLAG_WRITE; + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); retry: mmap_read_lock(mm); @@ -143,7 +146,7 @@ retry: * sure we exit gracefully rather than endlessly redo the * fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, regs); if (fault_signal_pending(fault, regs)) return; @@ -166,10 +169,6 @@ retry: } if (flags & FAULT_FLAG_ALLOW_RETRY) { - if (fault & VM_FAULT_MAJOR) - current->maj_flt++; - else - current->min_flt++; if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c index 5e1015eb6d0d..f34964271101 100644 --- a/arch/ia64/mm/numa.c +++ b/arch/ia64/mm/numa.c @@ -106,7 +106,5 @@ int memory_add_physaddr_to_nid(u64 addr) return 0; return nid; } - -EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); #endif #endif diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 6663f1741798..6f2f38d05772 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -16,6 +16,7 @@ config M68K select HAVE_DEBUG_BUGVERBOSE select GENERIC_IRQ_SHOW select GENERIC_ATOMIC64 + select NO_DMA if !MMU && !COLDFIRE select HAVE_UID16 select VIRT_TO_BUS select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS @@ -59,9 +60,6 @@ config TIME_LOW_RES config NO_IOPORT_MAP def_bool y -config NO_DMA - def_bool (MMU && SUN3) || (!MMU && !COLDFIRE) - config ZONE_DMA bool default y diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine index a82651d58af4..17e8c3a292d7 100644 --- a/arch/m68k/Kconfig.machine +++ b/arch/m68k/Kconfig.machine @@ -126,6 +126,7 @@ config SUN3 depends on MMU depends on !MMU_MOTOROLA select MMU_SUN3 if MMU + select NO_DMA select M68020 help This option enables support for the Sun 3 series of workstations diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h index c6686559e9b7..2b5e68a71ef7 100644 --- a/arch/m68k/include/asm/segment.h +++ b/arch/m68k/include/asm/segment.h @@ -52,7 +52,7 @@ static inline void set_fs(mm_segment_t val) #define set_fs(x) (current_thread_info()->addr_limit = (x)) #endif -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #endif /* __ASSEMBLY__ */ diff --git 
a/arch/m68k/include/asm/tlbflush.h b/arch/m68k/include/asm/tlbflush.h index 191e75a6bb24..5337bc2c262f 100644 --- a/arch/m68k/include/asm/tlbflush.h +++ b/arch/m68k/include/asm/tlbflush.h @@ -85,10 +85,10 @@ static inline void flush_tlb_mm(struct mm_struct *mm) static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { if (vma->vm_mm == current->active_mm) { - mm_segment_t old_fs = get_fs(); - set_fs(USER_DS); + mm_segment_t old_fs = force_uaccess_begin(); + __flush_tlb_one(addr); - set_fs(old_fs); + force_uaccess_end(old_fs); } } diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl index 1a4822de7292..81fc799d8392 100644 --- a/arch/m68k/kernel/syscalls/syscall.tbl +++ b/arch/m68k/kernel/syscalls/syscall.tbl @@ -156,7 +156,7 @@ 146 common writev sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync -149 common _sysctl sys_sysctl +149 common _sysctl sys_ni_syscall 150 common mlock sys_mlock 151 common munlock sys_munlock 152 common mlockall sys_mlockall diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index 508abb63da67..795f483b1050 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -12,6 +12,7 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/uaccess.h> +#include <linux/perf_event.h> #include <asm/setup.h> #include <asm/traps.h> @@ -84,6 +85,8 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, if (user_mode(regs)) flags |= FAULT_FLAG_USER; + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); retry: mmap_read_lock(mm); @@ -134,7 +137,7 @@ good_area: * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, regs); pr_debug("handle_mm_fault returns %x\n", fault); if (fault_signal_pending(fault, regs)) @@ -150,16 +153,7 @@ good_area: BUG(); } - /* - * Major/minor page fault accounting is only done on the - * initial attempt. If we go through a retry, it is extremely - * likely that the page will be found in page cache at that point. 
- */ if (flags & FAULT_FLAG_ALLOW_RETRY) { - if (fault & VM_FAULT_MAJOR) - current->maj_flt++; - else - current->min_flt++; if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 6723c56ec378..304b04ffea2f 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -41,7 +41,7 @@ # define get_fs() (current_thread_info()->addr_limit) # define set_fs(val) (current_thread_info()->addr_limit = (val)) -# define segment_eq(a, b) ((a).seg == (b).seg) +# define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #ifndef CONFIG_MMU diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl index a3f4be8e7238..b4e263916f41 100644 --- a/arch/microblaze/kernel/syscalls/syscall.tbl +++ b/arch/microblaze/kernel/syscalls/syscall.tbl @@ -156,7 +156,7 @@ 146 common writev sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync -149 common _sysctl sys_sysctl +149 common _sysctl sys_ni_syscall 150 common mlock sys_mlock 151 common munlock sys_munlock 152 common mlockall sys_mlockall diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index a2bfe587b491..b3fed2cecf84 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c @@ -28,6 +28,7 @@ #include <linux/mman.h> #include <linux/mm.h> #include <linux/interrupt.h> +#include <linux/perf_event.h> #include <asm/page.h> #include <asm/mmu.h> @@ -121,6 +122,8 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, if (user_mode(regs)) flags |= FAULT_FLAG_USER; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); + /* When running in the kernel we expect faults to occur only to * addresses in user space. All other faults represent errors in the * kernel and should generate an OOPS. Unfortunately, in the case of an @@ -214,7 +217,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. 
*/ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, regs); if (fault_signal_pending(fault, regs)) return; @@ -230,10 +233,6 @@ good_area: } if (flags & FAULT_FLAG_ALLOW_RETRY) { - if (unlikely(fault & VM_FAULT_MAJOR)) - current->maj_flt++; - else - current->min_flt++; if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; diff --git a/arch/mips/configs/cu1000-neo_defconfig b/arch/mips/configs/cu1000-neo_defconfig index 6b471cdb16cf..e924c817f73d 100644 --- a/arch/mips/configs/cu1000-neo_defconfig +++ b/arch/mips/configs/cu1000-neo_defconfig @@ -17,7 +17,6 @@ CONFIG_CGROUP_CPUACCT=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_SYSCTL_SYSCALL=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 62b298c50905..61fc01f177a6 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -72,7 +72,7 @@ extern u64 __ua_limit; #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) /* * eva_kernel_access() - determine whether kernel memory access on an EVA system diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h index c63ddcaea54c..2203e2d0ae2a 100644 --- a/arch/mips/include/asm/vdso/gettimeofday.h +++ b/arch/mips/include/asm/vdso/gettimeofday.h @@ -167,7 +167,8 @@ static __always_inline u64 read_gic_count(const struct vdso_data *data) #endif -static __always_inline u64 __arch_get_hw_counter(s32 clock_mode) +static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, + const struct vdso_data *vd) { #ifdef CONFIG_CSRC_R4K if (clock_mode == VDSO_CLOCKMODE_R4K) @@ -175,7 +176,7 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode) #endif #ifdef CONFIG_CLKSRC_MIPS_GIC if (clock_mode == VDSO_CLOCKMODE_GIC) - return read_gic_count(get_vdso_data()); + return read_gic_count(vd); #endif /* * Core checks mode already. 
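The MIPS vDSO hunk above is part of a cross-architecture interface change (the riscv gettimeofday.h hunk below is another instance): __arch_get_hw_counter() now receives the vdso_data pointer from the generic code instead of each architecture re-deriving it, which is why the MIPS version can drop its internal get_vdso_data() call. On the generic lib/vdso side the call becomes, roughly (sketched from the new signature, not quoted from this diff):

	/* the generic code already holds the data page pointer, so it
	 * hands it to the arch hook rather than making the arch
	 * recompute it PC-relatively inside the vDSO */
	cycles = __arch_get_hw_counter(vd->clock_mode, vd);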
So this raced against a concurrent diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl index 6b4ee92e3aed..f9df9edb67a4 100644 --- a/arch/mips/kernel/syscalls/syscall_n32.tbl +++ b/arch/mips/kernel/syscalls/syscall_n32.tbl @@ -159,7 +159,7 @@ 149 n32 munlockall sys_munlockall 150 n32 vhangup sys_vhangup 151 n32 pivot_root sys_pivot_root -152 n32 _sysctl compat_sys_sysctl +152 n32 _sysctl sys_ni_syscall 153 n32 prctl sys_prctl 154 n32 adjtimex sys_adjtimex_time32 155 n32 setrlimit compat_sys_setrlimit diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl index 391acbf425a0..557f9954a2b9 100644 --- a/arch/mips/kernel/syscalls/syscall_n64.tbl +++ b/arch/mips/kernel/syscalls/syscall_n64.tbl @@ -159,7 +159,7 @@ 149 n64 munlockall sys_munlockall 150 n64 vhangup sys_vhangup 151 n64 pivot_root sys_pivot_root -152 n64 _sysctl sys_sysctl +152 n64 _sysctl sys_ni_syscall 153 n64 prctl sys_prctl 154 n64 adjtimex sys_adjtimex 155 n64 setrlimit sys_setrlimit diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl index 5727c5187508..195b43cf27c8 100644 --- a/arch/mips/kernel/syscalls/syscall_o32.tbl +++ b/arch/mips/kernel/syscalls/syscall_o32.tbl @@ -164,7 +164,7 @@ 150 o32 unused150 sys_ni_syscall 151 o32 getsid sys_getsid 152 o32 fdatasync sys_fdatasync -153 o32 _sysctl sys_sysctl compat_sys_sysctl +153 o32 _sysctl sys_ni_syscall 154 o32 mlock sys_mlock 155 o32 munlock sys_munlock 156 o32 mlockall sys_mlockall diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 0adce604fa44..126a5f3f4e4c 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -191,17 +191,16 @@ static void emulate_load_store_insn(struct pt_regs *regs, * memory, so we need to "switch" the address limit to * user space, so that address check can work properly. 
*/ - seg = get_fs(); - set_fs(USER_DS); + seg = force_uaccess_begin(); switch (insn.spec3_format.func) { case lhe_op: if (!access_ok(addr, 2)) { - set_fs(seg); + force_uaccess_end(seg); goto sigbus; } LoadHWE(addr, value, res); if (res) { - set_fs(seg); + force_uaccess_end(seg); goto fault; } compute_return_epc(regs); @@ -209,12 +208,12 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; case lwe_op: if (!access_ok(addr, 4)) { - set_fs(seg); + force_uaccess_end(seg); goto sigbus; } LoadWE(addr, value, res); if (res) { - set_fs(seg); + force_uaccess_end(seg); goto fault; } compute_return_epc(regs); @@ -222,12 +221,12 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; case lhue_op: if (!access_ok(addr, 2)) { - set_fs(seg); + force_uaccess_end(seg); goto sigbus; } LoadHWUE(addr, value, res); if (res) { - set_fs(seg); + force_uaccess_end(seg); goto fault; } compute_return_epc(regs); @@ -235,35 +234,35 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; case she_op: if (!access_ok(addr, 2)) { - set_fs(seg); + force_uaccess_end(seg); goto sigbus; } compute_return_epc(regs); value = regs->regs[insn.spec3_format.rt]; StoreHWE(addr, value, res); if (res) { - set_fs(seg); + force_uaccess_end(seg); goto fault; } break; case swe_op: if (!access_ok(addr, 4)) { - set_fs(seg); + force_uaccess_end(seg); goto sigbus; } compute_return_epc(regs); value = regs->regs[insn.spec3_format.rt]; StoreWE(addr, value, res); if (res) { - set_fs(seg); + force_uaccess_end(seg); goto fault; } break; default: - set_fs(seg); + force_uaccess_end(seg); goto sigill; } - set_fs(seg); + force_uaccess_end(seg); } #endif break; diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 01b168a90434..7c871b14e74a 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -96,6 +96,8 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write, if (user_mode(regs)) flags |= FAULT_FLAG_USER; + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); retry: mmap_read_lock(mm); vma = find_vma(mm, address); @@ -152,12 +154,11 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. 
*/ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, regs); if (fault_signal_pending(fault, regs)) return; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; @@ -168,15 +169,6 @@ good_area: BUG(); } if (flags & FAULT_FLAG_ALLOW_RETRY) { - if (fault & VM_FAULT_MAJOR) { - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, - regs, address); - tsk->maj_flt++; - } else { - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, - regs, address); - tsk->min_flt++; - } if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index 2e64c7600eea..57fe83235281 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -35,8 +35,7 @@ cflags-vdso := $(ccflags-vdso) \ -O3 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \ -mrelax-pic-calls $(call cc-option, -mexplicit-relocs) \ -fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \ - $(call cc-option, -fno-asynchronous-unwind-tables) \ - $(call cc-option, -fno-stack-protector) + $(call cc-option, -fno-asynchronous-unwind-tables) aflags-vdso := $(ccflags-vdso) \ -D__ASSEMBLY__ -Wa,-gdwarf-2 diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h index 3a9219f53ee0..010ba5f1d7dd 100644 --- a/arch/nds32/include/asm/uaccess.h +++ b/arch/nds32/include/asm/uaccess.h @@ -44,7 +44,7 @@ static inline void set_fs(mm_segment_t fs) current_thread_info()->addr_limit = fs; } -#define segment_eq(a, b) ((a) == (b)) +#define uaccess_kernel() (get_fs() == KERNEL_DS) #define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size)) diff --git a/arch/nds32/kernel/process.c b/arch/nds32/kernel/process.c index e85bbbadc0e7..e01ad5d17224 100644 --- a/arch/nds32/kernel/process.c +++ b/arch/nds32/kernel/process.c @@ -121,7 +121,7 @@ void show_regs(struct pt_regs *regs) regs->uregs[3], regs->uregs[2], regs->uregs[1], regs->uregs[0]); pr_info(" IRQs o%s Segment %s\n", interrupts_enabled(regs) ? "n" : "ff", - segment_eq(get_fs(), KERNEL_DS)? "kernel" : "user"); + uaccess_kernel() ? "kernel" : "user"); } EXPORT_SYMBOL(show_regs); diff --git a/arch/nds32/mm/alignment.c b/arch/nds32/mm/alignment.c index c8b9061a2ee3..1eb7ded6992b 100644 --- a/arch/nds32/mm/alignment.c +++ b/arch/nds32/mm/alignment.c @@ -512,7 +512,7 @@ int do_unaligned_access(unsigned long addr, struct pt_regs *regs) { unsigned long inst; int ret = -EFAULT; - mm_segment_t seg = get_fs(); + mm_segment_t seg; inst = get_inst(regs->ipc); @@ -520,13 +520,12 @@ int do_unaligned_access(unsigned long addr, struct pt_regs *regs) "Faulting addr: 0x%08lx, pc: 0x%08lx [inst: 0x%08lx ]\n", addr, regs->ipc, inst); - set_fs(USER_DS); - + seg = force_uaccess_begin(); if (inst & NDS32_16BIT_INSTRUCTION) ret = do_16((inst >> 16) & 0xffff, regs); else ret = do_32(inst, regs); - set_fs(seg); + force_uaccess_end(seg); return ret; } diff --git a/arch/nds32/mm/fault.c b/arch/nds32/mm/fault.c index 8fb73f6401a0..f02524eb6d56 100644 --- a/arch/nds32/mm/fault.c +++ b/arch/nds32/mm/fault.c @@ -121,6 +121,8 @@ void do_page_fault(unsigned long entry, unsigned long addr, if (unlikely(faulthandler_disabled() || !mm)) goto no_context; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); + /* * As per x86, we may deadlock here. However, since the kernel only * validly references user space from well defined areas of the code, @@ -206,7 +208,7 @@ good_area: * the fault. 
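segment_eq() only ever had one interesting use, comparing get_fs() against KERNEL_DS, so the series replaces it outright with a uaccess_kernel() predicate defined per architecture on top of whatever mm_segment_t happens to be there: a struct with a .seg member on MIPS, nios2 and parisc, a bare integer on nds32 and OpenRISC. Call sites then read like the nds32 show_regs() hunk above. Condensed:

/* old: callers spelled out the comparison */
if (segment_eq(get_fs(), KERNEL_DS))
	/* ... */;

/* new: one predicate, e.g. for a struct-typed mm_segment_t */
#define uaccess_kernel()	(get_fs().seg == KERNEL_DS.seg)

if (uaccess_kernel())
	/* ... addr_limit currently covers kernel space ... */;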
*/ - fault = handle_mm_fault(vma, addr, flags); + fault = handle_mm_fault(vma, addr, flags, regs); /* * If we need to retry but a fatal signal is pending, handle the @@ -228,22 +230,7 @@ good_area: goto bad_area; } - /* - * Major/minor page fault accounting is only done on the initial - * attempt. If we go through a retry, it is extremely likely that the - * page will be found in page cache at that point. - */ - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); if (flags & FAULT_FLAG_ALLOW_RETRY) { - if (fault & VM_FAULT_MAJOR) { - tsk->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, - 1, regs, addr); - } else { - tsk->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, - 1, regs, addr); - } if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h index e83f831a76f9..a741abbed6fb 100644 --- a/arch/nios2/include/asm/uaccess.h +++ b/arch/nios2/include/asm/uaccess.h @@ -30,7 +30,7 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(seg) (current_thread_info()->addr_limit = (seg)) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define __access_ok(addr, len) \ (((signed long)(((long)get_fs().seg) & \ diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c index 0a42ab8e4c32..88a4ec03edab 100644 --- a/arch/nios2/kernel/process.c +++ b/arch/nios2/kernel/process.c @@ -252,14 +252,6 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) regs->sp = sp; } -#include <linux/elfcore.h> - -/* Fill in the FPU structure for a core dump. */ -int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r) -{ - return 0; /* Nios2 has no FPU and thus no FPU registers */ -} - asmlinkage int nios2_clone(unsigned long clone_flags, unsigned long newsp, int __user *parent_tidptr, int __user *child_tidptr, unsigned long tls) diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c index 4112ef0e247e..9476feecf512 100644 --- a/arch/nios2/mm/fault.c +++ b/arch/nios2/mm/fault.c @@ -24,6 +24,7 @@ #include <linux/mm.h> #include <linux/extable.h> #include <linux/uaccess.h> +#include <linux/perf_event.h> #include <asm/mmu_context.h> #include <asm/traps.h> @@ -83,6 +84,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause, if (user_mode(regs)) flags |= FAULT_FLAG_USER; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); + if (!mmap_read_trylock(mm)) { if (!user_mode(regs) && !search_exception_tables(regs->ea)) goto bad_area_nosemaphore; @@ -131,7 +134,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, regs); if (fault_signal_pending(fault, regs)) return; @@ -146,16 +149,7 @@ good_area: BUG(); } - /* - * Major/minor page fault accounting is only done on the - * initial attempt. If we go through a retry, it is extremely - * likely that the page will be found in page cache at that point. 
- */ if (flags & FAULT_FLAG_ALLOW_RETRY) { - if (fault & VM_FAULT_MAJOR) - current->maj_flt++; - else - current->min_flt++; if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; diff --git a/arch/openrisc/include/asm/io.h b/arch/openrisc/include/asm/io.h index db02fb2077d9..7d6b4a77b379 100644 --- a/arch/openrisc/include/asm/io.h +++ b/arch/openrisc/include/asm/io.h @@ -14,6 +14,8 @@ #ifndef __ASM_OPENRISC_IO_H #define __ASM_OPENRISC_IO_H +#include <linux/types.h> + /* * PCI: can we really do 0 here if we have no port IO? */ @@ -25,9 +27,12 @@ #define PIO_OFFSET 0 #define PIO_MASK 0 -#include <asm-generic/io.h> - +#define ioremap ioremap void __iomem *ioremap(phys_addr_t offset, unsigned long size); + +#define iounmap iounmap extern void iounmap(void *addr); +#include <asm-generic/io.h> + #endif diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index 17c24f14615f..f0390211236b 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -43,21 +43,22 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a, b) ((a) == (b)) +#define uaccess_kernel() (get_fs() == KERNEL_DS) /* Ensure that the range from addr to addr+size is all within the process' * address space */ -#define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs()-size)) +static inline int __range_ok(unsigned long addr, unsigned long size) +{ + const mm_segment_t fs = get_fs(); -/* Ensure that addr is below task's addr_limit */ -#define __addr_ok(addr) ((unsigned long) addr < get_fs()) + return size <= fs && addr <= (fs - size); +} #define access_ok(addr, size) \ ({ \ - unsigned long __ao_addr = (unsigned long)(addr); \ - unsigned long __ao_size = (unsigned long)(size); \ - __range_ok(__ao_addr, __ao_size); \ + __chk_user_ptr(addr); \ + __range_ok((unsigned long)(addr), (size)); \ }) /* @@ -100,7 +101,7 @@ extern long __put_user_bad(void); #define __put_user_check(x, ptr, size) \ ({ \ long __pu_err = -EFAULT; \ - __typeof__(*(ptr)) *__pu_addr = (ptr); \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ if (access_ok(__pu_addr, size)) \ __put_user_size((x), __pu_addr, (size), __pu_err); \ __pu_err; \ @@ -173,7 +174,7 @@ struct __large_struct { #define __get_user_check(x, ptr, size) \ ({ \ long __gu_err = -EFAULT, __gu_val = 0; \ - const __typeof__(*(ptr)) * __gu_addr = (ptr); \ + const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ if (access_ok(__gu_addr, size)) \ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ @@ -241,17 +242,17 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long size) return __copy_tofrom_user(to, (__force const void *)from, size); } static inline unsigned long -raw_copy_to_user(void *to, const void __user *from, unsigned long size) +raw_copy_to_user(void __user *to, const void *from, unsigned long size) { return __copy_tofrom_user((__force void *)to, from, size); } #define INLINE_COPY_FROM_USER #define INLINE_COPY_TO_USER -extern unsigned long __clear_user(void *addr, unsigned long size); +extern unsigned long __clear_user(void __user *addr, unsigned long size); static inline __must_check unsigned long -clear_user(void *addr, unsigned long size) +clear_user(void __user *addr, unsigned long size) { if (likely(access_ok(addr, size))) size = __clear_user(addr, size); diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c index 848f74c2c47a..0ff391f00334 
100644 --- a/arch/openrisc/kernel/process.c +++ b/arch/openrisc/kernel/process.c @@ -214,13 +214,6 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) regs->sp = sp; } -/* Fill in the fpu structure for a core dump. */ -int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpu) -{ - /* TODO */ - return 0; -} - extern struct thread_info *_switch(struct thread_info *old_ti, struct thread_info *new_ti); extern int lwa_flag; diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c index 8aa438e1f51f..b18e775f8be3 100644 --- a/arch/openrisc/kernel/setup.c +++ b/arch/openrisc/kernel/setup.c @@ -292,13 +292,15 @@ void __init setup_arch(char **cmdline_p) init_mm.brk = (unsigned long)_end; #ifdef CONFIG_BLK_DEV_INITRD - initrd_start = (unsigned long)&__initrd_start; - initrd_end = (unsigned long)&__initrd_end; if (initrd_start == initrd_end) { + printk(KERN_INFO "Initial ramdisk not found\n"); initrd_start = 0; initrd_end = 0; + } else { + printk(KERN_INFO "Initial ramdisk at: 0x%p (%lu bytes)\n", + (void *)(initrd_start), initrd_end - initrd_start); + initrd_below_start_ok = 1; } - initrd_below_start_ok = 1; #endif /* setup memblock allocator */ diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c index 4f0754874d78..97804f21a40c 100644 --- a/arch/openrisc/kernel/signal.c +++ b/arch/openrisc/kernel/signal.c @@ -68,7 +68,7 @@ static int restore_sigcontext(struct pt_regs *regs, asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs) { - struct rt_sigframe *frame = (struct rt_sigframe __user *)regs->sp; + struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->sp; sigset_t set; /* @@ -76,7 +76,7 @@ asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs) * then frame should be dword aligned here. If it's * not, then the user is trying to mess with us. 
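The OpenRISC __range_ok() rework above fixes two quiet problems with the old macro: get_fs() was evaluated twice per check, and the untyped macro arguments invited expansion surprises. The static inline evaluates the limit once and gives addr and size real types, while access_ok() additionally gains __chk_user_ptr() so sparse can complain about pointers that are not __user qualified. As taken from the hunk:

static inline int __range_ok(unsigned long addr, unsigned long size)
{
	const mm_segment_t fs = get_fs();	/* read the limit once */

	return size <= fs && addr <= (fs - size);
}

#define access_ok(addr, size)					\
({								\
	__chk_user_ptr(addr);	/* sparse type check only */	\
	__range_ok((unsigned long)(addr), (size));		\
})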
*/ - if (((long)frame) & 3) + if (((unsigned long)frame) & 3) goto badframe; if (!access_ok(frame, sizeof(*frame))) @@ -151,7 +151,7 @@ static inline void __user *get_sigframe(struct ksignal *ksig, static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { - struct rt_sigframe *frame; + struct rt_sigframe __user *frame; unsigned long return_ip; int err = 0; @@ -181,10 +181,10 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, l.ori r11,r0,__NR_sigreturn l.sys 1 */ - err |= __put_user(0xa960, (short *)(frame->retcode + 0)); - err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2)); - err |= __put_user(0x20000001, (unsigned long *)(frame->retcode + 4)); - err |= __put_user(0x15000000, (unsigned long *)(frame->retcode + 8)); + err |= __put_user(0xa960, (short __user *)(frame->retcode + 0)); + err |= __put_user(__NR_rt_sigreturn, (short __user *)(frame->retcode + 2)); + err |= __put_user(0x20000001, (unsigned long __user *)(frame->retcode + 4)); + err |= __put_user(0x15000000, (unsigned long __user *)(frame->retcode + 8)); if (err) return -EFAULT; diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c index bd1e660bbc89..29c82ef2e207 100644 --- a/arch/openrisc/kernel/smp.c +++ b/arch/openrisc/kernel/smp.c @@ -219,30 +219,99 @@ static inline void ipi_flush_tlb_all(void *ignored) local_flush_tlb_all(); } +static inline void ipi_flush_tlb_mm(void *info) +{ + struct mm_struct *mm = (struct mm_struct *)info; + + local_flush_tlb_mm(mm); +} + +static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm) +{ + unsigned int cpuid; + + if (cpumask_empty(cmask)) + return; + + cpuid = get_cpu(); + + if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) { + /* local cpu is the only cpu present in cpumask */ + local_flush_tlb_mm(mm); + } else { + on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1); + } + put_cpu(); +} + +struct flush_tlb_data { + unsigned long addr1; + unsigned long addr2; +}; + +static inline void ipi_flush_tlb_page(void *info) +{ + struct flush_tlb_data *fd = (struct flush_tlb_data *)info; + + local_flush_tlb_page(NULL, fd->addr1); +} + +static inline void ipi_flush_tlb_range(void *info) +{ + struct flush_tlb_data *fd = (struct flush_tlb_data *)info; + + local_flush_tlb_range(NULL, fd->addr1, fd->addr2); +} + +static void smp_flush_tlb_range(struct cpumask *cmask, unsigned long start, + unsigned long end) +{ + unsigned int cpuid; + + if (cpumask_empty(cmask)) + return; + + cpuid = get_cpu(); + + if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) { + /* local cpu is the only cpu present in cpumask */ + if ((end - start) <= PAGE_SIZE) + local_flush_tlb_page(NULL, start); + else + local_flush_tlb_range(NULL, start, end); + } else { + struct flush_tlb_data fd; + + fd.addr1 = start; + fd.addr2 = end; + + if ((end - start) <= PAGE_SIZE) + on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1); + else + on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1); + } + put_cpu(); +} + void flush_tlb_all(void) { on_each_cpu(ipi_flush_tlb_all, NULL, 1); } -/* - * FIXME: implement proper functionality instead of flush_tlb_all. - * *But*, as things currently stands, the local_tlb_flush_* functions will - * all boil down to local_tlb_flush_all anyway. 
- */ void flush_tlb_mm(struct mm_struct *mm) { - on_each_cpu(ipi_flush_tlb_all, NULL, 1); + smp_flush_tlb_mm(mm_cpumask(mm), mm); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) { - on_each_cpu(ipi_flush_tlb_all, NULL, 1); + smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE); } void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - on_each_cpu(ipi_flush_tlb_all, NULL, 1); + smp_flush_tlb_range(mm_cpumask(vma->vm_mm), start, end); } /* Instruction cache invalidate - performed on each cpu */ diff --git a/arch/openrisc/kernel/stacktrace.c b/arch/openrisc/kernel/stacktrace.c index 43f140a28bc7..54d38809e22c 100644 --- a/arch/openrisc/kernel/stacktrace.c +++ b/arch/openrisc/kernel/stacktrace.c @@ -13,6 +13,7 @@ #include <linux/export.h> #include <linux/sched.h> #include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <linux/stacktrace.h> #include <asm/processor.h> @@ -68,12 +69,25 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { unsigned long *sp = NULL; + if (!try_get_task_stack(tsk)) + return; + if (tsk == current) sp = (unsigned long *) &sp; - else - sp = (unsigned long *) KSTK_ESP(tsk); + else { + unsigned long ksp; + + /* Locate stack from kernel context */ + ksp = task_thread_info(tsk)->ksp; + ksp += STACK_FRAME_OVERHEAD; /* redzone */ + ksp += sizeof(struct pt_regs); + + sp = (unsigned long *) ksp; + } unwind_stack(trace, sp, save_stack_address_nosched); + + put_task_stack(tsk); } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S index 60449fd7f16f..22fbc5fb24b3 100644 --- a/arch/openrisc/kernel/vmlinux.lds.S +++ b/arch/openrisc/kernel/vmlinux.lds.S @@ -96,18 +96,6 @@ SECTIONS __init_end = .; - . = ALIGN(PAGE_SIZE); - .initrd : AT(ADDR(.initrd) - LOAD_OFFSET) - { - __initrd_start = .; - *(.initrd) - __initrd_end = .; - FILL (0); - . = ALIGN (PAGE_SIZE); - } - - __vmlinux_end = .; /* last address of the physical file */ - BSS_SECTION(0, 0, 0x20) _end = .; diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c index d2224ccca294..ca97d9baab51 100644 --- a/arch/openrisc/mm/fault.c +++ b/arch/openrisc/mm/fault.c @@ -15,6 +15,7 @@ #include <linux/interrupt.h> #include <linux/extable.h> #include <linux/sched/signal.h> +#include <linux/perf_event.h> #include <linux/uaccess.h> #include <asm/siginfo.h> @@ -103,6 +104,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address, if (in_interrupt() || !mm) goto no_context; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); + retry: mmap_read_lock(mm); vma = find_vma(mm, address); @@ -159,7 +162,7 @@ good_area: * the fault. 
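These OpenRISC SMP hunks replace the flush-everything-on-every-CPU placeholder with targeted shootdowns: each flush_tlb_* entry point narrows the flush to the CPUs in the mm's cpumask, skips the IPI entirely when the current CPU is the only one in the mask, and degrades a range flush to a single-page flush when the range fits in one page. The core decision, condensed from smp_flush_tlb_range() above (fd packs the {start, end} pair for the IPI handlers):

	cpuid = get_cpu();	/* no migration while we inspect the mask */

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* we are the only CPU in the mask: flush locally, no IPI */
		if ((end - start) <= PAGE_SIZE)
			local_flush_tlb_page(NULL, start);
		else
			local_flush_tlb_range(NULL, start, end);
	} else {
		on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
	}
	put_cpu();

The get_cpu()/put_cpu() pair disables preemption, so the "only us" test cannot be invalidated by a migration between the check and the local flush.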
*/ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, regs); if (fault_signal_pending(fault, regs)) return; @@ -176,10 +179,6 @@ good_area: if (flags & FAULT_FLAG_ALLOW_RETRY) { /*RGD modeled on Cris */ - if (fault & VM_FAULT_MAJOR) - tsk->maj_flt++; - else - tsk->min_flt++; if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c index 4b680aed8f5f..2b6feabf6381 100644 --- a/arch/openrisc/mm/tlb.c +++ b/arch/openrisc/mm/tlb.c @@ -137,21 +137,28 @@ void local_flush_tlb_mm(struct mm_struct *mm) void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *next_tsk) { + unsigned int cpu; + + if (unlikely(prev == next)) + return; + + cpu = smp_processor_id(); + + cpumask_clear_cpu(cpu, mm_cpumask(prev)); + cpumask_set_cpu(cpu, mm_cpumask(next)); + /* remember the pgd for the fault handlers * this is similar to the pgd register in some other CPU's. * we need our own copy of it because current and active_mm * might be invalid at points where we still need to derefer * the pgd. */ - current_pgd[smp_processor_id()] = next->pgd; + current_pgd[cpu] = next->pgd; /* We don't have context support implemented, so flush all * entries belonging to previous map */ - - if (prev != next) - local_flush_tlb_mm(prev); - + local_flush_tlb_mm(prev); } /* diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 03862320f779..21b375c67e53 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -34,13 +34,13 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; /* Can't use raw_spin_lock_irq because of #include problems, so * this is the substitute */ #define _atomic_spin_lock_irqsave(l,f) do { \ - arch_spinlock_t *s = ATOMIC_HASH(l); \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ local_irq_save(f); \ arch_spin_lock(s); \ } while(0) #define _atomic_spin_unlock_irqrestore(l,f) do { \ - arch_spinlock_t *s = ATOMIC_HASH(l); \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ arch_spin_unlock(s); \ local_irq_restore(f); \ } while(0) @@ -85,7 +85,7 @@ static __inline__ void atomic_##op(int i, atomic_t *v) \ _atomic_spin_lock_irqsave(v, flags); \ v->counter c_op i; \ _atomic_spin_unlock_irqrestore(v, flags); \ -} \ +} #define ATOMIC_OP_RETURN(op, c_op) \ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ @@ -148,7 +148,7 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \ _atomic_spin_lock_irqsave(v, flags); \ v->counter c_op i; \ _atomic_spin_unlock_irqrestore(v, flags); \ -} \ +} #define ATOMIC64_OP_RETURN(op, c_op) \ static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \ diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h index dbaaca84f27f..640d46edf32e 100644 --- a/arch/parisc/include/asm/barrier.h +++ b/arch/parisc/include/asm/barrier.h @@ -26,6 +26,67 @@ #define __smp_rmb() mb() #define __smp_wmb() mb() +#define __smp_store_release(p, v) \ +do { \ + typeof(p) __p = (p); \ + union { typeof(*p) __val; char __c[1]; } __u = \ + { .__val = (__force typeof(*p)) (v) }; \ + compiletime_assert_atomic_type(*p); \ + switch (sizeof(*p)) { \ + case 1: \ + asm volatile("stb,ma %0,0(%1)" \ + : : "r"(*(__u8 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + case 2: \ + asm volatile("sth,ma %0,0(%1)" \ + : : "r"(*(__u16 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + case 4: \ + asm volatile("stw,ma %0,0(%1)" \ + : : "r"(*(__u32 *)__u.__c), "r"(__p) \ + : 
"memory"); \ + break; \ + case 8: \ + if (IS_ENABLED(CONFIG_64BIT)) \ + asm volatile("std,ma %0,0(%1)" \ + : : "r"(*(__u64 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + } \ +} while (0) + +#define __smp_load_acquire(p) \ +({ \ + union { typeof(*p) __val; char __c[1]; } __u; \ + typeof(p) __p = (p); \ + compiletime_assert_atomic_type(*p); \ + switch (sizeof(*p)) { \ + case 1: \ + asm volatile("ldb,ma 0(%1),%0" \ + : "=r"(*(__u8 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + case 2: \ + asm volatile("ldh,ma 0(%1),%0" \ + : "=r"(*(__u16 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + case 4: \ + asm volatile("ldw,ma 0(%1),%0" \ + : "=r"(*(__u32 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + case 8: \ + if (IS_ENABLED(CONFIG_64BIT)) \ + asm volatile("ldd,ma 0(%1),%0" \ + : "=r"(*(__u64 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + } \ + __u.__val; \ +}) #include <asm-generic/barrier.h> #endif /* !__ASSEMBLY__ */ diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h index 301af07a04d4..3bd465a27791 100644 --- a/arch/parisc/include/asm/elf.h +++ b/arch/parisc/include/asm/elf.h @@ -305,9 +305,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; struct task_struct; -extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); -#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) - struct pt_regs; /* forward declaration... */ diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h index 116effe26143..45e20d38dc59 100644 --- a/arch/parisc/include/asm/io.h +++ b/arch/parisc/include/asm/io.h @@ -303,8 +303,8 @@ extern void outsl (unsigned long port, const void *src, unsigned long count); #define ioread64be ioread64be #define iowrite64 iowrite64 #define iowrite64be iowrite64be -extern u64 ioread64(void __iomem *addr); -extern u64 ioread64be(void __iomem *addr); +extern u64 ioread64(const void __iomem *addr); +extern u64 ioread64be(const void __iomem *addr); extern void iowrite64(u64 val, void __iomem *addr); extern void iowrite64be(u64 val, void __iomem *addr); diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index cc7ecc2ef55d..a6482b2ce0ea 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h @@ -10,6 +10,7 @@ #include <asm/cache.h> +#define __HAVE_ARCH_PMD_ALLOC_ONE #define __HAVE_ARCH_PMD_FREE #define __HAVE_ARCH_PGD_FREE #include <asm-generic/pgalloc.h> @@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT))); } +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) +{ + return (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER); +} + static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) { if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) { diff --git a/arch/parisc/include/asm/timex.h b/arch/parisc/include/asm/timex.h index 45537cd4d1d3..06b510f8172e 100644 --- a/arch/parisc/include/asm/timex.h +++ b/arch/parisc/include/asm/timex.h @@ -7,6 +7,7 @@ #ifndef _ASMPARISC_TIMEX_H #define _ASMPARISC_TIMEX_H +#include <asm/special_insns.h> #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index ebbb9ffe038c..ed2cd4fb479b 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -14,7 +14,7 @@ #define KERNEL_DS ((mm_segment_t){0}) #define USER_DS ((mm_segment_t){1}) -#define segment_eq(a, b) ((a).seg == 
(b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c index 1df0f67ed667..4bab21c71055 100644 --- a/arch/parisc/kernel/ftrace.c +++ b/arch/parisc/kernel/ftrace.c @@ -64,7 +64,8 @@ void notrace __hot ftrace_function_trampoline(unsigned long parent, function_trace_op, regs); #ifdef CONFIG_FUNCTION_GRAPH_TRACER - if (ftrace_graph_return != (trace_func_graph_ret_t) ftrace_stub || + if (dereference_function_descriptor(ftrace_graph_return) != + dereference_function_descriptor(ftrace_stub) || ftrace_graph_entry != ftrace_graph_entry_stub) { unsigned long *parent_rp; diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 86ec30cc0e77..f196d96e2f9f 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -152,25 +152,6 @@ void release_thread(struct task_struct *dead_task) } /* - * Fill in the FPU structure for a core dump. - */ - -int dump_fpu (struct pt_regs * regs, elf_fpregset_t *r) -{ - if (regs == NULL) - return 0; - - memcpy(r, regs->fr, sizeof *r); - return 1; -} - -int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r) -{ - memcpy(r, tsk->thread.regs.fr, sizeof(*r)); - return 1; -} - -/* * Idle thread support * * Detect when running on QEMU with SeaBIOS PDC Firmware and let diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index 292baabefade..def64d221cd4 100644 --- a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -163,7 +163,7 @@ 146 common writev sys_writev compat_sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync -149 common _sysctl sys_sysctl compat_sys_sysctl +149 common _sysctl sys_ni_syscall 150 common mlock sys_mlock 151 common munlock sys_munlock 152 common mlockall sys_mlockall diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c index 0195aec657e2..ce400417d54e 100644 --- a/arch/parisc/lib/iomap.c +++ b/arch/parisc/lib/iomap.c @@ -43,13 +43,13 @@ #endif struct iomap_ops { - unsigned int (*read8)(void __iomem *); - unsigned int (*read16)(void __iomem *); - unsigned int (*read16be)(void __iomem *); - unsigned int (*read32)(void __iomem *); - unsigned int (*read32be)(void __iomem *); - u64 (*read64)(void __iomem *); - u64 (*read64be)(void __iomem *); + unsigned int (*read8)(const void __iomem *); + unsigned int (*read16)(const void __iomem *); + unsigned int (*read16be)(const void __iomem *); + unsigned int (*read32)(const void __iomem *); + unsigned int (*read32be)(const void __iomem *); + u64 (*read64)(const void __iomem *); + u64 (*read64be)(const void __iomem *); void (*write8)(u8, void __iomem *); void (*write16)(u16, void __iomem *); void (*write16be)(u16, void __iomem *); @@ -57,9 +57,9 @@ struct iomap_ops { void (*write32be)(u32, void __iomem *); void (*write64)(u64, void __iomem *); void (*write64be)(u64, void __iomem *); - void (*read8r)(void __iomem *, void *, unsigned long); - void (*read16r)(void __iomem *, void *, unsigned long); - void (*read32r)(void __iomem *, void *, unsigned long); + void (*read8r)(const void __iomem *, void *, unsigned long); + void (*read16r)(const void __iomem *, void *, unsigned long); + void (*read32r)(const void __iomem *, void *, unsigned long); void (*write8r)(void __iomem *, const void *, unsigned long); void (*write16r)(void __iomem *, const void *, unsigned long); void 
(*write32r)(void __iomem *, const void *, unsigned long); @@ -69,17 +69,17 @@ struct iomap_ops { #define ADDR2PORT(addr) ((unsigned long __force)(addr) & 0xffffff) -static unsigned int ioport_read8(void __iomem *addr) +static unsigned int ioport_read8(const void __iomem *addr) { return inb(ADDR2PORT(addr)); } -static unsigned int ioport_read16(void __iomem *addr) +static unsigned int ioport_read16(const void __iomem *addr) { return inw(ADDR2PORT(addr)); } -static unsigned int ioport_read32(void __iomem *addr) +static unsigned int ioport_read32(const void __iomem *addr) { return inl(ADDR2PORT(addr)); } @@ -99,17 +99,17 @@ static void ioport_write32(u32 datum, void __iomem *addr) outl(datum, ADDR2PORT(addr)); } -static void ioport_read8r(void __iomem *addr, void *dst, unsigned long count) +static void ioport_read8r(const void __iomem *addr, void *dst, unsigned long count) { insb(ADDR2PORT(addr), dst, count); } -static void ioport_read16r(void __iomem *addr, void *dst, unsigned long count) +static void ioport_read16r(const void __iomem *addr, void *dst, unsigned long count) { insw(ADDR2PORT(addr), dst, count); } -static void ioport_read32r(void __iomem *addr, void *dst, unsigned long count) +static void ioport_read32r(const void __iomem *addr, void *dst, unsigned long count) { insl(ADDR2PORT(addr), dst, count); } @@ -150,37 +150,37 @@ static const struct iomap_ops ioport_ops = { /* Legacy I/O memory ops */ -static unsigned int iomem_read8(void __iomem *addr) +static unsigned int iomem_read8(const void __iomem *addr) { return readb(addr); } -static unsigned int iomem_read16(void __iomem *addr) +static unsigned int iomem_read16(const void __iomem *addr) { return readw(addr); } -static unsigned int iomem_read16be(void __iomem *addr) +static unsigned int iomem_read16be(const void __iomem *addr) { return __raw_readw(addr); } -static unsigned int iomem_read32(void __iomem *addr) +static unsigned int iomem_read32(const void __iomem *addr) { return readl(addr); } -static unsigned int iomem_read32be(void __iomem *addr) +static unsigned int iomem_read32be(const void __iomem *addr) { return __raw_readl(addr); } -static u64 iomem_read64(void __iomem *addr) +static u64 iomem_read64(const void __iomem *addr) { return readq(addr); } -static u64 iomem_read64be(void __iomem *addr) +static u64 iomem_read64be(const void __iomem *addr) { return __raw_readq(addr); } @@ -220,7 +220,7 @@ static void iomem_write64be(u64 datum, void __iomem *addr) __raw_writel(datum, addr); } -static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count) +static void iomem_read8r(const void __iomem *addr, void *dst, unsigned long count) { while (count--) { *(u8 *)dst = __raw_readb(addr); @@ -228,7 +228,7 @@ static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count) } } -static void iomem_read16r(void __iomem *addr, void *dst, unsigned long count) +static void iomem_read16r(const void __iomem *addr, void *dst, unsigned long count) { while (count--) { *(u16 *)dst = __raw_readw(addr); @@ -236,7 +236,7 @@ static void iomem_read16r(void __iomem *addr, void *dst, unsigned long count) } } -static void iomem_read32r(void __iomem *addr, void *dst, unsigned long count) +static void iomem_read32r(const void __iomem *addr, void *dst, unsigned long count) { while (count--) { *(u32 *)dst = __raw_readl(addr); @@ -297,49 +297,49 @@ static const struct iomap_ops *iomap_ops[8] = { }; -unsigned int ioread8(void __iomem *addr) +unsigned int ioread8(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return 
iomap_ops[ADDR_TO_REGION(addr)]->read8(addr); return *((u8 *)addr); } -unsigned int ioread16(void __iomem *addr) +unsigned int ioread16(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read16(addr); return le16_to_cpup((u16 *)addr); } -unsigned int ioread16be(void __iomem *addr) +unsigned int ioread16be(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read16be(addr); return *((u16 *)addr); } -unsigned int ioread32(void __iomem *addr) +unsigned int ioread32(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read32(addr); return le32_to_cpup((u32 *)addr); } -unsigned int ioread32be(void __iomem *addr) +unsigned int ioread32be(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read32be(addr); return *((u32 *)addr); } -u64 ioread64(void __iomem *addr) +u64 ioread64(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read64(addr); return le64_to_cpup((u64 *)addr); } -u64 ioread64be(void __iomem *addr) +u64 ioread64be(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read64be(addr); @@ -411,7 +411,7 @@ void iowrite64be(u64 datum, void __iomem *addr) /* Repeating interfaces */ -void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count) { if (unlikely(INDIRECT_ADDR(addr))) { iomap_ops[ADDR_TO_REGION(addr)]->read8r(addr, dst, count); @@ -423,7 +423,7 @@ void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) } } -void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count) { if (unlikely(INDIRECT_ADDR(addr))) { iomap_ops[ADDR_TO_REGION(addr)]->read16r(addr, dst, count); @@ -435,7 +435,7 @@ void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) } } -void ioread32_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count) { if (unlikely(INDIRECT_ADDR(addr))) { iomap_ops[ADDR_TO_REGION(addr)]->read32r(addr, dst, count); diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 66ac0719bd49..4bfe2da9fbe3 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -18,6 +18,7 @@ #include <linux/extable.h> #include <linux/uaccess.h> #include <linux/hugetlb.h> +#include <linux/perf_event.h> #include <asm/traps.h> @@ -281,6 +282,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, acc_type = parisc_acctyp(code, regs->iir); if (acc_type & VM_WRITE) flags |= FAULT_FLAG_WRITE; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); retry: mmap_read_lock(mm); vma = find_vma_prev(mm, address, &prev_vma); @@ -302,7 +304,7 @@ good_area: * fault. 
*/ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, regs); if (fault_signal_pending(fault, regs)) return; @@ -323,10 +325,6 @@ good_area: BUG(); } if (flags & FAULT_FLAG_ALLOW_RETRY) { - if (fault & VM_FAULT_MAJOR) - current->maj_flt++; - else - current->min_flt++; if (fault & VM_FAULT_RETRY) { /* * No need to mmap_read_unlock(mm) as we would diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 44af71543380..b88fd27a45f0 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -366,6 +366,8 @@ initrd-y := $(patsubst zImage%, zImage.initrd%, \ $(patsubst treeImage%, treeImage.initrd%, $(image-y))))) initrd-y := $(filter-out $(image-y), $(initrd-y)) targets += $(image-y) $(initrd-y) +targets += $(foreach x, dtbImage uImage cuImage simpleImage treeImage, \ + $(patsubst $(x).%, dts/%.dtb, $(filter $(x).%, $(image-y)))) $(addprefix $(obj)/, $(initrd-y)): $(obj)/ramdisk.image.gz diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h index d8a0729cf754..219559d65864 100644 --- a/arch/powerpc/include/asm/device.h +++ b/arch/powerpc/include/asm/device.h @@ -29,9 +29,6 @@ struct dev_archdata { struct iommu_table *iommu_table_base; #endif -#ifdef CONFIG_IOMMU_API - void *iommu_domain; -#endif #ifdef CONFIG_PPC64 struct pci_dn *pci_data; #endif diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 64c04ab09112..00699903f1ef 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -38,8 +38,7 @@ static inline void set_fs(mm_segment_t fs) set_thread_flag(TIF_FSCHECK); } -#define segment_eq(a, b) ((a).seg == (b).seg) - +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define user_addr_max() (get_fs().seg) #ifdef __powerpc64__ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index d4d5946224f8..cbf41fb4ee89 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -16,7 +16,7 @@ CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) -CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector) +CFLAGS_prom_init.o += -fno-stack-protector CFLAGS_prom_init.o += -DDISABLE_BRANCH_PROFILING CFLAGS_prom_init.o += -ffreestanding diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c index 5ac84efc6ede..9fe4fb3b08aa 100644 --- a/arch/powerpc/kernel/iomap.c +++ b/arch/powerpc/kernel/iomap.c @@ -15,23 +15,23 @@ * Here comes the ppc64 implementation of the IOMAP * interfaces. 
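The read-accessor constification running through the parisc lib/iomap.c hunks above and the powerpc kernel/iomap.c hunks just below serves a single annotation fix: ioread8() through ioread64be() and the *_rep variants take const void __iomem *, matching the asm-generic prototypes, while the write side keeps the mutable pointer. Drivers can then mark register windows they only read as const without casting. A hypothetical caller, purely to show the point (read_link_status and the 0x10 offset are made up):

/* regs can be declared const because ioread32() no longer
 * demands a mutable __iomem pointer */
static u32 read_link_status(const void __iomem *regs)
{
	return ioread32(regs + 0x10);	/* made-up status register offset */
}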
*/ -unsigned int ioread8(void __iomem *addr) +unsigned int ioread8(const void __iomem *addr) { return readb(addr); } -unsigned int ioread16(void __iomem *addr) +unsigned int ioread16(const void __iomem *addr) { return readw(addr); } -unsigned int ioread16be(void __iomem *addr) +unsigned int ioread16be(const void __iomem *addr) { return readw_be(addr); } -unsigned int ioread32(void __iomem *addr) +unsigned int ioread32(const void __iomem *addr) { return readl(addr); } -unsigned int ioread32be(void __iomem *addr) +unsigned int ioread32be(const void __iomem *addr) { return readl_be(addr); } @@ -41,27 +41,27 @@ EXPORT_SYMBOL(ioread16be); EXPORT_SYMBOL(ioread32); EXPORT_SYMBOL(ioread32be); #ifdef __powerpc64__ -u64 ioread64(void __iomem *addr) +u64 ioread64(const void __iomem *addr) { return readq(addr); } -u64 ioread64_lo_hi(void __iomem *addr) +u64 ioread64_lo_hi(const void __iomem *addr) { return readq(addr); } -u64 ioread64_hi_lo(void __iomem *addr) +u64 ioread64_hi_lo(const void __iomem *addr) { return readq(addr); } -u64 ioread64be(void __iomem *addr) +u64 ioread64be(const void __iomem *addr) { return readq_be(addr); } -u64 ioread64be_lo_hi(void __iomem *addr) +u64 ioread64be_lo_hi(const void __iomem *addr) { return readq_be(addr); } -u64 ioread64be_hi_lo(void __iomem *addr) +u64 ioread64be_hi_lo(const void __iomem *addr) { return readq_be(addr); } @@ -139,15 +139,15 @@ EXPORT_SYMBOL(iowrite64be_hi_lo); * FIXME! We could make these do EEH handling if we really * wanted. Not clear if we do. */ -void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count) { readsb(addr, dst, count); } -void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count) { readsw(addr, dst, count); } -void ioread32_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count) { readsl(addr, dst, count); } diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index be9f74546068..c2d737ff2e7b 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -197,7 +197,7 @@ 146 common writev sys_writev compat_sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync -149 nospu _sysctl sys_sysctl compat_sys_sysctl +149 nospu _sysctl sys_ni_syscall 150 common mlock sys_mlock 151 common munlock sys_munlock 152 common mlockall sys_mlockall diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 1478fceeb683..1da9dbba9217 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -1115,9 +1115,8 @@ void hash__early_init_mmu_secondary(void) && cpu_has_feature(CPU_FTR_HVMODE)) tlbiel_all(); -#ifdef CONFIG_PPC_MEM_KEYS - mtspr(SPRN_UAMOR, default_uamor); -#endif + if (IS_ENABLED(CONFIG_PPC_MEM_KEYS) && mmu_has_feature(MMU_FTR_PKEY)) + mtspr(SPRN_UAMOR, default_uamor); } #endif /* CONFIG_SMP */ diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index 69a6b87f2bb4..b1d091a97611 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -73,12 +73,6 @@ static int scan_pkey_feature(void) if (early_radix_enabled()) return 0; - /* - * Only P7 and above supports SPRN_AMR update with MSR[PR] = 1 - */ - if (!early_cpu_has_feature(CPU_FTR_ARCH_206)) - return 0; - ret 
= of_scan_flat_dt(dt_scan_storage_keys, &pkeys_total); if (ret == 0) { /* @@ -124,6 +118,12 @@ void __init pkey_early_init_devtree(void) __builtin_popcountl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) != (sizeof(u64) * BITS_PER_BYTE)); + /* + * Only P7 and above supports SPRN_AMR update with MSR[PR] = 1 + */ + if (!early_cpu_has_feature(CPU_FTR_ARCH_206)) + return; + /* scan the device tree for pkey feature */ pkeys_total = scan_pkey_feature(); if (!pkeys_total) diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c index b83abbead4a2..8acd00178956 100644 --- a/arch/powerpc/mm/copro_fault.c +++ b/arch/powerpc/mm/copro_fault.c @@ -64,7 +64,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, } ret = 0; - *flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0); + *flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0, NULL); if (unlikely(*flt & VM_FAULT_ERROR)) { if (*flt & VM_FAULT_OOM) { ret = -ENOMEM; @@ -76,11 +76,6 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, BUG(); } - if (*flt & VM_FAULT_MAJOR) - current->maj_flt++; - else - current->min_flt++; - out_unlock: mmap_read_unlock(mm); return ret; diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 925a7231abb3..0add963a849b 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -511,7 +511,7 @@ retry: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, regs); major |= fault & VM_FAULT_MAJOR; @@ -537,14 +537,9 @@ retry: /* * Major/minor page fault accounting. */ - if (major) { - current->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); + if (major) cmo_account_page_fault(); - } else { - current->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); - } + return 0; } NOKPROBE_SYMBOL(__do_page_fault); diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile index f4247ade71ca..cf85f0662d0d 100644 --- a/arch/powerpc/platforms/powermac/Makefile +++ b/arch/powerpc/platforms/powermac/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 CFLAGS_bootx_init.o += -fPIC -CFLAGS_bootx_init.o += $(call cc-option, -fno-stack-protector) +CFLAGS_bootx_init.o += -fno-stack-protector KASAN_SANITIZE_bootx_init.o := n diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile index 89c76ca35640..eb25d7554ffd 100644 --- a/arch/powerpc/xmon/Makefile +++ b/arch/powerpc/xmon/Makefile @@ -7,8 +7,7 @@ UBSAN_SANITIZE := n KASAN_SANITIZE := n # Disable ftrace for the entire directory -ORIG_CFLAGS := $(KBUILD_CFLAGS) -KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS)) +ccflags-remove-$(CONFIG_FUNCTION_TRACER) += $(CC_FLAGS_FTRACE) ifdef CONFIG_CC_IS_CLANG # clang stores addresses on the stack causing the frame size to blow diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 8ce9d607b53d..f56c66b3f5fe 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -8,6 +8,8 @@ #ifndef _ASM_RISCV_UACCESS_H #define _ASM_RISCV_UACCESS_H +#include <asm/pgtable.h> /* for TASK_SIZE */ + /* * User space memory access functions */ @@ -62,11 +64,9 @@ static inline void set_fs(mm_segment_t fs) current_thread_info()->addr_limit = fs; } -#define segment_eq(a, b) ((a).seg == (b).seg) - +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define user_addr_max() (get_fs().seg) - /** * 
access_ok: - Checks if a user space pointer is valid * @addr: User space pointer to start of block to check diff --git a/arch/riscv/include/asm/vdso/gettimeofday.h b/arch/riscv/include/asm/vdso/gettimeofday.h index 3099362d9f26..f839f16e0d2a 100644 --- a/arch/riscv/include/asm/vdso/gettimeofday.h +++ b/arch/riscv/include/asm/vdso/gettimeofday.h @@ -60,7 +60,8 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) return ret; } -static __always_inline u64 __arch_get_hw_counter(s32 clock_mode) +static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, + const struct vdso_data *vd) { /* * The purpose of csr_read(CSR_TIME) is to trap the system into diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index d0c5c316e9bb..0a4e81b8dc79 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -77,16 +77,10 @@ relocate: csrw CSR_SATP, a0 .align 2 1: - /* Set trap vector to exception handler */ - la a0, handle_exception + /* Set trap vector to spin forever to help debug */ + la a0, .Lsecondary_park csrw CSR_TVEC, a0 - /* - * Set sup0 scratch register to 0, indicating to exception vector that - * we are presently executing in kernel. - */ - csrw CSR_SCRATCH, zero - /* Reload the global pointer */ .option push .option norelax @@ -144,9 +138,23 @@ secondary_start_common: la a0, swapper_pg_dir call relocate #endif + call setup_trap_vector tail smp_callin #endif /* CONFIG_SMP */ +.align 2 +setup_trap_vector: + /* Set trap vector to exception handler */ + la a0, handle_exception + csrw CSR_TVEC, a0 + + /* + * Set sup0 scratch register to 0, indicating to exception vector that + * we are presently executing in kernel. + */ + csrw CSR_SCRATCH, zero + ret + .Lsecondary_park: /* We lack SMP support or have too many harts, so park this hart */ wfi @@ -240,6 +248,7 @@ clear_bss_done: call relocate #endif /* CONFIG_MMU */ + call setup_trap_vector /* Restore C environment */ la tp, init_task sw zero, TASK_TI_CPU(tp) diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 5873835a3e6b..716d64e36f83 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -109,7 +109,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(vma, addr, flags); + fault = handle_mm_fault(vma, addr, flags, regs); /* * If we need to retry but a fatal signal is pending, handle the @@ -127,21 +127,7 @@ good_area: BUG(); } - /* - * Major/minor page fault accounting is only done on the - * initial attempt. If we go through a retry, it is extremely - * likely that the page will be found in page cache at that point. 
- */ if (flags & FAULT_FLAG_ALLOW_RETRY) { - if (fault & VM_FAULT_MAJOR) { - tsk->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, - 1, regs, addr); - } else { - tsk->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, - 1, regs, addr); - } if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild index e63940bb57cd..8b98c501142d 100644 --- a/arch/s390/Kbuild +++ b/arch/s390/Kbuild @@ -7,5 +7,4 @@ obj-$(CONFIG_S390_HYPFS_FS) += hypfs/ obj-$(CONFIG_APPLDATA_BASE) += appldata/ obj-y += net/ obj-$(CONFIG_PCI) += pci/ -obj-$(CONFIG_NUMA) += numa/ obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += purgatory/ diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 8c0b52940165..3d86e12e8e3c 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -126,7 +126,6 @@ config S390 select HAVE_ARCH_JUMP_LABEL_RELATIVE select HAVE_ARCH_KASAN select HAVE_ARCH_KASAN_VMALLOC - select CLOCKSOURCE_VALIDATE_LAST_CYCLE select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_SOFT_DIRTY @@ -766,6 +765,7 @@ config VFIO_AP def_tristate n prompt "VFIO support for AP devices" depends on S390_AP_IOMMU && VFIO_MDEV_DEVICE && KVM + depends on ZCRYPT help This driver grants access to Adjunct Processor (AP) devices via the VFIO mediated device interface. diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 8dfa2cf1f05c..ba94b03c8b2f 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -27,7 +27,7 @@ KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables -KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-option,-ffreestanding) +KBUILD_CFLAGS_DECOMPRESSOR += -ffreestanding KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member) KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g) KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,)) diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index cae473a7b6f7..11c5952e1afa 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -45,7 +45,11 @@ static inline int atomic_fetch_add(int i, atomic_t *v) static inline void atomic_add(int i, atomic_t *v) { #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES - if (__builtin_constant_p(i) && (i > -129) && (i < 128)) { + /* + * Order of conditions is important to circumvent gcc 10 bug: + * https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549318.html + */ + if ((i > -129) && (i < 128) && __builtin_constant_p(i)) { __atomic_add_const(i, &v->counter); return; } @@ -112,7 +116,11 @@ static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) static inline void atomic64_add(s64 i, atomic64_t *v) { #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES - if (__builtin_constant_p(i) && (i > -129) && (i < 128)) { + /* + * Order of conditions is important to circumvent gcc 10 bug: + * https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549318.html + */ + if ((i > -129) && (i < 128) && __builtin_constant_p(i)) { __atomic64_add_const(i, (long *)&v->counter); return; } diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h index 17a26261f288..c1b82bcc017c 100644 --- a/arch/s390/include/asm/debug.h +++ b/arch/s390/include/asm/debug.h @@ -2,7 +2,7 @@ /* * S/390 debug facility * - * Copyright IBM Corp. 
1999, 2000 + * Copyright IBM Corp. 1999, 2020 */ #ifndef DEBUG_H #define DEBUG_H @@ -26,19 +26,14 @@ #define DEBUG_DATA(entry) (char *)(entry + 1) /* data is stored behind */ /* the entry information */ -#define __DEBUG_FEATURE_VERSION 2 /* version of debug feature */ +#define __DEBUG_FEATURE_VERSION 3 /* version of debug feature */ struct __debug_entry { - union { - struct { - unsigned long clock : 52; - unsigned long exception : 1; - unsigned long level : 3; - unsigned long cpuid : 8; - } fields; - unsigned long stck; - } id; + unsigned long clock : 60; + unsigned long exception : 1; + unsigned long level : 3; void *caller; + unsigned short cpu; } __packed; typedef struct __debug_entry debug_entry_t; diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index fbb507504a3b..3a0ac0c7a9a3 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -86,12 +86,6 @@ static inline const struct cpumask *cpumask_of_node(int node) #define pcibus_to_node(bus) __pcibus_to_node(bus) -#define node_distance(a, b) __node_distance(a, b) -static inline int __node_distance(int a, int b) -{ - return 0; -} - #else /* !CONFIG_NUMA */ #define numa_node_id numa_node_id diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 324438889fe1..f09444d6aeab 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -32,7 +32,7 @@ #define USER_DS_SACF (3) #define get_fs() (current->thread.mm_segment) -#define segment_eq(a,b) (((a) & 2) == ((b) & 2)) +#define uaccess_kernel() ((get_fs() & 2) == KERNEL_DS) void set_fs(mm_segment_t fs); diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index a8f136943deb..efca70970761 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -49,6 +49,7 @@ CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE) obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o +obj-$(CONFIG_NUMA) += numa.o obj-$(CONFIG_AUDIT) += audit.o compat-obj-$(CONFIG_AUDIT) += compat_audit.o obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index beb4b44a11d1..b6619ae9a3e0 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -2,7 +2,7 @@ /* * S/390 debug facility * - * Copyright IBM Corp. 1999, 2012 + * Copyright IBM Corp. 
1999, 2020 * * Author(s): Michael Holzheu (holzheu@de.ibm.com), * Holger Smolinski (Holger.Smolinski@de.ibm.com) @@ -433,7 +433,7 @@ static int debug_format_entry(file_private_info_t *p_info) act_entry = (debug_entry_t *) ((char *)id_snap->areas[p_info->act_area] [p_info->act_page] + p_info->act_entry); - if (act_entry->id.stck == 0LL) + if (act_entry->clock == 0LL) goto out; /* empty entry */ if (view->header_proc) len += view->header_proc(id_snap, view, p_info->act_area, @@ -829,12 +829,17 @@ static inline debug_entry_t *get_active_entry(debug_info_t *id) static inline void debug_finish_entry(debug_info_t *id, debug_entry_t *active, int level, int exception) { - active->id.stck = get_tod_clock_fast() - - *(unsigned long long *) &tod_clock_base[1]; - active->id.fields.cpuid = smp_processor_id(); + unsigned char clk[STORE_CLOCK_EXT_SIZE]; + unsigned long timestamp; + + get_tod_clock_ext(clk); + timestamp = *(unsigned long *) &clk[0] >> 4; + timestamp -= TOD_UNIX_EPOCH >> 12; + active->clock = timestamp; + active->cpu = smp_processor_id(); active->caller = __builtin_return_address(0); - active->id.fields.exception = exception; - active->id.fields.level = level; + active->exception = exception; + active->level = level; proceed_active_entry(id); if (exception) proceed_active_area(id); @@ -1398,25 +1403,24 @@ static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view, int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view, int area, debug_entry_t *entry, char *out_buf) { - unsigned long base, sec, usec; + unsigned long sec, usec; unsigned long caller; unsigned int level; char *except_str; int rc = 0; - level = entry->id.fields.level; - base = (*(unsigned long *) &tod_clock_base[0]) >> 4; - sec = (entry->id.stck >> 12) + base - (TOD_UNIX_EPOCH >> 12); + level = entry->level; + sec = entry->clock; usec = do_div(sec, USEC_PER_SEC); - if (entry->id.fields.exception) + if (entry->exception) except_str = "*"; else except_str = "-"; caller = (unsigned long) entry->caller; - rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %02i %pK ", + rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %04u %pK ", area, sec, usec, level, except_str, - entry->id.fields.cpuid, (void *)caller); + entry->cpu, (void *)caller); return rc; } EXPORT_SYMBOL(debug_dflt_header_fn); diff --git a/arch/s390/numa/numa.c b/arch/s390/kernel/numa.c index 51c5a9f6e525..51c5a9f6e525 100644 --- a/arch/s390/numa/numa.c +++ b/arch/s390/kernel/numa.c diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index b06dec1267d0..ec801d3bbb37 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -160,24 +160,6 @@ asmlinkage void execve_tail(void) asm volatile("sfpc %0" : : "d" (0)); } -/* - * fill in the FPU structure for a core dump. 
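The debug facility hunks above widen the per-entry clock to 60 bits and store microseconds since the Unix epoch directly, instead of a raw store-clock value that every view had to rebase. The conversion in debug_finish_entry() boils down to this sketch (tod_unix_epoch stands in for the kernel's TOD_UNIX_EPOCH constant; 64-bit unsigned long is assumed, as on s390):

/*
 * Convert a STORE CLOCK EXTENDED result to microseconds since
 * 1970, as the new debug_finish_entry() does: the first 8 bytes
 * of the STCKE output carry the epoch index plus the top bits of
 * the TOD clock, ">> 4" aligns them so one unit equals one
 * microsecond, and subtracting the rebased epoch offset yields
 * Unix time. Sketch only - the names are illustrative.
 */
static unsigned long tod_ext_to_unix_usec(const unsigned char clk[16],
					  unsigned long tod_unix_epoch)
{
	unsigned long timestamp = *(const unsigned long *)&clk[0] >> 4;

	return timestamp - (tod_unix_epoch >> 12);
}

debug_dflt_header_fn() then splits the result with do_div(sec, USEC_PER_SEC), which is why the old tod_clock_base arithmetic could be dropped from the formatter.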
- */ -int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) -{ - save_fpu_regs(); - fpregs->fpc = current->thread.fpu.fpc; - fpregs->pad = 0; - if (MACHINE_HAS_VX) - convert_vx_to_fp((freg_t *)&fpregs->fprs, - current->thread.fpu.vxrs); - else - memcpy(&fpregs->fprs, current->thread.fpu.fprs, - sizeof(fpregs->fprs)); - return 1; -} -EXPORT_SYMBOL(dump_fpu); - unsigned long get_wchan(struct task_struct *p) { struct unwind_state state; diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index f1fda4375526..10456bc936fb 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl @@ -138,7 +138,7 @@ 146 common writev sys_writev compat_sys_writev 147 common getsid sys_getsid sys_getsid 148 common fdatasync sys_fdatasync sys_fdatasync -149 common _sysctl sys_sysctl compat_sys_sysctl +149 common _sysctl - - 150 common mlock sys_mlock sys_mlock 151 common munlock sys_munlock sys_munlock 152 common mlockall sys_mlockall sys_mlockall diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 1608fd99bbee..2f177298c663 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -2768,7 +2768,7 @@ static struct page *get_map_page(struct kvm *kvm, u64 uaddr) struct page *page = NULL; mmap_read_lock(kvm->mm); - get_user_pages_remote(NULL, kvm->mm, uaddr, 1, FOLL_WRITE, + get_user_pages_remote(kvm->mm, uaddr, 1, FOLL_WRITE, &page, NULL, NULL); mmap_read_unlock(kvm->mm); return page; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 66da278a67fb..6b74b92c1a58 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1892,7 +1892,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) r = set_guest_storage_key(current->mm, hva, keys[i], 0); if (r) { - r = fixup_user_fault(current, current->mm, hva, + r = fixup_user_fault(current->mm, hva, FAULT_FLAG_WRITE, &unlocked); if (r) break; diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 2f721a923b54..cd74989ce0b0 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -273,7 +273,7 @@ retry: rc = get_guest_storage_key(current->mm, vmaddr, &key); if (rc) { - rc = fixup_user_fault(current, current->mm, vmaddr, + rc = fixup_user_fault(current->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked); if (!rc) { mmap_read_unlock(current->mm); @@ -319,7 +319,7 @@ retry: mmap_read_lock(current->mm); rc = reset_guest_reference_bit(current->mm, vmaddr); if (rc < 0) { - rc = fixup_user_fault(current, current->mm, vmaddr, + rc = fixup_user_fault(current->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked); if (!rc) { mmap_read_unlock(current->mm); @@ -390,7 +390,7 @@ static int handle_sske(struct kvm_vcpu *vcpu) m3 & SSKE_MC); if (rc < 0) { - rc = fixup_user_fault(current, current->mm, vmaddr, + rc = fixup_user_fault(current->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked); rc = !rc ? -EAGAIN : rc; } @@ -1094,7 +1094,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) rc = cond_set_guest_storage_key(current->mm, vmaddr, key, NULL, nq, mr, mc); if (rc < 0) { - rc = fixup_user_fault(current, current->mm, vmaddr, + rc = fixup_user_fault(current->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked); rc = !rc ? 
-EAGAIN : rc; } diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c index eb382ceaa116..7c988994931f 100644 --- a/arch/s390/lib/test_unwind.c +++ b/arch/s390/lib/test_unwind.c @@ -64,6 +64,7 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs, break; if (state.reliable && !addr) { pr_err("unwind state reliable but addr is 0\n"); + kfree(bt); return -EINVAL; } sprint_symbol(sym, addr); diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index aebf9183bedd..4c8c063bce5b 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -476,7 +476,7 @@ retry: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, regs); if (fault_signal_pending(fault, regs)) { fault = VM_FAULT_SIGNAL; if (flags & FAULT_FLAG_RETRY_NOWAIT) @@ -486,21 +486,7 @@ retry: if (unlikely(fault & VM_FAULT_ERROR)) goto out_up; - /* - * Major/minor page fault accounting is only done on the - * initial attempt. If we go through a retry, it is extremely - * likely that the page will be found in page cache at that point. - */ if (flags & FAULT_FLAG_ALLOW_RETRY) { - if (fault & VM_FAULT_MAJOR) { - tsk->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, - regs, address); - } else { - tsk->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, - regs, address); - } if (fault & VM_FAULT_RETRY) { if (IS_ENABLED(CONFIG_PGSTE) && gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) { diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 190357ff86b3..373542ca1113 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -649,7 +649,7 @@ retry: rc = vmaddr; goto out_up; } - if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags, + if (fixup_user_fault(gmap->mm, vmaddr, fault_flags, &unlocked)) { rc = -EFAULT; goto out_up; @@ -879,7 +879,7 @@ static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr, BUG_ON(gmap_is_shadow(gmap)); fault_flags = (prot == PROT_WRITE) ? 
FAULT_FLAG_WRITE : 0; - if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked)) + if (fixup_user_fault(mm, vmaddr, fault_flags, &unlocked)) return -EFAULT; if (unlocked) /* lost mmap_lock, caller has to retry __gmap_translate */ @@ -2485,23 +2485,36 @@ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4], } EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static int thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr, + unsigned long end, struct mm_walk *walk) +{ + struct vm_area_struct *vma = walk->vma; + + split_huge_pmd(vma, pmd, addr); + return 0; +} + +static const struct mm_walk_ops thp_split_walk_ops = { + .pmd_entry = thp_split_walk_pmd_entry, +}; + static inline void thp_split_mm(struct mm_struct *mm) { -#ifdef CONFIG_TRANSPARENT_HUGEPAGE struct vm_area_struct *vma; - unsigned long addr; for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) { - for (addr = vma->vm_start; - addr < vma->vm_end; - addr += PAGE_SIZE) - follow_page(vma, addr, FOLL_SPLIT); vma->vm_flags &= ~VM_HUGEPAGE; vma->vm_flags |= VM_NOHUGEPAGE; + walk_page_vma(vma, &thp_split_walk_ops, NULL); } mm->def_flags |= VM_NOHUGEPAGE; -#endif } +#else +static inline void thp_split_mm(struct mm_struct *mm) +{ +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* * Remove all empty zero pages from the mapping for lazy refaulting diff --git a/arch/s390/numa/Makefile b/arch/s390/numa/Makefile deleted file mode 100644 index c89d26f4f77d..000000000000 --- a/arch/s390/numa/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-y += numa.o diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 9fc2b010e938..d20927128fce 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -1,75 +1,77 @@ # SPDX-License-Identifier: GPL-2.0 config SUPERH def_bool y + select ARCH_32BIT_OFF_T + select ARCH_HAVE_CUSTOM_GPIO_H + select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A) select ARCH_HAS_BINFMT_FLAT if !MMU + select ARCH_HAS_GIGANTIC_PAGE + select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST + select ARCH_HIBERNATION_POSSIBLE if MMU select ARCH_MIGHT_HAVE_PC_PARPORT - select HAVE_PATA_PLATFORM + select ARCH_WANT_IPC_PARSE_VERSION select CLKDEV_LOOKUP + select CPU_NO_EFFICIENT_FFS select DMA_DECLARE_COHERENT - select HAVE_IDE if HAS_IOPORT_MAP - select HAVE_OPROFILE + select GENERIC_ATOMIC64 + select GENERIC_CLOCKEVENTS + select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST + select GENERIC_IDLE_POLL_SETUP + select GENERIC_IRQ_SHOW + select GENERIC_PCI_IOMAP if PCI + select GENERIC_SCHED_CLOCK + select GENERIC_STRNCPY_FROM_USER + select GENERIC_STRNLEN_USER + select GENERIC_SMP_IDLE_THREAD + select GUP_GET_PTE_LOW_HIGH if X2TLB + select HAVE_ARCH_AUDITSYSCALL + select HAVE_ARCH_KGDB + select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK - select HAVE_PERF_EVENTS + select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_BUGVERBOSE - select HAVE_FAST_GUP if MMU - select ARCH_HAVE_CUSTOM_GPIO_H - select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A) - select ARCH_HAS_GCOV_PROFILE_ALL - select PERF_USE_VMALLOC select HAVE_DEBUG_KMEMLEAK - select HAVE_KERNEL_GZIP - select CPU_NO_EFFICIENT_FFS + select HAVE_DYNAMIC_FTRACE + select HAVE_FAST_GUP if MMU + select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_TRACER + select HAVE_FUTEX_CMPXCHG if FUTEX + select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_HW_BREAKPOINT + select HAVE_IDE if HAS_IOPORT_MAP + select HAVE_IOREMAP_PROT if MMU && 
!X2TLB select HAVE_KERNEL_BZIP2 + select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZMA - select HAVE_KERNEL_XZ select HAVE_KERNEL_LZO + select HAVE_KERNEL_XZ + select HAVE_KPROBES + select HAVE_KRETPROBES + select HAVE_MIXED_BREAKPOINTS_REGS + select HAVE_MOD_ARCH_SPECIFIC if DWARF_UNWINDER + select HAVE_NMI + select HAVE_OPROFILE + select HAVE_PATA_PLATFORM + select HAVE_PERF_EVENTS + select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_UID16 - select ARCH_WANT_IPC_PARSE_VERSION + select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS - select HAVE_REGS_AND_STACK_ACCESS_API - select MAY_HAVE_SPARSE_IRQ select IRQ_FORCED_THREADING - select RTC_LIB - select GENERIC_ATOMIC64 - select GENERIC_IRQ_SHOW - select GENERIC_SMP_IDLE_THREAD - select GENERIC_IDLE_POLL_SETUP - select GENERIC_CLOCKEVENTS - select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST - select GENERIC_PCI_IOMAP if PCI - select GENERIC_SCHED_CLOCK - select GENERIC_STRNCPY_FROM_USER - select GENERIC_STRNLEN_USER - select HAVE_MOD_ARCH_SPECIFIC if DWARF_UNWINDER + select MAY_HAVE_SPARSE_IRQ select MODULES_USE_ELF_RELA + select NEED_SG_DMA_LENGTH + select NO_DMA if !MMU && !DMA_COHERENT select NO_GENERIC_PCI_IOPORT_MAP if PCI - select OLD_SIGSUSPEND select OLD_SIGACTION + select OLD_SIGSUSPEND select PCI_DOMAINS if PCI - select HAVE_ARCH_AUDITSYSCALL - select HAVE_FUTEX_CMPXCHG if FUTEX - select HAVE_NMI - select NEED_SG_DMA_LENGTH - select ARCH_HAS_GIGANTIC_PAGE - select ARCH_32BIT_OFF_T - select GUP_GET_PTE_LOW_HIGH if X2TLB - select HAVE_KPROBES - select HAVE_KRETPROBES - select HAVE_IOREMAP_PROT if MMU && !X2TLB - select HAVE_FUNCTION_TRACER - select HAVE_FTRACE_MCOUNT_RECORD - select HAVE_DYNAMIC_FTRACE - select ARCH_WANT_IPC_PARSE_VERSION - select HAVE_FUNCTION_GRAPH_TRACER - select HAVE_ARCH_KGDB - select HAVE_HW_BREAKPOINT - select HAVE_MIXED_BREAKPOINTS_REGS select PERF_EVENTS - select ARCH_HIBERNATION_POSSIBLE if MMU + select PERF_USE_VMALLOC + select RTC_LIB select SPARSE_IRQ - select HAVE_STACKPROTECTOR help The SuperH is a RISC processor targeted for use in embedded systems and consumer electronics; it was also used in the Sega Dreamcast @@ -123,8 +125,8 @@ config ARCH_HAS_ILOG2_U64 config NO_IOPORT_MAP def_bool !PCI - depends on !SH_CAYMAN && !SH_SH4202_MICRODEV && !SH_SHMIN && \ - !SH_HP6XX && !SH_SOLUTION_ENGINE + depends on !SH_SH4202_MICRODEV && !SH_SHMIN && !SH_HP6XX && \ + !SH_SOLUTION_ENGINE config IO_TRAPPED bool @@ -136,8 +138,10 @@ config DMA_COHERENT bool config DMA_NONCOHERENT - def_bool !DMA_COHERENT + def_bool !NO_DMA && !DMA_COHERENT + select ARCH_HAS_DMA_PREP_COHERENT select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select DMA_DIRECT_REMAP config PGTABLE_LEVELS default 3 if X2TLB @@ -630,7 +634,7 @@ config SMP Y to "Enhanced Real Time Clock Support", below. See also <file:Documentation/admin-guide/lockup-watchdogs.rst> and the SMP-HOWTO - available at <http://www.tldp.org/docs.html#howto>. + available at <https://www.tldp.org/docs.html#howto>. If you don't know what to do here, say N. 
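Back in the s390 gmap.c hunk, thp_split_mm() stops probing every address with follow_page(..., FOLL_SPLIT) and instead registers a pmd_entry callback with the generic page walker, splitting one huge PMD per callback rather than issuing one lookup per 4K page. A minimal sketch of that mm_walk_ops pattern, assuming CONFIG_TRANSPARENT_HUGEPAGE and a caller that already holds mmap_lock:

#include <linux/pagewalk.h>
#include <linux/huge_mm.h>

/* Called once per populated PMD in the walked range. */
static int split_pmd_entry(pmd_t *pmd, unsigned long addr,
			   unsigned long end, struct mm_walk *walk)
{
	split_huge_pmd(walk->vma, pmd, addr);
	return 0;		/* 0 = continue the walk */
}

static const struct mm_walk_ops split_walk_ops = {
	.pmd_entry = split_pmd_entry,
};

/* Split every transparent huge mapping in one VMA. */
static void split_vma_thps(struct vm_area_struct *vma)
{
	walk_page_vma(vma, &split_walk_ops, NULL);
}

One pmd_entry invocation per huge mapping replaces a follow_page() call per 4K page, which is why the deleted per-page loop and its addr bookkeeping disappear.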
@@ -726,7 +730,6 @@ config ZERO_PAGE_OFFSET config BOOT_LINK_OFFSET hex default "0x00210000" if SH_SHMIN - default "0x00400000" if SH_CAYMAN default "0x00810000" if SH_7780_SOLUTION_ENGINE default "0x009e0000" if SH_TITAN default "0x01800000" if SH_SDK7780 diff --git a/arch/sh/Makefile b/arch/sh/Makefile index da9cf952f33c..2faebfd72eca 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -15,11 +15,7 @@ ifneq ($(SUBARCH),$(ARCH)) endif endif -ifeq ($(ARCH),sh) KBUILD_DEFCONFIG := shx3_defconfig -else -KBUILD_DEFCONFIG := cayman_defconfig -endif isa-y := any isa-$(CONFIG_SH_DSP) := sh @@ -143,7 +139,6 @@ machdir-$(CONFIG_SH_SH7763RDP) += mach-sh7763rdp machdir-$(CONFIG_SH_SH4202_MICRODEV) += mach-microdev machdir-$(CONFIG_SH_LANDISK) += mach-landisk machdir-$(CONFIG_SH_LBOX_RE2) += mach-lboxre2 -machdir-$(CONFIG_SH_CAYMAN) += mach-cayman machdir-$(CONFIG_SH_RSK) += mach-rsk ifneq ($(machdir-y),) diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig index fb0ca0c1efe1..83bcb6d2daca 100644 --- a/arch/sh/boards/Kconfig +++ b/arch/sh/boards/Kconfig @@ -340,12 +340,6 @@ config SH_MAGIC_PANEL_R2 help Select Magic Panel R2 if configuring for Magic Panel R2. -config SH_CAYMAN - bool "Hitachi Cayman" - depends on CPU_SUBTYPE_SH5_101 || CPU_SUBTYPE_SH5_103 - select HAVE_PCI - select ARCH_MIGHT_HAVE_PC_SERIO - config SH_POLARIS bool "SMSC Polaris" select CPU_HAS_IPR_IRQ diff --git a/arch/sh/boards/board-sh2007.c b/arch/sh/boards/board-sh2007.c index ef9c87deeb08..6ea85e480851 100644 --- a/arch/sh/boards/board-sh2007.c +++ b/arch/sh/boards/board-sh2007.c @@ -126,14 +126,14 @@ static void __init sh2007_init_irq(void) */ static void __init sh2007_setup(char **cmdline_p) { - printk(KERN_INFO "SH-2007 Setup..."); + pr_info("SH-2007 Setup..."); /* setup wait control registers for area 5 */ __raw_writel(CS5BCR_D, CS5BCR); __raw_writel(CS5WCR_D, CS5WCR); __raw_writel(CS5PCR_D, CS5PCR); - printk(KERN_INFO " done.\n"); + pr_cont(" done.\n"); } /* diff --git a/arch/sh/boards/mach-cayman/Makefile b/arch/sh/boards/mach-cayman/Makefile deleted file mode 100644 index 775a4be57434..000000000000 --- a/arch/sh/boards/mach-cayman/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Makefile for the Hitachi Cayman specific parts of the kernel -# -obj-y := setup.o irq.o panic.o diff --git a/arch/sh/boards/mach-cayman/irq.c b/arch/sh/boards/mach-cayman/irq.c deleted file mode 100644 index 0305d0b51730..000000000000 --- a/arch/sh/boards/mach-cayman/irq.c +++ /dev/null @@ -1,148 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * arch/sh/mach-cayman/irq.c - SH-5 Cayman Interrupt Support - * - * This file handles the board specific parts of the Cayman interrupt system - * - * Copyright (C) 2002 Stuart Menefy - */ -#include <linux/io.h> -#include <linux/irq.h> -#include <linux/interrupt.h> -#include <linux/signal.h> -#include <cpu/irq.h> -#include <asm/page.h> - -/* Setup for the SMSC FDC37C935 / LAN91C100FD */ -#define SMSC_IRQ IRQ_IRL1 - -/* Setup for PCI Bus 2, which transmits interrupts via the EPLD */ -#define PCI2_IRQ IRQ_IRL3 - -unsigned long epld_virt; - -#define EPLD_BASE 0x04002000 -#define EPLD_STATUS_BASE (epld_virt + 0x10) -#define EPLD_MASK_BASE (epld_virt + 0x20) - -/* Note the SMSC SuperIO chip and SMSC LAN chip interrupts are all muxed onto - the same SH-5 interrupt */ - -static irqreturn_t cayman_interrupt_smsc(int irq, void *dev_id) -{ - printk(KERN_INFO "CAYMAN: spurious SMSC interrupt\n"); - return IRQ_NONE; -} - -static irqreturn_t cayman_interrupt_pci2(int irq, 
void *dev_id) -{ - printk(KERN_INFO "CAYMAN: spurious PCI interrupt, IRQ %d\n", irq); - return IRQ_NONE; -} - -static void enable_cayman_irq(struct irq_data *data) -{ - unsigned int irq = data->irq; - unsigned long flags; - unsigned long mask; - unsigned int reg; - unsigned char bit; - - irq -= START_EXT_IRQS; - reg = EPLD_MASK_BASE + ((irq / 8) << 2); - bit = 1<<(irq % 8); - local_irq_save(flags); - mask = __raw_readl(reg); - mask |= bit; - __raw_writel(mask, reg); - local_irq_restore(flags); -} - -static void disable_cayman_irq(struct irq_data *data) -{ - unsigned int irq = data->irq; - unsigned long flags; - unsigned long mask; - unsigned int reg; - unsigned char bit; - - irq -= START_EXT_IRQS; - reg = EPLD_MASK_BASE + ((irq / 8) << 2); - bit = 1<<(irq % 8); - local_irq_save(flags); - mask = __raw_readl(reg); - mask &= ~bit; - __raw_writel(mask, reg); - local_irq_restore(flags); -} - -struct irq_chip cayman_irq_type = { - .name = "Cayman-IRQ", - .irq_unmask = enable_cayman_irq, - .irq_mask = disable_cayman_irq, -}; - -int cayman_irq_demux(int evt) -{ - int irq = intc_evt_to_irq[evt]; - - if (irq == SMSC_IRQ) { - unsigned long status; - int i; - - status = __raw_readl(EPLD_STATUS_BASE) & - __raw_readl(EPLD_MASK_BASE) & 0xff; - if (status == 0) { - irq = -1; - } else { - for (i=0; i<8; i++) { - if (status & (1<<i)) - break; - } - irq = START_EXT_IRQS + i; - } - } - - if (irq == PCI2_IRQ) { - unsigned long status; - int i; - - status = __raw_readl(EPLD_STATUS_BASE + 3 * sizeof(u32)) & - __raw_readl(EPLD_MASK_BASE + 3 * sizeof(u32)) & 0xff; - if (status == 0) { - irq = -1; - } else { - for (i=0; i<8; i++) { - if (status & (1<<i)) - break; - } - irq = START_EXT_IRQS + (3 * 8) + i; - } - } - - return irq; -} - -void init_cayman_irq(void) -{ - int i; - - epld_virt = (unsigned long)ioremap(EPLD_BASE, 1024); - if (!epld_virt) { - printk(KERN_ERR "Cayman IRQ: Unable to remap EPLD\n"); - return; - } - - for (i = 0; i < NR_EXT_IRQS; i++) { - irq_set_chip_and_handler(START_EXT_IRQS + i, - &cayman_irq_type, handle_level_irq); - } - - /* Setup the SMSC interrupt */ - if (request_irq(SMSC_IRQ, cayman_interrupt_smsc, 0, "Cayman SMSC Mux", - NULL)) - pr_err("Failed to register Cayman SMSC Mux interrupt\n"); - if (request_irq(PCI2_IRQ, cayman_interrupt_pci2, 0, "Cayman PCI2 Mux", - NULL)) - pr_err("Failed to register Cayman PCI2 Mux interrupt\n"); -} diff --git a/arch/sh/boards/mach-cayman/panic.c b/arch/sh/boards/mach-cayman/panic.c deleted file mode 100644 index cfc46314e7d9..000000000000 --- a/arch/sh/boards/mach-cayman/panic.c +++ /dev/null @@ -1,46 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2003 Richard Curnow, SuperH UK Limited - */ - -#include <linux/kernel.h> -#include <linux/io.h> -#include <cpu/registers.h> - -/* THIS IS A PHYSICAL ADDRESS */ -#define HDSP2534_ADDR (0x04002100) - -static void poor_mans_delay(void) -{ - int i; - - for (i = 0; i < 2500000; i++) - cpu_relax(); -} - -static void show_value(unsigned long x) -{ - int i; - unsigned nibble; - for (i = 0; i < 8; i++) { - nibble = ((x >> (i * 4)) & 0xf); - - __raw_writeb(nibble + ((nibble > 9) ? 
55 : 48), - HDSP2534_ADDR + 0xe0 + ((7 - i) << 2)); - } -} - -void -panic_handler(unsigned long panicPC, unsigned long panicSSR, - unsigned long panicEXPEVT) -{ - while (1) { - /* This piece of code displays the PC on the LED display */ - show_value(panicPC); - poor_mans_delay(); - show_value(panicSSR); - poor_mans_delay(); - show_value(panicEXPEVT); - poor_mans_delay(); - } -} diff --git a/arch/sh/boards/mach-cayman/setup.c b/arch/sh/boards/mach-cayman/setup.c deleted file mode 100644 index 8ef76e288da0..000000000000 --- a/arch/sh/boards/mach-cayman/setup.c +++ /dev/null @@ -1,181 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * arch/sh/mach-cayman/setup.c - * - * SH5 Cayman support - * - * Copyright (C) 2002 David J. Mckay & Benedict Gaster - * Copyright (C) 2003 - 2007 Paul Mundt - */ -#include <linux/init.h> -#include <linux/io.h> -#include <linux/kernel.h> -#include <cpu/irq.h> - -/* - * Platform Dependent Interrupt Priorities. - */ - -/* Using defaults defined in irq.h */ -#define RES NO_PRIORITY /* Disabled */ -#define IR0 IRL0_PRIORITY /* IRLs */ -#define IR1 IRL1_PRIORITY -#define IR2 IRL2_PRIORITY -#define IR3 IRL3_PRIORITY -#define PCA INTA_PRIORITY /* PCI Ints */ -#define PCB INTB_PRIORITY -#define PCC INTC_PRIORITY -#define PCD INTD_PRIORITY -#define SER TOP_PRIORITY -#define ERR TOP_PRIORITY -#define PW0 TOP_PRIORITY -#define PW1 TOP_PRIORITY -#define PW2 TOP_PRIORITY -#define PW3 TOP_PRIORITY -#define DM0 NO_PRIORITY /* DMA Ints */ -#define DM1 NO_PRIORITY -#define DM2 NO_PRIORITY -#define DM3 NO_PRIORITY -#define DAE NO_PRIORITY -#define TU0 TIMER_PRIORITY /* TMU Ints */ -#define TU1 NO_PRIORITY -#define TU2 NO_PRIORITY -#define TI2 NO_PRIORITY -#define ATI NO_PRIORITY /* RTC Ints */ -#define PRI NO_PRIORITY -#define CUI RTC_PRIORITY -#define ERI SCIF_PRIORITY /* SCIF Ints */ -#define RXI SCIF_PRIORITY -#define BRI SCIF_PRIORITY -#define TXI SCIF_PRIORITY -#define ITI TOP_PRIORITY /* WDT Ints */ - -/* Setup for the SMSC FDC37C935 */ -#define SMSC_SUPERIO_BASE 0x04000000 -#define SMSC_CONFIG_PORT_ADDR 0x3f0 -#define SMSC_INDEX_PORT_ADDR SMSC_CONFIG_PORT_ADDR -#define SMSC_DATA_PORT_ADDR 0x3f1 - -#define SMSC_ENTER_CONFIG_KEY 0x55 -#define SMSC_EXIT_CONFIG_KEY 0xaa - -#define SMCS_LOGICAL_DEV_INDEX 0x07 -#define SMSC_DEVICE_ID_INDEX 0x20 -#define SMSC_DEVICE_REV_INDEX 0x21 -#define SMSC_ACTIVATE_INDEX 0x30 -#define SMSC_PRIMARY_BASE_INDEX 0x60 -#define SMSC_SECONDARY_BASE_INDEX 0x62 -#define SMSC_PRIMARY_INT_INDEX 0x70 -#define SMSC_SECONDARY_INT_INDEX 0x72 - -#define SMSC_IDE1_DEVICE 1 -#define SMSC_KEYBOARD_DEVICE 7 -#define SMSC_CONFIG_REGISTERS 8 - -#define SMSC_SUPERIO_READ_INDEXED(index) ({ \ - outb((index), SMSC_INDEX_PORT_ADDR); \ - inb(SMSC_DATA_PORT_ADDR); }) -#define SMSC_SUPERIO_WRITE_INDEXED(val, index) ({ \ - outb((index), SMSC_INDEX_PORT_ADDR); \ - outb((val), SMSC_DATA_PORT_ADDR); }) - -#define IDE1_PRIMARY_BASE 0x01f0 -#define IDE1_SECONDARY_BASE 0x03f6 - -unsigned long smsc_superio_virt; - -int platform_int_priority[NR_INTC_IRQS] = { - IR0, IR1, IR2, IR3, PCA, PCB, PCC, PCD, /* IRQ 0- 7 */ - RES, RES, RES, RES, SER, ERR, PW3, PW2, /* IRQ 8-15 */ - PW1, PW0, DM0, DM1, DM2, DM3, DAE, RES, /* IRQ 16-23 */ - RES, RES, RES, RES, RES, RES, RES, RES, /* IRQ 24-31 */ - TU0, TU1, TU2, TI2, ATI, PRI, CUI, ERI, /* IRQ 32-39 */ - RXI, BRI, TXI, RES, RES, RES, RES, RES, /* IRQ 40-47 */ - RES, RES, RES, RES, RES, RES, RES, RES, /* IRQ 48-55 */ - RES, RES, RES, RES, RES, RES, RES, ITI, /* IRQ 56-63 */ -}; - -static int __init smsc_superio_setup(void) -{ - unsigned char 
devid, devrev; - - smsc_superio_virt = (unsigned long)ioremap(SMSC_SUPERIO_BASE, 1024); - if (!smsc_superio_virt) { - panic("Unable to remap SMSC SuperIO\n"); - } - - /* Initially the chip is in run state */ - /* Put it into configuration state */ - outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR); - outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR); - - /* Read device ID info */ - devid = SMSC_SUPERIO_READ_INDEXED(SMSC_DEVICE_ID_INDEX); - devrev = SMSC_SUPERIO_READ_INDEXED(SMSC_DEVICE_REV_INDEX); - printk("SMSC SuperIO devid %02x rev %02x\n", devid, devrev); - - /* Select the keyboard device */ - SMSC_SUPERIO_WRITE_INDEXED(SMSC_KEYBOARD_DEVICE, SMCS_LOGICAL_DEV_INDEX); - - /* enable it */ - SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX); - - /* Select the interrupts */ - /* On a PC keyboard is IRQ1, mouse is IRQ12 */ - SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_PRIMARY_INT_INDEX); - SMSC_SUPERIO_WRITE_INDEXED(12, SMSC_SECONDARY_INT_INDEX); - - /* - * Only IDE1 exists on the Cayman - */ - - /* Power it on */ - SMSC_SUPERIO_WRITE_INDEXED(1 << SMSC_IDE1_DEVICE, 0x22); - - SMSC_SUPERIO_WRITE_INDEXED(SMSC_IDE1_DEVICE, SMCS_LOGICAL_DEV_INDEX); - SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX); - - SMSC_SUPERIO_WRITE_INDEXED(IDE1_PRIMARY_BASE >> 8, - SMSC_PRIMARY_BASE_INDEX + 0); - SMSC_SUPERIO_WRITE_INDEXED(IDE1_PRIMARY_BASE & 0xff, - SMSC_PRIMARY_BASE_INDEX + 1); - - SMSC_SUPERIO_WRITE_INDEXED(IDE1_SECONDARY_BASE >> 8, - SMSC_SECONDARY_BASE_INDEX + 0); - SMSC_SUPERIO_WRITE_INDEXED(IDE1_SECONDARY_BASE & 0xff, - SMSC_SECONDARY_BASE_INDEX + 1); - - SMSC_SUPERIO_WRITE_INDEXED(14, SMSC_PRIMARY_INT_INDEX); - - SMSC_SUPERIO_WRITE_INDEXED(SMSC_CONFIG_REGISTERS, - SMCS_LOGICAL_DEV_INDEX); - - SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc2); /* GP42 = nIDE1_OE */ - SMSC_SUPERIO_WRITE_INDEXED(0x01, 0xc5); /* GP45 = IDE1_IRQ */ - SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc6); /* GP46 = nIOROP */ - SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc7); /* GP47 = nIOWOP */ - - /* Exit the configuration state */ - outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR); - - return 0; -} -device_initcall(smsc_superio_setup); - -static void __iomem *cayman_ioport_map(unsigned long port, unsigned int len) -{ - if (port < 0x400) { - extern unsigned long smsc_superio_virt; - return (void __iomem *)((port << 2) | smsc_superio_virt); - } - - return (void __iomem *)port; -} - -extern void init_cayman_irq(void); - -static struct sh_machine_vector mv_cayman __initmv = { - .mv_name = "Hitachi Cayman", - .mv_ioport_map = cayman_ioport_map, - .mv_init_irq = init_cayman_irq, -}; diff --git a/arch/sh/boards/mach-landisk/setup.c b/arch/sh/boards/mach-landisk/setup.c index 16b4d8b0bb85..2c44b94f82fb 100644 --- a/arch/sh/boards/mach-landisk/setup.c +++ b/arch/sh/boards/mach-landisk/setup.c @@ -82,6 +82,9 @@ device_initcall(landisk_devices_setup); static void __init landisk_setup(char **cmdline_p) { + /* I/O port identity mapping */ + __set_io_port_base(0); + /* LED ON */ __raw_writeb(__raw_readb(PA_LED) | 0x03, PA_LED); diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile index ad0e2403e56f..589d2d8a573d 100644 --- a/arch/sh/boot/compressed/Makefile +++ b/arch/sh/boot/compressed/Makefile @@ -28,10 +28,7 @@ IMAGE_OFFSET := $(shell /bin/bash -c 'printf "0x%08x" \ $(CONFIG_BOOT_LINK_OFFSET)]') endif -ifeq ($(CONFIG_MCOUNT),y) -ORIG_CFLAGS := $(KBUILD_CFLAGS) -KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) -endif +ccflags-remove-$(CONFIG_MCOUNT) += -pg LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(IMAGE_OFFSET) -e startup \ -T 
$(obj)/../../kernel/vmlinux.lds diff --git a/arch/sh/configs/cayman_defconfig b/arch/sh/configs/cayman_defconfig deleted file mode 100644 index 911437c163c7..000000000000 --- a/arch/sh/configs/cayman_defconfig +++ /dev/null @@ -1,66 +0,0 @@ -CONFIG_POSIX_MQUEUE=y -CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_FORCE_MAX_ZONEORDER=11 -CONFIG_MEMORY_START=0x80000000 -CONFIG_MEMORY_SIZE=0x00400000 -CONFIG_FLATMEM_MANUAL=y -CONFIG_CACHE_OFF=y -CONFIG_SH_PCLK_FREQ=50000000 -CONFIG_HEARTBEAT=y -CONFIG_PREEMPT=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_PNP=y -# CONFIG_IPV6 is not set -# CONFIG_FW_LOADER is not set -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_RAM=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_SCSI_MULTI_LUN=y -CONFIG_SCSI_SPI_ATTRS=y -CONFIG_NETDEVICES=y -CONFIG_NET_ETHERNET=y -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set -# CONFIG_INPUT_KEYBOARD is not set -# CONFIG_INPUT_MOUSE is not set -# CONFIG_SERIO is not set -CONFIG_HW_RANDOM=y -CONFIG_I2C=m -CONFIG_WATCHDOG=y -CONFIG_FB=y -CONFIG_FIRMWARE_EDID=y -CONFIG_FB_MODE_HELPERS=y -CONFIG_FB_SH_MOBILE_LCDC=m -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_FONTS=y -CONFIG_FONT_8x16=y -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -# CONFIG_LOGO_LINUX_CLUT224 is not set -# CONFIG_LOGO_SUPERH_MONO is not set -# CONFIG_LOGO_SUPERH_VGA16 is not set -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_MINIX_FS=y -CONFIG_ROMFS_FS=y -CONFIG_NFS_FS=y -CONFIG_NFS_V3=y -CONFIG_ROOT_NFS=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_FS=y -CONFIG_DEBUG_KERNEL=y -CONFIG_DETECT_HUNG_TASK=y -CONFIG_SCHEDSTATS=y -CONFIG_FRAME_POINTER=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/dreamcast_defconfig b/arch/sh/configs/dreamcast_defconfig index ae067e0b15e3..6a82c7b8ff32 100644 --- a/arch/sh/configs/dreamcast_defconfig +++ b/arch/sh/configs/dreamcast_defconfig @@ -1,7 +1,6 @@ CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_MODULES=y diff --git a/arch/sh/configs/espt_defconfig b/arch/sh/configs/espt_defconfig index a5b865a75d22..9a988c347e9d 100644 --- a/arch/sh/configs/espt_defconfig +++ b/arch/sh/configs/espt_defconfig @@ -5,7 +5,6 @@ CONFIG_LOG_BUF_SHIFT=14 CONFIG_NAMESPACES=y CONFIG_UTS_NS=y CONFIG_IPC_NS=y -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=y diff --git a/arch/sh/configs/hp6xx_defconfig b/arch/sh/configs/hp6xx_defconfig index a92db6694ce2..70e6605d7f7e 100644 --- a/arch/sh/configs/hp6xx_defconfig +++ b/arch/sh/configs/hp6xx_defconfig @@ -3,7 +3,6 @@ CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y # CONFIG_BLK_DEV_BSG is not set CONFIG_CPU_SUBTYPE_SH7709=y diff --git a/arch/sh/configs/landisk_defconfig b/arch/sh/configs/landisk_defconfig index 567af752b1bb..ba6ec042606f 100644 --- a/arch/sh/configs/landisk_defconfig +++ b/arch/sh/configs/landisk_defconfig @@ -1,6 +1,5 @@ CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_KALLSYMS_EXTRA_PASS=y CONFIG_SLAB=y CONFIG_MODULES=y diff --git a/arch/sh/configs/lboxre2_defconfig b/arch/sh/configs/lboxre2_defconfig index 10f6d371ce2c..05e4ac6fed5f 100644 --- 
a/arch/sh/configs/lboxre2_defconfig +++ b/arch/sh/configs/lboxre2_defconfig @@ -1,6 +1,5 @@ CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_KALLSYMS_EXTRA_PASS=y CONFIG_SLAB=y CONFIG_MODULES=y diff --git a/arch/sh/configs/microdev_defconfig b/arch/sh/configs/microdev_defconfig index ed84d1303acf..c65667d00313 100644 --- a/arch/sh/configs/microdev_defconfig +++ b/arch/sh/configs/microdev_defconfig @@ -2,7 +2,6 @@ CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y # CONFIG_BLK_DEV_BSG is not set CONFIG_CPU_SUBTYPE_SH4_202=y diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig index 37e9521a99e5..a24cf8cd2cea 100644 --- a/arch/sh/configs/migor_defconfig +++ b/arch/sh/configs/migor_defconfig @@ -4,7 +4,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=y diff --git a/arch/sh/configs/r7780mp_defconfig b/arch/sh/configs/r7780mp_defconfig index c97ec60cff27..e922659fdadb 100644 --- a/arch/sh/configs/r7780mp_defconfig +++ b/arch/sh/configs/r7780mp_defconfig @@ -3,7 +3,6 @@ CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set CONFIG_SLAB=y diff --git a/arch/sh/configs/r7785rp_defconfig b/arch/sh/configs/r7785rp_defconfig index 55fce65eb454..5978866358ec 100644 --- a/arch/sh/configs/r7785rp_defconfig +++ b/arch/sh/configs/r7785rp_defconfig @@ -7,7 +7,6 @@ CONFIG_RCU_TRACE=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=y diff --git a/arch/sh/configs/rts7751r2d1_defconfig b/arch/sh/configs/rts7751r2d1_defconfig index 6a3cfe08295f..fc9c22152b08 100644 --- a/arch/sh/configs/rts7751r2d1_defconfig +++ b/arch/sh/configs/rts7751r2d1_defconfig @@ -1,7 +1,6 @@ CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=y diff --git a/arch/sh/configs/rts7751r2dplus_defconfig b/arch/sh/configs/rts7751r2dplus_defconfig index 2b3d7d280672..ff3fd6787fd6 100644 --- a/arch/sh/configs/rts7751r2dplus_defconfig +++ b/arch/sh/configs/rts7751r2dplus_defconfig @@ -1,7 +1,6 @@ CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=y diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig index 21a43f14ffac..ff5bb4489922 100644 --- a/arch/sh/configs/se7206_defconfig +++ b/arch/sh/configs/se7206_defconfig @@ -18,7 +18,6 @@ CONFIG_USER_NS=y CONFIG_PID_NS=y CONFIG_BLK_DEV_INITRD=y # CONFIG_UID16 is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_KALLSYMS_ALL=y # CONFIG_ELF_CORE is not set # CONFIG_COMPAT_BRK is not set diff --git a/arch/sh/configs/se7343_defconfig b/arch/sh/configs/se7343_defconfig index 4e794e719a28..5d6c19338ebf 100644 --- a/arch/sh/configs/se7343_defconfig +++ b/arch/sh/configs/se7343_defconfig @@ -2,7 +2,6 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set # CONFIG_SHMEM is not set diff --git 
a/arch/sh/configs/se7619_defconfig b/arch/sh/configs/se7619_defconfig index 3264415a5931..71a672c30716 100644 --- a/arch/sh/configs/se7619_defconfig +++ b/arch/sh/configs/se7619_defconfig @@ -1,7 +1,6 @@ # CONFIG_LOCALVERSION_AUTO is not set CONFIG_LOG_BUF_SHIFT=14 # CONFIG_UID16 is not set -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_KALLSYMS is not set # CONFIG_HOTPLUG is not set # CONFIG_ELF_CORE is not set diff --git a/arch/sh/configs/se7705_defconfig b/arch/sh/configs/se7705_defconfig index 4496b94b7d88..ed00a6eeadf5 100644 --- a/arch/sh/configs/se7705_defconfig +++ b/arch/sh/configs/se7705_defconfig @@ -2,7 +2,6 @@ CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_KALLSYMS is not set # CONFIG_HOTPLUG is not set CONFIG_SLAB=y diff --git a/arch/sh/configs/se7750_defconfig b/arch/sh/configs/se7750_defconfig index b23f67542728..3f1c13799d79 100644 --- a/arch/sh/configs/se7750_defconfig +++ b/arch/sh/configs/se7750_defconfig @@ -5,7 +5,6 @@ CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_HOTPLUG is not set CONFIG_SLAB=y CONFIG_MODULES=y diff --git a/arch/sh/configs/se7751_defconfig b/arch/sh/configs/se7751_defconfig index 162343683937..4a024065bb75 100644 --- a/arch/sh/configs/se7751_defconfig +++ b/arch/sh/configs/se7751_defconfig @@ -3,7 +3,6 @@ CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_HOTPLUG is not set CONFIG_SLAB=y CONFIG_MODULES=y diff --git a/arch/sh/configs/secureedge5410_defconfig b/arch/sh/configs/secureedge5410_defconfig index 360592d63a2f..8422599cfb04 100644 --- a/arch/sh/configs/secureedge5410_defconfig +++ b/arch/sh/configs/secureedge5410_defconfig @@ -1,7 +1,6 @@ # CONFIG_SWAP is not set CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_HOTPLUG is not set CONFIG_SLAB=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig index 87db9a84b5ec..f0073ed39947 100644 --- a/arch/sh/configs/sh03_defconfig +++ b/arch/sh/configs/sh03_defconfig @@ -3,7 +3,6 @@ CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=m diff --git a/arch/sh/configs/sh7710voipgw_defconfig b/arch/sh/configs/sh7710voipgw_defconfig index 08426913c0e3..0d814770b07f 100644 --- a/arch/sh/configs/sh7710voipgw_defconfig +++ b/arch/sh/configs/sh7710voipgw_defconfig @@ -2,7 +2,6 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set # CONFIG_SHMEM is not set diff --git a/arch/sh/configs/sh7757lcr_defconfig b/arch/sh/configs/sh7757lcr_defconfig index d0933a9b9799..a2700ab165af 100644 --- a/arch/sh/configs/sh7757lcr_defconfig +++ b/arch/sh/configs/sh7757lcr_defconfig @@ -8,7 +8,6 @@ CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_KALLSYMS_ALL=y CONFIG_SLAB=y CONFIG_MODULES=y diff --git a/arch/sh/configs/sh7763rdp_defconfig b/arch/sh/configs/sh7763rdp_defconfig index d0a0aa74cecf..26c5fd02c87a 100644 --- a/arch/sh/configs/sh7763rdp_defconfig +++ b/arch/sh/configs/sh7763rdp_defconfig @@ -5,7 +5,6 @@ 
CONFIG_LOG_BUF_SHIFT=14 CONFIG_NAMESPACES=y CONFIG_UTS_NS=y CONFIG_IPC_NS=y -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=y diff --git a/arch/sh/configs/shmin_defconfig b/arch/sh/configs/shmin_defconfig index a27b129b93c5..c0b6f40d01cc 100644 --- a/arch/sh/configs/shmin_defconfig +++ b/arch/sh/configs/shmin_defconfig @@ -1,7 +1,6 @@ # CONFIG_SWAP is not set CONFIG_LOG_BUF_SHIFT=14 # CONFIG_UID16 is not set -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_KALLSYMS is not set # CONFIG_HOTPLUG is not set # CONFIG_BUG is not set diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig index 4ec961ace688..ba887f1351be 100644 --- a/arch/sh/configs/titan_defconfig +++ b/arch/sh/configs/titan_defconfig @@ -6,7 +6,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=16 CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y diff --git a/arch/sh/drivers/pci/Makefile b/arch/sh/drivers/pci/Makefile index a5c1e9066f83..d313fd3ce709 100644 --- a/arch/sh/drivers/pci/Makefile +++ b/arch/sh/drivers/pci/Makefile @@ -25,4 +25,3 @@ obj-$(CONFIG_SH_7780_SOLUTION_ENGINE) += fixups-sdk7780.o obj-$(CONFIG_SH_TITAN) += fixups-titan.o obj-$(CONFIG_SH_LANDISK) += fixups-landisk.o obj-$(CONFIG_SH_LBOX_RE2) += fixups-rts7751r2d.o -obj-$(CONFIG_SH_CAYMAN) += fixups-cayman.o diff --git a/arch/sh/drivers/pci/common.c b/arch/sh/drivers/pci/common.c index fe163ecd0719..2fd2b77e12ce 100644 --- a/arch/sh/drivers/pci/common.c +++ b/arch/sh/drivers/pci/common.c @@ -54,7 +54,7 @@ int __init pci_is_66mhz_capable(struct pci_channel *hose, int cap66 = -1; u16 stat; - printk(KERN_INFO "PCI: Checking 66MHz capabilities...\n"); + pr_info("PCI: Checking 66MHz capabilities...\n"); for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++) { if (PCI_FUNC(pci_devfn)) @@ -134,7 +134,7 @@ unsigned int pcibios_handle_status_errors(unsigned long addr, pcibios_report_status(PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT, 1); - printk("\n"); + pr_cont("\n"); cmd |= PCI_STATUS_REC_TARGET_ABORT; } @@ -143,7 +143,7 @@ unsigned int pcibios_handle_status_errors(unsigned long addr, printk(KERN_DEBUG "PCI: parity error detected: "); pcibios_report_status(PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY, 1); - printk("\n"); + pr_cont("\n"); cmd |= PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY; diff --git a/arch/sh/drivers/pci/fixups-cayman.c b/arch/sh/drivers/pci/fixups-cayman.c deleted file mode 100644 index c797bfbe2e98..000000000000 --- a/arch/sh/drivers/pci/fixups-cayman.c +++ /dev/null @@ -1,78 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/pci.h> -#include <linux/types.h> -#include <cpu/irq.h> -#include "pci-sh5.h" - -int pcibios_map_platform_irq(const struct pci_dev *dev, u8 slot, u8 pin) -{ - int result = -1; - - /* The complication here is that the PCI IRQ lines from the Cayman's 2 - 5V slots get into the CPU via a different path from the IRQ lines - from the 3 3.3V slots. Thus, we have to detect whether the card's - interrupts go via the 5V or 3.3V path, i.e. the 'bridge swizzling' - at the point where we cross from 5V to 3.3V is not the normal case. - - The added complication is that we don't know that the 5V slots are - always bus 2, because a card containing a PCI-PCI bridge may be - plugged into a 3.3V slot, and this changes the bus numbering. 
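The comment being deleted in fixups-cayman.c describes the usual interrupt-swizzling problem: the pin a card raises must be rotated across every PCI-PCI bridge between it and the host, one slot-dependent rotation per bridge. Stripped of Cayman's 5V/3.3V special case, the walk looks roughly like this (a sketch of the generic pattern; most platforms get it via pci_common_swizzle()):

#include <linux/pci.h>

/*
 * Walk from a device up to the root bus, applying the standard
 * INTA..INTD rotation at each PCI-PCI bridge. Returns the pin
 * number (1-4) as seen at the root bus, which the host
 * controller can then map to a real IRQ line.
 */
static u8 swizzle_to_root(struct pci_dev *dev, u8 pin)
{
	while (dev->bus->self) {	/* ->self is NULL at the root */
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	return pin;
}

Cayman could not use the generic helper directly because its 5V and 3.3V slots reach the CPU along different paths, so the deleted code records each slot/pin pair on the way up and re-examines them afterwards.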
- - Also, the Cayman has an intermediate PCI bus that goes a custom - expansion board header (and to the secondary bridge). This bus has - never been used in practice. - - The 1ary onboard PCI-PCI bridge is device 3 on bus 0 - The 2ary onboard PCI-PCI bridge is device 0 on the 2ary bus of - the 1ary bridge. - */ - - struct slot_pin { - int slot; - int pin; - } path[4]; - int i=0; - - while (dev->bus->number > 0) { - - slot = path[i].slot = PCI_SLOT(dev->devfn); - pin = path[i].pin = pci_swizzle_interrupt_pin(dev, pin); - dev = dev->bus->self; - i++; - if (i > 3) panic("PCI path to root bus too long!\n"); - } - - slot = PCI_SLOT(dev->devfn); - /* This is the slot on bus 0 through which the device is eventually - reachable. */ - - /* Now work back up. */ - if ((slot < 3) || (i == 0)) { - /* Bus 0 (incl. PCI-PCI bridge itself) : perform the final - swizzle now. */ - result = IRQ_INTA + pci_swizzle_interrupt_pin(dev, pin) - 1; - } else { - i--; - slot = path[i].slot; - pin = path[i].pin; - if (slot > 0) { - panic("PCI expansion bus device found - not handled!\n"); - } else { - if (i > 0) { - /* 5V slots */ - i--; - slot = path[i].slot; - pin = path[i].pin; - /* 'pin' was swizzled earlier wrt slot, don't do it again. */ - result = IRQ_P2INTA + (pin - 1); - } else { - /* IRQ for 2ary PCI-PCI bridge : unused */ - result = -1; - } - } - } - - return result; -} diff --git a/arch/sh/drivers/pci/pci-sh7780.c b/arch/sh/drivers/pci/pci-sh7780.c index 287b3a68570c..9a624a6ee354 100644 --- a/arch/sh/drivers/pci/pci-sh7780.c +++ b/arch/sh/drivers/pci/pci-sh7780.c @@ -148,7 +148,7 @@ static irqreturn_t sh7780_pci_serr_irq(int irq, void *dev_id) printk(KERN_DEBUG "PCI: system error received: "); pcibios_report_status(PCI_STATUS_SIG_SYSTEM_ERROR, 1); - printk("\n"); + pr_cont("\n"); /* Deassert SERR */ __raw_writel(SH4_PCIINTM_SDIM, hose->reg_base + SH4_PCIINTM); @@ -179,7 +179,7 @@ static int __init sh7780_pci_setup_irqs(struct pci_channel *hose) ret = request_irq(hose->serr_irq, sh7780_pci_serr_irq, 0, "PCI SERR interrupt", hose); if (unlikely(ret)) { - printk(KERN_ERR "PCI: Failed hooking SERR IRQ\n"); + pr_err("PCI: Failed hooking SERR IRQ\n"); return ret; } @@ -250,7 +250,7 @@ static int __init sh7780_pci_init(void) const char *type; int ret, i; - printk(KERN_NOTICE "PCI: Starting initialization.\n"); + pr_notice("PCI: Starting initialization.\n"); chan->reg_base = 0xfe040000; @@ -270,7 +270,7 @@ static int __init sh7780_pci_init(void) id = __raw_readw(chan->reg_base + PCI_VENDOR_ID); if (id != PCI_VENDOR_ID_RENESAS) { - printk(KERN_ERR "PCI: Unknown vendor ID 0x%04x.\n", id); + pr_err("PCI: Unknown vendor ID 0x%04x.\n", id); return -ENODEV; } @@ -281,14 +281,13 @@ static int __init sh7780_pci_init(void) (id == PCI_DEVICE_ID_RENESAS_SH7785) ? "SH7785" : NULL; if (unlikely(!type)) { - printk(KERN_ERR "PCI: Found an unsupported Renesas host " - "controller, device id 0x%04x.\n", id); + pr_err("PCI: Found an unsupported Renesas host controller, device id 0x%04x.\n", + id); return -EINVAL; } - printk(KERN_NOTICE "PCI: Found a Renesas %s host " - "controller, revision %d.\n", type, - __raw_readb(chan->reg_base + PCI_REVISION_ID)); + pr_notice("PCI: Found a Renesas %s host controller, revision %d.\n", + type, __raw_readb(chan->reg_base + PCI_REVISION_ID)); /* * Now throw it in to register initialization mode and @@ -395,9 +394,9 @@ static int __init sh7780_pci_init(void) sh7780_pci66_init(chan); - printk(KERN_NOTICE "PCI: Running at %dMHz.\n", - (__raw_readw(chan->reg_base + PCI_STATUS) & PCI_STATUS_66MHZ) ? 
- 66 : 33); + pr_notice("PCI: Running at %dMHz.\n", + (__raw_readw(chan->reg_base + PCI_STATUS) & PCI_STATUS_66MHZ) + ? 66 : 33); return 0; diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c index c7784e156964..6ab0b7377f66 100644 --- a/arch/sh/drivers/pci/pci.c +++ b/arch/sh/drivers/pci/pci.c @@ -120,8 +120,7 @@ int register_pci_controller(struct pci_channel *hose) * Do not panic here but later - this might happen before console init. */ if (!hose->io_map_base) { - printk(KERN_WARNING - "registering PCI controller with io_map_base unset\n"); + pr_warn("registering PCI controller with io_map_base unset\n"); } /* @@ -145,7 +144,7 @@ out: for (--i; i >= 0; i--) release_resource(&hose->resources[i]); - printk(KERN_WARNING "Skipping PCI bus scan due to resource conflict\n"); + pr_warn("Skipping PCI bus scan due to resource conflict\n"); return -1; } @@ -213,8 +212,8 @@ pcibios_bus_report_status_early(struct pci_channel *hose, pci_devfn, PCI_STATUS, status & status_mask); if (warn) - printk("(%02x:%02x: %04X) ", current_bus, - pci_devfn, status); + pr_cont("(%02x:%02x: %04X) ", current_bus, pci_devfn, + status); } } @@ -249,7 +248,7 @@ pcibios_bus_report_status(struct pci_bus *bus, unsigned int status_mask, pci_write_config_word(dev, PCI_STATUS, status & status_mask); if (warn) - printk("(%s: %04X) ", pci_name(dev), status); + pr_cont("(%s: %04X) ", pci_name(dev), status); } list_for_each_entry(dev, &bus->devices, bus_list) diff --git a/arch/sh/include/asm/adc.h b/arch/sh/include/asm/adc.h index 99ec66849559..feccfe639e38 100644 --- a/arch/sh/include/asm/adc.h +++ b/arch/sh/include/asm/adc.h @@ -1,7 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_ADC_H #define __ASM_ADC_H -#ifdef __KERNEL__ /* * Copyright (C) 2004 Andriy Skulysh */ @@ -10,5 +9,4 @@ int adc_single(unsigned int channel); -#endif /* __KERNEL__ */ #endif /* __ASM_ADC_H */ diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h index 34bfbcddcce0..468fba333e89 100644 --- a/arch/sh/include/asm/addrspace.h +++ b/arch/sh/include/asm/addrspace.h @@ -7,8 +7,6 @@ #ifndef __ASM_SH_ADDRSPACE_H #define __ASM_SH_ADDRSPACE_H -#ifdef __KERNEL__ - #include <cpu/addrspace.h> /* If this CPU supports segmentation, hook up the helpers */ @@ -62,5 +60,4 @@ #define P3_ADDR_MAX P4SEG #endif -#endif /* __KERNEL__ */ #endif /* __ASM_SH_ADDRSPACE_H */ diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h index 445dd14c448a..450b5854d7c6 100644 --- a/arch/sh/include/asm/bitops.h +++ b/arch/sh/include/asm/bitops.h @@ -2,8 +2,6 @@ #ifndef __ASM_SH_BITOPS_H #define __ASM_SH_BITOPS_H -#ifdef __KERNEL__ - #ifndef _LINUX_BITOPS_H #error only <linux/bitops.h> can be included directly #endif @@ -71,6 +69,4 @@ static inline unsigned long __ffs(unsigned long word) #include <asm-generic/bitops/__fls.h> #include <asm-generic/bitops/fls64.h> -#endif /* __KERNEL__ */ - #endif /* __ASM_SH_BITOPS_H */ diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h index 2408ac4873aa..a293343456af 100644 --- a/arch/sh/include/asm/cache.h +++ b/arch/sh/include/asm/cache.h @@ -8,7 +8,6 @@ */ #ifndef __ASM_SH_CACHE_H #define __ASM_SH_CACHE_H -#ifdef __KERNEL__ #include <linux/init.h> #include <cpu/cache.h> @@ -44,5 +43,4 @@ struct cache_info { unsigned long flags; }; #endif /* __ASSEMBLY__ */ -#endif /* __KERNEL__ */ #endif /* __ASM_SH_CACHE_H */ diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h index fe7400079b97..4486a865ff62 100644 --- 
a/arch/sh/include/asm/cacheflush.h +++ b/arch/sh/include/asm/cacheflush.h @@ -2,8 +2,6 @@ #ifndef __ASM_SH_CACHEFLUSH_H #define __ASM_SH_CACHEFLUSH_H -#ifdef __KERNEL__ - #include <linux/mm.h> /* @@ -109,5 +107,4 @@ static inline void *sh_cacheop_vaddr(void *vaddr) return vaddr; } -#endif /* __KERNEL__ */ #endif /* __ASM_SH_CACHEFLUSH_H */ diff --git a/arch/sh/include/asm/dma.h b/arch/sh/include/asm/dma.h index 4d5a21a891c0..17d23ae98c77 100644 --- a/arch/sh/include/asm/dma.h +++ b/arch/sh/include/asm/dma.h @@ -6,7 +6,6 @@ */ #ifndef __ASM_SH_DMA_H #define __ASM_SH_DMA_H -#ifdef __KERNEL__ #include <linux/spinlock.h> #include <linux/wait.h> @@ -144,5 +143,4 @@ extern int isa_dma_bridge_buggy; #define isa_dma_bridge_buggy (0) #endif -#endif /* __KERNEL__ */ #endif /* __ASM_SH_DMA_H */ diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h index 7661fb5d548a..2862d6d1cb64 100644 --- a/arch/sh/include/asm/elf.h +++ b/arch/sh/include/asm/elf.h @@ -90,7 +90,6 @@ typedef struct user_fpu_struct elf_fpregset_t; #endif #define ELF_ARCH EM_SH -#ifdef __KERNEL__ /* * This is used to ensure we don't load something for the wrong architecture. */ @@ -209,5 +208,4 @@ do { \ NEW_AUX_ENT(AT_L2_CACHESHAPE, l2_cache_shape); \ } while (0) -#endif /* __KERNEL__ */ #endif /* __ASM_SH_ELF_H */ diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h index 43cfaf929aa7..04584be8986c 100644 --- a/arch/sh/include/asm/fpu.h +++ b/arch/sh/include/asm/fpu.h @@ -37,11 +37,6 @@ struct user_regset; extern int do_fpu_inst(unsigned short, struct pt_regs *); extern int init_fpu(struct task_struct *); -extern int fpregs_get(struct task_struct *target, - const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf); - static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs) { if (task_thread_info(tsk)->status & TS_USEDFPU) { diff --git a/arch/sh/include/asm/freq.h b/arch/sh/include/asm/freq.h index 18133bf83738..87c23621b7ae 100644 --- a/arch/sh/include/asm/freq.h +++ b/arch/sh/include/asm/freq.h @@ -6,9 +6,7 @@ */ #ifndef __ASM_SH_FREQ_H #define __ASM_SH_FREQ_H -#ifdef __KERNEL__ #include <cpu/freq.h> -#endif /* __KERNEL__ */ #endif /* __ASM_SH_FREQ_H */ diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h index b39cda09fb95..b70f3fce6ed7 100644 --- a/arch/sh/include/asm/futex.h +++ b/arch/sh/include/asm/futex.h @@ -2,8 +2,6 @@ #ifndef __ASM_SH_FUTEX_H #define __ASM_SH_FUTEX_H -#ifdef __KERNEL__ - #include <linux/futex.h> #include <linux/uaccess.h> #include <asm/errno.h> @@ -71,5 +69,4 @@ static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, return ret; } -#endif /* __KERNEL__ */ #endif /* __ASM_SH_FUTEX_H */ diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 26f0f9b4658b..6d5c6463bc07 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -17,13 +17,12 @@ #include <asm/cache.h> #include <asm/addrspace.h> #include <asm/machvec.h> +#include <asm/page.h> #include <linux/pgtable.h> #include <asm-generic/iomap.h> -#ifdef __KERNEL__ #define __IO_PREFIX generic #include <asm/io_generic.h> -#include <asm/io_trapped.h> #include <asm-generic/pci_iomap.h> #include <mach/mangle-port.h> @@ -243,125 +242,38 @@ unsigned long long poke_real_address_q(unsigned long long addr, #define phys_to_virt(address) (__va(address)) #endif -/* - * On 32-bit SH, we traditionally have the whole physical address space - * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do - 
* not need to do anything but place the address in the proper segment. - * This is true for P1 and P2 addresses, as well as some P3 ones. - * However, most of the P3 addresses and newer cores using extended - * addressing need to map through page tables, so the ioremap() - * implementation becomes a bit more complicated. - * - * See arch/sh/mm/ioremap.c for additional notes on this. - * - * We cheat a bit and always return uncachable areas until we've fixed - * the drivers to handle caching properly. - * - * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply - * doesn't exist, so everything must go through page tables. - */ #ifdef CONFIG_MMU +void iounmap(void __iomem *addr); void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size, pgprot_t prot, void *caller); -void iounmap(void __iomem *addr); - -static inline void __iomem * -__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot) -{ - return __ioremap_caller(offset, size, prot, __builtin_return_address(0)); -} - -static inline void __iomem * -__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot) -{ -#ifdef CONFIG_29BIT - phys_addr_t last_addr = offset + size - 1; - - /* - * For P1 and P2 space this is trivial, as everything is already - * mapped. Uncached access for P1 addresses are done through P2. - * In the P3 case or for addresses outside of the 29-bit space, - * mapping must be done by the PMB or by using page tables. - */ - if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) { - u64 flags = pgprot_val(prot); - - /* - * Anything using the legacy PTEA space attributes needs - * to be kicked down to page table mappings. - */ - if (unlikely(flags & _PAGE_PCC_MASK)) - return NULL; - if (unlikely(flags & _PAGE_CACHABLE)) - return (void __iomem *)P1SEGADDR(offset); - - return (void __iomem *)P2SEGADDR(offset); - } - - /* P4 above the store queues are always mapped. 
*/ - if (unlikely(offset >= P3_ADDR_MAX)) - return (void __iomem *)P4SEGADDR(offset); -#endif - - return NULL; -} - -static inline void __iomem * -__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot) -{ - void __iomem *ret; - - ret = __ioremap_trapped(offset, size); - if (ret) - return ret; - - ret = __ioremap_29bit(offset, size, prot); - if (ret) - return ret; - - return __ioremap(offset, size, prot); -} -#else -#define __ioremap(offset, size, prot) ((void __iomem *)(offset)) -#define __ioremap_mode(offset, size, prot) ((void __iomem *)(offset)) -static inline void iounmap(void __iomem *addr) {} -#endif /* CONFIG_MMU */ static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size) { - return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE); + return __ioremap_caller(offset, size, PAGE_KERNEL_NOCACHE, + __builtin_return_address(0)); } static inline void __iomem * ioremap_cache(phys_addr_t offset, unsigned long size) { - return __ioremap_mode(offset, size, PAGE_KERNEL); + return __ioremap_caller(offset, size, PAGE_KERNEL, + __builtin_return_address(0)); } #define ioremap_cache ioremap_cache #ifdef CONFIG_HAVE_IOREMAP_PROT -static inline void __iomem * -ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags) +static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, + unsigned long flags) { - return __ioremap_mode(offset, size, __pgprot(flags)); + return __ioremap_caller(offset, size, __pgprot(flags), + __builtin_return_address(0)); } -#endif +#endif /* CONFIG_HAVE_IOREMAP_PROT */ -#ifdef CONFIG_IOREMAP_FIXED -extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t); -extern int iounmap_fixed(void __iomem *); -extern void ioremap_fixed_init(void); -#else -static inline void __iomem * -ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot) -{ - BUG(); - return NULL; -} - -static inline void ioremap_fixed_init(void) { } -static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; } -#endif +#else /* CONFIG_MMU */ +#define iounmap(addr) do { } while (0) +#define ioremap(offset, size) ((void __iomem *)(unsigned long)(offset)) +#endif /* CONFIG_MMU */ #define ioremap_uc ioremap @@ -380,6 +292,4 @@ static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; } int valid_phys_addr_range(phys_addr_t addr, size_t size); int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); -#endif /* __KERNEL__ */ - #endif /* __ASM_SH_IO_H */ diff --git a/arch/sh/include/asm/kdebug.h b/arch/sh/include/asm/kdebug.h index 960545306afa..de8693fabb1d 100644 --- a/arch/sh/include/asm/kdebug.h +++ b/arch/sh/include/asm/kdebug.h @@ -12,8 +12,7 @@ enum die_val { }; /* arch/sh/kernel/dumpstack.c */ -extern void printk_address(unsigned long address, int reliable, - const char *loglvl); +extern void printk_address(unsigned long address, int reliable); extern void dump_mem(const char *str, const char *loglvl, unsigned long bottom, unsigned long top); diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h index 48e67d544d53..f664e51e8a15 100644 --- a/arch/sh/include/asm/mmu_context.h +++ b/arch/sh/include/asm/mmu_context.h @@ -8,7 +8,6 @@ #ifndef __ASM_SH_MMU_CONTEXT_H #define __ASM_SH_MMU_CONTEXT_H -#ifdef __KERNEL__ #include <cpu/mmu_context.h> #include <asm/tlbflush.h> #include <linux/uaccess.h> @@ -177,5 +176,4 @@ static inline void disable_mmu(void) #define disable_mmu() do { } while (0) #endif -#endif /* __KERNEL__ */ #endif /* __ASM_SH_MMU_CONTEXT_H */ diff --git 
a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h index cbaee1d1b673..6552a088dc97 100644 --- a/arch/sh/include/asm/mmzone.h +++ b/arch/sh/include/asm/mmzone.h @@ -2,8 +2,6 @@ #ifndef __ASM_SH_MMZONE_H #define __ASM_SH_MMZONE_H -#ifdef __KERNEL__ - #ifdef CONFIG_NEED_MULTIPLE_NODES #include <linux/numa.h> @@ -44,5 +42,4 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn, /* arch/sh/mm/init.c */ void __init allocate_pgdat(unsigned int nid); -#endif /* __KERNEL__ */ #endif /* __ASM_SH_MMZONE_H */ diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h index 10a36b1cf2ea..ad22e88c6657 100644 --- a/arch/sh/include/asm/pci.h +++ b/arch/sh/include/asm/pci.h @@ -2,8 +2,6 @@ #ifndef __ASM_SH_PCI_H #define __ASM_SH_PCI_H -#ifdef __KERNEL__ - /* Can be used to override the logic in pci_scan_bus for skipping already-configured bus numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the loader */ @@ -96,6 +94,4 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) return channel ? 15 : 14; } -#endif /* __KERNEL__ */ #endif /* __ASM_SH_PCI_H */ - diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index d44409413418..aa92cc933889 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -8,7 +8,6 @@ #ifndef __ASM_SH_PROCESSOR_32_H #define __ASM_SH_PROCESSOR_32_H -#ifdef __KERNEL__ #include <linux/compiler.h> #include <linux/linkage.h> @@ -203,5 +202,4 @@ static inline void prefetchw(const void *x) } #endif -#endif /* __KERNEL__ */ #endif /* __ASM_SH_PROCESSOR_32_H */ diff --git a/arch/sh/include/asm/segment.h b/arch/sh/include/asm/segment.h index 33d1d28057cb..02e54a3335d6 100644 --- a/arch/sh/include/asm/segment.h +++ b/arch/sh/include/asm/segment.h @@ -24,8 +24,7 @@ typedef struct { #define USER_DS KERNEL_DS #endif -#define segment_eq(a, b) ((a).seg == (b).seg) - +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/sh/include/asm/smc37c93x.h b/arch/sh/include/asm/smc37c93x.h index f054c30a171a..891f2f8f2fd0 100644 --- a/arch/sh/include/asm/smc37c93x.h +++ b/arch/sh/include/asm/smc37c93x.h @@ -112,8 +112,8 @@ typedef struct uart_reg { #define FCR_RFRES 0x0200 /* Receiver FIFO reset */ #define FCR_TFRES 0x0400 /* Transmitter FIFO reset */ #define FCR_DMA 0x0800 /* DMA mode select */ -#define FCR_RTL 0x4000 /* Receiver triger (LSB) */ -#define FCR_RTM 0x8000 /* Receiver triger (MSB) */ +#define FCR_RTL 0x4000 /* Receiver trigger (LSB) */ +#define FCR_RTM 0x8000 /* Receiver trigger (MSB) */ /* Line Control Register */ diff --git a/arch/sh/include/asm/sparsemem.h b/arch/sh/include/asm/sparsemem.h index 4eb899751e45..4703cbe23844 100644 --- a/arch/sh/include/asm/sparsemem.h +++ b/arch/sh/include/asm/sparsemem.h @@ -2,16 +2,11 @@ #ifndef __ASM_SH_SPARSEMEM_H #define __ASM_SH_SPARSEMEM_H -#ifdef __KERNEL__ /* * SECTION_SIZE_BITS 2^N: how big each section will be - * MAX_PHYSADDR_BITS 2^N: how much physical address space we have - * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space + * MAX_PHYSMEM_BITS 2^N: how much physical address space we have */ #define SECTION_SIZE_BITS 26 -#define MAX_PHYSADDR_BITS 32 #define MAX_PHYSMEM_BITS 32 -#endif - #endif /* __ASM_SH_SPARSEMEM_H */ diff --git a/arch/sh/include/asm/stacktrace.h b/arch/sh/include/asm/stacktrace.h index 50c173c0b9f5..4f98cdc64ec5 
100644 --- a/arch/sh/include/asm/stacktrace.h +++ b/arch/sh/include/asm/stacktrace.h @@ -12,8 +12,6 @@ struct stacktrace_ops { void (*address)(void *data, unsigned long address, int reliable); - /* On negative return stop dumping */ - int (*stack)(void *data, char *name); }; void dump_trace(struct task_struct *tsk, struct pt_regs *regs, diff --git a/arch/sh/include/asm/string_32.h b/arch/sh/include/asm/string_32.h index 3558b1d7123e..a276b193d3b4 100644 --- a/arch/sh/include/asm/string_32.h +++ b/arch/sh/include/asm/string_32.h @@ -2,8 +2,6 @@ #ifndef __ASM_SH_STRING_H #define __ASM_SH_STRING_H -#ifdef __KERNEL__ - /* * Copyright (C) 1999 Niibe Yutaka * But consider these trivial functions to be public domain. @@ -28,32 +26,6 @@ static inline char *strcpy(char *__dest, const char *__src) return __xdest; } -#define __HAVE_ARCH_STRNCPY -static inline char *strncpy(char *__dest, const char *__src, size_t __n) -{ - register char *__xdest = __dest; - unsigned long __dummy; - - if (__n == 0) - return __xdest; - - __asm__ __volatile__( - "1:\n" - "mov.b @%1+, %2\n\t" - "mov.b %2, @%0\n\t" - "cmp/eq #0, %2\n\t" - "bt/s 2f\n\t" - " cmp/eq %5,%1\n\t" - "bf/s 1b\n\t" - " add #1, %0\n" - "2:" - : "=r" (__dest), "=r" (__src), "=&z" (__dummy) - : "0" (__dest), "1" (__src), "r" (__src+__n) - : "memory", "t"); - - return __xdest; -} - #define __HAVE_ARCH_STRCMP static inline int strcmp(const char *__cs, const char *__ct) { @@ -127,6 +99,4 @@ extern void *memchr(const void *__s, int __c, size_t __n); #define __HAVE_ARCH_STRLEN extern size_t strlen(const char *); -#endif /* __KERNEL__ */ - #endif /* __ASM_SH_STRING_H */ diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h index 0b5b8e75edac..cb51a7528384 100644 --- a/arch/sh/include/asm/syscall_32.h +++ b/arch/sh/include/asm/syscall_32.h @@ -40,10 +40,7 @@ static inline void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, int error, long val) { - if (error) - regs->regs[0] = -error; - else - regs->regs[0] = val; + regs->regs[0] = (long) error ?: val; } static inline void syscall_get_arguments(struct task_struct *task, diff --git a/arch/sh/include/asm/syscalls_32.h b/arch/sh/include/asm/syscalls_32.h index 9f9faf63b48c..5c555b864fe0 100644 --- a/arch/sh/include/asm/syscalls_32.h +++ b/arch/sh/include/asm/syscalls_32.h @@ -2,8 +2,6 @@ #ifndef __ASM_SH_SYSCALLS_32_H #define __ASM_SH_SYSCALLS_32_H -#ifdef __KERNEL__ - #include <linux/compiler.h> #include <linux/linkage.h> #include <linux/types.h> @@ -26,5 +24,4 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs); asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0, unsigned long thread_info_flags); -#endif /* __KERNEL__ */ #endif /* __ASM_SH_SYSCALLS_32_H */ diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index 6404be69d5fa..243ea5150aa0 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h @@ -10,8 +10,6 @@ * Copyright (C) 2002 David Howells (dhowells@redhat.com) * - Incorporating suggestions made by Linus Torvalds and Dave Miller */ -#ifdef __KERNEL__ - #include <asm/page.h> /* @@ -170,7 +168,4 @@ static inline unsigned int get_thread_fault_code(void) } #endif /* !__ASSEMBLY__ */ - -#endif /* __KERNEL__ */ - #endif /* __ASM_SH_THREAD_INFO_H */ diff --git a/arch/sh/include/asm/uaccess_32.h b/arch/sh/include/asm/uaccess_32.h index 624cf55acc27..5d7ddc092afd 100644 --- a/arch/sh/include/asm/uaccess_32.h +++ b/arch/sh/include/asm/uaccess_32.h @@ -26,6 +26,9 
@@ do { \ case 4: \ __get_user_asm(x, ptr, retval, "l"); \ break; \ + case 8: \ + __get_user_u64(x, ptr, retval); \ + break; \ default: \ __get_user_unknown(); \ break; \ @@ -66,6 +69,56 @@ do { \ extern void __get_user_unknown(void); +#if defined(CONFIG_CPU_LITTLE_ENDIAN) +#define __get_user_u64(x, addr, err) \ +({ \ +__asm__ __volatile__( \ + "1:\n\t" \ + "mov.l %2,%R1\n\t" \ + "mov.l %T2,%S1\n\t" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3:\n\t" \ + "mov #0,%R1\n\t" \ + "mov #0,%S1\n\t" \ + "mov.l 4f, %0\n\t" \ + "jmp @%0\n\t" \ + " mov %3, %0\n\t" \ + ".balign 4\n" \ + "4: .long 2b\n\t" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n\t" \ + ".long 1b, 3b\n\t" \ + ".long 1b + 2, 3b\n\t" \ + ".previous" \ + :"=&r" (err), "=&r" (x) \ + :"m" (__m(addr)), "i" (-EFAULT), "0" (err)); }) +#else +#define __get_user_u64(x, addr, err) \ +({ \ +__asm__ __volatile__( \ + "1:\n\t" \ + "mov.l %2,%S1\n\t" \ + "mov.l %T2,%R1\n\t" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3:\n\t" \ + "mov #0,%S1\n\t" \ + "mov #0,%R1\n\t" \ + "mov.l 4f, %0\n\t" \ + "jmp @%0\n\t" \ + " mov %3, %0\n\t" \ + ".balign 4\n" \ + "4: .long 2b\n\t" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n\t" \ + ".long 1b, 3b\n\t" \ + ".long 1b + 2, 3b\n\t" \ + ".previous" \ + :"=&r" (err), "=&r" (x) \ + :"m" (__m(addr)), "i" (-EFAULT), "0" (err)); }) +#endif + #define __put_user_size(x,ptr,size,retval) \ do { \ retval = 0; \ diff --git a/arch/sh/include/asm/watchdog.h b/arch/sh/include/asm/watchdog.h index cecd0fc507f9..b9ca4c99f046 100644 --- a/arch/sh/include/asm/watchdog.h +++ b/arch/sh/include/asm/watchdog.h @@ -8,7 +8,6 @@ */ #ifndef __ASM_SH_WATCHDOG_H #define __ASM_SH_WATCHDOG_H -#ifdef __KERNEL__ #include <linux/types.h> #include <linux/io.h> @@ -157,5 +156,4 @@ static inline void sh_wdt_write_csr(__u8 val) __raw_writew((WTCSR_HIGH << 8) | (__u16)val, WTCSR); } #endif /* CONFIG_CPU_SUBTYPE_SH7785 || CONFIG_CPU_SUBTYPE_SH7780 */ -#endif /* __KERNEL__ */ #endif /* __ASM_SH_WATCHDOG_H */ diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile index b0f5574b6228..aa0fbc9202b1 100644 --- a/arch/sh/kernel/Makefile +++ b/arch/sh/kernel/Makefile @@ -47,5 +47,3 @@ obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_callchain.o obj-$(CONFIG_DMA_NONCOHERENT) += dma-coherent.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o - -ccflags-y := -Werror diff --git a/arch/sh/kernel/disassemble.c b/arch/sh/kernel/disassemble.c index 845543780cc5..08e1af63edd9 100644 --- a/arch/sh/kernel/disassemble.c +++ b/arch/sh/kernel/disassemble.c @@ -376,148 +376,148 @@ static void print_sh_insn(u32 memaddr, u16 insn) } ok: - printk("%-8s ", op->name); + pr_cont("%-8s ", op->name); lastsp = (op->arg[0] == A_END); disp_pc = 0; for (n = 0; n < 6 && op->arg[n] != A_END; n++) { if (n && op->arg[1] != A_END) - printk(", "); + pr_cont(", "); switch (op->arg[n]) { case A_IMM: - printk("#%d", (char)(imm)); + pr_cont("#%d", (char)(imm)); break; case A_R0: - printk("r0"); + pr_cont("r0"); break; case A_REG_N: - printk("r%d", rn); + pr_cont("r%d", rn); break; case A_INC_N: - printk("@r%d+", rn); + pr_cont("@r%d+", rn); break; case A_DEC_N: - printk("@-r%d", rn); + pr_cont("@-r%d", rn); break; case A_IND_N: - printk("@r%d", rn); + pr_cont("@r%d", rn); break; case A_DISP_REG_N: - printk("@(%d,r%d)", imm, rn); + pr_cont("@(%d,r%d)", imm, rn); break; case A_REG_M: - printk("r%d", rm); + pr_cont("r%d", rm); break; case A_INC_M: - printk("@r%d+", rm); + pr_cont("@r%d+", rm); break; case A_DEC_M: - printk("@-r%d", rm); 
+ pr_cont("@-r%d", rm); break; case A_IND_M: - printk("@r%d", rm); + pr_cont("@r%d", rm); break; case A_DISP_REG_M: - printk("@(%d,r%d)", imm, rm); + pr_cont("@(%d,r%d)", imm, rm); break; case A_REG_B: - printk("r%d_bank", rb); + pr_cont("r%d_bank", rb); break; case A_DISP_PC: disp_pc = 1; disp_pc_addr = imm + 4 + (memaddr & relmask); - printk("%08x <%pS>", disp_pc_addr, - (void *)disp_pc_addr); + pr_cont("%08x <%pS>", disp_pc_addr, + (void *)disp_pc_addr); break; case A_IND_R0_REG_N: - printk("@(r0,r%d)", rn); + pr_cont("@(r0,r%d)", rn); break; case A_IND_R0_REG_M: - printk("@(r0,r%d)", rm); + pr_cont("@(r0,r%d)", rm); break; case A_DISP_GBR: - printk("@(%d,gbr)",imm); + pr_cont("@(%d,gbr)", imm); break; case A_R0_GBR: - printk("@(r0,gbr)"); + pr_cont("@(r0,gbr)"); break; case A_BDISP12: case A_BDISP8: - printk("%08x", imm + memaddr); + pr_cont("%08x", imm + memaddr); break; case A_SR: - printk("sr"); + pr_cont("sr"); break; case A_GBR: - printk("gbr"); + pr_cont("gbr"); break; case A_VBR: - printk("vbr"); + pr_cont("vbr"); break; case A_SSR: - printk("ssr"); + pr_cont("ssr"); break; case A_SPC: - printk("spc"); + pr_cont("spc"); break; case A_MACH: - printk("mach"); + pr_cont("mach"); break; case A_MACL: - printk("macl"); + pr_cont("macl"); break; case A_PR: - printk("pr"); + pr_cont("pr"); break; case A_SGR: - printk("sgr"); + pr_cont("sgr"); break; case A_DBR: - printk("dbr"); + pr_cont("dbr"); break; case FD_REG_N: case F_REG_N: - printk("fr%d", rn); + pr_cont("fr%d", rn); break; case F_REG_M: - printk("fr%d", rm); + pr_cont("fr%d", rm); break; case DX_REG_N: if (rn & 1) { - printk("xd%d", rn & ~1); + pr_cont("xd%d", rn & ~1); break; } /* else, fall through */ case D_REG_N: - printk("dr%d", rn); + pr_cont("dr%d", rn); break; case DX_REG_M: if (rm & 1) { - printk("xd%d", rm & ~1); + pr_cont("xd%d", rm & ~1); break; } /* else, fall through */ case D_REG_M: - printk("dr%d", rm); + pr_cont("dr%d", rm); break; case FPSCR_M: case FPSCR_N: - printk("fpscr"); + pr_cont("fpscr"); break; case FPUL_M: case FPUL_N: - printk("fpul"); + pr_cont("fpul"); break; case F_FR0: - printk("fr0"); + pr_cont("fr0"); break; case V_REG_N: - printk("fv%d", rn*4); + pr_cont("fv%d", rn*4); break; case V_REG_M: - printk("fv%d", rm*4); + pr_cont("fv%d", rm*4); break; case XMTRX_M4: - printk("xmtrx"); + pr_cont("xmtrx"); break; default: return; @@ -532,7 +532,7 @@ static void print_sh_insn(u32 memaddr, u16 insn) else __get_user(val, (u32 *)disp_pc_addr); - printk(" ! %08x <%pS>", val, (void *)val); + pr_cont(" ! %08x <%pS>", val, (void *)val); } return; @@ -541,7 +541,7 @@ static void print_sh_insn(u32 memaddr, u16 insn) } - printk(".word 0x%x%x%x%x", nibs[0], nibs[1], nibs[2], nibs[3]); + pr_info(".word 0x%x%x%x%x", nibs[0], nibs[1], nibs[2], nibs[3]); } void show_code(struct pt_regs *regs) @@ -552,20 +552,21 @@ void show_code(struct pt_regs *regs) if (regs->pc & 0x1) return; - printk("Code:\n"); + pr_info("Code:\n"); for (i = -3 ; i < 6 ; i++) { unsigned short insn; if (__get_user(insn, pc + i)) { - printk(" (Bad address in pc)\n"); + pr_err(" (Bad address in pc)\n"); break; } - printk("%s%08lx: ", (i ? " ": "->"), (unsigned long)(pc + i)); + pr_info("%s%08lx: ", (i ? 
" " : "->"), + (unsigned long)(pc + i)); print_sh_insn((unsigned long)(pc + i), insn); - printk("\n"); + pr_cont("\n"); } - printk("\n"); + pr_info("\n"); } diff --git a/arch/sh/kernel/dma-coherent.c b/arch/sh/kernel/dma-coherent.c index d4811691b93c..cd46a9825e3c 100644 --- a/arch/sh/kernel/dma-coherent.c +++ b/arch/sh/kernel/dma-coherent.c @@ -3,60 +3,13 @@ * Copyright (C) 2004 - 2007 Paul Mundt */ #include <linux/mm.h> -#include <linux/init.h> #include <linux/dma-noncoherent.h> -#include <linux/module.h> #include <asm/cacheflush.h> #include <asm/addrspace.h> -void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, unsigned long attrs) +void arch_dma_prep_coherent(struct page *page, size_t size) { - void *ret, *ret_nocache; - int order = get_order(size); - - gfp |= __GFP_ZERO; - - ret = (void *)__get_free_pages(gfp, order); - if (!ret) - return NULL; - - /* - * Pages from the page allocator may have data present in - * cache. So flush the cache before using uncached memory. - */ - arch_sync_dma_for_device(virt_to_phys(ret), size, - DMA_BIDIRECTIONAL); - - ret_nocache = (void __force *)ioremap(virt_to_phys(ret), size); - if (!ret_nocache) { - free_pages((unsigned long)ret, order); - return NULL; - } - - split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order); - - *dma_handle = virt_to_phys(ret); - if (!WARN_ON(!dev)) - *dma_handle -= PFN_PHYS(dev->dma_pfn_offset); - - return ret_nocache; -} - -void arch_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, unsigned long attrs) -{ - int order = get_order(size); - unsigned long pfn = (dma_handle >> PAGE_SHIFT); - int k; - - if (!WARN_ON(!dev)) - pfn += dev->dma_pfn_offset; - - for (k = 0; k < (1 << order); k++) - __free_pages(pfn_to_page(pfn + k), 0); - - iounmap(vaddr); + __flush_purge_region(page_address(page), size); } void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c index a13c045804ed..758a6c89e911 100644 --- a/arch/sh/kernel/dumpstack.c +++ b/arch/sh/kernel/dumpstack.c @@ -16,8 +16,8 @@ #include <asm/unwinder.h> #include <asm/stacktrace.h> -void dump_mem(const char *str, const char *loglvl, - unsigned long bottom, unsigned long top) +void dump_mem(const char *str, const char *loglvl, unsigned long bottom, + unsigned long top) { unsigned long p; int i; @@ -31,23 +31,23 @@ void dump_mem(const char *str, const char *loglvl, unsigned int val; if (p < bottom || p >= top) - printk("%s ", loglvl); + pr_cont(" "); else { if (__get_user(val, (unsigned int __user *)p)) { - printk("%s\n", loglvl); + pr_cont("\n"); return; } - printk("%s%08x ", loglvl, val); + pr_cont("%08x ", val); } } - printk("%s\n", loglvl); + pr_cont("\n"); } } -void printk_address(unsigned long address, int reliable, const char *loglvl) +void printk_address(unsigned long address, int reliable) { - printk("%s [<%p>] %s%pS\n", loglvl, (void *) address, - reliable ? "" : "? ", (void *) address); + pr_cont(" [<%px>] %s%pS\n", (void *) address, + reliable ? "" : "? ", (void *) address); } #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -107,22 +107,16 @@ stack_reader_dump(struct task_struct *task, struct pt_regs *regs, } } -static int print_trace_stack(void *data, char *name) -{ - printk("%s <%s> ", (char *)data, name); - return 0; -} - /* * Print one address/symbol entries per line. 
*/ static void print_trace_address(void *data, unsigned long addr, int reliable) { - printk_address(addr, reliable, (char *)data); + printk("%s", (char *)data); + printk_address(addr, reliable); } static const struct stacktrace_ops print_trace_ops = { - .stack = print_trace_stack, .address = print_trace_address, }; @@ -136,7 +130,7 @@ void show_trace(struct task_struct *tsk, unsigned long *sp, unwind_stack(tsk, regs, sp, &print_trace_ops, (void *)loglvl); - printk("%s\n", loglvl); + pr_cont("\n"); if (!tsk) tsk = current; diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index 9bac5bbb67f3..ad963104d22d 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -178,34 +178,6 @@ syscall_exit_work: bra resume_userspace nop - .align 2 -syscall_trace_entry: - ! Yes it is traced. - mov r15, r4 - mov.l 7f, r11 ! Call do_syscall_trace_enter which notifies - jsr @r11 ! superior (will chomp R[0-7]) - nop - mov.l r0, @(OFF_R0,r15) ! Save return value - ! Reload R0-R4 from kernel stack, where the - ! parent may have modified them using - ! ptrace(POKEUSR). (Note that R0-R2 are - ! reloaded from the kernel stack by syscall_call - ! below, so don't need to be reloaded here.) - ! This allows the parent to rewrite system calls - ! and args on the fly. - mov.l @(OFF_R4,r15), r4 ! arg0 - mov.l @(OFF_R5,r15), r5 - mov.l @(OFF_R6,r15), r6 - mov.l @(OFF_R7,r15), r7 ! arg3 - mov.l @(OFF_R3,r15), r3 ! syscall_nr - ! - mov.l 6f, r10 ! Number of syscalls - cmp/hs r10, r3 - bf syscall_call - mov #-ENOSYS, r0 - bra syscall_exit - mov.l r0, @(OFF_R0,r15) ! Return value - __restore_all: mov #OFF_SR, r0 mov.l @(r0,r15), r0 ! get status register @@ -388,6 +360,37 @@ syscall_exit: bf syscall_exit_work bra __restore_all nop + + .align 2 +syscall_trace_entry: + ! Yes it is traced. + mov r15, r4 + mov.l 7f, r11 ! Call do_syscall_trace_enter which notifies + jsr @r11 ! superior (will chomp R[0-7]) + nop + cmp/eq #-1, r0 + bt syscall_exit + mov.l r0, @(OFF_R0,r15) ! Save return value + ! Reload R0-R4 from kernel stack, where the + ! parent may have modified them using + ! ptrace(POKEUSR). (Note that R0-R2 are + ! reloaded from the kernel stack by syscall_call + ! below, so don't need to be reloaded here.) + ! This allows the parent to rewrite system calls + ! and args on the fly. + mov.l @(OFF_R4,r15), r4 ! arg0 + mov.l @(OFF_R5,r15), r5 + mov.l @(OFF_R6,r15), r6 + mov.l @(OFF_R7,r15), r7 ! arg3 + mov.l @(OFF_R3,r15), r3 ! syscall_nr + ! + mov.l 6f, r10 ! Number of syscalls + cmp/hs r10, r3 + bf syscall_call + mov #-ENOSYS, r0 + bra syscall_exit + mov.l r0, @(OFF_R0,r15) ! 
Return value + .align 2 #if !defined(CONFIG_CPU_SH2) 1: .long TRA diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c index 037aab2708b7..004ad0130b10 100644 --- a/arch/sh/kernel/io_trapped.c +++ b/arch/sh/kernel/io_trapped.c @@ -102,7 +102,6 @@ int register_trapped_io(struct trapped_io *tiop) pr_warn("unable to install trapped io filter\n"); return -1; } -EXPORT_SYMBOL_GPL(register_trapped_io); void __iomem *match_trapped_io_handler(struct list_head *list, unsigned long offset, @@ -131,7 +130,6 @@ void __iomem *match_trapped_io_handler(struct list_head *list, spin_unlock_irqrestore(&trapped_lock, flags); return NULL; } -EXPORT_SYMBOL_GPL(match_trapped_io_handler); static struct trapped_io *lookup_tiop(unsigned long address) { diff --git a/arch/sh/kernel/iomap.c b/arch/sh/kernel/iomap.c index ef9e2c97cbb7..0a0dff4e66de 100644 --- a/arch/sh/kernel/iomap.c +++ b/arch/sh/kernel/iomap.c @@ -8,31 +8,31 @@ #include <linux/module.h> #include <linux/io.h> -unsigned int ioread8(void __iomem *addr) +unsigned int ioread8(const void __iomem *addr) { return readb(addr); } EXPORT_SYMBOL(ioread8); -unsigned int ioread16(void __iomem *addr) +unsigned int ioread16(const void __iomem *addr) { return readw(addr); } EXPORT_SYMBOL(ioread16); -unsigned int ioread16be(void __iomem *addr) +unsigned int ioread16be(const void __iomem *addr) { return be16_to_cpu(__raw_readw(addr)); } EXPORT_SYMBOL(ioread16be); -unsigned int ioread32(void __iomem *addr) +unsigned int ioread32(const void __iomem *addr) { return readl(addr); } EXPORT_SYMBOL(ioread32); -unsigned int ioread32be(void __iomem *addr) +unsigned int ioread32be(const void __iomem *addr) { return be32_to_cpu(__raw_readl(addr)); } @@ -74,7 +74,7 @@ EXPORT_SYMBOL(iowrite32be); * convert to CPU byte order. We write in "IO byte * order" (we also don't have IO barriers). 
*/ -static inline void mmio_insb(void __iomem *addr, u8 *dst, int count) +static inline void mmio_insb(const void __iomem *addr, u8 *dst, int count) { while (--count >= 0) { u8 data = __raw_readb(addr); @@ -83,7 +83,7 @@ static inline void mmio_insb(void __iomem *addr, u8 *dst, int count) } } -static inline void mmio_insw(void __iomem *addr, u16 *dst, int count) +static inline void mmio_insw(const void __iomem *addr, u16 *dst, int count) { while (--count >= 0) { u16 data = __raw_readw(addr); @@ -92,7 +92,7 @@ static inline void mmio_insw(void __iomem *addr, u16 *dst, int count) } } -static inline void mmio_insl(void __iomem *addr, u32 *dst, int count) +static inline void mmio_insl(const void __iomem *addr, u32 *dst, int count) { while (--count >= 0) { u32 data = __raw_readl(addr); @@ -125,19 +125,19 @@ static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count) } } -void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count) { mmio_insb(addr, dst, count); } EXPORT_SYMBOL(ioread8_rep); -void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count) { mmio_insw(addr, dst, count); } EXPORT_SYMBOL(ioread16_rep); -void ioread32_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count) { mmio_insl(addr, dst, count); } diff --git a/arch/sh/kernel/ioport.c b/arch/sh/kernel/ioport.c index 34f8cdbbcf0b..f39446a658bd 100644 --- a/arch/sh/kernel/ioport.c +++ b/arch/sh/kernel/ioport.c @@ -7,6 +7,7 @@ */ #include <linux/module.h> #include <linux/io.h> +#include <asm/io_trapped.h> unsigned long sh_io_port_base __read_mostly = -1; EXPORT_SYMBOL(sh_io_port_base); diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c index beadbbdb4486..d606679a211e 100644 --- a/arch/sh/kernel/machvec.c +++ b/arch/sh/kernel/machvec.c @@ -15,6 +15,7 @@ #include <asm/setup.h> #include <asm/io.h> #include <asm/irq.h> +#include <asm/processor.h> #define MV_NAME_SIZE 32 @@ -64,10 +65,10 @@ static int __init early_parse_mv(char *from) mvp = get_mv_byname(mv_name); if (unlikely(!mvp)) { - printk("Available vectors:\n\n\t'%s', ", sh_mv.mv_name); + pr_info("Available vectors:\n\n\t'%s', ", sh_mv.mv_name); for_each_mv(mvp) - printk("'%s', ", mvp->mv_name); - printk("\n\n"); + pr_cont("'%s', ", mvp->mv_name); + pr_cont("\n\n"); panic("Failed to select machvec '%s' -- halting.\n", mv_name); } else @@ -104,7 +105,7 @@ void __init sh_mv_setup(void) sh_mv = *(struct sh_machine_vector *)&__machvec_start; } - printk(KERN_NOTICE "Booting machvec: %s\n", get_system_type()); + pr_notice("Booting machvec: %s\n", get_system_type()); /* * Manually walk the vec, fill in anything that the board hasn't yet diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c index 6281f2fdf9ca..c9d3aa18732d 100644 --- a/arch/sh/kernel/perf_callchain.c +++ b/arch/sh/kernel/perf_callchain.c @@ -11,11 +11,6 @@ #include <asm/unwinder.h> #include <asm/ptrace.h> -static int callchain_stack(void *data, char *name) -{ - return 0; -} - static void callchain_address(void *data, unsigned long addr, int reliable) { struct perf_callchain_entry_ctx *entry = data; @@ -25,7 +20,6 @@ static void callchain_address(void *data, unsigned long addr, int reliable) } static const struct stacktrace_ops callchain_ops = { - .stack = callchain_stack, .address = callchain_address, }; diff --git 
a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index cde0a66116d2..80a5d1c66a51 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -30,34 +30,30 @@ void show_regs(struct pt_regs * regs) { - printk("\n"); + pr_info("\n"); show_regs_print_info(KERN_DEFAULT); - printk("PC is at %pS\n", (void *)instruction_pointer(regs)); - printk("PR is at %pS\n", (void *)regs->pr); + pr_info("PC is at %pS\n", (void *)instruction_pointer(regs)); + pr_info("PR is at %pS\n", (void *)regs->pr); - printk("PC : %08lx SP : %08lx SR : %08lx ", - regs->pc, regs->regs[15], regs->sr); + pr_info("PC : %08lx SP : %08lx SR : %08lx ", regs->pc, + regs->regs[15], regs->sr); #ifdef CONFIG_MMU - printk("TEA : %08x\n", __raw_readl(MMU_TEA)); + pr_cont("TEA : %08x\n", __raw_readl(MMU_TEA)); #else - printk("\n"); + pr_cont("\n"); #endif - printk("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n", - regs->regs[0],regs->regs[1], - regs->regs[2],regs->regs[3]); - printk("R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n", - regs->regs[4],regs->regs[5], - regs->regs[6],regs->regs[7]); - printk("R8 : %08lx R9 : %08lx R10 : %08lx R11 : %08lx\n", - regs->regs[8],regs->regs[9], - regs->regs[10],regs->regs[11]); - printk("R12 : %08lx R13 : %08lx R14 : %08lx\n", - regs->regs[12],regs->regs[13], - regs->regs[14]); - printk("MACH: %08lx MACL: %08lx GBR : %08lx PR : %08lx\n", - regs->mach, regs->macl, regs->gbr, regs->pr); + pr_info("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n", + regs->regs[0], regs->regs[1], regs->regs[2], regs->regs[3]); + pr_info("R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n", + regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); + pr_info("R8 : %08lx R9 : %08lx R10 : %08lx R11 : %08lx\n", + regs->regs[8], regs->regs[9], regs->regs[10], regs->regs[11]); + pr_info("R12 : %08lx R13 : %08lx R14 : %08lx\n", + regs->regs[12], regs->regs[13], regs->regs[14]); + pr_info("MACH: %08lx MACL: %08lx GBR : %08lx PR : %08lx\n", + regs->mach, regs->macl, regs->gbr, regs->pr); show_trace(NULL, (unsigned long *)regs->regs[15], regs, KERN_DEFAULT); show_code(regs); @@ -93,24 +89,6 @@ void release_thread(struct task_struct *dead_task) /* do nothing */ } -/* Fill in the fpu structure for a core dump.. 
*/ -int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) -{ - int fpvalid = 0; - -#if defined(CONFIG_SH_FPU) - struct task_struct *tsk = current; - - fpvalid = !!tsk_used_math(tsk); - if (fpvalid) - fpvalid = !fpregs_get(tsk, NULL, - (struct membuf){fpu, sizeof(*fpu)}); -#endif - - return fpvalid; -} -EXPORT_SYMBOL(dump_fpu); - asmlinkage void ret_from_fork(void); asmlinkage void ret_from_kernel_thread(void); diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 5c93bdb6c41a..b05bf92f9c32 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -165,7 +165,7 @@ static int genregs_set(struct task_struct *target, } #ifdef CONFIG_SH_FPU -int fpregs_get(struct task_struct *target, +static int fpregs_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { @@ -457,8 +457,6 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) { long ret = 0; - secure_computing_strict(regs->regs[0]); - if (test_thread_flag(TIF_SYSCALL_TRACE) && tracehook_report_syscall_entry(regs)) /* @@ -468,6 +466,9 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) */ ret = -1L; + if (secure_computing() == -1) + return -1; + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_enter(regs, regs->regs[0]); diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c index 2950b19ad077..daf0b53ee066 100644 --- a/arch/sh/kernel/stacktrace.c +++ b/arch/sh/kernel/stacktrace.c @@ -15,11 +15,6 @@ #include <asm/ptrace.h> #include <asm/stacktrace.h> -static int save_stack_stack(void *data, char *name) -{ - return 0; -} - /* * Save stack-backtrace addresses into a stack_trace buffer. */ @@ -40,7 +35,6 @@ static void save_stack_address(void *data, unsigned long addr, int reliable) } static const struct stacktrace_ops save_stack_ops = { - .stack = save_stack_stack, .address = save_stack_address, }; @@ -73,7 +67,6 @@ save_stack_address_nosched(void *data, unsigned long addr, int reliable) } static const struct stacktrace_ops save_stack_ops_nosched = { - .stack = save_stack_stack, .address = save_stack_address_nosched, }; diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl index 96848db9659e..ae0a00beea5f 100644 --- a/arch/sh/kernel/syscalls/syscall.tbl +++ b/arch/sh/kernel/syscalls/syscall.tbl @@ -156,7 +156,7 @@ 146 common writev sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync -149 common _sysctl sys_sysctl +149 common _sysctl sys_ni_syscall 150 common mlock sys_mlock 151 common munlock sys_munlock 152 common mlockall sys_mlockall diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 058c6181bb30..b62ad0ba2395 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -482,8 +482,6 @@ asmlinkage void do_address_error(struct pt_regs *regs, error_code = lookup_exception_vector(); #endif - oldfs = get_fs(); - if (user_mode(regs)) { int si_code = BUS_ADRERR; unsigned int user_action; @@ -491,13 +489,13 @@ asmlinkage void do_address_error(struct pt_regs *regs, local_irq_enable(); inc_unaligned_user_access(); - set_fs(USER_DS); + oldfs = force_uaccess_begin(); if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1), sizeof(instruction))) { - set_fs(oldfs); + force_uaccess_end(oldfs); goto uspace_segv; } - set_fs(oldfs); + force_uaccess_end(oldfs); /* shout about userspace fixups */ unaligned_fixups_notify(current, instruction, regs); @@ -520,11 +518,11 @@ fixup: goto uspace_segv; } - set_fs(USER_DS); + oldfs = 
force_uaccess_begin(); tmp = handle_unaligned_access(instruction, regs, &user_mem_access, 0, address); - set_fs(oldfs); + force_uaccess_end(oldfs); if (tmp == 0) return; /* sorted */ diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile index d0abbe5e38b0..eb473d373ca4 100644 --- a/arch/sh/lib/Makefile +++ b/arch/sh/lib/Makefile @@ -30,5 +30,3 @@ memset-$(CONFIG_CPU_SH4) := memset-sh4.o lib-$(CONFIG_MMU) += copy_page.o __clear_user.o lib-$(CONFIG_MCOUNT) += mcount.o lib-y += $(memcpy-y) $(memset-y) $(udivsi3-y) - -ccflags-y := -Werror diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c index 540e670dbafc..dad8e6a54906 100644 --- a/arch/sh/lib/delay.c +++ b/arch/sh/lib/delay.c @@ -29,7 +29,6 @@ void __delay(unsigned long loops) : "0" (loops) : "t"); } -EXPORT_SYMBOL(__delay); inline void __const_udelay(unsigned long xloops) { diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile index 487da0ff03b3..f69ddc70b146 100644 --- a/arch/sh/mm/Makefile +++ b/arch/sh/mm/Makefile @@ -43,5 +43,3 @@ obj-$(CONFIG_UNCACHED_MAPPING) += uncached.o obj-$(CONFIG_HAVE_SRAM_POOL) += sram.o GCOV_PROFILE_pmb.o := n - -ccflags-y := -Werror diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index 3169a343a5ab..0de206c1acfe 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c @@ -57,8 +57,6 @@ int __init platform_resource_setup_memory(struct platform_device *pdev, return -ENOMEM; } - memset(buf, 0, memsize); - r->flags = IORESOURCE_MEM; r->start = dma_handle; r->end = r->start + memsize - 1; diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index fbe1f2fe9a8c..88a1f453d73e 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -208,13 +208,12 @@ show_fault_oops(struct pt_regs *regs, unsigned long address) if (!oops_may_print()) return; - printk(KERN_ALERT "PC:"); pr_alert("BUG: unable to handle kernel %s at %08lx\n", address < PAGE_SIZE ? "NULL pointer dereference" : "paging request", address); pr_alert("PC:"); - printk_address(regs->pc, 1, KERN_ALERT); + printk_address(regs->pc, 1); show_pte(NULL, address); } @@ -482,22 +481,13 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, regs); if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR))) if (mm_fault_error(regs, error_code, address, fault)) return; if (flags & FAULT_FLAG_ALLOW_RETRY) { - if (fault & VM_FAULT_MAJOR) { - tsk->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, - regs, address); - } else { - tsk->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, - regs, address); - } if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 613de8096335..4735176ab811 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -29,6 +29,7 @@ #include <asm/cache.h> #include <asm/pgalloc.h> #include <linux/sizes.h> +#include "ioremap.h" pgd_t swapper_pg_dir[PTRS_PER_PGD]; @@ -425,15 +426,6 @@ int arch_add_memory(int nid, u64 start, u64 size, return ret; } -#ifdef CONFIG_NUMA -int memory_add_physaddr_to_nid(u64 addr) -{ - /* Node 0 for now.. 
*/ - return 0; -} -EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); -#endif - void arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap) { diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c index f6d02246d665..21342581144d 100644 --- a/arch/sh/mm/ioremap.c +++ b/arch/sh/mm/ioremap.c @@ -18,12 +18,59 @@ #include <linux/mm.h> #include <linux/pci.h> #include <linux/io.h> +#include <asm/io_trapped.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/addrspace.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/mmu.h> +#include "ioremap.h" + +/* + * On 32-bit SH, we traditionally have the whole physical address space mapped + * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not need to do + * anything but place the address in the proper segment. This is true for P1 + * and P2 addresses, as well as some P3 ones. However, most of the P3 addresses + * and newer cores using extended addressing need to map through page tables, so + * the ioremap() implementation becomes a bit more complicated. + */ +#ifdef CONFIG_29BIT +static void __iomem * +__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot) +{ + phys_addr_t last_addr = offset + size - 1; + + /* + * For P1 and P2 space this is trivial, as everything is already + * mapped. Uncached access for P1 addresses are done through P2. + * In the P3 case or for addresses outside of the 29-bit space, + * mapping must be done by the PMB or by using page tables. + */ + if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) { + u64 flags = pgprot_val(prot); + + /* + * Anything using the legacy PTEA space attributes needs + * to be kicked down to page table mappings. + */ + if (unlikely(flags & _PAGE_PCC_MASK)) + return NULL; + if (unlikely(flags & _PAGE_CACHABLE)) + return (void __iomem *)P1SEGADDR(offset); + + return (void __iomem *)P2SEGADDR(offset); + } + + /* P4 above the store queues are always mapped. 
*/ + if (unlikely(offset >= P3_ADDR_MAX)) + return (void __iomem *)P4SEGADDR(offset); + + return NULL; +} +#else +#define __ioremap_29bit(offset, size, prot) NULL +#endif /* CONFIG_29BIT */ /* * Remap an arbitrary physical address space into the kernel virtual @@ -42,6 +89,14 @@ __ioremap_caller(phys_addr_t phys_addr, unsigned long size, unsigned long offset, last_addr, addr, orig_addr; void __iomem *mapped; + mapped = __ioremap_trapped(phys_addr, size); + if (mapped) + return mapped; + + mapped = __ioremap_29bit(phys_addr, size, pgprot); + if (mapped) + return mapped; + /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; if (!size || last_addr < phys_addr) diff --git a/arch/sh/mm/ioremap.h b/arch/sh/mm/ioremap.h new file mode 100644 index 000000000000..f2544e721a35 --- /dev/null +++ b/arch/sh/mm/ioremap.h @@ -0,0 +1,23 @@ +#ifndef _SH_MM_IORMEMAP_H +#define _SH_MM_IORMEMAP_H 1 + +#ifdef CONFIG_IOREMAP_FIXED +void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t); +int iounmap_fixed(void __iomem *); +void ioremap_fixed_init(void); +#else +static inline void __iomem * +ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot) +{ + BUG(); + return NULL; +} +static inline void ioremap_fixed_init(void) +{ +} +static inline int iounmap_fixed(void __iomem *addr) +{ + return -EINVAL; +} +#endif /* CONFIG_IOREMAP_FIXED */ +#endif /* _SH_MM_IORMEMAP_H */ diff --git a/arch/sh/mm/ioremap_fixed.c b/arch/sh/mm/ioremap_fixed.c index aab3f82856bb..136113bcac25 100644 --- a/arch/sh/mm/ioremap_fixed.c +++ b/arch/sh/mm/ioremap_fixed.c @@ -23,6 +23,7 @@ #include <asm/tlbflush.h> #include <asm/mmu.h> #include <asm/mmu_context.h> +#include "ioremap.h" struct ioremap_map { void __iomem *addr; diff --git a/arch/sh/mm/pgtable.c b/arch/sh/mm/pgtable.c index 5c8f9247c3c2..cf7ce4b57359 100644 --- a/arch/sh/mm/pgtable.c +++ b/arch/sh/mm/pgtable.c @@ -2,8 +2,6 @@ #include <linux/mm.h> #include <linux/slab.h> -#define PGALLOC_GFP GFP_KERNEL | __GFP_ZERO - static struct kmem_cache *pgd_cachep; #if PAGETABLE_LEVELS > 2 static struct kmem_cache *pmd_cachep; @@ -13,6 +11,7 @@ void pgd_ctor(void *x) { pgd_t *pgd = x; + memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); @@ -32,7 +31,7 @@ void pgtable_cache_init(void) pgd_t *pgd_alloc(struct mm_struct *mm) { - return kmem_cache_alloc(pgd_cachep, PGALLOC_GFP); + return kmem_cache_alloc(pgd_cachep, GFP_KERNEL); } void pgd_free(struct mm_struct *mm, pgd_t *pgd) @@ -48,7 +47,7 @@ void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) { - return kmem_cache_alloc(pmd_cachep, PGALLOC_GFP); + return kmem_cache_alloc(pmd_cachep, GFP_KERNEL | __GFP_ZERO); } void pmd_free(struct mm_struct *mm, pmd_t *pmd) diff --git a/arch/sh/oprofile/backtrace.c b/arch/sh/oprofile/backtrace.c index f1205f92631d..cc16cf86cd92 100644 --- a/arch/sh/oprofile/backtrace.c +++ b/arch/sh/oprofile/backtrace.c @@ -19,12 +19,6 @@ #include <asm/sections.h> #include <asm/stacktrace.h> -static int backtrace_stack(void *data, char *name) -{ - /* Yes, we want all stacks */ - return 0; -} - static void backtrace_address(void *data, unsigned long addr, int reliable) { unsigned int *depth = data; @@ -34,7 +28,6 @@ static void backtrace_address(void *data, unsigned long addr, int reliable) } static struct stacktrace_ops backtrace_ops = { - .stack = backtrace_stack, .address = 
backtrace_address, }; diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types index 569977e52c91..29e648855d50 100644 --- a/arch/sh/tools/mach-types +++ b/arch/sh/tools/mach-types @@ -46,7 +46,6 @@ X3PROTO SH_X3PROTO MAGICPANELR2 SH_MAGIC_PANEL_R2 R2D_PLUS RTS7751R2D_PLUS R2D_1 RTS7751R2D_1 -CAYMAN SH_CAYMAN SDK7780 SH_SDK7780 MIGOR SH_MIGOR RSK7201 SH_RSK7201 diff --git a/arch/sparc/include/asm/sparsemem.h b/arch/sparc/include/asm/sparsemem.h index 1dd1b61432db..aa9a676bc341 100644 --- a/arch/sparc/include/asm/sparsemem.h +++ b/arch/sparc/include/asm/sparsemem.h @@ -7,7 +7,6 @@ #include <asm/page.h> #define SECTION_SIZE_BITS 30 -#define MAX_PHYSADDR_BITS MAX_PHYS_ADDRESS_BITS #define MAX_PHYSMEM_BITS MAX_PHYS_ADDRESS_BITS #endif /* !(__KERNEL__) */ diff --git a/arch/sparc/include/asm/timer_64.h b/arch/sparc/include/asm/timer_64.h index c7e4fb601a57..dcfad4613e18 100644 --- a/arch/sparc/include/asm/timer_64.h +++ b/arch/sparc/include/asm/timer_64.h @@ -7,6 +7,7 @@ #ifndef _SPARC64_TIMER_H #define _SPARC64_TIMER_H +#include <uapi/asm/asi.h> #include <linux/types.h> #include <linux/init.h> diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index d6d8413eca83..0a2d3ebc4bb8 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -28,7 +28,7 @@ #define get_fs() (current->thread.current_ds) #define set_fs(val) ((current->thread.current_ds) = (val)) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) /* We have there a nice not-mapped page at PAGE_OFFSET - PAGE_SIZE, so that this test * can be fairly lightweight. diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index bf9d330073b2..698cf69f74e9 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -32,7 +32,7 @@ #define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)}) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define set_fs(val) \ do { \ diff --git a/arch/sparc/include/asm/vvar.h b/arch/sparc/include/asm/vvar.h index 0289503d1cb0..6eaf5cfcaae1 100644 --- a/arch/sparc/include/asm/vvar.h +++ b/arch/sparc/include/asm/vvar.h @@ -6,7 +6,8 @@ #define _ASM_SPARC_VVAR_DATA_H #include <asm/clocksource.h> -#include <linux/seqlock.h> +#include <asm/processor.h> +#include <asm/barrier.h> #include <linux/time.h> #include <linux/types.h> diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index 3f519e1047b6..adfcaeab3ddc 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -380,55 +380,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, return 0; } -/* - * fill in the fpu structure for a core dump. 
- */ -int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs) -{ - if (used_math()) { - memset(fpregs, 0, sizeof(*fpregs)); - fpregs->pr_q_entrysize = 8; - return 1; - } -#ifdef CONFIG_SMP - if (test_thread_flag(TIF_USEDFPU)) { - put_psr(get_psr() | PSR_EF); - fpsave(&current->thread.float_regs[0], &current->thread.fsr, - &current->thread.fpqueue[0], &current->thread.fpqdepth); - if (regs != NULL) { - regs->psr &= ~(PSR_EF); - clear_thread_flag(TIF_USEDFPU); - } - } -#else - if (current == last_task_used_math) { - put_psr(get_psr() | PSR_EF); - fpsave(&current->thread.float_regs[0], &current->thread.fsr, - &current->thread.fpqueue[0], &current->thread.fpqdepth); - if (regs != NULL) { - regs->psr &= ~(PSR_EF); - last_task_used_math = NULL; - } - } -#endif - memcpy(&fpregs->pr_fr.pr_regs[0], - &current->thread.float_regs[0], - (sizeof(unsigned long) * 32)); - fpregs->pr_fsr = current->thread.fsr; - fpregs->pr_qcnt = current->thread.fpqdepth; - fpregs->pr_q_entrysize = 8; - fpregs->pr_en = 1; - if(fpregs->pr_qcnt != 0) { - memcpy(&fpregs->pr_q[0], - &current->thread.fpqueue[0], - sizeof(struct fpq) * fpregs->pr_qcnt); - } - /* Zero out the rest. */ - memset(&fpregs->pr_q[fpregs->pr_qcnt], 0, - sizeof(struct fpq) * (32 - fpregs->pr_qcnt)); - return 1; -} - unsigned long get_wchan(struct task_struct *task) { unsigned long pc, fp, bias = 0; diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 04ef19b88632..a75093b993f9 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -666,72 +666,6 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) return 0; } -typedef struct { - union { - unsigned int pr_regs[32]; - unsigned long pr_dregs[16]; - } pr_fr; - unsigned int __unused; - unsigned int pr_fsr; - unsigned char pr_qcnt; - unsigned char pr_q_entrysize; - unsigned char pr_en; - unsigned int pr_q[64]; -} elf_fpregset_t32; - -/* - * fill in the fpu structure for a core dump.
- */ -int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs) -{ - unsigned long *kfpregs = current_thread_info()->fpregs; - unsigned long fprs = current_thread_info()->fpsaved[0]; - - if (test_thread_flag(TIF_32BIT)) { - elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs; - - if (fprs & FPRS_DL) - memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs, - sizeof(unsigned int) * 32); - else - memset(&fpregs32->pr_fr.pr_regs[0], 0, - sizeof(unsigned int) * 32); - fpregs32->pr_qcnt = 0; - fpregs32->pr_q_entrysize = 8; - memset(&fpregs32->pr_q[0], 0, - (sizeof(unsigned int) * 64)); - if (fprs & FPRS_FEF) { - fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0]; - fpregs32->pr_en = 1; - } else { - fpregs32->pr_fsr = 0; - fpregs32->pr_en = 0; - } - } else { - if(fprs & FPRS_DL) - memcpy(&fpregs->pr_regs[0], kfpregs, - sizeof(unsigned int) * 32); - else - memset(&fpregs->pr_regs[0], 0, - sizeof(unsigned int) * 32); - if(fprs & FPRS_DU) - memcpy(&fpregs->pr_regs[16], kfpregs+16, - sizeof(unsigned int) * 32); - else - memset(&fpregs->pr_regs[16], 0, - sizeof(unsigned int) * 32); - if(fprs & FPRS_FEF) { - fpregs->pr_fsr = current_thread_info()->xfsr[0]; - fpregs->pr_gsr = current_thread_info()->gsr[0]; - } else { - fpregs->pr_fsr = fpregs->pr_gsr = 0; - } - fpregs->pr_fprs = fprs; - } - return 1; -} -EXPORT_SYMBOL(dump_fpu); - unsigned long get_wchan(struct task_struct *task) { unsigned long pc, fp, bias = 0; diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl index 46024e80ee86..4af114e84f20 100644 --- a/arch/sparc/kernel/syscalls/syscall.tbl +++ b/arch/sparc/kernel/syscalls/syscall.tbl @@ -300,7 +300,7 @@ 249 64 nanosleep sys_nanosleep 250 32 mremap sys_mremap 250 64 mremap sys_64_mremap -251 common _sysctl sys_sysctl compat_sys_sysctl +251 common _sysctl sys_ni_syscall 252 common getsid sys_getsid 253 common fdatasync sys_fdatasync 254 32 nfsservctl sys_ni_syscall sys_nis_syscall diff --git a/arch/sparc/kernel/vdso.c b/arch/sparc/kernel/vdso.c index 58880662b271..0e27437eb97b 100644 --- a/arch/sparc/kernel/vdso.c +++ b/arch/sparc/kernel/vdso.c @@ -7,7 +7,6 @@ * a different vsyscall implementation for Linux/IA32 and for the name. */ -#include <linux/seqlock.h> #include <linux/time.h> #include <linux/timekeeper_internal.h> diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index cfef656eda0f..8071bfd72349 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -234,7 +234,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. 
*/ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, regs); if (fault_signal_pending(fault, regs)) return; @@ -250,15 +250,6 @@ good_area: } if (flags & FAULT_FLAG_ALLOW_RETRY) { - if (fault & VM_FAULT_MAJOR) { - current->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, - 1, regs, address); - } else { - current->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, - 1, regs, address); - } if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; @@ -410,7 +401,7 @@ good_area: if (!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area; } - switch (handle_mm_fault(vma, address, flags)) { + switch (handle_mm_fault(vma, address, flags, NULL)) { case VM_FAULT_SIGBUS: case VM_FAULT_OOM: goto do_sigbus; diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index a3806614e4dc..0a6bcc85fba7 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -422,7 +422,7 @@ good_area: goto bad_area; } - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, regs); if (fault_signal_pending(fault, regs)) goto exit_exception; @@ -438,15 +438,6 @@ good_area: } if (flags & FAULT_FLAG_ALLOW_RETRY) { - if (fault & VM_FAULT_MAJOR) { - current->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, - 1, regs, address); - } else { - current->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, - 1, regs, address); - } if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile index 708cb6304c2d..f44355e46f31 100644 --- a/arch/sparc/vdso/Makefile +++ b/arch/sparc/vdso/Makefile @@ -54,7 +54,7 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE # optimize sibling calls. # CFL := $(PROFILING) -mcmodel=medlow -fPIC -O2 -fasynchronous-unwind-tables -m64 \ - $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \ + $(filter -g%,$(KBUILD_CFLAGS)) -fno-stack-protector \ -fno-omit-frame-pointer -foptimize-sibling-calls \ -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO @@ -93,7 +93,7 @@ KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out $(SPARC_REG_CFLAGS),$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 += -m32 -msoft-float -fpic -KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector) +KBUILD_CFLAGS_32 += -fno-stack-protector KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls) KBUILD_CFLAGS_32 += -fno-omit-frame-pointer KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING diff --git a/arch/um/Kconfig b/arch/um/Kconfig index ef69be17ff70..eb51fec75948 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -14,6 +14,7 @@ config UML select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_BUGVERBOSE + select NO_DMA select GENERIC_IRQ_SHOW select GENERIC_CPU_DEVICES select GENERIC_CLOCKEVENTS @@ -167,9 +168,6 @@ config MMAPPER This driver allows a host file to be used as emulated IO memory inside UML. 
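The sparc fault handlers above, and the um handler just below, show a conversion repeated across this merge: handle_mm_fault() now takes a struct pt_regs * as its fourth argument, and when that pointer is non-NULL the core mm code performs the major/minor fault bookkeeping and perf accounting itself, which is why each architecture's hand-rolled maj_flt/min_flt blocks are being deleted. A minimal sketch of the resulting arch-side slow path, assuming the four-argument signature shown in these hunks; the function name and label are illustrative, and the mmap_lock retaking on retry is omitted:

	#include <linux/mm.h>		/* handle_mm_fault(), VM_FAULT_*, FAULT_FLAG_* */
	#include <linux/sched/signal.h>	/* fault_signal_pending() */

	static void arch_do_fault(struct vm_area_struct *vma, unsigned long address,
				  unsigned int flags, struct pt_regs *regs)
	{
		vm_fault_t fault;
	retry:
		/*
		 * A non-NULL regs asks the core mm code to bump
		 * current->maj_flt or current->min_flt and to emit the
		 * PERF_COUNT_SW_PAGE_FAULTS_MAJ/MIN software events itself.
		 */
		fault = handle_mm_fault(vma, address, flags, regs);
		if (fault_signal_pending(fault, regs))
			return;
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;
			goto retry;	/* lock retaking omitted in this sketch */
		}
	}

Passing NULL, as the second sparc fault_32.c call and um's trap.c do, keeps the old behaviour of doing no accounting on that path.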
-config NO_DMA - def_bool y - config PGTABLE_LEVELS int default 3 if 3_LEVEL_PGTABLES diff --git a/arch/um/Makefile b/arch/um/Makefile index 3f27aa3ec0a6..1cea46ff9bb7 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile @@ -121,8 +121,7 @@ LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib $(call cc-option, -no-pie) CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \ - $(call cc-option, -fno-stack-protector,) \ - $(call cc-option, -fno-stack-protector-all,) + -fno-stack-protector $(call cc-option, -fno-stack-protector-all) # Options used by linker script export LDS_START := $(START) diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c index 351aee52aca6..a6c4bb6c2c01 100644 --- a/arch/um/drivers/virtio_uml.c +++ b/arch/um/drivers/virtio_uml.c @@ -385,7 +385,7 @@ static irqreturn_t vu_req_interrupt(int irq, void *data) } break; case VHOST_USER_SLAVE_IOTLB_MSG: - /* not supported - VIRTIO_F_IOMMU_PLATFORM */ + /* not supported - VIRTIO_F_ACCESS_PLATFORM */ case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG: /* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */ default: diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 2b3afa354a90..ad12f78bda7e 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -71,7 +71,7 @@ good_area: do { vm_fault_t fault; - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) goto out_nosemaphore; @@ -88,10 +88,6 @@ good_area: BUG(); } if (flags & FAULT_FLAG_ALLOW_RETRY) { - if (fault & VM_FAULT_MAJOR) - current->maj_flt++; - else - current->min_flt++; if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9a2849527dd7..7101ac64bb20 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -209,6 +209,7 @@ config X86 select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP select MMU_GATHER_RCU_TABLE_FREE if PARAVIRT + select HAVE_POSIX_CPU_TIMERS_TASK_WORK select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION select HAVE_FUNCTION_ARG_ACCESS_API diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 1e634d7ee6eb..4346ffb2e39f 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -36,8 +36,8 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ -mno-mmx -mno-sse -REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding) -REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector) +REALMODE_CFLAGS += -ffreestanding +REALMODE_CFLAGS += -fno-stack-protector REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member) REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4)) export REALMODE_CFLAGS diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index c08714ae76ec..3962f592633d 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -35,8 +35,8 @@ cflags-$(CONFIG_X86_32) := -march=i386 cflags-$(CONFIG_X86_64) := -mcmodel=small KBUILD_CFLAGS += $(cflags-y) KBUILD_CFLAGS += -mno-mmx -mno-sse -KBUILD_CFLAGS += $(call cc-option,-ffreestanding) -KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) +KBUILD_CFLAGS += -ffreestanding +KBUILD_CFLAGS += 
-fno-stack-protector KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) KBUILD_CFLAGS += $(call cc-disable-warning, gnu) KBUILD_CFLAGS += -Wno-pointer-sign diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 29b7d52143e9..df8c017e6161 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -449,8 +449,6 @@ .macro SWITCH_TO_KERNEL_STACK - ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV - BUG_IF_WRONG_CR3 SWITCH_TO_KERNEL_CR3 scratch_reg=%eax @@ -599,8 +597,6 @@ */ .macro SWITCH_TO_ENTRY_STACK - ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV - /* Bytes to copy */ movl $PTREGS_SIZE, %ecx @@ -872,17 +868,6 @@ SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE) * will ignore all of the single-step traps generated in this range. */ -#ifdef CONFIG_XEN_PV -/* - * Xen doesn't set %esp to be precisely what the normal SYSENTER - * entry point expects, so fix it up before using the normal path. - */ -SYM_CODE_START(xen_sysenter_target) - addl $5*4, %esp /* remove xen-provided frame */ - jmp .Lsysenter_past_esp -SYM_CODE_END(xen_sysenter_target) -#endif - /* * 32-bit SYSENTER entry. * @@ -965,9 +950,8 @@ SYM_FUNC_START(entry_SYSENTER_32) movl %esp, %eax call do_SYSENTER_32 - /* XEN PV guests always use IRET path */ - ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ - "jmp .Lsyscall_32_done", X86_FEATURE_XENPV + testl %eax, %eax + jz .Lsyscall_32_done STACKLEAK_ERASE @@ -1165,95 +1149,6 @@ SYM_FUNC_END(entry_INT80_32) #endif .endm -#ifdef CONFIG_PARAVIRT -SYM_CODE_START(native_iret) - iret - _ASM_EXTABLE(native_iret, asm_iret_error) -SYM_CODE_END(native_iret) -#endif - -#ifdef CONFIG_XEN_PV -/* - * See comment in entry_64.S for further explanation - * - * Note: This is not an actual IDT entry point. It's a XEN specific entry - * point and therefore named to match the 64-bit trampoline counterpart. - */ -SYM_FUNC_START(xen_asm_exc_xen_hypervisor_callback) - /* - * Check to see if we got the event in the critical - * region in xen_iret_direct, after we've reenabled - * events and checked for pending events. This simulates - * iret instruction's behaviour where it delivers a - * pending interrupt when enabling interrupts: - */ - cmpl $xen_iret_start_crit, (%esp) - jb 1f - cmpl $xen_iret_end_crit, (%esp) - jae 1f - call xen_iret_crit_fixup -1: - pushl $-1 /* orig_ax = -1 => not a system call */ - SAVE_ALL - ENCODE_FRAME_POINTER - - mov %esp, %eax - call xen_pv_evtchn_do_upcall - jmp handle_exception_return -SYM_FUNC_END(xen_asm_exc_xen_hypervisor_callback) - -/* - * Hypervisor uses this for application faults while it executes. - * We get here for two reasons: - * 1. Fault while reloading DS, ES, FS or GS - * 2. Fault while executing IRET - * Category 1 we fix up by reattempting the load, and zeroing the segment - * register if the load fails. - * Category 2 we fix up by jumping to do_iret_error. We cannot use the - * normal Linux return path in this case because if we use the IRET hypercall - * to pop the stack frame we end up in an infinite loop of failsafe callbacks. - * We distinguish between categories by maintaining a status value in EAX. 
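The entry_32.S deletions above and just below all follow from dropping 32-bit Xen PV guest support (XEN_PV grows a "depends on X86_64" near the end of this diff): xen_sysenter_target, the X86_FEATURE_XENPV ALTERNATIVEs, and — its descriptive comment ending right here — the xen_failsafe_callback body removed immediately below. In C terms, each removed ALTERNATIVE was a boot-patched feature test roughly equivalent to this sketch (helper name hypothetical):

#include <asm/cpufeature.h>

/*
 * Rough C analogue of: ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV
 * The asm form is patched in place at boot rather than tested at
 * runtime, but the effect is the same: a Xen PV guest skips the
 * entry-stack/CR3 switching it does not need.  With XEN_PV now
 * 64-bit only, the feature bit can never be set while this 32-bit
 * entry code is running, so the test is dead and can go.
 */
static __always_inline bool xenpv_skips_stack_switch(void)
{
	return cpu_feature_enabled(X86_FEATURE_XENPV);
}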
- */ -SYM_FUNC_START(xen_failsafe_callback) - pushl %eax - movl $1, %eax -1: mov 4(%esp), %ds -2: mov 8(%esp), %es -3: mov 12(%esp), %fs -4: mov 16(%esp), %gs - /* EAX == 0 => Category 1 (Bad segment) - EAX != 0 => Category 2 (Bad IRET) */ - testl %eax, %eax - popl %eax - lea 16(%esp), %esp - jz 5f - jmp asm_iret_error -5: pushl $-1 /* orig_ax = -1 => not a system call */ - SAVE_ALL - ENCODE_FRAME_POINTER - jmp handle_exception_return - -.section .fixup, "ax" -6: xorl %eax, %eax - movl %eax, 4(%esp) - jmp 1b -7: xorl %eax, %eax - movl %eax, 8(%esp) - jmp 2b -8: xorl %eax, %eax - movl %eax, 12(%esp) - jmp 3b -9: xorl %eax, %eax - movl %eax, 16(%esp) - jmp 4b -.previous - _ASM_EXTABLE(1b, 6b) - _ASM_EXTABLE(2b, 7b) - _ASM_EXTABLE(3b, 8b) - _ASM_EXTABLE(4b, 9b) -SYM_FUNC_END(xen_failsafe_callback) -#endif /* CONFIG_XEN_PV */ - SYM_CODE_START_LOCAL_NOALIGN(handle_exception) /* the function address is in %gs's slot on the stack */ SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1 diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index e31a75262c9c..9d1102873666 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -160,7 +160,7 @@ 146 i386 writev sys_writev compat_sys_writev 147 i386 getsid sys_getsid 148 i386 fdatasync sys_fdatasync -149 i386 _sysctl sys_sysctl compat_sys_sysctl +149 i386 _sysctl sys_ni_syscall 150 i386 mlock sys_mlock 151 i386 munlock sys_munlock 152 i386 mlockall sys_mlockall diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index 9d82078c949a..f30d6ae9a688 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -164,7 +164,7 @@ 153 common vhangup sys_vhangup 154 common modify_ldt sys_modify_ldt 155 common pivot_root sys_pivot_root -156 64 _sysctl sys_sysctl +156 64 _sysctl sys_ni_syscall 157 common prctl sys_prctl 158 common arch_prctl sys_arch_prctl 159 common adjtimex sys_adjtimex diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index 04e65f0698f6..215376d975a2 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -82,7 +82,7 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE # optimize sibling calls. 
# CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \ - $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \ + $(filter -g%,$(KBUILD_CFLAGS)) -fno-stack-protector \ -fno-omit-frame-pointer -foptimize-sibling-calls \ -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO @@ -151,7 +151,7 @@ KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic -KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector) +KBUILD_CFLAGS_32 += -fno-stack-protector KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls) KBUILD_CFLAGS_32 += -fno-omit-frame-pointer KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING diff --git a/arch/x86/entry/vdso/vdso32/note.S b/arch/x86/entry/vdso/vdso32/note.S index e78047d119f6..2cbd39939dc6 100644 --- a/arch/x86/entry/vdso/vdso32/note.S +++ b/arch/x86/entry/vdso/vdso32/note.S @@ -16,33 +16,3 @@ ELFNOTE_START(Linux, 0, "a") ELFNOTE_END BUILD_SALT - -#ifdef CONFIG_XEN -/* - * Add a special note telling glibc's dynamic linker a fake hardware - * flavor that it will use to choose the search path for libraries in the - * same way it uses real hardware capabilities like "mmx". - * We supply "nosegneg" as the fake capability, to indicate that we - * do not like negative offsets in instructions using segment overrides, - * since we implement those inefficiently. This makes it possible to - * install libraries optimized to avoid those access patterns in someplace - * like /lib/i686/tls/nosegneg. Note that an /etc/ld.so.conf.d/file - * corresponding to the bits here is needed to make ldconfig work right. - * It should contain: - * hwcap 1 nosegneg - * to match the mapping of bit to name that we give here. - * - * At runtime, the fake hardware feature will be considered to be present - * if its bit is set in the mask word. So, we start with the mask 0, and - * at boot time we set VDSO_NOTE_NONEGSEG_BIT if running under Xen. - */ - -#include "../../xen/vdso.h" /* Defines VDSO_NOTE_NONEGSEG_BIT. */ - -ELFNOTE_START(GNU, 2, "a") - .long 1 /* ncaps */ -VDSO32_NOTE_MASK: /* Symbol used by arch/x86/xen/setup.c */ - .long 0 /* mask */ - .byte VDSO_NOTE_NONEGSEG_BIT; .asciz "nosegneg" /* bit, name */ -ELFNOTE_END -#endif diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c index 68b38820b10e..67b411f7e8c4 100644 --- a/arch/x86/events/rapl.c +++ b/arch/x86/events/rapl.c @@ -130,11 +130,17 @@ struct rapl_pmus { struct rapl_pmu *pmus[]; }; +enum rapl_unit_quirk { + RAPL_UNIT_QUIRK_NONE, + RAPL_UNIT_QUIRK_INTEL_HSW, + RAPL_UNIT_QUIRK_INTEL_SPR, +}; + struct rapl_model { struct perf_msr *rapl_msrs; unsigned long events; unsigned int msr_power_unit; - bool apply_quirk; + enum rapl_unit_quirk unit_quirk; }; /* 1/2^hw_unit Joule */ @@ -612,14 +618,28 @@ static int rapl_check_hw_unit(struct rapl_model *rm) for (i = 0; i < NR_RAPL_DOMAINS; i++) rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL; + switch (rm->unit_quirk) { /* * DRAM domain on HSW server and KNL has fixed energy unit which can be * different than the unit from power unit MSR. See * "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2 * of 2. 
Datasheet, September 2014, Reference Number: 330784-001 " */ - if (rm->apply_quirk) + case RAPL_UNIT_QUIRK_INTEL_HSW: + rapl_hw_unit[PERF_RAPL_RAM] = 16; + break; + /* + * SPR shares the same DRAM domain energy unit as HSW, plus it + * also has a fixed energy unit for Psys domain. + */ + case RAPL_UNIT_QUIRK_INTEL_SPR: rapl_hw_unit[PERF_RAPL_RAM] = 16; + rapl_hw_unit[PERF_RAPL_PSYS] = 0; + break; + default: + break; + } + /* * Calculate the timer rate: @@ -665,7 +685,7 @@ static const struct attribute_group *rapl_attr_update[] = { &rapl_events_pkg_group, &rapl_events_ram_group, &rapl_events_gpu_group, - &rapl_events_gpu_group, + &rapl_events_psys_group, NULL, }; @@ -698,7 +718,6 @@ static struct rapl_model model_snb = { .events = BIT(PERF_RAPL_PP0) | BIT(PERF_RAPL_PKG) | BIT(PERF_RAPL_PP1), - .apply_quirk = false, .msr_power_unit = MSR_RAPL_POWER_UNIT, .rapl_msrs = intel_rapl_msrs, }; @@ -707,7 +726,6 @@ static struct rapl_model model_snbep = { .events = BIT(PERF_RAPL_PP0) | BIT(PERF_RAPL_PKG) | BIT(PERF_RAPL_RAM), - .apply_quirk = false, .msr_power_unit = MSR_RAPL_POWER_UNIT, .rapl_msrs = intel_rapl_msrs, }; @@ -717,7 +735,6 @@ static struct rapl_model model_hsw = { BIT(PERF_RAPL_PKG) | BIT(PERF_RAPL_RAM) | BIT(PERF_RAPL_PP1), - .apply_quirk = false, .msr_power_unit = MSR_RAPL_POWER_UNIT, .rapl_msrs = intel_rapl_msrs, }; @@ -726,7 +743,7 @@ static struct rapl_model model_hsx = { .events = BIT(PERF_RAPL_PP0) | BIT(PERF_RAPL_PKG) | BIT(PERF_RAPL_RAM), - .apply_quirk = true, + .unit_quirk = RAPL_UNIT_QUIRK_INTEL_HSW, .msr_power_unit = MSR_RAPL_POWER_UNIT, .rapl_msrs = intel_rapl_msrs, }; @@ -734,7 +751,7 @@ static struct rapl_model model_hsx = { static struct rapl_model model_knl = { .events = BIT(PERF_RAPL_PKG) | BIT(PERF_RAPL_RAM), - .apply_quirk = true, + .unit_quirk = RAPL_UNIT_QUIRK_INTEL_HSW, .msr_power_unit = MSR_RAPL_POWER_UNIT, .rapl_msrs = intel_rapl_msrs, }; @@ -745,14 +762,22 @@ static struct rapl_model model_skl = { BIT(PERF_RAPL_RAM) | BIT(PERF_RAPL_PP1) | BIT(PERF_RAPL_PSYS), - .apply_quirk = false, + .msr_power_unit = MSR_RAPL_POWER_UNIT, + .rapl_msrs = intel_rapl_msrs, +}; + +static struct rapl_model model_spr = { + .events = BIT(PERF_RAPL_PP0) | + BIT(PERF_RAPL_PKG) | + BIT(PERF_RAPL_RAM) | + BIT(PERF_RAPL_PSYS), + .unit_quirk = RAPL_UNIT_QUIRK_INTEL_SPR, .msr_power_unit = MSR_RAPL_POWER_UNIT, .rapl_msrs = intel_rapl_msrs, }; static struct rapl_model model_amd_fam17h = { .events = BIT(PERF_RAPL_PKG), - .apply_quirk = false, .msr_power_unit = MSR_AMD_RAPL_POWER_UNIT, .rapl_msrs = amd_rapl_msrs, }; @@ -787,6 +812,7 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &model_hsx), X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &model_skl), X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &model_skl), + X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &model_spr), X86_MATCH_VENDOR_FAM(AMD, 0x17, &model_amd_fam17h), X86_MATCH_VENDOR_FAM(HYGON, 0x18, &model_amd_fam17h), {}, diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h index 49bd6cf3eec9..7c0a52ca2f4d 100644 --- a/arch/x86/include/asm/device.h +++ b/arch/x86/include/asm/device.h @@ -3,9 +3,6 @@ #define _ASM_X86_DEVICE_H struct dev_archdata { -#ifdef CONFIG_IOMMU_API - void *iommu; /* hook for IOMMU specific extension */ -#endif }; struct pdev_archdata { diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index b9527a54db99..0f0dd645b594 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -26,9 +26,9 @@ #ifndef 
__ASSEMBLY__ #include <linux/kernel.h> -#include <asm/acpi.h> #include <asm/apicdef.h> #include <asm/page.h> +#include <asm/pgtable_types.h> #ifdef CONFIG_X86_32 #include <linux/threads.h> #include <asm/kmap_types.h> diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 60b944dd2df1..4f77b8f22e54 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -8,6 +8,7 @@ #include <asm/io.h> #include <asm/hyperv-tlfs.h> #include <asm/nospec-branch.h> +#include <asm/paravirt.h> typedef int (*hyperv_fill_flush_list_func)( struct hv_guest_mapping_flush_list *flush, @@ -54,6 +55,17 @@ typedef int (*hyperv_fill_flush_list_func)( vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK); #define hv_get_raw_timer() rdtsc_ordered() +/* + * Reference to pv_ops must be inline so objtool + * detection of noinstr violations can work correctly. + */ +static __always_inline void hv_setup_sched_clock(void *sched_clock) +{ +#ifdef CONFIG_PARAVIRT + pv_ops.time.sched_clock = sched_clock; +#endif +} + void hyperv_vector_handler(struct pt_regs *regs); static inline void hv_enable_stimer0_percpu_irq(int irq) {} diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h index 6e81788a30c1..28996fe19301 100644 --- a/arch/x86/include/asm/proto.h +++ b/arch/x86/include/asm/proto.h @@ -25,7 +25,7 @@ void entry_SYSENTER_compat(void); void __end_entry_SYSENTER_compat(void); void entry_SYSCALL_compat(void); void entry_INT80_compat(void); -#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV) +#ifdef CONFIG_XEN_PV void xen_entry_INT80_compat(void); #endif #endif diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index 6669164abadc..9646c300f128 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -301,7 +301,7 @@ static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node) extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE]; extern void early_ignore_irq(void); -#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV) +#ifdef CONFIG_XEN_PV extern const char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_IDT_HANDLER_SIZE]; #endif diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index e15f364efbcc..c0538f82c9a2 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -5,16 +5,6 @@ #include <linux/cpumask.h> #include <asm/percpu.h> -/* - * We need the APIC definitions automatically as part of 'smp.h' - */ -#ifdef CONFIG_X86_LOCAL_APIC -# include <asm/mpspec.h> -# include <asm/apic.h> -# ifdef CONFIG_X86_IO_APIC -# include <asm/io_apic.h> -# endif -#endif #include <asm/thread_info.h> #include <asm/cpumask.h> diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index b7b2624fba86..01a300a9700b 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -6,6 +6,7 @@ #define _ASM_X86_TSC_H #include <asm/processor.h> +#include <asm/cpufeature.h> /* * Standard way to access the cycle counter. 
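The smp.h hunk above is the key to the long run of one-line include additions that follows: <asm/smp.h> no longer pulls in the MP-spec and APIC headers for every user, so each file that actually touches the (IO-)APIC must now say so itself. The discipline, sketched as the include list such a file ends up with:

/*
 * After the cleanup, nothing below is implied by <asm/smp.h> anymore;
 * the apic/*.c, jailhouse.c, mpparse.c etc. hunks that follow add
 * exactly the subset each file needs.
 */
#include <linux/smp.h>		/* cpumask/percpu only */
#include <asm/apic.h>		/* local APIC */
#include <asm/io_apic.h>	/* IO-APIC, now explicit */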
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 2f3e8f2a958f..ecefaffd15d4 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -33,7 +33,7 @@ static inline void set_fs(mm_segment_t fs) set_thread_flag(TIF_FSCHECK); } -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define user_addr_max() (current->thread.addr_limit.seg) /* diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h index fb81fea99093..df01d7349d79 100644 --- a/arch/x86/include/asm/vdso/gettimeofday.h +++ b/arch/x86/include/asm/vdso/gettimeofday.h @@ -241,7 +241,8 @@ static u64 vread_hvclock(void) } #endif -static inline u64 __arch_get_hw_counter(s32 clock_mode) +static inline u64 __arch_get_hw_counter(s32 clock_mode, + const struct vdso_data *vd) { if (likely(clock_mode == VDSO_CLOCKMODE_TSC)) return (u64)rdtsc_ordered(); diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index d1175533d125..c3daf0aaa0ee 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -875,8 +875,6 @@ static void *__text_poke(void *addr, const void *opcode, size_t len) */ BUG_ON(!pages[0] || (cross_page_boundary && !pages[1])); - local_irq_save(flags); - /* * Map the page without the global bit, as TLB flushing is done with * flush_tlb_mm_range(), which is intended for non-global PTEs. @@ -893,6 +891,8 @@ static void *__text_poke(void *addr, const void *opcode, size_t len) */ VM_BUG_ON(!ptep); + local_irq_save(flags); + pte = mk_pte(pages[0], pgprot); set_pte_at(poking_mm, poking_addr, ptep, pte); @@ -942,8 +942,8 @@ static void *__text_poke(void *addr, const void *opcode, size_t len) */ BUG_ON(memcmp(addr, opcode, len)); - pte_unmap_unlock(ptep, ptl); local_irq_restore(flags); + pte_unmap_unlock(ptep, ptl); return addr; } diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index ccf726cc87b7..5f943b938167 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -46,6 +46,7 @@ #include <asm/proto.h> #include <asm/traps.h> #include <asm/apic.h> +#include <asm/acpi.h> #include <asm/io_apic.h> #include <asm/desc.h> #include <asm/hpet.h> diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c index 98c9bb75d185..780c702969b7 100644 --- a/arch/x86/kernel/apic/apic_noop.c +++ b/arch/x86/kernel/apic/apic_noop.c @@ -10,6 +10,7 @@ * like self-ipi, etc... 
*/ #include <linux/cpumask.h> +#include <linux/thread_info.h> #include <asm/apic.h> diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c index 38b5b51d42f6..98d015a4405a 100644 --- a/arch/x86/kernel/apic/bigsmp_32.c +++ b/arch/x86/kernel/apic/bigsmp_32.c @@ -9,6 +9,7 @@ #include <linux/smp.h> #include <asm/apic.h> +#include <asm/io_apic.h> #include "local.h" diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c index d1fc62a67320..34a992e275ef 100644 --- a/arch/x86/kernel/apic/hw_nmi.c +++ b/arch/x86/kernel/apic/hw_nmi.c @@ -9,6 +9,7 @@ * Bits copied from original nmi.c file * */ +#include <linux/thread_info.h> #include <asm/apic.h> #include <asm/nmi.h> diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index 6ca0f91372fd..387154e39e08 100644 --- a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c @@ -2,6 +2,7 @@ #include <linux/cpumask.h> #include <linux/smp.h> +#include <asm/io_apic.h> #include "local.h" diff --git a/arch/x86/kernel/apic/local.h b/arch/x86/kernel/apic/local.h index 04797f05ce94..a997d849509a 100644 --- a/arch/x86/kernel/apic/local.h +++ b/arch/x86/kernel/apic/local.h @@ -10,6 +10,7 @@ #include <linux/jump_label.h> +#include <asm/irq_vectors.h> #include <asm/apic.h> /* APIC flat 64 */ diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 67b33d67002f..7bda71def557 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -10,6 +10,7 @@ #include <linux/errno.h> #include <linux/smp.h> +#include <asm/io_apic.h> #include <asm/apic.h> #include <asm/acpi.h> diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c index 29f0e0984557..bd3835d6b535 100644 --- a/arch/x86/kernel/apic/probe_64.c +++ b/arch/x86/kernel/apic/probe_64.c @@ -8,6 +8,7 @@ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and * James Cleverdon. */ +#include <linux/thread_info.h> #include <asm/apic.h> #include "local.h" diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index dba6a83bc349..93792b457b81 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -17,8 +17,7 @@ KCOV_INSTRUMENT_perf_event.o := n KCSAN_SANITIZE_common.o := n # Make sure load_percpu_segment has no stackprotector -nostackp := $(call cc-option, -fno-stack-protector) -CFLAGS_common.o := $(nostackp) +CFLAGS_common.o := -fno-stack-protector obj-y := cacheinfo.o scattered.o topology.o obj-y += common.o diff --git a/arch/x86/kernel/cpu/acrn.c b/arch/x86/kernel/cpu/acrn.c index 1da9b1c9a2db..0b2c03943ac6 100644 --- a/arch/x86/kernel/cpu/acrn.c +++ b/arch/x86/kernel/cpu/acrn.c @@ -11,14 +11,15 @@ #include <linux/interrupt.h> #include <asm/apic.h> +#include <asm/cpufeatures.h> #include <asm/desc.h> #include <asm/hypervisor.h> #include <asm/idtentry.h> #include <asm/irq_regs.h> -static uint32_t __init acrn_detect(void) +static u32 __init acrn_detect(void) { - return hypervisor_cpuid_base("ACRNACRNACRN\0\0", 0); + return hypervisor_cpuid_base("ACRNACRNACRN", 0); } static void __init acrn_init_platform(void) @@ -29,12 +30,7 @@ static void __init acrn_init_platform(void) static bool acrn_x2apic_available(void) { - /* - * x2apic is not supported for now. Future enablement will have to check - * X86_FEATURE_X2APIC to determine whether x2apic is supported in the - * guest. 
- */ - return false; + return boot_cpu_has(X86_FEATURE_X2APIC); } static void (*acrn_intr_handler)(void); diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index d4806eac9325..dcc3d943c68f 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -15,6 +15,7 @@ #include <asm/cpu.h> #include <asm/spec-ctrl.h> #include <asm/smp.h> +#include <asm/numa.h> #include <asm/pci-direct.h> #include <asm/delay.h> #include <asm/debugreg.h> diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index f0b743a2fe9c..d3f0db463f96 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -31,6 +31,7 @@ #include <asm/intel-family.h> #include <asm/e820/api.h> #include <asm/hypervisor.h> +#include <asm/tlbflush.h> #include "cpu.h" @@ -1549,7 +1550,12 @@ static ssize_t l1tf_show_state(char *buf) static ssize_t itlb_multihit_show_state(char *buf) { - if (itlb_multihit_kvm_mitigation) + if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || + !boot_cpu_has(X86_FEATURE_VMX)) + return sprintf(buf, "KVM: Mitigation: VMX unsupported\n"); + else if (!(cr4_read_shadow() & X86_CR4_VMXE)) + return sprintf(buf, "KVM: Mitigation: VMX disabled\n"); + else if (itlb_multihit_kvm_mitigation) return sprintf(buf, "KVM: Mitigation: Split huge pages\n"); else return sprintf(buf, "KVM: Vulnerable\n"); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 965474d78cef..c5d6f17d9b9d 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -45,6 +45,7 @@ #include <asm/mtrr.h> #include <asm/hwcap2.h> #include <linux/numa.h> +#include <asm/numa.h> #include <asm/asm.h> #include <asm/bugs.h> #include <asm/cpu.h> diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 4e28c1fc8749..ac6c30e5801d 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -10,6 +10,7 @@ #include <asm/cpu.h> #include <asm/smp.h> +#include <asm/numa.h> #include <asm/cacheinfo.h> #include <asm/spec-ctrl.h> #include <asm/delay.h> diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index b6b7b38dff5f..59a1e3ce3f14 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -23,6 +23,7 @@ #include <asm/cmdline.h> #include <asm/traps.h> #include <asm/resctrl.h> +#include <asm/numa.h> #ifdef CONFIG_X86_64 #include <linux/topology.h> diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index af94f05a5c66..31125448b174 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -361,13 +361,6 @@ static void __init ms_hyperv_init_platform(void) #endif } -void hv_setup_sched_clock(void *sched_clock) -{ -#ifdef CONFIG_PARAVIRT - pv_ops.time.sched_clock = sched_clock; -#endif -} - const __initconst struct hypervisor_x86 x86_hyper_ms_hyperv = { .name = "Microsoft Hyper-V", .detect = ms_hyperv_platform, diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index fd87b59452a3..a8f3af257e26 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -230,7 +230,7 @@ static int elf_header_exclude_ranges(struct crash_mem *cmem) int ret = 0; /* Exclude the low 1M because it is always reserved */ - ret = crash_exclude_mem_range(cmem, 0, 1<<20); + ret = crash_exclude_mem_range(cmem, 0, (1<<20)-1); if (ret) return ret; diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index 8d85e00bb40a..a0e8fc7d85f1 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -20,6 +20,7 @@ #include 
<asm/irqdomain.h> #include <asm/hpet.h> #include <asm/apic.h> +#include <asm/io_apic.h> #include <asm/pci_x86.h> #include <asm/setup.h> #include <asm/i8259.h> diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 7a2bf884fede..038e19c0019e 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -611,6 +611,10 @@ static void check_xstate_against_struct(int nr) * This essentially double-checks what the cpu told us about * how large the XSAVE buffer needs to be. We are recalculating * it to be safe. + * + * Dynamic XSAVE features allocate their own buffers and are not + * covered by these checks. Only the size of the buffer for task->fpu + * is checked here. */ static void do_extra_xstate_size_checks(void) { @@ -673,6 +677,33 @@ static unsigned int __init get_xsaves_size(void) return ebx; } +/* + * Get the total size of the enabled xstates without the dynamic supervisor + * features. + */ +static unsigned int __init get_xsaves_size_no_dynamic(void) +{ + u64 mask = xfeatures_mask_dynamic(); + unsigned int size; + + if (!mask) + return get_xsaves_size(); + + /* Disable dynamic features. */ + wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor()); + + /* + * Ask the hardware what size is required of the buffer. + * This is the size required for the task->fpu buffer. + */ + size = get_xsaves_size(); + + /* Re-enable dynamic features so XSAVES will work on them again. */ + wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask); + + return size; +} + static unsigned int __init get_xsave_size(void) { unsigned int eax, ebx, ecx, edx; @@ -710,7 +741,7 @@ static int __init init_xstate_size(void) xsave_size = get_xsave_size(); if (boot_cpu_has(X86_FEATURE_XSAVES)) - possible_xstate_size = get_xsaves_size(); + possible_xstate_size = get_xsaves_size_no_dynamic(); else possible_xstate_size = xsave_size; diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index f66a6b90f954..7ed84c282233 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -134,38 +134,7 @@ SYM_CODE_START(startup_32) movl %eax,pa(initial_page_table+0xffc) #endif -#ifdef CONFIG_PARAVIRT - /* This is can only trip for a broken bootloader... */ - cmpw $0x207, pa(boot_params + BP_version) - jb .Ldefault_entry - - /* Paravirt-compatible boot parameters. Look to see what architecture - we're booting under. */ - movl pa(boot_params + BP_hardware_subarch), %eax - cmpl $num_subarch_entries, %eax - jae .Lbad_subarch - - movl pa(subarch_entries)(,%eax,4), %eax - subl $__PAGE_OFFSET, %eax - jmp *%eax - -.Lbad_subarch: -SYM_INNER_LABEL_ALIGN(xen_entry, SYM_L_WEAK) - /* Unknown implementation; there's really - nothing we can do at this point. */ - ud2a - - __INITDATA - -subarch_entries: - .long .Ldefault_entry /* normal x86/PC */ - .long xen_entry /* Xen hypervisor */ - .long .Ldefault_entry /* Moorestown MID */ -num_subarch_entries = (. 
- subarch_entries) / 4 -.previous -#else jmp .Ldefault_entry -#endif /* CONFIG_PARAVIRT */ SYM_CODE_END(startup_32) #ifdef CONFIG_HOTPLUG_CPU diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index dd73135d7cee..beb1bada1b0a 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -22,6 +22,8 @@ #include <asm/timer.h> #include <asm/hw_irq.h> #include <asm/desc.h> +#include <asm/io_apic.h> +#include <asm/acpi.h> #include <asm/apic.h> #include <asm/setup.h> #include <asm/i8259.h> diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c index 6eb8b50ea07e..4eb8f2d19a87 100644 --- a/arch/x86/kernel/jailhouse.c +++ b/arch/x86/kernel/jailhouse.c @@ -13,6 +13,8 @@ #include <linux/reboot.h> #include <linux/serial_8250.h> #include <asm/apic.h> +#include <asm/io_apic.h> +#include <asm/acpi.h> #include <asm/cpu.h> #include <asm/hypervisor.h> #include <asm/i8259.h> diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index c27b82b62c8b..411af4aa7b51 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -19,6 +19,8 @@ #include <linux/smp.h> #include <linux/pci.h> +#include <asm/io_apic.h> +#include <asm/acpi.h> #include <asm/irqdomain.h> #include <asm/mtrr.h> #include <asm/mpspec.h> diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index d6f946707270..9afefe325acb 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -390,7 +390,7 @@ unsigned long x86_fsgsbase_read_task(struct task_struct *task, */ mutex_lock(&task->mm->context.lock); ldt = task->mm->context.ldt; - if (unlikely(idx >= ldt->nr_entries)) + if (unlikely(!ldt || idx >= ldt->nr_entries)) base = 0; else base = get_desc_base(ldt->entries + idx); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index b9a68d8e06d8..3511736fbc74 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -25,6 +25,7 @@ #include <xen/xen.h> #include <asm/apic.h> +#include <asm/numa.h> #include <asm/bios_ebda.h> #include <asm/bugs.h> #include <asm/cpu.h> diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c index b8810ebbc8ae..0a2ec801b63f 100644 --- a/arch/x86/kernel/topology.c +++ b/arch/x86/kernel/topology.c @@ -31,6 +31,7 @@ #include <linux/init.h> #include <linux/smp.h> #include <linux/irq.h> +#include <asm/io_apic.h> #include <asm/cpu.h> static DEFINE_PER_CPU(struct x86_cpu, cpu_devices); diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c index 4fec6f3a1858..6555a857a1e6 100644 --- a/arch/x86/kernel/tsc_msr.c +++ b/arch/x86/kernel/tsc_msr.c @@ -7,6 +7,7 @@ */ #include <linux/kernel.h> +#include <linux/thread_info.h> #include <asm/apic.h> #include <asm/cpu_device_id.h> @@ -133,10 +134,15 @@ static const struct freq_desc freq_desc_ann = { .mask = 0x0f, }; -/* 24 MHz crystal? : 24 * 13 / 4 = 78 MHz */ +/* + * 24 MHz crystal? : 24 * 13 / 4 = 78 MHz + * Frequency step for Lightning Mountain SoC is fixed to 78 MHz, + * so all the frequency entries are 78000. 
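From the process_64.c hunk a few files up: x86_fsgsbase_read_task() must not trust mm->context.ldt to be non-NULL, since the LDT is only allocated once a task installs one, so the bounds check has to test the pointer before dereferencing it. A self-contained sketch of the fixed lookup, where demo_ldt_base() is an illustrative wrapper around the same logic:

#include <linux/sched.h>
#include <asm/mmu_context.h>
#include <asm/desc.h>

static unsigned long demo_ldt_base(struct task_struct *task, unsigned int idx)
{
	struct ldt_struct *ldt;
	unsigned long base;

	mutex_lock(&task->mm->context.lock);
	ldt = task->mm->context.ldt;	/* NULL until an LDT is installed */
	if (unlikely(!ldt || idx >= ldt->nr_entries))
		base = 0;		/* absent or out of range: zero base */
	else
		base = get_desc_base(ldt->entries + idx);
	mutex_unlock(&task->mm->context.lock);

	return base;
}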
+ */ static const struct freq_desc freq_desc_lgm = { .use_msr_plat = true, - .freqs = { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 }, + .freqs = { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000, + 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 }, .mask = 0x0f, }; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 539ea1cd6020..432ef34b9ea9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3581,6 +3581,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_SMALLER_MAXPHYADDR: r = (int) allow_smaller_maxphyaddr; break; + case KVM_CAP_STEAL_TIME: + r = sched_info_on(); + break; default: break; } @@ -10667,11 +10670,17 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, { struct kvm_kernel_irqfd *irqfd = container_of(cons, struct kvm_kernel_irqfd, consumer); + int ret; irqfd->producer = prod; + kvm_arch_start_assignment(irqfd->kvm); + ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, + prod->irq, irqfd->gsi, 1); + + if (ret) + kvm_arch_end_assignment(irqfd->kvm); - return kvm_x86_ops.update_pi_irte(irqfd->kvm, - prod->irq, irqfd->gsi, 1); + return ret; } void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, @@ -10694,6 +10703,8 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, if (ret) printk(KERN_INFO "irq bypass consumer (token %p) unregistration" " fails: %d\n", irqfd->consumer.token, ret); + + kvm_arch_end_assignment(irqfd->kvm); } int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 6110bce7237b..d46fff11f06f 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -24,7 +24,7 @@ ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_cmdline.o = -pg endif -CFLAGS_cmdline.o := $(call cc-option, -fno-stack-protector) +CFLAGS_cmdline.o := -fno-stack-protector endif inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index f7fd0e868c9c..5864219221ca 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -22,10 +22,9 @@ obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o mmap.o \ obj-y += pat/ # Make sure __phys_addr has no stackprotector -nostackp := $(call cc-option, -fno-stack-protector) -CFLAGS_physaddr.o := $(nostackp) -CFLAGS_setup_nx.o := $(nostackp) -CFLAGS_mem_encrypt_identity.o := $(nostackp) +CFLAGS_physaddr.o := -fno-stack-protector +CFLAGS_setup_nx.o := -fno-stack-protector +CFLAGS_mem_encrypt_identity.o := -fno-stack-protector CFLAGS_fault.o := -I $(srctree)/$(src)/../include/asm/trace diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 0c7643d9f7cb..35f1498e9832 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1139,7 +1139,7 @@ void do_user_addr_fault(struct pt_regs *regs, struct vm_area_struct *vma; struct task_struct *tsk; struct mm_struct *mm; - vm_fault_t fault, major = 0; + vm_fault_t fault; unsigned int flags = FAULT_FLAG_DEFAULT; tsk = current; @@ -1291,8 +1291,7 @@ good_area: * userland). The return to userland is identified whenever * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags. */ - fault = handle_mm_fault(vma, address, flags); - major |= fault & VM_FAULT_MAJOR; + fault = handle_mm_fault(vma, address, flags, regs); /* Quick path to respond to signals */ if (fault_signal_pending(fault, regs)) { @@ -1319,18 +1318,6 @@ good_area: return; } - /* - * Major/minor page fault accounting. 
If any of the events - * returned VM_FAULT_MAJOR, we account it as a major fault. - */ - if (major) { - tsk->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); - } else { - tsk->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); - } - check_v8086_mode(regs, address, tsk); } NOKPROBE_SYMBOL(do_user_addr_fault); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 4cb958419fb0..7c055259de3a 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -52,6 +52,7 @@ #include <asm/cpu_entry_area.h> #include <asm/init.h> #include <asm/pgtable_areas.h> +#include <asm/numa.h> #include "mm_internal.h" diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 3b246ae40c8f..a4ac13cc3fdc 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1452,6 +1452,15 @@ static unsigned long probe_memory_block_size(void) goto done; } + /* + * Use max block size to minimize overhead on bare metal, where + * alignment for memory hotplug isn't a concern. + */ + if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) { + bz = MAX_BLOCK_SIZE; + goto done; + } + /* Find the largest allowed block size that aligns to memory end */ for (bz = MAX_BLOCK_SIZE; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) { if (IS_ALIGNED(boot_mem_end, bz)) diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index b05f45e5e8e2..aa76ec2d359b 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -929,5 +929,4 @@ int memory_add_physaddr_to_nid(u64 start) nid = numa_meminfo.blk[0].nid; return nid; } -EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); #endif diff --git a/arch/x86/power/Makefile b/arch/x86/power/Makefile index 37923d715741..6907b523e856 100644 --- a/arch/x86/power/Makefile +++ b/arch/x86/power/Makefile @@ -3,8 +3,7 @@ OBJECT_FILES_NON_STANDARD_hibernate_asm_$(BITS).o := y # __restore_processor_state() restores %gs after S3 resume and so should not # itself be stack-protected -nostackp := $(call cc-option, -fno-stack-protector) -CFLAGS_cpu.o := $(nostackp) +CFLAGS_cpu.o := -fno-stack-protector obj-$(CONFIG_PM_SLEEP) += cpu.o obj-$(CONFIG_HIBERNATION) += hibernate_$(BITS).o hibernate_asm_$(BITS).o hibernate.o diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index 088bd764e0b7..95ea17a9d20c 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -32,9 +32,9 @@ KCOV_INSTRUMENT := n # make up the standalone purgatory.ro PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel -PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss +PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss -g0 PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING -PURGATORY_CFLAGS += $(call cc-option,-fno-stack-protector) +PURGATORY_CFLAGS += -fno-stack-protector # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That # in turn leaves some undefined symbols like __fentry__ in purgatory and not @@ -64,6 +64,9 @@ CFLAGS_sha256.o += $(PURGATORY_CFLAGS) CFLAGS_REMOVE_string.o += $(PURGATORY_CFLAGS_REMOVE) CFLAGS_string.o += $(PURGATORY_CFLAGS) +AFLAGS_REMOVE_setup-x86_$(BITS).o += -Wa,-gdwarf-2 +AFLAGS_REMOVE_entry64.o += -Wa,-gdwarf-2 + $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE $(call if_changed,ld) diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile index 0caddd6acb22..5943387e3f35 100644 --- a/arch/x86/um/vdso/Makefile +++ b/arch/x86/um/vdso/Makefile @@ -42,7 +42,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE # optimize sibling calls. 
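Two of the hunks above are behavioral fixes rather than cleanups. The kvm/x86.c one makes irq-bypass producer registration treat device assignment as a balanced resource around posted-interrupt routing: claim it before update_pi_irte() and release it again if that fails (the del_producer path gains the matching release). The shape of that fix, with only the demo wrapper name invented:

#include <linux/kvm_host.h>
#include <linux/kvm_irqfd.h>

static int demo_add_producer(struct kvm_kernel_irqfd *irqfd,
			     struct irq_bypass_producer *prod)
{
	int ret;

	irqfd->producer = prod;
	kvm_arch_start_assignment(irqfd->kvm);		/* claim first ... */
	ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, prod->irq,
					 irqfd->gsi, 1);
	if (ret)
		kvm_arch_end_assignment(irqfd->kvm);	/* ... undo on error */

	return ret;
}

The other fix, in init_64.c, short-circuits the memory-block-size probe on bare metal, where hotplug alignment is not a concern, and simply takes MAX_BLOCK_SIZE.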
# CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \ - $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \ + $(filter -g%,$(KBUILD_CFLAGS)) -fno-stack-protector \ -fno-omit-frame-pointer -foptimize-sibling-calls $(vobjs): KBUILD_CFLAGS += $(CFL) diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig index 1aded63a95cb..218acbd5c7a0 100644 --- a/arch/x86/xen/Kconfig +++ b/arch/x86/xen/Kconfig @@ -19,6 +19,7 @@ config XEN_PV bool "Xen PV guest support" default y depends on XEN + depends on X86_64 select PARAVIRT_XXL select XEN_HAVE_PVMMU select XEN_HAVE_VPMU @@ -50,7 +51,7 @@ config XEN_PVHVM_SMP config XEN_512GB bool "Limit Xen pv-domain memory to 512GB" - depends on XEN_PV && X86_64 + depends on XEN_PV default y help Limit paravirtualized user domains to 512GB of RAM. diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 084de77a109e..fc5c5ba4aacb 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -OBJECT_FILES_NON_STANDARD_xen-asm_$(BITS).o := y +OBJECT_FILES_NON_STANDARD_xen-asm.o := y ifdef CONFIG_FUNCTION_TRACER # Do not profile debug and lowlevel utilities @@ -9,9 +9,8 @@ CFLAGS_REMOVE_irq.o = -pg endif # Make sure early boot has no stackprotector -nostackp := $(call cc-option, -fno-stack-protector) -CFLAGS_enlighten_pv.o := $(nostackp) -CFLAGS_mmu_pv.o := $(nostackp) +CFLAGS_enlighten_pv.o := -fno-stack-protector +CFLAGS_mmu_pv.o := -fno-stack-protector obj-y += enlighten.o obj-y += mmu.o @@ -34,7 +33,6 @@ obj-$(CONFIG_XEN_PV) += mmu_pv.o obj-$(CONFIG_XEN_PV) += irq.o obj-$(CONFIG_XEN_PV) += multicalls.o obj-$(CONFIG_XEN_PV) += xen-asm.o -obj-$(CONFIG_XEN_PV) += xen-asm_$(BITS).o obj-$(CONFIG_XEN_PVH) += enlighten_pvh.o diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c index 5e53bfbe5823..e82fd1910dae 100644 --- a/arch/x86/xen/apic.c +++ b/arch/x86/xen/apic.c @@ -1,8 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/init.h> +#include <linux/thread_info.h> #include <asm/x86_init.h> #include <asm/apic.h> +#include <asm/io_apic.h> #include <asm/xen/hypercall.h> #include <xen/xen.h> @@ -58,10 +60,6 @@ static u32 xen_apic_read(u32 reg) if (reg == APIC_LVR) return 0x14; -#ifdef CONFIG_X86_32 - if (reg == APIC_LDR) - return SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); -#endif if (reg != APIC_ID) return 0; @@ -127,14 +125,6 @@ static int xen_phys_pkg_id(int initial_apic_id, int index_msb) return initial_apic_id >> index_msb; } -#ifdef CONFIG_X86_32 -static int xen_x86_32_early_logical_apicid(int cpu) -{ - /* Match with APIC_LDR read. Otherwise setup_local_APIC complains. */ - return 1 << cpu; -} -#endif - static void xen_noop(void) { } @@ -197,11 +187,6 @@ static struct apic xen_pv_apic = { .icr_write = xen_apic_icr_write, .wait_icr_idle = xen_noop, .safe_wait_icr_idle = xen_safe_apic_wait_icr_idle, - -#ifdef CONFIG_X86_32 - /* generic_processor_info and setup_local_APIC. 
*/ - .x86_32_early_logical_apicid = xen_x86_32_early_logical_apicid, -#endif }; static void __init xen_apic_check(void) diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c index 3e89b0067ff0..9e87ab010c82 100644 --- a/arch/x86/xen/enlighten_hvm.c +++ b/arch/x86/xen/enlighten_hvm.c @@ -11,6 +11,7 @@ #include <asm/cpu.h> #include <asm/smp.h> +#include <asm/io_apic.h> #include <asm/reboot.h> #include <asm/setup.h> #include <asm/idtentry.h> diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 2aab43a13a8c..22e741e0b10c 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -119,14 +119,6 @@ static void __init xen_banner(void) printk(KERN_INFO "Xen version: %d.%d%s%s\n", version >> 16, version & 0xffff, extra.extraversion, xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); - -#ifdef CONFIG_X86_32 - pr_warn("WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!\n" - "Support for running as 32-bit PV-guest under Xen will soon be removed\n" - "from the Linux kernel!\n" - "Please use either a 64-bit kernel or switch to HVM or PVH mode!\n" - "WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!\n"); -#endif } static void __init xen_pv_init_platform(void) @@ -353,15 +345,13 @@ static void set_aliased_prot(void *v, pgprot_t prot) pte_t *ptep; pte_t pte; unsigned long pfn; - struct page *page; unsigned char dummy; + void *va; ptep = lookup_address((unsigned long)v, &level); BUG_ON(ptep == NULL); pfn = pte_pfn(*ptep); - page = pfn_to_page(pfn); - pte = pfn_pte(pfn, prot); /* @@ -391,14 +381,10 @@ static void set_aliased_prot(void *v, pgprot_t prot) if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0)) BUG(); - if (!PageHighMem(page)) { - void *av = __va(PFN_PHYS(pfn)); + va = __va(PFN_PHYS(pfn)); - if (av != v) - if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0)) - BUG(); - } else - kmap_flush_unused(); + if (va != v && HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0)) + BUG(); preempt_enable(); } @@ -538,30 +524,12 @@ static void load_TLS_descriptor(struct thread_struct *t, static void xen_load_tls(struct thread_struct *t, unsigned int cpu) { /* - * XXX sleazy hack: If we're being called in a lazy-cpu zone - * and lazy gs handling is enabled, it means we're in a - * context switch, and %gs has just been saved. This means we - * can zero it out to prevent faults on exit from the - * hypervisor if the next process has no %gs. Either way, it - * has been saved, and the new value will get loaded properly. - * This will go away as soon as Xen has been modified to not - * save/restore %gs for normal hypercalls. - * - * On x86_64, this hack is not used for %gs, because gs points - * to KERNEL_GS_BASE (and uses it for PDA references), so we - * must not zero %gs on x86_64 - * - * For x86_64, we need to zero %fs, otherwise we may get an + * In lazy mode we need to zero %fs, otherwise we may get an * exception between the new %fs descriptor being loaded and * %fs being effectively cleared at __switch_to(). 
*/ - if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) { -#ifdef CONFIG_X86_32 - lazy_load_gs(0); -#else + if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) loadsegment(fs, 0); -#endif - } xen_mc_batch(); @@ -572,13 +540,11 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu) xen_mc_issue(PARAVIRT_LAZY_CPU); } -#ifdef CONFIG_X86_64 static void xen_load_gs_index(unsigned int idx) { if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx)) BUG(); } -#endif static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum, const void *ptr) @@ -597,7 +563,6 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum, preempt_enable(); } -#ifdef CONFIG_X86_64 void noist_exc_debug(struct pt_regs *regs); DEFINE_IDTENTRY_RAW(xenpv_exc_nmi) @@ -697,7 +662,6 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist) return true; } -#endif static int cvt_gate_to_trap(int vector, const gate_desc *val, struct trap_info *info) @@ -710,10 +674,8 @@ static int cvt_gate_to_trap(int vector, const gate_desc *val, info->vector = vector; addr = gate_offset(val); -#ifdef CONFIG_X86_64 if (!get_trap_addr((void **)&addr, val->bits.ist)) return 0; -#endif /* CONFIG_X86_64 */ info->address = addr; info->cs = gate_segment(val); @@ -958,15 +920,12 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err) static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) { int ret; -#ifdef CONFIG_X86_64 unsigned int which; u64 base; -#endif ret = 0; switch (msr) { -#ifdef CONFIG_X86_64 case MSR_FS_BASE: which = SEGBASE_FS; goto set; case MSR_KERNEL_GS_BASE: which = SEGBASE_GS_USER; goto set; case MSR_GS_BASE: which = SEGBASE_GS_KERNEL; goto set; @@ -976,7 +935,6 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) if (HYPERVISOR_set_segment_base(which, base) != 0) ret = -EIO; break; -#endif case MSR_STAR: case MSR_CSTAR: @@ -1058,9 +1016,7 @@ void __init xen_setup_vcpu_info_placement(void) static const struct pv_info xen_info __initconst = { .shared_kernel_pmd = 0, -#ifdef CONFIG_X86_64 .extra_user_64bit_cs = FLAT_USER_CS64, -#endif .name = "Xen", }; @@ -1086,18 +1042,14 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .read_pmc = xen_read_pmc, .iret = xen_iret, -#ifdef CONFIG_X86_64 .usergs_sysret64 = xen_sysret64, -#endif .load_tr_desc = paravirt_nop, .set_ldt = xen_set_ldt, .load_gdt = xen_load_gdt, .load_idt = xen_load_idt, .load_tls = xen_load_tls, -#ifdef CONFIG_X86_64 .load_gs_index = xen_load_gs_index, -#endif .alloc_ldt = xen_alloc_ldt, .free_ldt = xen_free_ldt, @@ -1364,15 +1316,7 @@ asmlinkage __visible void __init xen_start_kernel(void) /* keep using Xen gdt for now; no urgent need to change it */ -#ifdef CONFIG_X86_32 - pv_info.kernel_rpl = 1; - if (xen_feature(XENFEAT_supervisor_mode_kernel)) - pv_info.kernel_rpl = 0; -#else pv_info.kernel_rpl = 0; -#endif - /* set the limit of our address space */ - xen_reserve_top(); /* * We used to do this in xen_arch_setup, but that is too late @@ -1384,12 +1328,6 @@ asmlinkage __visible void __init xen_start_kernel(void) if (rc != 0) xen_raw_printk("physdev_op failed %d\n", rc); -#ifdef CONFIG_X86_32 - /* set up basic CPUID stuff */ - cpu_detect(&new_cpu_data); - set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU); - new_cpu_data.x86_capability[CPUID_1_EDX] = cpuid_edx(1); -#endif if (xen_start_info->mod_start) { if (xen_start_info->flags & SIF_MOD_START_PFN) @@ -1458,12 +1396,8 @@ asmlinkage __visible void __init xen_start_kernel(void) xen_efi_init(&boot_params); /* Start the 
world */ -#ifdef CONFIG_X86_32 - i386_start_kernel(); -#else cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */ x86_64_start_reservations((char *)__pa_symbol(&boot_params)); -#endif } static int xen_cpu_up_prepare_pv(unsigned int cpu) diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index a58d9c69807a..3273c985d3dd 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -86,19 +86,8 @@ #include "mmu.h" #include "debugfs.h" -#ifdef CONFIG_X86_32 -/* - * Identity map, in addition to plain kernel map. This needs to be - * large enough to allocate page table pages to allocate the rest. - * Each page can map 2MB. - */ -#define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4) -static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES); -#endif -#ifdef CONFIG_X86_64 /* l3 pud for userspace vsyscall mapping */ static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss; -#endif /* CONFIG_X86_64 */ /* * Protects atomic reservation decrease/increase against concurrent increases. @@ -280,10 +269,7 @@ static inline void __xen_set_pte(pte_t *ptep, pte_t pteval) if (!xen_batched_set_pte(ptep, pteval)) { /* * Could call native_set_pte() here and trap and - * emulate the PTE write but with 32-bit guests this - * needs two traps (one for each of the two 32-bit - * words in the PTE) so do one hypercall directly - * instead. + * emulate the PTE write, but a hypercall is much cheaper. */ struct mmu_update u; @@ -439,26 +425,6 @@ static void xen_set_pud(pud_t *ptr, pud_t val) xen_set_pud_hyper(ptr, val); } -#ifdef CONFIG_X86_PAE -static void xen_set_pte_atomic(pte_t *ptep, pte_t pte) -{ - trace_xen_mmu_set_pte_atomic(ptep, pte); - __xen_set_pte(ptep, pte); -} - -static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) -{ - trace_xen_mmu_pte_clear(mm, addr, ptep); - __xen_set_pte(ptep, native_make_pte(0)); -} - -static void xen_pmd_clear(pmd_t *pmdp) -{ - trace_xen_mmu_pmd_clear(pmdp); - set_pmd(pmdp, __pmd(0)); -} -#endif /* CONFIG_X86_PAE */ - __visible pmd_t xen_make_pmd(pmdval_t pmd) { pmd = pte_pfn_to_mfn(pmd); @@ -466,7 +432,6 @@ __visible pmd_t xen_make_pmd(pmdval_t pmd) } PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); -#ifdef CONFIG_X86_64 __visible pudval_t xen_pud_val(pud_t pud) { return pte_mfn_to_pfn(pud.pud); @@ -571,27 +536,27 @@ __visible p4d_t xen_make_p4d(p4dval_t p4d) } PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d); #endif /* CONFIG_PGTABLE_LEVELS >= 5 */ -#endif /* CONFIG_X86_64 */ -static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd, - int (*func)(struct mm_struct *mm, struct page *, enum pt_level), - bool last, unsigned long limit) +static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd, + void (*func)(struct mm_struct *mm, struct page *, + enum pt_level), + bool last, unsigned long limit) { - int i, nr, flush = 0; + int i, nr; nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD; for (i = 0; i < nr; i++) { if (!pmd_none(pmd[i])) - flush |= (*func)(mm, pmd_page(pmd[i]), PT_PTE); + (*func)(mm, pmd_page(pmd[i]), PT_PTE); } - return flush; } -static int xen_pud_walk(struct mm_struct *mm, pud_t *pud, - int (*func)(struct mm_struct *mm, struct page *, enum pt_level), - bool last, unsigned long limit) +static void xen_pud_walk(struct mm_struct *mm, pud_t *pud, + void (*func)(struct mm_struct *mm, struct page *, + enum pt_level), + bool last, unsigned long limit) { - int i, nr, flush = 0; + int i, nr; nr = last ? 
pud_index(limit) + 1 : PTRS_PER_PUD; for (i = 0; i < nr; i++) { @@ -602,29 +567,26 @@ static int xen_pud_walk(struct mm_struct *mm, pud_t *pud, pmd = pmd_offset(&pud[i], 0); if (PTRS_PER_PMD > 1) - flush |= (*func)(mm, virt_to_page(pmd), PT_PMD); - flush |= xen_pmd_walk(mm, pmd, func, - last && i == nr - 1, limit); + (*func)(mm, virt_to_page(pmd), PT_PMD); + xen_pmd_walk(mm, pmd, func, last && i == nr - 1, limit); } - return flush; } -static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d, - int (*func)(struct mm_struct *mm, struct page *, enum pt_level), - bool last, unsigned long limit) +static void xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d, + void (*func)(struct mm_struct *mm, struct page *, + enum pt_level), + bool last, unsigned long limit) { - int flush = 0; pud_t *pud; if (p4d_none(*p4d)) - return flush; + return; pud = pud_offset(p4d, 0); if (PTRS_PER_PUD > 1) - flush |= (*func)(mm, virt_to_page(pud), PT_PUD); - flush |= xen_pud_walk(mm, pud, func, last, limit); - return flush; + (*func)(mm, virt_to_page(pud), PT_PUD); + xen_pud_walk(mm, pud, func, last, limit); } /* @@ -636,32 +598,27 @@ static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d, * will be STACK_TOP_MAX, but at boot we need to pin up to * FIXADDR_TOP. * - * For 32-bit the important bit is that we don't pin beyond there, - * because then we start getting into Xen's ptes. - * - * For 64-bit, we must skip the Xen hole in the middle of the address - * space, just after the big x86-64 virtual hole. + * We must skip the Xen hole in the middle of the address space, just after + * the big x86-64 virtual hole. */ -static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, - int (*func)(struct mm_struct *mm, struct page *, - enum pt_level), - unsigned long limit) +static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, + void (*func)(struct mm_struct *mm, struct page *, + enum pt_level), + unsigned long limit) { - int i, nr, flush = 0; + int i, nr; unsigned hole_low = 0, hole_high = 0; /* The limit is the last byte to be touched */ limit--; BUG_ON(limit >= FIXADDR_TOP); -#ifdef CONFIG_X86_64 /* * 64-bit has a great big hole in the middle of the address * space, which contains the Xen mappings. */ hole_low = pgd_index(GUARD_HOLE_BASE_ADDR); hole_high = pgd_index(GUARD_HOLE_END_ADDR); -#endif nr = pgd_index(limit) + 1; for (i = 0; i < nr; i++) { @@ -674,22 +631,20 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, continue; p4d = p4d_offset(&pgd[i], 0); - flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit); + xen_p4d_walk(mm, p4d, func, i == nr - 1, limit); } /* Do the top level last, so that the callbacks can use it as a cue to do final things like tlb flushes. 
*/ - flush |= (*func)(mm, virt_to_page(pgd), PT_PGD); - - return flush; + (*func)(mm, virt_to_page(pgd), PT_PGD); } -static int xen_pgd_walk(struct mm_struct *mm, - int (*func)(struct mm_struct *mm, struct page *, - enum pt_level), - unsigned long limit) +static void xen_pgd_walk(struct mm_struct *mm, + void (*func)(struct mm_struct *mm, struct page *, + enum pt_level), + unsigned long limit) { - return __xen_pgd_walk(mm, mm->pgd, func, limit); + __xen_pgd_walk(mm, mm->pgd, func, limit); } /* If we're using split pte locks, then take the page's lock and @@ -722,26 +677,17 @@ static void xen_do_pin(unsigned level, unsigned long pfn) xen_extend_mmuext_op(&op); } -static int xen_pin_page(struct mm_struct *mm, struct page *page, - enum pt_level level) +static void xen_pin_page(struct mm_struct *mm, struct page *page, + enum pt_level level) { unsigned pgfl = TestSetPagePinned(page); - int flush; - - if (pgfl) - flush = 0; /* already pinned */ - else if (PageHighMem(page)) - /* kmaps need flushing if we found an unpinned - highpage */ - flush = 1; - else { + + if (!pgfl) { void *pt = lowmem_page_address(page); unsigned long pfn = page_to_pfn(page); struct multicall_space mcs = __xen_mc_entry(0); spinlock_t *ptl; - flush = 0; - /* * We need to hold the pagetable lock between the time * we make the pagetable RO and when we actually pin @@ -778,8 +724,6 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page, xen_mc_callback(xen_pte_unlock, ptl); } } - - return flush; } /* This is called just after a mm has been created, but it has not @@ -787,39 +731,22 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page, read-only, and can be pinned. */ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) { + pgd_t *user_pgd = xen_get_user_pgd(pgd); + trace_xen_mmu_pgd_pin(mm, pgd); xen_mc_batch(); - if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) { - /* re-enable interrupts for flushing */ - xen_mc_issue(0); + __xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT); - kmap_flush_unused(); + xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd))); - xen_mc_batch(); + if (user_pgd) { + xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD); + xen_do_pin(MMUEXT_PIN_L4_TABLE, + PFN_DOWN(__pa(user_pgd))); } -#ifdef CONFIG_X86_64 - { - pgd_t *user_pgd = xen_get_user_pgd(pgd); - - xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd))); - - if (user_pgd) { - xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD); - xen_do_pin(MMUEXT_PIN_L4_TABLE, - PFN_DOWN(__pa(user_pgd))); - } - } -#else /* CONFIG_X86_32 */ -#ifdef CONFIG_X86_PAE - /* Need to make sure unshared kernel PMD is pinnable */ - xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]), - PT_PMD); -#endif - xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd))); -#endif /* CONFIG_X86_64 */ xen_mc_issue(0); } @@ -854,11 +781,10 @@ void xen_mm_pin_all(void) spin_unlock(&pgd_lock); } -static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page, - enum pt_level level) +static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page, + enum pt_level level) { SetPagePinned(page); - return 0; } /* @@ -870,18 +796,16 @@ static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page, static void __init xen_after_bootmem(void) { static_branch_enable(&xen_struct_pages_ready); -#ifdef CONFIG_X86_64 SetPagePinned(virt_to_page(level3_user_vsyscall)); -#endif xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP); } -static int xen_unpin_page(struct mm_struct *mm, struct page *page, - enum pt_level level) +static void 
xen_unpin_page(struct mm_struct *mm, struct page *page, + enum pt_level level) { unsigned pgfl = TestClearPagePinned(page); - if (pgfl && !PageHighMem(page)) { + if (pgfl) { void *pt = lowmem_page_address(page); unsigned long pfn = page_to_pfn(page); spinlock_t *ptl = NULL; @@ -912,36 +836,24 @@ static int xen_unpin_page(struct mm_struct *mm, struct page *page, xen_mc_callback(xen_pte_unlock, ptl); } } - - return 0; /* never need to flush on unpin */ } /* Release a pagetables pages back as normal RW */ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd) { + pgd_t *user_pgd = xen_get_user_pgd(pgd); + trace_xen_mmu_pgd_unpin(mm, pgd); xen_mc_batch(); xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); -#ifdef CONFIG_X86_64 - { - pgd_t *user_pgd = xen_get_user_pgd(pgd); - - if (user_pgd) { - xen_do_pin(MMUEXT_UNPIN_TABLE, - PFN_DOWN(__pa(user_pgd))); - xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD); - } + if (user_pgd) { + xen_do_pin(MMUEXT_UNPIN_TABLE, + PFN_DOWN(__pa(user_pgd))); + xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD); } -#endif - -#ifdef CONFIG_X86_PAE - /* Need to make sure unshared kernel PMD is unpinned */ - xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]), - PT_PMD); -#endif __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT); @@ -1089,7 +1001,6 @@ static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn) BUG(); } -#ifdef CONFIG_X86_64 static void __init xen_cleanhighmap(unsigned long vaddr, unsigned long vaddr_end) { @@ -1273,17 +1184,15 @@ static void __init xen_pagetable_cleanhighmap(void) xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2)); xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base)); } -#endif static void __init xen_pagetable_p2m_setup(void) { xen_vmalloc_p2m_tree(); -#ifdef CONFIG_X86_64 xen_pagetable_p2m_free(); xen_pagetable_cleanhighmap(); -#endif + /* And revector! Bye bye old array */ xen_start_info->mfn_list = (unsigned long)xen_p2m_addr; } @@ -1420,6 +1329,8 @@ static void __xen_write_cr3(bool kernel, unsigned long cr3) } static void xen_write_cr3(unsigned long cr3) { + pgd_t *user_pgd = xen_get_user_pgd(__va(cr3)); + BUG_ON(preemptible()); xen_mc_batch(); /* disables interrupts */ @@ -1430,20 +1341,14 @@ static void xen_write_cr3(unsigned long cr3) __xen_write_cr3(true, cr3); -#ifdef CONFIG_X86_64 - { - pgd_t *user_pgd = xen_get_user_pgd(__va(cr3)); - if (user_pgd) - __xen_write_cr3(false, __pa(user_pgd)); - else - __xen_write_cr3(false, 0); - } -#endif + if (user_pgd) + __xen_write_cr3(false, __pa(user_pgd)); + else + __xen_write_cr3(false, 0); xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ } -#ifdef CONFIG_X86_64 /* * At the start of the day - when Xen launches a guest, it has already * built pagetables for the guest. 
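[Annotation: the pin/unpin pair above hinges on TestSetPagePinned()/TestClearPagePinned(): the returned old value makes both operations idempotent, and with highmem gone the "needs flush" bookkeeping disappears entirely. A stand-alone toy model of that idiom follows; struct toy_page and the printfs are invented, and the real code batches the RO/RW conversion and the pin as multicalls.]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_page {
	atomic_bool pinned;		/* models the PG_pinned page flag */
};

static void pin_page(struct toy_page *pg)
{
	/* atomic_exchange() returns the old value, like TestSetPagePinned() */
	if (atomic_exchange(&pg->pinned, true))
		return;			/* already pinned: nothing to do */
	printf("make PTEs RO, then issue pin hypercall\n");
}

static void unpin_page(struct toy_page *pg)
{
	if (!atomic_exchange(&pg->pinned, false))
		return;			/* was never pinned */
	printf("issue unpin hypercall, then make PTEs RW\n");
}

int main(void)
{
	struct toy_page pg = { .pinned = false };

	pin_page(&pg);
	pin_page(&pg);			/* second call is a no-op */
	unpin_page(&pg);
	return 0;
}

[Note the ordering asymmetry visible in the diff: __xen_pgd_pin() walks the lower levels first and pins the top-level table last, while __xen_pgd_unpin() unpins the top level first and only then walks the rest back to RW.]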
We diligently look over them @@ -1478,49 +1383,39 @@ static void __init xen_write_cr3_init(unsigned long cr3) xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ } -#endif static int xen_pgd_alloc(struct mm_struct *mm) { pgd_t *pgd = mm->pgd; - int ret = 0; + struct page *page = virt_to_page(pgd); + pgd_t *user_pgd; + int ret = -ENOMEM; BUG_ON(PagePinned(virt_to_page(pgd))); + BUG_ON(page->private != 0); -#ifdef CONFIG_X86_64 - { - struct page *page = virt_to_page(pgd); - pgd_t *user_pgd; + user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); + page->private = (unsigned long)user_pgd; - BUG_ON(page->private != 0); - - ret = -ENOMEM; - - user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); - page->private = (unsigned long)user_pgd; - - if (user_pgd != NULL) { + if (user_pgd != NULL) { #ifdef CONFIG_X86_VSYSCALL_EMULATION - user_pgd[pgd_index(VSYSCALL_ADDR)] = - __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE); + user_pgd[pgd_index(VSYSCALL_ADDR)] = + __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE); #endif - ret = 0; - } - - BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd)))); + ret = 0; } -#endif + + BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd)))); + return ret; } static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) { -#ifdef CONFIG_X86_64 pgd_t *user_pgd = xen_get_user_pgd(pgd); if (user_pgd) free_page((unsigned long)user_pgd); -#endif } /* @@ -1539,7 +1434,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) */ __visible pte_t xen_make_pte_init(pteval_t pte) { -#ifdef CONFIG_X86_64 unsigned long pfn; /* @@ -1553,7 +1447,7 @@ __visible pte_t xen_make_pte_init(pteval_t pte) pfn >= xen_start_info->first_p2m_pfn && pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames) pte &= ~_PAGE_RW; -#endif + pte = pte_pfn_to_mfn(pte); return native_make_pte(pte); } @@ -1561,13 +1455,6 @@ PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init); static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) { -#ifdef CONFIG_X86_32 - /* If there's an existing pte, then don't allow _PAGE_RW to be set */ - if (pte_mfn(pte) != INVALID_P2M_ENTRY - && pte_val_ma(*ptep) & _PAGE_PRESENT) - pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & - pte_val_ma(pte)); -#endif __xen_set_pte(ptep, pte); } @@ -1642,20 +1529,14 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, if (static_branch_likely(&xen_struct_pages_ready)) SetPagePinned(page); - if (!PageHighMem(page)) { - xen_mc_batch(); + xen_mc_batch(); - __set_pfn_prot(pfn, PAGE_KERNEL_RO); + __set_pfn_prot(pfn, PAGE_KERNEL_RO); - if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS) - __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); + if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS) + __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); - xen_mc_issue(PARAVIRT_LAZY_MMU); - } else { - /* make sure there are no stray mappings of - this page */ - kmap_flush_unused(); - } + xen_mc_issue(PARAVIRT_LAZY_MMU); } } @@ -1678,16 +1559,15 @@ static inline void xen_release_ptpage(unsigned long pfn, unsigned level) trace_xen_mmu_release_ptpage(pfn, level, pinned); if (pinned) { - if (!PageHighMem(page)) { - xen_mc_batch(); + xen_mc_batch(); - if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS) - __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); + if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS) + __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); - __set_pfn_prot(pfn, PAGE_KERNEL); + __set_pfn_prot(pfn, PAGE_KERNEL); + + xen_mc_issue(PARAVIRT_LAZY_MMU); - xen_mc_issue(PARAVIRT_LAZY_MMU); - } ClearPagePinned(page); } } @@ 
-1702,7 +1582,6 @@ static void xen_release_pmd(unsigned long pfn) xen_release_ptpage(pfn, PT_PMD); } -#ifdef CONFIG_X86_64 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn) { xen_alloc_ptpage(mm, pfn, PT_PUD); @@ -1712,20 +1591,6 @@ static void xen_release_pud(unsigned long pfn) { xen_release_ptpage(pfn, PT_PUD); } -#endif - -void __init xen_reserve_top(void) -{ -#ifdef CONFIG_X86_32 - unsigned long top = HYPERVISOR_VIRT_START; - struct xen_platform_parameters pp; - - if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) - top = pp.virt_start; - - reserve_top_address(-top); -#endif /* CONFIG_X86_32 */ -} /* * Like __va(), but returns address in the kernel mapping (which is @@ -1733,11 +1598,7 @@ void __init xen_reserve_top(void) */ static void * __init __ka(phys_addr_t paddr) { -#ifdef CONFIG_X86_64 return (void *)(paddr + __START_KERNEL_map); -#else - return __va(paddr); -#endif } /* Convert a machine address to physical address */ @@ -1771,56 +1632,7 @@ static void __init set_page_prot(void *addr, pgprot_t prot) { return set_page_prot_flags(addr, prot, UVMF_NONE); } -#ifdef CONFIG_X86_32 -static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) -{ - unsigned pmdidx, pteidx; - unsigned ident_pte; - unsigned long pfn; - - level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES, - PAGE_SIZE); - - ident_pte = 0; - pfn = 0; - for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) { - pte_t *pte_page; - - /* Reuse or allocate a page of ptes */ - if (pmd_present(pmd[pmdidx])) - pte_page = m2v(pmd[pmdidx].pmd); - else { - /* Check for free pte pages */ - if (ident_pte == LEVEL1_IDENT_ENTRIES) - break; - - pte_page = &level1_ident_pgt[ident_pte]; - ident_pte += PTRS_PER_PTE; - - pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE); - } - - /* Install mappings */ - for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { - pte_t pte; - if (pfn > max_pfn_mapped) - max_pfn_mapped = pfn; - - if (!pte_none(pte_page[pteidx])) - continue; - - pte = pfn_pte(pfn, PAGE_KERNEL_EXEC); - pte_page[pteidx] = pte; - } - } - - for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE) - set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO); - - set_page_prot(pmd, PAGE_KERNEL_RO); -} -#endif void __init xen_setup_machphys_mapping(void) { struct xen_machphys_mapping mapping; @@ -1831,13 +1643,8 @@ void __init xen_setup_machphys_mapping(void) } else { machine_to_phys_nr = MACH2PHYS_NR_ENTRIES; } -#ifdef CONFIG_X86_32 - WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1)) - < machine_to_phys_mapping); -#endif } -#ifdef CONFIG_X86_64 static void __init convert_pfn_mfn(void *v) { pte_t *pte = v; @@ -2168,105 +1975,6 @@ void __init xen_relocate_p2m(void) xen_start_info->nr_p2m_frames = n_frames; } -#else /* !CONFIG_X86_64 */ -static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD); -static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD); -RESERVE_BRK(fixup_kernel_pmd, PAGE_SIZE); -RESERVE_BRK(fixup_kernel_pte, PAGE_SIZE); - -static void __init xen_write_cr3_init(unsigned long cr3) -{ - unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir)); - - BUG_ON(read_cr3_pa() != __pa(initial_page_table)); - BUG_ON(cr3 != __pa(swapper_pg_dir)); - - /* - * We are switching to swapper_pg_dir for the first time (from - * initial_page_table) and therefore need to mark that page - * read-only and then pin it. - * - * Xen disallows sharing of kernel PMDs for PAE - * guests. 
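[Annotation: with the 32-bit branch gone, __ka() above reduces to the fixed-offset translation into the kernel-image mapping. Schematically (the constant below only illustrates the role of __START_KERNEL_map; do not rely on the value):]

#include <stdint.h>
#include <stdio.h>

#define TOY_START_KERNEL_MAP 0xffffffff80000000ULL	/* stand-in offset */

static void *toy_ka(uint64_t paddr)
{
	/* phys -> kernel-text mapping is a plain addition on x86-64 */
	return (void *)(uintptr_t)(paddr + TOY_START_KERNEL_MAP);
}

int main(void)
{
	printf("phys 1 MiB maps to %p\n", toy_ka(0x100000));
	return 0;
}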
Therefore we must copy the kernel PMD from - * initial_page_table into a new kernel PMD to be used in - * swapper_pg_dir. - */ - swapper_kernel_pmd = - extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); - copy_page(swapper_kernel_pmd, initial_kernel_pmd); - swapper_pg_dir[KERNEL_PGD_BOUNDARY] = - __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT); - set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO); - - set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO); - xen_write_cr3(cr3); - pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn); - - pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, - PFN_DOWN(__pa(initial_page_table))); - set_page_prot(initial_page_table, PAGE_KERNEL); - set_page_prot(initial_kernel_pmd, PAGE_KERNEL); - - pv_ops.mmu.write_cr3 = &xen_write_cr3; -} - -/* - * For 32 bit domains xen_start_info->pt_base is the pgd address which might be - * not the first page table in the page table pool. - * Iterate through the initial page tables to find the real page table base. - */ -static phys_addr_t __init xen_find_pt_base(pmd_t *pmd) -{ - phys_addr_t pt_base, paddr; - unsigned pmdidx; - - pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd)); - - for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) - if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) { - paddr = m2p(pmd[pmdidx].pmd); - pt_base = min(pt_base, paddr); - } - - return pt_base; -} - -void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) -{ - pmd_t *kernel_pmd; - - kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); - - xen_pt_base = xen_find_pt_base(kernel_pmd); - xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE; - - initial_kernel_pmd = - extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); - - max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024); - - copy_page(initial_kernel_pmd, kernel_pmd); - - xen_map_identity_early(initial_kernel_pmd, max_pfn); - - copy_page(initial_page_table, pgd); - initial_page_table[KERNEL_PGD_BOUNDARY] = - __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT); - - set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO); - set_page_prot(initial_page_table, PAGE_KERNEL_RO); - set_page_prot(empty_zero_page, PAGE_KERNEL_RO); - - pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); - - pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, - PFN_DOWN(__pa(initial_page_table))); - xen_write_cr3(__pa(initial_page_table)); - - memblock_reserve(xen_pt_base, xen_pt_size); -} -#endif /* CONFIG_X86_64 */ - void __init xen_reserve_special_pages(void) { phys_addr_t paddr; @@ -2300,12 +2008,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) switch (idx) { case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: -#ifdef CONFIG_X86_32 - case FIX_WP_TEST: -# ifdef CONFIG_HIGHMEM - case FIX_KMAP_BEGIN ... 
FIX_KMAP_END: -# endif -#elif defined(CONFIG_X86_VSYSCALL_EMULATION) +#ifdef CONFIG_X86_VSYSCALL_EMULATION case VSYSCALL_PAGE: #endif /* All local page mappings */ @@ -2357,9 +2060,7 @@ static void __init xen_post_allocator_init(void) pv_ops.mmu.set_pte = xen_set_pte; pv_ops.mmu.set_pmd = xen_set_pmd; pv_ops.mmu.set_pud = xen_set_pud; -#ifdef CONFIG_X86_64 pv_ops.mmu.set_p4d = xen_set_p4d; -#endif /* This will work as long as patching hasn't happened yet (which it hasn't) */ @@ -2367,15 +2068,11 @@ static void __init xen_post_allocator_init(void) pv_ops.mmu.alloc_pmd = xen_alloc_pmd; pv_ops.mmu.release_pte = xen_release_pte; pv_ops.mmu.release_pmd = xen_release_pmd; -#ifdef CONFIG_X86_64 pv_ops.mmu.alloc_pud = xen_alloc_pud; pv_ops.mmu.release_pud = xen_release_pud; -#endif pv_ops.mmu.make_pte = PV_CALLEE_SAVE(xen_make_pte); -#ifdef CONFIG_X86_64 pv_ops.mmu.write_cr3 = &xen_write_cr3; -#endif } static void xen_leave_lazy_mmu(void) @@ -2420,17 +2117,11 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .make_pte = PV_CALLEE_SAVE(xen_make_pte_init), .make_pgd = PV_CALLEE_SAVE(xen_make_pgd), -#ifdef CONFIG_X86_PAE - .set_pte_atomic = xen_set_pte_atomic, - .pte_clear = xen_pte_clear, - .pmd_clear = xen_pmd_clear, -#endif /* CONFIG_X86_PAE */ .set_pud = xen_set_pud_hyper, .make_pmd = PV_CALLEE_SAVE(xen_make_pmd), .pmd_val = PV_CALLEE_SAVE(xen_pmd_val), -#ifdef CONFIG_X86_64 .pud_val = PV_CALLEE_SAVE(xen_pud_val), .make_pud = PV_CALLEE_SAVE(xen_make_pud), .set_p4d = xen_set_p4d_hyper, @@ -2442,7 +2133,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .p4d_val = PV_CALLEE_SAVE(xen_p4d_val), .make_p4d = PV_CALLEE_SAVE(xen_make_p4d), #endif -#endif /* CONFIG_X86_64 */ .activate_mm = xen_activate_mm, .dup_mmap = xen_dup_mmap, diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 0acba2c712ab..be4151f42611 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -379,12 +379,8 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m) if (type == P2M_TYPE_PFN || i < chunk) { /* Use initial p2m page contents. */ -#ifdef CONFIG_X86_64 mfns = alloc_p2m_page(); copy_page(mfns, xen_p2m_addr + pfn); -#else - mfns = xen_p2m_addr + pfn; -#endif ptep = populate_extra_pte((unsigned long)(p2m + pfn)); set_pte(ptep, pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL)); @@ -467,7 +463,7 @@ EXPORT_SYMBOL_GPL(get_phys_to_machine); * Allocate new pmd(s). It is checked whether the old pmd is still in place. * If not, nothing is changed. This is okay as the only reason for allocating * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by a individual - * pmd. In case of PAE/x86-32 there are multiple pmds to allocate! + * pmd. 
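[Annotation: xen_post_allocator_init() above is the usual pv_ops pattern: boot runs with *_init variants of the mmu hooks, and once the early allocator is finished the ops table is patched in place with the final implementations. A compilable toy of that hand-off, with all names invented:]

#include <stdio.h>

struct toy_mmu_ops {
	void (*set_pte)(const char *why);
};

static void set_pte_init(const char *why)  { printf("early set_pte: %s\n", why); }
static void set_pte_final(const char *why) { printf("final set_pte: %s\n", why); }

static struct toy_mmu_ops ops = { .set_pte = set_pte_init };

static void post_allocator_init(void)
{
	ops.set_pte = set_pte_final;	/* patch the table in place */
}

int main(void)
{
	ops.set_pte("during boot");
	post_allocator_init();
	ops.set_pte("after boot");
	return 0;
}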
*/ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg) { diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 3566e37241d7..7eab14d56369 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -32,7 +32,6 @@ #include <xen/features.h> #include <xen/hvc-console.h> #include "xen-ops.h" -#include "vdso.h" #include "mmu.h" #define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024) @@ -545,13 +544,10 @@ static unsigned long __init xen_get_pages_limit(void) { unsigned long limit; -#ifdef CONFIG_X86_32 - limit = GB(64) / PAGE_SIZE; -#else limit = MAXMEM / PAGE_SIZE; if (!xen_initial_domain() && xen_512gb_limit) limit = GB(512) / PAGE_SIZE; -#endif + return limit; } @@ -722,17 +718,8 @@ static void __init xen_reserve_xen_mfnlist(void) if (!xen_is_e820_reserved(start, size)) return; -#ifdef CONFIG_X86_32 - /* - * Relocating the p2m on 32 bit system to an arbitrary virtual address - * is not supported, so just give up. - */ - xen_raw_console_write("Xen hypervisor allocated p2m list conflicts with E820 map\n"); - BUG(); -#else xen_relocate_p2m(); memblock_free(start, size); -#endif } /** @@ -921,20 +908,6 @@ char * __init xen_memory_setup(void) return "Xen"; } -/* - * Set the bit indicating "nosegneg" library variants should be used. - * We only need to bother in pure 32-bit mode; compat 32-bit processes - * can have un-truncated segments, so wrapping around is allowed. - */ -static void __init fiddle_vdso(void) -{ -#ifdef CONFIG_X86_32 - u32 *mask = vdso_image_32.data + - vdso_image_32.sym_VDSO32_NOTE_MASK; - *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT; -#endif -} - static int register_callback(unsigned type, const void *func) { struct callback_register callback = { @@ -951,11 +924,7 @@ void xen_enable_sysenter(void) int ret; unsigned sysenter_feature; -#ifdef CONFIG_X86_32 - sysenter_feature = X86_FEATURE_SEP; -#else sysenter_feature = X86_FEATURE_SYSENTER32; -#endif if (!boot_cpu_has(sysenter_feature)) return; @@ -967,7 +936,6 @@ void xen_enable_sysenter(void) void xen_enable_syscall(void) { -#ifdef CONFIG_X86_64 int ret; ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target); @@ -983,7 +951,6 @@ void xen_enable_syscall(void) if (ret != 0) setup_clear_cpu_cap(X86_FEATURE_SYSCALL32); } -#endif /* CONFIG_X86_64 */ } static void __init xen_pvmmu_arch_setup(void) @@ -1024,7 +991,6 @@ void __init xen_arch_setup(void) disable_cpuidle(); disable_cpufreq(); WARN_ON(xen_set_default_idle()); - fiddle_vdso(); #ifdef CONFIG_NUMA numa_off = 1; #endif diff --git a/arch/x86/xen/smp_hvm.c b/arch/x86/xen/smp_hvm.c index f8d39440b292..f5e7db4f82ab 100644 --- a/arch/x86/xen/smp_hvm.c +++ b/arch/x86/xen/smp_hvm.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/thread_info.h> #include <asm/smp.h> #include <xen/events.h> diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index 9ea598dcc132..c2ac319f11a4 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c @@ -29,6 +29,7 @@ #include <asm/idtentry.h> #include <asm/desc.h> #include <asm/cpu.h> +#include <asm/io_apic.h> #include <xen/interface/xen.h> #include <xen/interface/vcpu.h> @@ -210,15 +211,6 @@ static void __init xen_pv_smp_prepare_boot_cpu(void) * sure the old memory can be recycled. 
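[Annotation: xen_enable_sysenter()/xen_enable_syscall() above show the probe-and-degrade pattern: try to register the fast-path callback with the hypervisor and, on failure, clear the CPU feature bit so generic code takes the slow path. A schematic model; register_cb() is a made-up stand-in for the real callback-registration hypercall:]

#include <stdbool.h>
#include <stdio.h>

static bool toy_feature_syscall32 = true;

static int register_cb(int type)
{
	(void)type;
	return -1;	/* pretend the hypervisor rejected the callback */
}

static void enable_syscall32(void)
{
	if (register_cb(32) != 0) {
		/* analogue of setup_clear_cpu_cap(X86_FEATURE_SYSCALL32) */
		toy_feature_syscall32 = false;
		printf("compat syscall disabled, falling back\n");
	}
}

int main(void)
{
	enable_syscall32();
	printf("feature: %d\n", toy_feature_syscall32);
	return 0;
}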
*/ make_lowmem_page_readwrite(xen_initial_gdt); -#ifdef CONFIG_X86_32 - /* - * Xen starts us with XEN_FLAT_RING1_DS, but linux code - * expects __USER_DS - */ - loadsegment(ds, __USER_DS); - loadsegment(es, __USER_DS); -#endif - xen_filter_cpu_maps(); xen_setup_vcpu_info_placement(); @@ -299,10 +291,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) gdt = get_cpu_gdt_rw(cpu); -#ifdef CONFIG_X86_32 - ctxt->user_regs.fs = __KERNEL_PERCPU; - ctxt->user_regs.gs = __KERNEL_STACK_CANARY; -#endif memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt)); /* @@ -340,12 +328,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) ctxt->kernel_ss = __KERNEL_DS; ctxt->kernel_sp = task_top_of_stack(idle); -#ifdef CONFIG_X86_32 - ctxt->event_callback_cs = __KERNEL_CS; - ctxt->failsafe_callback_cs = __KERNEL_CS; -#else ctxt->gs_base_kernel = per_cpu_offset(cpu); -#endif ctxt->event_callback_eip = (unsigned long)xen_asm_exc_xen_hypervisor_callback; ctxt->failsafe_callback_eip = diff --git a/arch/x86/xen/suspend_pv.c b/arch/x86/xen/suspend_pv.c index 8303b58c79a9..cae9660f4c67 100644 --- a/arch/x86/xen/suspend_pv.c +++ b/arch/x86/xen/suspend_pv.c @@ -1,11 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> -#include <asm/fixmap.h> - #include <asm/xen/hypercall.h> #include <asm/xen/page.h> +#include <asm/fixmap.h> + #include "xen-ops.h" void xen_pv_pre_suspend(void) diff --git a/arch/x86/xen/vdso.h b/arch/x86/xen/vdso.h deleted file mode 100644 index 873c54c488fe..000000000000 --- a/arch/x86/xen/vdso.h +++ /dev/null @@ -1,6 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -/* Bit used for the pseudo-hwcap for non-negative segments. We use - bit 1 to avoid bugs in some versions of glibc when bit 0 is - used; the choice is otherwise arbitrary. */ -#define VDSO_NOTE_NONEGSEG_BIT 1 diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S index 508fe204520b..1cb0e84b9161 100644 --- a/arch/x86/xen/xen-asm.S +++ b/arch/x86/xen/xen-asm.S @@ -6,12 +6,18 @@ * operations here; the indirect forms are better handled in C. 
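[Annotation: cpu_initialize_context() above fills in the context a secondary vCPU starts from; with 32-bit gone, the segment-register fix-ups drop out and only the 64-bit gs_base_kernel per-CPU base remains. A toy illustration of the shape of that setup; the struct and all values are invented, loosely mirroring the real vcpu_guest_context fields:]

#include <stdint.h>
#include <stdio.h>

struct toy_vcpu_ctxt {
	uint64_t rip;		/* where the new vCPU starts executing */
	uint64_t kernel_sp;	/* top of the idle task's stack */
	uint64_t gs_base_kernel;/* per-CPU area base for this CPU */
};

static void init_ctxt(struct toy_vcpu_ctxt *c, int cpu,
		      uint64_t entry, uint64_t stack_top)
{
	c->rip = entry;
	c->kernel_sp = stack_top;
	c->gs_base_kernel = 0x10000ULL * cpu;	/* per_cpu_offset() stand-in */
}

int main(void)
{
	struct toy_vcpu_ctxt c;

	init_ctxt(&c, 1, 0xffffffff81000000ULL, 0xffffc90000004000ULL);
	printf("cpu1 starts at %#llx, gs base %#llx\n",
	       (unsigned long long)c.rip,
	       (unsigned long long)c.gs_base_kernel);
	return 0;
}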
*/ +#include <asm/errno.h> #include <asm/asm-offsets.h> #include <asm/percpu.h> #include <asm/processor-flags.h> -#include <asm/frame.h> +#include <asm/segment.h> +#include <asm/thread_info.h> #include <asm/asm.h> +#include <asm/frame.h> +#include <xen/interface/xen.h> + +#include <linux/init.h> #include <linux/linkage.h> /* @@ -76,11 +82,7 @@ SYM_FUNC_END(xen_save_fl_direct) */ SYM_FUNC_START(xen_restore_fl_direct) FRAME_BEGIN -#ifdef CONFIG_X86_64 testw $X86_EFLAGS_IF, %di -#else - testb $X86_EFLAGS_IF>>8, %ah -#endif setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask /* * Preempt here doesn't matter because that will deal with any @@ -104,15 +106,6 @@ SYM_FUNC_END(xen_restore_fl_direct) */ SYM_FUNC_START(check_events) FRAME_BEGIN -#ifdef CONFIG_X86_32 - push %eax - push %ecx - push %edx - call xen_force_evtchn_callback - pop %edx - pop %ecx - pop %eax -#else push %rax push %rcx push %rdx @@ -132,7 +125,6 @@ SYM_FUNC_START(check_events) pop %rdx pop %rcx pop %rax -#endif FRAME_END ret SYM_FUNC_END(check_events) @@ -151,3 +143,175 @@ SYM_FUNC_START(xen_read_cr2_direct) FRAME_END ret SYM_FUNC_END(xen_read_cr2_direct); + +.macro xen_pv_trap name +SYM_CODE_START(xen_\name) + pop %rcx + pop %r11 + jmp \name +SYM_CODE_END(xen_\name) +_ASM_NOKPROBE(xen_\name) +.endm + +xen_pv_trap asm_exc_divide_error +xen_pv_trap asm_xenpv_exc_debug +xen_pv_trap asm_exc_int3 +xen_pv_trap asm_xenpv_exc_nmi +xen_pv_trap asm_exc_overflow +xen_pv_trap asm_exc_bounds +xen_pv_trap asm_exc_invalid_op +xen_pv_trap asm_exc_device_not_available +xen_pv_trap asm_exc_double_fault +xen_pv_trap asm_exc_coproc_segment_overrun +xen_pv_trap asm_exc_invalid_tss +xen_pv_trap asm_exc_segment_not_present +xen_pv_trap asm_exc_stack_segment +xen_pv_trap asm_exc_general_protection +xen_pv_trap asm_exc_page_fault +xen_pv_trap asm_exc_spurious_interrupt_bug +xen_pv_trap asm_exc_coprocessor_error +xen_pv_trap asm_exc_alignment_check +#ifdef CONFIG_X86_MCE +xen_pv_trap asm_exc_machine_check +#endif /* CONFIG_X86_MCE */ +xen_pv_trap asm_exc_simd_coprocessor_error +#ifdef CONFIG_IA32_EMULATION +xen_pv_trap entry_INT80_compat +#endif +xen_pv_trap asm_exc_xen_hypervisor_callback + + __INIT +SYM_CODE_START(xen_early_idt_handler_array) + i = 0 + .rept NUM_EXCEPTION_VECTORS + pop %rcx + pop %r11 + jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE + i = i + 1 + .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc + .endr +SYM_CODE_END(xen_early_idt_handler_array) + __FINIT + +hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32 +/* + * Xen64 iret frame: + * + * ss + * rsp + * rflags + * cs + * rip <-- standard iret frame + * + * flags + * + * rcx } + * r11 }<-- pushed by hypercall page + * rsp->rax } + */ +SYM_CODE_START(xen_iret) + pushq $0 + jmp hypercall_iret +SYM_CODE_END(xen_iret) + +SYM_CODE_START(xen_sysret64) + /* + * We're already on the usermode stack at this point, but + * still with the kernel gs, so we can easily switch back. + * + * tss.sp2 is scratch space. 
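[Annotation: the frame comment above translates directly into a struct, lowest address first. Purely illustrative: the actual layout is fixed by the hypercall-page ABI, not by any C declaration.]

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct xen64_iret_frame {
	uint64_t rax;		/* %rsp points here; rax/r11/rcx are */
	uint64_t r11;		/* pushed by the hypercall page */
	uint64_t rcx;
	uint64_t flags;		/* word pushed by xen_iret/xen_sysret64 */
	uint64_t rip;		/* standard hardware iret frame from here up */
	uint64_t cs;
	uint64_t rflags;
	uint64_t rsp;
	uint64_t ss;
};

int main(void)
{
	printf("rip at +%zu, ss at +%zu\n",
	       offsetof(struct xen64_iret_frame, rip),
	       offsetof(struct xen64_iret_frame, ss));
	return 0;
}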
+ */ + movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2) + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp + + pushq $__USER_DS + pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2) + pushq %r11 + pushq $__USER_CS + pushq %rcx + + pushq $VGCF_in_syscall + jmp hypercall_iret +SYM_CODE_END(xen_sysret64) + +/* + * Xen handles syscall callbacks much like ordinary exceptions, which + * means we have: + * - kernel gs + * - kernel rsp + * - an iret-like stack frame on the stack (including rcx and r11): + * ss + * rsp + * rflags + * cs + * rip + * r11 + * rsp->rcx + */ + +/* Normal 64-bit system call target */ +SYM_FUNC_START(xen_syscall_target) + popq %rcx + popq %r11 + + /* + * Neither Xen nor the kernel really knows what the old SS and + * CS were. The kernel expects __USER_DS and __USER_CS, so + * report those values even though Xen will guess its own values. + */ + movq $__USER_DS, 4*8(%rsp) + movq $__USER_CS, 1*8(%rsp) + + jmp entry_SYSCALL_64_after_hwframe +SYM_FUNC_END(xen_syscall_target) + +#ifdef CONFIG_IA32_EMULATION + +/* 32-bit compat syscall target */ +SYM_FUNC_START(xen_syscall32_target) + popq %rcx + popq %r11 + + /* + * Neither Xen nor the kernel really knows what the old SS and + * CS were. The kernel expects __USER32_DS and __USER32_CS, so + * report those values even though Xen will guess its own values. + */ + movq $__USER32_DS, 4*8(%rsp) + movq $__USER32_CS, 1*8(%rsp) + + jmp entry_SYSCALL_compat_after_hwframe +SYM_FUNC_END(xen_syscall32_target) + +/* 32-bit compat sysenter target */ +SYM_FUNC_START(xen_sysenter_target) + /* + * NB: Xen is polite and clears TF from EFLAGS for us. This means + * that we don't need to guard against single step exceptions here. + */ + popq %rcx + popq %r11 + + /* + * Neither Xen nor the kernel really knows what the old SS and + * CS were. The kernel expects __USER32_DS and __USER32_CS, so + * report those values even though Xen will guess its own values. + */ + movq $__USER32_DS, 4*8(%rsp) + movq $__USER32_CS, 1*8(%rsp) + + jmp entry_SYSENTER_compat_after_hwframe +SYM_FUNC_END(xen_sysenter_target) + +#else /* !CONFIG_IA32_EMULATION */ + +SYM_FUNC_START_ALIAS(xen_syscall32_target) +SYM_FUNC_START(xen_sysenter_target) + lea 16(%rsp), %rsp /* strip %rcx, %r11 */ + mov $-ENOSYS, %rax + pushq $0 + jmp hypercall_iret +SYM_FUNC_END(xen_sysenter_target) +SYM_FUNC_END_ALIAS(xen_syscall32_target) + +#endif /* CONFIG_IA32_EMULATION */ diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S deleted file mode 100644 index 4757cec33abe..000000000000 --- a/arch/x86/xen/xen-asm_32.S +++ /dev/null @@ -1,185 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Asm versions of Xen pv-ops, suitable for direct use. - * - * We only bother with direct forms (ie, vcpu in pda) of the - * operations here; the indirect forms are better handled in C. - */ - -#include <asm/thread_info.h> -#include <asm/processor-flags.h> -#include <asm/segment.h> -#include <asm/asm.h> - -#include <xen/interface/xen.h> - -#include <linux/linkage.h> - -/* Pseudo-flag used for virtual NMI, which we don't implement yet */ -#define XEN_EFLAGS_NMI 0x80000000 - -/* - * This is run where a normal iret would be run, with the same stack setup: - * 8: eflags - * 4: cs - * esp-> 0: eip - * - * This attempts to make sure that any pending events are dealt with - * on return to usermode, but there is a small window in which an - * event can happen just before entering usermode. 
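[Annotation: the syscall stubs above pop %rcx/%r11 and then overwrite the CS/SS slots of the remaining five-word iret frame, because Xen's idea of the old selectors differs from what the generic entry code expects. The same fix-up in C, on a toy frame; the constants are the conventional x86-64 __USER_CS/__USER_DS selector encodings, used here only as examples:]

#include <stdint.h>
#include <stdio.h>

#define TOY_USER_CS 0x33
#define TOY_USER_DS 0x2b

/* After popping %rcx/%r11, the slots are rip, cs, rflags, rsp, ss. */
static void fix_frame(uint64_t frame[5])
{
	frame[1] = TOY_USER_CS;	/* movq $__USER_CS, 1*8(%rsp) */
	frame[4] = TOY_USER_DS;	/* movq $__USER_DS, 4*8(%rsp) */
}

int main(void)
{
	/* pretend values Xen left behind for cs (slot 1) and ss (slot 4) */
	uint64_t frame[5] = { 0x400000, 0xe033, 0x246, 0x7ffffff000, 0xe02b };

	fix_frame(frame);
	printf("cs=%#llx ss=%#llx\n",
	       (unsigned long long)frame[1], (unsigned long long)frame[4]);
	return 0;
}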
If the nested - * interrupt ends up setting one of the TIF_WORK_MASK pending work - * flags, they will not be tested again before returning to - * usermode. This means that a process can end up with pending work, - * which will be unprocessed until the process enters and leaves the - * kernel again, which could be an unbounded amount of time. This - * means that a pending signal or reschedule event could be - * indefinitely delayed. - * - * The fix is to notice a nested interrupt in the critical window, and - * if one occurs, then fold the nested interrupt into the current - * interrupt stack frame, and re-process it iteratively rather than - * recursively. This means that it will exit via the normal path, and - * all pending work will be dealt with appropriately. - * - * Because the nested interrupt handler needs to deal with the current - * stack state in whatever form its in, we keep things simple by only - * using a single register which is pushed/popped on the stack. - */ - -.macro POP_FS -1: - popw %fs -.pushsection .fixup, "ax" -2: movw $0, (%esp) - jmp 1b -.popsection - _ASM_EXTABLE(1b,2b) -.endm - -SYM_CODE_START(xen_iret) - /* test eflags for special cases */ - testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp) - jnz hyper_iret - - push %eax - ESP_OFFSET=4 # bytes pushed onto stack - - /* Store vcpu_info pointer for easy access */ -#ifdef CONFIG_SMP - pushw %fs - movl $(__KERNEL_PERCPU), %eax - movl %eax, %fs - movl %fs:xen_vcpu, %eax - POP_FS -#else - movl %ss:xen_vcpu, %eax -#endif - - /* check IF state we're restoring */ - testb $X86_EFLAGS_IF>>8, 8+1+ESP_OFFSET(%esp) - - /* - * Maybe enable events. Once this happens we could get a - * recursive event, so the critical region starts immediately - * afterwards. However, if that happens we don't end up - * resuming the code, so we don't have to be worried about - * being preempted to another CPU. - */ - setz %ss:XEN_vcpu_info_mask(%eax) -xen_iret_start_crit: - - /* check for unmasked and pending */ - cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax) - - /* - * If there's something pending, mask events again so we can - * jump back into exc_xen_hypervisor_callback. Otherwise do not - * touch XEN_vcpu_info_mask. - */ - jne 1f - movb $1, %ss:XEN_vcpu_info_mask(%eax) - -1: popl %eax - - /* - * From this point on the registers are restored and the stack - * updated, so we don't need to worry about it if we're - * preempted - */ -iret_restore_end: - - /* - * Jump to hypervisor_callback after fixing up the stack. - * Events are masked, so jumping out of the critical region is - * OK. - */ - je xen_asm_exc_xen_hypervisor_callback - -1: iret -xen_iret_end_crit: - _ASM_EXTABLE(1b, asm_iret_error) - -hyper_iret: - /* put this out of line since its very rarely used */ - jmp hypercall_page + __HYPERVISOR_iret * 32 -SYM_CODE_END(xen_iret) - - .globl xen_iret_start_crit, xen_iret_end_crit - -/* - * This is called by xen_asm_exc_xen_hypervisor_callback in entry_32.S when it sees - * that the EIP at the time of interrupt was between - * xen_iret_start_crit and xen_iret_end_crit. 
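[Annotation: the removed 32-bit comment above describes the classic fold-and-iterate fix: rather than letting a nested event start a recursive handler, it is merged into the current frame and the exit path loops until nothing is pending. The control flow, reduced to portable C; everything here is schematic:]

#include <stdbool.h>
#include <stdio.h>

static bool event_pending;

static void raise_event(void) { event_pending = true; }

static void return_to_user(void)
{
	do {
		event_pending = false;
		printf("process pending work\n");
		/* a nested event arriving here just sets the flag again,
		 * so we re-run the loop instead of recursing */
	} while (event_pending);
	printf("iret to usermode\n");
}

int main(void)
{
	raise_event();
	return_to_user();
	return 0;
}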
- * - * The stack format at this point is: - * ---------------- - * ss : (ss/esp may be present if we came from usermode) - * esp : - * eflags } outer exception info - * cs } - * eip } - * ---------------- - * eax : outer eax if it hasn't been restored - * ---------------- - * eflags } - * cs } nested exception info - * eip } - * return address : (into xen_asm_exc_xen_hypervisor_callback) - * - * In order to deliver the nested exception properly, we need to discard the - * nested exception frame such that when we handle the exception, we do it - * in the context of the outer exception rather than starting a new one. - * - * The only caveat is that if the outer eax hasn't been restored yet (i.e. - * it's still on stack), we need to restore its value here. -*/ -.pushsection .noinstr.text, "ax" -SYM_CODE_START(xen_iret_crit_fixup) - /* - * Paranoia: Make sure we're really coming from kernel space. - * One could imagine a case where userspace jumps into the - * critical range address, but just before the CPU delivers a - * PF, it decides to deliver an interrupt instead. Unlikely? - * Definitely. Easy to avoid? Yes. - */ - testb $2, 2*4(%esp) /* nested CS */ - jnz 2f - - /* - * If eip is before iret_restore_end then stack - * hasn't been restored yet. - */ - cmpl $iret_restore_end, 1*4(%esp) - jae 1f - - movl 4*4(%esp), %eax /* load outer EAX */ - ret $4*4 /* discard nested EIP, CS, and EFLAGS as - * well as the just restored EAX */ - -1: - ret $3*4 /* discard nested EIP, CS, and EFLAGS */ - -2: - ret -SYM_CODE_END(xen_iret_crit_fixup) -.popsection diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S deleted file mode 100644 index aab1d99b2b48..000000000000 --- a/arch/x86/xen/xen-asm_64.S +++ /dev/null @@ -1,192 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Asm versions of Xen pv-ops, suitable for direct use. - * - * We only bother with direct forms (ie, vcpu in pda) of the - * operations here; the indirect forms are better handled in C. 
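[Annotation: xen_iret_crit_fixup() above can be paraphrased in C to make the slot arithmetic visible. This is a loose model, not the real logic: the asm also consumes its own return address via ret $n, and the indices below merely mirror the 2*4/1*4/4*4 operands. IRET_RESTORE_END is a toy label address.]

#include <stdint.h>
#include <stdio.h>

#define IRET_RESTORE_END 0x2000u

/* slots: [0] return address, [1] nested eip, [2] nested cs,
 * [3] nested eflags, [4] outer eax */
static int crit_fixup(const uint32_t *sp, uint32_t *eax)
{
	if (sp[2] & 2)			/* nested CS from userspace: leave it */
		return 0;
	if (sp[1] >= IRET_RESTORE_END)	/* outer regs already restored */
		return 3;		/* discard nested eip, cs, eflags */
	*eax = sp[4];			/* reload not-yet-restored outer %eax */
	return 4;			/* also discard the stale eax slot */
}

int main(void)
{
	uint32_t stack[] = { 0x1111, 0x1abc, 0x10, 0x246, 0xbeef };
	uint32_t eax = 0;
	int drop = crit_fixup(stack, &eax);

	printf("discard %d slots, eax=%#x\n", drop, eax);
	return 0;
}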
- */ - -#include <asm/errno.h> -#include <asm/percpu.h> -#include <asm/processor-flags.h> -#include <asm/segment.h> -#include <asm/asm-offsets.h> -#include <asm/thread_info.h> -#include <asm/asm.h> - -#include <xen/interface/xen.h> - -#include <linux/init.h> -#include <linux/linkage.h> - -.macro xen_pv_trap name -SYM_CODE_START(xen_\name) - pop %rcx - pop %r11 - jmp \name -SYM_CODE_END(xen_\name) -_ASM_NOKPROBE(xen_\name) -.endm - -xen_pv_trap asm_exc_divide_error -xen_pv_trap asm_xenpv_exc_debug -xen_pv_trap asm_exc_int3 -xen_pv_trap asm_xenpv_exc_nmi -xen_pv_trap asm_exc_overflow -xen_pv_trap asm_exc_bounds -xen_pv_trap asm_exc_invalid_op -xen_pv_trap asm_exc_device_not_available -xen_pv_trap asm_exc_double_fault -xen_pv_trap asm_exc_coproc_segment_overrun -xen_pv_trap asm_exc_invalid_tss -xen_pv_trap asm_exc_segment_not_present -xen_pv_trap asm_exc_stack_segment -xen_pv_trap asm_exc_general_protection -xen_pv_trap asm_exc_page_fault -xen_pv_trap asm_exc_spurious_interrupt_bug -xen_pv_trap asm_exc_coprocessor_error -xen_pv_trap asm_exc_alignment_check -#ifdef CONFIG_X86_MCE -xen_pv_trap asm_exc_machine_check -#endif /* CONFIG_X86_MCE */ -xen_pv_trap asm_exc_simd_coprocessor_error -#ifdef CONFIG_IA32_EMULATION -xen_pv_trap entry_INT80_compat -#endif -xen_pv_trap asm_exc_xen_hypervisor_callback - - __INIT -SYM_CODE_START(xen_early_idt_handler_array) - i = 0 - .rept NUM_EXCEPTION_VECTORS - pop %rcx - pop %r11 - jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - i = i + 1 - .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc - .endr -SYM_CODE_END(xen_early_idt_handler_array) - __FINIT - -hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32 -/* - * Xen64 iret frame: - * - * ss - * rsp - * rflags - * cs - * rip <-- standard iret frame - * - * flags - * - * rcx } - * r11 }<-- pushed by hypercall page - * rsp->rax } - */ -SYM_CODE_START(xen_iret) - pushq $0 - jmp hypercall_iret -SYM_CODE_END(xen_iret) - -SYM_CODE_START(xen_sysret64) - /* - * We're already on the usermode stack at this point, but - * still with the kernel gs, so we can easily switch back. - * - * tss.sp2 is scratch space. - */ - movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2) - movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp - - pushq $__USER_DS - pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2) - pushq %r11 - pushq $__USER_CS - pushq %rcx - - pushq $VGCF_in_syscall - jmp hypercall_iret -SYM_CODE_END(xen_sysret64) - -/* - * Xen handles syscall callbacks much like ordinary exceptions, which - * means we have: - * - kernel gs - * - kernel rsp - * - an iret-like stack frame on the stack (including rcx and r11): - * ss - * rsp - * rflags - * cs - * rip - * r11 - * rsp->rcx - */ - -/* Normal 64-bit system call target */ -SYM_FUNC_START(xen_syscall_target) - popq %rcx - popq %r11 - - /* - * Neither Xen nor the kernel really knows what the old SS and - * CS were. The kernel expects __USER_DS and __USER_CS, so - * report those values even though Xen will guess its own values. - */ - movq $__USER_DS, 4*8(%rsp) - movq $__USER_CS, 1*8(%rsp) - - jmp entry_SYSCALL_64_after_hwframe -SYM_FUNC_END(xen_syscall_target) - -#ifdef CONFIG_IA32_EMULATION - -/* 32-bit compat syscall target */ -SYM_FUNC_START(xen_syscall32_target) - popq %rcx - popq %r11 - - /* - * Neither Xen nor the kernel really knows what the old SS and - * CS were. The kernel expects __USER32_DS and __USER32_CS, so - * report those values even though Xen will guess its own values. 
- */ - movq $__USER32_DS, 4*8(%rsp) - movq $__USER32_CS, 1*8(%rsp) - - jmp entry_SYSCALL_compat_after_hwframe -SYM_FUNC_END(xen_syscall32_target) - -/* 32-bit compat sysenter target */ -SYM_FUNC_START(xen_sysenter_target) - /* - * NB: Xen is polite and clears TF from EFLAGS for us. This means - * that we don't need to guard against single step exceptions here. - */ - popq %rcx - popq %r11 - - /* - * Neither Xen nor the kernel really knows what the old SS and - * CS were. The kernel expects __USER32_DS and __USER32_CS, so - * report those values even though Xen will guess its own values. - */ - movq $__USER32_DS, 4*8(%rsp) - movq $__USER32_CS, 1*8(%rsp) - - jmp entry_SYSENTER_compat_after_hwframe -SYM_FUNC_END(xen_sysenter_target) - -#else /* !CONFIG_IA32_EMULATION */ - -SYM_FUNC_START_ALIAS(xen_syscall32_target) -SYM_FUNC_START(xen_sysenter_target) - lea 16(%rsp), %rsp /* strip %rcx, %r11 */ - mov $-ENOSYS, %rax - pushq $0 - jmp hypercall_iret -SYM_FUNC_END(xen_sysenter_target) -SYM_FUNC_END_ALIAS(xen_syscall32_target) - -#endif /* CONFIG_IA32_EMULATION */ diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S index 1ba601df3a37..2d7c8f34f56c 100644 --- a/arch/x86/xen/xen-head.S +++ b/arch/x86/xen/xen-head.S @@ -35,13 +35,8 @@ SYM_CODE_START(startup_xen) rep __ASM_SIZE(stos) mov %_ASM_SI, xen_start_info -#ifdef CONFIG_X86_64 mov initial_stack(%rip), %rsp -#else - mov initial_stack, %esp -#endif -#ifdef CONFIG_X86_64 /* Set up %gs. * * The base of %gs always points to fixed_percpu_data. If the @@ -53,7 +48,6 @@ SYM_CODE_START(startup_xen) movq $INIT_PER_CPU_VAR(fixed_percpu_data),%rax cdq wrmsr -#endif call xen_start_kernel SYM_CODE_END(startup_xen) diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 53b224fd6177..45d556f71858 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -33,7 +33,6 @@ void xen_setup_mfn_list_list(void); void xen_build_mfn_list_list(void); void xen_setup_machphys_mapping(void); void xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn); -void xen_reserve_top(void); void __init xen_reserve_special_pages(void); void __init xen_pt_check_e820(void); diff --git a/arch/xtensa/boot/boot-elf/Makefile b/arch/xtensa/boot/boot-elf/Makefile index 0ebc9827f7e5..f7c775d53012 100644 --- a/arch/xtensa/boot/boot-elf/Makefile +++ b/arch/xtensa/boot/boot-elf/Makefile @@ -26,7 +26,7 @@ $(obj)/Image.o: $(obj)/../vmlinux.bin $(OBJS) $(OBJS) $@ $(obj)/../Image.elf: $(obj)/Image.o $(obj)/boot.lds - $(Q)$(LD) $(KBUILD_LDFLAGS) $(LDFLAGS_vmlinux) \ + $(Q)$(LD) $(KBUILD_LDFLAGS) \ -T $(obj)/boot.lds \ --build-id=none \ -o $@ $(obj)/Image.o diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index e57f0d0a88d8..b9758119feca 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -35,7 +35,7 @@ #define get_fs() (current->thread.current_ds) #define set_fs(val) (current->thread.current_ds = (val)) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define __kernel_ok (uaccess_kernel()) #define __user_ok(addr, size) \ diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl index d216ccba42f7..6276e3c2d3fc 100644 --- a/arch/xtensa/kernel/syscalls/syscall.tbl +++ b/arch/xtensa/kernel/syscalls/syscall.tbl @@ -222,7 +222,7 @@ 204 common quotactl sys_quotactl # 205 was old nfsservctl 205 common nfsservctl sys_ni_syscall -206 common _sysctl sys_sysctl +206 common _sysctl sys_ni_syscall 207 common 
bdflush				sys_bdflush
208	common	uname				sys_newuname
209	common	sysinfo				sys_sysinfo
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index c128dcc7c85b..7666408ce12a 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -72,6 +72,9 @@ void do_page_fault(struct pt_regs *regs)
 
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
+
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
 retry:
 	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
@@ -107,7 +110,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags, regs);
 
 	if (fault_signal_pending(fault, regs))
 		return;
@@ -122,10 +125,6 @@ good_area:
 			BUG();
 	}
 
 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (fault & VM_FAULT_MAJOR)
-			current->maj_flt++;
-		else
-			current->min_flt++;
 		if (fault & VM_FAULT_RETRY) {
 			flags |= FAULT_FLAG_TRIED;
@@ -139,12 +138,6 @@
 	}
 
 	mmap_read_unlock(mm);
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-	if (flags & VM_FAULT_MAJOR)
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-	else
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
-
 	return;
 
 	/* Something tried to access memory that isn't in our memory map..
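[Annotation: the xtensa hunks above are part of the tree-wide fault-accounting rework: do_page_fault() now emits only the plain PERF_COUNT_SW_PAGE_FAULTS event and passes regs to handle_mm_fault(), which does the maj_flt/min_flt and MAJ/MIN perf accounting once for every architecture. The shape of that refactor, as a toy with invented types:]

#include <stdio.h>

struct toy_regs { int dummy; };
struct toy_task { unsigned long maj_flt, min_flt; };

static struct toy_task current_task;

static unsigned int toy_handle_mm_fault(unsigned long addr,
					struct toy_regs *regs)
{
	unsigned int fault = 0;	/* pretend it was a minor fault */

	(void)addr;
	if (regs)	/* accounting now lives here, once, for all arches */
		current_task.min_flt++;
	return fault;
}

int main(void)
{
	struct toy_regs regs = { 0 };

	toy_handle_mm_fault(0x1000, &regs);
	printf("min_flt=%lu\n", current_task.min_flt);
	return 0;
}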