Diffstat (limited to 'arch/arm')
67 files changed, 519 insertions(+), 216 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 59baf6c132a7..d96209b20736 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -3,6 +3,7 @@ config ARM bool default y select ARCH_32BIT_OFF_T + select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE if HAVE_KRETPROBES && FRAME_POINTER && !ARM_UNWIND select ARCH_HAS_BINFMT_FLAT select ARCH_HAS_DEBUG_VIRTUAL if MMU select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE @@ -64,7 +65,6 @@ config ARM select GENERIC_PCI_IOMAP select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD - select HANDLE_DOMAIN_IRQ select HARDIRQS_SW_RESEND select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 @@ -91,7 +91,8 @@ config ARM select HAVE_FAST_GUP if ARM_LPAE select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG - select HAVE_FUNCTION_TRACER if !XIP_KERNEL + select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !(THUMB2_KERNEL && CC_IS_CLANG) + select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_GCC_PLUGINS select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7) select HAVE_IRQ_TIME_ACCOUNTING @@ -125,6 +126,7 @@ config ARM select PERF_USE_VMALLOC select RTC_LIB select SYS_SUPPORTS_APM_EMULATION + select THREAD_INFO_IN_TASK if CURRENT_POINTER_IN_TPIDRURO select TRACE_IRQFLAGS_SUPPORT if !CPU_V7M # Above selects are sorted alphabetically; please add new ones # according to that. Thanks. @@ -264,10 +266,12 @@ config PHYS_OFFSET hex "Physical address of main memory" if MMU depends on !ARM_PATCH_PHYS_VIRT default DRAM_BASE if !MMU - default 0x00000000 if ARCH_FOOTBRIDGE + default 0x00000000 if ARCH_FOOTBRIDGE || ARCH_IXP4XX default 0x10000000 if ARCH_OMAP1 || ARCH_RPC - default 0x20000000 if ARCH_S5PV210 - default 0xc0000000 if ARCH_SA1100 + default 0x30000000 if ARCH_S3C24XX + default 0xa0000000 if ARCH_IOP32X || ARCH_PXA + default 0xc0000000 if ARCH_EP93XX || ARCH_SA1100 + default 0 help Please provide the physical address corresponding to the location of main memory in your system. @@ -432,6 +436,7 @@ config ARCH_PXA config ARCH_RPC bool "RiscPC" depends on MMU + depends on !CC_IS_CLANG && GCC_VERSION < 90100 && GCC_VERSION >= 60000 select ARCH_ACORN select ARCH_MAY_HAVE_PC_FDC select ARCH_SPARSEMEM_ENABLE @@ -1157,6 +1162,11 @@ config SMP_ON_UP If you don't know what to do here, say Y. 
+ +config CURRENT_POINTER_IN_TPIDRURO + def_bool y + depends on SMP && CPU_32v6K && !CPU_V6 + config ARM_CPU_TOPOLOGY bool "Support cpu topology definition" depends on SMP && CPU_V7 @@ -1600,7 +1610,7 @@ config XEN config STACKPROTECTOR_PER_TASK bool "Use a unique stack canary value for each task" - depends on GCC_PLUGINS && STACKPROTECTOR && SMP && !XIP_DEFLATED_DATA + depends on GCC_PLUGINS && STACKPROTECTOR && THREAD_INFO_IN_TASK && !XIP_DEFLATED_DATA select GCC_PLUGIN_ARM_SSP_PER_TASK default y help diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 847c31e7c368..1c540157e283 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -113,6 +113,10 @@ ifeq ($(CONFIG_CC_IS_CLANG),y) CFLAGS_ABI += -meabi gnu endif +ifeq ($(CONFIG_CURRENT_POINTER_IN_TPIDRURO),y) +CFLAGS_ABI += -mtp=cp15 +endif + # Accept old syntax despite ".syntax unified" AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W) @@ -273,11 +277,8 @@ ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y) prepare: stack_protector_prepare stack_protector_prepare: prepare0 $(eval SSP_PLUGIN_CFLAGS := \ - -fplugin-arg-arm_ssp_per_task_plugin-tso=$(shell \ - awk '{if ($$2 == "THREAD_SZ_ORDER") print $$3;}'\ - include/generated/asm-offsets.h) \ -fplugin-arg-arm_ssp_per_task_plugin-offset=$(shell \ - awk '{if ($$2 == "TI_STACK_CANARY") print $$3;}'\ + awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}'\ include/generated/asm-offsets.h)) $(eval KBUILD_CFLAGS += $(SSP_PLUGIN_CFLAGS)) $(eval GCC_PLUGINS_CFLAGS += $(SSP_PLUGIN_CFLAGS)) diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c index aa075d8372ea..74255e819831 100644 --- a/arch/arm/boot/compressed/decompress.c +++ b/arch/arm/boot/compressed/decompress.c @@ -47,7 +47,10 @@ extern char * strchrnul(const char *, int); #endif #ifdef CONFIG_KERNEL_XZ +/* Prevent KASAN override of string helpers in decompressor */ +#undef memmove #define memmove memmove +#undef memcpy #define memcpy memcpy #include "../../../../lib/decompress_unxz.c" #endif diff --git a/arch/arm/boot/compressed/fdt_check_mem_start.c b/arch/arm/boot/compressed/fdt_check_mem_start.c index 62450d824c3c..9291a2661bdf 100644 --- a/arch/arm/boot/compressed/fdt_check_mem_start.c +++ b/arch/arm/boot/compressed/fdt_check_mem_start.c @@ -55,16 +55,17 @@ static uint64_t get_val(const fdt32_t *cells, uint32_t ncells) * DTB, and, if out-of-range, replace it by the real start address. * To preserve backwards compatibility (systems reserving a block of memory * at the start of physical memory, kdump, ...), the traditional method is - * always used if it yields a valid address. + * used if it yields a valid address, unless the "linux,usable-memory-range" + * property is present. 
* * Return value: start address of physical memory to use */ uint32_t fdt_check_mem_start(uint32_t mem_start, const void *fdt) { - uint32_t addr_cells, size_cells, base; + uint32_t addr_cells, size_cells, usable_base, base; uint32_t fdt_mem_start = 0xffffffff; - const fdt32_t *reg, *endp; - uint64_t size, end; + const fdt32_t *usable, *reg, *endp; + uint64_t size, usable_end, end; const char *type; int offset, len; @@ -80,6 +81,27 @@ uint32_t fdt_check_mem_start(uint32_t mem_start, const void *fdt) if (addr_cells > 2 || size_cells > 2) return mem_start; + /* + * Usable memory in case of a crash dump kernel + * This property describes a limitation: memory within this range is + * only valid when also described through another mechanism + */ + usable = get_prop(fdt, "/chosen", "linux,usable-memory-range", + (addr_cells + size_cells) * sizeof(fdt32_t)); + if (usable) { + size = get_val(usable + addr_cells, size_cells); + if (!size) + return mem_start; + + if (addr_cells > 1 && fdt32_ld(usable)) { + /* Outside 32-bit address space */ + return mem_start; + } + + usable_base = fdt32_ld(usable + addr_cells - 1); + usable_end = usable_base + size; + } + /* Walk all memory nodes and regions */ for (offset = fdt_next_node(fdt, -1, NULL); offset >= 0; offset = fdt_next_node(fdt, offset, NULL)) { @@ -107,7 +129,20 @@ uint32_t fdt_check_mem_start(uint32_t mem_start, const void *fdt) base = fdt32_ld(reg + addr_cells - 1); end = base + size; - if (mem_start >= base && mem_start < end) { + if (usable) { + /* + * Clip to usable range, which takes precedence + * over mem_start + */ + if (base < usable_base) + base = usable_base; + + if (end > usable_end) + end = usable_end; + + if (end <= base) + continue; + } else if (mem_start >= base && mem_start < end) { /* Calculated address is valid, use it */ return mem_start; } @@ -123,7 +158,8 @@ uint32_t fdt_check_mem_start(uint32_t mem_start, const void *fdt) } /* - * The calculated address is not usable. + * The calculated address is not usable, or was overridden by the + * "linux,usable-memory-range" property. * Use the lowest usable physical memory address from the DTB instead, * and make sure this is a multiple of 2 MiB for phys/virt patching. 
*/ diff --git a/arch/arm/boot/compressed/string.c b/arch/arm/boot/compressed/string.c index 8c0fa276d994..fcc678fce045 100644 --- a/arch/arm/boot/compressed/string.c +++ b/arch/arm/boot/compressed/string.c @@ -5,6 +5,7 @@ * Small subset of simple string routines */ +#define __NO_FORTIFY #include <linux/string.h> /* diff --git a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts index f24bdd0870a5..72ce80fbf266 100644 --- a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts +++ b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts @@ -40,8 +40,8 @@ regulator-always-on; regulator-settling-time-us = <5000>; gpios = <&expgpio 4 GPIO_ACTIVE_HIGH>; - states = <1800000 0x1 - 3300000 0x0>; + states = <1800000 0x1>, + <3300000 0x0>; status = "okay"; }; @@ -217,15 +217,16 @@ }; &pcie0 { - pci@1,0 { + pci@0,0 { + device_type = "pci"; #address-cells = <3>; #size-cells = <2>; ranges; reg = <0 0 0 0 0>; - usb@1,0 { - reg = <0x10000 0 0 0 0>; + usb@0,0 { + reg = <0 0 0 0 0>; resets = <&reset RASPBERRYPI_FIRMWARE_RESET_ID_USB>; }; }; diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi index b8a4096192aa..3b60297af7f6 100644 --- a/arch/arm/boot/dts/bcm2711.dtsi +++ b/arch/arm/boot/dts/bcm2711.dtsi @@ -300,6 +300,14 @@ status = "disabled"; }; + vec: vec@7ec13000 { + compatible = "brcm,bcm2711-vec"; + reg = <0x7ec13000 0x1000>; + clocks = <&clocks BCM2835_CLOCK_VEC>; + interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>; + status = "disabled"; + }; + dvp: clock@7ef00000 { compatible = "brcm,brcm2711-dvp"; reg = <0x7ef00000 0x10>; @@ -532,8 +540,8 @@ compatible = "brcm,genet-mdio-v5"; reg = <0xe14 0x8>; reg-names = "mdio"; - #address-cells = <0x0>; - #size-cells = <0x1>; + #address-cells = <0x1>; + #size-cells = <0x0>; }; }; }; diff --git a/arch/arm/boot/dts/bcm2835-common.dtsi b/arch/arm/boot/dts/bcm2835-common.dtsi index 4119271c979d..c25e797b9060 100644 --- a/arch/arm/boot/dts/bcm2835-common.dtsi +++ b/arch/arm/boot/dts/bcm2835-common.dtsi @@ -106,6 +106,14 @@ status = "okay"; }; + vec: vec@7e806000 { + compatible = "brcm,bcm2835-vec"; + reg = <0x7e806000 0x1000>; + clocks = <&clocks BCM2835_CLOCK_VEC>; + interrupts = <2 27>; + status = "disabled"; + }; + pixelvalve@7e807000 { compatible = "brcm,bcm2835-pixelvalve2"; reg = <0x7e807000 0x100>; diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi index 0f3be55201a5..a3e06b680947 100644 --- a/arch/arm/boot/dts/bcm283x.dtsi +++ b/arch/arm/boot/dts/bcm283x.dtsi @@ -464,14 +464,6 @@ status = "disabled"; }; - vec: vec@7e806000 { - compatible = "brcm,bcm2835-vec"; - reg = <0x7e806000 0x1000>; - clocks = <&clocks BCM2835_CLOCK_VEC>; - interrupts = <2 27>; - status = "disabled"; - }; - usb: usb@7e980000 { compatible = "brcm,bcm2835-usb"; reg = <0x7e980000 0x10000>; diff --git a/arch/arm/boot/dts/spear3xx.dtsi b/arch/arm/boot/dts/spear3xx.dtsi index f266b7b03482..cc88ebe7a60c 100644 --- a/arch/arm/boot/dts/spear3xx.dtsi +++ b/arch/arm/boot/dts/spear3xx.dtsi @@ -47,7 +47,7 @@ }; gmac: eth@e0800000 { - compatible = "st,spear600-gmac"; + compatible = "snps,dwmac-3.40a"; reg = <0xe0800000 0x8000>; interrupts = <23 22>; interrupt-names = "macirq", "eth_wake_irq"; diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts index 8077f1716fbc..ecb91fb899ff 100644 --- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts +++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts @@ -112,7 +112,7 @@ pinctrl-names = "default"; pinctrl-0 = <&gmac_rgmii_pins>; phy-handle = <&phy1>; - phy-mode = 
"rgmii"; + phy-mode = "rgmii-id"; status = "okay"; }; diff --git a/arch/arm/common/scoop.c b/arch/arm/common/scoop.c index 6edb961bd6c1..e74c5bfdc6d3 100644 --- a/arch/arm/common/scoop.c +++ b/arch/arm/common/scoop.c @@ -240,9 +240,6 @@ static int scoop_remove(struct platform_device *pdev) { struct scoop_dev *sdev = platform_get_drvdata(pdev); - if (!sdev) - return -EINVAL; - if (sdev->gpio.base != -1) gpiochip_remove(&sdev->gpio); diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index b4f74454f20f..33572998dbbe 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -197,7 +197,6 @@ CONFIG_PCI_EPF_TEST=m CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y CONFIG_OMAP_OCP2SCP=y -CONFIG_SIMPLE_PM_BUS=y CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_BLOCK=y diff --git a/arch/arm/configs/oxnas_v6_defconfig b/arch/arm/configs/oxnas_v6_defconfig index cae0db6b4eaf..de37f7e90999 100644 --- a/arch/arm/configs/oxnas_v6_defconfig +++ b/arch/arm/configs/oxnas_v6_defconfig @@ -46,7 +46,6 @@ CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=64 -CONFIG_SIMPLE_PM_BUS=y CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_BLOCK=y diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig index d9a27e4e0914..18d2a960b2d2 100644 --- a/arch/arm/configs/shmobile_defconfig +++ b/arch/arm/configs/shmobile_defconfig @@ -40,7 +40,6 @@ CONFIG_PCI_RCAR_GEN2=y CONFIG_PCIE_RCAR_HOST=y CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y -CONFIG_SIMPLE_PM_BUS=y CONFIG_MTD=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h index 99175812d903..bb129b6d2366 100644 --- a/arch/arm/include/asm/arch_timer.h +++ b/arch/arm/include/asm/arch_timer.h @@ -7,6 +7,7 @@ #include <asm/hwcap.h> #include <linux/clocksource.h> #include <linux/init.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/types.h> #include <clocksource/arm_arch_timer.h> @@ -24,29 +25,35 @@ int arch_timer_arch_init(void); * the code. At least it does so with a recent GCC (4.6.3). 
*/ static __always_inline -void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val) +void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u64 val) { if (access == ARCH_TIMER_PHYS_ACCESS) { switch (reg) { case ARCH_TIMER_REG_CTRL: - asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val)); + asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" ((u32)val)); + isb(); break; - case ARCH_TIMER_REG_TVAL: - asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val)); + case ARCH_TIMER_REG_CVAL: + asm volatile("mcrr p15, 2, %Q0, %R0, c14" : : "r" (val)); break; + default: + BUILD_BUG(); } } else if (access == ARCH_TIMER_VIRT_ACCESS) { switch (reg) { case ARCH_TIMER_REG_CTRL: - asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val)); + asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" ((u32)val)); + isb(); break; - case ARCH_TIMER_REG_TVAL: - asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val)); + case ARCH_TIMER_REG_CVAL: + asm volatile("mcrr p15, 3, %Q0, %R0, c14" : : "r" (val)); break; + default: + BUILD_BUG(); } + } else { + BUILD_BUG(); } - - isb(); } static __always_inline @@ -59,19 +66,19 @@ u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg) case ARCH_TIMER_REG_CTRL: asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val)); break; - case ARCH_TIMER_REG_TVAL: - asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val)); - break; + default: + BUILD_BUG(); } } else if (access == ARCH_TIMER_VIRT_ACCESS) { switch (reg) { case ARCH_TIMER_REG_CTRL: asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val)); break; - case ARCH_TIMER_REG_TVAL: - asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val)); - break; + default: + BUILD_BUG(); } + } else { + BUILD_BUG(); } return val; diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index e2b1fd558bf3..7d23d4bb2168 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -199,14 +199,43 @@ .endm .endr + .macro get_current, rd +#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO + mrc p15, 0, \rd, c13, c0, 3 @ get TPIDRURO register +#else + get_thread_info \rd + ldr \rd, [\rd, #TI_TASK] +#endif + .endm + + .macro set_current, rn +#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO + mcr p15, 0, \rn, c13, c0, 3 @ set TPIDRURO register +#endif + .endm + + .macro reload_current, t1:req, t2:req +#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO + adr_l \t1, __entry_task @ get __entry_task base address + mrc p15, 0, \t2, c13, c0, 4 @ get per-CPU offset + ldr \t1, [\t1, \t2] @ load variable + mcr p15, 0, \t1, c13, c0, 3 @ store in TPIDRURO +#endif + .endm + /* * Get current thread_info. 
*/ .macro get_thread_info, rd +#ifdef CONFIG_THREAD_INFO_IN_TASK + /* thread_info is the first member of struct task_struct */ + get_current \rd +#else ARM( mov \rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT ) THUMB( mov \rd, sp ) THUMB( lsr \rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT ) mov \rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT +#endif .endm /* diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 5e56288e343b..e68fb879e4f9 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -290,6 +290,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr */ #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page *); +void flush_dcache_folio(struct folio *folio); #define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1 static inline void flush_kernel_vmap_range(void *addr, int size) diff --git a/arch/arm/include/asm/current.h b/arch/arm/include/asm/current.h new file mode 100644 index 000000000000..6bf0aad672c3 --- /dev/null +++ b/arch/arm/include/asm/current.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 Keith Packard <keithp@keithp.com> + * Copyright (c) 2021 Google, LLC <ardb@kernel.org> + */ + +#ifndef _ASM_ARM_CURRENT_H +#define _ASM_ARM_CURRENT_H + +#ifndef __ASSEMBLY__ + +struct task_struct; + +static inline void set_current(struct task_struct *cur) +{ + if (!IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO)) + return; + + /* Set TPIDRURO */ + asm("mcr p15, 0, %0, c13, c0, 3" :: "r"(cur) : "memory"); +} + +#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO + +static inline struct task_struct *get_current(void) +{ + struct task_struct *cur; + +#if __has_builtin(__builtin_thread_pointer) && \ + !(defined(CONFIG_THUMB2_KERNEL) && \ + defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 130001) + /* + * Use the __builtin helper when available - this results in better + * code, especially when using GCC in combination with the per-task + * stack protector, as the compiler will recognize that it needs to + * load the TLS register only once in every function. 
+ * + * Clang < 13.0.1 gets this wrong for Thumb2 builds: + * https://github.com/ClangBuiltLinux/linux/issues/1485 + */ + cur = __builtin_thread_pointer(); +#else + asm("mrc p15, 0, %0, c13, c0, 3" : "=r"(cur)); +#endif + return cur; +} + +#define current get_current() +#else +#include <asm-generic/current.h> +#endif /* CONFIG_CURRENT_POINTER_IN_TPIDRURO */ + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_ARM_CURRENT_H */ diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index f74944c6fe8d..c576fa7d9bf8 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -138,6 +138,7 @@ extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int, void *); extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached); +void __arm_iomem_set_ro(void __iomem *ptr, size_t size); extern void __iounmap(volatile void __iomem *addr); extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h index 6bff94b2372b..38e3eabff5c3 100644 --- a/arch/arm/include/asm/opcodes.h +++ b/arch/arm/include/asm/opcodes.h @@ -110,12 +110,17 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr); #define __opcode_to_mem_thumb16(x) ___opcode_identity16(x) #define ___asm_opcode_to_mem_arm(x) ___asm_opcode_identity32(x) #define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_identity16(x) -#ifndef CONFIG_CPU_ENDIAN_BE32 +#ifdef CONFIG_CPU_ENDIAN_BE32 +#ifndef __ASSEMBLY__ /* * On BE32 systems, using 32-bit accesses to store Thumb instructions will not * work in all cases, due to alignment constraints. For now, a correct - * version is not provided for BE32. + * version is not provided for BE32, but the prototype needs to be there + * to compile patch.c. */ +extern __u32 __opcode_to_mem_thumb32(__u32); +#endif +#else #define __opcode_to_mem_thumb32(x) ___opcode_swahw32(x) #define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahw32(x) #endif diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index 9e6b97286307..6af68edfa53a 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -84,7 +84,7 @@ struct task_struct; /* Free all resources held by a thread. */ extern void release_thread(struct task_struct *); -unsigned long get_wchan(struct task_struct *p); +unsigned long __get_wchan(struct task_struct *p); #define task_pt_regs(p) \ ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index 3ae68a1b3de6..ba0872a8dcda 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -19,7 +19,7 @@ static const struct tagtable __tagtable_##fn __tag = { tag, fn } extern int arm_add_memory(u64 start, u64 size); -extern void early_print(const char *str, ...); +extern __printf(1, 2) void early_print(const char *str, ...); extern void dump_machine_table(void); #ifdef CONFIG_ATAGS_PROC diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index 5d508f5d56c4..f16cbbd5cda4 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -48,7 +48,7 @@ extern void set_smp_ipi_range(int ipi_base, int nr_ipi); * Called from platform specific assembly code, this is the * secondary CPU entry point. 
*/ -asmlinkage void secondary_start_kernel(void); +asmlinkage void secondary_start_kernel(struct task_struct *task); /* @@ -61,6 +61,7 @@ struct secondary_data { }; unsigned long swapper_pg_dir; void *stack; + struct task_struct *task; }; extern struct secondary_data secondary_data; extern void secondary_startup(void); diff --git a/arch/arm/include/asm/stackprotector.h b/arch/arm/include/asm/stackprotector.h index 72a20c3a0a90..088d03161be5 100644 --- a/arch/arm/include/asm/stackprotector.h +++ b/arch/arm/include/asm/stackprotector.h @@ -39,8 +39,6 @@ static __always_inline void boot_init_stack_canary(void) current->stack_canary = canary; #ifndef CONFIG_STACKPROTECTOR_PER_TASK __stack_chk_guard = current->stack_canary; -#else - current_thread_info()->stack_canary = current->stack_canary; #endif } diff --git a/arch/arm/include/asm/stacktrace.h b/arch/arm/include/asm/stacktrace.h index 2d76a2e29f05..8f54f9ad8a9b 100644 --- a/arch/arm/include/asm/stacktrace.h +++ b/arch/arm/include/asm/stacktrace.h @@ -3,6 +3,7 @@ #define __ASM_STACKTRACE_H #include <asm/ptrace.h> +#include <linux/llist.h> struct stackframe { /* @@ -13,6 +14,10 @@ struct stackframe { unsigned long sp; unsigned long lr; unsigned long pc; +#ifdef CONFIG_KRETPROBES + struct llist_node *kr_cur; + struct task_struct *tsk; +#endif }; static __always_inline @@ -22,6 +27,10 @@ void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame) frame->sp = regs->ARM_sp; frame->lr = regs->ARM_lr; frame->pc = regs->ARM_pc; +#ifdef CONFIG_KRETPROBES + frame->kr_cur = NULL; + frame->tsk = current; +#endif } extern int unwind_frame(struct stackframe *frame); diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h index 007d8fea7157..b55c7b2755e4 100644 --- a/arch/arm/include/asm/switch_to.h +++ b/arch/arm/include/asm/switch_to.h @@ -23,9 +23,25 @@ */ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *); +static inline void set_ti_cpu(struct task_struct *p) +{ +#ifdef CONFIG_THREAD_INFO_IN_TASK + /* + * The core code no longer maintains the thread_info::cpu field once + * CONFIG_THREAD_INFO_IN_TASK is in effect, but we rely on it for + * raw_smp_processor_id(), which cannot access struct task_struct* + * directly for reasons of circular #inclusion hell. 
+ */ + task_thread_info(p)->cpu = task_cpu(p); +#endif +} + #define switch_to(prev,next,last) \ do { \ __complete_pending_tlbi(); \ + set_ti_cpu(next); \ + if (IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO)) \ + __this_cpu_write(__entry_task, next); \ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ } while (0) diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 9a18da3e10cc..164e15f26485 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -29,6 +29,8 @@ struct task_struct; +DECLARE_PER_CPU(struct task_struct *, __entry_task); + #include <asm/types.h> struct cpu_context_save { @@ -52,12 +54,11 @@ struct cpu_context_save { struct thread_info { unsigned long flags; /* low level flags */ int preempt_count; /* 0 => preemptable, <0 => bug */ +#ifndef CONFIG_THREAD_INFO_IN_TASK struct task_struct *task; /* main task structure */ +#endif __u32 cpu; /* cpu */ __u32 cpu_domain; /* cpu domain */ -#ifdef CONFIG_STACKPROTECTOR_PER_TASK - unsigned long stack_canary; -#endif struct cpu_context_save cpu_context; /* cpu context */ __u32 abi_syscall; /* ABI type and syscall nr */ __u8 used_cp[16]; /* thread used copro */ @@ -71,11 +72,27 @@ struct thread_info { #define INIT_THREAD_INFO(tsk) \ { \ - .task = &tsk, \ + INIT_THREAD_INFO_TASK(tsk) \ .flags = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ } +#ifdef CONFIG_THREAD_INFO_IN_TASK +#define INIT_THREAD_INFO_TASK(tsk) + +static inline struct task_struct *thread_task(struct thread_info* ti) +{ + return (struct task_struct *)ti; +} + +#else +#define INIT_THREAD_INFO_TASK(tsk) .task = &(tsk), + +static inline struct task_struct *thread_task(struct thread_info* ti) +{ + return ti->task; +} + /* * how to get the thread information struct from C */ @@ -86,6 +103,7 @@ static inline struct thread_info *current_thread_info(void) return (struct thread_info *) (current_stack_pointer & ~(THREAD_SIZE - 1)); } +#endif #define thread_saved_pc(tsk) \ ((unsigned long)(task_thread_info(tsk)->cpu_context.pc)) diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h index 5a66c3b13c92..c3296499176c 100644 --- a/arch/arm/include/asm/tls.h +++ b/arch/arm/include/asm/tls.h @@ -12,8 +12,8 @@ .macro switch_tls_v6k, base, tp, tpuser, tmp1, tmp2 mrc p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register - mcr p15, 0, \tp, c13, c0, 3 @ set TLS register - mcr p15, 0, \tpuser, c13, c0, 2 @ and the user r/w register + @ TLS register update is deferred until return to user space + mcr p15, 0, \tpuser, c13, c0, 2 @ set the user r/w register str \tmp2, [\base, #TI_TP_VALUE + 4] @ save it .endm @@ -38,18 +38,22 @@ #ifdef CONFIG_TLS_REG_EMUL #define tls_emu 1 #define has_tls_reg 1 +#define defer_tls_reg_update 0 #define switch_tls switch_tls_none #elif defined(CONFIG_CPU_V6) #define tls_emu 0 #define has_tls_reg (elf_hwcap & HWCAP_TLS) +#define defer_tls_reg_update 0 #define switch_tls switch_tls_v6 #elif defined(CONFIG_CPU_32v6K) #define tls_emu 0 #define has_tls_reg 1 +#define defer_tls_reg_update 1 #define switch_tls switch_tls_v6k #else #define tls_emu 0 #define has_tls_reg 0 +#define defer_tls_reg_update 0 #define switch_tls switch_tls_software #endif @@ -77,7 +81,7 @@ static inline void set_tls(unsigned long val) */ barrier(); - if (!tls_emu) { + if (!tls_emu && !defer_tls_reg_update) { if (has_tls_reg) { asm("mcr p15, 0, %0, c13, c0, 3" : : "r" (val)); diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 084d1c07c2d0..36fbc3329252 100644 --- 
a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -176,6 +176,7 @@ extern int __get_user_64t_4(void *); register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ unsigned int __ua_flags = uaccess_save_and_enable(); \ + int __tmp_e; \ switch (sizeof(*(__p))) { \ case 1: \ if (sizeof((x)) >= 8) \ @@ -203,9 +204,10 @@ extern int __get_user_64t_4(void *); break; \ default: __e = __get_user_bad(); break; \ } \ + __tmp_e = __e; \ uaccess_restore(__ua_flags); \ x = (typeof(*(p))) __r2; \ - __e; \ + __tmp_e; \ }) #define get_user(x, p) \ diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index a646a3f6440f..645845e4982a 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -43,7 +43,9 @@ int main(void) BLANK(); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); +#ifndef CONFIG_THREAD_INFO_IN_TASK DEFINE(TI_TASK, offsetof(struct thread_info, task)); +#endif DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain)); DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context)); @@ -63,10 +65,6 @@ int main(void) #ifdef CONFIG_IWMMXT DEFINE(TI_IWMMXT_STATE, offsetof(struct thread_info, fpstate.iwmmxt)); #endif -#ifdef CONFIG_STACKPROTECTOR_PER_TASK - DEFINE(TI_STACK_CANARY, offsetof(struct thread_info, stack_canary)); -#endif - DEFINE(THREAD_SZ_ORDER, THREAD_SIZE_ORDER); BLANK(); DEFINE(S_R0, offsetof(struct pt_regs, ARM_r0)); DEFINE(S_R1, offsetof(struct pt_regs, ARM_r1)); diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 241b73d64df7..deff286eb5ea 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -38,14 +38,11 @@ */ .macro irq_handler #ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER - ldr r1, =handle_arch_irq mov r0, sp - badr lr, 9997f - ldr pc, [r1] + bl generic_handle_arch_irq #else arch_irq_handler_default #endif -9997: .endm .macro pabt_helper @@ -384,6 +381,8 @@ ENDPROC(__fiq_abt) ATRAP( teq r8, r7) ATRAP( mcrne p15, 0, r8, c1, c0, 0) + reload_current r7, r8 + @ @ Clear FP to mark the first stack frame @ @@ -762,6 +761,8 @@ ENTRY(__switch_to) add r7, r7, #TSK_STACK_CANARY & ~IMM12_MASK .endif ldr r7, [r7, #TSK_STACK_CANARY & IMM12_MASK] +#elif defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) + mov r7, r2 @ Preserve 'next' #endif #ifdef CONFIG_CPU_USE_DOMAINS mcr p15, 0, r6, c3, c0, 0 @ Set domain register @@ -776,6 +777,7 @@ ENTRY(__switch_to) #endif THUMB( mov ip, r4 ) mov r0, r5 + set_current r7 ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously THUMB( ldr sp, [ip], #4 ) diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index d9c99db50243..ac86c34682bb 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -170,6 +170,7 @@ ENTRY(vector_swi) str saved_psr, [sp, #S_PSR] @ Save CPSR str r0, [sp, #S_OLD_R0] @ Save OLD_R0 #endif + reload_current r10, ip zero_fp alignment_trap r10, ip, __cr_alignment asm_trace_hardirqs_on save=0 diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 40db0f9188b6..ae24dd54e9ef 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -292,6 +292,14 @@ .macro restore_user_regs, fast = 0, offset = 0 +#if defined(CONFIG_CPU_32v6K) && !defined(CONFIG_CPU_V6) + @ The TLS register update is deferred until return 
to user space so we + @ can use it for other things while running in the kernel + get_thread_info r1 + ldr r1, [r1, #TI_TP_VALUE] + mcr p15, 0, r1, c13, c0, 3 @ set TLS register +#endif + uaccess_enable r1, isb=0 #ifndef CONFIG_THUMB2_KERNEL @ ARM mode restore diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index 3c83b5d29697..a006585e1c09 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -193,11 +193,6 @@ int ftrace_make_nop(struct module *mod, return ret; } - -int __init ftrace_dyn_arch_init(void) -{ - return 0; -} #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S index 29b2eda136bb..da18e0a17dc2 100644 --- a/arch/arm/kernel/head-common.S +++ b/arch/arm/kernel/head-common.S @@ -105,6 +105,11 @@ __mmap_switched: mov r1, #0 bl __memset @ clear .bss +#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO + adr_l r0, init_task @ get swapper task_struct + set_current r0 +#endif + ldmia r4, {r0, r1, r2, r3} str r9, [r0] @ Save processor ID str r7, [r1] @ Save machine type diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 0fc814bbc34b..fadfee9e2b45 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -115,6 +115,7 @@ ENTRY(secondary_startup) ret r12 1: bl __after_proc_init ldr sp, [r7, #12] @ set up the stack pointer + ldr r0, [r7, #16] @ set up task pointer mov fp, #0 b secondary_start_kernel ENDPROC(secondary_startup) diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 29070eb8df7d..c04dd94630c7 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -253,7 +253,7 @@ __create_page_tables: add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER) ldr r6, =(_end - 1) adr_l r5, kernel_sec_start @ _pa(kernel_sec_start) -#ifdef CONFIG_CPU_ENDIAN_BE8 +#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32 str r8, [r5, #4] @ Save physical start of kernel (BE) #else str r8, [r5] @ Save physical start of kernel (LE) @@ -266,7 +266,7 @@ __create_page_tables: bls 1b eor r3, r3, r7 @ Remove the MMU flags adr_l r5, kernel_sec_end @ _pa(kernel_sec_end) -#ifdef CONFIG_CPU_ENDIAN_BE8 +#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32 str r3, [r5, #4] @ Save physical end of kernel (BE) #else str r3, [r5] @ Save physical end of kernel (LE) @@ -424,8 +424,9 @@ ENDPROC(secondary_startup) ENDPROC(secondary_startup_arm) ENTRY(__secondary_switched) - ldr_l r7, secondary_data + 12 @ get secondary_data.stack - mov sp, r7 + adr_l r7, secondary_data + 12 @ get secondary_data.stack + ldr sp, [r7] + ldr r0, [r7, #4] @ get secondary_data.task mov fp, #0 b secondary_start_kernel ENDPROC(__secondary_switched) diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 20ab1e607522..b79975bd988c 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -63,11 +63,8 @@ int arch_show_interrupts(struct seq_file *p, int prec) */ void handle_IRQ(unsigned int irq, struct pt_regs *regs) { - struct pt_regs *old_regs = set_irq_regs(regs); struct irq_desc *desc; - irq_enter(); - /* * Some hardware gives randomly wrong interrupts. Rather * than crashing, do something sensible. 
@@ -81,9 +78,6 @@ void handle_IRQ(unsigned int irq, struct pt_regs *regs) handle_irq_desc(desc); else ack_bad_irq(irq); - - irq_exit(); - set_irq_regs(old_regs); } /* @@ -92,7 +86,15 @@ void handle_IRQ(unsigned int irq, struct pt_regs *regs) asmlinkage void __exception_irq_entry asm_do_IRQ(unsigned int irq, struct pt_regs *regs) { + struct pt_regs *old_regs; + + irq_enter(); + old_regs = set_irq_regs(regs); + handle_IRQ(irq, regs); + + set_irq_regs(old_regs); + irq_exit(); } void __init init_IRQ(void) diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 0e2d3051741e..d47159f3791c 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -36,6 +36,10 @@ #include "signal.h" +#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO +DEFINE_PER_CPU(struct task_struct *, __entry_task); +#endif + #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) #include <linux/stackprotector.h> unsigned long __stack_chk_guard __read_mostly; @@ -269,20 +273,14 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, thread_notify(THREAD_NOTIFY_COPY, thread); -#ifdef CONFIG_STACKPROTECTOR_PER_TASK - thread->stack_canary = p->stack_canary; -#endif - return 0; } -unsigned long get_wchan(struct task_struct *p) +unsigned long __get_wchan(struct task_struct *p) { struct stackframe frame; unsigned long stack_page; int count = 0; - if (!p || p == current || task_is_running(p)) - return 0; frame.fp = thread_saved_fp(p); frame.sp = thread_saved_sp(p); diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c index 7b42ac010fdf..00c11579406c 100644 --- a/arch/arm/kernel/return_address.c +++ b/arch/arm/kernel/return_address.c @@ -42,6 +42,10 @@ void *return_address(unsigned int level) frame.sp = current_stack_pointer; frame.lr = (unsigned long)__builtin_return_address(0); frame.pc = (unsigned long)return_address; +#ifdef CONFIG_KRETPROBES + frame.kr_cur = NULL; + frame.tsk = current; +#endif walk_stackframe(&frame, save_return_addr, &data); diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 842427ff2b3c..cde5b6d8bac5 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -153,6 +153,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) secondary_data.pgdir = virt_to_phys(idmap_pgd); secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir); #endif + secondary_data.task = idle; + if (IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK)) + task_thread_info(idle)->cpu = cpu; + sync_cache_w(&secondary_data); /* @@ -375,9 +379,12 @@ void arch_cpu_idle_dead(void) */ __asm__("mov sp, %0\n" " mov fp, #0\n" + " mov r0, %1\n" " b secondary_start_kernel" : - : "r" (task_stack_page(current) + THREAD_SIZE - 8)); + : "r" (task_stack_page(current) + THREAD_SIZE - 8), + "r" (current) + : "r0"); } #endif /* CONFIG_HOTPLUG_CPU */ @@ -400,11 +407,13 @@ static void smp_store_cpu_info(unsigned int cpuid) * This is the secondary CPU boot entry. We're using this CPUs * idle thread stack, but a set of temporary page tables. 
*/ -asmlinkage void secondary_start_kernel(void) +asmlinkage void secondary_start_kernel(struct task_struct *task) { struct mm_struct *mm = &init_mm; unsigned int cpu; + set_current(task); + secondary_biglittle_init(); /* diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index 76ea4178a55c..75e905508f27 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only #include <linux/export.h> +#include <linux/kprobes.h> #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/stacktrace.h> @@ -54,8 +55,7 @@ int notrace unwind_frame(struct stackframe *frame) frame->sp = frame->fp; frame->fp = *(unsigned long *)(fp); - frame->pc = frame->lr; - frame->lr = *(unsigned long *)(fp + 4); + frame->pc = *(unsigned long *)(fp + 4); #else /* check current frame pointer is within bounds */ if (fp < low + 12 || fp > high - 4) @@ -66,6 +66,11 @@ int notrace unwind_frame(struct stackframe *frame) frame->sp = *(unsigned long *)(fp - 8); frame->pc = *(unsigned long *)(fp - 4); #endif +#ifdef CONFIG_KRETPROBES + if (is_kretprobe_trampoline(frame->pc)) + frame->pc = kretprobe_find_ret_addr(frame->tsk, + (void *)frame->fp, &frame->kr_cur); +#endif return 0; } @@ -157,6 +162,10 @@ static noinline void __save_stack_trace(struct task_struct *tsk, frame.lr = (unsigned long)__builtin_return_address(0); frame.pc = (unsigned long)__save_stack_trace; } +#ifdef CONFIG_KRETPROBES + frame.kr_cur = NULL; + frame.tsk = tsk; +#endif walk_stackframe(&frame, save_trace, &data); } @@ -174,6 +183,10 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) frame.sp = regs->ARM_sp; frame.lr = regs->ARM_lr; frame.pc = regs->ARM_pc; +#ifdef CONFIG_KRETPROBES + frame.kr_cur = NULL; + frame.tsk = current; +#endif walk_stackframe(&frame, save_trace, &data); } diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 4a7edc6e848f..195dff58bafc 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -136,7 +136,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom, for (p = first, i = 0; i < 8 && p < top; i++, p += 4) { if (p >= bottom && p < top) { unsigned long val; - if (get_kernel_nofault(val, (unsigned long *)p)) + if (!get_kernel_nofault(val, (unsigned long *)p)) sprintf(str + i * 9, " %08lx", val); else sprintf(str + i * 9, " ????????"); diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S index 50136828f5b5..76678732c60d 100644 --- a/arch/arm/kernel/vmlinux-xip.lds.S +++ b/arch/arm/kernel/vmlinux-xip.lds.S @@ -40,6 +40,10 @@ SECTIONS ARM_DISCARD *(.alt.smp.init) *(.pv_table) +#ifndef CONFIG_ARM_UNWIND + *(.ARM.exidx) *(.ARM.exidx.*) + *(.ARM.extab) *(.ARM.extab.*) +#endif } . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR); @@ -162,7 +166,9 @@ SECTIONS * binutils is too old (for other reasons as well) */ ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support") +#ifndef CONFIG_COMPILE_TEST ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined") +#endif #ifdef CONFIG_XIP_DEFLATED_DATA /* @@ -172,7 +178,7 @@ ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined") ASSERT((_end - __bss_start) >= 12288, ".bss too small for CONFIG_XIP_DEFLATED_DATA") #endif -#ifdef CONFIG_ARM_MPU +#if defined(CONFIG_ARM_MPU) && !defined(CONFIG_COMPILE_TEST) /* * Due to PMSAv7 restriction on base address and size we have to * enforce minimal alignment restrictions. 
It was seen that weaker diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 20c4f6d20c7a..f02d617e3359 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -174,6 +174,8 @@ __start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT); * binutils is too old (for other reasons as well) */ ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support") +#ifndef CONFIG_COMPILE_TEST ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined") +#endif #endif /* CONFIG_XIP_KERNEL */ diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig index 2890e61b2b46..bd3f82788ebc 100644 --- a/arch/arm/mach-bcm/Kconfig +++ b/arch/arm/mach-bcm/Kconfig @@ -161,7 +161,6 @@ config ARCH_BCM2835 select ARM_TIMER_SP804 select HAVE_ARM_ARCH_TIMER if ARCH_MULTI_V7 select BCM2835_TIMER - select BRCMSTB_L2_IRQ select PINCTRL select PINCTRL_BCM2835 select MFD_CORE @@ -209,9 +208,6 @@ config ARCH_BRCMSTB select ARM_GIC select ARM_ERRATA_798181 if SMP select HAVE_ARM_ARCH_TIMER - select BCM7038_L1_IRQ - select BRCMSTB_L2_IRQ - select BCM7120_L2_IRQ select ZONE_DMA if ARM_LPAE select SOC_BRCMSTB select SOC_BUS diff --git a/arch/arm/mach-imx/avic.c b/arch/arm/mach-imx/avic.c index 21bce4049cec..cf6546ddc7a3 100644 --- a/arch/arm/mach-imx/avic.c +++ b/arch/arm/mach-imx/avic.c @@ -154,7 +154,7 @@ static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs) if (nivector == 0xffff) break; - handle_domain_irq(domain, nivector, regs); + generic_handle_domain_irq(domain, nivector); } while (1); } diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c index f2ecca339910..045b9fdd342d 100644 --- a/arch/arm/mach-imx/pm-imx6.c +++ b/arch/arm/mach-imx/pm-imx6.c @@ -572,6 +572,8 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) &imx6_suspend, MX6Q_SUSPEND_OCRAM_SIZE - sizeof(*pm_info)); + __arm_iomem_set_ro(suspend_ocram_base, MX6Q_SUSPEND_OCRAM_SIZE); + goto put_device; pl310_cache_map_failed: diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c index 95fd1fbb0826..59a8e8cc4469 100644 --- a/arch/arm/mach-imx/src.c +++ b/arch/arm/mach-imx/src.c @@ -9,6 +9,7 @@ #include <linux/iopoll.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/platform_device.h> #include <linux/reset-controller.h> #include <linux/smp.h> #include <asm/smp_plat.h> @@ -81,11 +82,6 @@ static const struct reset_control_ops imx_src_ops = { .reset = imx_src_reset_module, }; -static struct reset_controller_dev imx_reset_controller = { - .ops = &imx_src_ops, - .nr_resets = ARRAY_SIZE(sw_reset_bits), -}; - static void imx_gpcv2_set_m_core_pgc(bool enable, u32 offset) { writel_relaxed(enable, gpc_base + offset); @@ -177,10 +173,6 @@ void __init imx_src_init(void) src_base = of_iomap(np, 0); WARN_ON(!src_base); - imx_reset_controller.of_node = np; - if (IS_ENABLED(CONFIG_RESET_CONTROLLER)) - reset_controller_register(&imx_reset_controller); - /* * force warm reset sources to generate cold reset * for a more reliable restart @@ -214,3 +206,33 @@ void __init imx7_src_init(void) if (!gpc_base) return; } + +static const struct of_device_id imx_src_dt_ids[] = { + { .compatible = "fsl,imx51-src" }, + { /* sentinel */ } +}; + +static int imx_src_probe(struct platform_device *pdev) +{ + struct reset_controller_dev *rcdev; + + rcdev = devm_kzalloc(&pdev->dev, sizeof(*rcdev), GFP_KERNEL); + if (!rcdev) + return -ENOMEM; + + rcdev->ops = &imx_src_ops; + rcdev->dev = &pdev->dev; + rcdev->of_node = 
pdev->dev.of_node; + rcdev->nr_resets = ARRAY_SIZE(sw_reset_bits); + + return devm_reset_controller_register(&pdev->dev, rcdev); +} + +static struct platform_driver imx_src_driver = { + .driver = { + .name = "imx-src", + .of_match_table = imx_src_dt_ids, + }, + .probe = imx_src_probe, +}; +builtin_platform_driver(imx_src_driver); diff --git a/arch/arm/mach-imx/tzic.c b/arch/arm/mach-imx/tzic.c index 479a01bdac56..8b3d98d288d9 100644 --- a/arch/arm/mach-imx/tzic.c +++ b/arch/arm/mach-imx/tzic.c @@ -134,7 +134,7 @@ static void __exception_irq_entry tzic_handle_irq(struct pt_regs *regs) while (stat) { handled = 1; irqofs = fls(stat) - 1; - handle_domain_irq(domain, irqofs + i * 32, regs); + generic_handle_domain_irq(domain, irqofs + i * 32); stat &= ~(1 << irqofs); } } diff --git a/arch/arm/mach-omap1/irq.c b/arch/arm/mach-omap1/irq.c index b11edc8a46f0..ee6a93083154 100644 --- a/arch/arm/mach-omap1/irq.c +++ b/arch/arm/mach-omap1/irq.c @@ -165,7 +165,7 @@ asmlinkage void __exception_irq_entry omap1_handle_irq(struct pt_regs *regs) } irq: if (irqnr) - handle_domain_irq(domain, irqnr, regs); + generic_handle_domain_irq(domain, irqnr); else break; } while (irqnr); diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 7f13adf26e61..02c253de9b6e 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -112,7 +112,6 @@ config ARCH_OMAP2PLUS select PM_GENERIC_DOMAINS select PM_GENERIC_DOMAINS_OF select RESET_CONTROLLER - select SIMPLE_PM_BUS select SOC_BUS select TI_SYSC select OMAP_IRQCHIP diff --git a/arch/arm/mach-s3c/irq-s3c24xx.c b/arch/arm/mach-s3c/irq-s3c24xx.c index 3edc5f614eef..45dfd546e6fa 100644 --- a/arch/arm/mach-s3c/irq-s3c24xx.c +++ b/arch/arm/mach-s3c/irq-s3c24xx.c @@ -354,7 +354,7 @@ static inline int s3c24xx_handle_intc(struct s3c_irq_intc *intc, if (!(pnd & (1 << offset))) offset = __ffs(pnd); - handle_domain_irq(intc->domain, intc_offset + offset, regs); + generic_handle_domain_irq(intc->domain, intc_offset + offset); return true; } diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c index 2012fa8c28cf..9919e0f32c4b 100644 --- a/arch/arm/mach-sa1100/assabet.c +++ b/arch/arm/mach-sa1100/assabet.c @@ -84,7 +84,7 @@ void ASSABET_BCR_frob(unsigned int mask, unsigned int val) } EXPORT_SYMBOL(ASSABET_BCR_frob); -static int __init assabet_init_gpio(void __iomem *reg, u32 def_val) +static void __init assabet_init_gpio(void __iomem *reg, u32 def_val) { struct gpio_chip *gc; @@ -94,11 +94,9 @@ static int __init assabet_init_gpio(void __iomem *reg, u32 def_val) assabet_names, NULL, NULL); if (IS_ERR(gc)) - return PTR_ERR(gc); + return; assabet_bcr_gc = gc; - - return gc->base; } /* @@ -475,16 +473,23 @@ static struct gpiod_lookup_table assabet_cf_vcc_gpio_table = { }, }; +static struct gpiod_lookup_table assabet_leds_gpio_table = { + .dev_id = "leds-gpio", + .table = { + GPIO_LOOKUP("assabet", 13, NULL, GPIO_ACTIVE_LOW), + GPIO_LOOKUP("assabet", 14, NULL, GPIO_ACTIVE_LOW), + { }, + }, +}; + static struct gpio_led assabet_leds[] __initdata = { { .name = "assabet:red", .default_trigger = "cpu0", - .active_low = 1, .default_state = LEDS_GPIO_DEFSTATE_KEEP, }, { .name = "assabet:green", .default_trigger = "heartbeat", - .active_low = 1, .default_state = LEDS_GPIO_DEFSTATE_KEEP, }, }; @@ -603,6 +608,7 @@ static void __init assabet_init(void) &assabet_keys_pdata, sizeof(assabet_keys_pdata)); + gpiod_add_lookup_table(&assabet_leds_gpio_table); gpio_led_register_device(-1, &assabet_leds_pdata); #ifndef ASSABET_PAL_VIDEO @@ 
-739,7 +745,6 @@ static void __init assabet_map_io(void) void __init assabet_init_irq(void) { - unsigned int assabet_gpio_base; u32 def_val; sa1100_init_irq(); @@ -754,10 +759,7 @@ void __init assabet_init_irq(void) * * This must precede any driver calls to BCR_set() or BCR_clear(). */ - assabet_gpio_base = assabet_init_gpio((void *)&ASSABET_BCR, def_val); - - assabet_leds[0].gpio = assabet_gpio_base + 13; - assabet_leds[1].gpio = assabet_gpio_base + 14; + assabet_init_gpio((void *)&ASSABET_BCR, def_val); } MACHINE_START(ASSABET, "Intel-Assabet") diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 8355c3895894..58afba346729 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -675,7 +675,7 @@ config ARM_PV_FIXUP config ARM_THUMB bool "Support Thumb user binaries" if !CPU_THUMBONLY && EXPERT - depends on CPU_THUMB_CAPABLE + depends on CPU_THUMB_CAPABLE && !CPU_32v4 default y help Say Y if you want to include kernel support for running user space @@ -750,7 +750,7 @@ config CPU_BIG_ENDIAN config CPU_ENDIAN_BE8 bool depends on CPU_BIG_ENDIAN - default CPU_V6 || CPU_V6K || CPU_V7 + default CPU_V6 || CPU_V6K || CPU_V7 || CPU_V7M help Support for the BE-8 (big-endian) mode on ARMv6 and ARMv7 processors. diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index b7525b433f3e..48091870db89 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -109,7 +109,7 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd, if (cmd != THREAD_NOTIFY_SWITCH) return NOTIFY_DONE; - pid = task_pid_nr(thread->task) << ASID_BITS; + pid = task_pid_nr(thread_task(thread)) << ASID_BITS; asm volatile( " mrc p15, 0, %0, c13, c0, 1\n" " and %0, %0, %2\n" diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index efa402025031..bc8779d54a64 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -37,7 +37,6 @@ void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr) if (!mm) mm = &init_mm; - printk("%spgd = %p\n", lvl, mm->pgd); pgd = pgd_offset(mm, addr); printk("%s[%08lx] *pgd=%08llx", lvl, addr, (long long)pgd_val(*pgd)); @@ -100,6 +99,21 @@ void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr) { } #endif /* CONFIG_MMU */ +static void die_kernel_fault(const char *msg, struct mm_struct *mm, + unsigned long addr, unsigned int fsr, + struct pt_regs *regs) +{ + bust_spinlocks(1); + pr_alert("8<--- cut here ---\n"); + pr_alert("Unable to handle kernel %s at virtual address %08lx\n", + msg, addr); + + show_pte(KERN_ALERT, mm, addr); + die("Oops", regs, fsr); + bust_spinlocks(0); + do_exit(SIGKILL); +} + /* * Oops. The kernel tried to access some page that wasn't present. */ @@ -107,6 +121,7 @@ static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, struct pt_regs *regs) { + const char *msg; /* * Are we prepared to handle this kernel fault? */ @@ -116,16 +131,12 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, /* * No handler, we'll have to terminate things with extreme prejudice. */ - bust_spinlocks(1); - pr_alert("8<--- cut here ---\n"); - pr_alert("Unable to handle kernel %s at virtual address %08lx\n", - (addr < PAGE_SIZE) ? 
"NULL pointer dereference" : - "paging request", addr); + if (addr < PAGE_SIZE) + msg = "NULL pointer dereference"; + else + msg = "paging request"; - show_pte(KERN_ALERT, mm, addr); - die("Oops", regs, fsr); - bust_spinlocks(0); - do_exit(SIGKILL); + die_kernel_fault(msg, mm, addr, fsr, regs); } /* @@ -183,73 +194,58 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs) #define VM_FAULT_BADMAP 0x010000 #define VM_FAULT_BADACCESS 0x020000 -/* - * Check that the permissions on the VMA allow for the fault which occurred. - * If we encountered a write fault, we must have write permission, otherwise - * we allow any permission. - */ -static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma) +static inline bool is_permission_fault(unsigned int fsr) { - unsigned int mask = VM_ACCESS_FLAGS; - - if ((fsr & FSR_WRITE) && !(fsr & FSR_CM)) - mask = VM_WRITE; - if (fsr & FSR_LNX_PF) - mask = VM_EXEC; - - return vma->vm_flags & mask ? false : true; + int fs = fsr_fs(fsr); +#ifdef CONFIG_ARM_LPAE + if ((fs & FS_PERM_NOLL_MASK) == FS_PERM_NOLL) + return true; +#else + if (fs == FS_L1_PERM || fs == FS_L2_PERM) + return true; +#endif + return false; } static vm_fault_t __kprobes -__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, - unsigned int flags, struct task_struct *tsk, - struct pt_regs *regs) +__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int flags, + unsigned long vma_flags, struct pt_regs *regs) { - struct vm_area_struct *vma; - vm_fault_t fault; - - vma = find_vma(mm, addr); - fault = VM_FAULT_BADMAP; + struct vm_area_struct *vma = find_vma(mm, addr); if (unlikely(!vma)) - goto out; - if (unlikely(vma->vm_start > addr)) - goto check_stack; + return VM_FAULT_BADMAP; + + if (unlikely(vma->vm_start > addr)) { + if (!(vma->vm_flags & VM_GROWSDOWN)) + return VM_FAULT_BADMAP; + if (addr < FIRST_USER_ADDRESS) + return VM_FAULT_BADMAP; + if (expand_stack(vma, addr)) + return VM_FAULT_BADMAP; + } /* - * Ok, we have a good vm_area for this - * memory access, so we can handle it. + * ok, we have a good vm_area for this memory access, check the + * permissions on the VMA allow for the fault which occurred. */ -good_area: - if (access_error(fsr, vma)) { - fault = VM_FAULT_BADACCESS; - goto out; - } + if (!(vma->vm_flags & vma_flags)) + return VM_FAULT_BADACCESS; return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs); - -check_stack: - /* Don't allow expansion below FIRST_USER_ADDRESS */ - if (vma->vm_flags & VM_GROWSDOWN && - addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr)) - goto good_area; -out: - return fault; } static int __kprobes do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { - struct task_struct *tsk; - struct mm_struct *mm; + struct mm_struct *mm = current->mm; int sig, code; vm_fault_t fault; unsigned int flags = FAULT_FLAG_DEFAULT; + unsigned long vm_flags = VM_ACCESS_FLAGS; if (kprobe_page_fault(regs, fsr)) return 0; - tsk = current; - mm = tsk->mm; /* Enable interrupts if they were enabled in the parent context. 
*/ if (interrupts_enabled(regs)) @@ -264,8 +260,19 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (user_mode(regs)) flags |= FAULT_FLAG_USER; - if ((fsr & FSR_WRITE) && !(fsr & FSR_CM)) + + if ((fsr & FSR_WRITE) && !(fsr & FSR_CM)) { flags |= FAULT_FLAG_WRITE; + vm_flags = VM_WRITE; + } + + if (fsr & FSR_LNX_PF) { + vm_flags = VM_EXEC; + + if (is_permission_fault(fsr) && !user_mode(regs)) + die_kernel_fault("execution of memory", + mm, addr, fsr, regs); + } perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); @@ -293,7 +300,7 @@ retry: #endif } - fault = __do_page_fault(mm, addr, fsr, flags, tsk, regs); + fault = __do_page_fault(mm, addr, flags, vm_flags, regs); /* If we need to retry but a fatal signal is pending, handle the * signal first. We do not need to release the mmap_lock because diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h index 9ecc2097a87a..83b5ab32d7a4 100644 --- a/arch/arm/mm/fault.h +++ b/arch/arm/mm/fault.h @@ -14,6 +14,8 @@ #ifdef CONFIG_ARM_LPAE #define FSR_FS_AEA 17 +#define FS_PERM_NOLL 0xC +#define FS_PERM_NOLL_MASK 0x3C static inline int fsr_fs(unsigned int fsr) { @@ -21,6 +23,8 @@ static inline int fsr_fs(unsigned int fsr) } #else #define FSR_FS_AEA 22 +#define FS_L1_PERM 0xD +#define FS_L2_PERM 0xF static inline int fsr_fs(unsigned int fsr) { diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 80fb5a4a5c05..6e830b9418c9 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -36,6 +36,7 @@ #include <asm/mmu_context.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> +#include <asm/set_memory.h> #include <asm/system_info.h> #include <asm/mach/map.h> @@ -401,6 +402,11 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached) __builtin_return_address(0)); } +void __arm_iomem_set_ro(void __iomem *ptr, size_t size) +{ + set_memory_ro((unsigned long)ptr, PAGE_ALIGN(size) / PAGE_SIZE); +} + void *arch_memremap_wb(phys_addr_t phys_addr, size_t size) { return (__force void *)arch_ioremap_caller(phys_addr, size, diff --git a/arch/arm/mm/kasan_init.c b/arch/arm/mm/kasan_init.c index 9c348042a724..4b1619584b23 100644 --- a/arch/arm/mm/kasan_init.c +++ b/arch/arm/mm/kasan_init.c @@ -226,7 +226,7 @@ void __init kasan_init(void) BUILD_BUG_ON(pgd_index(KASAN_SHADOW_START) != pgd_index(KASAN_SHADOW_END)); memcpy(tmp_pmd_table, - pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_START)), + (void*)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_START)), sizeof(tmp_pmd_table)); set_pgd(&tmp_pgd_table[pgd_index(KASAN_SHADOW_START)], __pgd(__pa(tmp_pmd_table) | PMD_TYPE_TABLE | L_PGD_SWAPPER)); diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index e2c743aa2eb2..fa6999e24b07 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -30,8 +30,7 @@ * act_mm - get current->active_mm */ .macro act_mm, rd - get_thread_info \rd - ldr \rd, [\rd, #TI_TASK] + get_current \rd .if (TSK_ACTIVE_MM > IMM12_MASK) add \rd, \rd, #TSK_ACTIVE_MM & ~IMM12_MASK .endif @@ -340,6 +339,7 @@ ENTRY(\name\()_cache_fns) .macro define_tlb_functions name:req, flags_up:req, flags_smp .type \name\()_tlb_fns, #object + .align 2 ENTRY(\name\()_tlb_fns) .long \name\()_flush_user_tlb_range .long \name\()_flush_kern_tlb_range diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index a903b26cde40..eeb6dc0ecf46 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -1882,11 +1882,6 @@ static int validate_code(struct jit_ctx *ctx) return 0; } -void bpf_jit_compile(struct bpf_prog *prog) -{ 
- /* Nothing to do here. We support Internal BPF. */ -} - bool bpf_jit_needs_zext(void) { return true; diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c index 27e0af78e88b..9090c3a74dcc 100644 --- a/arch/arm/probes/kprobes/core.c +++ b/arch/arm/probes/kprobes/core.c @@ -11,6 +11,8 @@ * Copyright (C) 2007 Marvell Ltd. */ +#define pr_fmt(fmt) "kprobes: " fmt + #include <linux/kernel.h> #include <linux/kprobes.h> #include <linux/module.h> @@ -278,7 +280,7 @@ void __kprobes kprobe_handler(struct pt_regs *regs) break; case KPROBE_REENTER: /* A nested probe was hit in FIQ, it is a BUG */ - pr_warn("Unrecoverable kprobe detected.\n"); + pr_warn("Failed to recover from reentered kprobes.\n"); dump_kprobe(p); fallthrough; default: @@ -366,19 +368,41 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, /* * When a retprobed function returns, trampoline_handler() is called, * calling the kretprobe's handler. We construct a struct pt_regs to - * give a view of registers r0-r11 to the user return-handler. This is - * not a complete pt_regs structure, but that should be plenty sufficient - * for kretprobe handlers which should normally be interested in r0 only - * anyway. + * give a view of registers r0-r11, sp, lr, and pc to the user + * return-handler. This is not a complete pt_regs structure, but that + * should be enough for stacktrace from the return handler with or + * without pt_regs. */ -void __naked __kprobes kretprobe_trampoline(void) +void __naked __kprobes __kretprobe_trampoline(void) { __asm__ __volatile__ ( +#ifdef CONFIG_FRAME_POINTER + "ldr lr, =__kretprobe_trampoline \n\t" + /* __kretprobe_trampoline makes a framepointer on pt_regs. */ +#ifdef CONFIG_CC_IS_CLANG + "stmdb sp, {sp, lr, pc} \n\t" + "sub sp, sp, #12 \n\t" + /* In clang case, pt_regs->ip = lr. */ + "stmdb sp!, {r0 - r11, lr} \n\t" + /* fp points regs->r11 (fp) */ + "add fp, sp, #44 \n\t" +#else /* !CONFIG_CC_IS_CLANG */ + /* In gcc case, pt_regs->ip = fp. */ + "stmdb sp, {fp, sp, lr, pc} \n\t" + "sub sp, sp, #16 \n\t" + "stmdb sp!, {r0 - r11} \n\t" + /* fp points regs->r15 (pc) */ + "add fp, sp, #60 \n\t" +#endif /* CONFIG_CC_IS_CLANG */ +#else /* !CONFIG_FRAME_POINTER */ + "sub sp, sp, #16 \n\t" "stmdb sp!, {r0 - r11} \n\t" +#endif /* CONFIG_FRAME_POINTER */ "mov r0, sp \n\t" "bl trampoline_handler \n\t" "mov lr, r0 \n\t" "ldmia sp!, {r0 - r11} \n\t" + "add sp, sp, #16 \n\t" #ifdef CONFIG_THUMB2_KERNEL "bx lr \n\t" #else @@ -387,11 +411,10 @@ void __naked __kprobes kretprobe_trampoline(void) : : : "memory"); } -/* Called from kretprobe_trampoline */ +/* Called from __kretprobe_trampoline */ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) { - return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, - (void *)regs->ARM_fp); + return (void *)kretprobe_trampoline_handler(regs, (void *)regs->ARM_fp); } void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, @@ -401,7 +424,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, ri->fp = (void *)regs->ARM_fp; /* Replace the return addr with trampoline addr. 
*/ - regs->ARM_lr = (unsigned long)&kretprobe_trampoline; + regs->ARM_lr = (unsigned long)&__kretprobe_trampoline; } int __kprobes arch_trampoline_kprobe(struct kprobe *p) @@ -439,7 +462,7 @@ static struct undef_hook kprobes_arm_break_hook = { #endif /* !CONFIG_THUMB2_KERNEL */ -int __init arch_init_kprobes() +int __init arch_init_kprobes(void) { arm_probes_decode_init(); #ifdef CONFIG_THUMB2_KERNEL diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c index c78180172120..dbef34ed933f 100644 --- a/arch/arm/probes/kprobes/opt-arm.c +++ b/arch/arm/probes/kprobes/opt-arm.c @@ -347,10 +347,11 @@ void arch_unoptimize_kprobes(struct list_head *oplist, } int arch_within_optimized_kprobe(struct optimized_kprobe *op, - unsigned long addr) + kprobe_opcode_t *addr) { - return ((unsigned long)op->kp.addr <= addr && - (unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr); + return (op->kp.addr <= addr && + op->kp.addr + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t)) > addr); + } void arch_remove_optimized_kprobe(struct optimized_kprobe *op) diff --git a/arch/arm/probes/kprobes/test-core.h b/arch/arm/probes/kprobes/test-core.h index f1d5583e7bbb..56ad3c0aaeea 100644 --- a/arch/arm/probes/kprobes/test-core.h +++ b/arch/arm/probes/kprobes/test-core.h @@ -98,7 +98,7 @@ struct test_arg_end { #if VERBOSE #define verbose(fmt, ...) pr_info(fmt, ##__VA_ARGS__) #else -#define verbose(fmt, ...) +#define verbose(fmt, ...) no_printk(fmt, ##__VA_ARGS__) #endif #define TEST_GROUP(title) \ diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl index e842209e135d..543100151f2b 100644 --- a/arch/arm/tools/syscall.tbl +++ b/arch/arm/tools/syscall.tbl @@ -462,3 +462,4 @@ 446 common landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv |
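
The arch_timer changes above drop the 32-bit TVAL accessors in favour of the 64-bit CVAL comparator. A minimal sketch of how a clock event would be programmed under the reworked arch_timer_reg_write_cp15() interface; the helper name set_next_event_phys_sketch() is invented for illustration and is not part of this commit:

/*
 * Sketch only: program the physical timer by writing an absolute 64-bit
 * deadline into CVAL instead of a 32-bit down-count into TVAL.
 */
#include <asm/arch_timer.h>
#include <clocksource/arm_arch_timer.h>

static int set_next_event_phys_sketch(unsigned long delta)
{
	u64 cnt = __arch_counter_get_cntpct();	/* current counter value */
	u32 ctrl = arch_timer_reg_read_cp15(ARCH_TIMER_PHYS_ACCESS,
					    ARCH_TIMER_REG_CTRL);

	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	/* CVAL is absolute: fire when the counter reaches now + delta */
	arch_timer_reg_write_cp15(ARCH_TIMER_PHYS_ACCESS, ARCH_TIMER_REG_CVAL,
				  cnt + delta);
	arch_timer_reg_write_cp15(ARCH_TIMER_PHYS_ACCESS, ARCH_TIMER_REG_CTRL,
				  ctrl);
	return 0;
}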
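
The irqchip conversions above (avic, tzic, omap1, s3c24xx) follow from dropping HANDLE_DOMAIN_IRQ: the common entry path (generic_handle_arch_irq) now performs irq_enter()/irq_exit() and set_irq_regs(), so a root controller handler only translates pending hardware numbers through its domain. A hedged sketch of the resulting driver pattern; all "example_*" names and the EXAMPLE_PENDING offset are invented:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <asm/exception.h>

#define EXAMPLE_PENDING		0x00	/* hypothetical pending register */

static void __iomem *example_base;
static struct irq_domain *example_domain;

static void __exception_irq_entry example_handle_irq(struct pt_regs *regs)
{
	u32 pending;

	while ((pending = readl_relaxed(example_base + EXAMPLE_PENDING))) {
		/* was: handle_domain_irq(example_domain, __ffs(pending), regs); */
		generic_handle_domain_irq(example_domain, __ffs(pending));
	}
}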
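
The fdt_check_mem_start() change above clips every memory node against /chosen/linux,usable-memory-range when that property is present, instead of trusting the traditionally calculated mem_start. A standalone illustration of that clipping rule, under the assumption of a single memory region; the helper clip_to_usable() is not taken from the commit:

/*
 * Example: memory = <0x40000000 0x80000000> and a usable range of
 * <0x48000000 0x08000000> intersect to [0x48000000, 0x50000000), so a
 * crash dump kernel is placed at 0x48000000 rather than at mem_start.
 */
#include <linux/types.h>

static bool clip_to_usable(u32 *base, u64 *end, u32 usable_base, u64 usable_end)
{
	if (*base < usable_base)
		*base = usable_base;
	if (*end > usable_end)
		*end = usable_end;

	return *end > *base;	/* false: region lies entirely outside the window */
}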