author | Rodrigo Vivi <rodrigo.vivi@intel.com> | 2021-05-17 17:48:02 -0400
committer | Rodrigo Vivi <rodrigo.vivi@intel.com> | 2021-05-17 17:48:02 -0400
commit | d22fe808f9a3456f16015e79f1b86a10ce13099f (patch)
tree | 8e8c99d742696a810297d54d650f308f6156d466 /arch
parent | 1a7910368cba1e76b992b116fc8ba28503e6dcc1 (diff)
parent | 6efb943b8616ec53a5e444193dccf1af9ad627b5 (diff)
download | lwn-d22fe808f9a3456f16015e79f1b86a10ce13099f.tar.gz, lwn-d22fe808f9a3456f16015e79f1b86a10ce13099f.zip
Merge drm/drm-next into drm-intel-next
Time to get back in sync...
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Diffstat (limited to 'arch')
1951 files changed, 57241 insertions, 31212 deletions
diff --git a/arch/.gitignore b/arch/.gitignore index 4191da401dbb..756c19c34f99 100644 --- a/arch/.gitignore +++ b/arch/.gitignore @@ -1,3 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -i386 -x86_64 +/i386/ +/x86_64/ diff --git a/arch/Kconfig b/arch/Kconfig index ecfd3520b676..c45b770d3579 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -631,8 +631,7 @@ config ARCH_SUPPORTS_LTO_CLANG_THIN config HAS_LTO_CLANG def_bool y # Clang >= 11: https://github.com/ClangBuiltLinux/linux/issues/510 - depends on CC_IS_CLANG && CLANG_VERSION >= 110000 && LD_IS_LLD - depends on $(success,test $(LLVM_IAS) -eq 1) + depends on CC_IS_CLANG && CLANG_VERSION >= 110000 && LD_IS_LLD && AS_IS_LLVM depends on $(success,$(NM) --help | head -n 1 | grep -qi llvm) depends on $(success,$(AR) --help | head -n 1 | grep -qi llvm) depends on ARCH_SUPPORTS_LTO_CLANG @@ -692,6 +691,51 @@ config LTO_CLANG_THIN If unsure, say Y. endchoice +config ARCH_SUPPORTS_CFI_CLANG + bool + help + An architecture should select this option if it can support Clang's + Control-Flow Integrity (CFI) checking. + +config CFI_CLANG + bool "Use Clang's Control Flow Integrity (CFI)" + depends on LTO_CLANG && ARCH_SUPPORTS_CFI_CLANG + # Clang >= 12: + # - https://bugs.llvm.org/show_bug.cgi?id=46258 + # - https://bugs.llvm.org/show_bug.cgi?id=47479 + depends on CLANG_VERSION >= 120000 + select KALLSYMS + help + This option enables Clang’s forward-edge Control Flow Integrity + (CFI) checking, where the compiler injects a runtime check to each + indirect function call to ensure the target is a valid function with + the correct static type. This restricts possible call targets and + makes it more difficult for an attacker to exploit bugs that allow + the modification of stored function pointers. More information can be + found from Clang's documentation: + + https://clang.llvm.org/docs/ControlFlowIntegrity.html + +config CFI_CLANG_SHADOW + bool "Use CFI shadow to speed up cross-module checks" + default y + depends on CFI_CLANG && MODULES + help + If you select this option, the kernel builds a fast look-up table of + CFI check functions in loaded modules to reduce performance overhead. + + If unsure, say Y. + +config CFI_PERMISSIVE + bool "Use CFI in permissive mode" + depends on CFI_CLANG + help + When selected, Control Flow Integrity (CFI) violations result in a + warning instead of a kernel panic. This option should only be used + for finding indirect call type mismatches during development. + + If unsure, say N. + config HAVE_ARCH_WITHIN_STACK_FRAMES bool help @@ -785,6 +829,17 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD config HAVE_ARCH_HUGE_VMAP bool +# +# Archs that select this would be capable of PMD-sized vmaps (i.e., +# arch_vmap_pmd_supported() returns true), and they must make no assumptions +# that vmalloc memory is mapped with PAGE_SIZE ptes. The VM_NO_HUGE_VMAP flag +# can be used to prohibit arch-specific allocations from using hugepages to +# help with this (e.g., modules may require it). +# +config HAVE_ARCH_HUGE_VMALLOC + depends on HAVE_ARCH_HUGE_VMAP + bool + config ARCH_WANT_HUGE_PMD_SHARE bool @@ -1013,6 +1068,13 @@ config COMPAT_32BIT_TIME config ARCH_NO_PREEMPT bool +config ARCH_EPHEMERAL_INODES + def_bool n + help + An arch should select this symbol if it doesn't keep track of inode + instances on its own, but instead relies on something else (e.g. the + host kernel for an UML kernel). 
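The CFI_CLANG help text added earlier in this arch/Kconfig diff describes forward-edge checking of indirect calls. As a rough illustration only (not part of this patch; all identifiers below are made up), the C fragment sketches the kind of static-type mismatch such a check rejects: the compiler instruments the indirect call site, and a target whose prototype differs from the pointer's type fails the check, causing a panic by default or only a warning when CFI_PERMISSIVE is enabled.

	/* Illustration only; not from the patch. Identifiers are hypothetical. */
	typedef int (*handler_fn)(int);

	static long wrong_type_handler(void *arg)	/* prototype differs from handler_fn */
	{
		return 0;
	}

	static int dispatch(handler_fn fn)
	{
		/*
		 * With CONFIG_CFI_CLANG, Clang emits a type check before this
		 * indirect call; fn pointing at wrong_type_handler fails it.
		 */
		return fn(42);
	}

	static int example(void)
	{
		return dispatch((handler_fn)wrong_type_handler);
	}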
+ config ARCH_SUPPORTS_RT bool @@ -1054,6 +1116,29 @@ config VMAP_STACK backing virtual mappings with real shadow memory, and KASAN_VMALLOC must be enabled. +config HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET + def_bool n + help + An arch should select this symbol if it can support kernel stack + offset randomization with calls to add_random_kstack_offset() + during syscall entry and choose_random_kstack_offset() during + syscall exit. Careful removal of -fstack-protector-strong and + -fstack-protector should also be applied to the entry code and + closely examined, as the artificial stack bump looks like an array + to the compiler, so it will attempt to add canary checks regardless + of the static branch state. + +config RANDOMIZE_KSTACK_OFFSET_DEFAULT + bool "Randomize kernel stack offset on syscall entry" + depends on HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET + help + The kernel stack offset can be randomized (after pt_regs) by + roughly 5 bits of entropy, frustrating memory corruption + attacks that depend on stack address determinism or + cross-syscall address exposures. This feature is controlled + by kernel boot param "randomize_kstack_offset=on/off", and this + config chooses the default boot state. + config ARCH_OPTIONAL_KERNEL_RWX def_bool n diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h index 1f6a909d1fa5..0fab5ac90775 100644 --- a/arch/alpha/include/asm/io.h +++ b/arch/alpha/include/asm/io.h @@ -602,11 +602,6 @@ extern void outsl (unsigned long port, const void *src, unsigned long count); */ #define xlate_dev_mem_ptr(p) __va(p) -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - #endif /* __KERNEL__ */ #endif /* __ALPHA_IO_H */ diff --git a/arch/alpha/kernel/pc873xx.c b/arch/alpha/kernel/pc873xx.c index 63aee5d86e02..82b19c9e59d6 100644 --- a/arch/alpha/kernel/pc873xx.c +++ b/arch/alpha/kernel/pc873xx.c @@ -13,12 +13,12 @@ static char *pc873xx_names[] = { static unsigned int base, model; -unsigned int __init pc873xx_get_base() +unsigned int __init pc873xx_get_base(void) { return base; } -char *__init pc873xx_get_model() +char *__init pc873xx_get_model(void) { return pc873xx_names[model]; } diff --git a/arch/alpha/kernel/syscalls/Makefile b/arch/alpha/kernel/syscalls/Makefile index 285aaba832d9..6713c65a25e1 100644 --- a/arch/alpha/kernel/syscalls/Makefile +++ b/arch/alpha/kernel/syscalls/Makefile @@ -6,20 +6,14 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') syscall := $(src)/syscall.tbl -syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +syshdr := $(srctree)/scripts/syscallhdr.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ - cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ - '$(syshdr_abis_$(basetarget))' \ - '$(syshdr_pfx_$(basetarget))' \ - '$(syshdr_offset_$(basetarget))' + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr $< $@ quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))' \ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ $(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl index 02f0244e005c..5622578742fd 100644 --- a/arch/alpha/kernel/syscalls/syscall.tbl +++ b/arch/alpha/kernel/syscalls/syscall.tbl @@ -482,3 +482,7 @@ 550 common 
process_madvise sys_process_madvise 551 common epoll_pwait2 sys_epoll_pwait2 552 common mount_setattr sys_mount_setattr +553 common quotactl_path sys_quotactl_path +554 common landlock_create_ruleset sys_landlock_create_ruleset +555 common landlock_add_rule sys_landlock_add_rule +556 common landlock_restrict_self sys_landlock_restrict_self diff --git a/arch/alpha/kernel/syscalls/syscallhdr.sh b/arch/alpha/kernel/syscalls/syscallhdr.sh deleted file mode 100644 index 1780e861492a..000000000000 --- a/arch/alpha/kernel/syscalls/syscallhdr.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -prefix="$4" -offset="$5" - -fileguard=_UAPI_ASM_ALPHA_`basename "$out" | sed \ - -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ - -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - printf "#ifndef %s\n" "${fileguard}" - printf "#define %s\n" "${fileguard}" - printf "\n" - - nxt=0 - while read nr abi name entry ; do - if [ -z "$offset" ]; then - printf "#define __NR_%s%s\t%s\n" \ - "${prefix}" "${name}" "${nr}" - else - printf "#define __NR_%s%s\t(%s + %s)\n" \ - "${prefix}" "${name}" "${offset}" "${nr}" - fi - nxt=$((nr+1)) - done - - printf "\n" - printf "#ifdef __KERNEL__\n" - printf "#define __NR_syscalls\t%s\n" "${nxt}" - printf "#endif\n" - printf "\n" - printf "#endif /* %s */\n" "${fileguard}" -) > "$out" diff --git a/arch/alpha/kernel/syscalls/syscalltbl.sh b/arch/alpha/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index 85d78d9309ad..000000000000 --- a/arch/alpha/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry ; do - emit $((nxt+offset)) $((nr+offset)) $entry - nxt=$((nr+1)) - done -) > "$out" diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S index 9704f22ed5e3..68f3e4f329eb 100644 --- a/arch/alpha/kernel/systbls.S +++ b/arch/alpha/kernel/systbls.S @@ -7,10 +7,9 @@ #include <asm/unistd.h> -#define __SYSCALL(nr, entry, nargs) .quad entry +#define __SYSCALL(nr, entry) .quad entry .data .align 3 .globl sys_call_table sys_call_table: #include <asm/syscall_table.h> -#undef __SYSCALL diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c index dc68efbe9367..1931a04af85a 100644 --- a/arch/alpha/lib/csum_partial_copy.c +++ b/arch/alpha/lib/csum_partial_copy.c @@ -13,6 +13,7 @@ #include <linux/types.h> #include <linux/string.h> #include <linux/uaccess.h> +#include <net/checksum.h> #define ldq_u(x,y) \ diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 3c42b3147fd6..a97650a618f1 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -282,5 +282,4 @@ mem_init(void) set_max_mapnr(max_low_pfn); high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); memblock_free_all(); - mem_init_print_info(NULL); } diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index bc8d6aecfbbd..2d98501c0897 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -6,6 +6,7 @@ config 
ARC def_bool y select ARC_TIMERS + select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DMA_PREP_COHERENT select ARCH_HAS_PTE_SPECIAL @@ -28,6 +29,7 @@ config ARC select GENERIC_SMP_IDLE_THREAD select HAVE_ARCH_KGDB select HAVE_ARCH_TRACEHOOK + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARC_MMU_V4 select HAVE_DEBUG_STACKOVERFLOW select HAVE_DEBUG_KMEMLEAK select HAVE_FUTEX_CMPXCHG if FUTEX @@ -48,9 +50,6 @@ config ARC select HAVE_ARCH_JUMP_LABEL if ISA_ARCV2 && !CPU_ENDIAN_BE32 select SET_FS -config ARCH_HAS_CACHE_LINE_SIZE - def_bool y - config TRACE_IRQFLAGS_SUPPORT def_bool y @@ -86,10 +85,6 @@ config STACKTRACE_SUPPORT def_bool y select STACKTRACE -config HAVE_ARCH_TRANSPARENT_HUGEPAGE - def_bool y - depends on ARC_MMU_V4 - menu "ARC Architecture Configuration" menu "ARC Platform/SoC/Board" diff --git a/arch/arc/boot/dts/haps_hs.dts b/arch/arc/boot/dts/haps_hs.dts index 60d578e2781f..76ad527a0847 100644 --- a/arch/arc/boot/dts/haps_hs.dts +++ b/arch/arc/boot/dts/haps_hs.dts @@ -16,7 +16,7 @@ memory { device_type = "memory"; /* CONFIG_LINUX_RAM_BASE needs to match low mem start */ - reg = <0x0 0x80000000 0x0 0x20000000 /* 512 MB low mem */ + reg = <0x0 0x80000000 0x0 0x40000000 /* 1 GB low mem */ 0x1 0x00000000 0x0 0x40000000>; /* 1 GB highmem */ }; diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index a78d8f745a67..fdbe06c98895 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c @@ -96,7 +96,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs, sizeof(sf->uc.uc_mcontext.regs.scratch)); err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t)); - return err; + return err ? -EFAULT : 0; } static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf) @@ -110,7 +110,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf) &(sf->uc.uc_mcontext.regs.scratch), sizeof(sf->uc.uc_mcontext.regs.scratch)); if (err) - return err; + return -EFAULT; set_current_blocked(&set); regs->bta = uregs.scratch.bta; diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c index 74ad4256022e..47bab67f8649 100644 --- a/arch/arc/kernel/unwind.c +++ b/arch/arc/kernel/unwind.c @@ -187,25 +187,26 @@ static void init_unwind_table(struct unwind_table *table, const char *name, const void *table_start, unsigned long table_size, const u8 *header_start, unsigned long header_size) { - const u8 *ptr = header_start + 4; - const u8 *end = header_start + header_size; - table->core.pc = (unsigned long)core_start; table->core.range = core_size; table->init.pc = (unsigned long)init_start; table->init.range = init_size; table->address = table_start; table->size = table_size; - - /* See if the linker provided table looks valid. */ - if (header_size <= 4 - || header_start[0] != 1 - || (void *)read_pointer(&ptr, end, header_start[1]) != table_start - || header_start[2] == DW_EH_PE_omit - || read_pointer(&ptr, end, header_start[2]) <= 0 - || header_start[3] == DW_EH_PE_omit) - header_start = NULL; - + /* To avoid the pointer addition with NULL pointer.*/ + if (header_start != NULL) { + const u8 *ptr = header_start + 4; + const u8 *end = header_start + header_size; + /* See if the linker provided table looks valid. 
*/ + if (header_size <= 4 + || header_start[0] != 1 + || (void *)read_pointer(&ptr, end, header_start[1]) + != table_start + || header_start[2] == DW_EH_PE_omit + || read_pointer(&ptr, end, header_start[2]) <= 0 + || header_start[3] == DW_EH_PE_omit) + header_start = NULL; + } table->hdrsz = header_size; smp_wmb(); table->header = header_start; diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index ce07e697916c..33832e36bdb7 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -194,7 +194,6 @@ void __init mem_init(void) { memblock_free_all(); highmem_init(); - mem_init_print_info(NULL); } #ifdef CONFIG_HIGHMEM diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 5da96f5df48f..24804f11302d 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -31,8 +31,10 @@ config ARM select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7 select ARCH_SUPPORTS_ATOMIC_RMW + select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF + select ARCH_USE_MEMTEST select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select ARCH_WANT_IPC_PARSE_VERSION select ARCH_WANT_LD_ORPHAN_WARN @@ -76,6 +78,7 @@ config ARM select HAVE_ARCH_SECCOMP_FILTER if AEABI && !OABI_COMPAT select HAVE_ARCH_THREAD_STRUCT_WHITELIST select HAVE_ARCH_TRACEHOOK + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARM_LPAE select HAVE_ARM_SMCCC if CPU_V7 select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32 select HAVE_CONTEXT_TRACKING @@ -1293,9 +1296,15 @@ config KASAN_SHADOW_OFFSET config NR_CPUS int "Maximum number of CPUs (2-32)" - range 2 32 + range 2 16 if DEBUG_KMAP_LOCAL + range 2 32 if !DEBUG_KMAP_LOCAL depends on SMP default "4" + help + The maximum number of CPUs that the kernel can support. + Up to 32 CPUs can be supported, or up to 16 if kmap_local() + debugging is enabled, which uses half of the per-CPU fixmap + slots as guard regions. config HOTPLUG_CPU bool "Support for hot-pluggable CPUs" @@ -1321,7 +1330,7 @@ config ARM_PSCI # selected platforms. config ARCH_NR_GPIO int - default 2048 if ARCH_SOCFPGA + default 2048 if ARCH_INTEL_SOCFPGA default 1024 if ARCH_BRCMSTB || ARCH_RENESAS || ARCH_TEGRA || \ ARCH_ZYNQ || ARCH_ASPEED default 512 if ARCH_EXYNOS || ARCH_KEYSTONE || SOC_OMAP5 || \ @@ -1504,14 +1513,6 @@ config HW_PERF_EVENTS def_bool y depends on ARM_PMU -config SYS_SUPPORTS_HUGETLBFS - def_bool y - depends on ARM_LPAE - -config HAVE_ARCH_TRANSPARENT_HUGEPAGE - def_bool y - depends on ARM_LPAE - config ARCH_WANT_GENERAL_HUGETLB def_bool y diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index 9e0b5e7f12af..36016497b1b3 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -1087,7 +1087,7 @@ choice on SD5203 UART. config DEBUG_SOCFPGA_UART0 - depends on ARCH_SOCFPGA + depends on ARCH_INTEL_SOCFPGA bool "Use SOCFPGA UART0 for low-level debug" select DEBUG_UART_8250 help @@ -1095,7 +1095,7 @@ choice on SOCFPGA(Cyclone 5 and Arria 5) based platforms. config DEBUG_SOCFPGA_ARRIA10_UART1 - depends on ARCH_SOCFPGA + depends on ARCH_INTEL_SOCFPGA bool "Use SOCFPGA Arria10 UART1 for low-level debug" select DEBUG_UART_8250 help @@ -1103,7 +1103,7 @@ choice on SOCFPGA(Arria 10) based platforms. 
config DEBUG_SOCFPGA_CYCLONE5_UART1 - depends on ARCH_SOCFPGA + depends on ARCH_INTEL_SOCFPGA bool "Use SOCFPGA Cyclone 5 UART1 for low-level debug" select DEBUG_UART_8250 help diff --git a/arch/arm/Makefile b/arch/arm/Makefile index dad5502ecc28..415c3514573a 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -209,7 +209,7 @@ machine-$(CONFIG_PLAT_SAMSUNG) += s3c machine-$(CONFIG_ARCH_S5PV210) += s5pv210 machine-$(CONFIG_ARCH_SA1100) += sa1100 machine-$(CONFIG_ARCH_RENESAS) += shmobile -machine-$(CONFIG_ARCH_SOCFPGA) += socfpga +machine-$(CONFIG_ARCH_INTEL_SOCFPGA) += socfpga machine-$(CONFIG_ARCH_STI) += sti machine-$(CONFIG_ARCH_STM32) += stm32 machine-$(CONFIG_ARCH_SUNXI) += sunxi diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index fd94e27ba4fa..8eb70c1febce 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -96,13 +96,6 @@ endif $(foreach o, $(libfdt_objs) atags_to_fdt.o fdt_check_mem_start.o, \ $(eval CFLAGS_$(o) := -I $(srctree)/scripts/dtc/libfdt -fno-stack-protector)) -# These were previously generated C files. When you are building the kernel -# with O=, make sure to remove the stale files in the output tree. Otherwise, -# the build system wrongly compiles the stale ones. -ifdef building_out_of_srctree -$(shell rm -f $(addprefix $(obj)/, fdt_rw.c fdt_ro.c fdt_wip.c fdt.c)) -endif - targets := vmlinux vmlinux.lds piggy_data piggy.o \ lib1funcs.o ashldi3.o bswapsdi2.o \ head.o $(OBJS) @@ -118,8 +111,8 @@ asflags-y := -DZIMAGE # Supply kernel BSS size to the decompressor via a linker symbol. KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \ - sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \ - -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) ) + sed -n -e 's/^\([^ ]*\) [ABD] __bss_start$$/-0x\1/p' \ + -e 's/^\([^ ]*\) [ABD] __bss_stop$$/+0x\1/p') )) ) LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ) # Supply ZRELADDR to the decompressor via a linker symbol. 
ifneq ($(CONFIG_AUTO_ZRELADDR),y) diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index 8e5d4ab4e75e..f8f09c5066e7 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -333,8 +333,11 @@ dtb-$(CONFIG_ARCH_LPC18XX) += \ dtb-$(CONFIG_ARCH_LPC32XX) += \ lpc3250-ea3250.dtb \ lpc3250-phy3250.dtb +dtb-$(CONFIG_ARCH_WPCM450) += \ + nuvoton-wpcm450-supermicro-x9sci-ln4f.dtb dtb-$(CONFIG_ARCH_NPCM7XX) += \ nuvoton-npcm730-gsj.dtb \ + nuvoton-npcm730-gbs.dtb \ nuvoton-npcm730-kudo.dtb \ nuvoton-npcm750-evb.dtb \ nuvoton-npcm750-runbmc-olympus.dtb @@ -660,6 +663,7 @@ dtb-$(CONFIG_SOC_IMX7D) += \ imx7d-pico-hobbit.dtb \ imx7d-pico-nymph.dtb \ imx7d-pico-pi.dtb \ + imx7d-remarkable2.dtb \ imx7d-sbc-imx7.dtb \ imx7d-sdb.dtb \ imx7d-sdb-reva.dtb \ @@ -929,7 +933,9 @@ dtb-$(CONFIG_ARCH_QCOM) += \ qcom-msm8974-sony-xperia-castor.dtb \ qcom-msm8974-sony-xperia-honami.dtb \ qcom-mdm9615-wp8548-mangoh-green.dtb \ - qcom-sdx55-mtp.dtb + qcom-sdx55-mtp.dtb \ + qcom-sdx55-t55.dtb \ + qcom-sdx55-telit-fn980-tlb.dtb dtb-$(CONFIG_ARCH_RDA) += \ rda8810pl-orangepi-2g-iot.dtb \ rda8810pl-orangepi-i96.dtb @@ -1033,7 +1039,7 @@ dtb-$(CONFIG_ARCH_S5PV210) += \ s5pv210-smdkc110.dtb \ s5pv210-smdkv210.dtb \ s5pv210-torbreck.dtb -dtb-$(CONFIG_ARCH_SOCFPGA) += \ +dtb-$(CONFIG_ARCH_INTEL_SOCFPGA) += \ socfpga_arria5_socdk.dtb \ socfpga_arria10_socdk_nand.dtb \ socfpga_arria10_socdk_qspi.dtb \ @@ -1071,11 +1077,16 @@ dtb-$(CONFIG_ARCH_STM32) += \ stm32746g-eval.dtb \ stm32h743i-eval.dtb \ stm32h743i-disco.dtb \ + stm32h750i-art-pi.dtb \ stm32mp153c-dhcom-drc02.dtb \ stm32mp157a-avenger96.dtb \ stm32mp157a-dhcor-avenger96.dtb \ stm32mp157a-dk1.dtb \ stm32mp157a-iot-box.dtb \ + stm32mp157a-microgea-stm32mp1-microdev2.0.dtb \ + stm32mp157a-microgea-stm32mp1-microdev2.0-of7.dtb \ + stm32mp157a-icore-stm32mp1-ctouch2.dtb \ + stm32mp157a-icore-stm32mp1-edimm2.2.dtb \ stm32mp157a-stinger96.dtb \ stm32mp157c-dhcom-pdk2.dtb \ stm32mp157c-dhcom-picoitx.dtb \ @@ -1105,7 +1116,8 @@ dtb-$(CONFIG_MACH_SUN4I) += \ sun4i-a10-olinuxino-lime.dtb \ sun4i-a10-pcduino.dtb \ sun4i-a10-pcduino2.dtb \ - sun4i-a10-pov-protab2-ips9.dtb + sun4i-a10-pov-protab2-ips9.dtb \ + sun4i-a10-topwise-a721.dtb dtb-$(CONFIG_MACH_SUN5I) += \ sun5i-a10s-auxtek-t003.dtb \ sun5i-a10s-auxtek-t004.dtb \ @@ -1337,6 +1349,7 @@ dtb-$(CONFIG_MACH_ARMADA_375) += \ armada-375-db.dtb dtb-$(CONFIG_MACH_ARMADA_38X) += \ armada-382-rd-ac3x-48g4x2xl.dtb \ + armada-385-atl-x530.dtb\ armada-385-clearfog-gtr-s4.dtb \ armada-385-clearfog-gtr-l8.dtb \ armada-385-db-88f6820-amc.dtb \ @@ -1397,6 +1410,7 @@ dtb-$(CONFIG_ARCH_MILBEAUT) += milbeaut-m10v-evb.dtb dtb-$(CONFIG_ARCH_MSTARV7) += \ mstar-infinity-msc313-breadbee_crust.dtb \ mstar-infinity2m-ssd202d-ssd201htv2.dtb \ + mstar-infinity2m-ssd202d-unitv2.dtb \ mstar-infinity3-msc313e-breadbee.dtb \ mstar-mercury5-ssc8336n-midrived08.dtb dtb-$(CONFIG_ARCH_ASPEED) += \ @@ -1406,6 +1420,7 @@ dtb-$(CONFIG_ARCH_ASPEED) += \ aspeed-bmc-ampere-mtjade.dtb \ aspeed-bmc-arm-centriq2400-rep.dtb \ aspeed-bmc-arm-stardragon4800-rep2.dtb \ + aspeed-bmc-asrock-e3c246d4i.dtb \ aspeed-bmc-bytedance-g220a.dtb \ aspeed-bmc-facebook-cmm.dtb \ aspeed-bmc-facebook-galaxy100.dtb \ @@ -1418,6 +1433,7 @@ dtb-$(CONFIG_ARCH_ASPEED) += \ aspeed-bmc-facebook-yosemitev2.dtb \ aspeed-bmc-ibm-everest.dtb \ aspeed-bmc-ibm-rainier.dtb \ + aspeed-bmc-ibm-rainier-1s4u.dtb \ aspeed-bmc-ibm-rainier-4u.dtb \ aspeed-bmc-intel-s2600wf.dtb \ aspeed-bmc-inspur-fp5280g2.dtb \ diff --git a/arch/arm/boot/dts/am335x-boneblack.dts 
b/arch/arm/boot/dts/am335x-boneblack.dts index b4feb85e171a..e2ee8b8c07bc 100644 --- a/arch/arm/boot/dts/am335x-boneblack.dts +++ b/arch/arm/boot/dts/am335x-boneblack.dts @@ -26,54 +26,54 @@ &gpio0 { gpio-line-names = - "[ethernet]", - "[ethernet]", + "[mdio_data]", + "[mdio_clk]", "P9_22 [spi0_sclk]", "P9_21 [spi0_d0]", "P9_18 [spi0_d1]", "P9_17 [spi0_cs0]", - "[sd card]", - "P9_42A [ecappwm0]", - "P8_35 [hdmi]", - "P8_33 [hdmi]", - "P8_31 [hdmi]", - "P8_32 [hdmi]", + "[mmc0_cd]", + "P8_42A [ecappwm0]", + "P8_35 [lcd d12]", + "P8_33 [lcd d13]", + "P8_31 [lcd d14]", + "P8_32 [lcd d15]", "P9_20 [i2c2_sda]", "P9_19 [i2c2_scl]", "P9_26 [uart1_rxd]", "P9_24 [uart1_txd]", - "[ethernet]", - "[ethernet]", - "[usb]", - "[hdmi]", + "[rmii1_txd3]", + "[rmii1_txd2]", + "[usb0_drvvbus]", + "[hdmi cec]", "P9_41B", - "[ethernet]", + "[rmii1_txd1]", "P8_19 [ehrpwm2a]", "P8_13 [ehrpwm2b]", - "[NC]", - "[NC]", + "NC", + "NC", "P8_14", "P8_17", - "[ethernet]", - "[ethernet]", + "[rmii1_txd0]", + "[rmii1_refclk]", "P9_11 [uart4_rxd]", "P9_13 [uart4_txd]"; }; &gpio1 { gpio-line-names = - "P8_25 [emmc]", - "[emmc]", - "P8_5 [emmc]", - "P8_6 [emmc]", - "P8_23 [emmc]", - "P8_22 [emmc]", - "P8_3 [emmc]", - "P8_4 [emmc]", - "[NC]", - "[NC]", - "[NC]", - "[NC]", + "P8_25 [mmc1_dat0]", + "[mmc1_dat1]", + "P8_5 [mmc1_dat2]", + "P8_6 [mmc1_dat3]", + "P8_23 [mmc1_dat4]", + "P8_22 [mmc1_dat5]", + "P8_3 [mmc1_dat6]", + "P8_4 [mmc1_dat7]", + "NC", + "NC", + "NC", + "NC", "P8_12", "P8_11", "P8_16", @@ -82,13 +82,13 @@ "P9_23", "P9_14 [ehrpwm1a]", "P9_16 [ehrpwm1b]", - "[emmc]", + "[emmc rst]", "[usr0 led]", "[usr1 led]", "[usr2 led]", "[usr3 led]", - "[hdmi]", - "[usb]", + "[hdmi irq]", + "[usb vbus oc]", "[hdmi audio]", "P9_12", "P8_26", @@ -116,38 +116,38 @@ "P8_38 [hdmi]", "P8_36 [hdmi]", "P8_34 [hdmi]", - "[ethernet]", - "[ethernet]", - "[ethernet]", - "[ethernet]", + "[rmii1_rxd3]", + "[rmii1_rxd2]", + "[rmii1_rxd1]", + "[rmii1_rxd0]", "P8_27 [hdmi]", "P8_29 [hdmi]", "P8_28 [hdmi]", "P8_30 [hdmi]", - "[emmc]", - "[emmc]", - "[emmc]", - "[emmc]", - "[emmc]", - "[emmc]"; + "[mmc0_dat3]", + "[mmc0_dat2]", + "[mmc0_dat1]", + "[mmc0_dat0]", + "[mmc0_clk]", + "[mmc0_cmd]"; }; &gpio3 { gpio-line-names = - "[ethernet]", - "[ethernet]", - "[ethernet]", - "[ethernet]", - "[ethernet]", - "[i2c0]", - "[i2c0]", - "[emu]", - "[emu]", - "[ethernet]", - "[ethernet]", - "[NC]", - "[NC]", - "[usb]", + "[mii col]", + "[mii crs]", + "[mii rx err]", + "[mii tx en]", + "[mii rx dv]", + "[i2c0 sda]", + "[i2c0 scl]", + "[jtag emu0]", + "[jtag emu1]", + "[mii tx clk]", + "[mii rx clk]", + "NC", + "NC", + "[usb vbus en]", "P9_31 [spi1_sclk]", "P9_29 [spi1_d0]", "P9_30 [spi1_d1]", @@ -156,14 +156,14 @@ "P9_27", "P9_41A", "P9_25", - "[NC]", - "[NC]", - "[NC]", - "[NC]", - "[NC]", - "[NC]", - "[NC]", - "[NC]", - "[NC]", - "[NC]"; + "NC", + "NC", + "NC", + "NC", + "NC", + "NC", + "NC", + "NC", + "NC", + "NC"; }; diff --git a/arch/arm/boot/dts/am335x-pocketbeagle.dts b/arch/arm/boot/dts/am335x-pocketbeagle.dts index d526c5941c9b..209cdd17dc1e 100644 --- a/arch/arm/boot/dts/am335x-pocketbeagle.dts +++ b/arch/arm/boot/dts/am335x-pocketbeagle.dts @@ -61,51 +61,51 @@ &gpio0 { gpio-line-names = - "[NC]", - "[NC]", + "NC", + "NC", "P1.08 [SPI0_CLK]", "P1.10 [SPI0_MISO]", "P1.12 [SPI0_MOSI]", "P1.06 [SPI0_CS]", "[MMC0_CD]", "P2.29 [SPI1_CLK]", - "[SYSBOOT]", - "[SYSBOOT]", - "[SYSBOOT]", - "[SYSBOOT]", + "[SYSBOOT 12]", + "[SYSBOOT 13]", + "[SYSBOOT 14]", + "[SYSBOOT 15]", "P1.26 [I2C2_SDA]", "P1.28 [I2C2_SCL]", "P2.11 [I2C1_SDA]", "P2.09 [I2C1_SCL]", - 
"[NC]", - "[NC]", - "[NC]", + "NC", + "NC", + "NC", "P2.31 [SPI1_CS]", "P1.20 [PRU0.16]", - "[NC]", - "[NC]", + "NC", + "NC", "P2.03", - "[NC]", - "[NC]", + "NC", + "NC", "P1.34", "P2.19", - "[NC]", - "[NC]", + "NC", + "NC", "P2.05 [UART4_RX]", "P2.07 [UART4_TX]"; }; &gpio1 { gpio-line-names = - "[NC]", - "[NC]", - "[NC]", - "[NC]", - "[NC]", - "[NC]", - "[NC]", - "[NC]", - "[NC]", + "NC", + "NC", + "NC", + "NC", + "NC", + "NC", + "NC", + "NC", + "NC", "P2.25 [SPI1_MOSI]", "P1.32 [UART0_RX]", "P1.30 [UART0_TX]", @@ -113,10 +113,10 @@ "P2.33", "P2.22", "P2.18", - "[NC]", - "[NC]", + "NC", + "NC", "P2.01 [PWM1A]", - "[NC]", + "NC", "P2.10", "[USR LED 0]", "[USR LED 1]", @@ -126,35 +126,35 @@ "P2.04", "P2.02", "P2.08", - "[NC]", - "[NC]", - "[NC]"; + "NC", + "NC", + "NC"; }; &gpio2 { gpio-line-names = "P2.20", "P2.17", - "[NC]", - "[NC]", - "[NC]", + "NC", + "NC", + "NC", "[EEPROM_WP]", - "[SYSBOOT]", - "[SYSBOOT]", - "[SYSBOOT]", - "[SYSBOOT]", - "[SYSBOOT]", - "[SYSBOOT]", - "[SYSBOOT]", - "[SYSBOOT]", - "[SYSBOOT]", - "[SYSBOOT]", - "[SYSBOOT]", - "[SYSBOOT]", - "[NC]", - "[NC]", - "[NC]", - "[NC]", + "[SYSBOOT 0]", + "[SYSBOOT 1]", + "[SYSBOOT 2]", + "[SYSBOOT 3]", + "[SYSBOOT 4]", + "[SYSBOOT 5]", + "[SYSBOOT 6]", + "[SYSBOOT 7]", + "[SYSBOOT 8]", + "[SYSBOOT 9]", + "[SYSBOOT 10]", + "[SYSBOOT 11]", + "NC", + "NC", + "NC", + "NC", "P2.35 [AIN5]", "P1.02 [AIN6]", "P1.35 [PRU1.10]", @@ -169,19 +169,19 @@ &gpio3 { gpio-line-names = - "[NC]", - "[NC]", - "[NC]", - "[NC]", - "[NC]", + "NC", + "NC", + "NC", + "NC", + "NC", "[I2C0_SDA]", "[I2C0_SCL]", - "[JTAG]", - "[JTAG]", - "[NC]", - "[NC]", - "[NC]", - "[NC]", + "[JTAG EMU0]", + "[JTAG EMU1]", + "NC", + "NC", + "NC", + "NC", "P1.03 [USB1]", "P1.36 [PWM0A]", "P1.33 [PRU0.1]", @@ -191,16 +191,16 @@ "P2.34 [PRU0.5]", "P2.28 [PRU0.6]", "P1.29 [PRU0.7]", - "[NC]", - "[NC]", - "[NC]", - "[NC]", - "[NC]", - "[NC]", - "[NC]", - "[NC]", - "[NC]", - "[NC]"; + "NC", + "NC", + "NC", + "NC", + "NC", + "NC", + "NC", + "NC", + "NC", + "NC"; }; &am33xx_pinmux { diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi index 1fb22088caeb..039a9ab4c7ea 100644 --- a/arch/arm/boot/dts/am33xx-l4.dtsi +++ b/arch/arm/boot/dts/am33xx-l4.dtsi @@ -259,22 +259,22 @@ ranges = <0x00000000 0x0000d000 0x00001000>, <0x00001000 0x0000e000 0x00001000>; - tscadc: tscadc@0 { - compatible = "ti,am3359-tscadc"; - reg = <0x0 0x1000>; - interrupts = <16>; - status = "disabled"; - dmas = <&edma 53 0>, <&edma 57 0>; - dma-names = "fifo0", "fifo1"; + tscadc: tscadc@0 { + compatible = "ti,am3359-tscadc"; + reg = <0x0 0x1000>; + interrupts = <16>; + status = "disabled"; + dmas = <&edma 53 0>, <&edma 57 0>; + dma-names = "fifo0", "fifo1"; - tsc { - compatible = "ti,am3359-tsc"; - }; - am335x_adc: adc { - #io-channel-cells = <1>; - compatible = "ti,am3359-adc"; - }; + tsc { + compatible = "ti,am3359-tsc"; + }; + am335x_adc: adc { + #io-channel-cells = <1>; + compatible = "ti,am3359-adc"; }; + }; }; target-module@10000 { /* 0x44e10000, ap 22 0c.0 */ diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index 5b213a1e68bb..5e33d0e88f5b 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -40,6 +40,9 @@ ethernet1 = &cpsw_emac1; spi0 = &spi0; spi1 = &spi1; + mmc0 = &mmc1; + mmc1 = &mmc2; + mmc2 = &mmc3; }; cpus { diff --git a/arch/arm/boot/dts/armada-385-atl-x530.dts b/arch/arm/boot/dts/armada-385-atl-x530.dts new file mode 100644 index 000000000000..ed3f41c7df71 --- /dev/null +++ 
b/arch/arm/boot/dts/armada-385-atl-x530.dts @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Device Tree file for Armada 385 Allied Telesis x530/GS980MX Board. + (x530/AT-GS980MX) + * + Copyright (C) 2020 Allied Telesis Labs + */ + +/dts-v1/; +#include "armada-385.dtsi" + +#include <dt-bindings/gpio/gpio.h> + +/ { + model = "x530/AT-GS980MX"; + compatible = "alliedtelesis,gs980mx", "alliedtelesis,x530", "marvell,armada385", "marvell,armada380"; + + chosen { + stdout-path = "serial1:115200n8"; + }; + + memory { + device_type = "memory"; + reg = <0x00000000 0x40000000>; /* 1GB */ + }; + + soc { + ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000 + MBUS_ID(0x01, 0x3d) 0 0xf4800000 0x80000 + MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000>; + + internal-regs { + i2c0: i2c@11000 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c0_pins>; + status = "okay"; + }; + + uart0: serial@12000 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_pins>; + status = "okay"; + }; + }; + }; +}; + +&pciec { + status = "okay"; +}; + +&pcie1 { + status = "okay"; + reset-gpios = <&gpio1 23 GPIO_ACTIVE_LOW>; + reset-delay-us = <400000>; +}; + +&pcie2 { + status = "okay"; +}; + +&devbus_cs1 { + compatible = "marvell,mvebu-devbus"; + status = "okay"; + + devbus,bus-width = <8>; + devbus,turn-off-ps = <60000>; + devbus,badr-skew-ps = <0>; + devbus,acc-first-ps = <124000>; + devbus,acc-next-ps = <248000>; + devbus,rd-setup-ps = <0>; + devbus,rd-hold-ps = <0>; + + /* Write parameters */ + devbus,sync-enable = <0>; + devbus,wr-high-ps = <60000>; + devbus,wr-low-ps = <60000>; + devbus,ale-wr-ps = <60000>; + + nvs@0 { + status = "okay"; + + compatible = "mtd-ram"; + reg = <0 0x00080000>; + bank-width = <1>; + label = "nvs"; + }; +}; + +&pinctrl { + i2c0_gpio_pins: i2c-gpio-pins-0 { + marvell,pins = "mpp2", "mpp3"; + marvell,function = "gpio"; + }; +}; + +&i2c0 { + clock-frequency = <100000>; + status = "okay"; + + pinctrl-names = "default", "gpio"; + pinctrl-0 = <&i2c0_pins>; + pinctrl-1 = <&i2c0_gpio_pins>; + scl-gpio = <&gpio0 2 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>; + sda-gpio = <&gpio0 3 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>; + + i2c0mux: mux@71 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "nxp,pca9544"; + reg = <0x71>; + i2c-mux-idle-disconnect; + + i2c@0 { /* POE devices MUX */ + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + }; + + i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + adt7476_2e: hwmon@2e { + compatible = "adi,adt7476"; + reg = <0x2e>; + }; + + adt7476_2d: hwmon@2d { + compatible = "adi,adt7476"; + reg = <0x2d>; + }; + }; + + i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + + rtc@68 { + compatible = "dallas,ds1340"; + reg = <0x68>; + }; + }; + + i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + + gpio@20 { + compatible = "nxp,pca9554"; + gpio-controller; + #gpio-cells = <2>; + reg = <0x20>; + }; + }; + }; +}; + +&usb0 { + status = "okay"; +}; + +&spi1 { + pinctrl-names = "default"; + pinctrl-0 = <&spi1_pins>; + status = "okay"; + + spi-flash@1 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "jedec,spi-nor"; + reg = <1>; /* Chip select 1 */ + spi-max-frequency = <54000000>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + partition@u-boot { + reg = <0x00000000 0x00100000>; + label = "u-boot"; + }; + partition@u-boot-env { + reg = <0x00100000 0x00040000>; + label = "u-boot-env"; + }; + partition@unused { + reg = <0x00140000 0x00e80000>; + 
label = "unused"; + }; + partition@idprom { + reg = <0x00fc0000 0x00040000>; + label = "idprom"; + }; + }; + }; +}; + +&nand_controller { + status = "okay"; + + nand@0 { + reg = <0>; + label = "pxa3xx_nand-0"; + nand-rb = <0>; + nand-on-flash-bbt; + nand-ecc-strength = <4>; + nand-ecc-step-size = <512>; + + marvell,nand-enable-arbiter; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + partition@user { + reg = <0x00000000 0x0f000000>; + label = "user"; + }; + partition@errlog { + /* Maximum mtdoops size is 8MB, so set to that. */ + reg = <0x0f000000 0x00800000>; + label = "errlog"; + }; + partition@nand-bbt { + reg = <0x0f800000 0x00800000>; + label = "nand-bbt"; + }; + }; + }; +}; + diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts index 646a06420c77..5bd6a66d2c2b 100644 --- a/arch/arm/boot/dts/armada-385-turris-omnia.dts +++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts @@ -32,7 +32,8 @@ ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000 MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000 MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000 - MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>; + MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000 + MBUS_ID(0x0c, 0x04) 0 0xf1200000 0x100000>; internal-regs { @@ -389,6 +390,7 @@ phy1: ethernet-phy@1 { compatible = "ethernet-phy-ieee802.3-c22"; reg = <1>; + marvell,reg-init = <3 18 0 0x4985>; /* irq is connected to &pcawan pin 7 */ }; diff --git a/arch/arm/boot/dts/aspeed-bmc-amd-ethanolx.dts b/arch/arm/boot/dts/aspeed-bmc-amd-ethanolx.dts index ac2d04cfaf2f..6aeb47c44eba 100644 --- a/arch/arm/boot/dts/aspeed-bmc-amd-ethanolx.dts +++ b/arch/arm/boot/dts/aspeed-bmc-amd-ethanolx.dts @@ -151,6 +151,31 @@ status = "okay"; }; +//FPGA +&i2c2 { + status = "okay"; +}; + +//24LC128 EEPROM +&i2c3 { + status = "okay"; +}; + +//P0 Power regulators +&i2c4 { + status = "okay"; +}; + +//P1 Power regulators +&i2c5 { + status = "okay"; +}; + +//P0/P1 Thermal diode +&i2c6 { + status = "okay"; +}; + // Thermal Sensors &i2c7 { status = "okay"; @@ -196,6 +221,11 @@ }; }; +//BMC I2C +&i2c8 { + status = "okay"; +}; + &kcs1 { status = "okay"; aspeed,lpc-io-reg = <0x60>; diff --git a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts new file mode 100644 index 000000000000..dcab6e78dfa4 --- /dev/null +++ b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0+ +/dts-v1/; + +#include "aspeed-g5.dtsi" +#include <dt-bindings/gpio/aspeed-gpio.h> +#include <dt-bindings/i2c/i2c.h> + +/{ + model = "ASRock E3C246D4I BMC"; + compatible = "asrock,e3c246d4i-bmc", "aspeed,ast2500"; + + aliases { + serial4 = &uart5; + }; + + chosen { + stdout-path = &uart5; + bootargs = "console=tty0 console=ttyS4,115200 earlyprintk"; + }; + + memory@80000000 { + reg = <0x80000000 0x20000000>; + }; + + leds { + compatible = "gpio-leds"; + + heartbeat { + /* BMC_HB_LED_N */ + gpios = <&gpio ASPEED_GPIO(H, 6) GPIO_ACTIVE_LOW>; + linux,default-trigger = "timer"; + }; + + system-fault { + /* SYSTEM_FAULT_LED_N */ + gpios = <&gpio ASPEED_GPIO(Z, 2) GPIO_ACTIVE_LOW>; + panic-indicator; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + + uid-button { + label = "uid-button"; + gpios = <&gpio ASPEED_GPIO(F, 1) GPIO_ACTIVE_LOW>; + linux,code = <ASPEED_GPIO(F, 1)>; + }; + }; + + iio-hwmon { + compatible = "iio-hwmon"; + io-channels = <&adc 0>, <&adc 1>, <&adc 2>, <&adc 3>, <&adc 4>, + <&adc 5>, <&adc 6>, <&adc 7>, <&adc 8>, <&adc 9>, 
+ <&adc 10>, <&adc 11>, <&adc 12>; + }; +}; + +&fmc { + status = "okay"; + flash@0 { + status = "okay"; + m25p,fast-read; + label = "bmc"; + spi-max-frequency = <100000000>; /* 100 MHz */ +#include "openbmc-flash-layout.dtsi" + }; +}; + +&uart5 { + status = "okay"; +}; + +&vuart { + status = "okay"; + aspeed,sirq-active-high; +}; + +&mac0 { + status = "okay"; + + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_rgmii1_default &pinctrl_mdio1_default>; +}; + +&i2c1 { + status = "okay"; + + /* thermal sensor, one diode run to a disconnected header */ + w83773g@4c { + compatible = "nuvoton,w83773g"; + reg = <0x4c>; + }; +}; + +&i2c3 { + status = "okay"; + + /* FRU EEPROM */ + eeprom@57 { + compatible = "st,24c128", "atmel,24c128"; + reg = <0x57>; + pagesize = <16>; + }; +}; + +&video { + status = "okay"; +}; + +&vhub { + status = "okay"; +}; + +&lpc_ctrl { + status = "okay"; +}; + +&lpc_snoop { + status = "okay"; + snoop-ports = <0x80>; +}; + +&gpio { + status = "okay"; + gpio-line-names = + /* A */ "BMC_MAC1_INTB", "BMC_MAC2_INTB", "NMI_BTN_N", "BMC_NMI", + "", "", "", "", + /* B */ "", "", "", "", "", "IRQ_BMC_PCH_SMI_LPC_N", "", "", + /* C */ "", "", "", "", "", "", "", "", + /* D */ "BMC_PSIN", "BMC_PSOUT", "BMC_RESETCON", "RESETCON", + "", "", "", "", + /* E */ "", "", "", "", "", "", "", "", + /* F */ "LOCATORLED_STATUS_N", "LOCATORBTN", "", "", + "", "", "BMC_PCH_SCI_LPC", "BMC_NCSI_MUX_CTL", + /* G */ "HWM_BAT_EN", "CHASSIS_ID0", "CHASSIS_ID1", "CHASSIS_ID2", + "BMC_ALERT1_N_R", "BMC_ALERT2_N_R", "BMC_ALERT3_N", "SML0ALERT", + /* H */ "FM_ME_RCVR_N", "O_PWROK", "SKL_CNL_R", "D4_DIMM_EVENT_3V_N", + "MFG_MODE_N", "BMC_RTCRST", "BMC_HB_LED_N", "BMC_CASEOPEN", + /* I */ "", "", "", "", "", "", "", "", + /* J */ "BMC_READY", "BMC_PCH_BIOS_CS_N", "BMC_SMI", "", + "", "", "", "", + /* K */ "", "", "", "", "", "", "", "", + /* L */ "BMC_CTS1", "BMC_DCD1", "BMC_DSR1", "BMC_RI1", + "BMC_DTR1", "BMC_RTS1", "BMC_TXD1", "BMC_RXD1", + /* M */ "BMC_LAN0_DIS_N", "BMC_LAN1_DIS_N", "", "", + "", "", "", "", + /* N */ "", "", "", "", "", "", "", "", + /* O */ "", "", "", "", "", "", "", "", + /* P */ "", "", "", "", "", "", "", "", + /* Q */ "", "", "", "", + "BMC_SBM_PRESENT_1_N", "BMC_SBM_PRESENT_2_N", + "BMC_SBM_PRESENT_3_N", "BMC_PCIE_WAKE_N", + /* R */ "", "", "", "", "", "", "", "", + /* S */ "PCHHOT_BMC_N", "", "RSMRST", + "", "", "", "", "", + /* T */ "", "", "", "", "", "", "", "", + /* U */ "", "", "", "", "", "", "", "", + /* V */ "", "", "", "", "", "", "", "", + /* W */ "PS_PWROK", /* dummy always-high signal */ + "", "", "", "", "", "", "", + /* X */ "", "", "", "", "", "", "", "", + /* Y */ "SLP_S3", "SLP_S5", "", "", "", "", "", "", + /* Z */ "CPU_CATERR_BMC_PCH_N", "", "SYSTEM_FAULT_LED_N", "BMC_THROTTLE_N", + "", "", "", "", + /* AA */ "CPU1_THERMTRIP_LATCH_N", "", "CPU1_PROCHOT_N", "", + "", "", "IRQ_SMI_ACTIVE_N", "FM_BIOS_POST_CMPLT_N", + /* AB */ "", "", "ME_OVERRIDE", "BMC_DMI_MODIFY", + "", "", "", "", + /* AC */ "LAD0", "LAD1", "LAD2", "LAD3", + "CK_33M_BMC", "LFRAME", "SERIRQ", "S_PLTRST"; + + /* Assert BMC_READY so BIOS doesn't sit around waiting for it */ + bmc-ready { + gpio-hog; + gpios = <ASPEED_GPIO(J, 0) GPIO_ACTIVE_LOW>; + output-high; + }; +}; + +&adc { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_adc0_default + &pinctrl_adc1_default + &pinctrl_adc2_default + &pinctrl_adc3_default + &pinctrl_adc4_default + &pinctrl_adc5_default + &pinctrl_adc6_default + &pinctrl_adc7_default + &pinctrl_adc8_default + &pinctrl_adc9_default + 
&pinctrl_adc10_default + &pinctrl_adc11_default + &pinctrl_adc12_default>; +}; + +&kcs3 { + status = "okay"; + aspeed,lpc-io-reg = <0xca2>; +}; diff --git a/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts b/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts index cd18641d5c23..7b4b2b126ad8 100644 --- a/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts +++ b/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts @@ -507,6 +507,11 @@ &i2c7 { status = "okay"; //HSC, AirMax Conn A + adm1278@45 { + compatible = "adm1275"; + reg = <0x45>; + shunt-resistor-micro-ohms = <250>; + }; }; &i2c8 { diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts index 6bd876657bb8..3295c8c7c05c 100644 --- a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts +++ b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts @@ -44,6 +44,58 @@ i2c415 = &cfam3_i2c15; i2c416 = &cfam3_i2c16; i2c417 = &cfam3_i2c17; + i2c500 = &cfam4_i2c0; + i2c501 = &cfam4_i2c1; + i2c510 = &cfam4_i2c10; + i2c511 = &cfam4_i2c11; + i2c512 = &cfam4_i2c12; + i2c513 = &cfam4_i2c13; + i2c514 = &cfam4_i2c14; + i2c515 = &cfam4_i2c15; + i2c602 = &cfam5_i2c2; + i2c603 = &cfam5_i2c3; + i2c610 = &cfam5_i2c10; + i2c611 = &cfam5_i2c11; + i2c614 = &cfam5_i2c14; + i2c615 = &cfam5_i2c15; + i2c616 = &cfam5_i2c16; + i2c617 = &cfam5_i2c17; + i2c700 = &cfam6_i2c0; + i2c701 = &cfam6_i2c1; + i2c710 = &cfam6_i2c10; + i2c711 = &cfam6_i2c11; + i2c712 = &cfam6_i2c12; + i2c713 = &cfam6_i2c13; + i2c714 = &cfam6_i2c14; + i2c715 = &cfam6_i2c15; + i2c802 = &cfam7_i2c2; + i2c803 = &cfam7_i2c3; + i2c810 = &cfam7_i2c10; + i2c811 = &cfam7_i2c11; + i2c814 = &cfam7_i2c14; + i2c815 = &cfam7_i2c15; + i2c816 = &cfam7_i2c16; + i2c817 = &cfam7_i2c17; + + i2c16 = &i2c4mux0chn0; + i2c17 = &i2c4mux0chn1; + i2c18 = &i2c4mux0chn2; + i2c19 = &i2c5mux0chn0; + i2c20 = &i2c5mux0chn1; + i2c21 = &i2c5mux0chn2; + i2c22 = &i2c5mux0chn3; + i2c23 = &i2c6mux0chn0; + i2c24 = &i2c6mux0chn1; + i2c25 = &i2c6mux0chn2; + i2c26 = &i2c6mux0chn3; + i2c27 = &i2c14mux0chn0; + i2c28 = &i2c14mux0chn1; + i2c29 = &i2c14mux0chn2; + i2c30 = &i2c14mux0chn3; + i2c31 = &i2c14mux1chn0; + i2c32 = &i2c14mux1chn1; + i2c33 = &i2c14mux1chn2; + i2c34 = &i2c14mux1chn3; serial4 = &uart5; @@ -63,6 +115,22 @@ spi41 = &cfam3_spi1; spi42 = &cfam3_spi2; spi43 = &cfam3_spi3; + spi50 = &cfam4_spi0; + spi51 = &cfam4_spi1; + spi52 = &cfam4_spi2; + spi53 = &cfam4_spi3; + spi60 = &cfam5_spi0; + spi61 = &cfam5_spi1; + spi62 = &cfam5_spi2; + spi63 = &cfam5_spi3; + spi70 = &cfam6_spi0; + spi71 = &cfam6_spi1; + spi72 = &cfam6_spi2; + spi73 = &cfam6_spi3; + spi80 = &cfam7_spi0; + spi81 = &cfam7_spi1; + spi82 = &cfam7_spi2; + spi83 = &cfam7_spi3; }; chosen { @@ -103,6 +171,889 @@ reg = <0xbf000000 0x01000000>; /* 16M */ }; }; + + gpio-keys-polled { + compatible = "gpio-keys-polled"; + #address-cells = <1>; + #size-cells = <0>; + poll-interval = <1000>; + + fan0-presence { + label = "fan0-presence"; + gpios = <&pca0 15 GPIO_ACTIVE_LOW>; + linux,code = <15>; + }; + + fan1-presence { + label = "fan1-presence"; + gpios = <&pca0 14 GPIO_ACTIVE_LOW>; + linux,code = <14>; + }; + + fan2-presence { + label = "fan2-presence"; + gpios = <&pca0 13 GPIO_ACTIVE_LOW>; + linux,code = <13>; + }; + + fan3-presence { + label = "fan3-presence"; + gpios = <&pca0 12 GPIO_ACTIVE_LOW>; + linux,code = <12>; + }; + }; +}; + +&gpio0 { + gpio-line-names = + /*A0-A7*/ "","","","","","","","", + /*B0-B7*/ "USERSPACE_RSTIND_BUFF","","","","","","","", + /*C0-C7*/ "","","","","","","","", + /*D0-D7*/ "","","","","","","","", + /*E0-E7*/ 
"","","","","","","","", + /*F0-F7*/ "PIN_HOLE_RESET_IN_N","","", + "PIN_HOLE_RESET_OUT_N","","","","", + /*G0-G7*/ "","","","","","","","", + /*H0-H7*/ "","","","","","","","", + /*I0-I7*/ "","","","","","","","", + /*J0-J7*/ "","","","","","","","", + /*K0-K7*/ "","","","","","","","", + /*L0-L7*/ "","","","","","","","", + /*M0-M7*/ "","","","","","","","", + /*N0-N7*/ "","","","","","","","", + /*O0-O7*/ "","","","","","","","", + /*P0-P7*/ "","","","","","","","", + /*Q0-Q7*/ "","","","","","","","", + /*R0-R7*/ "","","","","","I2C_FLASH_MICRO_N","","", + /*S0-S7*/ "","","","","","","","", + /*T0-T7*/ "","","","","","","","", + /*U0-U7*/ "","","","","","","","", + /*V0-V7*/ "","BMC_3RESTART_ATTEMPT_P","","","","","","", + /*W0-W7*/ "","","","","","","","", + /*X0-X7*/ "","","","","","","","", + /*Y0-Y7*/ "","","","","","","","", + /*Z0-Z7*/ "","","","","","","",""; +}; + +&i2c0 { + status = "okay"; + + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + + pca1: pca9552@62 { + compatible = "nxp,pca9552"; + reg = <0x62>; + #address-cells = <1>; + #size-cells = <0>; + + gpio-controller; + #gpio-cells = <2>; + + gpio-line-names = + "presence-ps0", + "presence-ps1", + "presence-ps2", + "presence-ps3", + "presence-pdb", + "presence-tpm", + "", "", + "presence-cp0", + "presence-cp1", + "presence-cp2", + "presence-cp3", + "presence-dasd", + "presence-lcd-op", + "presence-base-op", + ""; + + gpio@0 { + reg = <0>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@01 { + reg = <1>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@2 { + reg = <2>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@3 { + reg = <3>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@4 { + reg = <4>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@5 { + reg = <5>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@6 { + reg = <6>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@7 { + reg = <7>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@8 { + reg = <8>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@9 { + reg = <9>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@10 { + reg = <10>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@11 { + reg = <11>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@12 { + reg = <12>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@13 { + reg = <13>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@14 { + reg = <14>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@15 { + reg = <15>; + type = <PCA955X_TYPE_GPIO>; + }; + }; +}; + +&i2c1 { + status = "okay"; + + pca2: pca9552@61 { + compatible = "nxp,pca9552"; + reg = <0x61>; + #address-cells = <1>; + #size-cells = <0>; + + gpio-controller; + #gpio-cells = <2>; + + gpio-line-names = + "presence-cable-card1", + "presence-cable-card2", + "presence-cable-card3", + "presence-cable-card4", + "presence-cable-card5", + "expander-cable-card1", + "expander-cable-card2", + "expander-cable-card3", + "expander-cable-card4", + "expander-cable-card5"; + + gpio@0 { + reg = <0>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@1 { + reg = <1>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@2 { + reg = <2>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@3 { + reg = <3>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@4 { + reg = <4>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@5 { + reg = <5>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@6 { + reg = <6>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@7 { + reg = <7>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@8 { + reg = <8>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@9 { + reg = <9>; + type = <PCA955X_TYPE_GPIO>; + }; + + }; + + pca3: pca9552@62 { + compatible = 
"nxp,pca9552"; + reg = <0x62>; + #address-cells = <1>; + #size-cells = <0>; + + gpio-controller; + #gpio-cells = <2>; + + gpio-line-names = + "presence-cable-card6", + "presence-cable-card7", + "presence-cable-card8", + "presence-cable-card9", + "presence-cable-card10", + "presence-cable-card11", + "expander-cable-card6", + "expander-cable-card7", + "expander-cable-card8", + "expander-cable-card9", + "expander-cable-card10", + "expander-cable-card11"; + + gpio@0 { + reg = <0>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@1 { + reg = <1>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@2 { + reg = <2>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@3 { + reg = <3>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@4 { + reg = <4>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@5 { + reg = <5>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@6 { + reg = <6>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@7 { + reg = <7>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@8 { + reg = <8>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@9 { + reg = <9>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@10 { + reg = <10>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@11 { + reg = <11>; + type = <PCA955X_TYPE_GPIO>; + }; + + }; + +}; + +&i2c2 { + status = "okay"; +}; + +&i2c3 { + status = "okay"; + + eeprom@54 { + compatible = "atmel,24c128"; + reg = <0x54>; + }; + + power-supply@68 { + compatible = "ibm,cffps"; + reg = <0x68>; + }; + + power-supply@69 { + compatible = "ibm,cffps"; + reg = <0x69>; + }; + + power-supply@6a { + compatible = "ibm,cffps"; + reg = <0x6a>; + }; + + power-supply@6b { + compatible = "ibm,cffps"; + reg = <0x6b>; + }; +}; + +&i2c4 { + status = "okay"; + + i2c-switch@70 { + compatible = "nxp,pca9546"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + i2c-mux-idle-disconnect; + + i2c4mux0chn0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + eeprom@52 { + compatible = "atmel,24c64"; + reg = <0x52>; + }; + }; + + i2c4mux0chn1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + }; + + i2c4mux0chn2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + }; + }; +}; + +&i2c5 { + status = "okay"; + + i2c-switch@70 { + compatible = "nxp,pca9546"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + i2c-mux-idle-disconnect; + + i2c5mux0chn0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + }; + + i2c5mux0chn1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + }; + + i2c5mux0chn2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + eeprom@52 { + compatible = "atmel,24c64"; + reg = <0x52>; + }; + }; + + i2c5mux0chn3: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + eeprom@53 { + compatible = "atmel,24c64"; + reg = <0x53>; + }; + }; + }; +}; + +&i2c6 { + status = "okay"; + + i2c-switch@70 { + compatible = "nxp,pca9546"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + i2c-mux-idle-disconnect; + + i2c6mux0chn0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + }; + + i2c6mux0chn1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + eeprom@52 { + compatible = 
"atmel,24c64"; + reg = <0x52>; + }; + }; + + i2c6mux0chn2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + eeprom@53 { + compatible = "atmel,24c64"; + reg = <0x53>; + }; + }; + + i2c6mux0chn3: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + }; + }; +}; + +&i2c7 { + status = "okay"; +}; + +&i2c8 { + status = "okay"; + + ucd90320@11 { + compatible = "ti,ucd90320"; + reg = <0x11>; + }; + + rtc@32 { + compatible = "epson,rx8900"; + reg = <0x32>; + }; + + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + + eeprom@50 { + compatible = "atmel,24c128"; + reg = <0x50>; + }; +}; + +&i2c9 { + status = "okay"; + + eeprom@50 { + compatible = "atmel,24c128"; + reg = <0x50>; + }; + + eeprom@51 { + compatible = "atmel,24c128"; + reg = <0x51>; + }; + + eeprom@53 { + compatible = "atmel,24c128"; + reg = <0x53>; + }; + + eeprom@52 { + compatible = "atmel,24c128"; + reg = <0x52>; + }; +}; + +&i2c10 { + status = "okay"; + + eeprom@51 { + compatible = "atmel,24c128"; + reg = <0x51>; + }; + + eeprom@50 { + compatible = "atmel,24c128"; + reg = <0x50>; + }; + + eeprom@53 { + compatible = "atmel,24c128"; + reg = <0x53>; + }; + + eeprom@52 { + compatible = "atmel,24c128"; + reg = <0x52>; + }; +}; + +&i2c11 { + status = "okay"; + + eeprom@51 { + compatible = "atmel,24c128"; + reg = <0x51>; + }; + + eeprom@50 { + compatible = "atmel,24c128"; + reg = <0x50>; + }; + + eeprom@53 { + compatible = "atmel,24c128"; + reg = <0x53>; + }; + + eeprom@52 { + compatible = "atmel,24c128"; + reg = <0x52>; + }; +}; + +&i2c12 { + status = "okay"; +}; + +&i2c13 { + status = "okay"; + + eeprom@51 { + compatible = "atmel,24c128"; + reg = <0x51>; + }; + + eeprom@50 { + compatible = "atmel,24c128"; + reg = <0x50>; + }; + + eeprom@53 { + compatible = "atmel,24c128"; + reg = <0x53>; + }; + + eeprom@52 { + compatible = "atmel,24c128"; + reg = <0x52>; + }; +}; + +&i2c14 { + status = "okay"; + + i2c-switch@70 { + compatible = "nxp,pca9546"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + i2c-mux-idle-disconnect; + + i2c14mux0chn0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + }; + + i2c14mux0chn1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + eeprom@51 { + compatible = "atmel,24c32"; + reg = <0x51>; + }; + }; + + i2c14mux0chn2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + eeprom@50 { + compatible = "atmel,24c32"; + reg = <0x50>; + }; + }; + + i2c14mux0chn3: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + + max31785@52 { + compatible = "maxim,max31785a"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x52>; + + fan@0 { + compatible = "pmbus-fan"; + reg = <0>; + tach-pulses = <2>; + }; + + fan@1 { + compatible = "pmbus-fan"; + reg = <1>; + tach-pulses = <2>; + }; + + fan@2 { + compatible = "pmbus-fan"; + reg = <2>; + tach-pulses = <2>; + }; + + fan@3 { + compatible = "pmbus-fan"; + reg = <3>; + tach-pulses = <2>; + }; + }; + + pca0: pca9552@61 { + compatible = "nxp,pca9552"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x61>; + + gpio-controller; + #gpio-cells = <2>; + + gpio-line-names = + "","","","", + "","","","", + "","","","", + "presence-fan3", + "presence-fan2", + "presence-fan1", + "presence-fan0"; + + gpio@0 { + reg = <0>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@1 { + reg = <1>; + type = <PCA955X_TYPE_GPIO>; + }; + + 
gpio@2 { + reg = <2>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@3 { + reg = <3>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@4 { + reg = <4>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@5 { + reg = <5>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@6 { + reg = <6>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@7 { + reg = <7>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@8 { + reg = <8>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@9 { + reg = <9>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@10 { + reg = <10>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@11 { + reg = <11>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@12 { + reg = <12>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@13 { + reg = <13>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@14 { + reg = <14>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@15 { + reg = <15>; + type = <PCA955X_TYPE_GPIO>; + }; + }; + }; + }; + + i2c-switch@71 { + compatible = "nxp,pca9546"; + reg = <0x71>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + i2c-mux-idle-disconnect; + + i2c14mux1chn0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + eeprom@50 { + compatible = "atmel,24c32"; + reg = <0x50>; + }; + }; + + i2c14mux1chn1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + eeprom@50 { + compatible = "atmel,24c32"; + reg = <0x50>; + }; + }; + + i2c14mux1chn2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + eeprom@50 { + compatible = "atmel,24c32"; + reg = <0x50>; + }; + }; + + i2c14mux1chn3: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + eeprom@50 { + compatible = "atmel,24c32"; + reg = <0x50>; + }; + }; + }; +}; + +&i2c15 { + status = "okay"; }; &ehci1 { @@ -133,7 +1084,7 @@ */ cfam-reset-gpios = <&gpio0 ASPEED_GPIO(Q, 0) GPIO_ACTIVE_HIGH>; - cfam@0,0 { + cfam@0,0 { /* DCM0_C0 */ reg = <0 0>; #address-cells = <1>; #size-cells = <1>; @@ -277,7 +1228,7 @@ }; &fsi_hub0 { - cfam@1,0 { + cfam@1,0 { /* DCM0_C1 */ reg = <1 0>; #address-cells = <1>; #size-cells = <1>; @@ -421,7 +1372,7 @@ }; }; - cfam@2,0 { + cfam@2,0 { /* DCM1_C0 */ reg = <2 0>; #address-cells = <1>; #size-cells = <1>; @@ -565,7 +1516,7 @@ }; }; - cfam@3,0 { + cfam@3,0 { /* DCM1_C1 */ reg = <3 0>; #address-cells = <1>; #size-cells = <1>; @@ -708,6 +1659,582 @@ no-scan-on-init; }; }; + + cfam@4,0 { /* DCM2_C0 */ + reg = <4 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <4>; + + scom@1000 { + compatible = "ibm,fsi2pib"; + reg = <0x1000 0x400>; + }; + + i2c@1800 { + compatible = "ibm,fsi-i2c-master"; + reg = <0x1800 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam4_i2c0: i2c-bus@0 { + reg = <0>; /* OM01 */ + }; + + cfam4_i2c1: i2c-bus@1 { + reg = <1>; /* OM23 */ + }; + + cfam4_i2c10: i2c-bus@a { + reg = <10>; /* OP3A */ + }; + + cfam4_i2c11: i2c-bus@b { + reg = <11>; /* OP3B */ + }; + + cfam4_i2c12: i2c-bus@c { + reg = <12>; /* OP4A */ + }; + + cfam4_i2c13: i2c-bus@d { + reg = <13>; /* OP4B */ + }; + + cfam4_i2c14: i2c-bus@e { + reg = <14>; /* OP5A */ + }; + + cfam4_i2c15: i2c-bus@f { + reg = <15>; /* OP5B */ + }; + }; + + fsi2spi@1c00 { + compatible = "ibm,fsi2spi"; + reg = <0x1c00 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam4_spi0: spi@0 { + reg = <0x0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + at25,byte-len = <0x80000>; + at25,addr-mode = <4>; + at25,page-size = <256>; + + compatible = "atmel,at25"; + reg = <0>; + spi-max-frequency = <1000000>; + }; + }; + + cfam4_spi1: spi@20 { + reg = <0x20>; + #address-cells = <1>; + #size-cells = <0>; + + 
eeprom@0 { + at25,byte-len = <0x80000>; + at25,addr-mode = <4>; + at25,page-size = <256>; + + compatible = "atmel,at25"; + reg = <0>; + spi-max-frequency = <1000000>; + }; + }; + + cfam4_spi2: spi@40 { + reg = <0x40>; + compatible = "ibm,fsi2spi-restricted"; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + at25,byte-len = <0x80000>; + at25,addr-mode = <4>; + at25,page-size = <256>; + + compatible = "atmel,at25"; + reg = <0>; + spi-max-frequency = <1000000>; + }; + }; + + cfam4_spi3: spi@60 { + reg = <0x60>; + compatible = "ibm,fsi2spi-restricted"; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + at25,byte-len = <0x80000>; + at25,addr-mode = <4>; + at25,page-size = <256>; + + compatible = "atmel,at25"; + reg = <0>; + spi-max-frequency = <1000000>; + }; + }; + }; + + sbefifo@2400 { + compatible = "ibm,p9-sbefifo"; + reg = <0x2400 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + fsi_occ4: occ { + compatible = "ibm,p10-occ"; + }; + }; + + fsi_hub4: hub@3400 { + compatible = "fsi-master-hub"; + reg = <0x3400 0x400>; + #address-cells = <2>; + #size-cells = <0>; + + no-scan-on-init; + }; + }; + + cfam@5,0 { /* DCM2_C1 */ + reg = <5 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <5>; + + scom@1000 { + compatible = "ibm,fsi2pib"; + reg = <0x1000 0x400>; + }; + + i2c@1800 { + compatible = "ibm,fsi-i2c-master"; + reg = <0x1800 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam5_i2c2: i2c-bus@2 { + reg = <2>; /* OM45 */ + }; + + cfam5_i2c3: i2c-bus@3 { + reg = <3>; /* OM67 */ + }; + + cfam5_i2c10: i2c-bus@a { + reg = <10>; /* OP3A */ + }; + + cfam5_i2c11: i2c-bus@b { + reg = <11>; /* OP3B */ + }; + + cfam5_i2c14: i2c-bus@e { + reg = <14>; /* OP5A */ + }; + + cfam5_i2c15: i2c-bus@f { + reg = <15>; /* OP5B */ + }; + + cfam5_i2c16: i2c-bus@10 { + reg = <16>; /* OP6A */ + }; + + cfam5_i2c17: i2c-bus@11 { + reg = <17>; /* OP6B */ + }; + }; + + fsi2spi@1c00 { + compatible = "ibm,fsi2spi"; + reg = <0x1c00 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam5_spi0: spi@0 { + reg = <0x0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + at25,byte-len = <0x80000>; + at25,addr-mode = <4>; + at25,page-size = <256>; + + compatible = "atmel,at25"; + reg = <0>; + spi-max-frequency = <1000000>; + }; + }; + + cfam5_spi1: spi@20 { + reg = <0x20>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + at25,byte-len = <0x80000>; + at25,addr-mode = <4>; + at25,page-size = <256>; + + compatible = "atmel,at25"; + reg = <0>; + spi-max-frequency = <1000000>; + }; + }; + + cfam5_spi2: spi@40 { + reg = <0x40>; + compatible = "ibm,fsi2spi-restricted"; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + at25,byte-len = <0x80000>; + at25,addr-mode = <4>; + at25,page-size = <256>; + + compatible = "atmel,at25"; + reg = <0>; + spi-max-frequency = <1000000>; + }; + }; + + cfam5_spi3: spi@60 { + reg = <0x60>; + compatible = "ibm,fsi2spi-restricted"; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + at25,byte-len = <0x80000>; + at25,addr-mode = <4>; + at25,page-size = <256>; + + compatible = "atmel,at25"; + reg = <0>; + spi-max-frequency = <1000000>; + }; + }; + }; + + sbefifo@2400 { + compatible = "ibm,p9-sbefifo"; + reg = <0x2400 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + fsi_occ5: occ { + compatible = "ibm,p10-occ"; + }; + }; + + fsi_hub5: hub@3400 { + compatible = "fsi-master-hub"; + reg = <0x3400 0x400>; + #address-cells = <2>; + #size-cells = <0>; + + no-scan-on-init; + }; + }; + + cfam@6,0 { /* DCM3_C0 */ + 
reg = <6 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <6>; + + scom@1000 { + compatible = "ibm,fsi2pib"; + reg = <0x1000 0x400>; + }; + + i2c@1800 { + compatible = "ibm,fsi-i2c-master"; + reg = <0x1800 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam6_i2c0: i2c-bus@0 { + reg = <0>; /* OM01 */ + }; + + cfam6_i2c1: i2c-bus@1 { + reg = <1>; /* OM23 */ + }; + + cfam6_i2c10: i2c-bus@a { + reg = <10>; /* OP3A */ + }; + + cfam6_i2c11: i2c-bus@b { + reg = <11>; /* OP3B */ + }; + + cfam6_i2c12: i2c-bus@c { + reg = <12>; /* OP4A */ + }; + + cfam6_i2c13: i2c-bus@d { + reg = <13>; /* OP4B */ + }; + + cfam6_i2c14: i2c-bus@e { + reg = <14>; /* OP5A */ + }; + + cfam6_i2c15: i2c-bus@f { + reg = <15>; /* OP5B */ + }; + }; + + fsi2spi@1c00 { + compatible = "ibm,fsi2spi"; + reg = <0x1c00 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam6_spi0: spi@0 { + reg = <0x0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + at25,byte-len = <0x80000>; + at25,addr-mode = <4>; + at25,page-size = <256>; + + compatible = "atmel,at25"; + reg = <0>; + spi-max-frequency = <1000000>; + }; + }; + + cfam6_spi1: spi@20 { + reg = <0x20>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + at25,byte-len = <0x80000>; + at25,addr-mode = <4>; + at25,page-size = <256>; + + compatible = "atmel,at25"; + reg = <0>; + spi-max-frequency = <1000000>; + }; + }; + + cfam6_spi2: spi@40 { + reg = <0x40>; + compatible = "ibm,fsi2spi-restricted"; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + at25,byte-len = <0x80000>; + at25,addr-mode = <4>; + at25,page-size = <256>; + + compatible = "atmel,at25"; + reg = <0>; + spi-max-frequency = <1000000>; + }; + }; + + cfam6_spi3: spi@60 { + reg = <0x60>; + compatible = "ibm,fsi2spi-restricted"; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + at25,byte-len = <0x80000>; + at25,addr-mode = <4>; + at25,page-size = <256>; + + compatible = "atmel,at25"; + reg = <0>; + spi-max-frequency = <1000000>; + }; + }; + }; + + sbefifo@2400 { + compatible = "ibm,p9-sbefifo"; + reg = <0x2400 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + fsi_occ6: occ { + compatible = "ibm,p10-occ"; + }; + }; + + fsi_hub6: hub@3400 { + compatible = "fsi-master-hub"; + reg = <0x3400 0x400>; + #address-cells = <2>; + #size-cells = <0>; + + no-scan-on-init; + }; + }; + + cfam@7,0 { /* DCM3_C1 */ + reg = <7 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <7>; + + scom@1000 { + compatible = "ibm,fsi2pib"; + reg = <0x1000 0x400>; + }; + + i2c@1800 { + compatible = "ibm,fsi-i2c-master"; + reg = <0x1800 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam7_i2c2: i2c-bus@2 { + reg = <2>; /* OM45 */ + }; + + cfam7_i2c3: i2c-bus@3 { + reg = <3>; /* OM67 */ + }; + + cfam7_i2c10: i2c-bus@a { + reg = <10>; /* OP3A */ + }; + + cfam7_i2c11: i2c-bus@b { + reg = <11>; /* OP3B */ + }; + + cfam7_i2c14: i2c-bus@e { + reg = <14>; /* OP5A */ + }; + + cfam7_i2c15: i2c-bus@f { + reg = <15>; /* OP5B */ + }; + + cfam7_i2c16: i2c-bus@10 { + reg = <16>; /* OP6A */ + }; + + cfam7_i2c17: i2c-bus@11 { + reg = <17>; /* OP6B */ + }; + }; + + fsi2spi@1c00 { + compatible = "ibm,fsi2spi"; + reg = <0x1c00 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam7_spi0: spi@0 { + reg = <0x0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + at25,byte-len = <0x80000>; + at25,addr-mode = <4>; + at25,page-size = <256>; + + compatible = "atmel,at25"; + reg = <0>; + spi-max-frequency = <1000000>; + }; + }; + + cfam7_spi1: spi@20 { + reg = <0x20>; 
+ #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + at25,byte-len = <0x80000>; + at25,addr-mode = <4>; + at25,page-size = <256>; + + compatible = "atmel,at25"; + reg = <0>; + spi-max-frequency = <1000000>; + }; + }; + + cfam7_spi2: spi@40 { + reg = <0x40>; + compatible = "ibm,fsi2spi-restricted"; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + at25,byte-len = <0x80000>; + at25,addr-mode = <4>; + at25,page-size = <256>; + + compatible = "atmel,at25"; + reg = <0>; + spi-max-frequency = <1000000>; + }; + }; + + cfam7_spi3: spi@60 { + reg = <0x60>; + compatible = "ibm,fsi2spi-restricted"; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + at25,byte-len = <0x80000>; + at25,addr-mode = <4>; + at25,page-size = <256>; + + compatible = "atmel,at25"; + reg = <0>; + spi-max-frequency = <1000000>; + }; + }; + }; + + sbefifo@2400 { + compatible = "ibm,p9-sbefifo"; + reg = <0x2400 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + fsi_occ7: occ { + compatible = "ibm,p10-occ"; + }; + }; + + fsi_hub7: hub@3400 { + compatible = "fsi-master-hub"; + reg = <0x3400 0x400>; + #address-cells = <2>; + #size-cells = <0>; + + no-scan-on-init; + }; + }; }; /* Legacy OCC numbering (to get rid of when userspace is fixed) */ @@ -727,6 +2254,22 @@ reg = <4>; }; +&fsi_occ4 { + reg = <5>; +}; + +&fsi_occ5 { + reg = <6>; +}; + +&fsi_occ6 { + reg = <7>; +}; + +&fsi_occ7 { + reg = <8>; +}; + &ibt { status = "okay"; }; @@ -769,6 +2312,20 @@ use-ncsi; }; +&wdt1 { + aspeed,reset-type = "none"; + aspeed,external-signal; + aspeed,ext-push-pull; + aspeed,ext-active-high; + + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_wdtrst1_default>; +}; + +&wdt2 { + status = "okay"; +}; + &xdma { status = "okay"; memory-region = <&vga_memory>; diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier-1s4u.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier-1s4u.dts new file mode 100644 index 000000000000..f5f5b18c113a --- /dev/null +++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier-1s4u.dts @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// Copyright 2021 IBM Corp. 
+/dts-v1/; + +#include "aspeed-bmc-ibm-rainier-4u.dts" + +/ { + model = "Rainier 1S4U"; +}; + +&max { + /delete-node/ fan3; + /delete-node/ fan5; +}; diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier-4u.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier-4u.dts index 291f7d6c9979..f7fd3b3c90d0 100644 --- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier-4u.dts +++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier-4u.dts @@ -22,16 +22,30 @@ &fan0 { tach-pulses = <4>; + /delete-property/ maxim,fan-dual-tach; }; &fan1 { tach-pulses = <4>; + /delete-property/ maxim,fan-dual-tach; }; &fan2 { tach-pulses = <4>; + /delete-property/ maxim,fan-dual-tach; }; &fan3 { tach-pulses = <4>; + /delete-property/ maxim,fan-dual-tach; +}; + +&fan4 { + tach-pulses = <4>; + /delete-property/ maxim,fan-dual-tach; +}; + +&fan5 { + tach-pulses = <4>; + /delete-property/ maxim,fan-dual-tach; }; diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts index 6c9804d2f3b4..941c0489479a 100644 --- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts +++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts @@ -12,11 +12,55 @@ compatible = "ibm,rainier-bmc", "aspeed,ast2600"; aliases { + i2c100 = &cfam0_i2c0; + i2c101 = &cfam0_i2c1; + i2c110 = &cfam0_i2c10; + i2c111 = &cfam0_i2c11; + i2c112 = &cfam0_i2c12; + i2c113 = &cfam0_i2c13; + i2c114 = &cfam0_i2c14; + i2c115 = &cfam0_i2c15; + i2c202 = &cfam1_i2c2; + i2c203 = &cfam1_i2c3; + i2c210 = &cfam1_i2c10; + i2c211 = &cfam1_i2c11; + i2c214 = &cfam1_i2c14; + i2c215 = &cfam1_i2c15; + i2c216 = &cfam1_i2c16; + i2c217 = &cfam1_i2c17; + i2c300 = &cfam2_i2c0; + i2c301 = &cfam2_i2c1; + i2c310 = &cfam2_i2c10; + i2c311 = &cfam2_i2c11; + i2c312 = &cfam2_i2c12; + i2c313 = &cfam2_i2c13; + i2c314 = &cfam2_i2c14; + i2c315 = &cfam2_i2c15; + i2c402 = &cfam3_i2c2; + i2c403 = &cfam3_i2c3; + i2c410 = &cfam3_i2c10; + i2c411 = &cfam3_i2c11; + i2c414 = &cfam3_i2c14; + i2c415 = &cfam3_i2c15; + i2c416 = &cfam3_i2c16; + i2c417 = &cfam3_i2c17; + serial4 = &uart5; i2c16 = &i2c2mux0; i2c17 = &i2c2mux1; i2c18 = &i2c2mux2; i2c19 = &i2c2mux3; + i2c20 = &i2c4mux0chn0; + i2c21 = &i2c4mux0chn1; + i2c22 = &i2c4mux0chn2; + i2c23 = &i2c5mux0chn0; + i2c24 = &i2c5mux0chn1; + i2c25 = &i2c6mux0chn0; + i2c26 = &i2c6mux0chn1; + i2c27 = &i2c6mux0chn2; + i2c28 = &i2c6mux0chn3; + i2c29 = &i2c11mux0chn0; + i2c30 = &i2c11mux0chn1; spi10 = &cfam0_spi0; spi11 = &cfam0_spi1; @@ -30,6 +74,10 @@ spi31 = &cfam2_spi1; spi32 = &cfam2_spi2; spi33 = &cfam2_spi3; + spi40 = &cfam3_spi0; + spi41 = &cfam3_spi1; + spi42 = &cfam3_spi2; + spi43 = &cfam3_spi3; }; chosen { @@ -131,6 +179,73 @@ reg = <3>; }; }; + + leds { + compatible = "gpio-leds"; + + /* BMC Card fault LED at the back */ + bmc-ingraham0 { + gpios = <&gpio0 ASPEED_GPIO(H, 1) GPIO_ACTIVE_LOW>; + }; + + /* Enclosure ID LED at the back */ + rear-enc-id0 { + gpios = <&gpio0 ASPEED_GPIO(H, 2) GPIO_ACTIVE_LOW>; + }; + + /* Enclosure fault LED at the back */ + rear-enc-fault0 { + gpios = <&gpio0 ASPEED_GPIO(H, 3) GPIO_ACTIVE_LOW>; + }; + + /* PCIE slot power LED */ + pcieslot-power { + gpios = <&gpio0 ASPEED_GPIO(P, 4) GPIO_ACTIVE_LOW>; + }; + }; + + gpio-keys-polled { + compatible = "gpio-keys-polled"; + #address-cells = <1>; + #size-cells = <0>; + poll-interval = <1000>; + + fan0-presence { + label = "fan0-presence"; + gpios = <&pca0 6 GPIO_ACTIVE_LOW>; + linux,code = <6>; + }; + + fan1-presence { + label = "fan1-presence"; + gpios = <&pca0 7 GPIO_ACTIVE_LOW>; + linux,code = <7>; + }; + + fan2-presence { + label = "fan2-presence"; + gpios = <&pca0 8 
GPIO_ACTIVE_LOW>; + linux,code = <8>; + }; + + fan3-presence { + label = "fan3-presence"; + gpios = <&pca0 9 GPIO_ACTIVE_LOW>; + linux,code = <9>; + }; + + fan4-presence { + label = "fan4-presence"; + gpios = <&pca0 10 GPIO_ACTIVE_LOW>; + linux,code = <10>; + }; + + fan5-presence { + label = "fan5-presence"; + gpios = <&pca0 11 GPIO_ACTIVE_LOW>; + linux,code = <11>; + }; + }; }; &ehci1 { @@ -146,7 +261,7 @@ /*E0-E7*/ "","","","","","","","", /*F0-F7*/ "","","","","","","","", /*G0-G7*/ "","","","","","","","", - /*H0-H7*/ "","","","","","","","", + /*H0-H7*/ "","bmc-ingraham0","rear-enc-id0","rear-enc-fault0","","","","", /*I0-I7*/ "","","","","","","","", /*J0-J7*/ "","","","","","","","", /*K0-K7*/ "","","","","","","","", @@ -154,7 +269,7 @@ /*M0-M7*/ "","","","","","","","", /*N0-N7*/ "","","","","","","","", /*O0-O7*/ "","","","usb-power","","","","", - /*P0-P7*/ "","","","","","","","", + /*P0-P7*/ "","","","","pcieslot-power","","","", /*Q0-Q7*/ "cfam-reset","","","","","","","", /*R0-R7*/ "","","","","","","","", /*S0-S7*/ "presence-ps0","presence-ps1","presence-ps2","presence-ps3", @@ -226,6 +341,38 @@ reg = <0x1800 0x400>; #address-cells = <1>; #size-cells = <0>; + + cfam0_i2c0: i2c-bus@0 { + reg = <0>; /* OMI01 */ + }; + + cfam0_i2c1: i2c-bus@1 { + reg = <1>; /* OMI23 */ + }; + + cfam0_i2c10: i2c-bus@a { + reg = <10>; /* OP3A */ + }; + + cfam0_i2c11: i2c-bus@b { + reg = <11>; /* OP3B */ + }; + + cfam0_i2c12: i2c-bus@c { + reg = <12>; /* OP4A */ + }; + + cfam0_i2c13: i2c-bus@d { + reg = <13>; /* OP4B */ + }; + + cfam0_i2c14: i2c-bus@e { + reg = <14>; /* OP5A */ + }; + + cfam0_i2c15: i2c-bus@f { + reg = <15>; /* OP5B */ + }; }; fsi2spi@1c00 { @@ -317,8 +464,6 @@ reg = <0x3400 0x400>; #address-cells = <2>; #size-cells = <0>; - - no-scan-on-init; }; }; }; @@ -340,6 +485,38 @@ reg = <0x1800 0x400>; #address-cells = <1>; #size-cells = <0>; + + cfam1_i2c2: i2c-bus@2 { + reg = <2>; /* OMI45 */ + }; + + cfam1_i2c3: i2c-bus@3 { + reg = <3>; /* OMI67 */ + }; + + cfam1_i2c10: i2c-bus@a { + reg = <10>; /* OP3A */ + }; + + cfam1_i2c11: i2c-bus@b { + reg = <11>; /* OP3B */ + }; + + cfam1_i2c14: i2c-bus@e { + reg = <14>; /* OP5A */ + }; + + cfam1_i2c15: i2c-bus@f { + reg = <15>; /* OP5B */ + }; + + cfam1_i2c16: i2c-bus@10 { + reg = <16>; /* OP6A */ + }; + + cfam1_i2c17: i2c-bus@11 { + reg = <17>; /* OP6B */ + }; }; fsi2spi@1c00 { @@ -452,6 +629,38 @@ reg = <0x1800 0x400>; #address-cells = <1>; #size-cells = <0>; + + cfam2_i2c0: i2c-bus@0 { + reg = <0>; /* OM01 */ + }; + + cfam2_i2c1: i2c-bus@1 { + reg = <1>; /* OM23 */ + }; + + cfam2_i2c10: i2c-bus@a { + reg = <10>; /* OP3A */ + }; + + cfam2_i2c11: i2c-bus@b { + reg = <11>; /* OP3B */ + }; + + cfam2_i2c12: i2c-bus@c { + reg = <12>; /* OP4A */ + }; + + cfam2_i2c13: i2c-bus@d { + reg = <13>; /* OP4B */ + }; + + cfam2_i2c14: i2c-bus@e { + reg = <14>; /* OP5A */ + }; + + cfam2_i2c15: i2c-bus@f { + reg = <15>; /* OP5B */ + }; }; fsi2spi@1c00 { @@ -547,6 +756,150 @@ no-scan-on-init; }; }; + + cfam@3,0 { + reg = <3 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <3>; + + scom@1000 { + compatible = "ibm,fsi2pib"; + reg = <0x1000 0x400>; + }; + + i2c@1800 { + compatible = "ibm,fsi-i2c-master"; + reg = <0x1800 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam3_i2c2: i2c-bus@2 { + reg = <2>; /* OM45 */ + }; + + cfam3_i2c3: i2c-bus@3 { + reg = <3>; /* OM67 */ + }; + + cfam3_i2c10: i2c-bus@a { + reg = <10>; /* OP3A */ + }; + + cfam3_i2c11: i2c-bus@b { + reg = <11>; /* OP3B */ + }; + + cfam3_i2c14: i2c-bus@e { + reg = <14>; /* OP5A 
*/ + }; + + cfam3_i2c15: i2c-bus@f { + reg = <15>; /* OP5B */ + }; + + cfam3_i2c16: i2c-bus@10 { + reg = <16>; /* OP6A */ + }; + + cfam3_i2c17: i2c-bus@11 { + reg = <17>; /* OP6B */ + }; + }; + + fsi2spi@1c00 { + compatible = "ibm,fsi2spi"; + reg = <0x1c00 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam3_spi0: spi@0 { + reg = <0x0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + at25,byte-len = <0x80000>; + at25,addr-mode = <4>; + at25,page-size = <256>; + + compatible = "atmel,at25"; + reg = <0>; + spi-max-frequency = <1000000>; + }; + }; + + cfam3_spi1: spi@20 { + reg = <0x20>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + at25,byte-len = <0x80000>; + at25,addr-mode = <4>; + at25,page-size = <256>; + + compatible = "atmel,at25"; + reg = <0>; + spi-max-frequency = <1000000>; + }; + }; + + cfam3_spi2: spi@40 { + reg = <0x40>; + compatible = "ibm,fsi2spi-restricted"; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + at25,byte-len = <0x80000>; + at25,addr-mode = <4>; + at25,page-size = <256>; + + compatible = "atmel,at25"; + reg = <0>; + spi-max-frequency = <1000000>; + }; + }; + + cfam3_spi3: spi@60 { + reg = <0x60>; + compatible = "ibm,fsi2spi-restricted"; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + at25,byte-len = <0x80000>; + at25,addr-mode = <4>; + at25,page-size = <256>; + + compatible = "atmel,at25"; + reg = <0>; + spi-max-frequency = <1000000>; + }; + }; + }; + + sbefifo@2400 { + compatible = "ibm,p9-sbefifo"; + reg = <0x2400 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + fsi_occ3: occ { + compatible = "ibm,p10-occ"; + }; + }; + + fsi_hub3: hub@3400 { + compatible = "fsi-master-hub"; + reg = <0x3400 0x400>; + #address-cells = <2>; + #size-cells = <0>; + + no-scan-on-init; + }; + }; }; /* Legacy OCC numbering (to get rid of when userspace is fixed) */ @@ -562,6 +915,10 @@ reg = <3>; }; +&fsi_occ3 { + reg = <4>; +}; + &ibt { status = "okay"; }; @@ -574,20 +931,64 @@ reg = <0x51>; }; - tca9554@40 { + tca_pres1: tca9554@20{ compatible = "ti,tca9554"; - reg = <0x40>; + reg = <0x20>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; #gpio-cells = <2>; - smbus0-hog { - gpio-hog; - gpios = <4 GPIO_ACTIVE_HIGH>; - output-high; - line-name = "smbus0"; + gpio-line-names = "", + "RUSSEL_FW_I2C_ENABLE_N", + "RUSSEL_OPPANEL_PRESENCE_N", + "BLYTH_OPPANEL_PRESENCE_N", + "CPU_TPM_CARD_PRESENT_N", + "DASD_BP2_PRESENT_N", + "DASD_BP1_PRESENT_N", + "DASD_BP0_PRESENT_N"; + + gpio@0 { + reg = <0>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@1 { + reg = <1>; + type = <PCA955X_TYPE_GPIO>; }; - }; + gpio@2 { + reg = <2>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@3 { + reg = <3>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@4 { + reg = <4>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@5 { + reg = <5>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@6 { + reg = <6>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@7 { + reg = <7>; + type = <PCA955X_TYPE_GPIO>; + }; + }; }; &i2c1 { @@ -610,6 +1011,104 @@ compatible = "ibm,cffps"; reg = <0x69>; }; + + pca_pres1: pca9552@61 { + compatible = "nxp,pca9552"; + reg = <0x61>; + #address-cells = <1>; + #size-cells = <0>; + + gpio-controller; + #gpio-cells = <2>; + + gpio-line-names = + "SLOT0_PRSNT_EN_RSVD", "SLOT1_PRSNT_EN_RSVD", + "SLOT2_PRSNT_EN_RSVD", "SLOT3_PRSNT_EN_RSVD", + "SLOT4_PRSNT_EN_RSVD", "SLOT0_EXPANDER_PRSNT_N", + "SLOT1_EXPANDER_PRSNT_N", "SLOT2_EXPANDER_PRSNT_N", + "SLOT3_EXPANDER_PRSNT_N", "SLOT4_EXPANDER_PRSNT_N", + "", "", "", "", "", ""; + + gpio@0 { + reg = 
<0>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@1 { + reg = <1>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@2 { + reg = <2>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@3 { + reg = <3>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@4 { + reg = <4>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@5 { + reg = <5>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@6 { + reg = <6>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@7 { + reg = <7>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@8 { + reg = <8>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@9 { + reg = <9>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@10 { + reg = <10>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@11 { + reg = <11>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@12 { + reg = <12>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@13 { + reg = <13>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@14 { + reg = <14>; + type = <PCA955X_TYPE_GPIO>; + }; + + gpio@15 { + reg = <15>; + type = <PCA955X_TYPE_GPIO>; + }; + }; }; &i2c4 { @@ -630,19 +1129,46 @@ reg = <0x4a>; }; - eeprom@50 { - compatible = "atmel,24c64"; - reg = <0x50>; - }; + pca9546@70 { + compatible = "nxp,pca9546"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + i2c-mux-idle-disconnect; - eeprom@51 { - compatible = "atmel,24c64"; - reg = <0x51>; - }; + i2c4mux0chn0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; - eeprom@52 { - compatible = "atmel,24c64"; - reg = <0x52>; + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + }; + + i2c4mux0chn1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + }; + + i2c4mux0chn2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + + eeprom@52 { + compatible = "atmel,24c64"; + reg = <0x52>; + }; + }; }; }; @@ -659,14 +1185,35 @@ reg = <0x49>; }; - eeprom@50 { - compatible = "atmel,24c64"; - reg = <0x50>; - }; + pca9546@70 { + compatible = "nxp,pca9546"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + i2c-mux-idle-disconnect; - eeprom@51 { - compatible = "atmel,24c64"; - reg = <0x51>; + i2c5mux0chn0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + }; + + i2c5mux0chn1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + }; }; }; @@ -688,24 +1235,57 @@ reg = <0x4b>; }; - eeprom@50 { - compatible = "atmel,24c64"; - reg = <0x50>; - }; + pca9546@70 { + compatible = "nxp,pca9546"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + i2c-mux-idle-disconnect; - eeprom@51 { - compatible = "atmel,24c64"; - reg = <0x51>; - }; + i2c6mux0chn0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; - eeprom@52 { - compatible = "atmel,24c64"; - reg = <0x52>; - }; + eeprom@53 { + compatible = "atmel,24c64"; + reg = <0x53>; + }; + }; - eeprom@53 { - compatible = "atmel,24c64"; - reg = <0x53>; + i2c6mux0chn1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + eeprom@52 { + compatible = "atmel,24c64"; + reg = <0x52>; + }; + }; + + i2c6mux0chn2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + }; + + i2c6mux0chn3: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + }; }; }; @@ -713,9 
+1293,9 @@ multi-master; status = "okay"; - si7021-a20@20 { + si7021-a20@40 { compatible = "silabs,si7020"; - reg = <0x20>; + reg = <0x40>; }; tmp275@48 { @@ -723,7 +1303,7 @@ reg = <0x48>; }; - max31785@52 { + max: max31785@52 { compatible = "maxim,max31785a"; reg = <0x52>; #address-cells = <1>; @@ -752,6 +1332,18 @@ reg = <3>; tach-pulses = <2>; }; + + fan4: fan@4 { + compatible = "pmbus-fan"; + reg = <4>; + tach-pulses = <2>; + }; + + fan5: fan@5 { + compatible = "pmbus-fan"; + reg = <5>; + tach-pulses = <2>; + }; }; pca0: pca9552@61 { @@ -899,7 +1491,7 @@ reg = <0x51>; }; - pca1: pca9552@61 { + pca_pres2: pca9552@61 { compatible = "nxp,pca9552"; reg = <0x61>; #address-cells = <1>; @@ -907,6 +1499,15 @@ gpio-controller; #gpio-cells = <2>; + gpio-line-names = + "SLOT6_PRSNT_EN_RSVD", "SLOT7_PRSNT_EN_RSVD", + "SLOT8_PRSNT_EN_RSVD", "SLOT9_PRSNT_EN_RSVD", + "SLOT10_PRSNT_EN_RSVD", "SLOT11_PRSNT_EN_RSVD", + "SLOT6_EXPANDER_PRSNT_N", "SLOT7_EXPANDER_PRSNT_N", + "SLOT8_EXPANDER_PRSNT_N", "SLOT9_EXPANDER_PRSNT_N", + "SLOT10_EXPANDER_PRSNT_N", "SLOT11_EXPANDER_PRSNT_N", + "", "", "", ""; + gpio@0 { reg = <0>; type = <PCA955X_TYPE_GPIO>; @@ -1041,14 +1642,35 @@ reg = <0x49>; }; - eeprom@50 { - compatible = "atmel,24c64"; - reg = <0x50>; - }; + pca9546@70 { + compatible = "nxp,pca9546"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + i2c-mux-idle-disconnect; - eeprom@51 { - compatible = "atmel,24c64"; - reg = <0x51>; + i2c11mux0chn0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + }; + + i2c11mux0chn1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + }; }; }; @@ -1140,6 +1762,20 @@ }; }; +&wdt1 { + aspeed,reset-type = "none"; + aspeed,external-signal; + aspeed,ext-push-pull; + aspeed,ext-active-high; + + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_wdtrst1_default>; +}; + +&wdt2 { + status = "okay"; +}; + &xdma { status = "okay"; memory-region = <&vga_memory>; diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts b/arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts index 577c211c469e..15c1f0ac81dc 100644 --- a/arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts +++ b/arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts @@ -383,6 +383,39 @@ }; }; +&gpio { + gpio-line-names = + /*A0-A7*/ "","cfam-reset","","","","","","", + /*B0-B7*/ "","","","","","","","", + /*C0-C7*/ "","","","","","","","", + /*D0-D7*/ "fsi-enable","","","","","","","", + /*E0-E7*/ "","","","","","fsi-mux","fsi-clock","fsi-data", + /*F0-F7*/ "","id-button","","","","","air-water","", + /*G0-G7*/ "","","","","","","","", + /*H0-H7*/ "","","","","","","","", + /*I0-I7*/ "","","","","","","","", + /*J0-J7*/ "","","checkstop","","","","","", + /*K0-K7*/ "","","","","","","","", + /*L0-L7*/ "","","","","","","","", + /*M0-M7*/ "","","","","","","","", + /*N0-N7*/ "","","","","","","","", + /*O0-O7*/ "","","","","","","","", + /*P0-P7*/ "","","","","","","","", + /*Q0-Q7*/ "","","","","","","","", + /*R0-R7*/ "","","fsi-trans","","","","","", + /*S0-S7*/ "","","","","","","","", + /*T0-T7*/ "","","","","","","","", + /*U0-U7*/ "","","","","","","","", + /*V0-V7*/ "","","","","","","","", + /*W0-W7*/ "","","","","","","","", + /*X0-X7*/ "","","","","","","","", + /*Y0-Y7*/ "","","","","","","","", + /*Z0-Z7*/ "presence-ps1","","presence-ps0","","","","","", + /*AA0-AA7*/ "led-front-fault","power-button","led-front-id","","","","","", + 
/*AB0-AB7*/ "","","","","","","","", + /*AC0-AC7*/ "","","","","","","",""; +}; + &fmc { status = "okay"; flash@0 { diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi index e7a45ba18fc9..c5aeb3cf3a09 100644 --- a/arch/arm/boot/dts/aspeed-g4.dtsi +++ b/arch/arm/boot/dts/aspeed-g4.dtsi @@ -343,59 +343,45 @@ }; lpc: lpc@1e789000 { - compatible = "aspeed,ast2400-lpc", "simple-mfd"; + compatible = "aspeed,ast2400-lpc-v2", "simple-mfd", "syscon"; reg = <0x1e789000 0x1000>; + reg-io-width = <4>; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x1e789000 0x1000>; - lpc_bmc: lpc-bmc@0 { - compatible = "aspeed,ast2400-lpc-bmc"; - reg = <0x0 0x80>; + lpc_ctrl: lpc-ctrl@80 { + compatible = "aspeed,ast2400-lpc-ctrl"; + reg = <0x80 0x10>; + clocks = <&syscon ASPEED_CLK_GATE_LCLK>; + status = "disabled"; }; - lpc_host: lpc-host@80 { - compatible = "aspeed,ast2400-lpc-host", "simple-mfd", "syscon"; - reg = <0x80 0x1e0>; - reg-io-width = <4>; - - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x0 0x80 0x1e0>; - - lpc_ctrl: lpc-ctrl@0 { - compatible = "aspeed,ast2400-lpc-ctrl"; - reg = <0x0 0x10>; - clocks = <&syscon ASPEED_CLK_GATE_LCLK>; - status = "disabled"; - }; - - lpc_snoop: lpc-snoop@10 { - compatible = "aspeed,ast2400-lpc-snoop"; - reg = <0x10 0x8>; - interrupts = <8>; - clocks = <&syscon ASPEED_CLK_GATE_LCLK>; - status = "disabled"; - }; - - lhc: lhc@20 { - compatible = "aspeed,ast2400-lhc"; - reg = <0x20 0x24 0x48 0x8>; - }; - - lpc_reset: reset-controller@18 { - compatible = "aspeed,ast2400-lpc-reset"; - reg = <0x18 0x4>; - #reset-cells = <1>; - }; - - ibt: ibt@c0 { - compatible = "aspeed,ast2400-ibt-bmc"; - reg = <0xc0 0x18>; - interrupts = <8>; - status = "disabled"; - }; + lpc_snoop: lpc-snoop@90 { + compatible = "aspeed,ast2400-lpc-snoop"; + reg = <0x90 0x8>; + interrupts = <8>; + clocks = <&syscon ASPEED_CLK_GATE_LCLK>; + status = "disabled"; + }; + + lhc: lhc@a0 { + compatible = "aspeed,ast2400-lhc"; + reg = <0xa0 0x24 0xc8 0x8>; + }; + + lpc_reset: reset-controller@98 { + compatible = "aspeed,ast2400-lpc-reset"; + reg = <0x98 0x4>; + #reset-cells = <1>; + }; + + ibt: ibt@140 { + compatible = "aspeed,ast2400-ibt-bmc"; + reg = <0x140 0x18>; + interrupts = <8>; + status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi index 21930521a986..d733c1f161c1 100644 --- a/arch/arm/boot/dts/aspeed-g5.dtsi +++ b/arch/arm/boot/dts/aspeed-g5.dtsi @@ -434,91 +434,74 @@ }; lpc: lpc@1e789000 { - compatible = "aspeed,ast2500-lpc", "simple-mfd"; + compatible = "aspeed,ast2500-lpc-v2", "simple-mfd", "syscon"; reg = <0x1e789000 0x1000>; + reg-io-width = <4>; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x1e789000 0x1000>; - lpc_bmc: lpc-bmc@0 { - compatible = "aspeed,ast2500-lpc-bmc", "simple-mfd", "syscon"; - reg = <0x0 0x80>; - reg-io-width = <4>; - - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x0 0x0 0x80>; - - kcs1: kcs@24 { - compatible = "aspeed,ast2500-kcs-bmc-v2"; - reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>; - interrupts = <8>; - status = "disabled"; - }; - kcs2: kcs@28 { - compatible = "aspeed,ast2500-kcs-bmc-v2"; - reg = <0x28 0x1>, <0x34 0x1>, <0x40 0x1>; - interrupts = <8>; - status = "disabled"; - }; - kcs3: kcs@2c { - compatible = "aspeed,ast2500-kcs-bmc-v2"; - reg = <0x2c 0x1>, <0x38 0x1>, <0x44 0x1>; - interrupts = <8>; - status = "disabled"; - }; + kcs1: kcs@24 { + compatible = "aspeed,ast2500-kcs-bmc-v2"; + reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>; + interrupts = <8>; + status 
= "disabled"; + }; + + kcs2: kcs@28 { + compatible = "aspeed,ast2500-kcs-bmc-v2"; + reg = <0x28 0x1>, <0x34 0x1>, <0x40 0x1>; + interrupts = <8>; + status = "disabled"; + }; + + kcs3: kcs@2c { + compatible = "aspeed,ast2500-kcs-bmc-v2"; + reg = <0x2c 0x1>, <0x38 0x1>, <0x44 0x1>; + interrupts = <8>; + status = "disabled"; + }; + + kcs4: kcs@114 { + compatible = "aspeed,ast2500-kcs-bmc-v2"; + reg = <0x114 0x1>, <0x118 0x1>, <0x11c 0x1>; + interrupts = <8>; + status = "disabled"; }; - lpc_host: lpc-host@80 { - compatible = "aspeed,ast2500-lpc-host", "simple-mfd", "syscon"; - reg = <0x80 0x1e0>; - reg-io-width = <4>; - - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x0 0x80 0x1e0>; - - kcs4: kcs@94 { - compatible = "aspeed,ast2500-kcs-bmc-v2"; - reg = <0x94 0x1>, <0x98 0x1>, <0x9c 0x1>; - interrupts = <8>; - status = "disabled"; - }; - - lpc_ctrl: lpc-ctrl@0 { - compatible = "aspeed,ast2500-lpc-ctrl"; - reg = <0x0 0x10>; - clocks = <&syscon ASPEED_CLK_GATE_LCLK>; - status = "disabled"; - }; - - lpc_snoop: lpc-snoop@10 { - compatible = "aspeed,ast2500-lpc-snoop"; - reg = <0x10 0x8>; - interrupts = <8>; - clocks = <&syscon ASPEED_CLK_GATE_LCLK>; - status = "disabled"; - }; - - lpc_reset: reset-controller@18 { - compatible = "aspeed,ast2500-lpc-reset"; - reg = <0x18 0x4>; - #reset-cells = <1>; - }; - - lhc: lhc@20 { - compatible = "aspeed,ast2500-lhc"; - reg = <0x20 0x24 0x48 0x8>; - }; - - - ibt: ibt@c0 { - compatible = "aspeed,ast2500-ibt-bmc"; - reg = <0xc0 0x18>; - interrupts = <8>; - status = "disabled"; - }; + lpc_ctrl: lpc-ctrl@80 { + compatible = "aspeed,ast2500-lpc-ctrl"; + reg = <0x80 0x10>; + clocks = <&syscon ASPEED_CLK_GATE_LCLK>; + status = "disabled"; + }; + + lpc_snoop: lpc-snoop@90 { + compatible = "aspeed,ast2500-lpc-snoop"; + reg = <0x90 0x8>; + interrupts = <8>; + clocks = <&syscon ASPEED_CLK_GATE_LCLK>; + status = "disabled"; + }; + + lpc_reset: reset-controller@98 { + compatible = "aspeed,ast2500-lpc-reset"; + reg = <0x98 0x4>; + #reset-cells = <1>; + }; + + lhc: lhc@a0 { + compatible = "aspeed,ast2500-lhc"; + reg = <0xa0 0x24 0xc8 0x8>; + }; + + + ibt: ibt@140 { + compatible = "aspeed,ast2500-ibt-bmc"; + reg = <0x140 0x18>; + interrupts = <8>; + status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi index 3ee470c2b7b5..f96607b7b4e2 100644 --- a/arch/arm/boot/dts/aspeed-g6.dtsi +++ b/arch/arm/boot/dts/aspeed-g6.dtsi @@ -460,91 +460,74 @@ }; lpc: lpc@1e789000 { - compatible = "aspeed,ast2600-lpc", "simple-mfd"; + compatible = "aspeed,ast2600-lpc-v2", "simple-mfd", "syscon"; reg = <0x1e789000 0x1000>; + reg-io-width = <4>; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x1e789000 0x1000>; - lpc_bmc: lpc-bmc@0 { - compatible = "aspeed,ast2600-lpc-bmc", "simple-mfd", "syscon"; - reg = <0x0 0x80>; - reg-io-width = <4>; - - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x0 0x0 0x80>; - - kcs1: kcs@24 { - compatible = "aspeed,ast2500-kcs-bmc-v2"; - reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>; - interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>; - kcs_chan = <1>; - status = "disabled"; - }; - kcs2: kcs@28 { - compatible = "aspeed,ast2500-kcs-bmc-v2"; - reg = <0x28 0x1>, <0x34 0x1>, <0x40 0x1>; - interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>; - status = "disabled"; - }; - kcs3: kcs@2c { - compatible = "aspeed,ast2500-kcs-bmc-v2"; - reg = <0x2c 0x1>, <0x38 0x1>, <0x44 0x1>; - interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>; - status = "disabled"; - }; + kcs1: kcs@24 { + compatible = "aspeed,ast2500-kcs-bmc-v2"; 
+ reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>; + interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>; + kcs_chan = <1>; + status = "disabled"; + }; + + kcs2: kcs@28 { + compatible = "aspeed,ast2500-kcs-bmc-v2"; + reg = <0x28 0x1>, <0x34 0x1>, <0x40 0x1>; + interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>; + status = "disabled"; + }; + + kcs3: kcs@2c { + compatible = "aspeed,ast2500-kcs-bmc-v2"; + reg = <0x2c 0x1>, <0x38 0x1>, <0x44 0x1>; + interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>; + status = "disabled"; + }; + + kcs4: kcs@114 { + compatible = "aspeed,ast2500-kcs-bmc-v2"; + reg = <0x114 0x1>, <0x118 0x1>, <0x11c 0x1>; + interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>; + status = "disabled"; + }; + + lpc_ctrl: lpc-ctrl@80 { + compatible = "aspeed,ast2600-lpc-ctrl"; + reg = <0x80 0x80>; + clocks = <&syscon ASPEED_CLK_GATE_LCLK>; + status = "disabled"; + }; + + lpc_snoop: lpc-snoop@80 { + compatible = "aspeed,ast2600-lpc-snoop"; + reg = <0x80 0x80>; + interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&syscon ASPEED_CLK_GATE_LCLK>; + status = "disabled"; }; - lpc_host: lpc-host@80 { - compatible = "aspeed,ast2600-lpc-host", "simple-mfd", "syscon"; - reg = <0x80 0x1e0>; - reg-io-width = <4>; - - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x0 0x80 0x1e0>; - - kcs4: kcs@94 { - compatible = "aspeed,ast2500-kcs-bmc-v2"; - reg = <0x94 0x1>, <0x98 0x1>, <0x9c 0x1>; - interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>; - status = "disabled"; - }; - - lpc_ctrl: lpc-ctrl@0 { - compatible = "aspeed,ast2600-lpc-ctrl"; - reg = <0x0 0x80>; - clocks = <&syscon ASPEED_CLK_GATE_LCLK>; - status = "disabled"; - }; - - lpc_snoop: lpc-snoop@0 { - compatible = "aspeed,ast2600-lpc-snoop"; - reg = <0x0 0x80>; - interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&syscon ASPEED_CLK_GATE_LCLK>; - status = "disabled"; - }; - - lhc: lhc@20 { - compatible = "aspeed,ast2600-lhc"; - reg = <0x20 0x24 0x48 0x8>; - }; - - lpc_reset: reset-controller@18 { - compatible = "aspeed,ast2600-lpc-reset"; - reg = <0x18 0x4>; - #reset-cells = <1>; - }; - - ibt: ibt@c0 { - compatible = "aspeed,ast2600-ibt-bmc"; - reg = <0xc0 0x18>; - interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>; - status = "disabled"; - }; + lhc: lhc@a0 { + compatible = "aspeed,ast2600-lhc"; + reg = <0xa0 0x24 0xc8 0x8>; + }; + + lpc_reset: reset-controller@98 { + compatible = "aspeed,ast2600-lpc-reset"; + reg = <0x98 0x4>; + #reset-cells = <1>; + }; + + ibt: ibt@140 { + compatible = "aspeed,ast2600-ibt-bmc"; + reg = <0x140 0x18>; + interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>; + status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts index 73b6b1f89de9..edca66c232c1 100644 --- a/arch/arm/boot/dts/at91-sam9x60ek.dts +++ b/arch/arm/boot/dts/at91-sam9x60ek.dts @@ -8,6 +8,7 @@ */ /dts-v1/; #include "sam9x60.dtsi" +#include <dt-bindings/input/input.h> / { model = "Microchip SAM9X60-EK"; @@ -84,7 +85,7 @@ sw1 { label = "SW1"; gpios = <&pioD 18 GPIO_ACTIVE_LOW>; - linux,code=<0x104>; + linux,code=<KEY_PROG1>; wakeup-source; }; }; @@ -334,14 +335,6 @@ }; &pinctrl { - atmel,mux-mask = < - /* A B C */ - 0xFFFFFE7F 0xC0E0397F 0xEF00019D /* pioA */ - 0x03FFFFFF 0x02FC7E68 0x00780000 /* pioB */ - 0xffffffff 0xF83FFFFF 0xB800F3FC /* pioC */ - 0x003FFFFF 0x003F8000 0x00000000 /* pioD */ - >; - adc { pinctrl_adc_default: adc_default { atmel,pins = <AT91_PIOB 15 AT91_PERIPH_A AT91_PINCTRL_NONE>; diff --git a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi index 
1b1163858b1d..e3251f3e3eaa 100644 --- a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi +++ b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi @@ -84,8 +84,8 @@ pinctrl-0 = <&pinctrl_macb0_default>; phy-mode = "rmii"; - ethernet-phy@0 { - reg = <0x0>; + ethernet-phy@7 { + reg = <0x7>; interrupt-parent = <&pioA>; interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts index 84e1180f3e89..a9e6fee55a2a 100644 --- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts +++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts @@ -11,6 +11,7 @@ #include "at91-sama5d27_som1.dtsi" #include <dt-bindings/mfd/atmel-flexcom.h> #include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/input/input.h> / { model = "Atmel SAMA5D27 SOM1 EK"; @@ -466,7 +467,7 @@ pb4 { label = "USER"; gpios = <&pioA PIN_PA29 GPIO_ACTIVE_LOW>; - linux,code = <0x104>; + linux,code = <KEY_PROG1>; wakeup-source; }; }; diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts index 180a08765cb8..ff83967fd008 100644 --- a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts +++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts @@ -8,6 +8,7 @@ */ /dts-v1/; #include "at91-sama5d27_wlsom1.dtsi" +#include <dt-bindings/input/input.h> / { model = "Microchip SAMA5D27 WLSOM1 EK"; @@ -35,7 +36,7 @@ sw4 { label = "USER BUTTON"; gpios = <&pioA PIN_PB2 GPIO_ACTIVE_LOW>; - linux,code = <0x104>; + linux,code = <KEY_PROG1>; wakeup-source; }; }; diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts index 46722a163184..bd64721fa23c 100644 --- a/arch/arm/boot/dts/at91-sama5d2_icp.dts +++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts @@ -12,6 +12,7 @@ #include "sama5d2.dtsi" #include "sama5d2-pinfunc.h" #include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/input/input.h> #include <dt-bindings/mfd/atmel-flexcom.h> / { @@ -51,7 +52,7 @@ sw4 { label = "USER_PB1"; gpios = <&pioA PIN_PD0 GPIO_ACTIVE_LOW>; - linux,code = <0x104>; + linux,code = <KEY_PROG1>; wakeup-source; }; }; diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts index 8de57d164acd..dfd150eb0fd8 100644 --- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts +++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts @@ -11,6 +11,7 @@ #include "sama5d2-pinfunc.h" #include <dt-bindings/mfd/atmel-flexcom.h> #include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/input/input.h> #include <dt-bindings/pinctrl/at91.h> / { @@ -402,7 +403,7 @@ bp1 { label = "PB_USER"; gpios = <&pioA PIN_PA10 GPIO_ACTIVE_LOW>; - linux,code = <0x104>; + linux,code = <KEY_PROG1>; wakeup-source; }; }; diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts index 4e7cf21f124c..509c732a0d8b 100644 --- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts +++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts @@ -10,6 +10,7 @@ #include "sama5d2-pinfunc.h" #include <dt-bindings/mfd/atmel-flexcom.h> #include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/input/input.h> #include <dt-bindings/regulator/active-semi,8945a-regulator.h> / { @@ -712,7 +713,7 @@ bp1 { label = "PB_USER"; gpios = <&pioA PIN_PB9 GPIO_ACTIVE_LOW>; - linux,code = <0x104>; + linux,code = <KEY_PROG1>; wakeup-source; }; }; diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts index 5179258f9247..9c55a921263b 100644 --- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts +++ 
b/arch/arm/boot/dts/at91-sama5d3_xplained.dts @@ -7,6 +7,7 @@ */ /dts-v1/; #include "sama5d36.dtsi" +#include <dt-bindings/input/input.h> / { model = "SAMA5D3 Xplained"; @@ -354,7 +355,7 @@ bp3 { label = "PB_USER"; gpios = <&pioE 29 GPIO_ACTIVE_LOW>; - linux,code = <0x104>; + linux,code = <KEY_PROG1>; wakeup-source; }; }; diff --git a/arch/arm/boot/dts/at91sam9260ek.dts b/arch/arm/boot/dts/at91sam9260ek.dts index d3446e42b598..ce96345d28a3 100644 --- a/arch/arm/boot/dts/at91sam9260ek.dts +++ b/arch/arm/boot/dts/at91sam9260ek.dts @@ -7,6 +7,7 @@ */ /dts-v1/; #include "at91sam9260.dtsi" +#include <dt-bindings/input/input.h> / { model = "Atmel at91sam9260ek"; @@ -156,7 +157,7 @@ btn4 { label = "Button 4"; gpios = <&pioA 31 GPIO_ACTIVE_LOW>; - linux,code = <0x104>; + linux,code = <KEY_PROG1>; wakeup-source; }; }; diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi index 6e6e672c0b86..87bb39060e8b 100644 --- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi +++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi @@ -5,6 +5,7 @@ * Copyright (C) 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com> */ #include "at91sam9g20.dtsi" +#include <dt-bindings/input/input.h> / { @@ -234,7 +235,7 @@ btn4 { label = "Button 4"; gpios = <&pioA 31 GPIO_ACTIVE_LOW>; - linux,code = <0x104>; + linux,code = <KEY_PROG1>; wakeup-source; }; }; diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi index 462b1dfb0385..720beec54d61 100644 --- a/arch/arm/boot/dts/bcm2711.dtsi +++ b/arch/arm/boot/dts/bcm2711.dtsi @@ -308,14 +308,6 @@ #reset-cells = <1>; }; - bsc_intr: interrupt-controller@7ef00040 { - compatible = "brcm,bcm2711-l2-intc", "brcm,l2-intc"; - reg = <0x7ef00040 0x30>; - interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>; - interrupt-controller; - #interrupt-cells = <1>; - }; - aon_intr: interrupt-controller@7ef00100 { compatible = "brcm,bcm2711-l2-intc", "brcm,l2-intc"; reg = <0x7ef00100 0x30>; @@ -362,8 +354,6 @@ reg = <0x7ef04500 0x100>, <0x7ef00b00 0x300>; reg-names = "bsc", "auto-i2c"; clock-frequency = <97500>; - interrupt-parent = <&bsc_intr>; - interrupts = <0>; status = "disabled"; }; @@ -405,8 +395,6 @@ reg = <0x7ef09500 0x100>, <0x7ef05b00 0x300>; reg-names = "bsc", "auto-i2c"; clock-frequency = <97500>; - interrupt-parent = <&bsc_intr>; - interrupts = <1>; status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts b/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts index 6a96655d8626..8ed403767540 100644 --- a/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts +++ b/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts @@ -21,8 +21,8 @@ memory@0 { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x08000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x08000000>; }; leds { diff --git a/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts b/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts index 3b0029e61b4c..667b118ba4ee 100644 --- a/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts +++ b/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts @@ -21,8 +21,8 @@ memory@0 { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x08000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x08000000>; }; leds { diff --git a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts index 90f57bad6b24..ff31ce45831a 100644 --- a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts +++ b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts @@ -21,8 +21,8 @@ memory@0 { device_type = "memory"; - reg 
= <0x00000000 0x08000000 - 0x88000000 0x18000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x18000000>; }; spi { diff --git a/arch/arm/boot/dts/bcm4708-linksys-ea6300-v1.dts b/arch/arm/boot/dts/bcm4708-linksys-ea6300-v1.dts index 41548d6d479a..5bac1e15775a 100644 --- a/arch/arm/boot/dts/bcm4708-linksys-ea6300-v1.dts +++ b/arch/arm/boot/dts/bcm4708-linksys-ea6300-v1.dts @@ -21,6 +21,11 @@ reg = <0x00000000 0x08000000>; }; + nvram@1c080000 { + compatible = "brcm,nvram"; + reg = <0x1c080000 0x180000>; + }; + gpio-keys { compatible = "gpio-keys"; diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts index fed75e6ab58c..61c7b137607e 100644 --- a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts +++ b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts @@ -22,8 +22,8 @@ memory { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x08000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x08000000>; }; leds { diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts index 79542e18915c..4c60eda296d9 100644 --- a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts +++ b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts @@ -21,8 +21,8 @@ memory@0 { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x08000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x08000000>; }; leds { diff --git a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts index 51c64f0b2560..9ca6d1b2590d 100644 --- a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts +++ b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts @@ -21,8 +21,8 @@ memory@0 { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x08000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x08000000>; }; leds { diff --git a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts index c29950b43a95..0e273c598732 100644 --- a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts +++ b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts @@ -21,8 +21,8 @@ memory@0 { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x08000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x08000000>; }; leds { diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts index 2f2d2b0a6893..d857751ec507 100644 --- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts +++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts @@ -21,8 +21,8 @@ memory@0 { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x08000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x08000000>; }; spi { diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts index 0e349e39f608..8b1a05a0f1a1 100644 --- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts +++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts @@ -21,8 +21,8 @@ memory@0 { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x08000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x08000000>; }; spi { diff --git a/arch/arm/boot/dts/bcm47081-luxul-xwr-1200.dts b/arch/arm/boot/dts/bcm47081-luxul-xwr-1200.dts index 432254383769..9316a36434f7 100644 --- a/arch/arm/boot/dts/bcm47081-luxul-xwr-1200.dts +++ b/arch/arm/boot/dts/bcm47081-luxul-xwr-1200.dts @@ -21,6 +21,11 @@ reg = <0x00000000 0x08000000>; }; + nvram@1eff0000 { + compatible = "brcm,nvram"; + reg = <0x1eff0000 0x10000>; + }; + 
leds { compatible = "gpio-leds"; diff --git a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts index 8f1e565c3db4..6c6bb7b17d27 100644 --- a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts +++ b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts @@ -21,8 +21,8 @@ memory { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x08000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x08000000>; }; leds { diff --git a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts index ce888b1835d1..d29e7f80ea6a 100644 --- a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts +++ b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts @@ -21,8 +21,8 @@ memory { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x18000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x18000000>; }; leds { diff --git a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts index ed8619b54d69..9b6887d477d8 100644 --- a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts +++ b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts @@ -18,8 +18,13 @@ memory { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x08000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x08000000>; + }; + + nvram@1c080000 { + compatible = "brcm,nvram"; + reg = <0x1c080000 0x180000>; }; gpio-keys { diff --git a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts index 1f87993eae1d..7989a53597d4 100644 --- a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts +++ b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts @@ -21,8 +21,8 @@ memory { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x08000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x08000000>; }; leds { diff --git a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts index 6c6199a53d09..87b655be674c 100644 --- a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts +++ b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts @@ -32,8 +32,8 @@ memory { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x08000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x08000000>; }; leds { diff --git a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts index 911c65fbf251..e635a15041dd 100644 --- a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts +++ b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts @@ -21,8 +21,8 @@ memory@0 { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x08000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x08000000>; }; nand: nand@18028000 { diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts index 3725f2b0d60b..05d4f2931772 100644 --- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts +++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts @@ -18,8 +18,13 @@ memory@0 { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x08000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x08000000>; + }; + + nvram@1c080000 { + compatible = "brcm,nvram"; + reg = <0x1c080000 0x100000>; }; gpio-keys { @@ -70,6 +75,7 @@ power { label = "bcm53xx:white:power"; gpios = <&chipcommon 4 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "default-on"; }; wifi-disabled { @@ -274,7 +280,7 @@ &nandcs { partitions { - compatible = "fixed-partitions"; + compatible = "linksys,ns-partitions"; #address-cells = <1>; #size-cells = <1>; @@ 
-295,20 +301,18 @@ }; partition@200000 { - label = "firmware"; - reg = <0x0200000 0x01D00000>; - compatible = "brcm,trx"; + reg = <0x0200000 0x01d00000>; + compatible = "linksys,ns-firmware", "brcm,trx"; }; - partition@1F00000 { - label = "failsafe"; - reg = <0x01F00000 0x01D00000>; - read-only; + partition@1f00000 { + reg = <0x01f00000 0x01d00000>; + compatible = "linksys,ns-firmware", "brcm,trx"; }; partition@5200000 { label = "system"; - reg = <0x05200000 0x02E00000>; + reg = <0x05200000 0x02e00000>; }; }; }; diff --git a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts index 50f7cd08cfbb..4b8117f32d26 100644 --- a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts +++ b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts @@ -18,8 +18,13 @@ memory@0 { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x18000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x18000000>; + }; + + nvram@1eff0000 { + compatible = "brcm,nvram"; + reg = <0x1eff0000 0x10000>; }; leds { diff --git a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts index bcc420f85b56..5fecce0422c7 100644 --- a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts +++ b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts @@ -18,8 +18,13 @@ memory@0 { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x18000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x18000000>; + }; + + nvram@1eff0000 { + compatible = "brcm,nvram"; + reg = <0x1eff0000 0x10000>; }; leds { diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts index 4f8d777ae18d..452b8d0ab180 100644 --- a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts +++ b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts @@ -18,8 +18,8 @@ memory { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x18000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x18000000>; }; leds { diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts index e17e9a17fb00..cbe8c8e4a301 100644 --- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts +++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts @@ -18,8 +18,13 @@ memory@0 { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x08000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x08000000>; + }; + + nvram@1eff0000 { + compatible = "brcm,nvram"; + reg = <0x1eff0000 0x10000>; }; leds { diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts index 60cc87ecc7ec..24ae3c8a3e09 100644 --- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts +++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts @@ -18,8 +18,13 @@ memory@0 { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x18000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x18000000>; + }; + + nvram@1eff0000 { + compatible = "brcm,nvram"; + reg = <0x1eff0000 0x10000>; }; leds { diff --git a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts index f42a1703f4ab..42097a4c2659 100644 --- a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts +++ b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts @@ -18,8 +18,8 @@ memory@0 { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x18000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x18000000>; }; leds { diff --git a/arch/arm/boot/dts/bcm47094-phicomm-k3.dts b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts 
index ac3a4483dcb3..a2566ad4619c 100644 --- a/arch/arm/boot/dts/bcm47094-phicomm-k3.dts +++ b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts @@ -15,8 +15,8 @@ memory@0 { device_type = "memory"; - reg = <0x00000000 0x08000000 - 0x88000000 0x18000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x18000000>; }; gpio-keys { diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi index 3bf90d9e3335..149144cdff35 100644 --- a/arch/arm/boot/dts/dra7-l4.dtsi +++ b/arch/arm/boot/dts/dra7-l4.dtsi @@ -1,5 +1,8 @@ &l4_cfg { /* 0x4a000000 */ - compatible = "ti,dra7-l4-cfg", "simple-bus"; + compatible = "ti,dra7-l4-cfg", "simple-pm-bus"; + power-domains = <&prm_coreaon>; + clocks = <&l4cfg_clkctrl DRA7_L4CFG_L4_CFG_CLKCTRL 0>; + clock-names = "fck"; reg = <0x4a000000 0x800>, <0x4a000800 0x800>, <0x4a001000 0x1000>; @@ -11,7 +14,7 @@ <0x00200000 0x4a200000 0x100000>; /* segment 2 */ segment@0 { /* 0x4a000000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */ @@ -493,7 +496,7 @@ }; segment@100000 { /* 0x4a100000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00002000 0x00102000 0x001000>, /* ap 27 */ @@ -572,11 +575,33 @@ }; target-module@40000 { /* 0x4a140000, ap 31 06.0 */ - compatible = "ti,sysc"; - status = "disabled"; - #address-cells = <1>; + compatible = "ti,sysc-omap4", "ti,sysc"; + reg = <0x400fc 4>, + <0x41100 4>; + reg-names = "rev", "sysc"; + ti,sysc-midle = <SYSC_IDLE_FORCE>, + <SYSC_IDLE_NO>, + <SYSC_IDLE_SMART>; + ti,sysc-sidle = <SYSC_IDLE_FORCE>, + <SYSC_IDLE_NO>, + <SYSC_IDLE_SMART>, + <SYSC_IDLE_SMART_WKUP>; + power-domains = <&prm_l3init>; + clocks = <&l3init_clkctrl DRA7_L3INIT_SATA_CLKCTRL 0>; + clock-names = "fck"; #size-cells = <1>; + #address-cells = <1>; ranges = <0x0 0x40000 0x10000>; + + sata: sata@0 { + compatible = "snps,dwc-ahci"; + reg = <0 0x1100>, <0x1100 0x8>; + interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>; + phys = <&sata_phy>; + phy-names = "sata-phy"; + clocks = <&l3init_clkctrl DRA7_L3INIT_SATA_CLKCTRL 8>; + ports-implemented = <0x1>; + }; }; target-module@51000 { /* 0x4a151000, ap 33 50.0 */ @@ -789,7 +814,7 @@ }; segment@200000 { /* 0x4a200000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00018000 0x00218000 0x001000>, /* ap 43 */ @@ -1006,7 +1031,10 @@ }; &l4_per1 { /* 0x48000000 */ - compatible = "ti,dra7-l4-per1", "simple-bus"; + compatible = "ti,dra7-l4-per1", "simple-pm-bus"; + power-domains = <&prm_l4per>; + clocks = <&l4per_clkctrl DRA7_L4PER_L4_PER1_CLKCTRL 0>; + clock-names = "fck"; reg = <0x48000000 0x800>, <0x48000800 0x800>, <0x48001000 0x400>, @@ -1020,7 +1048,7 @@ <0x00200000 0x48200000 0x200000>; /* segment 1 */ segment@0 { /* 0x48000000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */ @@ -1168,7 +1196,7 @@ }; }; - target-module@34000 { /* 0x48034000, ap 7 46.0 */ + timer3_target: target-module@34000 { /* 0x48034000, ap 7 46.0 */ compatible = "ti,sysc-omap4-timer", "ti,sysc"; reg = <0x34000 0x4>, <0x34010 0x4>; @@ -1195,7 +1223,7 @@ }; }; - target-module@36000 { /* 0x48036000, ap 9 4e.0 */ + timer4_target: target-module@36000 { /* 0x48036000, ap 9 4e.0 */ compatible = "ti,sysc-omap4-timer", "ti,sysc"; reg = <0x36000 0x4>, <0x36010 0x4>; @@ -2269,14 +2297,17 @@ }; segment@200000 
{ /* 0x48200000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; }; }; &l4_per2 { /* 0x48400000 */ - compatible = "ti,dra7-l4-per2", "simple-bus"; + compatible = "ti,dra7-l4-per2", "simple-pm-bus"; + power-domains = <&prm_l4per>; + clocks = <&l4per2_clkctrl DRA7_L4PER2_L4_PER2_CLKCTRL 0>; + clock-names = "fck"; reg = <0x48400000 0x800>, <0x48400800 0x800>, <0x48401000 0x400>, @@ -2296,7 +2327,7 @@ <0x48454000 0x48454000 0x400000>; /* L3 data port */ segment@0 { /* 0x48400000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */ @@ -3094,7 +3125,10 @@ }; &l4_per3 { /* 0x48800000 */ - compatible = "ti,dra7-l4-per3", "simple-bus"; + compatible = "ti,dra7-l4-per3", "simple-pm-bus"; + power-domains = <&prm_l4per>; + clocks = <&l4per3_clkctrl DRA7_L4PER3_L4_PER3_CLKCTRL 0>; + clock-names = "fck"; reg = <0x48800000 0x800>, <0x48800800 0x800>, <0x48801000 0x400>, @@ -3106,7 +3140,7 @@ ranges = <0x00000000 0x48800000 0x200000>; /* segment 0 */ segment@0 { /* 0x48800000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */ @@ -4205,7 +4239,10 @@ }; &l4_wkup { /* 0x4ae00000 */ - compatible = "ti,dra7-l4-wkup", "simple-bus"; + compatible = "ti,dra7-l4-wkup", "simple-pm-bus"; + power-domains = <&prm_wkupaon>; + clocks = <&wkupaon_clkctrl DRA7_WKUPAON_L4_WKUP_CLKCTRL 0>; + clock-names = "fck"; reg = <0x4ae00000 0x800>, <0x4ae00800 0x800>, <0x4ae01000 0x1000>; @@ -4218,7 +4255,7 @@ <0x00030000 0x4ae30000 0x010000>; /* segment 3 */ segment@0 { /* 0x4ae00000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */ @@ -4295,7 +4332,7 @@ }; segment@10000 { /* 0x4ae10000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00000000 0x00010000 0x001000>, /* ap 5 */ @@ -4405,7 +4442,7 @@ }; segment@20000 { /* 0x4ae20000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00006000 0x00026000 0x001000>, /* ap 13 */ @@ -4511,7 +4548,7 @@ }; segment@30000 { /* 0x4ae30000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x0000c000 0x0003c000 0x002000>, /* ap 30 */ diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index ce1194744f84..dfc1ef8ef6ae 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -46,6 +46,7 @@ timer { compatible = "arm,armv7-timer"; + status = "disabled"; /* See ARM architected timer wrap erratum i940 */ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>, <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>, <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>, @@ -125,18 +126,6 @@ }; /* - * The soc node represents the soc top level view. It is used for IPs - * that are not memory mapped in the MPU view or for the MPU itself. - */ - soc { - compatible = "ti,omap-infra"; - mpu { - compatible = "ti,omap5-mpu"; - ti,hwmods = "mpu"; - }; - }; - - /* * XXX: Use a flat representation of the SOC interconnect. * The real OMAP interconnect network is quite complex. * Since it will not bring real advantage to represent that in DT for @@ -144,16 +133,22 @@ * hierarchy. 
*/ ocp: ocp { - compatible = "ti,dra7-l3-noc", "simple-bus"; + compatible = "simple-pm-bus"; + power-domains = <&prm_core>; + clocks = <&l3main1_clkctrl DRA7_L3MAIN1_L3_MAIN_1_CLKCTRL 0>, + <&l3instr_clkctrl DRA7_L3INSTR_L3_MAIN_2_CLKCTRL 0>; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x0 0x0 0xc0000000>; dma-ranges = <0x80000000 0x0 0x80000000 0x80000000>; - ti,hwmods = "l3_main_1", "l3_main_2"; - reg = <0x0 0x44000000 0x0 0x1000000>, - <0x0 0x45000000 0x0 0x1000>; - interrupts-extended = <&crossbar_mpu GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, - <&wakeupgen GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; + + l3-noc@44000000 { + compatible = "ti,dra7-l3-noc"; + reg = <0x44000000 0x1000>, + <0x45000000 0x1000>; + interrupts-extended = <&crossbar_mpu GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, + <&wakeupgen GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; + }; l4_cfg: interconnect@4a000000 { }; @@ -161,36 +156,65 @@ }; l4_per1: interconnect@48000000 { }; + + target-module@48210000 { + compatible = "ti,sysc-omap4-simple", "ti,sysc"; + power-domains = <&prm_mpu>; + clocks = <&mpu_clkctrl DRA7_MPU_CLKCTRL 0>; + clock-names = "fck"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x48210000 0x1f0000>; + + mpu { + compatible = "ti,omap5-mpu"; + }; + }; + l4_per2: interconnect@48400000 { }; l4_per3: interconnect@48800000 { }; - axi@0 { - compatible = "simple-bus"; + /* + * Register access seems to have complex dependencies and also + * seems to need an enabled phy. See the TRM chapter for "Table + * 26-678. Main Sequence PCIe Controller Global Initialization" + * and also dra7xx_pcie_probe(). + */ + axi0: target-module@51000000 { + compatible = "ti,sysc-omap4", "ti,sysc"; + power-domains = <&prm_l3init>; + resets = <&prm_l3init 0>; + reset-names = "rstctrl"; + clocks = <&pcie_clkctrl DRA7_PCIE_PCIE1_CLKCTRL 0>, + <&pcie_clkctrl DRA7_PCIE_PCIE1_CLKCTRL 9>, + <&pcie_clkctrl DRA7_PCIE_PCIE1_CLKCTRL 10>; + clock-names = "fck", "phy-clk", "phy-clk-div"; #size-cells = <1>; #address-cells = <1>; - ranges = <0x51000000 0x51000000 0x3000 - 0x0 0x20000000 0x10000000>; + ranges = <0x51000000 0x51000000 0x3000>, + <0x20000000 0x20000000 0x10000000>; dma-ranges; /** * To enable PCI endpoint mode, disable the pcie1_rc * node and enable pcie1_ep mode. 
*/ pcie1_rc: pcie@51000000 { - reg = <0x51000000 0x2000>, <0x51002000 0x14c>, <0x1000 0x2000>; + reg = <0x51000000 0x2000>, + <0x51002000 0x14c>, + <0x20001000 0x2000>; reg-names = "rc_dbics", "ti_conf", "config"; interrupts = <0 232 0x4>, <0 233 0x4>; #address-cells = <3>; #size-cells = <2>; device_type = "pci"; - ranges = <0x81000000 0 0 0x03000 0 0x00010000 - 0x82000000 0 0x20013000 0x13000 0 0xffed000>; + ranges = <0x81000000 0 0x00000000 0x20003000 0 0x00010000>, + <0x82000000 0 0x20013000 0x20013000 0 0x0ffed000>; bus-range = <0x00 0xff>; #interrupt-cells = <1>; num-lanes = <1>; linux,pci-domain = <0>; - ti,hwmods = "pcie1"; phys = <&pcie1_phy>; phy-names = "pcie-phy0"; ti,syscon-lane-sel = <&scm_conf_pcie 0x18>; @@ -209,13 +233,15 @@ }; pcie1_ep: pcie_ep@51000000 { - reg = <0x51000000 0x28>, <0x51002000 0x14c>, <0x51001000 0x28>, <0x1000 0x10000000>; + reg = <0x51000000 0x28>, + <0x51002000 0x14c>, + <0x51001000 0x28>, + <0x20001000 0x10000000>; reg-names = "ep_dbics", "ti_conf", "ep_dbics2", "addr_space"; interrupts = <0 232 0x4>; num-lanes = <1>; num-ib-windows = <4>; num-ob-windows = <16>; - ti,hwmods = "pcie1"; phys = <&pcie1_phy>; phy-names = "pcie-phy0"; ti,syscon-unaligned-access = <&scm_conf1 0x14 1>; @@ -224,28 +250,42 @@ }; }; - axi@1 { - compatible = "simple-bus"; + /* + * Register access seems to have complex dependencies and also + * seems to need an enabled phy. See the TRM chapter for "Table + * 26-678. Main Sequence PCIe Controller Global Initialization" + * and also dra7xx_pcie_probe(). + */ + axi1: target-module@51800000 { + compatible = "ti,sysc-omap4", "ti,sysc"; + clocks = <&pcie_clkctrl DRA7_PCIE_PCIE2_CLKCTRL 0>, + <&pcie_clkctrl DRA7_PCIE_PCIE2_CLKCTRL 9>, + <&pcie_clkctrl DRA7_PCIE_PCIE2_CLKCTRL 10>; + clock-names = "fck", "phy-clk", "phy-clk-div"; + power-domains = <&prm_l3init>; + resets = <&prm_l3init 1>; + reset-names = "rstctrl"; #size-cells = <1>; #address-cells = <1>; - ranges = <0x51800000 0x51800000 0x3000 - 0x0 0x30000000 0x10000000>; + ranges = <0x51800000 0x51800000 0x3000>, + <0x30000000 0x30000000 0x10000000>; dma-ranges; status = "disabled"; pcie2_rc: pcie@51800000 { - reg = <0x51800000 0x2000>, <0x51802000 0x14c>, <0x1000 0x2000>; + reg = <0x51800000 0x2000>, + <0x51802000 0x14c>, + <0x30001000 0x2000>; reg-names = "rc_dbics", "ti_conf", "config"; interrupts = <0 355 0x4>, <0 356 0x4>; #address-cells = <3>; #size-cells = <2>; device_type = "pci"; - ranges = <0x81000000 0 0 0x03000 0 0x00010000 - 0x82000000 0 0x30013000 0x13000 0 0xffed000>; + ranges = <0x81000000 0 0x00000000 0x30003000 0 0x00010000>, + <0x82000000 0 0x30013000 0x30013000 0 0x0ffed000>; bus-range = <0x00 0xff>; #interrupt-cells = <1>; num-lanes = <1>; linux,pci-domain = <1>; - ti,hwmods = "pcie2"; phys = <&pcie2_phy>; phy-names = "pcie-phy0"; interrupt-map-mask = <0 0 0 7>; @@ -336,8 +376,15 @@ target-module@43300000 { compatible = "ti,sysc-omap4", "ti,sysc"; - reg = <0x43300000 0x4>; - reg-names = "rev"; + reg = <0x43300000 0x4>, + <0x43300010 0x4>; + reg-names = "rev", "sysc"; + ti,sysc-midle = <SYSC_IDLE_FORCE>, + <SYSC_IDLE_NO>, + <SYSC_IDLE_SMART>; + ti,sysc-sidle = <SYSC_IDLE_FORCE>, + <SYSC_IDLE_NO>, + <SYSC_IDLE_SMART>; clocks = <&l3main1_clkctrl DRA7_L3MAIN1_TPCC_CLKCTRL 0>; clock-names = "fck"; #address-cells = <1>; @@ -369,8 +416,15 @@ target-module@43400000 { compatible = "ti,sysc-omap4", "ti,sysc"; - reg = <0x43400000 0x4>; - reg-names = "rev"; + reg = <0x43400000 0x4>, + <0x43400010 0x4>; + reg-names = "rev", "sysc"; + ti,sysc-midle = <SYSC_IDLE_FORCE>, + 
<SYSC_IDLE_NO>, + <SYSC_IDLE_SMART>; + ti,sysc-sidle = <SYSC_IDLE_FORCE>, + <SYSC_IDLE_NO>, + <SYSC_IDLE_SMART>; clocks = <&l3main1_clkctrl DRA7_L3MAIN1_TPTC0_CLKCTRL 0>; clock-names = "fck"; #address-cells = <1>; @@ -387,8 +441,15 @@ target-module@43500000 { compatible = "ti,sysc-omap4", "ti,sysc"; - reg = <0x43500000 0x4>; - reg-names = "rev"; + reg = <0x43500000 0x4>, + <0x43500010 0x4>; + reg-names = "rev", "sysc"; + ti,sysc-midle = <SYSC_IDLE_FORCE>, + <SYSC_IDLE_NO>, + <SYSC_IDLE_SMART>; + ti,sysc-sidle = <SYSC_IDLE_FORCE>, + <SYSC_IDLE_NO>, + <SYSC_IDLE_SMART>; clocks = <&l3main1_clkctrl DRA7_L3MAIN1_TPTC1_CLKCTRL 0>; clock-names = "fck"; #address-cells = <1>; @@ -403,11 +464,23 @@ }; }; - dmm@4e000000 { - compatible = "ti,omap5-dmm"; - reg = <0x4e000000 0x800>; - interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>; - ti,hwmods = "dmm"; + target-module@4e000000 { + compatible = "ti,sysc-omap2", "ti,sysc"; + reg = <0x4e000000 0x4>, + <0x4e000010 0x4>; + reg-names = "rev", "sysc"; + ti,sysc-sidle = <SYSC_IDLE_FORCE>, + <SYSC_IDLE_NO>, + <SYSC_IDLE_SMART>; + ranges = <0x0 0x4e000000 0x2000000>; + #size-cells = <1>; + #address-cells = <1>; + + dmm@0 { + compatible = "ti,omap5-dmm"; + reg = <0 0x800>; + interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>; + }; }; ipu1: ipu@58820000 { @@ -694,32 +767,36 @@ >; }; - qspi: spi@4b300000 { - compatible = "ti,dra7xxx-qspi"; - reg = <0x4b300000 0x100>, - <0x5c000000 0x4000000>; - reg-names = "qspi_base", "qspi_mmap"; - syscon-chipselects = <&scm_conf 0x558>; - #address-cells = <1>; - #size-cells = <0>; - ti,hwmods = "qspi"; - clocks = <&l4per2_clkctrl DRA7_L4PER2_QSPI_CLKCTRL 25>; + target-module@4b300000 { + compatible = "ti,sysc-omap4", "ti,sysc"; + reg = <0x4b300000 0x4>, + <0x4b300010 0x4>; + reg-names = "rev", "sysc"; + ti,sysc-sidle = <SYSC_IDLE_FORCE>, + <SYSC_IDLE_NO>, + <SYSC_IDLE_SMART>, + <SYSC_IDLE_SMART_WKUP>; + clocks = <&l4per2_clkctrl DRA7_L4PER2_QSPI_CLKCTRL 0>; clock-names = "fck"; - num-cs = <4>; - interrupts = <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>; - status = "disabled"; - }; - - /* OCP2SCP3 */ - sata: sata@4a141100 { - compatible = "snps,dwc-ahci"; - reg = <0x4a140000 0x1100>, <0x4a141100 0x7>; - interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>; - phys = <&sata_phy>; - phy-names = "sata-phy"; - clocks = <&l3init_clkctrl DRA7_L3INIT_SATA_CLKCTRL 8>; - ti,hwmods = "sata"; - ports-implemented = <0x1>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x4b300000 0x1000>, + <0x5c000000 0x5c000000 0x4000000>; + + qspi: spi@0 { + compatible = "ti,dra7xxx-qspi"; + reg = <0 0x100>, + <0x5c000000 0x4000000>; + reg-names = "qspi_base", "qspi_mmap"; + syscon-chipselects = <&scm_conf 0x558>; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&l4per2_clkctrl DRA7_L4PER2_QSPI_CLKCTRL 25>; + clock-names = "fck"; + num-cs = <4>; + interrupts = <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>; + status = "disabled"; + }; }; /* OCP2SCP1 */ @@ -1241,3 +1318,22 @@ assigned-clock-parents = <&sys_32k_ck>; }; }; + +/* Local timers, see ARM architected timer wrap erratum i940 */ +&timer3_target { + ti,no-reset-on-init; + ti,no-idle; + timer@0 { + assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER3_CLKCTRL 24>; + assigned-clock-parents = <&timer_sys_clk_div>; + }; +}; + +&timer4_target { + ti,no-reset-on-init; + ti,no-idle; + timer@0 { + assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 24>; + assigned-clock-parents = <&timer_sys_clk_div>; + }; +}; diff --git a/arch/arm/boot/dts/ep7209.dtsi b/arch/arm/boot/dts/ep7209.dtsi index 365931f8b48d..57bdad2c1994 
100644 --- a/arch/arm/boot/dts/ep7209.dtsi +++ b/arch/arm/boot/dts/ep7209.dtsi @@ -10,6 +10,8 @@ model = "Cirrus Logic EP7209"; compatible = "cirrus,ep7209"; + chosen { }; + aliases { gpio0 = &porta; gpio1 = &portb; @@ -108,6 +110,7 @@ compatible = "cirrus,ep7209-fb"; reg = <0x800002c0 0xd44>, <0x60000000 0xc000>; clocks = <&clks CLPS711X_CLK_BUS>; + syscon = <&syscon1>; status = "disabled"; }; @@ -132,7 +135,7 @@ #pwm-cells = <1>; }; - uart1: uart@80000480 { + uart1: serial@80000480 { compatible = "cirrus,ep7209-uart"; reg = <0x80000480 0x80>; interrupts = <12 13>; @@ -147,6 +150,7 @@ reg = <0x80000500 0x4>; interrupts = <15>; clocks = <&clks CLPS711X_CLK_SPI>; + syscon = <&syscon3>; status = "disabled"; }; @@ -155,7 +159,7 @@ reg = <0x80001100 0x80>; }; - uart2: uart@80001480 { + uart2: serial@80001480 { compatible = "cirrus,ep7209-uart"; reg = <0x80001480 0x80>; interrupts = <28 29>; @@ -170,6 +174,7 @@ clocks = <&clks CLPS711X_CLK_PLL>; clock-names = "pll"; interrupts = <32>; + syscon = <&syscon3>; status = "disabled"; }; @@ -179,8 +184,17 @@ }; }; + keypad: keypad { + compatible = "cirrus,ep7209-keypad"; + interrupt-parent = <&intc>; + interrupts = <16>; + syscon = <&syscon1>; + status = "disabled"; + }; + mctrl: mctrl { compatible = "cirrus,ep7209-mctrl-gpio"; + gpio,syscon-dev = <&syscon1 0 0>; gpio-controller; #gpio-cells = <2>; }; diff --git a/arch/arm/boot/dts/ep7211-edb7211.dts b/arch/arm/boot/dts/ep7211-edb7211.dts index da076479c8e2..7fb532f227af 100644 --- a/arch/arm/boot/dts/ep7211-edb7211.dts +++ b/arch/arm/boot/dts/ep7211-edb7211.dts @@ -7,7 +7,7 @@ model = "Cirrus Logic EP7211 Development Board"; compatible = "cirrus,edb7211", "cirrus,ep7211", "cirrus,ep7209"; - memory { + memory@c0000000 { device_type = "memory"; reg = <0xc0000000 0x02000000>; }; diff --git a/arch/arm/boot/dts/exynos3250-monk.dts b/arch/arm/boot/dts/exynos3250-monk.dts index fae046e08a5d..8b41a9d5e2db 100644 --- a/arch/arm/boot/dts/exynos3250-monk.dts +++ b/arch/arm/boot/dts/exynos3250-monk.dts @@ -142,7 +142,7 @@ assigned-clock-rates = <6000000>; thermistor-ap { - compatible = "ntc,ncp15wb473"; + compatible = "murata,ncp15wb473"; pullup-uv = <1800000>; pullup-ohm = <100000>; pulldown-ohm = <100000>; @@ -150,7 +150,7 @@ }; thermistor-battery { - compatible = "ntc,ncp15wb473"; + compatible = "murata,ncp15wb473"; pullup-uv = <1800000>; pullup-ohm = <100000>; pulldown-ohm = <100000>; diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts index d64ccf4b7d32..c52b9cf4f74c 100644 --- a/arch/arm/boot/dts/exynos3250-rinato.dts +++ b/arch/arm/boot/dts/exynos3250-rinato.dts @@ -142,7 +142,7 @@ assigned-clock-rates = <6000000>; thermistor-ap { - compatible = "ntc,ncp15wb473"; + compatible = "murata,ncp15wb473"; pullup-uv = <1800000>; pullup-ohm = <100000>; pulldown-ohm = <100000>; @@ -150,7 +150,7 @@ }; thermistor-battery { - compatible = "ntc,ncp15wb473"; + compatible = "murata,ncp15wb473"; pullup-uv = <1800000>; pullup-ohm = <100000>; pulldown-ohm = <100000>; diff --git a/arch/arm/boot/dts/exynos4210-i9100.dts b/arch/arm/boot/dts/exynos4210-i9100.dts index 304a8ee2364c..525ff3d2fac3 100644 --- a/arch/arm/boot/dts/exynos4210-i9100.dts +++ b/arch/arm/boot/dts/exynos4210-i9100.dts @@ -136,7 +136,7 @@ compatible = "maxim,max17042"; interrupt-parent = <&gpx2>; - interrupts = <3 IRQ_TYPE_EDGE_FALLING>; + interrupts = <3 IRQ_TYPE_LEVEL_LOW>; pinctrl-0 = <&max17042_fuel_irq>; pinctrl-names = "default"; @@ -147,6 +147,36 @@ }; }; + i2c_s5k5baf: i2c-gpio-1 { + compatible = 
"i2c-gpio"; + #address-cells = <1>; + #size-cells = <0>; + + sda-gpios = <&gpc1 0 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; + scl-gpios = <&gpc1 2 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; + i2c-gpio,delay-us = <2>; + + image-sensor@2d { + compatible = "samsung,s5k5baf"; + reg = <0x2d>; + vdda-supply = <&cam_io_en_reg>; + vddreg-supply = <&vt_core_15v_reg>; + vddio-supply = <&vtcam_reg>; + clocks = <&camera 0>; + clock-names = "mclk"; + stbyn-gpios = <&gpl2 0 GPIO_ACTIVE_LOW>; + rstn-gpios = <&gpl2 1 GPIO_ACTIVE_LOW>; + clock-frequency = <24000000>; + + port { + s5k5bafx_ep: endpoint { + remote-endpoint = <&csis1_ep>; + data-lanes = <1>; + }; + }; + }; + }; + spi-3 { compatible = "spi-gpio"; #address-cells = <1>; @@ -220,7 +250,29 @@ }; &camera { + pinctrl-0 = <&cam_port_a_clk_active>; + pinctrl-names = "default"; status = "okay"; + assigned-clocks = <&clock CLK_MOUT_CAM0>, <&clock CLK_MOUT_CAM1>; + assigned-clock-parents = <&clock CLK_XUSBXTI>, <&clock CLK_XUSBXTI>; +}; + +&csis_1 { + status = "okay"; + vddcore-supply = <&vusb_reg>; + vddio-supply = <&vmipi_reg>; + clock-frequency = <160000000>; + #address-cells = <1>; + #size-cells = <0>; + + port@4 { + reg = <4>; + csis1_ep: endpoint { + remote-endpoint = <&s5k5bafx_ep>; + data-lanes = <1>; + samsung,csis-hs-settle = <6>; + }; + }; }; &cpu0 { @@ -384,6 +436,8 @@ pinctrl-0 = <&max8997_irq>, <&otg_gp>, <&usb_sel>; pinctrl-names = "default"; + charger-supply = <&charger_reg>; + regulators { vadc_reg: LDO1 { regulator-name = "VADC_3.3V_C210"; diff --git a/arch/arm/boot/dts/exynos4412-midas.dtsi b/arch/arm/boot/dts/exynos4412-midas.dtsi index 111c32bae02c..fc77c1bfd844 100644 --- a/arch/arm/boot/dts/exynos4412-midas.dtsi +++ b/arch/arm/boot/dts/exynos4412-midas.dtsi @@ -173,7 +173,7 @@ pmic@66 { compatible = "maxim,max77693"; interrupt-parent = <&gpx1>; - interrupts = <5 IRQ_TYPE_EDGE_FALLING>; + interrupts = <5 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&max77693_irq>; reg = <0x66>; @@ -221,7 +221,7 @@ fuel-gauge@36 { compatible = "maxim,max17047"; interrupt-parent = <&gpx2>; - interrupts = <3 IRQ_TYPE_EDGE_FALLING>; + interrupts = <3 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&max77693_fuel_irq>; reg = <0x36>; @@ -665,7 +665,7 @@ max77686: pmic@9 { compatible = "maxim,max77686"; interrupt-parent = <&gpx0>; - interrupts = <7 IRQ_TYPE_NONE>; + interrupts = <7 IRQ_TYPE_LEVEL_LOW>; pinctrl-0 = <&max77686_irq>; pinctrl-names = "default"; reg = <0x09>; diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi index 2b20d9095d9f..5bd05866d7a3 100644 --- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi +++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi @@ -2,7 +2,7 @@ /* * Common definition for Hardkernel's Exynos4412 based ODROID-X/X2/U2/U3 boards * device tree source -*/ + */ #include <dt-bindings/sound/samsung-i2s.h> #include <dt-bindings/input/input.h> @@ -122,6 +122,7 @@ }; &clock { + clocks = <&clock CLK_XUSBXTI>; assigned-clocks = <&clock CLK_FOUT_EPLL>; assigned-clock-rates = <45158401>; }; @@ -278,7 +279,7 @@ max77686: pmic@9 { compatible = "maxim,max77686"; interrupt-parent = <&gpx3>; - interrupts = <2 IRQ_TYPE_NONE>; + interrupts = <2 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&max77686_irq>; reg = <0x09>; diff --git a/arch/arm/boot/dts/exynos4412-odroidx.dts b/arch/arm/boot/dts/exynos4412-odroidx.dts index 0e9d626e740a..440135d0ff2a 100644 --- a/arch/arm/boot/dts/exynos4412-odroidx.dts +++ b/arch/arm/boot/dts/exynos4412-odroidx.dts 
@@ -84,7 +84,8 @@ ethernet: usbether@1 { compatible = "usb0424,ec00"; reg = <1>; - local-mac-address = [00 00 00 00 00 00]; /* Filled in by a bootloader */ + /* Filled in by a bootloader */ + local-mac-address = [00 00 00 00 00 00]; }; }; }; diff --git a/arch/arm/boot/dts/exynos4412-p4note.dtsi b/arch/arm/boot/dts/exynos4412-p4note.dtsi index b2f9d5448a18..9e750890edb8 100644 --- a/arch/arm/boot/dts/exynos4412-p4note.dtsi +++ b/arch/arm/boot/dts/exynos4412-p4note.dtsi @@ -146,7 +146,7 @@ pinctrl-0 = <&fuel_alert_irq>; pinctrl-names = "default"; interrupt-parent = <&gpx2>; - interrupts = <3 IRQ_TYPE_EDGE_FALLING>; + interrupts = <3 IRQ_TYPE_LEVEL_LOW>; maxim,rsns-microohm = <10000>; maxim,over-heat-temp = <600>; maxim,over-volt = <4300>; @@ -322,7 +322,7 @@ max77686: pmic@9 { compatible = "maxim,max77686"; interrupt-parent = <&gpx0>; - interrupts = <7 IRQ_TYPE_NONE>; + interrupts = <7 IRQ_TYPE_LEVEL_LOW>; pinctrl-0 = <&max77686_irq>; pinctrl-names = "default"; reg = <0x09>; diff --git a/arch/arm/boot/dts/exynos4412-ppmu-common.dtsi b/arch/arm/boot/dts/exynos4412-ppmu-common.dtsi index 3a3b2fafefdd..7f187a3dedcc 100644 --- a/arch/arm/boot/dts/exynos4412-ppmu-common.dtsi +++ b/arch/arm/boot/dts/exynos4412-ppmu-common.dtsi @@ -7,41 +7,41 @@ */ &ppmu_dmc0 { - status = "okay"; + status = "okay"; - events { - ppmu_dmc0_3: ppmu-event3-dmc0 { - event-name = "ppmu-event3-dmc0"; - }; - }; + events { + ppmu_dmc0_3: ppmu-event3-dmc0 { + event-name = "ppmu-event3-dmc0"; + }; + }; }; &ppmu_dmc1 { - status = "okay"; + status = "okay"; - events { - ppmu_dmc1_3: ppmu-event3-dmc1 { - event-name = "ppmu-event3-dmc1"; - }; - }; + events { + ppmu_dmc1_3: ppmu-event3-dmc1 { + event-name = "ppmu-event3-dmc1"; + }; + }; }; &ppmu_leftbus { - status = "okay"; + status = "okay"; - events { - ppmu_leftbus_3: ppmu-event3-leftbus { - event-name = "ppmu-event3-leftbus"; - }; - }; + events { + ppmu_leftbus_3: ppmu-event3-leftbus { + event-name = "ppmu-event3-leftbus"; + }; + }; }; &ppmu_rightbus { - status = "okay"; + status = "okay"; - events { - ppmu_rightbus_3: ppmu-event3-rightbus { - event-name = "ppmu-event3-rightbus"; - }; - }; + events { + ppmu_rightbus_3: ppmu-event3-rightbus { + event-name = "ppmu-event3-rightbus"; + }; + }; }; diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts index 8b5a79a8720c..39bbe18145cf 100644 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts @@ -134,7 +134,7 @@ compatible = "maxim,max77686"; reg = <0x09>; interrupt-parent = <&gpx3>; - interrupts = <2 IRQ_TYPE_NONE>; + interrupts = <2 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&max77686_irq>; #clock-cells = <1>; diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi index 6635f6184051..2335c4687349 100644 --- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi +++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi @@ -292,7 +292,7 @@ max77686: pmic@9 { compatible = "maxim,max77686"; interrupt-parent = <&gpx3>; - interrupts = <2 IRQ_TYPE_NONE>; + interrupts = <2 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&max77686_irq>; wakeup-source; diff --git a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi index d0aa18443a69..9599ba8ba798 100644 --- a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi +++ b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi @@ -235,13 +235,13 @@ interrupt-parent = <&combiner>; #interrupt-cells = <2>; interrupts = <23 0>, - <24 
0>, - <25 0>, - <25 1>, - <26 0>, - <26 1>, - <27 0>, - <27 1>; + <24 0>, + <25 0>, + <25 1>, + <26 0>, + <26 1>, + <27 0>, + <27 1>; }; gpx1: gpx1 { @@ -252,13 +252,13 @@ interrupt-parent = <&combiner>; #interrupt-cells = <2>; interrupts = <28 0>, - <28 1>, - <29 0>, - <29 1>, - <30 0>, - <30 1>, - <31 0>, - <31 1>; + <28 1>, + <29 0>, + <29 1>, + <30 0>, + <30 1>, + <31 0>, + <31 1>; }; gpx2: gpx2 { diff --git a/arch/arm/boot/dts/imx50-kobo-aura.dts b/arch/arm/boot/dts/imx50-kobo-aura.dts index 97cfd970fe74..82ce8c43be86 100644 --- a/arch/arm/boot/dts/imx50-kobo-aura.dts +++ b/arch/arm/boot/dts/imx50-kobo-aura.dts @@ -143,10 +143,24 @@ pinctrl-0 = <&pinctrl_i2c3>; status = "okay"; - /* TODO: embedded controller at 0x43 */ + embedded-controller@43 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ec>; + compatible = "netronix,ntxec"; + reg = <0x43>; + system-power-controller; + interrupts-extended = <&gpio4 11 IRQ_TYPE_EDGE_FALLING>; + #pwm-cells = <2>; + }; }; &iomuxc { + pinctrl_ec: ecgrp { + fsl,pins = < + MX50_PAD_CSPI_SS0__GPIO4_11 0x0 /* INT */ + >; + }; + pinctrl_gpiokeys: gpiokeysgrp { fsl,pins = < MX50_PAD_CSPI_MISO__GPIO4_10 0x0 diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi index 7ebb46ce9e36..01cfcbe5928e 100644 --- a/arch/arm/boot/dts/imx51.dtsi +++ b/arch/arm/boot/dts/imx51.dtsi @@ -467,7 +467,7 @@ }; iim: efuse@83f98000 { - compatible = "fsl,imx51-iim", "fsl,imx27-iim"; + compatible = "fsl,imx51-iim", "fsl,imx27-iim", "syscon"; reg = <0x83f98000 0x4000>; interrupts = <69>; clocks = <&clks IMX5_CLK_IIM_GATE>; diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi index 9b4efcd82636..fe4244044a0f 100644 --- a/arch/arm/boot/dts/imx53-qsb-common.dtsi +++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi @@ -142,6 +142,7 @@ &esdhc1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_esdhc1>; + cd-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>; status = "okay"; }; @@ -209,6 +210,7 @@ MX53_PAD_SD1_DATA3__ESDHC1_DAT3 0x1d5 MX53_PAD_SD1_CMD__ESDHC1_CMD 0x1d5 MX53_PAD_SD1_CLK__ESDHC1_CLK 0x1d5 + MX53_PAD_EIM_DA13__GPIO3_13 0xe4 >; }; diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index 000050aeeabe..2cf3909cca2f 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi @@ -668,7 +668,7 @@ }; iim: efuse@63f98000 { - compatible = "fsl,imx53-iim", "fsl,imx27-iim"; + compatible = "fsl,imx53-iim", "fsl,imx27-iim", "syscon"; reg = <0x63f98000 0x4000>; interrupts = <69>; clocks = <&clks IMX5_CLK_IIM_GATE>; diff --git a/arch/arm/boot/dts/imx6dl-plybas.dts b/arch/arm/boot/dts/imx6dl-plybas.dts index 333c306aa946..bf72a67a9c76 100644 --- a/arch/arm/boot/dts/imx6dl-plybas.dts +++ b/arch/arm/boot/dts/imx6dl-plybas.dts @@ -19,17 +19,15 @@ gpio_keys { compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; autorepeat; - button@20 { + button-start { label = "START"; linux,code = <31>; gpios = <&gpio5 8 GPIO_ACTIVE_LOW>; }; - button@21 { + button-clean { label = "CLEAN"; linux,code = <46>; gpios = <&gpio5 9 GPIO_ACTIVE_LOW>; diff --git a/arch/arm/boot/dts/imx6q-b450v3.dts b/arch/arm/boot/dts/imx6q-b450v3.dts index 604f2420370f..d994b32ad825 100644 --- a/arch/arm/boot/dts/imx6q-b450v3.dts +++ b/arch/arm/boot/dts/imx6q-b450v3.dts @@ -84,6 +84,11 @@ }; &pca9539 { + gpio-line-names = "AMB_P_INT1#", "AMB_P_INT2#", "BT_EN", "WLAN_EN", + "", "SM_D_ACT", "DP1_RST#", "", + "WD15S_EN", "WD15S_DIS#", "", "", + "", "", "", ""; + P04-hog { gpio-hog; gpios = <4 0>; diff --git 
a/arch/arm/boot/dts/imx6q-b650v3.dts b/arch/arm/boot/dts/imx6q-b650v3.dts index 56d2aeb1900c..fa1a1df37cde 100644 --- a/arch/arm/boot/dts/imx6q-b650v3.dts +++ b/arch/arm/boot/dts/imx6q-b650v3.dts @@ -84,6 +84,11 @@ }; &pca9539 { + gpio-line-names = "AMB_P_INT1#", "AMB_P_INT2#", "BT_EN", "WLAN_EN", + "", "SM_D_ACT", "DP1_RST#", "", + "WD15S_EN", "WD15S_DIS#", "", "", + "", "", "", ""; + P07-hog { gpio-hog; gpios = <7 0>; diff --git a/arch/arm/boot/dts/imx6q-b850v3.dts b/arch/arm/boot/dts/imx6q-b850v3.dts index 3d6b757bf325..db8c332df6a1 100644 --- a/arch/arm/boot/dts/imx6q-b850v3.dts +++ b/arch/arm/boot/dts/imx6q-b850v3.dts @@ -199,6 +199,11 @@ }; &pca9539 { + gpio-line-names = "AMB_P_INT1#", "AMB_P_INT2#", "BT_EN", "WLAN_EN", + "REMOTE_ON_PML#", "SM_D_ACT", "DP1_RST#", "DP2_RST#", + "", "", "", "", + "", "", "", ""; + P10-hog { gpio-hog; gpios = <8 0>; diff --git a/arch/arm/boot/dts/imx6q-ba16.dtsi b/arch/arm/boot/dts/imx6q-ba16.dtsi index e4578ed3371e..6330d75f8f39 100644 --- a/arch/arm/boot/dts/imx6q-ba16.dtsi +++ b/arch/arm/boot/dts/imx6q-ba16.dtsi @@ -124,6 +124,9 @@ regulator-name = "usb_otg_vbus"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; + pinctrl-0 = <&pinctrl_usbotg_vbus>; + gpio = <&gpio4 15 GPIO_ACTIVE_HIGH>; + enable-active-high; }; }; @@ -172,7 +175,19 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet>; phy-mode = "rgmii-id"; + phy-supply = <&reg_3p3v>; + phy-handle = <&phy0>; status = "okay"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + phy0: ethernet-phy@4 { + reg = <4>; + qca,clk-out-frequency = <125000000>; + }; + }; }; &hdmi { @@ -575,6 +590,12 @@ >; }; + pinctrl_usbotg_vbus: usbotgvbusgrp { + fsl,pins = < + MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x000b0 + >; + }; + pinctrl_usdhc2: usdhc2grp { fsl,pins = < MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059 diff --git a/arch/arm/boot/dts/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/imx6q-bx50v3.dtsi index 2a98cc657595..10922375c51e 100644 --- a/arch/arm/boot/dts/imx6q-bx50v3.dtsi +++ b/arch/arm/boot/dts/imx6q-bx50v3.dtsi @@ -173,8 +173,8 @@ &i2c1 { pinctrl-names = "default", "gpio"; pinctrl-1 = <&pinctrl_i2c1_gpio>; - sda-gpios = <&gpio5 26 GPIO_ACTIVE_HIGH>; - scl-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>; + sda-gpios = <&gpio5 26 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; + scl-gpios = <&gpio5 27 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; pca9547: mux@70 { compatible = "nxp,pca9547"; @@ -315,15 +315,15 @@ &i2c2 { pinctrl-names = "default", "gpio"; pinctrl-1 = <&pinctrl_i2c2_gpio>; - sda-gpios = <&gpio4 13 GPIO_ACTIVE_HIGH>; - scl-gpios = <&gpio4 12 GPIO_ACTIVE_HIGH>; + sda-gpios = <&gpio4 13 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; + scl-gpios = <&gpio4 12 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; }; &i2c3 { pinctrl-names = "default", "gpio"; pinctrl-1 = <&pinctrl_i2c3_gpio>; - sda-gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>; - scl-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>; + sda-gpios = <&gpio1 6 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; + scl-gpios = <&gpio1 3 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; }; &iomuxc { diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi index 959d8ac2e393..8e587e17e75d 100644 --- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi @@ -32,8 +32,6 @@ gpio-keys { compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; user-pb { label = "user_pb"; diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi index 8072ed47c6bb..a0710d562766 100644 --- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi +++
b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi @@ -32,8 +32,6 @@ gpio-keys { compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; user-pb { label = "user_pb"; diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi index 8c9bcdd39830..29ba24c273e9 100644 --- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi @@ -33,8 +33,6 @@ gpio-keys { compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; user-pb { label = "user_pb"; diff --git a/arch/arm/boot/dts/imx6qdl-gw551x.dtsi b/arch/arm/boot/dts/imx6qdl-gw551x.dtsi index e5d803d023c8..435dec6338fe 100644 --- a/arch/arm/boot/dts/imx6qdl-gw551x.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw551x.dtsi @@ -67,8 +67,6 @@ gpio-keys { compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; user-pb { label = "user_pb"; diff --git a/arch/arm/boot/dts/imx6qdl-gw552x.dtsi b/arch/arm/boot/dts/imx6qdl-gw552x.dtsi index 290a607fede9..2e61102ae694 100644 --- a/arch/arm/boot/dts/imx6qdl-gw552x.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw552x.dtsi @@ -24,8 +24,6 @@ gpio-keys { compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; user-pb { label = "user_pb"; diff --git a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi index 093a219a77ae..4bc4371e6bae 100644 --- a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi @@ -91,8 +91,6 @@ gpio-keys { compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; user-pb { label = "user_pb"; diff --git a/arch/arm/boot/dts/imx6qdl-gw5903.dtsi b/arch/arm/boot/dts/imx6qdl-gw5903.dtsi index e1c8dd233cab..1fdb7ba630f1 100644 --- a/arch/arm/boot/dts/imx6qdl-gw5903.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw5903.dtsi @@ -75,8 +75,6 @@ gpio-keys { compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; user-pb { label = "user_pb"; diff --git a/arch/arm/boot/dts/imx6qdl-gw5904.dtsi b/arch/arm/boot/dts/imx6qdl-gw5904.dtsi index 3cd2e717c1da..304f3fb88fab 100644 --- a/arch/arm/boot/dts/imx6qdl-gw5904.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw5904.dtsi @@ -72,8 +72,6 @@ gpio-keys { compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; user-pb { label = "user_pb"; diff --git a/arch/arm/boot/dts/imx6qdl-gw5907.dtsi b/arch/arm/boot/dts/imx6qdl-gw5907.dtsi index 21c68a55bcb9..fcd3bdfd6182 100644 --- a/arch/arm/boot/dts/imx6qdl-gw5907.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw5907.dtsi @@ -23,8 +23,6 @@ gpio-keys { compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; user-pb { label = "user_pb"; diff --git a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi index ed4e22259959..68e5ab2e27e2 100644 --- a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi @@ -26,8 +26,6 @@ gpio-keys { compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; user-pb { label = "user_pb"; diff --git a/arch/arm/boot/dts/imx6qdl-gw5912.dtsi b/arch/arm/boot/dts/imx6qdl-gw5912.dtsi index 797f160249f7..0415bcb41640 100644 --- a/arch/arm/boot/dts/imx6qdl-gw5912.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw5912.dtsi @@ -24,8 +24,6 @@ gpio-keys { compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; user-pb { label = "user_pb"; diff --git a/arch/arm/boot/dts/imx6qdl-gw5913.dtsi b/arch/arm/boot/dts/imx6qdl-gw5913.dtsi index 4cd7d290f5b2..8e23cec7149e 100644 --- a/arch/arm/boot/dts/imx6qdl-gw5913.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw5913.dtsi @@ -23,8 +23,6 @@ 
gpio-keys { compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; user-pb { label = "user_pb"; diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pbab01.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pbab01.dtsi index d434868e870a..51d28e275aa6 100644 --- a/arch/arm/boot/dts/imx6qdl-phytec-pbab01.dtsi +++ b/arch/arm/boot/dts/imx6qdl-phytec-pbab01.dtsi @@ -78,7 +78,8 @@ ssi2 { fsl,audmux-port = <1>; fsl,port-config = < - (IMX_AUDMUX_V2_PTCR_TFSDIR | + (IMX_AUDMUX_V2_PTCR_SYN | + IMX_AUDMUX_V2_PTCR_TFSDIR | IMX_AUDMUX_V2_PTCR_TFSEL(4) | IMX_AUDMUX_V2_PTCR_TCLKDIR | IMX_AUDMUX_V2_PTCR_TCSEL(4)) @@ -89,7 +90,7 @@ pins5 { fsl,audmux-port = <4>; fsl,port-config = < - 0x00000000 + IMX_AUDMUX_V2_PTCR_SYN IMX_AUDMUX_V2_PDCR_RXDSEL(1) >; }; @@ -164,6 +165,7 @@ &usbotg { status = "okay"; + dr_mode = "peripheral"; }; &usdhc2 { diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi index 7a1e53195785..7bd658b7bdda 100644 --- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi +++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi @@ -31,6 +31,8 @@ reg_usb_h1_vbus: regulator@1 { compatible = "regulator-fixed"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usbh1_vbus>; reg = <1>; regulator-name = "usb_h1_vbus"; regulator-min-microvolt = <5000000>; @@ -41,6 +43,8 @@ }; gpio_leds: leds { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_leds>; compatible = "gpio-leds"; green { @@ -122,6 +126,8 @@ }; pmic@58 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pmic>; compatible = "dlg,da9063"; reg = <0x58>; interrupt-parent = <&gpio2>; @@ -215,25 +221,13 @@ }; &iomuxc { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_hog>; - imx6q-phytec-pfla02 { - pinctrl_hog: hoggrp { - fsl,pins = < - MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x80000000 - MX6QDL_PAD_DISP0_DAT3__GPIO4_IO24 0x80000000 /* SPI NOR chipselect */ - MX6QDL_PAD_SD4_DAT1__GPIO2_IO09 0x80000000 /* PMIC interrupt */ - MX6QDL_PAD_ENET_TXD0__GPIO1_IO30 0x80000000 /* Green LED */ - MX6QDL_PAD_EIM_EB3__GPIO2_IO31 0x80000000 /* Red LED */ - >; - }; - pinctrl_ecspi3: ecspi3grp { fsl,pins = < MX6QDL_PAD_DISP0_DAT2__ECSPI3_MISO 0x100b1 MX6QDL_PAD_DISP0_DAT1__ECSPI3_MOSI 0x100b1 MX6QDL_PAD_DISP0_DAT0__ECSPI3_SCLK 0x100b1 + MX6QDL_PAD_DISP0_DAT3__GPIO4_IO24 0x80000000 /* CS0 */ >; }; @@ -255,6 +249,7 @@ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030 MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030 MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN 0x1b0b0 + MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x80000000 /* Reset GPIO */ >; }; @@ -308,10 +303,21 @@ >; }; + pinctrl_leds: ledsgrp { + fsl,pins = < + MX6QDL_PAD_ENET_TXD0__GPIO1_IO30 0x80000000 /* Green LED */ + MX6QDL_PAD_EIM_EB3__GPIO2_IO31 0x80000000 /* Red LED */ + >; + }; + pinctrl_pcie: pciegrp { fsl,pins = <MX6QDL_PAD_DI0_PIN15__GPIO4_IO17 0x80000000>; }; + pinctrl_pmic: pmicgrp { + fsl,pins = <MX6QDL_PAD_SD4_DAT1__GPIO2_IO09 0x80000000>; /* PMIC interrupt */ + }; + pinctrl_uart3: uart3grp { fsl,pins = < MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1 @@ -328,9 +334,9 @@ >; }; - pinctrl_usbh1: usbh1grp { + pinctrl_usbh1_vbus: usbh1vbusgrp { fsl,pins = < - MX6QDL_PAD_GPIO_0__USB_H1_PWR 0x80000000 + MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0 >; }; @@ -415,8 +421,6 @@ &usbh1 { vbus-supply = <&reg_usb_h1_vbus>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_usbh1>; status = "disabled"; }; @@ -433,6 +437,7 @@ pinctrl-0 = <&pinctrl_usdhc2>; cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; + vmmc-supply = <&vdd_sd1_reg>; status = "disabled"; }; @@ -442,5 +447,6 @@
&pinctrl_usdhc3_cdwp>; cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>; wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>; + vmmc-supply = <&vdd_sd0_reg>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6qdl-ts7970.dtsi b/arch/arm/boot/dts/imx6qdl-ts7970.dtsi index e6aa0c33754d..fded07f370b3 100644 --- a/arch/arm/boot/dts/imx6qdl-ts7970.dtsi +++ b/arch/arm/boot/dts/imx6qdl-ts7970.dtsi @@ -506,7 +506,6 @@ }; &ssi1 { - fsl,mode = "i2s-slave"; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi index c070893c509e..b62a0dbb033f 100644 --- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi +++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi @@ -97,15 +97,21 @@ &i2c1 { clock-frequency = <100000>; - pinctrl-names = "default"; + pinctrl-names = "default", "gpio"; pinctrl-0 = <&pinctrl_i2c1>; + pinctrl-1 = <&pinctrl_i2c1_gpio>; + scl-gpios = <&gpio3 21 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; + sda-gpios = <&gpio3 28 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; status = "okay"; }; &i2c2 { clock-frequency = <100000>; - pinctrl-names = "default"; + pinctrl-names = "default", "gpio"; pinctrl-0 = <&pinctrl_i2c2>; + pinctrl-1 = <&pinctrl_i2c2_gpio>; + scl-gpios = <&gpio4 12 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; + sda-gpios = <&gpio4 13 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; status = "okay"; codec: sgtl5000@a { @@ -185,6 +191,13 @@ >; }; + pinctrl_i2c1_gpio: i2c1gpiogrp { + fsl,pins = < + MX6QDL_PAD_EIM_D21__GPIO3_IO21 0x4001b8b0 + MX6QDL_PAD_EIM_D28__GPIO3_IO28 0x4001b8b0 + >; + }; + pinctrl_i2c2: i2c2grp { fsl,pins = < MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1 @@ -192,6 +205,13 @@ >; }; + pinctrl_i2c2_gpio: i2c2gpiogrp { + fsl,pins = < + MX6QDL_PAD_KEY_COL3__GPIO4_IO12 0x4001b8b0 + MX6QDL_PAD_KEY_ROW3__GPIO4_IO13 0x4001b8b0 + >; + }; + pinctrl_mclk: mclkgrp { fsl,pins = < MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x130b0 diff --git a/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts b/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts index 6ea5f918d059..a17b8bbbdb95 100644 --- a/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts +++ b/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts @@ -97,8 +97,11 @@ pinctrl-1 = <&pinctrl_i2c1_sleep>; status = "okay"; - /* TODO: embedded controller at 0x43 (driver missing) */ - + ec: embedded-controller@43 { + compatible = "netronix,ntxec"; + reg = <0x43>; + #pwm-cells = <2>; + }; }; &i2c2 { diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi index c593597b2119..5a1e10def6ef 100644 --- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi +++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi @@ -210,9 +210,6 @@ micrel,led-mode = <1>; clocks = <&clks IMX6UL_CLK_ENET_REF>; clock-names = "rmii-ref"; - reset-gpios = <&gpio_spi 1 GPIO_ACTIVE_LOW>; - reset-assert-us = <10000>; - reset-deassert-us = <100>; }; @@ -222,9 +219,6 @@ micrel,led-mode = <1>; clocks = <&clks IMX6UL_CLK_ENET2_REF>; clock-names = "rmii-ref"; - reset-gpios = <&gpio_spi 2 GPIO_ACTIVE_LOW>; - reset-assert-us = <10000>; - reset-deassert-us = <100>; }; }; }; @@ -243,6 +237,22 @@ status = "okay"; }; +&gpio_spi { + eth0-phy-hog { + gpio-hog; + gpios = <1 GPIO_ACTIVE_HIGH>; + output-high; + line-name = "eth0-phy"; + }; + + eth1-phy-hog { + gpio-hog; + gpios = <2 GPIO_ACTIVE_HIGH>; + output-high; + line-name = "eth1-phy"; + }; +}; + &i2c1 { clock-frequency = <100000>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/imx6ull-colibri.dtsi b/arch/arm/boot/dts/imx6ull-colibri.dtsi index 4436556624d6..0cdbf7b6e728 100644 --- a/arch/arm/boot/dts/imx6ull-colibri.dtsi +++ 
b/arch/arm/boot/dts/imx6ull-colibri.dtsi @@ -522,12 +522,12 @@ pinctrl_usdhc2: usdhc2-grp { fsl,pins = < - MX6UL_PAD_CSI_DATA00__USDHC2_DATA0 0x17059 - MX6UL_PAD_CSI_DATA01__USDHC2_DATA1 0x17059 - MX6UL_PAD_CSI_DATA02__USDHC2_DATA2 0x17059 - MX6UL_PAD_CSI_DATA03__USDHC2_DATA3 0x17059 - MX6UL_PAD_CSI_HSYNC__USDHC2_CMD 0x17059 - MX6UL_PAD_CSI_VSYNC__USDHC2_CLK 0x17059 + MX6UL_PAD_CSI_DATA00__USDHC2_DATA0 0x17069 + MX6UL_PAD_CSI_DATA01__USDHC2_DATA1 0x17069 + MX6UL_PAD_CSI_DATA02__USDHC2_DATA2 0x17069 + MX6UL_PAD_CSI_DATA03__USDHC2_DATA3 0x17069 + MX6UL_PAD_CSI_HSYNC__USDHC2_CMD 0x17069 + MX6UL_PAD_CSI_VSYNC__USDHC2_CLK 0x17069 MX6UL_PAD_GPIO1_IO03__OSC32K_32K_OUT 0x10 >; diff --git a/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts b/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts index ecbb2cc5b9ab..79cc45728cd2 100644 --- a/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts +++ b/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts @@ -14,5 +14,6 @@ }; &gpmi { + fsl,use-minimum-ecc; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx7d-mba7.dts b/arch/arm/boot/dts/imx7d-mba7.dts index 5ef86de53013..23856a8d4b8c 100644 --- a/arch/arm/boot/dts/imx7d-mba7.dts +++ b/arch/arm/boot/dts/imx7d-mba7.dts @@ -99,8 +99,6 @@ /* probe deferral not supported */ /* pcie-bus-supply = <®_mpcie_1v5>; */ reset-gpio = <&gpio5 12 GPIO_ACTIVE_LOW>; - disable-gpio = <&gpio2 29 GPIO_ACTIVE_LOW>; - power-on-gpio = <&gpio2 30 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx7d-remarkable2.dts b/arch/arm/boot/dts/imx7d-remarkable2.dts new file mode 100644 index 000000000000..8cbae656395c --- /dev/null +++ b/arch/arm/boot/dts/imx7d-remarkable2.dts @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (C) 2015 Freescale Semiconductor, Inc. 
+ * Copyright (C) 2019 reMarkable AS - http://www.remarkable.com/ + * + */ + +/dts-v1/; + +#include "imx7d.dtsi" + +/ { + model = "reMarkable 2.0"; + compatible = "remarkable,imx7d-remarkable2", "fsl,imx7d"; + + chosen { + stdout-path = &uart6; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x80000000 0x40000000>; + }; +}; + +&clks { + assigned-clocks = <&clks IMX7D_CLKO2_ROOT_SRC>, + <&clks IMX7D_CLKO2_ROOT_DIV>; + assigned-clock-parents = <&clks IMX7D_CKIL>; + assigned-clock-rates = <0>, <32768>; +}; + +&snvs_pwrkey { + status = "okay"; +}; + +&uart1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart1>; + assigned-clocks = <&clks IMX7D_UART1_ROOT_SRC>; + assigned-clock-parents = <&clks IMX7D_OSC_24M_CLK>; + status = "okay"; +}; + +&uart6 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart6>; + assigned-clocks = <&clks IMX7D_UART6_ROOT_SRC>; + assigned-clock-parents = <&clks IMX7D_OSC_24M_CLK>; + status = "okay"; +}; + +&usbotg2 { + srp-disable; + hnp-disable; + status = "okay"; +}; + +&usdhc3 { + pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep"; + pinctrl-0 = <&pinctrl_usdhc3>; + pinctrl-1 = <&pinctrl_usdhc3_100mhz>; + pinctrl-2 = <&pinctrl_usdhc3_200mhz>; + pinctrl-3 = <&pinctrl_usdhc3>; + assigned-clocks = <&clks IMX7D_USDHC3_ROOT_CLK>; + assigned-clock-rates = <400000000>; + bus-width = <8>; + non-removable; + status = "okay"; +}; + +&wdog1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_wdog>; + fsl,ext-reset-output; +}; + +&iomuxc { + pinctrl_uart1: uart1grp { + fsl,pins = < + MX7D_PAD_UART1_TX_DATA__UART1_DCE_TX 0x79 + MX7D_PAD_UART1_RX_DATA__UART1_DCE_RX 0x79 + >; + }; + + pinctrl_uart6: uart6grp { + fsl,pins = < + MX7D_PAD_EPDC_DATA09__UART6_DCE_TX 0x79 + MX7D_PAD_EPDC_DATA08__UART6_DCE_RX 0x79 + >; + }; + + pinctrl_usdhc3: usdhc3grp { + fsl,pins = < + MX7D_PAD_SD3_CMD__SD3_CMD 0x59 + MX7D_PAD_SD3_CLK__SD3_CLK 0x19 + MX7D_PAD_SD3_DATA0__SD3_DATA0 0x59 + MX7D_PAD_SD3_DATA1__SD3_DATA1 0x59 + MX7D_PAD_SD3_DATA2__SD3_DATA2 0x59 + MX7D_PAD_SD3_DATA3__SD3_DATA3 0x59 + MX7D_PAD_SD3_DATA4__SD3_DATA4 0x59 + MX7D_PAD_SD3_DATA5__SD3_DATA5 0x59 + MX7D_PAD_SD3_DATA6__SD3_DATA6 0x59 + MX7D_PAD_SD3_DATA7__SD3_DATA7 0x59 + MX7D_PAD_SD3_STROBE__SD3_STROBE 0x19 + >; + }; + + pinctrl_usdhc3_100mhz: usdhc3grp_100mhz { + fsl,pins = < + MX7D_PAD_SD3_CMD__SD3_CMD 0x5a + MX7D_PAD_SD3_CLK__SD3_CLK 0x1a + MX7D_PAD_SD3_DATA0__SD3_DATA0 0x5a + MX7D_PAD_SD3_DATA1__SD3_DATA1 0x5a + MX7D_PAD_SD3_DATA2__SD3_DATA2 0x5a + MX7D_PAD_SD3_DATA3__SD3_DATA3 0x5a + MX7D_PAD_SD3_DATA4__SD3_DATA4 0x5a + MX7D_PAD_SD3_DATA5__SD3_DATA5 0x5a + MX7D_PAD_SD3_DATA6__SD3_DATA6 0x5a + MX7D_PAD_SD3_DATA7__SD3_DATA7 0x5a + MX7D_PAD_SD3_STROBE__SD3_STROBE 0x1a + >; + }; + + pinctrl_usdhc3_200mhz: usdhc3grp_200mhz { + fsl,pins = < + MX7D_PAD_SD3_CMD__SD3_CMD 0x5b + MX7D_PAD_SD3_CLK__SD3_CLK 0x1b + MX7D_PAD_SD3_DATA0__SD3_DATA0 0x5b + MX7D_PAD_SD3_DATA1__SD3_DATA1 0x5b + MX7D_PAD_SD3_DATA2__SD3_DATA2 0x5b + MX7D_PAD_SD3_DATA3__SD3_DATA3 0x5b + MX7D_PAD_SD3_DATA4__SD3_DATA4 0x5b + MX7D_PAD_SD3_DATA5__SD3_DATA5 0x5b + MX7D_PAD_SD3_DATA6__SD3_DATA6 0x5b + MX7D_PAD_SD3_DATA7__SD3_DATA7 0x5b + MX7D_PAD_SD3_STROBE__SD3_STROBE 0x1b + >; + }; + + pinctrl_wdog: wdoggrp { + fsl,pins = < + MX7D_PAD_ENET1_COL__WDOG1_WDOG_ANY 0x74 + >; + }; +}; diff --git a/arch/arm/boot/dts/iwg20d-q7-common.dtsi b/arch/arm/boot/dts/iwg20d-q7-common.dtsi index 63cafd220dba..bc857676d191 100644 --- a/arch/arm/boot/dts/iwg20d-q7-common.dtsi +++ b/arch/arm/boot/dts/iwg20d-q7-common.dtsi @@ -325,8 +325,8 @@ 
rcar_sound,dai { dai0 { - playback = <&ssi1 &src3 &dvc1>; - capture = <&ssi0 &src2 &dvc0>; + playback = <&ssi1>, <&src3>, <&dvc1>; + capture = <&ssi0>, <&src2>, <&dvc0>; }; }; }; diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi index 007dd2bd0595..4fce81422943 100644 --- a/arch/arm/boot/dts/ls1021a.dtsi +++ b/arch/arm/boot/dts/ls1021a.dtsi @@ -246,6 +246,7 @@ reg = <0x0 0x1700000 0x0 0x100000>; ranges = <0x0 0x0 0x1700000 0x100000>; interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>; + dma-coherent; sec_jr0: jr@10000 { compatible = "fsl,sec-v5.0-job-ring", @@ -871,7 +872,7 @@ phy_type = "ulpi"; }; - usb3: usb3@3100000 { + usb3: usb@3100000 { compatible = "snps,dwc3"; reg = <0x0 0x3100000 0x0 0x10000>; interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi index 08a7d3ce383f..ea02fd403a9b 100644 --- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi +++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi @@ -27,16 +27,14 @@ cpcap_battery: battery { compatible = "motorola,cpcap-battery"; - interrupts-extended = < - &cpcap 6 0 &cpcap 5 0 &cpcap 3 0 - &cpcap 20 0 &cpcap 54 0 &cpcap 57 0 - >; + interrupts-extended = + <&cpcap 6 0>, <&cpcap 5 0>, <&cpcap 3 0>, + <&cpcap 20 0>, <&cpcap 54 0>, <&cpcap 57 0>; interrupt-names = "eol", "lowbph", "lowbpl", - "chrgcurr1", "battdetb", - "cccal"; - io-channels = <&cpcap_adc 0 &cpcap_adc 1 - &cpcap_adc 5 &cpcap_adc 6>; + "chrgcurr1", "battdetb", "cccal"; + io-channels = <&cpcap_adc 0>, <&cpcap_adc 1>, + <&cpcap_adc 5>, <&cpcap_adc 6>; io-channel-names = "battdetb", "battp", "chg_isense", "batti"; power-supplies = <&cpcap_charger>; @@ -44,20 +42,19 @@ cpcap_charger: charger { compatible = "motorola,mapphone-cpcap-charger"; - interrupts-extended = < - &cpcap 13 0 &cpcap 12 0 &cpcap 29 0 &cpcap 28 0 - &cpcap 22 0 &cpcap 21 0 &cpcap 20 0 &cpcap 19 0 - &cpcap 54 0 - >; + interrupts-extended = + <&cpcap 13 0>, <&cpcap 12 0>, <&cpcap 29 0>, + <&cpcap 28 0>, <&cpcap 22 0>, <&cpcap 21 0>, + <&cpcap 20 0>, <&cpcap 19 0>, <&cpcap 54 0>; interrupt-names = - "chrg_det", "rvrs_chrg", "chrg_se1b", "se0conn", - "rvrs_mode", "chrgcurr2", "chrgcurr1", "vbusvld", - "battdetb"; - mode-gpios = <&gpio3 29 GPIO_ACTIVE_LOW - &gpio3 23 GPIO_ACTIVE_LOW>; - io-channels = <&cpcap_adc 0 &cpcap_adc 1 - &cpcap_adc 2 &cpcap_adc 5 - &cpcap_adc 6>; + "chrg_det", "rvrs_chrg", "chrg_se1b", + "se0conn", "rvrs_mode", "chrgcurr2", + "chrgcurr1", "vbusvld", "battdetb"; + mode-gpios = <&gpio3 29 GPIO_ACTIVE_LOW>, + <&gpio3 23 GPIO_ACTIVE_LOW>; + io-channels = <&cpcap_adc 0>, <&cpcap_adc 1>, + <&cpcap_adc 2>, <&cpcap_adc 5>, + <&cpcap_adc 6>; io-channel-names = "battdetb", "battp", "vbus", "chg_isense", "batti"; @@ -98,22 +95,22 @@ cpcap_usb2_phy: phy { compatible = "motorola,mapphone-cpcap-usb-phy"; - pinctrl-0 = <&usb_gpio_mux_sel1 &usb_gpio_mux_sel2>; + pinctrl-0 = <&usb_gpio_mux_sel1>, <&usb_gpio_mux_sel2>; pinctrl-1 = <&usb_ulpi_pins>; pinctrl-2 = <&usb_utmi_pins>; pinctrl-3 = <&uart3_pins>; pinctrl-names = "default", "ulpi", "utmi", "uart"; #phy-cells = <0>; - interrupts-extended = < - &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0 - &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0 - &cpcap 48 0 - >; + interrupts-extended = + <&cpcap 15 0>, <&cpcap 14 0>, <&cpcap 28 0>, + <&cpcap 19 0>, <&cpcap 18 0>, <&cpcap 17 0>, + <&cpcap 16 0>, <&cpcap 49 0>, <&cpcap 48 0>; interrupt-names = - "id_ground", "id_float", "se0conn", "vbusvld", - "sessvld", "sessend", "se1", "dm", "dp"; - 
mode-gpios = <&gpio2 28 GPIO_ACTIVE_HIGH - &gpio1 0 GPIO_ACTIVE_HIGH>; + "id_ground", "id_float", "se0conn", + "vbusvld", "sessvld", "sessend", + "se1", "dm", "dp"; + mode-gpios = <&gpio2 28 GPIO_ACTIVE_HIGH>, + <&gpio1 0 GPIO_ACTIVE_HIGH>; io-channels = <&cpcap_adc 2>, <&cpcap_adc 7>; io-channel-names = "vbus", "id"; vusb-supply = <&vusb>; diff --git a/arch/arm/boot/dts/mstar-infinity2m-ssd202d-unitv2.dts b/arch/arm/boot/dts/mstar-infinity2m-ssd202d-unitv2.dts new file mode 100644 index 000000000000..a81684002e45 --- /dev/null +++ b/arch/arm/boot/dts/mstar-infinity2m-ssd202d-unitv2.dts @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2021 thingy.jp. + * Author: Daniel Palmer <daniel@thingy.jp> + */ + +/dts-v1/; +#include "mstar-infinity2m-ssd202d.dtsi" + +/ { + model = "UnitV2"; + compatible = "m5stack,unitv2", "mstar,infinity2m"; + + aliases { + serial0 = &pm_uart; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; +}; + +&pm_uart { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/mstar-v7.dtsi b/arch/arm/boot/dts/mstar-v7.dtsi index b0a21b0b731f..075d583d6f40 100644 --- a/arch/arm/boot/dts/mstar-v7.dtsi +++ b/arch/arm/boot/dts/mstar-v7.dtsi @@ -6,6 +6,7 @@ #include <dt-bindings/interrupt-controller/irq.h> #include <dt-bindings/interrupt-controller/arm-gic.h> +#include <dt-bindings/clock/mstar-msc313-mpll.h> / { #address-cells = <1>; @@ -46,6 +47,21 @@ interrupt-affinity = <&cpu0>; }; + clocks: clocks { + xtal: xtal { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <24000000>; + }; + + rtc_xtal: rtc_xtal { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <32768>; + status = "disabled"; + }; + }; + soc: soc { compatible = "simple-bus"; #address-cells = <1>; @@ -109,6 +125,13 @@ reg = <0x204400 0x200>; }; + mpll: mpll@206000 { + compatible = "mstar,msc313-mpll"; + #clock-cells = <1>; + reg = <0x206000 0x200>; + clocks = <&xtal>; + }; + gpio: gpio@207800 { #gpio-cells = <2>; reg = <0x207800 0x200>; diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi index fade14284017..4776f85d6d5b 100644 --- a/arch/arm/boot/dts/mt2701.dtsi +++ b/arch/arm/boot/dts/mt2701.dtsi @@ -607,7 +607,7 @@ }; usb0: usb@1a1c0000 { - compatible = "mediatek,mt8173-xhci"; + compatible = "mediatek,mt2701-xhci", "mediatek,mtk-xhci"; reg = <0 0x1a1c0000 0 0x1000>, <0 0x1a1c4700 0 0x0100>; reg-names = "mac", "ippc"; @@ -620,8 +620,9 @@ status = "disabled"; }; - u3phy0: usb-phy@1a1c4000 { - compatible = "mediatek,mt2701-u3phy"; + u3phy0: t-phy@1a1c4000 { + compatible = "mediatek,mt2701-tphy", + "mediatek,generic-tphy-v1"; reg = <0 0x1a1c4000 0 0x0700>; #address-cells = <2>; #size-cells = <2>; @@ -646,7 +647,7 @@ }; usb1: usb@1a240000 { - compatible = "mediatek,mt8173-xhci"; + compatible = "mediatek,mt2701-xhci", "mediatek,mtk-xhci"; reg = <0 0x1a240000 0 0x1000>, <0 0x1a244700 0 0x0100>; reg-names = "mac", "ippc"; @@ -659,8 +660,9 @@ status = "disabled"; }; - u3phy1: usb-phy@1a244000 { - compatible = "mediatek,mt2701-u3phy"; + u3phy1: t-phy@1a244000 { + compatible = "mediatek,mt2701-tphy", + "mediatek,generic-tphy-v1"; reg = <0 0x1a244000 0 0x0700>; #address-cells = <2>; #size-cells = <2>; @@ -700,8 +702,9 @@ status = "disabled"; }; - u2phy0: usb-phy@11210000 { - compatible = "mediatek,generic-tphy-v1"; + u2phy0: t-phy@11210000 { + compatible = "mediatek,mt2701-tphy", + "mediatek,generic-tphy-v1"; reg = <0 0x11210000 0 0x0800>; #address-cells = <2>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/mt6589.dtsi 
b/arch/arm/boot/dts/mt6589.dtsi index f3ccb70c0779..70df00a7bb26 100644 --- a/arch/arm/boot/dts/mt6589.dtsi +++ b/arch/arm/boot/dts/mt6589.dtsi @@ -17,6 +17,7 @@ cpus { #address-cells = <1>; #size-cells = <0>; + enable-method = "mediatek,mt6589-smp"; cpu@0 { device_type = "cpu"; diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi index aea6809500d7..3c11f7cfcc40 100644 --- a/arch/arm/boot/dts/mt7623.dtsi +++ b/arch/arm/boot/dts/mt7623.dtsi @@ -787,8 +787,9 @@ }; }; - pcie0_phy: pcie-phy@1a149000 { - compatible = "mediatek,generic-tphy-v1"; + pcie0_phy: t-phy@1a149000 { + compatible = "mediatek,mt7623-tphy", + "mediatek,generic-tphy-v1"; reg = <0 0x1a149000 0 0x0700>; #address-cells = <2>; #size-cells = <2>; @@ -804,8 +805,9 @@ }; }; - pcie1_phy: pcie-phy@1a14a000 { - compatible = "mediatek,generic-tphy-v1"; + pcie1_phy: t-phy@1a14a000 { + compatible = "mediatek,mt7623-tphy", + "mediatek,generic-tphy-v1"; reg = <0 0x1a14a000 0 0x0700>; #address-cells = <2>; #size-cells = <2>; @@ -823,7 +825,7 @@ usb1: usb@1a1c0000 { compatible = "mediatek,mt7623-xhci", - "mediatek,mt8173-xhci"; + "mediatek,mtk-xhci"; reg = <0 0x1a1c0000 0 0x1000>, <0 0x1a1c4700 0 0x0100>; reg-names = "mac", "ippc"; @@ -836,9 +838,9 @@ status = "disabled"; }; - u3phy1: usb-phy@1a1c4000 { - compatible = "mediatek,mt7623-u3phy", - "mediatek,mt2701-u3phy"; + u3phy1: t-phy@1a1c4000 { + compatible = "mediatek,mt7623-tphy", + "mediatek,generic-tphy-v1"; reg = <0 0x1a1c4000 0 0x0700>; #address-cells = <2>; #size-cells = <2>; @@ -864,7 +866,7 @@ usb2: usb@1a240000 { compatible = "mediatek,mt7623-xhci", - "mediatek,mt8173-xhci"; + "mediatek,mtk-xhci"; reg = <0 0x1a240000 0 0x1000>, <0 0x1a244700 0 0x0100>; reg-names = "mac", "ippc"; @@ -877,9 +879,9 @@ status = "disabled"; }; - u3phy2: usb-phy@1a244000 { - compatible = "mediatek,mt7623-u3phy", - "mediatek,mt2701-u3phy"; + u3phy2: t-phy@1a244000 { + compatible = "mediatek,mt7623-tphy", + "mediatek,generic-tphy-v1"; reg = <0 0x1a244000 0 0x0700>; #address-cells = <2>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/mt7623n.dtsi b/arch/arm/boot/dts/mt7623n.dtsi index 1880ac9e32cf..bcb0846e29fd 100644 --- a/arch/arm/boot/dts/mt7623n.dtsi +++ b/arch/arm/boot/dts/mt7623n.dtsi @@ -246,7 +246,7 @@ status = "disabled"; }; - mipi_tx0: mipi-dphy@10010000 { + mipi_tx0: dsi-phy@10010000 { compatible = "mediatek,mt7623-mipi-tx", "mediatek,mt2701-mipi-tx"; reg = <0 0x10010000 0 0x90>; @@ -265,7 +265,7 @@ status = "disabled"; }; - hdmi_phy: phy@10209100 { + hdmi_phy: hdmi-phy@10209100 { compatible = "mediatek,mt7623-hdmi-phy", "mediatek,mt2701-hdmi-phy"; reg = <0 0x10209100 0 0x24>; diff --git a/arch/arm/boot/dts/mt7629.dtsi b/arch/arm/boot/dts/mt7629.dtsi index 5cbb3d244c75..874043f0490d 100644 --- a/arch/arm/boot/dts/mt7629.dtsi +++ b/arch/arm/boot/dts/mt7629.dtsi @@ -329,8 +329,9 @@ status = "disabled"; }; - u3phy0: usb-phy@1a0c4000 { - compatible = "mediatek,generic-tphy-v2"; + u3phy0: t-phy@1a0c4000 { + compatible = "mediatek,mt7629-tphy", + "mediatek,generic-tphy-v2"; #address-cells = <1>; #size-cells = <1>; ranges = <0 0x1a0c4000 0xe00>; @@ -413,14 +414,15 @@ }; }; - pciephy1: pcie-phy@1a14a000 { - compatible = "mediatek,generic-tphy-v2"; + pciephy1: t-phy@1a14a000 { + compatible = "mediatek,mt7629-tphy", + "mediatek,generic-tphy-v2"; #address-cells = <1>; #size-cells = <1>; ranges = <0 0x1a14a000 0x1000>; status = "disabled"; - pcieport1: port1phy@0 { + pcieport1: pcie-phy@0 { reg = <0 0x1000>; clocks = <&clk20m>; clock-names = "ref"; diff --git 
a/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts b/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts new file mode 100644 index 000000000000..eb6eb21cb2a4 --- /dev/null +++ b/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts @@ -0,0 +1,1135 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Quanta Computer Inc. George.Hung@quantatw.com + +/dts-v1/; +#include "nuvoton-npcm730.dtsi" +#include <dt-bindings/gpio/gpio.h> + +/ { + model = "Quanta GBS Board (Device Tree)"; + compatible = "quanta,gbs-bmc","nuvoton,npcm730"; + + aliases { + ethernet1 = &gmac0; + serial0 = &serial0; + serial1 = &serial1; + serial2 = &serial2; + serial3 = &serial3; + i2c0 = &i2c0; + i2c1 = &i2c1; + i2c2 = &i2c2; + i2c3 = &i2c3; + i2c4 = &i2c4; + i2c5 = &i2c5; + i2c6 = &i2c6; + i2c7 = &i2c7; + i2c8 = &i2c8; + i2c9 = &i2c9; + i2c10 = &i2c10; + i2c11 = &i2c11; + i2c12 = &i2c12; + i2c13 = &i2c13; + i2c14 = &i2c14; + i2c15 = &i2c15; + i2c16 = &i2c0_slotPE0_0; + i2c17 = &i2c0_slotPE1_1; + i2c18 = &i2c0_slotUSB_2; + i2c19 = &i2c0_3; + i2c20 = &i2c5_i2cool_0; + i2c21 = &i2c5_i2cool_1; + i2c22 = &i2c5_i2cool_2; + i2c23 = &i2c5_hsbp_fru_3; + i2c24 = &i2c6_u2_15_0; + i2c25 = &i2c6_u2_14_1; + i2c26 = &i2c6_u2_13_2; + i2c27 = &i2c6_u2_12_3; + i2c28 = &i2c7_u2_11_0; + i2c29 = &i2c7_u2_10_1; + i2c30 = &i2c7_u2_9_2; + i2c31 = &i2c7_u2_8_3; + i2c32 = &i2c9_vddcr_cpu; + i2c33 = &i2c9_vddcr_soc; + i2c34 = &i2c9_vddio_efgh; + i2c35 = &i2c9_vddio_abcd; + i2c36 = &i2c10_u2_7_0; + i2c37 = &i2c10_u2_6_1; + i2c38 = &i2c10_u2_5_2; + i2c39 = &i2c10_u2_4_3; + i2c40 = &i2c11_clk_buf0_0; + i2c41 = &i2c11_clk_buf1_1; + i2c42 = &i2c11_clk_buf2_2; + i2c43 = &i2c11_clk_buf3_3; + i2c44 = &i2c14_u2_3_0; + i2c45 = &i2c14_u2_2_1; + i2c46 = &i2c14_u2_1_2; + i2c47 = &i2c14_u2_0_3; + fiu0 = &fiu0; + fiu1 = &fiu3; + }; + + chosen { + stdout-path = &serial0; + }; + + memory { + reg = <0 0x40000000>; + }; + + gpio-keys { + compatible = "gpio-keys"; + sas-cable0 { + label = "sas-cable0"; + gpios = <&gpio2 9 GPIO_ACTIVE_LOW>; + linux,code = <73>; + }; + + sas-cable1 { + label = "sas-cable1"; + gpios = <&gpio2 8 GPIO_ACTIVE_LOW>; + linux,code = <72>; + }; + + sas-cable2 { + label = "sas-cable2"; + gpios = <&gpio2 7 GPIO_ACTIVE_LOW>; + linux,code = <71>; + }; + + sas-cable3 { + label = "sas-cable3"; + gpios = <&gpio2 6 GPIO_ACTIVE_LOW>; + linux,code = <70>; + }; + + sata0 { + label = "sata0"; + gpios = <&gpio0 5 GPIO_ACTIVE_LOW>; + linux,code = <5>; + }; + + hsbp-cable { + label = "hsbp-cable"; + gpios = <&gpio1 25 GPIO_ACTIVE_LOW>; + linux,code = <57>; + }; + + fanbd-cable { + label = "fanbd-cable"; + gpios = <&gpio1 26 GPIO_ACTIVE_HIGH>; + linux,code = <58>; + }; + + bp12v-cable { + label = "bp12v-cable"; + gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>; + linux,code = <69>; + }; + + pe-slot0 { + label = "pe-slot0"; + gpios = <&gpio3 24 GPIO_ACTIVE_LOW>; + linux,code = <120>; + }; + + pe-slot1 { + label = "pe-slot1"; + gpios = <&gpio3 25 GPIO_ACTIVE_LOW>; + linux,code = <121>; + }; + }; + + iio-hwmon { + compatible = "iio-hwmon"; + io-channels = <&adc 1>, <&adc 2>; + }; + + iio-hwmon-battery { + compatible = "iio-hwmon"; + io-channels = <&adc 0>; + }; + + leds { + compatible = "gpio-leds"; + + heartbeat { /* gpio153 */ + gpios = <&gpio4 25 GPIO_ACTIVE_LOW>; + linux,default-trigger = "heartbeat"; + }; + + attention { /* gpio215 */ + gpios = <&gpio6 23 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + + sys_boot_status { /* gpio216 */ + gpios = <&gpio6 24 GPIO_ACTIVE_HIGH>; + default-state = "keep"; + retain-state-shutdown; + }; + + bmc_fault { /* gpio217 */ + gpios = <&gpio6 25 
GPIO_ACTIVE_HIGH>; + default-state = "off"; + linux,default-trigger = "panic"; + panic-indicator; + }; + + led_u2_0_locate { + gpios = <&pca9535_ledlocate 3 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_1_locate { + gpios = <&pca9535_ledlocate 2 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_2_locate { + gpios = <&pca9535_ledlocate 1 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_3_locate { + gpios = <&pca9535_ledlocate 0 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_4_locate { + gpios = <&pca9535_ledlocate 7 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_5_locate { + gpios = <&pca9535_ledlocate 6 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_6_locate { + gpios = <&pca9535_ledlocate 5 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_7_locate { + gpios = <&pca9535_ledlocate 4 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_8_locate { + gpios = <&pca9535_ledlocate 11 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_9_locate { + gpios = <&pca9535_ledlocate 10 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_10_locate { + gpios = <&pca9535_ledlocate 9 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_11_locate { + gpios = <&pca9535_ledlocate 8 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_12_locate { + gpios = <&pca9535_ledlocate 15 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_13_locate { + gpios = <&pca9535_ledlocate 14 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_14_locate { + gpios = <&pca9535_ledlocate 13 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_15_locate { + gpios = <&pca9535_ledlocate 12 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_0_fault { + gpios = <&pca9535_ledfault 3 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_1_fault { + gpios = <&pca9535_ledfault 2 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_2_fault { + gpios = <&pca9535_ledfault 1 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_3_fault { + gpios = <&pca9535_ledfault 0 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_4_fault { + gpios = <&pca9535_ledfault 7 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_5_fault { + gpios = <&pca9535_ledfault 6 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_6_fault { + gpios = <&pca9535_ledfault 5 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_7_fault { + gpios = <&pca9535_ledfault 4 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_8_fault { + gpios = <&pca9535_ledfault 11 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_9_fault { + gpios = <&pca9535_ledfault 10 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_10_fault { + gpios = <&pca9535_ledfault 9 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_11_fault { + gpios = <&pca9535_ledfault 8 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_12_fault { + gpios = <&pca9535_ledfault 15 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_13_fault { + gpios = <&pca9535_ledfault 14 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_14_fault { + gpios = <&pca9535_ledfault 13 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led_u2_15_fault { + gpios = <&pca9535_ledfault 12 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + }; + + seven-seg-disp { + compatible = "seven-seg-gpio-dev"; + refresh-interval-ms = /bits/ 16 <600>; + clock-gpios = <&gpio0 2 GPIO_ACTIVE_LOW>; + data-gpios = <&gpio0 3 GPIO_ACTIVE_HIGH>; + 
clear-gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>; + }; + + pcie-slot { + pcie1: pcie-slot@1 { + label = "PE0"; + }; + pcie2: pcie-slot@2 { + label = "PE1"; + }; + }; +}; + +&fiu0 { + pinctrl-names = "default"; + pinctrl-0 = <&spi0cs1_pins>; + status = "okay"; + spi-nor@0 { + compatible = "jedec,spi-nor"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0>; + spi-max-frequency = <20000000>; + spi-rx-bus-width = <2>; + label = "bmc"; + partitions@80000000 { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + u-boot@0 { + label = "u-boot"; + reg = <0x0000000 0xf0000>; + }; + image-descriptor@f0000 { + label = "image-descriptor"; + reg = <0xf0000 0x10000>; + }; + hoth-update@100000 { + label = "hoth-update"; + reg = <0x100000 0x100000>; + }; + kernel@200000 { + label = "kernel"; + reg = <0x200000 0x500000>; + }; + rofs@700000 { + label = "rofs"; + reg = <0x700000 0x35f0000>; + }; + rwfs@3cf0000 { + label = "rwfs"; + reg = <0x3cf0000 0x300000>; + }; + hoth-mailbox@3ff0000 { + label = "hoth-mailbox"; + reg = <0x3ff0000 0x10000>; + }; + }; + }; +}; + +&fiu3 { + pinctrl-0 = <&spi3_pins>, <&spi3cs1_pins>; + status = "okay"; + + spi-nor@0 { + compatible = "jedec,spi-nor"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0>; + spi-max-frequency = <50000000>; + spi-rx-bus-width = <2>; + m25p,fast-read; + label = "pnor"; + }; + spi-nor@1 { + compatible = "jedec,spi-nor"; + #address-cells = <1>; + #size-cells = <1>; + reg = <1>; + spi-max-frequency = <50000000>; + spi-rx-bus-width = <2>; + m25p,fast-read; + }; +}; + +&gcr { + serial_port_mux: uart-mux-controller { + compatible = "mmio-mux"; + #mux-control-cells = <1>; + mux-reg-masks = <0x38 0x07>; + idle-states = <2>; /* Serial port mode 3 (takeover) */ + }; + + uart1_mode_mux: uart1-mode-mux-controller { + compatible = "mmio-mux"; + #mux-control-cells = <1>; + mux-reg-masks = <0x64 0x01000000>; + idle-states = <0>; /* Set UART1 mode to normal (follow SPMOD) */ + }; +}; + +&gmac0 { + status = "okay"; + phy-mode = "rgmii-id"; + snps,eee-force-disable; +}; + +&ehci1 { + status = "okay"; +}; + +&watchdog1 { + status = "okay"; +}; + +&rng { + status = "okay"; +}; + +&serial0 { + status = "okay"; +}; + +&serial1 { + status = "okay"; +}; + +&serial2 { + status = "okay"; +}; + +&serial3 { + status = "okay"; +}; + +&adc { + #io-channel-cells = <1>; + status = "okay"; +}; + +&lpc_kcs { + kcs1: kcs1@0 { + status = "okay"; + }; + + kcs2: kcs2@0 { + status = "okay"; + }; + + kcs3: kcs3@0 { + status = "okay"; + }; +}; + +&spi1 { + cs-gpios = <&gpio4 19 GPIO_ACTIVE_HIGH>; /* dummy - gpio147 */ + pinctrl-names = "default"; + pinctrl-0 = <&gpio224ol_pins &gpio227o_pins + &gpio228_pins>; + status = "okay"; + + jtag_master@0 { + compatible = "nuvoton,npcm750-jtag-master"; + spi-max-frequency = <25000000>; + reg = <0>; + status = "okay"; + + pinctrl-names = "pspi", "gpio"; + pinctrl-0 = <&pspi2_pins>; + pinctrl-1 = <&gpio224ol_pins &gpio227o_pins + &gpio228_pins>; + + tck-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>; + tdi-gpios = <&gpio7 3 GPIO_ACTIVE_HIGH>; + tdo-gpios = <&gpio7 4 GPIO_ACTIVE_HIGH>; + tms-gpios = <&gpio7 6 GPIO_ACTIVE_HIGH>; + }; +}; + +&i2c0 { + clock-frequency = <100000>; + status = "okay"; + + i2c-switch@71 { + compatible = "nxp,pca9546"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x71>; + i2c-mux-idle-disconnect; + reset-gpios = <&gpio2 20 GPIO_ACTIVE_LOW>; + + i2c0_slotPE0_0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + pcie-slot = &pcie1; + }; + + i2c0_slotPE1_1: i2c@1 { + #address-cells = 
<1>; + #size-cells = <0>; + reg = <1>; + pcie-slot = &pcie2; + }; + + i2c0_slotUSB_2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + }; + + i2c0_3: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + }; + }; +}; + +&i2c1 { + clock-frequency = <100000>; + status = "okay"; + + pca9535_ifdet: pca9535-ifdet@24 { + compatible = "nxp,pca9535"; + reg = <0x24>; + gpio-controller; + #gpio-cells = <2>; + }; + + pca9535_pwren: pca9535-pwren@20 { + compatible = "nxp,pca9535"; + reg = <0x20>; + gpio-controller; + #gpio-cells = <2>; + + gpio-line-names = + "pwr_u2_3_en","pwr_u2_2_en", + "pwr_u2_1_en","pwr_u2_0_en", + "pwr_u2_7_en","pwr_u2_6_en", + "pwr_u2_5_en","pwr_u2_4_en", + "pwr_u2_11_en","pwr_u2_10_en", + "pwr_u2_9_en","pwr_u2_8_en", + "pwr_u2_15_en","pwr_u2_14_en", + "pwr_u2_13_en","pwr_u2_12_en"; + }; + + pca9535_pwrgd: pca9535-pwrgd@21 { + compatible = "nxp,pca9535"; + reg = <0x21>; + gpio-controller; + #gpio-cells = <2>; + }; + + pca9535_ledlocate: pca9535-ledlocate@22 { + compatible = "nxp,pca9535"; + reg = <0x22>; + gpio-controller; + #gpio-cells = <2>; + + }; + + pca9535_ledfault: pca9535-ledfault@23 { + compatible = "nxp,pca9535"; + reg = <0x23>; + gpio-controller; + #gpio-cells = <2>; + + }; + + pca9535_pwrdisable: pca9535-pwrdisable@25 { + compatible = "nxp,pca9535"; + reg = <0x25>; + gpio-controller; + #gpio-cells = <2>; + + gpio-line-names = + "u2_3_pwr_dis","u2_2_pwr_dis", + "u2_1_pwr_dis","u2_0_pwr_dis", + "u2_7_pwr_dis","u2_6_pwr_dis", + "u2_5_pwr_dis","u2_4_pwr_dis", + "u2_11_pwr_dis","u2_10_pwr_dis", + "u2_9_pwr_dis","u2_8_pwr_dis", + "u2_15_pwr_dis","u2_14_pwr_dis", + "u2_13_pwr_dis","u2_12_pwr_dis"; + }; + + pca9535_perst: pca9535-perst@26 { + compatible = "nxp,pca9535"; + reg = <0x26>; + gpio-controller; + #gpio-cells = <2>; + + gpio-line-names = + "u2_15_perst","u2_14_perst", + "u2_13_perst","u2_12_perst", + "u2_11_perst","u2_10_perst", + "u2_9_perst","u2_8_perst", + "u2_7_perst","u2_6_perst", + "u2_5_perst","u2_4_perst", + "u2_3_perst","u2_2_perst", + "u2_1_perst","u2_0_perst"; + }; +}; + +&i2c2 { + clock-frequency = <100000>; + status = "okay"; + + sbtsi@4c { + compatible = "amd,sbtsi"; + reg = <0x4c>; + }; +}; + +&i2c5 { + clock-frequency = <100000>; + status = "okay"; + + mb_fru@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + + i2c-switch@71 { + compatible = "nxp,pca9546"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x71>; + i2c-mux-idle-disconnect; + + i2c5_i2cool_0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + max31725@54 { + compatible = "maxim,max31725"; + reg = <0x54>; + status = "okay"; + }; + }; + + i2c5_i2cool_1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + max31725@55 { + compatible = "maxim,max31725"; + reg = <0x55>; + status = "okay"; + }; + }; + + i2c5_i2cool_2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + max31725@5d { + compatible = "maxim,max31725"; + reg = <0x5d>; + status = "okay"; + }; + fan_fru@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + }; + + i2c5_hsbp_fru_3: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + hsbp_fru@52 { + compatible = "atmel,24c64"; + reg = <0x52>; + status = "okay"; + }; + }; + }; +}; + +&i2c6 { + clock-frequency = <100000>; + status = "okay"; + + i2c-switch@73 { + compatible = "nxp,pca9545"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x73>; + i2c-mux-idle-disconnect; + + i2c6_u2_15_0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + }; + + 
i2c6_u2_14_1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + }; + i2c6_u2_13_2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + }; + + i2c6_u2_12_3: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + }; + }; +}; + +&i2c7 { + clock-frequency = <100000>; + status = "okay"; + + i2c-switch@72 { + compatible = "nxp,pca9545"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x72>; + i2c-mux-idle-disconnect; + + i2c7_u2_11_0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + }; + + i2c7_u2_10_1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + }; + i2c7_u2_9_2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + }; + + i2c7_u2_8_3: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + }; + }; +}; + +&i2c8 { + clock-frequency = <100000>; + status = "okay"; + + i2c8_adm1272: adm1272@10 { + compatible = "adi,adm1272"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x10>; + shunt-resistor-micro-ohms = <300>; + }; +}; + +&i2c9 { + clock-frequency = <100000>; + status = "okay"; + + i2c-switch@71 { + compatible = "nxp,pca9546"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x71>; + i2c-mux-idle-disconnect; + reset-gpios = <&gpio2 22 GPIO_ACTIVE_LOW>; + + i2c9_vddcr_cpu: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + vrm@60 { + compatible = "isil,isl68137"; + reg = <0x60>; + }; + }; + + i2c9_vddcr_soc: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + vrm@61 { + compatible = "isil,isl68137"; + reg = <0x61>; + }; + }; + + i2c9_vddio_efgh: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + vrm@63 { + compatible = "isil,isl68137"; + reg = <0x63>; + }; + }; + + i2c9_vddio_abcd: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + vrm@45 { + compatible = "isil,isl68137"; + reg = <0x45>; + }; + }; + }; +}; + +&i2c10 { + clock-frequency = <100000>; + status = "okay"; + + i2c-switch@71 { + compatible = "nxp,pca9545"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x71>; + i2c-mux-idle-disconnect; + + i2c10_u2_7_0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + }; + + i2c10_u2_6_1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + }; + i2c10_u2_5_2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + }; + + i2c10_u2_4_3: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + }; + }; +}; + +&i2c11 { + clock-frequency = <100000>; + status = "okay"; + + i2c-switch@76 { + compatible = "nxp,pca9545"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x76>; + i2c-mux-idle-disconnect; + + i2c11_clk_buf0_0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + }; + + i2c11_clk_buf1_1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + }; + i2c11_clk_buf2_2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + }; + + i2c11_clk_buf3_3: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + }; + }; +}; + +&i2c12 { + clock-frequency = <100000>; + status = "okay"; + + max34451@4e { + compatible = "maxim,max34451"; + reg = <0x4e>; + }; + vrm@5d { + compatible = "isil,isl68137"; + reg = <0x5d>; + }; + vrm@5e { + compatible = "isil,isl68137"; + reg = <0x5e>; + }; +}; + +&i2c13 { + clock-frequency = <100000>; + status = "okay"; +}; + +&i2c14 { + clock-frequency = <100000>; + status = "okay"; + + i2c-switch@70 { + compatible = "nxp,pca9545"; + #address-cells = <1>; + #size-cells = 
<0>; + reg = <0x70>; + i2c-mux-idle-disconnect; + + i2c14_u2_3_0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + }; + + i2c14_u2_2_1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + }; + + i2c14_u2_1_2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + }; + + i2c14_u2_0_3: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + }; + }; +}; + +&pwm_fan { + pinctrl-names = "default"; + pinctrl-0 = < + &pwm0_pins &pwm1_pins + &pwm2_pins &pwm3_pins + &pwm4_pins + &fanin0_pins &fanin1_pins + &fanin2_pins &fanin3_pins + &fanin4_pins + >; + status = "okay"; + + fan@0 { + reg = <0x00>; + fan-tach-ch = /bits/ 8 <0x00>; + }; + fan@1 { + reg = <0x01>; + fan-tach-ch = /bits/ 8 <0x01>; + }; + fan@2 { + reg = <0x02>; + fan-tach-ch = /bits/ 8 <0x02>; + }; + fan@3 { + reg = <0x04>; + fan-tach-ch = /bits/ 8 <0x04>; + }; + fan@4 { + reg = <0x03>; + fan-tach-ch = /bits/ 8 <0x03>; + }; +}; + +&pinctrl { + pinctrl-names = "default"; + + gpio0: gpio@f0010000 { + /* POWER_OUT=gpio07, RESET_OUT=gpio06, PS_PWROK=gpio13 */ + gpio-line-names = + /*0-31*/ + "","","","","","","RESET_OUT","POWER_OUT", + "","","","","","PS_PWROK","","", + "","","","","","","","", + "","","","","","","",""; + }; + gpio1: gpio@f0011000 { + /* SIO_POWER_GOOD=gpio59 */ + gpio-line-names = + /*32-63*/ + "","","","","","","","", + "","","","","","","","", + "","","","","","","","", + "","","","SIO_POWER_GOOD","","","",""; + }; + gpio2: gpio@f0012000 { + bmc_usb_mux_oe_n { + gpio-hog; + gpios = <25 GPIO_ACTIVE_HIGH>; + output-low; + line-name = "bmc-usb-mux-oe-n"; + }; + bmc_usb_mux_sel { + gpio-hog; + gpios = <26 GPIO_ACTIVE_HIGH>; + output-low; + line-name = "bmc-usb-mux-sel"; + }; + bmc_usb2517_reset_n { + gpio-hog; + gpios = <27 GPIO_ACTIVE_LOW>; + output-low; + line-name = "bmc-usb2517-reset-n"; + }; + }; + gpio3: gpio@f0013000 { + assert_cpu0_reset { + gpio-hog; + gpios = <14 GPIO_ACTIVE_HIGH>; + output-low; + line-name = "assert-cpu0-reset"; + }; + assert_pwrok_cpu0_n { + gpio-hog; + gpios = <15 GPIO_ACTIVE_HIGH>; + output-low; + line-name = "assert-pwrok-cpu0-n"; + }; + assert_cpu0_prochot { + gpio-hog; + gpios = <16 GPIO_ACTIVE_HIGH>; + output-low; + line-name = "assert-cpu0-prochot"; + }; + }; + gpio4: gpio@f0014000 { + /* POST_COMPLETE=gpio143 */ + gpio-line-names = + /*128-159*/ + "","","","","","","","", + "","","","","","","","POST_COMPLETE", + "","","","","","","","", + "","","","","","","",""; + }; + gpio5: gpio@f0015000 { + /* POWER_BUTTON=gpio177 */ + gpio-line-names = + /*160-191*/ + "","","","","","","","", + "","","","","","","","", + "","POWER_BUTTON","","","","","","", + "","","","","","","",""; + }; + gpio6: gpio@f0016000 { + /* SIO_S5=gpio199, RESET_BUTTON=gpio203 */ + gpio-line-names = + /*192-223*/ + "","","","","","","","SIO_S5", + "","","","RESET_BUTTON","","","","", + "","","","","","","","", + "","","","","","","",""; + }; + + gpio224ol_pins: gpio224ol-pins { + pins = "GPIO224/SPIXCK"; + bias-disable; + output-low; + }; + gpio227o_pins: gpio227o-pins { + pins = "GPIO227/nSPIXCS0"; + bias-disable; + output-high; + }; + gpio228_pins: gpio228-pins { + pins = "GPIO228/nSPIXCS1"; + bias-disable; + input-enable; + }; +}; diff --git a/arch/arm/boot/dts/nuvoton-npcm750-evb.dts b/arch/arm/boot/dts/nuvoton-npcm750-evb.dts index 9f13d08f5804..dea3dbc4a6a5 100644 --- a/arch/arm/boot/dts/nuvoton-npcm750-evb.dts +++ b/arch/arm/boot/dts/nuvoton-npcm750-evb.dts @@ -9,7 +9,7 @@ / { model = "Nuvoton npcm750 Development Board (Device Tree)"; - compatible = 
"nuvoton,npcm750"; + compatible = "nuvoton,npcm750-evb", "nuvoton,npcm750"; aliases { ethernet2 = &gmac0; diff --git a/arch/arm/boot/dts/nuvoton-wpcm450-supermicro-x9sci-ln4f.dts b/arch/arm/boot/dts/nuvoton-wpcm450-supermicro-x9sci-ln4f.dts new file mode 100644 index 000000000000..83f27fbf4e93 --- /dev/null +++ b/arch/arm/boot/dts/nuvoton-wpcm450-supermicro-x9sci-ln4f.dts @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +// Copyright 2021 Jonathan Neuschäfer + +/dts-v1/; + +/* The last 16 MiB are dedicated to the GPU */ +/memreserve/ 0x07000000 0x01000000; + +#include "nuvoton-wpcm450.dtsi" + +/ { + model = "Supermicro X9SCi-LN4F BMC"; + compatible = "supermicro,x9sci-ln4f-bmc", "nuvoton,wpcm450"; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + memory@0 { + device_type = "memory"; + reg = <0 0x08000000>; /* 128 MiB */ + }; +}; + +&serial0 { + /* + * Debug serial port. TX is exposed on the right pad of unpopulated + * resistor R1247, RX on the right pad of R1162. + */ + status = "okay"; +}; + +&serial1 { + /* "Serial over LAN" port. Connected to ttyS2 of the host system. */ + status = "okay"; +}; + +&watchdog0 { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/nuvoton-wpcm450.dtsi b/arch/arm/boot/dts/nuvoton-wpcm450.dtsi new file mode 100644 index 000000000000..d7cbeb187484 --- /dev/null +++ b/arch/arm/boot/dts/nuvoton-wpcm450.dtsi @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +// Copyright 2021 Jonathan Neuschäfer + +#include <dt-bindings/interrupt-controller/irq.h> + +/ { + compatible = "nuvoton,wpcm450"; + #address-cells = <1>; + #size-cells = <1>; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu@0 { + compatible = "arm,arm926ej-s"; + device_type = "cpu"; + reg = <0>; + }; + }; + + clk24m: clock-24mhz { + /* 24 MHz dummy clock */ + compatible = "fixed-clock"; + clock-frequency = <24000000>; + #clock-cells = <0>; + }; + + soc { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + interrupt-parent = <&aic>; + ranges; + + serial0: serial@b8000000 { + compatible = "nuvoton,wpcm450-uart"; + reg = <0xb8000000 0x20>; + reg-shift = <2>; + interrupts = <7 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk24m>; + status = "disabled"; + }; + + serial1: serial@b8000100 { + compatible = "nuvoton,wpcm450-uart"; + reg = <0xb8000100 0x20>; + reg-shift = <2>; + interrupts = <8 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk24m>; + status = "disabled"; + }; + + timer0: timer@b8001000 { + compatible = "nuvoton,wpcm450-timer"; + interrupts = <12 IRQ_TYPE_LEVEL_HIGH>; + reg = <0xb8001000 0x1c>; + clocks = <&clk24m>; + }; + + watchdog0: watchdog@b800101c { + compatible = "nuvoton,wpcm450-wdt"; + interrupts = <1 IRQ_TYPE_LEVEL_HIGH>; + reg = <0xb800101c 0x4>; + clocks = <&clk24m>; + status = "disabled"; + }; + + aic: interrupt-controller@b8002000 { + compatible = "nuvoton,wpcm450-aic"; + reg = <0xb8002000 0x1000>; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; +}; diff --git a/arch/arm/boot/dts/omap3-echo.dts b/arch/arm/boot/dts/omap3-echo.dts index b9fd113979f2..8f02ff5e7da6 100644 --- a/arch/arm/boot/dts/omap3-echo.dts +++ b/arch/arm/boot/dts/omap3-echo.dts @@ -7,6 +7,7 @@ #include "dm3725.dtsi" #include <dt-bindings/input/input.h> +#include <dt-bindings/leds/common.h> / { model = "Amazon Echo (first generation)"; @@ -139,179 +140,367 @@ clock-frequency = <400000>; lp5523A: lp5523A@32 { + #address-cells = <1>; + #size-cells = <0>; compatible = "national,lp5523"; label = "q1"; reg = <0x32>; clock-mode = /bits/ 
8 <0>; /* LP55XX_CLOCK_AUTO */ enable-gpio = <&gpio4 13 GPIO_ACTIVE_HIGH>; /* GPIO_109 */ - chan0 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan1 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan2 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan3 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan4 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan5 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan6 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan7 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan8 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; + multi-led@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x0>; + color = <LED_COLOR_ID_RGB>; + + led@0 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x0>; + color = <LED_COLOR_ID_GREEN>; + }; + + led@1 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x1>; + color = <LED_COLOR_ID_BLUE>; + }; + + led@6 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x6>; + color = <LED_COLOR_ID_RED>; + }; + }; + multi-led@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x1>; + color = <LED_COLOR_ID_RGB>; + + led@2 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x2>; + color = <LED_COLOR_ID_GREEN>; + }; + + led@3 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x3>; + color = <LED_COLOR_ID_BLUE>; + }; + + led@7 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x7>; + color = <LED_COLOR_ID_RED>; + }; + }; + multi-led@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x2>; + color = <LED_COLOR_ID_RGB>; + + led@4 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x4>; + color = <LED_COLOR_ID_GREEN>; + }; + + led@5 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x5>; + color = <LED_COLOR_ID_BLUE>; + }; + + led@8 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x8>; + color = <LED_COLOR_ID_RED>; + }; }; }; lp5523B: lp5523B@33 { + #address-cells = <1>; + #size-cells = <0>; compatible = "national,lp5523"; label = "q3"; reg = <0x33>; clock-mode = /bits/ 8 <0>; /* LP55XX_CLOCK_AUTO */ - chan0 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan1 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan2 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan3 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan4 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan5 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan6 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan7 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan8 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; + multi-led@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x0>; + color = <LED_COLOR_ID_RGB>; + + led@0 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x0>; + color = <LED_COLOR_ID_GREEN>; + }; + + led@1 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x1>; + color = <LED_COLOR_ID_BLUE>; + }; + + led@6 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x6>; + color = <LED_COLOR_ID_RED>; + }; + }; + multi-led@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x1>; + color = <LED_COLOR_ID_RGB>; + + led@2 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x2>; + color = 
<LED_COLOR_ID_GREEN>; + }; + + led@3 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x3>; + color = <LED_COLOR_ID_BLUE>; + }; + + led@7 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x7>; + color = <LED_COLOR_ID_RED>; + }; + }; + multi-led@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x2>; + color = <LED_COLOR_ID_RGB>; + + led@4 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x4>; + color = <LED_COLOR_ID_GREEN>; + }; + + led@5 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x5>; + color = <LED_COLOR_ID_BLUE>; + }; + + led@8 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x8>; + color = <LED_COLOR_ID_RED>; + }; }; }; lp5523C: lp5523C@34 { + #address-cells = <1>; + #size-cells = <0>; compatible = "national,lp5523"; label = "q4"; reg = <0x34>; clock-mode = /bits/ 8 <0>; /* LP55XX_CLOCK_AUTO */ - chan0 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan1 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan2 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan3 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan4 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan5 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan6 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan7 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan8 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; + multi-led@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x0>; + color = <LED_COLOR_ID_RGB>; + + led@0 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x0>; + color = <LED_COLOR_ID_GREEN>; + }; + + led@1 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x1>; + color = <LED_COLOR_ID_BLUE>; + }; + + led@6 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x6>; + color = <LED_COLOR_ID_RED>; + }; + }; + multi-led@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x1>; + color = <LED_COLOR_ID_RGB>; + + led@2 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x2>; + color = <LED_COLOR_ID_GREEN>; + }; + + led@3 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x3>; + color = <LED_COLOR_ID_BLUE>; + }; + + led@7 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x7>; + color = <LED_COLOR_ID_RED>; + }; + }; + multi-led@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x2>; + color = <LED_COLOR_ID_RGB>; + + led@4 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x4>; + color = <LED_COLOR_ID_GREEN>; + }; + + led@5 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x5>; + color = <LED_COLOR_ID_BLUE>; + }; + + led@8 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x8>; + color = <LED_COLOR_ID_RED>; + }; }; }; lp5523D: lp552D@35 { + #address-cells = <1>; + #size-cells = <0>; compatible = "national,lp5523"; label = "q2"; reg = <0x35>; clock-mode = /bits/ 8 <0>; /* LP55XX_CLOCK_AUTO */ - chan0 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan1 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan2 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan3 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan4 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan5 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan6 { - led-cur = /bits/ 8 <12>; - max-cur = 
/bits/ 8 <15>; - }; - chan7 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; - }; - chan8 { - led-cur = /bits/ 8 <12>; - max-cur = /bits/ 8 <15>; + multi-led@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x0>; + color = <LED_COLOR_ID_RGB>; + + led@0 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x0>; + color = <LED_COLOR_ID_GREEN>; + }; + + led@1 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x1>; + color = <LED_COLOR_ID_BLUE>; + }; + + led@6 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x6>; + color = <LED_COLOR_ID_RED>; + }; + }; + multi-led@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x1>; + color = <LED_COLOR_ID_RGB>; + + led@2 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x2>; + color = <LED_COLOR_ID_GREEN>; + }; + + led@3 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x3>; + color = <LED_COLOR_ID_BLUE>; + }; + + led@7 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x7>; + color = <LED_COLOR_ID_RED>; + }; + }; + multi-led@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x2>; + color = <LED_COLOR_ID_RGB>; + + led@4 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x4>; + color = <LED_COLOR_ID_GREEN>; + }; + + led@5 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x5>; + color = <LED_COLOR_ID_BLUE>; + }; + + led@8 { + led-cur = /bits/ 8 <12>; + max-cur = /bits/ 8 <15>; + reg = <0x8>; + color = <LED_COLOR_ID_RED>; + }; }; }; }; @@ -417,6 +606,8 @@ }; &mmc3 { + #address-cells = <1>; + #size-cells = <0>; status = "okay"; bus-width = <4>; pinctrl-names = "default"; @@ -426,6 +617,11 @@ mmc-pwrseq = <&sdio_pwrseq>; vmmc-supply = <&vcc3v3>; vqmmc-supply = <&vcc1v8>; + atheros@0 { + compatible = "atheros,ath6kl"; + reg = <0>; + bus-width = <4>; + }; }; &tps { diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi index 9dcae1f2bc99..c5b9da0d7e6c 100644 --- a/arch/arm/boot/dts/omap3.dtsi +++ b/arch/arm/boot/dts/omap3.dtsi @@ -24,6 +24,9 @@ i2c0 = &i2c1; i2c1 = &i2c2; i2c2 = &i2c3; + mmc0 = &mmc1; + mmc1 = &mmc2; + mmc2 = &mmc3; serial0 = &uart1; serial1 = &uart2; serial2 = &uart3; diff --git a/arch/arm/boot/dts/omap4-l4.dtsi b/arch/arm/boot/dts/omap4-l4.dtsi index e0bb60a30779..99721673d7af 100644 --- a/arch/arm/boot/dts/omap4-l4.dtsi +++ b/arch/arm/boot/dts/omap4-l4.dtsi @@ -1,6 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 &l4_cfg { /* 0x4a000000 */ - compatible = "ti,omap4-l4-cfg", "simple-bus"; + compatible = "ti,omap4-l4-cfg", "simple-pm-bus"; + power-domains = <&prm_core>; + clocks = <&l4_cfg_clkctrl OMAP4_L4_CFG_CLKCTRL 0>; + clock-names = "fck"; reg = <0x4a000000 0x800>, <0x4a000800 0x800>, <0x4a001000 0x1000>; @@ -16,7 +19,7 @@ <0x00300000 0x4a300000 0x080000>; /* segment 6 */ segment@0 { /* 0x4a000000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */ @@ -43,7 +46,6 @@ target-module@2000 { /* 0x4a002000, ap 3 06.0 */ compatible = "ti,sysc-omap4", "ti,sysc"; - ti,hwmods = "ctrl_module_core"; reg = <0x2000 0x4>, <0x2010 0x4>; reg-names = "rev", "sysc"; @@ -347,7 +349,7 @@ }; segment@80000 { /* 0x4a080000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00059000 0x000d9000 0x001000>, /* ap 13 */ @@ -639,7 +641,7 @@ }; segment@100000 { /* 0x4a100000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; 
#address-cells = <1>; #size-cells = <1>; ranges = <0x00000000 0x00100000 0x001000>, /* ap 21 */ @@ -653,7 +655,6 @@ target-module@0 { /* 0x4a100000, ap 21 2a.0 */ compatible = "ti,sysc-omap4", "ti,sysc"; - ti,hwmods = "ctrl_module_pad_core"; reg = <0x0 0x4>, <0x10 0x4>; reg-names = "rev", "sysc"; @@ -741,13 +742,13 @@ }; segment@180000 { /* 0x4a180000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; }; segment@200000 { /* 0x4a200000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x0001e000 0x0021e000 0x001000>, /* ap 31 */ @@ -903,13 +904,13 @@ }; segment@280000 { /* 0x4a280000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; }; l4_cfg_segment_300000: segment@300000 { /* 0x4a300000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00000000 0x00300000 0x020000>, /* ap 67 */ @@ -944,7 +945,10 @@ }; &l4_wkup { /* 0x4a300000 */ - compatible = "ti,omap4-l4-wkup", "simple-bus"; + compatible = "ti,omap4-l4-wkup", "simple-pm-bus"; + power-domains = <&prm_wkup>; + clocks = <&l4_wkup_clkctrl OMAP4_L4_WKUP_CLKCTRL 0>; + clock-names = "fck"; reg = <0x4a300000 0x800>, <0x4a300800 0x800>, <0x4a301000 0x1000>; @@ -956,7 +960,7 @@ <0x00020000 0x4a320000 0x010000>; /* segment 2 */ segment@0 { /* 0x4a300000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */ @@ -1041,7 +1045,6 @@ target-module@c000 { /* 0x4a30c000, ap 19 2c.0 */ compatible = "ti,sysc-omap4", "ti,sysc"; - ti,hwmods = "ctrl_module_wkup"; reg = <0xc000 0x4>, <0xc010 0x4>; reg-names = "rev", "sysc"; @@ -1062,7 +1065,7 @@ }; segment@10000 { /* 0x4a310000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00000000 0x00010000 0x001000>, /* ap 5 */ @@ -1202,7 +1205,6 @@ target-module@e000 { /* 0x4a31e000, ap 21 30.0 */ compatible = "ti,sysc-omap4", "ti,sysc"; - ti,hwmods = "ctrl_module_pad_wkup"; reg = <0xe000 0x4>, <0xe010 0x4>; reg-names = "rev", "sysc"; @@ -1231,7 +1233,7 @@ }; segment@20000 { /* 0x4a320000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00006000 0x00026000 0x001000>, /* ap 13 */ @@ -1284,7 +1286,10 @@ }; &l4_per { /* 0x48000000 */ - compatible = "ti,omap4-l4-per", "simple-bus"; + compatible = "ti,omap4-l4-per", "simple-pm-bus"; + power-domains = <&prm_l4per>; + clocks = <&l4_per_clkctrl OMAP4_L4_PER_CLKCTRL 0>; + clock-names = "fck"; reg = <0x48000000 0x800>, <0x48000800 0x800>, <0x48001000 0x400>, @@ -1298,7 +1303,7 @@ <0x00200000 0x48200000 0x200000>; /* segment 1 */ segment@0 { /* 0x48000000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */ @@ -2437,7 +2442,7 @@ }; segment@200000 { /* 0x48200000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00150000 0x00350000 0x001000>, /* ap 77 */ diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi index 72e4f6481776..2bbff9032be3 100644 --- a/arch/arm/boot/dts/omap4.dtsi +++ b/arch/arm/boot/dts/omap4.dtsi @@ -22,6 +22,11 @@ i2c1 = &i2c2; i2c2 = &i2c3; i2c3 = &i2c4; + mmc0 = &mmc1; + mmc1 = &mmc2; + mmc2 = &mmc3; 
+ mmc3 = &mmc4; + mmc4 = &mmc5; serial0 = &uart1; serial1 = &uart2; serial2 = &uart3; @@ -54,14 +59,12 @@ }; /* - * Note that 4430 needs cross trigger interface (CTI) supported - * before we can configure the interrupts. This means sampling - * events are not supported for pmu. Note that 4460 does not use - * CTI, see also 4460.dtsi. + * Needed early by omap4_sram_init() for barrier, do not move to l3 + * interconnect as simple-pm-bus probes at module_init() time. */ - pmu { - compatible = "arm,cortex-a9-pmu"; - ti,hwmods = "debugss"; + ocmcram: sram@40304000 { + compatible = "mmio-sram"; + reg = <0x40304000 0xa000>; /* 40k */ }; gic: interrupt-controller@48241000 { @@ -97,19 +100,6 @@ }; /* - * The soc node represents the soc top level view. It is used for IPs - * that are not memory mapped in the MPU view or for the MPU itself. - */ - soc { - compatible = "ti,omap-infra"; - mpu { - compatible = "ti,omap4-mpu"; - ti,hwmods = "mpu"; - sram = <&ocmcram>; - }; - }; - - /* * XXX: Use a flat representation of the OMAP4 interconnect. * The real OMAP interconnect network is quite complex. * Since it will not bring real advantage to represent that in DT for @@ -117,16 +107,23 @@ * hierarchy. */ ocp { - compatible = "ti,omap4-l3-noc", "simple-bus"; + compatible = "simple-pm-bus"; + power-domains = <&prm_l4per>; + clocks = <&l3_1_clkctrl OMAP4_L3_MAIN_1_CLKCTRL 0>, + <&l3_2_clkctrl OMAP4_L3_MAIN_2_CLKCTRL 0>, + <&l3_instr_clkctrl OMAP4_L3_MAIN_3_CLKCTRL 0>; #address-cells = <1>; #size-cells = <1>; ranges; - ti,hwmods = "l3_main_1", "l3_main_2", "l3_main_3"; - reg = <0x44000000 0x1000>, - <0x44800000 0x2000>, - <0x45000000 0x1000>; - interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; + + l3-noc@44000000 { + compatible = "ti,omap4-l3-noc"; + reg = <0x44000000 0x1000>, + <0x44800000 0x2000>, + <0x45000000 0x1000>; + interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; + }; l4_wkup: interconnect@4a300000 { }; @@ -137,12 +134,22 @@ l4_per: interconnect@48000000 { }; - l4_abe: interconnect@40100000 { + target-module@48210000 { + compatible = "ti,sysc-omap4-simple", "ti,sysc"; + power-domains = <&prm_mpu>; + clocks = <&mpuss_clkctrl OMAP4_MPU_CLKCTRL 0>; + clock-names = "fck"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x48210000 0x1f0000>; + + mpu { + compatible = "ti,omap4-mpu"; + sram = <&ocmcram>; + }; }; - ocmcram: sram@40304000 { - compatible = "mmio-sram"; - reg = <0x40304000 0xa000>; /* 40k */ + l4_abe: interconnect@40100000 { }; target-module@50000000 { @@ -184,7 +191,6 @@ target-module@52000000 { compatible = "ti,sysc-omap4", "ti,sysc"; - ti,hwmods = "iss"; reg = <0x52000000 0x4>, <0x52000010 0x4>; reg-names = "rev", "sysc"; @@ -198,6 +204,7 @@ <SYSC_IDLE_SMART>, <SYSC_IDLE_SMART_WKUP>; ti,sysc-delay-us = <2>; + power-domains = <&prm_cam>; clocks = <&iss_clkctrl OMAP4_ISS_CLKCTRL 0>; clock-names = "fck"; #address-cells = <1>; @@ -207,6 +214,26 @@ /* No child device binding, driver in staging */ }; + /* + * Note that 4430 needs cross trigger interface (CTI) supported + * before we can configure the interrupts. This means sampling + * events are not supported for pmu. Note that 4460 does not use + * CTI, see also 4460.dtsi. 
+ */ + target-module@54000000 { + compatible = "ti,sysc-omap4-simple", "ti,sysc"; + power-domains = <&prm_emu>; + clocks = <&emu_sys_clkctrl OMAP4_DEBUGSS_CLKCTRL 0>; + clock-names = "fck"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x54000000 0x1000000>; + + pmu: pmu { + compatible = "arm,cortex-a9-pmu"; + }; + }; + target-module@55082000 { compatible = "ti,sysc-omap2", "ti,sysc"; reg = <0x55082000 0x4>, @@ -256,35 +283,67 @@ /* No child device binding or driver in mainline */ }; - dmm@4e000000 { - compatible = "ti,omap4-dmm"; - reg = <0x4e000000 0x800>; - interrupts = <0 113 0x4>; - ti,hwmods = "dmm"; + target-module@4e000000 { + compatible = "ti,sysc-omap2", "ti,sysc"; + reg = <0x4e000000 0x4>, + <0x4e000010 0x4>; + reg-names = "rev", "sysc"; + ti,sysc-sidle = <SYSC_IDLE_FORCE>, + <SYSC_IDLE_NO>, + <SYSC_IDLE_SMART>; + ranges = <0x0 0x4e000000 0x2000000>; + #size-cells = <1>; + #address-cells = <1>; + + dmm@0 { + compatible = "ti,omap4-dmm"; + reg = <0 0x800>; + interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>; + }; }; - emif1: emif@4c000000 { - compatible = "ti,emif-4d"; - reg = <0x4c000000 0x100>; - interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>; - ti,hwmods = "emif1"; - ti,no-idle-on-init; - phy-type = <1>; - hw-caps-read-idle-ctrl; - hw-caps-ll-interface; - hw-caps-temp-alert; + target-module@4c000000 { + compatible = "ti,sysc-omap4-simple", "ti,sysc"; + reg = <0x4c000000 0x4>; + reg-names = "rev"; + clocks = <&l3_emif_clkctrl OMAP4_EMIF1_CLKCTRL 0>; + clock-names = "fck"; + ti,no-idle; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x4c000000 0x1000000>; + + emif1: emif@0 { + compatible = "ti,emif-4d"; + reg = <0 0x100>; + interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>; + phy-type = <1>; + hw-caps-read-idle-ctrl; + hw-caps-ll-interface; + hw-caps-temp-alert; + }; }; - emif2: emif@4d000000 { - compatible = "ti,emif-4d"; - reg = <0x4d000000 0x100>; - interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>; - ti,hwmods = "emif2"; - ti,no-idle-on-init; - phy-type = <1>; - hw-caps-read-idle-ctrl; - hw-caps-ll-interface; - hw-caps-temp-alert; + target-module@4d000000 { + compatible = "ti,sysc-omap4-simple", "ti,sysc"; + reg = <0x4d000000 0x4>; + reg-names = "rev"; + clocks = <&l3_emif_clkctrl OMAP4_EMIF2_CLKCTRL 0>; + clock-names = "fck"; + ti,no-idle; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x4d000000 0x1000000>; + + emif2: emif@0 { + compatible = "ti,emif-4d"; + reg = <0 0x100>; + interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>; + phy-type = <1>; + hw-caps-read-idle-ctrl; + hw-caps-ll-interface; + hw-caps-temp-alert; + }; }; dsp: dsp { @@ -435,6 +494,7 @@ <SYSC_IDLE_NO>, <SYSC_IDLE_SMART>, <SYSC_IDLE_SMART_WKUP>; + power-domains = <&prm_gfx>; clocks = <&l3_gfx_clkctrl OMAP4_GPU_CLKCTRL 0>; clock-names = "fck"; #address-cells = <1>; diff --git a/arch/arm/boot/dts/omap4460.dtsi b/arch/arm/boot/dts/omap4460.dtsi index 2d3e54901b6e..3d6db1db94e0 100644 --- a/arch/arm/boot/dts/omap4460.dtsi +++ b/arch/arm/boot/dts/omap4460.dtsi @@ -26,13 +26,6 @@ }; }; - pmu { - compatible = "arm,cortex-a9-pmu"; - interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>; - ti,hwmods = "debugss"; - }; - thermal-zones { #include "omap4-cpu-thermal.dtsi" }; @@ -128,4 +121,10 @@ <0x00030000 0x00030000 0x00010000>; }; +&pmu { + compatible = "arm,cortex-a9-pmu"; + interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>; +}; + /include/ "omap446x-clocks.dtsi" diff --git a/arch/arm/boot/dts/omap44xx-clocks.dtsi 
b/arch/arm/boot/dts/omap44xx-clocks.dtsi index 532868591107..1f1c04d8f472 100644 --- a/arch/arm/boot/dts/omap44xx-clocks.dtsi +++ b/arch/arm/boot/dts/omap44xx-clocks.dtsi @@ -770,14 +770,6 @@ ti,max-div = <2>; }; - sha2md5_fck: sha2md5_fck@15c8 { - #clock-cells = <0>; - compatible = "ti,gate-clock"; - clocks = <&l3_div_ck>; - ti,bit-shift = <1>; - reg = <0x15c8>; - }; - usb_phy_cm_clk32k: usb_phy_cm_clk32k@640 { #clock-cells = <0>; compatible = "ti,gate-clock"; diff --git a/arch/arm/boot/dts/omap5-l4.dtsi b/arch/arm/boot/dts/omap5-l4.dtsi index 887b3359dd5a..b148b289e830 100644 --- a/arch/arm/boot/dts/omap5-l4.dtsi +++ b/arch/arm/boot/dts/omap5-l4.dtsi @@ -1,5 +1,8 @@ &l4_cfg { /* 0x4a000000 */ - compatible = "ti,omap5-l4-cfg", "simple-bus"; + compatible = "ti,omap5-l4-cfg", "simple-pm-bus"; + power-domains = <&prm_core>; + clocks = <&l4cfg_clkctrl OMAP5_L4_CFG_CLKCTRL 0>; + clock-names = "fck"; reg = <0x4a000000 0x800>, <0x4a000800 0x800>, <0x4a001000 0x1000>; @@ -15,7 +18,7 @@ <0x00300000 0x4a300000 0x080000>; /* segment 6 */ segment@0 { /* 0x4a000000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */ @@ -391,7 +394,7 @@ }; segment@80000 { /* 0x4a080000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00059000 0x000d9000 0x001000>, /* ap 13 */ @@ -654,7 +657,7 @@ }; segment@100000 { /* 0x4a100000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00002000 0x00102000 0x001000>, /* ap 59 */ @@ -691,22 +694,44 @@ }; target-module@40000 { /* 0x4a140000, ap 101 16.0 */ - compatible = "ti,sysc"; - status = "disabled"; - #address-cells = <1>; + compatible = "ti,sysc-omap4", "ti,sysc"; + reg = <0x400fc 4>, + <0x41100 4>; + reg-names = "rev", "sysc"; + ti,sysc-midle = <SYSC_IDLE_FORCE>, + <SYSC_IDLE_NO>, + <SYSC_IDLE_SMART>; + ti,sysc-sidle = <SYSC_IDLE_FORCE>, + <SYSC_IDLE_NO>, + <SYSC_IDLE_SMART>, + <SYSC_IDLE_SMART_WKUP>; + power-domains = <&prm_l3init>; + clocks = <&l3init_clkctrl OMAP5_SATA_CLKCTRL 0>; + clock-names = "fck"; #size-cells = <1>; + #address-cells = <1>; ranges = <0x0 0x40000 0x10000>; + + sata: sata@0 { + compatible = "snps,dwc-ahci"; + reg = <0 0x1100>, <0x1100 0x8>; + interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>; + phys = <&sata_phy>; + phy-names = "sata-phy"; + clocks = <&l3init_clkctrl OMAP5_SATA_CLKCTRL 8>; + ports-implemented = <0x1>; + }; }; }; segment@180000 { /* 0x4a180000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; }; segment@200000 { /* 0x4a200000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x0001e000 0x0021e000 0x001000>, /* ap 29 */ @@ -912,20 +937,23 @@ }; segment@280000 { /* 0x4a280000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; }; segment@300000 { /* 0x4a300000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; }; }; &l4_per { /* 0x48000000 */ - compatible = "ti,omap5-l4-per", "simple-bus"; + compatible = "ti,omap5-l4-per", "simple-pm-bus"; + power-domains = <&prm_core>; + clocks = <&l4per_clkctrl OMAP5_L4_PER_CLKCTRL 0>; + clock-names = "fck"; reg = <0x48000000 0x800>, <0x48000800 0x800>, <0x48001000 0x400>, @@ -939,7 +967,7 @@ <0x00200000 0x48200000 0x200000>; /* segment 1 */ 
segment@0 { /* 0x48000000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */ @@ -2148,14 +2176,17 @@ }; segment@200000 { /* 0x48200000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; }; }; &l4_wkup { /* 0x4ae00000 */ - compatible = "ti,omap5-l4-wkup", "simple-bus"; + compatible = "ti,omap5-l4-wkup", "simple-pm-bus"; + power-domains = <&prm_wkupaon>; + clocks = <&wkupaon_clkctrl OMAP5_L4_WKUP_CLKCTRL 0>; + clock-names = "fck"; reg = <0x4ae00000 0x800>, <0x4ae00800 0x800>, <0x4ae01000 0x1000>; @@ -2167,7 +2198,7 @@ <0x00020000 0x4ae20000 0x010000>; /* segment 2 */ segment@0 { /* 0x4ae00000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */ @@ -2296,7 +2327,7 @@ }; segment@10000 { /* 0x4ae10000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00000000 0x00010000 0x001000>, /* ap 5 */ @@ -2423,7 +2454,7 @@ }; segment@20000 { /* 0x4ae20000 */ - compatible = "simple-bus"; + compatible = "simple-pm-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0x00006000 0x00026000 0x001000>, /* ap 13 */ diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index e025b7c9a357..bac6fa838793 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi @@ -25,6 +25,11 @@ i2c2 = &i2c3; i2c3 = &i2c4; i2c4 = &i2c5; + mmc0 = &mmc1; + mmc1 = &mmc2; + mmc2 = &mmc3; + mmc3 = &mmc4; + mmc4 = &mmc5; serial0 = &uart1; serial1 = &uart2; serial2 = &uart3; @@ -101,6 +106,15 @@ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>; }; + /* + * Needed early by omap4_sram_init() for barrier, do not move to l3 + * interconnect as simple-pm-bus probes at module_init() time. + */ + ocmcram: sram@40300000 { + compatible = "mmio-sram"; + reg = <0 0x40300000 0 0x20000>; /* 128k */ + }; + gic: interrupt-controller@48211000 { compatible = "arm,cortex-a15-gic"; interrupt-controller; @@ -121,19 +135,6 @@ }; /* - * The soc node represents the soc top level view. It is used for IPs - * that are not memory mapped in the MPU view or for the MPU itself. - */ - soc { - compatible = "ti,omap-infra"; - mpu { - compatible = "ti,omap4-mpu"; - ti,hwmods = "mpu"; - sram = <&ocmcram>; - }; - }; - - /* * XXX: Use a flat representation of the OMAP3 interconnect. * The real OMAP interconnect network is quite complex. * Since it will not bring real advantage to represent that in DT for @@ -141,17 +142,24 @@ * hierarchy. 
*/ ocp { - compatible = "ti,omap5-l3-noc", "simple-bus"; + compatible = "simple-pm-bus"; + power-domains = <&prm_core>; + clocks = <&l3main1_clkctrl OMAP5_L3_MAIN_1_CLKCTRL 0>, + <&l3main2_clkctrl OMAP5_L3_MAIN_2_CLKCTRL 0>, + <&l3instr_clkctrl OMAP5_L3_MAIN_3_CLKCTRL 0>; #address-cells = <1>; #size-cells = <1>; ranges = <0 0 0 0xc0000000>; dma-ranges = <0x80000000 0x0 0x80000000 0x80000000>; - ti,hwmods = "l3_main_1", "l3_main_2", "l3_main_3"; - reg = <0 0x44000000 0 0x2000>, - <0 0x44800000 0 0x3000>, - <0 0x45000000 0 0x4000>; - interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; + + l3-noc@44000000 { + compatible = "ti,omap5-l3-noc"; + reg = <0x44000000 0x2000>, + <0x44800000 0x3000>, + <0x45000000 0x4000>; + interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; + }; l4_wkup: interconnect@4ae00000 { }; @@ -162,31 +170,58 @@ l4_per: interconnect@48000000 { }; - l4_abe: interconnect@40100000 { + target-module@48210000 { + compatible = "ti,sysc-omap4-simple", "ti,sysc"; + power-domains = <&prm_mpu>; + clocks = <&mpu_clkctrl OMAP5_MPU_CLKCTRL 0>; + clock-names = "fck"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x48210000 0x1f0000>; + + mpu { + compatible = "ti,omap4-mpu"; + sram = <&ocmcram>; + }; }; - ocmcram: sram@40300000 { - compatible = "mmio-sram"; - reg = <0x40300000 0x20000>; /* 128k */ + l4_abe: interconnect@40100000 { }; - gpmc: gpmc@50000000 { - compatible = "ti,omap4430-gpmc"; - reg = <0x50000000 0x1000>; - #address-cells = <2>; - #size-cells = <1>; - interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>; - dmas = <&sdma 4>; - dma-names = "rxtx"; - gpmc,num-cs = <8>; - gpmc,num-waitpins = <4>; - ti,hwmods = "gpmc"; - clocks = <&l3_iclk_div>; + target-module@50000000 { + compatible = "ti,sysc-omap2", "ti,sysc"; + reg = <0x50000000 4>, + <0x50000010 4>, + <0x50000014 4>; + reg-names = "rev", "sysc", "syss"; + ti,sysc-sidle = <SYSC_IDLE_FORCE>, + <SYSC_IDLE_NO>, + <SYSC_IDLE_SMART>; + ti,syss-mask = <1>; + ti,no-idle-on-init; + clocks = <&l3main2_clkctrl OMAP5_L3_MAIN_2_GPMC_CLKCTRL 0>; clock-names = "fck"; - interrupt-controller; - #interrupt-cells = <2>; - gpio-controller; - #gpio-cells = <2>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x50000000 0x50000000 0x00001000>, /* regs */ + <0x00000000 0x00000000 0x40000000>; /* data */ + + gpmc: gpmc@50000000 { + compatible = "ti,omap4430-gpmc"; + reg = <0x50000000 0x1000>; + #address-cells = <2>; + #size-cells = <1>; + interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>; + dmas = <&sdma 4>; + dma-names = "rxtx"; + gpmc,num-cs = <8>; + gpmc,num-waitpins = <4>; + clock-names = "fck"; + interrupt-controller; + #interrupt-cells = <2>; + gpio-controller; + #gpio-cells = <2>; + }; }; target-module@55082000 { @@ -241,35 +276,67 @@ status = "disabled"; }; - dmm@4e000000 { - compatible = "ti,omap5-dmm"; - reg = <0x4e000000 0x800>; - interrupts = <0 113 0x4>; - ti,hwmods = "dmm"; + target-module@4e000000 { + compatible = "ti,sysc-omap2", "ti,sysc"; + reg = <0x4e000000 0x4>, + <0x4e000010 0x4>; + reg-names = "rev", "sysc"; + ti,sysc-sidle = <SYSC_IDLE_FORCE>, + <SYSC_IDLE_NO>, + <SYSC_IDLE_SMART>; + ranges = <0x0 0x4e000000 0x2000000>; + #size-cells = <1>; + #address-cells = <1>; + + dmm@0 { + compatible = "ti,omap5-dmm"; + reg = <0 0x800>; + interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>; + }; }; - emif1: emif@4c000000 { - compatible = "ti,emif-4d5"; - ti,hwmods = "emif1"; - ti,no-idle-on-init; - phy-type = <2>; /* DDR PHY type: Intelli PHY */ - reg = <0x4c000000 0x400>; 
- interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>; - hw-caps-read-idle-ctrl; - hw-caps-ll-interface; - hw-caps-temp-alert; + target-module@4c000000 { + compatible = "ti,sysc-omap4-simple", "ti,sysc"; + reg = <0x4c000000 0x4>; + reg-names = "rev"; + clocks = <&emif_clkctrl OMAP5_EMIF1_CLKCTRL 0>; + clock-names = "fck"; + ti,no-idle; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x4c000000 0x1000000>; + + emif1: emif@0 { + compatible = "ti,emif-4d5"; + reg = <0 0x400>; + interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>; + phy-type = <2>; /* DDR PHY type: Intelli PHY */ + hw-caps-read-idle-ctrl; + hw-caps-ll-interface; + hw-caps-temp-alert; + }; }; - emif2: emif@4d000000 { - compatible = "ti,emif-4d5"; - ti,hwmods = "emif2"; - ti,no-idle-on-init; - phy-type = <2>; /* DDR PHY type: Intelli PHY */ - reg = <0x4d000000 0x400>; - interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>; - hw-caps-read-idle-ctrl; - hw-caps-ll-interface; - hw-caps-temp-alert; + target-module@4d000000 { + compatible = "ti,sysc-omap4-simple", "ti,sysc"; + reg = <0x4d000000 0x4>; + reg-names = "rev"; + clocks = <&emif_clkctrl OMAP5_EMIF2_CLKCTRL 0>; + clock-names = "fck"; + ti,no-idle; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x4d000000 0x1000000>; + + emif2: emif@0 { + compatible = "ti,emif-4d5"; + reg = <0 0x400>; + interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>; + phy-type = <2>; /* DDR PHY type: Intelli PHY */ + hw-caps-read-idle-ctrl; + hw-caps-ll-interface; + hw-caps-temp-alert; + }; }; aes1_target: target-module@4b501000 { @@ -369,18 +436,6 @@ #thermal-sensor-cells = <1>; }; - /* OCP2SCP3 */ - sata: sata@4a141100 { - compatible = "snps,dwc-ahci"; - reg = <0x4a140000 0x1100>, <0x4a141100 0x7>; - interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>; - phys = <&sata_phy>; - phy-names = "sata-phy"; - clocks = <&l3init_clkctrl OMAP5_SATA_CLKCTRL 8>; - ti,hwmods = "sata"; - ports-implemented = <0x1>; - }; - target-module@56000000 { compatible = "ti,sysc-omap4", "ti,sysc"; reg = <0x5600fe00 0x4>, diff --git a/arch/arm/boot/dts/owl-s500-roseapplepi.dts b/arch/arm/boot/dts/owl-s500-roseapplepi.dts index ff91561ca99c..b8c5db2344aa 100644 --- a/arch/arm/boot/dts/owl-s500-roseapplepi.dts +++ b/arch/arm/boot/dts/owl-s500-roseapplepi.dts @@ -2,7 +2,7 @@ /* * Roseapple Pi * - * Copyright (C) 2020 Cristian Ciocaltea <cristian.ciocaltea@gmail.com> + * Copyright (C) 2020-2021 Cristian Ciocaltea <cristian.ciocaltea@gmail.com> */ /dts-v1/; @@ -27,20 +27,140 @@ reg = <0x0 0x80000000>; /* 2GB */ }; - /* Fixed regulator used in the absence of PMIC */ - sd_vcc: sd-vcc { + syspwr: regulator-5v0 { compatible = "regulator-fixed"; - regulator-name = "fixed-3.1V"; - regulator-min-microvolt = <3100000>; - regulator-max-microvolt = <3100000>; + regulator-name = "SYSPWR"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; regulator-always-on; }; }; +&cpu0 { + cpu0-supply = <&vdd_cpu>; +}; + &i2c0 { status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&i2c0_pins>; + + atc260x: pmic@65 { + compatible = "actions,atc2603c"; + reg = <0x65>; + interrupt-parent = <&sirq>; + interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; + + reset-time-sec = <6>; + + regulators { + compatible = "actions,atc2603c-regulator"; + + dcdc1-supply = <&syspwr>; + dcdc2-supply = <&syspwr>; + dcdc3-supply = <&syspwr>; + ldo1-supply = <&syspwr>; + ldo2-supply = <&syspwr>; + ldo3-supply = <&syspwr>; + ldo5-supply = <&syspwr>; + ldo6-supply = <&syspwr>; + ldo7-supply = <&syspwr>; + ldo8-supply = <&syspwr>; + ldo11-supply = <&syspwr>; + 
ldo12-supply = <&syspwr>; + switchldo1-supply = <&vcc>; + + vdd_cpu: dcdc1 { + regulator-name = "VDD_CPU"; + regulator-min-microvolt = <700000>; + regulator-max-microvolt = <1400000>; + regulator-always-on; + }; + + vddq: dcdc2 { + regulator-name = "VDDQ"; + regulator-min-microvolt = <1300000>; + regulator-max-microvolt = <2150000>; + regulator-always-on; + regulator-boot-on; + }; + + vcc: dcdc3 { + regulator-name = "VCC"; + regulator-min-microvolt = <2600000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + vcc_3v3: ldo1 { + regulator-name = "VCC_3V3"; + regulator-min-microvolt = <2600000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + avcc: ldo2 { + regulator-name = "AVCC"; + regulator-min-microvolt = <2600000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + vdd_1v8: ldo3 { + regulator-name = "VDD_1V8"; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <2000000>; + regulator-always-on; + }; + + vcc_3v1: ldo5 { + regulator-name = "VCC_3V1"; + regulator-min-microvolt = <2600000>; + regulator-max-microvolt = <3300000>; + }; + + avdd: ldo6 { + regulator-name = "AVDD"; + regulator-min-microvolt = <700000>; + regulator-max-microvolt = <1400000>; + regulator-always-on; + }; + + sens_1v8: ldo7 { + regulator-name = "SENS_1V8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + + ldo8: ldo8 { + regulator-name = "LDO8"; + regulator-min-microvolt = <2300000>; + regulator-max-microvolt = <3300000>; + }; + + svcc: ldo11 { + regulator-name = "SVCC"; + regulator-min-microvolt = <2600000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + rtc_vdd: ldo12 { + regulator-name = "RTC_VDD"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + }; + + sd_vcc: switchldo1 { + regulator-name = "SD_VCC"; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + regulator-boot-on; + }; + }; + }; }; &i2c1 { diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi index 7bf1da916f25..ff1bdb10ad19 100644 --- a/arch/arm/boot/dts/qcom-ipq4019.dtsi +++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi @@ -480,7 +480,7 @@ status = "disabled"; }; - nand: qpic-nand@79b0000 { + nand: nand-controller@79b0000 { compatible = "qcom,ipq4019-nand"; reg = <0x79b0000 0x1000>; #address-cells = <1>; diff --git a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts index 0cda654371ae..30ee913faae6 100644 --- a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts +++ b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts @@ -529,6 +529,10 @@ pinctrl-names = "default"; pinctrl-0 = <&mpu6515_pin>; + mount-matrix = "0", "-1", "0", + "-1", "0", "0", + "0", "0", "1"; + i2c-gate { #address-cells = <1>; #size-cells = <0>; @@ -575,7 +579,7 @@ maxim,rcomp = /bits/ 8 <0x4d>; interrupt-parent = <&msmgpio>; - interrupts = <9 IRQ_TYPE_EDGE_FALLING>; + interrupts = <9 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&fuelgauge_pin>; diff --git a/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts b/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts index a0f7f461f48c..d737de7173cf 100644 --- a/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts +++ b/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts @@ -322,6 +322,27 @@ status = "okay"; }; + /* blsp2_uart8 */ + serial@f995e000 { + status = "okay"; + + pinctrl-names = "default", 
"sleep"; + pinctrl-0 = <&blsp2_uart8_pins_active>; + pinctrl-1 = <&blsp2_uart8_pins_sleep>; + + bluetooth { + compatible = "brcm,bcm43540-bt"; + max-speed = <3000000>; + pinctrl-names = "default"; + pinctrl-0 = <&bt_pins>; + device-wakeup-gpios = <&msmgpio 91 GPIO_ACTIVE_HIGH>; + shutdown-gpios = <&gpio_expander 9 GPIO_ACTIVE_HIGH>; + interrupt-parent = <&msmgpio>; + interrupts = <75 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "host-wakeup"; + }; + }; + gpio-keys { compatible = "gpio-keys"; input-name = "gpio-keys"; @@ -356,6 +377,35 @@ }; pinctrl@fd510000 { + blsp2_uart8_pins_active: blsp2-uart8-pins-active { + pins = "gpio45", "gpio46", "gpio47", "gpio48"; + function = "blsp_uart8"; + drive-strength = <8>; + bias-disable; + }; + + blsp2_uart8_pins_sleep: blsp2-uart8-pins-sleep { + pins = "gpio45", "gpio46", "gpio47", "gpio48"; + function = "gpio"; + drive-strength = <2>; + bias-pull-down; + }; + + bt_pins: bt-pins { + hostwake { + pins = "gpio75"; + function = "gpio"; + drive-strength = <16>; + input-enable; + }; + + devwake { + pins = "gpio91"; + function = "gpio"; + drive-strength = <2>; + }; + }; + sdhc1_pin_a: sdhc1-pin-active { clk { pins = "sdc1_clk"; @@ -717,7 +767,7 @@ maxim,rcomp = /bits/ 8 <0x56>; interrupt-parent = <&pma8084_gpios>; - interrupts = <21 IRQ_TYPE_EDGE_FALLING>; + interrupts = <21 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&fuelgauge_pin>; diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi index c65d33591efa..db4c06bf7888 100644 --- a/arch/arm/boot/dts/qcom-msm8974.dtsi +++ b/arch/arm/boot/dts/qcom-msm8974.dtsi @@ -715,6 +715,15 @@ status = "disabled"; }; + blsp2_uart8: serial@f995e000 { + compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; + reg = <0xf995e000 0x1000>; + interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>; + clock-names = "core", "iface"; + status = "disabled"; + }; + blsp2_uart10: serial@f9960000 { compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; reg = <0xf9960000 0x1000>; diff --git a/arch/arm/boot/dts/qcom-sdx55-t55.dts b/arch/arm/boot/dts/qcom-sdx55-t55.dts new file mode 100644 index 000000000000..ddcd53aa533d --- /dev/null +++ b/arch/arm/boot/dts/qcom-sdx55-t55.dts @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2021, Linaro Ltd. 
+ */ + +/dts-v1/; + +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/regulator/qcom,rpmh-regulator.h> +#include "qcom-sdx55.dtsi" +#include "qcom-pmx55.dtsi" + +/ { + model = "Thundercomm T55 Development Kit"; + compatible = "qcom,sdx55-t55", "qcom,sdx55"; + qcom,board-id = <0xb010008 0x4>; + + aliases { + serial0 = &blsp1_uart3; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + mpss_debug_mem: memory@8ef00000 { + no-map; + reg = <0x8ef00000 0x800000>; + }; + + ipa_fw_mem: memory@8fced000 { + no-map; + reg = <0x8fced000 0x10000>; + }; + + mpss_adsp_mem: memory@90800000 { + no-map; + reg = <0x90800000 0xf800000>; + }; + }; + + vph_pwr: vph-pwr-regulator { + compatible = "regulator-fixed"; + regulator-name = "vph_pwr"; + regulator-min-microvolt = <3700000>; + regulator-max-microvolt = <3700000>; + }; + + vreg_bob_3p3: pmx55-bob { + compatible = "regulator-fixed"; + regulator-name = "vreg_bob_3p3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + + regulator-always-on; + regulator-boot-on; + + vin-supply = <&vph_pwr>; + }; + + vreg_s7e_mx_0p752: pmx55-s7e { + compatible = "regulator-fixed"; + regulator-name = "vreg_s7e_mx_0p752"; + regulator-min-microvolt = <752000>; + regulator-max-microvolt = <752000>; + + vin-supply = <&vph_pwr>; + }; + + vreg_sd_vdd: sd-vdd { + compatible = "regulator-fixed"; + regulator-name = "vreg_sd_vdd"; + regulator-min-microvolt = <2950000>; + regulator-max-microvolt = <2950000>; + + vin-supply = <&vreg_vddpx_2>; + }; + + vreg_vddpx_2: vddpx-2 { + compatible = "regulator-gpio"; + regulator-name = "vreg_vddpx_2"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <2850000>; + enable-gpios = <&tlmm 98 GPIO_ACTIVE_HIGH>; + gpios = <&tlmm 100 GPIO_ACTIVE_HIGH>; + states = <1800000 0>, <2850000 1>; + startup-delay-us = <200000>; + enable-active-high; + regulator-boot-on; + + vin-supply = <&vph_pwr>; + }; +}; + +&apps_rsc { + pmx55-rpmh-regulators { + compatible = "qcom,pmx55-rpmh-regulators"; + qcom,pmic-id = "e"; + + vdd-s1-supply = <&vph_pwr>; + vdd-s2-supply = <&vph_pwr>; + vdd-s3-supply = <&vph_pwr>; + vdd-s4-supply = <&vph_pwr>; + vdd-s5-supply = <&vph_pwr>; + vdd-s6-supply = <&vph_pwr>; + vdd-s7-supply = <&vph_pwr>; + vdd-l1-l2-supply = <&vreg_s2e_1p224>; + vdd-l3-l9-supply = <&vreg_s3e_0p824>; + vdd-l4-l12-supply = <&vreg_s4e_1p904>; + vdd-l5-l6-supply = <&vreg_s4e_1p904>; + vdd-l7-l8-supply = <&vreg_s3e_0p824>; + vdd-l10-l11-l13-supply = <&vreg_bob_3p3>; + vdd-l14-supply = <&vreg_s7e_mx_0p752>; + vdd-l15-supply = <&vreg_s2e_1p224>; + vdd-l16-supply = <&vreg_s4e_1p904>; + + vreg_s2e_1p224: smps2 { + regulator-min-microvolt = <1280000>; + regulator-max-microvolt = <1400000>; + }; + + vreg_s3e_0p824: smps3 { + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1000000>; + }; + + vreg_s4e_1p904: smps4 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1960000>; + }; + + vreg_l1e_bb_1p2: ldo1 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo2 { + regulator-min-microvolt = <1128000>; + regulator-max-microvolt = <1128000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo3 { + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <800000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + vreg_l4e_bb_0p875: ldo4 { + regulator-min-microvolt = <872000>; + 
regulator-max-microvolt = <872000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + vreg_l5e_bb_1p7: ldo5 { + regulator-min-microvolt = <1704000>; + regulator-max-microvolt = <1900000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo6 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo7 { + regulator-min-microvolt = <480000>; + regulator-max-microvolt = <900000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo8 { + regulator-min-microvolt = <480000>; + regulator-max-microvolt = <900000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo9 { + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <800000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + vreg_l10e_3p1: ldo10 { + regulator-min-microvolt = <3088000>; + regulator-max-microvolt = <3088000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo11 { + regulator-min-microvolt = <1704000>; + regulator-max-microvolt = <2928000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo12 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo13 { + regulator-min-microvolt = <1704000>; + regulator-max-microvolt = <2928000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo14 { + regulator-min-microvolt = <600000>; + regulator-max-microvolt = <800000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo15 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo16 { + regulator-min-microvolt = <1704000>; + regulator-max-microvolt = <1904000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + }; +}; + +&blsp1_uart3 { + status = "ok"; +}; + +&qpic_bam { + status = "ok"; +}; + +&qpic_nand { + status = "ok"; + + nand@0 { + reg = <0>; + + nand-ecc-strength = <8>; + nand-ecc-step-size = <512>; + nand-bus-width = <8>; + /* efs2 partition is secured */ + secure-regions = <0x500000 0xb00000>; + }; +}; + +&remoteproc_mpss { + status = "okay"; + memory-region = <&mpss_adsp_mem>; +}; + +&usb_hsphy { + status = "okay"; + vdda-pll-supply = <&vreg_l4e_bb_0p875>; + vdda33-supply = <&vreg_l10e_3p1>; + vdda18-supply = <&vreg_l5e_bb_1p7>; +}; + +&usb_qmpphy { + status = "okay"; + vdda-phy-supply = <&vreg_l4e_bb_0p875>; + vdda-pll-supply = <&vreg_l1e_bb_1p2>; +}; + +&usb { + status = "okay"; +}; + +&usb_dwc3 { + dr_mode = "peripheral"; +}; diff --git a/arch/arm/boot/dts/qcom-sdx55-telit-fn980-tlb.dts b/arch/arm/boot/dts/qcom-sdx55-telit-fn980-tlb.dts new file mode 100644 index 000000000000..3065f84634b8 --- /dev/null +++ b/arch/arm/boot/dts/qcom-sdx55-telit-fn980-tlb.dts @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2021, Linaro Ltd. 
+ */ + +/dts-v1/; + +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/regulator/qcom,rpmh-regulator.h> +#include "qcom-sdx55.dtsi" +#include "qcom-pmx55.dtsi" + +/ { + model = "Telit FN980 TLB"; + compatible = "qcom,sdx55-telit-fn980-tlb", "qcom,sdx55"; + qcom,board-id = <0xb010008 0x0>; + + aliases { + serial0 = &blsp1_uart3; + }; + + chosen { + stdout-path = "serial0:921600n8"; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + mpss_debug_mem: memory@8ef00000 { + no-map; + reg = <0x8ef00000 0x800000>; + }; + + ipa_fw_mem: memory@8fced000 { + no-map; + reg = <0x8fced000 0x10000>; + }; + + mpss_adsp_mem: memory@90800000 { + no-map; + reg = <0x90800000 0xf800000>; + }; + }; + + vph_pwr: vph-pwr-regulator { + compatible = "regulator-fixed"; + regulator-name = "vph_pwr"; + regulator-min-microvolt = <3700000>; + regulator-max-microvolt = <3700000>; + }; + + vreg_bob_3p3: pmx55-bob { + compatible = "regulator-fixed"; + regulator-name = "vreg_bob_3p3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + + regulator-always-on; + regulator-boot-on; + + vin-supply = <&vph_pwr>; + }; + + vreg_s7e_mx_0p752: pmx55-s7e { + compatible = "regulator-fixed"; + regulator-name = "vreg_s7e_mx_0p752"; + regulator-min-microvolt = <752000>; + regulator-max-microvolt = <752000>; + + vin-supply = <&vph_pwr>; + }; + + vreg_sd_vdd: sd-vdd { + compatible = "regulator-fixed"; + regulator-name = "vreg_sd_vdd"; + regulator-min-microvolt = <2950000>; + regulator-max-microvolt = <2950000>; + + vin-supply = <&vreg_vddpx_2>; + }; + + vreg_vddpx_2: vddpx-2 { + compatible = "regulator-gpio"; + regulator-name = "vreg_vddpx_2"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <2850000>; + enable-gpios = <&tlmm 98 GPIO_ACTIVE_HIGH>; + gpios = <&tlmm 100 GPIO_ACTIVE_HIGH>; + states = <1800000 0>, <2850000 1>; + startup-delay-us = <200000>; + enable-active-high; + regulator-boot-on; + + vin-supply = <&vph_pwr>; + }; +}; + +&apps_rsc { + pmx55-rpmh-regulators { + compatible = "qcom,pmx55-rpmh-regulators"; + qcom,pmic-id = "e"; + + vdd-s1-supply = <&vph_pwr>; + vdd-s2-supply = <&vph_pwr>; + vdd-s3-supply = <&vph_pwr>; + vdd-s4-supply = <&vph_pwr>; + vdd-s5-supply = <&vph_pwr>; + vdd-s6-supply = <&vph_pwr>; + vdd-s7-supply = <&vph_pwr>; + vdd-l1-l2-supply = <&vreg_s2e_1p224>; + vdd-l3-l9-supply = <&vreg_s3e_0p824>; + vdd-l4-l12-supply = <&vreg_s4e_1p904>; + vdd-l5-l6-supply = <&vreg_s4e_1p904>; + vdd-l7-l8-supply = <&vreg_s3e_0p824>; + vdd-l10-l11-l13-supply = <&vreg_bob_3p3>; + vdd-l14-supply = <&vreg_s7e_mx_0p752>; + vdd-l15-supply = <&vreg_s2e_1p224>; + vdd-l16-supply = <&vreg_s4e_1p904>; + + vreg_s2e_1p224: smps2 { + regulator-min-microvolt = <1280000>; + regulator-max-microvolt = <1400000>; + }; + + vreg_s3e_0p824: smps3 { + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1000000>; + }; + + vreg_s4e_1p904: smps4 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1960000>; + }; + + vreg_l1e_bb_1p2: ldo1 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo2 { + regulator-min-microvolt = <1128000>; + regulator-max-microvolt = <1128000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo3 { + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <800000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + vreg_l4e_bb_0p875: ldo4 { + regulator-min-microvolt = <872000>; + 
regulator-max-microvolt = <872000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + vreg_l5e_bb_1p7: ldo5 { + regulator-min-microvolt = <1704000>; + regulator-max-microvolt = <1900000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo6 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo7 { + regulator-min-microvolt = <480000>; + regulator-max-microvolt = <900000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo8 { + regulator-min-microvolt = <480000>; + regulator-max-microvolt = <900000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo9 { + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <800000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + vreg_l10e_3p1: ldo10 { + regulator-min-microvolt = <3088000>; + regulator-max-microvolt = <3088000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo11 { + regulator-min-microvolt = <1704000>; + regulator-max-microvolt = <2928000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo12 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo13 { + regulator-min-microvolt = <1704000>; + regulator-max-microvolt = <2928000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo14 { + regulator-min-microvolt = <600000>; + regulator-max-microvolt = <800000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo15 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + + ldo16 { + regulator-min-microvolt = <1704000>; + regulator-max-microvolt = <1904000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>; + }; + }; +}; + +&blsp1_uart3 { + status = "ok"; +}; + +&qpic_bam { + status = "ok"; +}; + +&qpic_nand { + status = "ok"; + + nand@0 { + reg = <0>; + + nand-ecc-strength = <8>; + nand-ecc-step-size = <512>; + nand-bus-width = <8>; + /* ico and efs2 partitions are secured */ + secure-regions = <0x500000 0x500000 + 0xa00000 0xb00000>; + }; +}; + +&remoteproc_mpss { + status = "okay"; + memory-region = <&mpss_adsp_mem>; +}; + +&usb_hsphy { + status = "okay"; + vdda-pll-supply = <&vreg_l4e_bb_0p875>; + vdda33-supply = <&vreg_l10e_3p1>; + vdda18-supply = <&vreg_l5e_bb_1p7>; +}; + +&usb_qmpphy { + status = "okay"; + vdda-phy-supply = <&vreg_l4e_bb_0p875>; + vdda-pll-supply = <&vreg_l1e_bb_1p2>; +}; + +&usb { + status = "okay"; +}; + +&usb_dwc3 { + dr_mode = "peripheral"; +}; diff --git a/arch/arm/boot/dts/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom-sdx55.dtsi index e4180bbc4655..0057c7c04d31 100644 --- a/arch/arm/boot/dts/qcom-sdx55.dtsi +++ b/arch/arm/boot/dts/qcom-sdx55.dtsi @@ -8,6 +8,7 @@ #include <dt-bindings/clock/qcom,gcc-sdx55.h> #include <dt-bindings/clock/qcom,rpmh.h> +#include <dt-bindings/interconnect/qcom,sdx55.h> #include <dt-bindings/interrupt-controller/arm-gic.h> #include <dt-bindings/power/qcom-rpmpd.h> #include <dt-bindings/soc/qcom,rpmh-rsc.h> @@ -53,6 +54,41 @@ compatible = "arm,cortex-a7"; reg = <0x0>; enable-method = "psci"; + clocks = <&apcs>; + power-domains = <&rpmhpd SDX55_CX>; + power-domain-names = "rpmhpd"; + operating-points-v2 = <&cpu_opp_table>; + }; + }; + + cpu_opp_table: cpu-opp-table { + compatible = "operating-points-v2"; + opp-shared; + + opp-345600000 { + opp-hz = /bits/ 64 <345600000>; + 
required-opps = <&rpmhpd_opp_low_svs>; + }; + + opp-576000000 { + opp-hz = /bits/ 64 <576000000>; + required-opps = <&rpmhpd_opp_svs>; + }; + + opp-1094400000 { + opp-hz = /bits/ 64 <1094400000>; + required-opps = <&rpmhpd_opp_nom>; + }; + + opp-1555200000 { + opp-hz = /bits/ 64 <1555200000>; + required-opps = <&rpmhpd_opp_turbo>; + }; + }; + + firmware { + scm { + compatible = "qcom,scm-sdx55", "qcom,scm"; }; }; @@ -119,6 +155,37 @@ hwlocks = <&tcsr_mutex 3>; }; + smp2p-mpss { + compatible = "qcom,smp2p"; + qcom,smem = <435>, <428>; + interrupts = <GIC_SPI 113 IRQ_TYPE_EDGE_RISING>; + mboxes = <&apcs 14>; + qcom,local-pid = <0>; + qcom,remote-pid = <1>; + + modem_smp2p_out: master-kernel { + qcom,entry-name = "master-kernel"; + #qcom,smem-state-cells = <1>; + }; + + modem_smp2p_in: slave-kernel { + qcom,entry-name = "slave-kernel"; + interrupt-controller; + #interrupt-cells = <2>; + }; + + ipa_smp2p_out: ipa-ap-to-modem { + qcom,entry-name = "ipa"; + #qcom,smem-state-cells = <1>; + }; + + ipa_smp2p_in: ipa-modem-to-ap { + qcom,entry-name = "ipa"; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; + soc: soc { #address-cells = <1>; #size-cells = <1>; @@ -187,6 +254,34 @@ }; }; + mc_virt: interconnect@1100000 { + compatible = "qcom,sdx55-mc-virt"; + reg = <0x01100000 0x400000>; + #interconnect-cells = <1>; + qcom,bcm-voters = <&apps_bcm_voter>; + }; + + mem_noc: interconnect@9680000 { + compatible = "qcom,sdx55-mem-noc"; + reg = <0x09680000 0x40000>; + #interconnect-cells = <1>; + qcom,bcm-voters = <&apps_bcm_voter>; + }; + + system_noc: interconnect@162c000 { + compatible = "qcom,sdx55-system-noc"; + reg = <0x0162c000 0x31200>; + #interconnect-cells = <1>; + qcom,bcm-voters = <&apps_bcm_voter>; + }; + + ipa_virt: interconnect@1e00000 { + compatible = "qcom,sdx55-ipa-virt"; + reg = <0x01e00000 0x100000>; + #interconnect-cells = <1>; + qcom,bcm-voters = <&apps_bcm_voter>; + }; + qpic_bam: dma-controller@1b04000 { compatible = "qcom,bam-v1.7.0"; reg = <0x01b04000 0x1c000>; @@ -199,7 +294,7 @@ status = "disabled"; }; - qpic_nand: nand@1b30000 { + qpic_nand: nand-controller@1b30000 { compatible = "qcom,sdx55-nand"; reg = <0x01b30000 0x10000>; #address-cells = <1>; @@ -215,6 +310,47 @@ status = "disabled"; }; + ipa: ipa@1e40000 { + compatible = "qcom,sdx55-ipa"; + + iommus = <&apps_smmu 0x5e0 0x0>, + <&apps_smmu 0x5e2 0x0>; + reg = <0x1e40000 0x7000>, + <0x1e50000 0x4b20>, + <0x1e04000 0x2c000>; + reg-names = "ipa-reg", + "ipa-shared", + "gsi"; + + interrupts-extended = <&intc GIC_SPI 241 IRQ_TYPE_EDGE_RISING>, + <&intc GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>, + <&ipa_smp2p_in 0 IRQ_TYPE_EDGE_RISING>, + <&ipa_smp2p_in 1 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "ipa", + "gsi", + "ipa-clock-query", + "ipa-setup-ready"; + + clocks = <&rpmhcc RPMH_IPA_CLK>; + clock-names = "core"; + + interconnects = <&system_noc MASTER_IPA &system_noc SLAVE_SNOC_MEM_NOC_GC>, + <&mem_noc MASTER_SNOC_GC_MEM_NOC &mc_virt SLAVE_EBI_CH0>, + <&system_noc MASTER_IPA &system_noc SLAVE_OCIMEM>, + <&mem_noc MASTER_AMPSS_M0 &system_noc SLAVE_IPA_CFG>; + interconnect-names = "memory-a", + "memory-b", + "imem", + "config"; + + qcom,smem-states = <&ipa_smp2p_out 0>, + <&ipa_smp2p_out 1>; + qcom,smem-state-names = "ipa-clock-enabled-valid", + "ipa-clock-enabled"; + + status = "disabled"; + }; + tcsr_mutex: hwlock@1f40000 { compatible = "qcom,tcsr-mutex"; reg = <0x01f40000 0x40000>; @@ -233,6 +369,39 @@ status = "disabled"; }; + remoteproc_mpss: remoteproc@4080000 { + compatible = "qcom,sdx55-mpss-pas"; + reg = <0x04080000 
0x4040>; + + interrupts-extended = <&intc GIC_SPI 250 IRQ_TYPE_EDGE_RISING>, + <&modem_smp2p_in 0 IRQ_TYPE_EDGE_RISING>, + <&modem_smp2p_in 1 IRQ_TYPE_EDGE_RISING>, + <&modem_smp2p_in 2 IRQ_TYPE_EDGE_RISING>, + <&modem_smp2p_in 3 IRQ_TYPE_EDGE_RISING>, + <&modem_smp2p_in 7 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "wdog", "fatal", "ready", "handover", + "stop-ack", "shutdown-ack"; + + clocks = <&rpmhcc RPMH_CXO_CLK>; + clock-names = "xo"; + + power-domains = <&rpmhpd SDX55_CX>, + <&rpmhpd SDX55_MSS>; + power-domain-names = "cx", "mss"; + + qcom,smem-states = <&modem_smp2p_out 0>; + qcom,smem-state-names = "stop"; + + status = "disabled"; + + glink-edge { + interrupts = <GIC_SPI 114 IRQ_TYPE_EDGE_RISING>; + label = "mpss"; + qcom,remote-pid = <1>; + mboxes = <&apcs 15>; + }; + }; + usb: usb@a6f8800 { compatible = "qcom,sdx55-dwc3", "qcom,dwc3"; reg = <0x0a6f8800 0x400>; @@ -319,6 +488,21 @@ #interrupt-cells = <2>; }; + imem@1468f000 { + compatible = "simple-mfd"; + reg = <0x1468f000 0x1000>; + + #address-cells = <1>; + #size-cells = <1>; + + ranges = <0x0 0x1468f000 0x1000>; + + pil-reloc@94c { + compatible = "qcom,pil-reloc-info"; + reg = <0x94c 0x200>; + }; + }; + apps_smmu: iommu@15000000 { compatible = "qcom,sdx55-smmu-500", "arm,mmu-500"; reg = <0x15000000 0x20000>; @@ -352,6 +536,23 @@ <0x17802000 0x1000>; }; + a7pll: clock@17808000 { + compatible = "qcom,sdx55-a7pll"; + reg = <0x17808000 0x1000>; + clocks = <&rpmhcc RPMH_CXO_CLK>; + clock-names = "bi_tcxo"; + #clock-cells = <0>; + }; + + apcs: mailbox@17810000 { + compatible = "qcom,sdx55-apcs-gcc", "syscon"; + reg = <0x17810000 0x2000>; + #mbox-cells = <1>; + clocks = <&rpmhcc RPMH_CXO_CLK>, <&a7pll>, <&gcc GPLL0>; + clock-names = "ref", "pll", "aux"; + #clock-cells = <0>; + }; + watchdog@17817000 { compatible = "qcom,apss-wdt-sdx55", "qcom,kpss-wdt"; reg = <0x17817000 0x1000>; @@ -491,6 +692,10 @@ }; }; }; + + apps_bcm_voter: bcm_voter { + compatible = "qcom,bcm-voter"; + }; }; }; diff --git a/arch/arm/boot/dts/r7s9210-rza2mevb.dts b/arch/arm/boot/dts/r7s9210-rza2mevb.dts index d062d02865e7..68498ce2aec0 100644 --- a/arch/arm/boot/dts/r7s9210-rza2mevb.dts +++ b/arch/arm/boot/dts/r7s9210-rza2mevb.dts @@ -4,6 +4,28 @@ * * Copyright (C) 2018 Renesas Electronics * + * As upstream Linux does not support XIP, it cannot run in 8 MiB of HyperRAM. + * Hence the 64 MiB of SDRAM on the sub-board needs to be enabled, which has + * the following ramifications: + * - SCIF4 connected to the on-board USB-serial can no longer be used as the + * serial console, + * - Instead, SCIF2 is used as the serial console, by connecting a 3.3V TTL + * USB-to-Serial adapter to the CMOS camera connector: + * - RXD = CN17-9, + * - TXD = CN17-10, + * - GND = CN17-2 or CN17-17, + * - The first Ethernet channel can no longer be used, + * - USB Channel 1 loses the overcurrent input signal. 
+ * + * Please make sure your sub-board matches the following switch settings: + * + * SW6 SW6-1 set to SDRAM + * ON SW6-2 set to Audio + * +---------------------+ SW6-3 set to DRP + * | = = = = = | SW6-4 set to CEU + * | = = | SW6-5 set to Ether2 + * | 1 2 3 4 5 6 7 8 9 0 | SW6-6 set to VDC6 + * +---------------------+ SW6-7 set to VDC6 */ /dts-v1/; @@ -17,9 +39,8 @@ compatible = "renesas,rza2mevb", "renesas,r7s9210"; aliases { - serial0 = &scif4; - ethernet0 = ðer0; - ethernet1 = ðer1; + serial0 = &scif2; + ethernet0 = ðer1; }; chosen { @@ -58,9 +79,9 @@ }; }; - memory@40000000 { + memory@c000000 { device_type = "memory"; - reg = <0x40000000 0x00800000>; /* HyperRAM */ + reg = <0x0c000000 0x04000000>; /* SDRAM */ }; }; @@ -72,17 +93,6 @@ status = "okay"; }; -ðer0 { - pinctrl-names = "default"; - pinctrl-0 = <ð0_pins>; - status = "okay"; - renesas,no-ether-link; - phy-handle = <&phy0>; - phy0: ethernet-phy@0 { - reg = <0>; - }; -}; - ðer1 { pinctrl-names = "default"; pinctrl-0 = <ð1_pins>; @@ -142,9 +152,9 @@ }; /* Serial Console */ - scif4_pins: serial4 { - pinmux = <RZA2_PINMUX(PORT9, 0, 4)>, /* TxD4 */ - <RZA2_PINMUX(PORT9, 1, 4)>; /* RxD4 */ + scif2_pins: serial2 { + pinmux = <RZA2_PINMUX(PORTE, 2, 3)>, /* TxD2 */ + <RZA2_PINMUX(PORTE, 1, 3)>; /* RxD2 */ }; sdhi0_pins: sdhi0 { @@ -165,8 +175,7 @@ usb1_pins: usb1 { pinmux = <RZA2_PINMUX(PORTC, 0, 1)>, /* VBUSIN1 */ - <RZA2_PINMUX(PORTC, 5, 1)>, /* VBUSEN1 */ - <RZA2_PINMUX(PORT7, 5, 5)>; /* OVRCUR1 */ + <RZA2_PINMUX(PORTC, 5, 1)>; /* VBUSEN1 */ }; }; @@ -176,9 +185,9 @@ }; /* Serial Console */ -&scif4 { +&scif2 { pinctrl-names = "default"; - pinctrl-0 = <&scif4_pins>; + pinctrl-0 = <&scif2_pins>; status = "okay"; }; diff --git a/arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ca.dts b/arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ca.dts index 98c3fbd89fa6..2bcb229844ab 100644 --- a/arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ca.dts +++ b/arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ca.dts @@ -91,92 +91,20 @@ status = "okay"; }; -&i2c0 { - ov5640@3c { - compatible = "ovti,ov5640"; - reg = <0x3c>; - clocks = <&mclk_cam1>; - clock-names = "xclk"; - - port { - ov5640_0: endpoint { - bus-width = <8>; - data-shift = <2>; - bus-type = <6>; - pclk-sample = <1>; - remote-endpoint = <&vin0ep>; - }; - }; - }; -}; - &i2c1 { pinctrl-0 = <&i2c1_pins>; pinctrl-names = "default"; - status = "okay"; + /* status set to "okay" when needed by camera configuration below */ clock-frequency = <400000>; - - ov5640@3c { - compatible = "ovti,ov5640"; - reg = <0x3c>; - clocks = <&mclk_cam2>; - clock-names = "xclk"; - - port { - ov5640_1: endpoint { - bus-width = <8>; - data-shift = <2>; - bus-type = <6>; - pclk-sample = <1>; - remote-endpoint = <&vin1ep>; - }; - }; - }; -}; - -&i2c2 { - ov5640@3c { - compatible = "ovti,ov5640"; - reg = <0x3c>; - clocks = <&mclk_cam3>; - clock-names = "xclk"; - - port { - ov5640_2: endpoint { - bus-width = <8>; - data-shift = <2>; - bus-type = <6>; - pclk-sample = <1>; - remote-endpoint = <&vin2ep>; - }; - }; - }; }; &i2c3 { pinctrl-0 = <&i2c3_pins>; pinctrl-names = "default"; - status = "okay"; + /* status set to "okay" when needed by camera configuration below */ clock-frequency = <400000>; - - ov5640@3c { - compatible = "ovti,ov5640"; - reg = <0x3c>; - clocks = <&mclk_cam4>; - clock-names = "xclk"; - - port { - ov5640_3: endpoint { - bus-width = <8>; - data-shift = <2>; - bus-type = <6>; - pclk-sample = <1>; - remote-endpoint = <&vin3ep>; - }; - }; - }; }; &pfc { @@ -267,6 +195,22 @@ cts-gpios = <&gpio4 17 GPIO_ACTIVE_LOW>; }; +/* + * Below 
configuration ties VINx endpoints to ov5640/ov7725 camera endpoints + * + * (un)comment the #include statements to change configuration + */ + +/* 8bit CMOS Camera 1 (J13) */ +#define CAM_PARENT_I2C i2c0 +#define MCLK_CAM mclk_cam1 +#define CAM_EP cam0ep +#define VIN_EP vin0ep +#undef CAM_ENABLED +#include "r8a7742-iwg21d-q7-dbcm-ov5640-single.dtsi" +//#include "r8a7742-iwg21d-q7-dbcm-ov7725-single.dtsi" + +#ifdef CAM_ENABLED &vin0 { /* * Set SW2 switch on the SOM to 'ON' @@ -278,13 +222,29 @@ port { vin0ep: endpoint { - remote-endpoint = <&ov5640_0>; + remote-endpoint = <&cam0ep>; bus-width = <8>; bus-type = <6>; }; }; }; - +#endif /* CAM_ENABLED */ + +#undef CAM_PARENT_I2C +#undef MCLK_CAM +#undef CAM_EP +#undef VIN_EP + +/* 8bit CMOS Camera 2 (J14) */ +#define CAM_PARENT_I2C i2c1 +#define MCLK_CAM mclk_cam2 +#define CAM_EP cam1ep +#define VIN_EP vin1ep +#undef CAM_ENABLED +#include "r8a7742-iwg21d-q7-dbcm-ov5640-single.dtsi" +//#include "r8a7742-iwg21d-q7-dbcm-ov7725-single.dtsi" + +#ifdef CAM_ENABLED &vin1 { /* Set SW1 switch on the SOM to 'ON' */ status = "okay"; @@ -293,13 +253,30 @@ port { vin1ep: endpoint { - remote-endpoint = <&ov5640_1>; + remote-endpoint = <&cam1ep>; bus-width = <8>; bus-type = <6>; }; }; }; +#endif /* CAM_ENABLED */ + +#undef CAM_PARENT_I2C +#undef MCLK_CAM +#undef CAM_EP +#undef VIN_EP + +/* 8bit CMOS Camera 3 (J12) */ +#define CAM_PARENT_I2C i2c2 +#define MCLK_CAM mclk_cam3 +#define CAM_EP cam2ep +#define VIN_EP vin2ep +#undef CAM_ENABLED +#include "r8a7742-iwg21d-q7-dbcm-ov5640-single.dtsi" +//#include "r8a7742-iwg21d-q7-dbcm-ov7725-single.dtsi" + +#ifdef CAM_ENABLED &vin2 { status = "okay"; pinctrl-0 = <&vin2_pins>; @@ -307,14 +284,30 @@ port { vin2ep: endpoint { - remote-endpoint = <&ov5640_2>; + remote-endpoint = <&cam2ep>; bus-width = <8>; data-shift = <8>; bus-type = <6>; }; }; }; - +#endif /* CAM_ENABLED */ + +#undef CAM_PARENT_I2C +#undef MCLK_CAM +#undef CAM_EP +#undef VIN_EP + +/* 8bit CMOS Camera 4 (J11) */ +#define CAM_PARENT_I2C i2c3 +#define MCLK_CAM mclk_cam4 +#define CAM_EP cam3ep +#define VIN_EP vin3ep +#undef CAM_ENABLED +#include "r8a7742-iwg21d-q7-dbcm-ov5640-single.dtsi" +//#include "r8a7742-iwg21d-q7-dbcm-ov7725-single.dtsi" + +#ifdef CAM_ENABLED &vin3 { status = "okay"; pinctrl-0 = <&vin3_pins>; @@ -322,9 +315,15 @@ port { vin3ep: endpoint { - remote-endpoint = <&ov5640_3>; + remote-endpoint = <&cam3ep>; bus-width = <8>; bus-type = <6>; }; }; }; +#endif /* CAM_ENABLED */ + +#undef CAM_PARENT_I2C +#undef MCLK_CAM +#undef CAM_EP +#undef VIN_EP diff --git a/arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ov5640-single.dtsi b/arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ov5640-single.dtsi new file mode 100644 index 000000000000..70c72ba4fe72 --- /dev/null +++ b/arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ov5640-single.dtsi @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This include file ties a VIN interface with a single ov5640 sensor on + * the iWave-RZ/G1H Qseven board development platform connected with the + * camera daughter board. + * + * Copyright (C) 2020 Renesas Electronics Corp. 
+ */ + +#define CAM_ENABLED 1 + +&CAM_PARENT_I2C { + status = "okay"; + + ov5640@3c { + compatible = "ovti,ov5640"; + reg = <0x3c>; + clocks = <&MCLK_CAM>; + clock-names = "xclk"; + status = "okay"; + + port { + CAM_EP: endpoint { + bus-width = <8>; + data-shift = <2>; + bus-type = <6>; + pclk-sample = <1>; + remote-endpoint = <&VIN_EP>; + }; + }; + }; +}; diff --git a/arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ov7725-single.dtsi b/arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ov7725-single.dtsi new file mode 100644 index 000000000000..f5e77f024251 --- /dev/null +++ b/arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ov7725-single.dtsi @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This include file ties a VIN interface with a single ov7725 sensor on + * the iWave-RZ/G1H Qseven board development platform connected with the + * camera daughter board. + * + * Copyright (C) 2020 Renesas Electronics Corp. + */ + +#define CAM_ENABLED 1 + +&CAM_PARENT_I2C { + status = "okay"; + + ov7725@21 { + compatible = "ovti,ov7725"; + reg = <0x21>; + clocks = <&MCLK_CAM>; + status = "okay"; + + port { + CAM_EP: endpoint { + bus-width = <8>; + bus-type = <6>; + remote-endpoint = <&VIN_EP>; + }; + }; + }; +}; diff --git a/arch/arm/boot/dts/r8a7742-iwg21d-q7.dts b/arch/arm/boot/dts/r8a7742-iwg21d-q7.dts index 0063ef92f50e..94bf8a116b52 100644 --- a/arch/arm/boot/dts/r8a7742-iwg21d-q7.dts +++ b/arch/arm/boot/dts/r8a7742-iwg21d-q7.dts @@ -387,8 +387,8 @@ rcar_sound,dai { dai0 { - playback = <&ssi4 &src4 &dvc1>; - capture = <&ssi3 &src3 &dvc0>; + playback = <&ssi4>, <&src4>, <&dvc1>; + capture = <&ssi3>, <&src3>, <&dvc0>; }; }; }; diff --git a/arch/arm/boot/dts/r8a7742.dtsi b/arch/arm/boot/dts/r8a7742.dtsi index 6a78c813057b..dd1b976d2a6c 100644 --- a/arch/arm/boot/dts/r8a7742.dtsi +++ b/arch/arm/boot/dts/r8a7742.dtsi @@ -367,13 +367,13 @@ apmu@e6151000 { compatible = "renesas,r8a7742-apmu", "renesas,apmu"; reg = <0 0xe6151000 0 0x188>; - cpus = <&cpu4 &cpu5 &cpu6 &cpu7>; + cpus = <&cpu4>, <&cpu5>, <&cpu6>, <&cpu7>; }; apmu@e6152000 { compatible = "renesas,r8a7742-apmu", "renesas,apmu"; reg = <0 0xe6152000 0 0x188>; - cpus = <&cpu0 &cpu1 &cpu2 &cpu3>; + cpus = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>; }; rst: reset-controller@e6160000 { diff --git a/arch/arm/boot/dts/r8a7743-sk-rzg1m.dts b/arch/arm/boot/dts/r8a7743-sk-rzg1m.dts index 807e7d0d6b62..4ace117470e8 100644 --- a/arch/arm/boot/dts/r8a7743-sk-rzg1m.dts +++ b/arch/arm/boot/dts/r8a7743-sk-rzg1m.dts @@ -61,7 +61,7 @@ }; ðer { - pinctrl-0 = <ðer_pins &phy1_pins>; + pinctrl-0 = <ðer_pins>, <&phy1_pins>; pinctrl-names = "default"; phy-handle = <&phy1>; diff --git a/arch/arm/boot/dts/r8a7743.dtsi b/arch/arm/boot/dts/r8a7743.dtsi index f444e418f408..6e37b8da278b 100644 --- a/arch/arm/boot/dts/r8a7743.dtsi +++ b/arch/arm/boot/dts/r8a7743.dtsi @@ -293,7 +293,7 @@ apmu@e6152000 { compatible = "renesas,r8a7743-apmu", "renesas,apmu"; reg = <0 0xe6152000 0 0x188>; - cpus = <&cpu0 &cpu1>; + cpus = <&cpu0>, <&cpu1>; }; rst: reset-controller@e6160000 { diff --git a/arch/arm/boot/dts/r8a7744.dtsi b/arch/arm/boot/dts/r8a7744.dtsi index 0442aad4f9db..ace20861c0c4 100644 --- a/arch/arm/boot/dts/r8a7744.dtsi +++ b/arch/arm/boot/dts/r8a7744.dtsi @@ -293,7 +293,7 @@ apmu@e6152000 { compatible = "renesas,r8a7744-apmu", "renesas,apmu"; reg = <0 0xe6152000 0 0x188>; - cpus = <&cpu0 &cpu1>; + cpus = <&cpu0>, <&cpu1>; }; rst: reset-controller@e6160000 { diff --git a/arch/arm/boot/dts/r8a7745-iwg22d-sodimm.dts b/arch/arm/boot/dts/r8a7745-iwg22d-sodimm.dts index 1c7b37a01f0a..73bd62d8a929 
100644 --- a/arch/arm/boot/dts/r8a7745-iwg22d-sodimm.dts +++ b/arch/arm/boot/dts/r8a7745-iwg22d-sodimm.dts @@ -289,8 +289,8 @@ rcar_sound,dai { dai0 { - playback = <&ssi3 &src3 &dvc0>; - capture = <&ssi4 &src4 &dvc1>; + playback = <&ssi3>, <&src3>, <&dvc0>; + capture = <&ssi4>, <&src4>, <&dvc1>; }; }; }; diff --git a/arch/arm/boot/dts/r8a7745-sk-rzg1e.dts b/arch/arm/boot/dts/r8a7745-sk-rzg1e.dts index db72a801abe5..59d1a9bfab05 100644 --- a/arch/arm/boot/dts/r8a7745-sk-rzg1e.dts +++ b/arch/arm/boot/dts/r8a7745-sk-rzg1e.dts @@ -56,7 +56,7 @@ }; ðer { - pinctrl-0 = <ðer_pins &phy1_pins>; + pinctrl-0 = <ðer_pins>, <&phy1_pins>; pinctrl-names = "default"; phy-handle = <&phy1>; diff --git a/arch/arm/boot/dts/r8a7745.dtsi b/arch/arm/boot/dts/r8a7745.dtsi index 0f14ac22921d..be33bdabe452 100644 --- a/arch/arm/boot/dts/r8a7745.dtsi +++ b/arch/arm/boot/dts/r8a7745.dtsi @@ -258,7 +258,7 @@ apmu@e6151000 { compatible = "renesas,r8a7745-apmu", "renesas,apmu"; reg = <0 0xe6151000 0 0x188>; - cpus = <&cpu0 &cpu1>; + cpus = <&cpu0>, <&cpu1>; }; rst: reset-controller@e6160000 { diff --git a/arch/arm/boot/dts/r8a77470.dtsi b/arch/arm/boot/dts/r8a77470.dtsi index 691b1a131c87..a1d7f6e7a2e3 100644 --- a/arch/arm/boot/dts/r8a77470.dtsi +++ b/arch/arm/boot/dts/r8a77470.dtsi @@ -205,7 +205,7 @@ apmu@e6151000 { compatible = "renesas,r8a77470-apmu", "renesas,apmu"; reg = <0 0xe6151000 0 0x188>; - cpus = <&cpu0 &cpu1>; + cpus = <&cpu0>, <&cpu1>; }; rst: reset-controller@e6160000 { diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts index 09a152b91557..2dad0742d2ba 100644 --- a/arch/arm/boot/dts/r8a7790-lager.dts +++ b/arch/arm/boot/dts/r8a7790-lager.dts @@ -53,6 +53,9 @@ i2c11 = &i2cexio1; i2c12 = &i2chdmi; i2c13 = &i2cpwr; + mmc0 = &mmcif1; + mmc1 = &sdhi0; + mmc2 = &sdhi2; }; chosen { @@ -659,7 +662,7 @@ }; ðer { - pinctrl-0 = <ðer_pins &phy1_pins>; + pinctrl-0 = <ðer_pins>, <&phy1_pins>; pinctrl-names = "default"; phy-handle = <&phy1>; @@ -908,7 +911,7 @@ }; &rcar_sound { - pinctrl-0 = <&sound_pins &sound_clk_pins>; + pinctrl-0 = <&sound_pins>, <&sound_clk_pins>; pinctrl-names = "default"; /* Single DAI */ @@ -918,8 +921,8 @@ rcar_sound,dai { dai0 { - playback = <&ssi0 &src2 &dvc0>; - capture = <&ssi1 &src3 &dvc1>; + playback = <&ssi0>, <&src2>, <&dvc0>; + capture = <&ssi1>, <&src3>, <&dvc1>; }; }; }; diff --git a/arch/arm/boot/dts/r8a7790-stout.dts b/arch/arm/boot/dts/r8a7790-stout.dts index 6a457bc9280a..d51f23572d7f 100644 --- a/arch/arm/boot/dts/r8a7790-stout.dts +++ b/arch/arm/boot/dts/r8a7790-stout.dts @@ -191,7 +191,7 @@ }; ðer { - pinctrl-0 = <ðer_pins &phy1_pins>; + pinctrl-0 = <ðer_pins>, <&phy1_pins>; pinctrl-names = "default"; phy-handle = <&phy1>; @@ -321,7 +321,7 @@ &iic3 { pinctrl-names = "default"; - pinctrl-0 = <&iic3_pins &pmic_irq_pins>; + pinctrl-0 = <&iic3_pins>, <&pmic_irq_pins>; status = "okay"; pmic@58 { diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi index b0569b4ea5c8..de29394eed63 100644 --- a/arch/arm/boot/dts/r8a7790.dtsi +++ b/arch/arm/boot/dts/r8a7790.dtsi @@ -381,13 +381,13 @@ apmu@e6151000 { compatible = "renesas,r8a7790-apmu", "renesas,apmu"; reg = <0 0xe6151000 0 0x188>; - cpus = <&cpu4 &cpu5 &cpu6 &cpu7>; + cpus = <&cpu4>, <&cpu5>, <&cpu6>, <&cpu7>; }; apmu@e6152000 { compatible = "renesas,r8a7790-apmu", "renesas,apmu"; reg = <0 0xe6152000 0 0x188>; - cpus = <&cpu0 &cpu1 &cpu2 &cpu3>; + cpus = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>; }; rst: reset-controller@e6160000 { diff --git 
a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts index f603cba5441f..61e881bbbf6e 100644 --- a/arch/arm/boot/dts/r8a7791-koelsch.dts +++ b/arch/arm/boot/dts/r8a7791-koelsch.dts @@ -53,6 +53,9 @@ i2c12 = &i2cexio1; i2c13 = &i2chdmi; i2c14 = &i2cexio4; + mmc0 = &sdhi0; + mmc1 = &sdhi1; + mmc2 = &sdhi2; }; chosen { @@ -78,6 +81,9 @@ keyboard { compatible = "gpio-keys"; + pinctrl-0 = <&sw2_pins>; + pinctrl-names = "default"; + key-1 { gpios = <&gpio5 0 GPIO_ACTIVE_LOW>; linux,code = <KEY_1>; @@ -615,10 +621,15 @@ groups = "audio_clk_a"; function = "audio_clk"; }; + + sw2_pins: sw2 { + pins = "GP_5_0", "GP_5_1", "GP_5_2", "GP_5_3"; + bias-pull-up; + }; }; ðer { - pinctrl-0 = <ðer_pins &phy1_pins>; + pinctrl-0 = <ðer_pins>, <&phy1_pins>; pinctrl-names = "default"; phy-handle = <&phy1>; @@ -878,7 +889,7 @@ }; &rcar_sound { - pinctrl-0 = <&sound_pins &sound_clk_pins>; + pinctrl-0 = <&sound_pins>, <&sound_clk_pins>; pinctrl-names = "default"; /* Single DAI */ @@ -888,8 +899,8 @@ rcar_sound,dai { dai0 { - playback = <&ssi0 &src2 &dvc0>; - capture = <&ssi1 &src3 &dvc1>; + playback = <&ssi0>, <&src2>, <&dvc0>; + capture = <&ssi1>, <&src3>, <&dvc1>; }; }; }; diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts index c6d563fb7ec7..c6ef636965c1 100644 --- a/arch/arm/boot/dts/r8a7791-porter.dts +++ b/arch/arm/boot/dts/r8a7791-porter.dts @@ -28,6 +28,8 @@ serial0 = &scif0; i2c9 = &gpioi2c2; i2c10 = &i2chdmi; + mmc0 = &sdhi0; + mmc1 = &sdhi2; }; chosen { @@ -292,7 +294,7 @@ }; ðer { - pinctrl-0 = <ðer_pins &phy1_pins>; + pinctrl-0 = <ðer_pins>, <&phy1_pins>; pinctrl-names = "default"; phy-handle = <&phy1>; @@ -494,7 +496,7 @@ }; &rcar_sound { - pinctrl-0 = <&ssi_pins &audio_clk_pins>; + pinctrl-0 = <&ssi_pins>, <&audio_clk_pins>; pinctrl-names = "default"; status = "okay"; diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi index 87f0d6dc3e5a..9d8320f71a6a 100644 --- a/arch/arm/boot/dts/r8a7791.dtsi +++ b/arch/arm/boot/dts/r8a7791.dtsi @@ -315,7 +315,7 @@ apmu@e6152000 { compatible = "renesas,r8a7791-apmu", "renesas,apmu"; reg = <0 0xe6152000 0 0x188>; - cpus = <&cpu0 &cpu1>; + cpus = <&cpu0>, <&cpu1>; }; rst: reset-controller@e6160000 { diff --git a/arch/arm/boot/dts/r8a7792-blanche.dts b/arch/arm/boot/dts/r8a7792-blanche.dts index 9368ac2cf508..c100ae903a46 100644 --- a/arch/arm/boot/dts/r8a7792-blanche.dts +++ b/arch/arm/boot/dts/r8a7792-blanche.dts @@ -334,7 +334,7 @@ }; &du { - pinctrl-0 = <&du0_pins &du1_pins>; + pinctrl-0 = <&du0_pins>, <&du1_pins>; pinctrl-names = "default"; clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>, <&x1_clk>, <&x2_clk>; diff --git a/arch/arm/boot/dts/r8a7792-wheat.dts b/arch/arm/boot/dts/r8a7792-wheat.dts index ba2d2a589012..434e4655be9d 100644 --- a/arch/arm/boot/dts/r8a7792-wheat.dts +++ b/arch/arm/boot/dts/r8a7792-wheat.dts @@ -307,7 +307,7 @@ }; &du { - pinctrl-0 = <&du0_pins &du1_pins>; + pinctrl-0 = <&du0_pins>, <&du1_pins>; pinctrl-names = "default"; clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>, <&osc2_clk>; diff --git a/arch/arm/boot/dts/r8a7792.dtsi b/arch/arm/boot/dts/r8a7792.dtsi index f5b299bfcb23..253e8bf643d1 100644 --- a/arch/arm/boot/dts/r8a7792.dtsi +++ b/arch/arm/boot/dts/r8a7792.dtsi @@ -314,7 +314,7 @@ apmu@e6152000 { compatible = "renesas,r8a7792-apmu", "renesas,apmu"; reg = <0 0xe6152000 0 0x188>; - cpus = <&cpu0 &cpu1>; + cpus = <&cpu0>, <&cpu1>; }; rst: reset-controller@e6160000 { diff --git a/arch/arm/boot/dts/r8a7793-gose.dts 
b/arch/arm/boot/dts/r8a7793-gose.dts index abf487e8fe0f..87fa57a99399 100644 --- a/arch/arm/boot/dts/r8a7793-gose.dts +++ b/arch/arm/boot/dts/r8a7793-gose.dts @@ -49,6 +49,9 @@ i2c10 = &gpioi2c4; i2c11 = &i2chdmi; i2c12 = &i2cexio4; + mmc0 = &sdhi0; + mmc1 = &sdhi1; + mmc2 = &sdhi2; }; chosen { @@ -576,7 +579,7 @@ }; ðer { - pinctrl-0 = <ðer_pins &phy1_pins>; + pinctrl-0 = <ðer_pins>, <&phy1_pins>; pinctrl-names = "default"; phy-handle = <&phy1>; @@ -751,7 +754,7 @@ }; &rcar_sound { - pinctrl-0 = <&sound_pins &sound_clk_pins>; + pinctrl-0 = <&sound_pins>, <&sound_clk_pins>; pinctrl-names = "default"; /* Single DAI */ @@ -761,8 +764,8 @@ rcar_sound,dai { dai0 { - playback = <&ssi0 &src2 &dvc0>; - capture = <&ssi1 &src3 &dvc1>; + playback = <&ssi0>, <&src2>, <&dvc0>; + capture = <&ssi1>, <&src3>, <&dvc1>; }; }; }; diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi index f930f69f7bcc..6d74475030ed 100644 --- a/arch/arm/boot/dts/r8a7793.dtsi +++ b/arch/arm/boot/dts/r8a7793.dtsi @@ -290,7 +290,7 @@ apmu@e6152000 { compatible = "renesas,r8a7793-apmu", "renesas,apmu"; reg = <0 0xe6152000 0 0x188>; - cpus = <&cpu0 &cpu1>; + cpus = <&cpu0>, <&cpu1>; }; rst: reset-controller@e6160000 { diff --git a/arch/arm/boot/dts/r8a7794-alt.dts b/arch/arm/boot/dts/r8a7794-alt.dts index 3f1cc5bbf329..f9dba5688d3f 100644 --- a/arch/arm/boot/dts/r8a7794-alt.dts +++ b/arch/arm/boot/dts/r8a7794-alt.dts @@ -19,6 +19,9 @@ i2c10 = &gpioi2c4; i2c11 = &i2chdmi; i2c12 = &i2cexio4; + mmc0 = &mmcif0; + mmc1 = &sdhi0; + mmc2 = &sdhi1; }; chosen { @@ -330,7 +333,7 @@ }; ðer { - pinctrl-0 = <ðer_pins &phy1_pins>; + pinctrl-0 = <ðer_pins>, <&phy1_pins>; pinctrl-names = "default"; phy-handle = <&phy1>; diff --git a/arch/arm/boot/dts/r8a7794-silk.dts b/arch/arm/boot/dts/r8a7794-silk.dts index 677596f6c9c9..eb89a27a6ed0 100644 --- a/arch/arm/boot/dts/r8a7794-silk.dts +++ b/arch/arm/boot/dts/r8a7794-silk.dts @@ -31,6 +31,8 @@ serial0 = &scif2; i2c9 = &gpioi2c1; i2c10 = &i2chdmi; + mmc0 = &mmcif0; + mmc1 = &sdhi1; }; chosen { @@ -379,7 +381,7 @@ }; ðer { - pinctrl-0 = <ðer_pins &phy1_pins>; + pinctrl-0 = <ðer_pins>, <&phy1_pins>; pinctrl-names = "default"; phy-handle = <&phy1>; @@ -518,7 +520,7 @@ }; &du { - pinctrl-0 = <&du0_pins &du1_pins>; + pinctrl-0 = <&du0_pins>, <&du1_pins>; pinctrl-names = "default"; status = "okay"; @@ -541,7 +543,7 @@ }; &rcar_sound { - pinctrl-0 = <&ssi_pins &audio_clk_pins>; + pinctrl-0 = <&ssi_pins>, <&audio_clk_pins>; pinctrl-names = "default"; status = "okay"; diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi index cd5e2904068a..330dc516ecd1 100644 --- a/arch/arm/boot/dts/r8a7794.dtsi +++ b/arch/arm/boot/dts/r8a7794.dtsi @@ -256,7 +256,7 @@ apmu@e6151000 { compatible = "renesas,r8a7794-apmu", "renesas,apmu"; reg = <0 0xe6151000 0 0x188>; - cpus = <&cpu0 &cpu1>; + cpus = <&cpu0>, <&cpu1>; }; rst: reset-controller@e6160000 { diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi index 47a787a12e55..e24230d50a78 100644 --- a/arch/arm/boot/dts/rk3036.dtsi +++ b/arch/arm/boot/dts/rk3036.dtsi @@ -355,7 +355,6 @@ reg = <0x20050000 0x10>; #pwm-cells = <3>; clocks = <&cru PCLK_PWM>; - clock-names = "pwm"; pinctrl-names = "default"; pinctrl-0 = <&pwm0_pin>; status = "disabled"; @@ -366,7 +365,6 @@ reg = <0x20050010 0x10>; #pwm-cells = <3>; clocks = <&cru PCLK_PWM>; - clock-names = "pwm"; pinctrl-names = "default"; pinctrl-0 = <&pwm1_pin>; status = "disabled"; @@ -377,7 +375,6 @@ reg = <0x20050020 0x10>; #pwm-cells = <3>; clocks = <&cru 
PCLK_PWM>; - clock-names = "pwm"; pinctrl-names = "default"; pinctrl-0 = <&pwm2_pin>; status = "disabled"; @@ -388,7 +385,6 @@ reg = <0x20050030 0x10>; #pwm-cells = <2>; clocks = <&cru PCLK_PWM>; - clock-names = "pwm"; pinctrl-names = "default"; pinctrl-0 = <&pwm3_pin>; status = "disabled"; diff --git a/arch/arm/boot/dts/rk3228-evb.dts b/arch/arm/boot/dts/rk3228-evb.dts index aed879db6c15..69a5e239ed1a 100644 --- a/arch/arm/boot/dts/rk3228-evb.dts +++ b/arch/arm/boot/dts/rk3228-evb.dts @@ -8,6 +8,10 @@ model = "Rockchip RK3228 Evaluation board"; compatible = "rockchip,rk3228-evb", "rockchip,rk3228"; + aliases { + mmc0 = &emmc; + }; + memory@60000000 { device_type = "memory"; reg = <0x60000000 0x40000000>; diff --git a/arch/arm/boot/dts/rk3229-evb.dts b/arch/arm/boot/dts/rk3229-evb.dts index 350497a3ca86..797476e8bef1 100644 --- a/arch/arm/boot/dts/rk3229-evb.dts +++ b/arch/arm/boot/dts/rk3229-evb.dts @@ -9,6 +9,10 @@ model = "Rockchip RK3229 Evaluation board"; compatible = "rockchip,rk3229-evb", "rockchip,rk3229"; + aliases { + mmc0 = &emmc; + }; + memory@60000000 { device_type = "memory"; reg = <0x60000000 0x40000000>; diff --git a/arch/arm/boot/dts/rk3229-xms6.dts b/arch/arm/boot/dts/rk3229-xms6.dts index 263393ac4fa6..7bfbfd11fb55 100644 --- a/arch/arm/boot/dts/rk3229-xms6.dts +++ b/arch/arm/boot/dts/rk3229-xms6.dts @@ -9,6 +9,12 @@ model = "Mecer Xtreme Mini S6"; compatible = "mecer,xms6", "rockchip,rk3229"; + aliases { + mmc0 = &sdmmc; + mmc1 = &sdio; + mmc2 = &emmc; + }; + memory@60000000 { device_type = "memory"; reg = <0x60000000 0x40000000>; diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi index a4dd50aaf3fc..208f21245095 100644 --- a/arch/arm/boot/dts/rk322x.dtsi +++ b/arch/arm/boot/dts/rk322x.dtsi @@ -14,9 +14,6 @@ interrupt-parent = <&gic>; aliases { - mmc0 = &sdmmc; - mmc1 = &sdio; - mmc2 = &emmc; serial0 = &uart0; serial1 = &uart1; serial2 = &uart2; @@ -370,7 +367,7 @@ }; wdt: watchdog@110a0000 { - compatible = "snps,dw-wdt"; + compatible = "rockchip,rk3228-wdt", "snps,dw-wdt"; reg = <0x110a0000 0x100>; interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>; clocks = <&cru PCLK_CPU>; diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index ea7416c31f9b..05557ad02b33 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@ -679,7 +679,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm0_pin>; clocks = <&cru PCLK_RKPWM>; - clock-names = "pwm"; status = "disabled"; }; @@ -690,7 +689,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm1_pin>; clocks = <&cru PCLK_RKPWM>; - clock-names = "pwm"; status = "disabled"; }; @@ -701,7 +699,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm2_pin>; clocks = <&cru PCLK_RKPWM>; - clock-names = "pwm"; status = "disabled"; }; @@ -712,7 +709,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm3_pin>; clocks = <&cru PCLK_RKPWM>; - clock-names = "pwm"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/rv1108-elgin-r1.dts b/arch/arm/boot/dts/rv1108-elgin-r1.dts index b1db924710c8..f62c9f7af79d 100644 --- a/arch/arm/boot/dts/rv1108-elgin-r1.dts +++ b/arch/arm/boot/dts/rv1108-elgin-r1.dts @@ -12,6 +12,10 @@ model = "Elgin RV1108 R1 board"; compatible = "elgin,rv1108-r1", "rockchip,rv1108"; + aliases { + mmc0 = &emmc; + }; + memory@60000000 { device_type = "memory"; reg = <0x60000000 0x08000000>; diff --git a/arch/arm/boot/dts/rv1108-evb.dts b/arch/arm/boot/dts/rv1108-evb.dts index 30f3d0470ad9..fe5fc9bf75c9 100644 --- a/arch/arm/boot/dts/rv1108-evb.dts +++ 
b/arch/arm/boot/dts/rv1108-evb.dts @@ -8,6 +8,10 @@ model = "Rockchip RV1108 Evaluation board"; compatible = "rockchip,rv1108-evb", "rockchip,rv1108"; + aliases { + mmc0 = &sdmmc; + }; + memory@60000000 { device_type = "memory"; reg = <0x60000000 0x08000000>; diff --git a/arch/arm/boot/dts/rv1108.dtsi b/arch/arm/boot/dts/rv1108.dtsi index 7319a2473b80..884872ca5207 100644 --- a/arch/arm/boot/dts/rv1108.dtsi +++ b/arch/arm/boot/dts/rv1108.dtsi @@ -19,9 +19,6 @@ i2c1 = &i2c1; i2c2 = &i2c2; i2c3 = &i2c3; - mmc0 = &emmc; - mmc1 = &sdio; - mmc2 = &sdmmc; serial0 = &uart0; serial1 = &uart1; serial2 = &uart2; @@ -303,11 +300,10 @@ }; watchdog: watchdog@10360000 { - compatible = "snps,dw-wdt"; + compatible = "rockchip,rv1108-wdt", "snps,dw-wdt"; reg = <0x10360000 0x100>; interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>; clocks = <&cru PCLK_WDT>; - clock-names = "pclk_wdt"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/s5pv210-fascinate4g.dts b/arch/arm/boot/dts/s5pv210-fascinate4g.dts index ca064359dd30..b47d8300e536 100644 --- a/arch/arm/boot/dts/s5pv210-fascinate4g.dts +++ b/arch/arm/boot/dts/s5pv210-fascinate4g.dts @@ -115,7 +115,7 @@ compatible = "maxim,max77836-battery"; interrupt-parent = <&gph3>; - interrupts = <3 IRQ_TYPE_EDGE_FALLING>; + interrupts = <3 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&fg_irq>; diff --git a/arch/arm/boot/dts/sam9x60.dtsi b/arch/arm/boot/dts/sam9x60.dtsi index 84066c1298df..ec45ced3cde6 100644 --- a/arch/arm/boot/dts/sam9x60.dtsi +++ b/arch/arm/boot/dts/sam9x60.dtsi @@ -606,6 +606,15 @@ compatible = "microchip,sam9x60-pinctrl", "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus"; ranges = <0xfffff400 0xfffff400 0x800>; + /* mux-mask corresponding to sam9x60 SoC in TFBGA228L package */ + atmel,mux-mask = < + /* A B C */ + 0xffffffff 0xffe03fff 0xef00019d /* pioA */ + 0x03ffffff 0x02fc7e7f 0x00780000 /* pioB */ + 0xffffffff 0xffffffff 0xf83fffff /* pioC */ + 0x003fffff 0x003f8000 0x00000000 /* pioD */ + >; + pioA: gpio@fffff400 { compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio"; reg = <0xfffff400 0x200>; diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi index 2c4952427296..801969c113d6 100644 --- a/arch/arm/boot/dts/sama5d2.dtsi +++ b/arch/arm/boot/dts/sama5d2.dtsi @@ -40,7 +40,7 @@ interrupts = <2 IRQ_TYPE_LEVEL_HIGH 0>; }; - etb { + etb@740000 { compatible = "arm,coresight-etb10", "arm,primecell"; reg = <0x740000 0x1000>; @@ -56,9 +56,9 @@ }; }; - etm { + etm@73c000 { compatible = "arm,coresight-etm3x", "arm,primecell"; - reg = <0x73C000 0x1000>; + reg = <0x73c000 0x1000>; clocks = <&pmc PMC_TYPE_CORE PMC_MCK>; clock-names = "apb_pclk"; diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi index 7c979652f330..d1841bffe3c5 100644 --- a/arch/arm/boot/dts/sama5d3.dtsi +++ b/arch/arm/boot/dts/sama5d3.dtsi @@ -709,7 +709,7 @@ atmel,pins = <AT91_PIOD 5 AT91_PERIPH_A AT91_PINCTRL_PULL_UP /* PD5 periph A MCI0_DA4 with pullup, conflicts with TIOA0, PWMH2 */ AT91_PIOD 6 AT91_PERIPH_A AT91_PINCTRL_PULL_UP /* PD6 periph A MCI0_DA5 with pullup, conflicts with TIOB0, PWML2 */ - AT91_PIOD 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP /* PD7 periph A MCI0_DA6 with pullup, conlicts with TCLK0, PWMH3 */ + AT91_PIOD 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP /* PD7 periph A MCI0_DA6 with pullup, conflicts with TCLK0, PWMH3 */ AT91_PIOD 8 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>; /* PD8 periph A MCI0_DA7 with pullup, conflicts with PWML3 */ }; }; diff --git 
a/arch/arm/boot/dts/ste-ab8500.dtsi b/arch/arm/boot/dts/ste-ab8500.dtsi index 4fd09997a2b9..a16a00fb5fa5 100644 --- a/arch/arm/boot/dts/ste-ab8500.dtsi +++ b/arch/arm/boot/dts/ste-ab8500.dtsi @@ -317,8 +317,8 @@ // supplies to the display/camera ab8500_ldo_aux1_reg: ab8500_ldo_aux1 { - regulator-min-microvolt = <2500000>; - regulator-max-microvolt = <2900000>; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <3300000>; regulator-boot-on; /* BUG: If turned off MMC will be affected. */ regulator-always-on; diff --git a/arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi index e024520f4d47..8d59202cebd6 100644 --- a/arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi +++ b/arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi @@ -1,14 +1,89 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Device Tree for the TVK1281618 R2 UIB + * Device Tree for the TVK1281618 R2 user interface board (UIB) */ -#include "ste-href-tvk1281618.dtsi" +#include <dt-bindings/interrupt-controller/irq.h> +#include <dt-bindings/input/input.h> / { + gpio_keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + vdd-supply = <&ab8500_ldo_aux1_reg>; + pinctrl-names = "default"; + pinctrl-0 = <&prox_tvk_mode>, <&hall_tvk_mode>; + + button@139 { + /* Proximity sensor */ + gpios = <&gpio6 25 GPIO_ACTIVE_HIGH>; + linux,code = <11>; /* SW_FRONT_PROXIMITY */ + label = "SFH7741 Proximity Sensor"; + }; + button@145 { + /* Hall sensor */ + gpios = <&gpio4 17 GPIO_ACTIVE_HIGH>; + linux,code = <0>; /* SW_LID */ + label = "HED54XXU11 Hall Effect Sensor"; + }; + }; + soc { + i2c@80004000 { + tc35893@44 { + compatible = "toshiba,tc35893"; + reg = <0x44>; + interrupt-parent = <&gpio6>; + interrupts = <26 IRQ_TYPE_EDGE_RISING>; + pinctrl-names = "default"; + pinctrl-0 = <&tc35893_tvk_mode>; + + interrupt-controller; + #interrupt-cells = <1>; + status = "disabled"; + + tc3589x_gpio { + compatible = "toshiba,tc3589x-gpio"; + interrupts = <0>; + + interrupt-controller; + #interrupt-cells = <2>; + gpio-controller; + #gpio-cells = <2>; + }; + tc3589x_keypad { + compatible = "toshiba,tc3589x-keypad"; + interrupts = <6>; + debounce-delay-ms = <4>; + keypad,num-columns = <8>; + keypad,num-rows = <8>; + linux,no-autorepeat; + wakeup-source; + linux,keymap = <MATRIX_KEY(3, 1, KEY_END)>, + <MATRIX_KEY(4, 1, KEY_HOME)>, + <MATRIX_KEY(6, 4, KEY_VOLUMEDOWN)>, + <MATRIX_KEY(4, 2, KEY_EMAIL)>, + <MATRIX_KEY(3, 3, KEY_RIGHT)>, + <MATRIX_KEY(2, 5, KEY_BACKSPACE)>, + <MATRIX_KEY(6, 7, KEY_MENU)>, + <MATRIX_KEY(5, 0, KEY_ENTER)>, + <MATRIX_KEY(4, 3, KEY_0)>, + <MATRIX_KEY(3, 4, KEY_DOT)>, + <MATRIX_KEY(5, 2, KEY_UP)>, + <MATRIX_KEY(3, 5, KEY_DOWN)>, + <MATRIX_KEY(4, 5, KEY_SEND)>, + <MATRIX_KEY(0, 5, KEY_BACK)>, + <MATRIX_KEY(6, 2, KEY_VOLUMEUP)>, + <MATRIX_KEY(1, 3, KEY_SPACE)>, + <MATRIX_KEY(7, 6, KEY_LEFT)>, + <MATRIX_KEY(5, 5, KEY_SEARCH)>; + }; + }; + }; + i2c@80128000 { - lsm303dlh@18 { + accelerometer@18 { /* Accelerometer */ compatible = "st,lsm303dlh-accel"; st,drdy-int-pin = <1>; @@ -30,7 +105,7 @@ * <&gpio2 19 IRQ_TYPE_EDGE_FALLING>; */ }; - lsm303dlh@1e { + magnetometer@1e { /* Magnetometer */ compatible = "st,lsm303dlh-magn"; reg = <0x1e>; @@ -48,7 +123,7 @@ * <&gpio2 19 IRQ_TYPE_EDGE_FALLING>; */ }; - lis331dl@1c { + accelerometer@1c { /* Accelerometer */ compatible = "st,lis331dl-accel"; st,drdy-int-pin = <1>; @@ -62,6 +137,72 @@ interrupts = <18 IRQ_TYPE_EDGE_RISING>, <19 IRQ_TYPE_EDGE_RISING>; }; + magnetometer@f { + /* Magnetometer */ + compatible = 
"asahi-kasei,ak8974"; + reg = <0x0f>; + avdd-supply = <&ab8500_ldo_aux1_reg>; + dvdd-supply = <&db8500_vsmps2_reg>; + pinctrl-names = "default"; + pinctrl-0 = <&gyro_magn_tvk_mode>; + /* + * These interrupts cannot be used: the other component + * ST-Micro L3D4200D gyro that is connected to the same lines + * cannot set its DRDY line to open drain, so it cannot be + * shared with other peripherals. The should be defined for + * the falling edge if they could be wired together. + * + * interrupts-extended = + * <&gpio1 0 IRQ_TYPE_EDGE_FALLING>, + * <&gpio0 31 IRQ_TYPE_EDGE_FALLING>; + */ + }; + gyroscope@68 { + /* Gyroscope */ + compatible = "st,l3g4200d-gyro"; + st,drdy-int-pin = <2>; + reg = <0x68>; + vdd-supply = <&ab8500_ldo_aux1_reg>; + vddio-supply = <&db8500_vsmps2_reg>; + pinctrl-names = "default"; + pinctrl-0 = <&gyro_magn_tvk_mode>; + interrupts-extended = + <&gpio1 0 IRQ_TYPE_EDGE_RISING>, + <&gpio0 31 IRQ_TYPE_EDGE_RISING>; + }; + pressure@5c { + /* Barometer/pressure sensor */ + compatible = "st,lps001wp-press"; + reg = <0x5c>; + vdd-supply = <&ab8500_ldo_aux1_reg>; + vddio-supply = <&db8500_vsmps2_reg>; + }; + }; + i2c@80110000 { + synaptics@4b { + /* Synaptics RMI4 TM1217 touchscreen */ + compatible = "syna,rmi4-i2c"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x4b>; + vdd-supply = <&ab8500_ldo_aux1_reg>; + vddio-supply = <&db8500_vsmps2_reg>; + pinctrl-names = "default"; + pinctrl-0 = <&synaptics_tvk_mode>; + interrupt-parent = <&gpio2>; + interrupts = <20 IRQ_TYPE_EDGE_FALLING>; + + rmi4-f01@1 { + reg = <0x1>; + syna,nosleep = <1>; + }; + rmi4-f11@11 { + reg = <0x11>; + syna,sensor-type = <1>; + /* This is a landscape display */ + touchscreen-swapped-x-y; + }; + }; }; mcde@a0350000 { status = "okay"; @@ -75,5 +216,68 @@ }; }; }; + pinctrl { + prox { + prox_tvk_mode: prox_tvk { + tvk_cfg { + pins = "GPIO217_AH12"; + ste,config = <&gpio_in_pu>; + }; + }; + }; + hall { + hall_tvk_mode: hall_tvk { + tvk_cfg { + pins = "GPIO145_C13"; + ste,config = <&gpio_in_pu>; + }; + }; + }; + tc35893 { + /* IRQ from the TC35893 */ + tc35893_tvk_mode: tc35893_tvk { + tvk_cfg { + pins = "GPIO218_AH11"; + ste,config = <&gpio_in_pu>; + }; + }; + }; + accelerometer { + accel_tvk_mode: accel_tvk { + /* Accelerometer interrupt lines 1 & 2 */ + tvk_cfg { + pins = "GPIO82_C1", "GPIO83_D3"; + ste,config = <&gpio_in_pd>; + }; + }; + }; + gyroscope { + /* + * These lines are shared between Gyroscope l3g400dh + * and AK8974 magnetometer. 
+ */ + gyro_magn_tvk_mode: gyro_magn_tvk { + /* GPIO 31 used for INT pull down the line */ + tvk_cfg1 { + pins = "GPIO31_V3"; + ste,config = <&gpio_in_pd>; + }; + /* GPIO 32 used for DRDY, pull this down */ + tvk_cfg2 { + pins = "GPIO32_V2"; + ste,config = <&gpio_in_pd>; + }; + }; + }; + synaptics { + synaptics_tvk_mode: synaptics_tvk { + /* Touchscreen uses GPIO 84 */ + tvk_cfg1 { + pins = "GPIO84_C2"; + ste,config = <&gpio_in_pu>; + }; + }; + }; + }; }; }; diff --git a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi index cb3677f0a1cb..70f058352efc 100644 --- a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi +++ b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi @@ -1,44 +1,152 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Device Tree for the TVK1281618 R2 UIB + * Device Tree for the TVK1281618 R3 user interface board (UIB) + * also known as the "CYTTSP board" */ -#include "ste-href-tvk1281618.dtsi" +#include <dt-bindings/interrupt-controller/irq.h> +#include <dt-bindings/input/input.h> / { + gpio_keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + vdd-supply = <&ab8500_ldo_aux1_reg>; + pinctrl-names = "default"; + pinctrl-0 = <&hall_tvk_mode>; + + button@145 { + /* Hall sensor */ + gpios = <&gpio4 17 GPIO_ACTIVE_HIGH>; + linux,code = <0>; /* SW_LID */ + label = "HED54XXU11 Hall Effect Sensor"; + }; + }; + soc { - i2c@80128000 { - /* Marked: - * 129 - * M35 - * L3GD20 - */ - l3gd20@6a { - /* Gyroscope */ - compatible = "st,l3gd20"; + i2c@80004000 { + tc35893@44 { + compatible = "toshiba,tc35893"; + reg = <0x44>; + interrupt-parent = <&gpio2>; + interrupts = <0 IRQ_TYPE_EDGE_RISING>; + pinctrl-names = "default"; + pinctrl-0 = <&tc35893_tvk_mode>; + + interrupt-controller; + #interrupt-cells = <1>; status = "disabled"; + + tc3589x_gpio { + compatible = "toshiba,tc3589x-gpio"; + interrupts = <0>; + + interrupt-controller; + #interrupt-cells = <2>; + gpio-controller; + #gpio-cells = <2>; + }; + tc3589x_keypad { + compatible = "toshiba,tc3589x-keypad"; + interrupts = <6>; + debounce-delay-ms = <4>; + keypad,num-columns = <8>; + keypad,num-rows = <8>; + linux,no-autorepeat; + wakeup-source; + linux,keymap = <MATRIX_KEY(3, 1, KEY_END)>, + <MATRIX_KEY(4, 1, KEY_HOME)>, + <MATRIX_KEY(6, 4, KEY_VOLUMEDOWN)>, + <MATRIX_KEY(4, 2, KEY_EMAIL)>, + <MATRIX_KEY(3, 3, KEY_RIGHT)>, + <MATRIX_KEY(2, 5, KEY_BACKSPACE)>, + <MATRIX_KEY(6, 7, KEY_MENU)>, + <MATRIX_KEY(5, 0, KEY_ENTER)>, + <MATRIX_KEY(4, 3, KEY_0)>, + <MATRIX_KEY(3, 4, KEY_DOT)>, + <MATRIX_KEY(5, 2, KEY_UP)>, + <MATRIX_KEY(3, 5, KEY_DOWN)>, + <MATRIX_KEY(4, 5, KEY_SEND)>, + <MATRIX_KEY(0, 5, KEY_BACK)>, + <MATRIX_KEY(6, 2, KEY_VOLUMEUP)>, + <MATRIX_KEY(1, 3, KEY_SPACE)>, + <MATRIX_KEY(7, 6, KEY_LEFT)>, + <MATRIX_KEY(5, 5, KEY_SEARCH)>; + }; + }; + }; + + i2c@80128000 { + accelerometer@19 { + compatible = "st,lsm303dlhc-accel"; st,drdy-int-pin = <1>; - drive-open-drain; - reg = <0x6a>; // 0x6a or 0x6b + reg = <0x19>; vdd-supply = <&ab8500_ldo_aux1_reg>; vddio-supply = <&db8500_vsmps2_reg>; + interrupt-parent = <&gpio2>; + interrupts = <18 IRQ_TYPE_EDGE_RISING>, + <19 IRQ_TYPE_EDGE_RISING>; + pinctrl-names = "default"; + pinctrl-0 = <&accel_tvk_mode>; }; - /* - * Marked: - * 2122 - * C3H - * DQEEE - * LIS3DH? 
- */ - lis3dh@18 { - /* Accelerometer */ - compatible = "st,lis3dh-accel"; + magnetometer@1e { + compatible = "st,lsm303dlm-magn"; st,drdy-int-pin = <1>; - reg = <0x18>; + reg = <0x1e>; vdd-supply = <&ab8500_ldo_aux1_reg>; vddio-supply = <&db8500_vsmps2_reg>; + // This interrupt is not properly working with the driver + // interrupt-parent = <&gpio1>; + // interrupts = <0 IRQ_TYPE_EDGE_RISING>; pinctrl-names = "default"; - pinctrl-0 = <&accel_tvk_mode>; + pinctrl-0 = <&magn_tvk_mode>; + }; + gyroscope@68 { + /* Gyroscope */ + compatible = "st,l3g4200d-gyro"; + reg = <0x68>; + vdd-supply = <&ab8500_ldo_aux1_reg>; + vddio-supply = <&db8500_vsmps2_reg>; + }; + pressure@5c { + /* Barometer/pressure sensor */ + compatible = "st,lps001wp-press"; + reg = <0x5c>; + vdd-supply = <&ab8500_ldo_aux1_reg>; + vddio-supply = <&db8500_vsmps2_reg>; + }; + }; + + spi@80111000 { + num-cs = <1>; + cs-gpios = <&gpio6 24 GPIO_ACTIVE_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&spi2_default_mode>; + status = "okay"; + + touchscreen@0 { + compatible = "cypress,cy8ctma340"; + /* + * Actually the max frequency is 6 MHz, but over 2 MHz the + * data rate needs to be restricted to max 2Mbps which the + * SPI framework cannot handle. + */ + spi-max-frequency = <2000000>; + reg = <0>; + interrupt-parent = <&gpio2>; + interrupts = <20 IRQ_TYPE_EDGE_FALLING>; + vcpin-supply = <&ab8500_ldo_aux1_reg>; + vdd-supply = <&db8500_vsmps2_reg>; + reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; + touchscreen-size-x = <480>; + touchscreen-size-y = <854>; + active-interval-ms = <0>; + touch-timeout-ms = <255>; + lowpower-interval-ms = <10>; + bootloader-key = /bits/ 8 <0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07>; + pinctrl-names = "default"; + pinctrl-0 = <&cyttsp_tvk_mode>; }; }; @@ -54,5 +162,57 @@ }; }; }; + + pinctrl { + hall { + hall_tvk_mode: hall_tvk { + tvk_cfg { + pins = "GPIO145_C13"; + ste,config = <&gpio_in_pu>; + }; + }; + }; + tc35893 { + /* IRQ from the TC35893 */ + tc35893_tvk_mode: tc35893_tvk { + tvk_cfg { + pins = "GPIO64_F3"; + ste,config = <&gpio_in_pu>; + }; + }; + }; + accelerometer { + accel_tvk_mode: accel_tvk { + /* Accelerometer interrupt lines 1 & 2 */ + tvk_cfg { + pins = "GPIO82_C1", "GPIO83_D3"; + ste,config = <&gpio_in_pd>; + }; + }; + }; + magnetometer { + magn_tvk_mode: magn_tvk { + /* GPIO 32 used for DRDY, pull this down */ + tvk_cfg { + pins = "GPIO32_V2"; + ste,config = <&gpio_in_pd>; + }; + }; + }; + cyttsp { + cyttsp_tvk_mode: cyttsp_tvk { + /* Touchscreen uses GPIO84 for IRQ */ + tvk_cfg1 { + pins = "GPIO84_C2"; + ste,config = <&gpio_in_pu>; + }; + /* GPIO143 is reset */ + tvk_cfg2 { + pins = "GPIO143_D12"; + ste,config = <&gpio_out_hi>; + }; + }; + }; + }; }; }; diff --git a/arch/arm/boot/dts/ste-href-tvk1281618.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618.dtsi deleted file mode 100644 index e1dbfae22595..000000000000 --- a/arch/arm/boot/dts/ste-href-tvk1281618.dtsi +++ /dev/null @@ -1,218 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2012 ST-Ericsson AB - * - * Device Tree for the TVK1281618 family of UIBs - */ - -#include <dt-bindings/interrupt-controller/irq.h> - -/ { - gpio_keys { - compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; - vdd-supply = <&ab8500_ldo_aux1_reg>; - pinctrl-names = "default"; - pinctrl-0 = <&prox_tvk_mode>, <&hall_tvk_mode>; - - button@139 { - /* Proximity sensor */ - gpios = <&gpio6 25 GPIO_ACTIVE_HIGH>; - linux,code = <11>; /* SW_FRONT_PROXIMITY */ - label = "SFH7741 Proximity Sensor"; - }; - button@145 { - /* Hall 
sensor */ - gpios = <&gpio4 17 GPIO_ACTIVE_HIGH>; - linux,code = <0>; /* SW_LID */ - label = "HED54XXU11 Hall Effect Sensor"; - }; - }; - - soc { - i2c@80004000 { - tc35893@44 { - compatible = "toshiba,tc35893"; - reg = <0x44>; - interrupt-parent = <&gpio6>; - interrupts = <26 IRQ_TYPE_EDGE_RISING>; - pinctrl-names = "default"; - pinctrl-0 = <&tc35893_tvk_mode>; - - interrupt-controller; - #interrupt-cells = <1>; - - tc3589x_gpio { - compatible = "toshiba,tc3589x-gpio"; - interrupts = <0>; - - interrupt-controller; - #interrupt-cells = <2>; - gpio-controller; - #gpio-cells = <2>; - }; - tc3589x_keypad { - compatible = "toshiba,tc3589x-keypad"; - interrupts = <6>; - debounce-delay-ms = <4>; - keypad,num-columns = <8>; - keypad,num-rows = <8>; - linux,no-autorepeat; - wakeup-source; - linux,keymap = <0x0301006b - 0x04010066 - 0x06040072 - 0x040200d7 - 0x0303006a - 0x0205000e - 0x0607008b - 0x0500001c - 0x0403000b - 0x03040034 - 0x05020067 - 0x0305006c - 0x040500e7 - 0x0005009e - 0x06020073 - 0x01030039 - 0x07060069 - 0x050500d9>; - }; - }; - }; - /* Sensors mounted on all board variants */ - i2c@80128000 { - ak8974@f { - /* Magnetometer */ - compatible = "asahi-kasei,ak8974"; - reg = <0x0f>; - avdd-supply = <&ab8500_ldo_aux1_reg>; - dvdd-supply = <&db8500_vsmps2_reg>; - pinctrl-names = "default"; - pinctrl-0 = <&gyro_magn_tvk_mode>; - /* - * These interrupts cannot be used: the other component - * ST-Micro L3D4200D gyro that is connected to the same lines - * cannot set its DRDY line to open drain, so it cannot be - * shared with other peripherals. The should be defined for - * the falling edge if they could be wired together. - * - * interrupts-extended = - * <&gpio1 0 IRQ_TYPE_EDGE_FALLING>, - * <&gpio0 31 IRQ_TYPE_EDGE_FALLING>; - */ - }; - l3g4200d@68 { - /* Gyroscope */ - compatible = "st,l3g4200d-gyro"; - st,drdy-int-pin = <2>; - reg = <0x68>; - vdd-supply = <&ab8500_ldo_aux1_reg>; - vddio-supply = <&db8500_vsmps2_reg>; - pinctrl-names = "default"; - pinctrl-0 = <&gyro_magn_tvk_mode>; - interrupts-extended = - <&gpio1 0 IRQ_TYPE_EDGE_RISING>, - <&gpio0 31 IRQ_TYPE_EDGE_RISING>; - }; - lsp001wm@5c { - /* Barometer/pressure sensor */ - compatible = "st,lps001wp-press"; - reg = <0x5c>; - vdd-supply = <&ab8500_ldo_aux1_reg>; - vddio-supply = <&db8500_vsmps2_reg>; - }; - }; - - i2c@80110000 { - synaptics@4b { - /* Synaptics RMI4 TM1217 touchscreen */ - compatible = "syna,rmi4-i2c"; - #address-cells = <1>; - #size-cells = <0>; - reg = <0x4b>; - vdd-supply = <&ab8500_ldo_aux1_reg>; - vddio-supply = <&db8500_vsmps2_reg>; - pinctrl-names = "default"; - pinctrl-0 = <&synaptics_tvk_mode>; - interrupt-parent = <&gpio2>; - interrupts = <20 IRQ_TYPE_EDGE_FALLING>; - - rmi-f01@1 { - reg = <0x1>; - syna,nosleep = <1>; - }; - rmi-f11@11 { - reg = <0x11>; - touchscreen-inverted-x; - syna,sensor-type = <1>; - }; - }; - }; - - pinctrl { - /* Pull up this GPIO pin */ - tc35893 { - tc35893_tvk_mode: tc35893_tvk { - tvk_cfg { - pins = "GPIO218_AH11"; - ste,config = <&gpio_in_pu>; - }; - }; - }; - prox { - prox_tvk_mode: prox_tvk { - tvk_cfg { - pins = "GPIO217_AH12"; - ste,config = <&gpio_in_pu>; - }; - }; - }; - hall { - hall_tvk_mode: hall_tvk { - tvk_cfg { - pins = "GPIO145_C13"; - ste,config = <&gpio_in_pu>; - }; - }; - }; - accelerometer { - accel_tvk_mode: accel_tvk { - /* Accelerometer interrupt lines 1 & 2 */ - tvk_cfg { - pins = "GPIO82_C1", "GPIO83_D3"; - ste,config = <&gpio_in_pd>; - }; - }; - }; - gyroscope { - /* - * These lines are shared between Gyroscope l3g400dh - * and AK8974 magnetometer. 
- */ - gyro_magn_tvk_mode: gyro_magn_tvk { - /* GPIO 31 used for INT pull down the line */ - tvk_cfg1 { - pins = "GPIO31_V3"; - ste,config = <&gpio_in_pd>; - }; - /* GPIO 32 used for DRDY, pull this down */ - tvk_cfg2 { - pins = "GPIO32_V2"; - ste,config = <&gpio_in_pd>; - }; - }; - }; - synaptics { - synaptics_tvk_mode: synaptics_tvk { - /* Touchscreen uses GPIO 84 */ - tvk_cfg1 { - pins = "GPIO84_C2"; - ste,config = <&gpio_in_pu>; - }; - }; - }; - }; - }; -}; diff --git a/arch/arm/boot/dts/ste-href520-tvk.dts b/arch/arm/boot/dts/ste-href520-tvk.dts index a036a03f6718..4201547c5988 100644 --- a/arch/arm/boot/dts/ste-href520-tvk.dts +++ b/arch/arm/boot/dts/ste-href520-tvk.dts @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Device Tree for the HREF520 version with the TVK1281618 UIB + * Device Tree for the HREF520 version with the TVK1281618 R3 UIB */ /dts-v1/; @@ -9,7 +9,7 @@ #include "ste-href-tvk1281618-r3.dtsi" / { - model = "ST-Ericsson HREF520 and TVK1281618 UIB"; + model = "ST-Ericsson HREF520 and TVK1281618 R3 UIB"; compatible = "st-ericsson,href520", "st-ericsson,u8500"; diff --git a/arch/arm/boot/dts/ste-hrefprev60-tvk.dts b/arch/arm/boot/dts/ste-hrefprev60-tvk.dts index 4e6e4439dcff..75506339a93c 100644 --- a/arch/arm/boot/dts/ste-hrefprev60-tvk.dts +++ b/arch/arm/boot/dts/ste-hrefprev60-tvk.dts @@ -9,7 +9,7 @@ #include "ste-href-tvk1281618-r2.dtsi" / { - model = "ST-Ericsson HREF (pre-v60) and TVK1281618 UIB"; + model = "ST-Ericsson HREF (pre-v60) and TVK1281618 R2 UIB"; compatible = "st-ericsson,mop500", "st-ericsson,u8500"; /* ST6G3244ME level translator for 1.8/2.9 V */ diff --git a/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts b/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts index 9c2d2ee6d6d8..2db2f8be8b03 100644 --- a/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts +++ b/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts @@ -2,7 +2,7 @@ /* * Copyright 2012 ST-Ericsson AB * - * Device Tree for the HREF version 60 or later with the TVK1281618 UIB + * Device Tree for the HREF version 60 or later with the TVK1281618 R2 UIB */ /dts-v1/; @@ -11,7 +11,7 @@ #include "ste-href-tvk1281618-r2.dtsi" / { - model = "ST-Ericsson HREF (v60+) and TVK1281618 UIB"; + model = "ST-Ericsson HREF (v60+) and TVK1281618 R2 UIB"; compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500"; /* ST6G3244ME level translator for 1.8/2.9 V */ diff --git a/arch/arm/boot/dts/ste-ux500-samsung-janice.dts b/arch/arm/boot/dts/ste-ux500-samsung-janice.dts index 7411bfeda285..f24369873ce2 100644 --- a/arch/arm/boot/dts/ste-ux500-samsung-janice.dts +++ b/arch/arm/boot/dts/ste-ux500-samsung-janice.dts @@ -135,21 +135,22 @@ /* * This regulator is a GPIO line that drives the Broadcom WLAN - * line BT_VREG_EN high and enables the internal regulators - * inside the chip. + * line WL_REG_ON high and enables the internal regulators + * inside the chip. Unfortunatley it is erroneously named + * WLAN_RST_N on the schematic but it is not a reset line. * * The voltage specified here is only used to determine the OCR mask, * the for the SDIO connector, the chip is actually connected * directly to VBAT. 
*/ - wl_bt_reg: regulator-gpio-wlan { + wl_reg: regulator-gpio-wlan { compatible = "regulator-fixed"; - regulator-name = "BT_VREG_EN"; + regulator-name = "WL_REG_ON"; regulator-min-microvolt = <3000000>; regulator-max-microvolt = <3000000>; startup-delay-us = <100000>; - /* GPIO222 (BT_VREG_EN) */ - gpio = <&gpio6 30 GPIO_ACTIVE_HIGH>; + /* GPIO215 (WLAN_RST_N to WL_REG_ON) */ + gpio = <&gpio6 23 GPIO_ACTIVE_HIGH>; enable-active-high; pinctrl-names = "default"; pinctrl-0 = <&wlan_ldo_en_default>; @@ -390,11 +391,10 @@ pinctrl-1 = <&mc1_a_2_sleep>; /* * GPIO-controlled voltage enablement: this drives - * the BT_VREG_EN line high when we use this device. - * Represented as regulator to fill OCR mask and to - * be usable in parallel with the Bluetooth chip. + * the WL_REG_ON line high when we use this device. + * Represented as regulator to fill OCR mask. */ - vmmc-supply = <&wl_bt_reg>; + vmmc-supply = <&wl_reg>; #address-cells = <1>; #size-cells = <0>; @@ -408,9 +408,6 @@ interrupt-parent = <&gpio6>; interrupts = <24 IRQ_TYPE_EDGE_FALLING>; interrupt-names = "host-wake"; - /* GPIO215 WLAN_RST_N */ - /* FIXME: kernel does not use this assert/deassert */ - reset-gpios = <&gpio6 23 GPIO_ACTIVE_LOW>; pinctrl-names = "default"; pinctrl-0 = <&wlan_default_mode>; }; @@ -440,15 +437,8 @@ bluetooth { compatible = "brcm,bcm4330-bt"; - /* - * We actually have shutdown-gpios, BT_VREG_EN on GPIO222, - * but since this GPIO is shared with the WLAN chip, we need - * to reference the regulator instead. The regulator - * framework will reference count the GPIO usage and - * make sure we can use the same GPIO for several supplies. - */ - // shutdown-gpios = <&gpio6 30 GPIO_ACTIVE_HIGH>; - vbat-supply = <&wl_bt_reg>; + /* GPIO222 rail BT_VREG_EN to BT_REG_ON */ + shutdown-gpios = <&gpio6 30 GPIO_ACTIVE_HIGH>; /* BT_WAKE on GPIO199 */ device-wakeup-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>; /* BT_HOST_WAKE on GPIO97 */ @@ -759,9 +749,9 @@ /* GPIO that enables the WLAN internal LDO regulators */ wlan-ldo { wlan_ldo_en_default: wlan_ldo_default { - /* GPIO222 BT_VREG_ON */ + /* GPIO215 named WLAN_RST_N */ janice_cfg1 { - pins = "GPIO222_AJ9"; + pins = "GPIO215_AH13"; ste,config = <&gpio_out_lo>; }; }; @@ -875,11 +865,6 @@ }; wlan { wlan_default_mode: wlan_default { - /* GPIO215 used for RESET_N */ - janice_cfg1 { - pins = "GPIO215_AH13"; - ste,config = <&gpio_out_lo>; - }; /* GPIO216 for WL_HOST_WAKE */ janice_cfg2 { pins = "GPIO216_AG12"; @@ -889,14 +874,17 @@ }; bluetooth { bluetooth_default_mode: bluetooth_default { + /* GPIO199 BT_WAKE and GPIO222 BT_VREG_ON */ janice_cfg1 { - pins = "GPIO199_AH23"; + pins = "GPIO199_AH23", "GPIO222_AJ9"; ste,config = <&gpio_out_lo>; }; + /* GPIO97 BT_HOST_WAKE */ janice_cfg2 { pins = "GPIO97_D9"; ste,config = <&gpio_in_nopull>; }; + /* GPIO209 BT_RST_N */ janice_cfg3 { pins = "GPIO209_AG15"; ste,config = <&gpio_out_hi>; diff --git a/arch/arm/boot/dts/stm32h7-pinctrl.dtsi b/arch/arm/boot/dts/stm32h7-pinctrl.dtsi new file mode 100644 index 000000000000..aa1bc3e10a49 --- /dev/null +++ b/arch/arm/boot/dts/stm32h7-pinctrl.dtsi @@ -0,0 +1,275 @@ +/* + * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. 
+ * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <dt-bindings/pinctrl/stm32-pinfunc.h> + +&pinctrl { + + i2c1_pins_a: i2c1-0 { + pins { + pinmux = <STM32_PINMUX('B', 6, AF4)>, /* I2C1_SCL */ + <STM32_PINMUX('B', 7, AF4)>; /* I2C1_SDA */ + bias-disable; + drive-open-drain; + slew-rate = <0>; + }; + }; + + ethernet_rmii: rmii-0 { + pins { + pinmux = <STM32_PINMUX('G', 11, AF11)>, + <STM32_PINMUX('G', 13, AF11)>, + <STM32_PINMUX('G', 12, AF11)>, + <STM32_PINMUX('C', 4, AF11)>, + <STM32_PINMUX('C', 5, AF11)>, + <STM32_PINMUX('A', 7, AF11)>, + <STM32_PINMUX('C', 1, AF11)>, + <STM32_PINMUX('A', 2, AF11)>, + <STM32_PINMUX('A', 1, AF11)>; + slew-rate = <2>; + }; + }; + + sdmmc1_b4_pins_a: sdmmc1-b4-0 { + pins { + pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1_D0 */ + <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1_D1 */ + <STM32_PINMUX('C', 10, AF12)>, /* SDMMC1_D2 */ + <STM32_PINMUX('C', 11, AF12)>, /* SDMMC1_D3 */ + <STM32_PINMUX('C', 12, AF12)>, /* SDMMC1_CK */ + <STM32_PINMUX('D', 2, AF12)>; /* SDMMC1_CMD */ + slew-rate = <3>; + drive-push-pull; + bias-disable; + }; + }; + + sdmmc1_b4_od_pins_a: sdmmc1-b4-od-0 { + pins1 { + pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1_D0 */ + <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1_D1 */ + <STM32_PINMUX('C', 10, AF12)>, /* SDMMC1_D2 */ + <STM32_PINMUX('C', 11, AF12)>, /* SDMMC1_D3 */ + <STM32_PINMUX('C', 12, AF12)>; /* SDMMC1_CK */ + slew-rate = <3>; + drive-push-pull; + bias-disable; + }; + pins2{ + pinmux = <STM32_PINMUX('D', 2, AF12)>; /* SDMMC1_CMD */ + slew-rate = <3>; + drive-open-drain; + bias-disable; + }; + }; + + sdmmc1_b4_sleep_pins_a: sdmmc1-b4-sleep-0 { + pins { + pinmux = <STM32_PINMUX('C', 8, ANALOG)>, /* SDMMC1_D0 */ + <STM32_PINMUX('C', 9, ANALOG)>, /* SDMMC1_D1 */ + <STM32_PINMUX('C', 10, ANALOG)>, /* SDMMC1_D2 */ + <STM32_PINMUX('C', 11, ANALOG)>, /* SDMMC1_D3 */ + <STM32_PINMUX('C', 12, ANALOG)>, /* SDMMC1_CK */ + <STM32_PINMUX('D', 2, ANALOG)>; /* SDMMC1_CMD 
*/ + }; + }; + + sdmmc1_dir_pins_a: sdmmc1-dir-0 { + pins1 { + pinmux = <STM32_PINMUX('C', 6, AF8)>, /* SDMMC1_D0DIR */ + <STM32_PINMUX('C', 7, AF8)>, /* SDMMC1_D123DIR */ + <STM32_PINMUX('B', 9, AF7)>; /* SDMMC1_CDIR */ + slew-rate = <3>; + drive-push-pull; + bias-pull-up; + }; + pins2{ + pinmux = <STM32_PINMUX('B', 8, AF7)>; /* SDMMC1_CKIN */ + bias-pull-up; + }; + }; + + sdmmc1_dir_sleep_pins_a: sdmmc1-dir-sleep-0 { + pins { + pinmux = <STM32_PINMUX('C', 6, ANALOG)>, /* SDMMC1_D0DIR */ + <STM32_PINMUX('C', 7, ANALOG)>, /* SDMMC1_D123DIR */ + <STM32_PINMUX('B', 9, ANALOG)>, /* SDMMC1_CDIR */ + <STM32_PINMUX('B', 8, ANALOG)>; /* SDMMC1_CKIN */ + }; + }; + + sdmmc2_b4_pins_a: sdmmc2-b4-0 { + pins { + pinmux = <STM32_PINMUX('B', 14, AF9)>, /* SDMMC1_D0 */ + <STM32_PINMUX('B', 15, AF9)>, /* SDMMC1_D1 */ + <STM32_PINMUX('B', 3, AF9)>, /* SDMMC1_D2 */ + <STM32_PINMUX('B', 4, AF9)>, /* SDMMC1_D3 */ + <STM32_PINMUX('D', 6, AF11)>, /* SDMMC1_CK */ + <STM32_PINMUX('D', 7, AF11)>; /* SDMMC1_CMD */ + slew-rate = <3>; + drive-push-pull; + bias-disable; + }; + }; + + sdmmc2_b4_od_pins_a: sdmmc2-b4-od-0 { + pins1 { + pinmux = <STM32_PINMUX('B', 14, AF9)>, /* SDMMC2_D0 */ + <STM32_PINMUX('B', 15, AF9)>, /* SDMMC1_D1 */ + <STM32_PINMUX('B', 3, AF9)>, /* SDMMC1_D2 */ + <STM32_PINMUX('B', 4, AF9)>, /* SDMMC1_D3 */ + <STM32_PINMUX('D', 6, AF11)>; /* SDMMC1_CK */ + slew-rate = <3>; + drive-push-pull; + bias-disable; + }; + pins2{ + pinmux = <STM32_PINMUX('D', 7, AF11)>; /* SDMMC1_CMD */ + slew-rate = <3>; + drive-open-drain; + bias-disable; + }; + }; + + sdmmc2_b4_sleep_pins_a: sdmmc2-b4-sleep-0 { + pins { + pinmux = <STM32_PINMUX('B', 14, ANALOG)>, /* SDMMC1_D0 */ + <STM32_PINMUX('B', 15, ANALOG)>, /* SDMMC1_D1 */ + <STM32_PINMUX('B', 3, ANALOG)>, /* SDMMC1_D2 */ + <STM32_PINMUX('B', 4, ANALOG)>, /* SDMMC1_D3 */ + <STM32_PINMUX('D', 6, ANALOG)>, /* SDMMC1_CK */ + <STM32_PINMUX('D', 7, ANALOG)>; /* SDMMC1_CMD */ + }; + }; + + spi1_pins: spi1-0 { + pins1 { + pinmux = <STM32_PINMUX('A', 5, AF5)>, + /* SPI1_CLK */ + <STM32_PINMUX('B', 5, AF5)>; + /* SPI1_MOSI */ + bias-disable; + drive-push-pull; + slew-rate = <2>; + }; + pins2 { + pinmux = <STM32_PINMUX('G', 9, AF5)>; + /* SPI1_MISO */ + bias-disable; + }; + }; + + uart4_pins: uart4-0 { + pins1 { + pinmux = <STM32_PINMUX('A', 0, AF8)>; /* UART4_TX */ + bias-disable; + drive-push-pull; + slew-rate = <0>; + }; + pins2 { + pinmux = <STM32_PINMUX('I', 9, AF8)>; /* UART4_RX */ + bias-disable; + }; + }; + + usart1_pins: usart1-0 { + pins1 { + pinmux = <STM32_PINMUX('B', 14, AF4)>; /* USART1_TX */ + bias-disable; + drive-push-pull; + slew-rate = <0>; + }; + pins2 { + pinmux = <STM32_PINMUX('B', 15, AF4)>; /* USART1_RX */ + bias-disable; + }; + }; + + usart2_pins: usart2-0 { + pins1 { + pinmux = <STM32_PINMUX('D', 5, AF7)>; /* USART2_TX */ + bias-disable; + drive-push-pull; + slew-rate = <0>; + }; + pins2 { + pinmux = <STM32_PINMUX('D', 6, AF7)>; /* USART2_RX */ + bias-disable; + }; + }; + + usart3_pins: usart3-0 { + pins1 { + pinmux = <STM32_PINMUX('B', 10, AF7)>, /* USART3_TX */ + <STM32_PINMUX('D', 12, AF7)>; /* USART3_RTS_DE */ + bias-disable; + drive-push-pull; + slew-rate = <0>; + }; + pins2 { + pinmux = <STM32_PINMUX('B', 11, AF7)>, /* USART3_RX */ + <STM32_PINMUX('D', 11, AF7)>; /* USART3_CTS_NSS */ + bias-disable; + }; + }; + + usbotg_hs_pins_a: usbotg-hs-0 { + pins { + pinmux = <STM32_PINMUX('H', 4, AF10)>, /* ULPI_NXT */ + <STM32_PINMUX('I', 11, AF10)>, /* ULPI_DIR> */ + <STM32_PINMUX('C', 0, AF10)>, /* ULPI_STP> */ + <STM32_PINMUX('A', 5, AF10)>, /* 
ULPI_CK> */ + <STM32_PINMUX('A', 3, AF10)>, /* ULPI_D0> */ + <STM32_PINMUX('B', 0, AF10)>, /* ULPI_D1> */ + <STM32_PINMUX('B', 1, AF10)>, /* ULPI_D2> */ + <STM32_PINMUX('B', 10, AF10)>, /* ULPI_D3> */ + <STM32_PINMUX('B', 11, AF10)>, /* ULPI_D4> */ + <STM32_PINMUX('B', 12, AF10)>, /* ULPI_D5> */ + <STM32_PINMUX('B', 13, AF10)>, /* ULPI_D6> */ + <STM32_PINMUX('B', 5, AF10)>; /* ULPI_D7> */ + bias-disable; + drive-push-pull; + slew-rate = <2>; + }; + }; +}; + diff --git a/arch/arm/boot/dts/stm32h743-pinctrl.dtsi b/arch/arm/boot/dts/stm32h743-pinctrl.dtsi deleted file mode 100644 index fa5dcb6a5fdd..000000000000 --- a/arch/arm/boot/dts/stm32h743-pinctrl.dtsi +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com> - * - * This file is dual-licensed: you can use it either under the terms - * of the GPL or the X11 license, at your option. Note that this dual - * licensing only applies to this file, and not this project as a - * whole. - * - * a) This file is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or (at your option) any later version. - * - * This file is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Or, alternatively, - * - * b) Permission is hereby granted, free of charge, to any person - * obtaining a copy of this software and associated documentation - * files (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following - * conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#include <dt-bindings/pinctrl/stm32-pinfunc.h> - -/ { - soc { - pin-controller { - #address-cells = <1>; - #size-cells = <1>; - compatible = "st,stm32h743-pinctrl"; - ranges = <0 0x58020000 0x3000>; - interrupt-parent = <&exti>; - st,syscfg = <&syscfg 0x8>; - pins-are-numbered; - - gpioa: gpio@58020000 { - gpio-controller; - #gpio-cells = <2>; - reg = <0x0 0x400>; - clocks = <&rcc GPIOA_CK>; - st,bank-name = "GPIOA"; - interrupt-controller; - #interrupt-cells = <2>; - }; - - gpiob: gpio@58020400 { - gpio-controller; - #gpio-cells = <2>; - reg = <0x400 0x400>; - clocks = <&rcc GPIOB_CK>; - st,bank-name = "GPIOB"; - interrupt-controller; - #interrupt-cells = <2>; - }; - - gpioc: gpio@58020800 { - gpio-controller; - #gpio-cells = <2>; - reg = <0x800 0x400>; - clocks = <&rcc GPIOC_CK>; - st,bank-name = "GPIOC"; - interrupt-controller; - #interrupt-cells = <2>; - }; - - gpiod: gpio@58020c00 { - gpio-controller; - #gpio-cells = <2>; - reg = <0xc00 0x400>; - clocks = <&rcc GPIOD_CK>; - st,bank-name = "GPIOD"; - interrupt-controller; - #interrupt-cells = <2>; - }; - - gpioe: gpio@58021000 { - gpio-controller; - #gpio-cells = <2>; - reg = <0x1000 0x400>; - clocks = <&rcc GPIOE_CK>; - st,bank-name = "GPIOE"; - interrupt-controller; - #interrupt-cells = <2>; - }; - - gpiof: gpio@58021400 { - gpio-controller; - #gpio-cells = <2>; - reg = <0x1400 0x400>; - clocks = <&rcc GPIOF_CK>; - st,bank-name = "GPIOF"; - interrupt-controller; - #interrupt-cells = <2>; - }; - - gpiog: gpio@58021800 { - gpio-controller; - #gpio-cells = <2>; - reg = <0x1800 0x400>; - clocks = <&rcc GPIOG_CK>; - st,bank-name = "GPIOG"; - interrupt-controller; - #interrupt-cells = <2>; - }; - - gpioh: gpio@58021c00 { - gpio-controller; - #gpio-cells = <2>; - reg = <0x1c00 0x400>; - clocks = <&rcc GPIOH_CK>; - st,bank-name = "GPIOH"; - interrupt-controller; - #interrupt-cells = <2>; - }; - - gpioi: gpio@58022000 { - gpio-controller; - #gpio-cells = <2>; - reg = <0x2000 0x400>; - clocks = <&rcc GPIOI_CK>; - st,bank-name = "GPIOI"; - interrupt-controller; - #interrupt-cells = <2>; - }; - - gpioj: gpio@58022400 { - gpio-controller; - #gpio-cells = <2>; - reg = <0x2400 0x400>; - clocks = <&rcc GPIOJ_CK>; - st,bank-name = "GPIOJ"; - interrupt-controller; - #interrupt-cells = <2>; - }; - - gpiok: gpio@58022800 { - gpio-controller; - #gpio-cells = <2>; - reg = <0x2800 0x400>; - clocks = <&rcc GPIOK_CK>; - st,bank-name = "GPIOK"; - interrupt-controller; - #interrupt-cells = <2>; - }; - - i2c1_pins_a: i2c1-0 { - pins { - pinmux = <STM32_PINMUX('B', 6, AF4)>, /* I2C1_SCL */ - <STM32_PINMUX('B', 7, AF4)>; /* I2C1_SDA */ - bias-disable; - drive-open-drain; - slew-rate = <0>; - }; - }; - - ethernet_rmii: rmii-0 { - pins { - pinmux = <STM32_PINMUX('G', 11, AF11)>, - <STM32_PINMUX('G', 13, AF11)>, - <STM32_PINMUX('G', 12, AF11)>, - <STM32_PINMUX('C', 4, AF11)>, - <STM32_PINMUX('C', 5, AF11)>, - <STM32_PINMUX('A', 7, AF11)>, - <STM32_PINMUX('C', 1, AF11)>, - <STM32_PINMUX('A', 2, AF11)>, - <STM32_PINMUX('A', 1, AF11)>; - slew-rate = <2>; - }; - }; - - sdmmc1_b4_pins_a: sdmmc1-b4-0 { - pins { - pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1_D0 */ - <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1_D1 */ - <STM32_PINMUX('C', 10, AF12)>, /* SDMMC1_D2 */ - <STM32_PINMUX('C', 11, AF12)>, /* SDMMC1_D3 */ - <STM32_PINMUX('C', 12, AF12)>, /* SDMMC1_CK */ - <STM32_PINMUX('D', 2, AF12)>; /* SDMMC1_CMD */ - slew-rate = <3>; - drive-push-pull; - bias-disable; - }; - }; - - sdmmc1_b4_od_pins_a: sdmmc1-b4-od-0 { - pins1 { - pinmux = <STM32_PINMUX('C', 8, AF12)>, /* 
SDMMC1_D0 */ - <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1_D1 */ - <STM32_PINMUX('C', 10, AF12)>, /* SDMMC1_D2 */ - <STM32_PINMUX('C', 11, AF12)>, /* SDMMC1_D3 */ - <STM32_PINMUX('C', 12, AF12)>; /* SDMMC1_CK */ - slew-rate = <3>; - drive-push-pull; - bias-disable; - }; - pins2{ - pinmux = <STM32_PINMUX('D', 2, AF12)>; /* SDMMC1_CMD */ - slew-rate = <3>; - drive-open-drain; - bias-disable; - }; - }; - - sdmmc1_b4_sleep_pins_a: sdmmc1-b4-sleep-0 { - pins { - pinmux = <STM32_PINMUX('C', 8, ANALOG)>, /* SDMMC1_D0 */ - <STM32_PINMUX('C', 9, ANALOG)>, /* SDMMC1_D1 */ - <STM32_PINMUX('C', 10, ANALOG)>, /* SDMMC1_D2 */ - <STM32_PINMUX('C', 11, ANALOG)>, /* SDMMC1_D3 */ - <STM32_PINMUX('C', 12, ANALOG)>, /* SDMMC1_CK */ - <STM32_PINMUX('D', 2, ANALOG)>; /* SDMMC1_CMD */ - }; - }; - - sdmmc1_dir_pins_a: sdmmc1-dir-0 { - pins1 { - pinmux = <STM32_PINMUX('C', 6, AF8)>, /* SDMMC1_D0DIR */ - <STM32_PINMUX('C', 7, AF8)>, /* SDMMC1_D123DIR */ - <STM32_PINMUX('B', 9, AF7)>; /* SDMMC1_CDIR */ - slew-rate = <3>; - drive-push-pull; - bias-pull-up; - }; - pins2{ - pinmux = <STM32_PINMUX('B', 8, AF7)>; /* SDMMC1_CKIN */ - bias-pull-up; - }; - }; - - sdmmc1_dir_sleep_pins_a: sdmmc1-dir-sleep-0 { - pins { - pinmux = <STM32_PINMUX('C', 6, ANALOG)>, /* SDMMC1_D0DIR */ - <STM32_PINMUX('C', 7, ANALOG)>, /* SDMMC1_D123DIR */ - <STM32_PINMUX('B', 9, ANALOG)>, /* SDMMC1_CDIR */ - <STM32_PINMUX('B', 8, ANALOG)>; /* SDMMC1_CKIN */ - }; - }; - - usart1_pins: usart1-0 { - pins1 { - pinmux = <STM32_PINMUX('B', 14, AF4)>; /* USART1_TX */ - bias-disable; - drive-push-pull; - slew-rate = <0>; - }; - pins2 { - pinmux = <STM32_PINMUX('B', 15, AF4)>; /* USART1_RX */ - bias-disable; - }; - }; - - usart2_pins: usart2-0 { - pins1 { - pinmux = <STM32_PINMUX('D', 5, AF7)>; /* USART2_TX */ - bias-disable; - drive-push-pull; - slew-rate = <0>; - }; - pins2 { - pinmux = <STM32_PINMUX('D', 6, AF7)>; /* USART2_RX */ - bias-disable; - }; - }; - - usbotg_hs_pins_a: usbotg-hs-0 { - pins { - pinmux = <STM32_PINMUX('H', 4, AF10)>, /* ULPI_NXT */ - <STM32_PINMUX('I', 11, AF10)>, /* ULPI_DIR> */ - <STM32_PINMUX('C', 0, AF10)>, /* ULPI_STP> */ - <STM32_PINMUX('A', 5, AF10)>, /* ULPI_CK> */ - <STM32_PINMUX('A', 3, AF10)>, /* ULPI_D0> */ - <STM32_PINMUX('B', 0, AF10)>, /* ULPI_D1> */ - <STM32_PINMUX('B', 1, AF10)>, /* ULPI_D2> */ - <STM32_PINMUX('B', 10, AF10)>, /* ULPI_D3> */ - <STM32_PINMUX('B', 11, AF10)>, /* ULPI_D4> */ - <STM32_PINMUX('B', 12, AF10)>, /* ULPI_D5> */ - <STM32_PINMUX('B', 13, AF10)>, /* ULPI_D6> */ - <STM32_PINMUX('B', 5, AF10)>; /* ULPI_D7> */ - bias-disable; - drive-push-pull; - slew-rate = <2>; - }; - }; - }; - }; -}; diff --git a/arch/arm/boot/dts/stm32h743.dtsi b/arch/arm/boot/dts/stm32h743.dtsi index 4ebffb0a45a3..05ecdf9ff03a 100644 --- a/arch/arm/boot/dts/stm32h743.dtsi +++ b/arch/arm/boot/dts/stm32h743.dtsi @@ -135,6 +135,22 @@ clocks = <&rcc USART2_CK>; }; + usart3: serial@40004800 { + compatible = "st,stm32h7-uart"; + reg = <0x40004800 0x400>; + interrupts = <39>; + status = "disabled"; + clocks = <&rcc USART3_CK>; + }; + + uart4: serial@40004c00 { + compatible = "st,stm32h7-uart"; + reg = <0x40004c00 0x400>; + interrupts = <52>; + status = "disabled"; + clocks = <&rcc UART4_CK>; + }; + i2c1: i2c@40005400 { compatible = "st,stm32f7-i2c"; #address-cells = <1>; @@ -159,7 +175,7 @@ status = "disabled"; }; - i2c3: i2c@40005C00 { + i2c3: i2c@40005c00 { compatible = "st,stm32f7-i2c"; #address-cells = <1>; #size-cells = <0>; @@ -368,6 +384,21 @@ max-frequency = <120000000>; }; + sdmmc2: mmc@48022400 { + compatible = "arm,pl18x", 
"arm,primecell"; + arm,primecell-periphid = <0x10153180>; + reg = <0x48022400 0x400>; + interrupts = <124>; + interrupt-names = "cmd_irq"; + clocks = <&rcc SDMMC2_CK>; + clock-names = "apb_pclk"; + resets = <&rcc STM32H7_AHB2_RESET(SDMMC2)>; + cap-sd-highspeed; + cap-mmc-highspeed; + max-frequency = <120000000>; + status = "disabled"; + }; + exti: interrupt-controller@58000000 { compatible = "st,stm32h7-exti"; interrupt-controller; @@ -392,7 +423,7 @@ status = "disabled"; }; - i2c4: i2c@58001C00 { + i2c4: i2c@58001c00 { compatible = "st,stm32f7-i2c"; #address-cells = <1>; #size-cells = <0>; @@ -555,6 +586,148 @@ snps,pbl = <8>; status = "disabled"; }; + + pinctrl: pin-controller@58020000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "st,stm32h743-pinctrl"; + ranges = <0 0x58020000 0x3000>; + interrupt-parent = <&exti>; + st,syscfg = <&syscfg 0x8>; + pins-are-numbered; + + gpioa: gpio@58020000 { + gpio-controller; + #gpio-cells = <2>; + reg = <0x0 0x400>; + clocks = <&rcc GPIOA_CK>; + st,bank-name = "GPIOA"; + interrupt-controller; + #interrupt-cells = <2>; + ngpios = <16>; + gpio-ranges = <&pinctrl 0 0 16>; + }; + + gpiob: gpio@58020400 { + gpio-controller; + #gpio-cells = <2>; + reg = <0x400 0x400>; + clocks = <&rcc GPIOB_CK>; + st,bank-name = "GPIOB"; + interrupt-controller; + #interrupt-cells = <2>; + ngpios = <16>; + gpio-ranges = <&pinctrl 0 16 16>; + }; + + gpioc: gpio@58020800 { + gpio-controller; + #gpio-cells = <2>; + reg = <0x800 0x400>; + clocks = <&rcc GPIOC_CK>; + st,bank-name = "GPIOC"; + interrupt-controller; + #interrupt-cells = <2>; + ngpios = <16>; + gpio-ranges = <&pinctrl 0 32 16>; + }; + + gpiod: gpio@58020c00 { + gpio-controller; + #gpio-cells = <2>; + reg = <0xc00 0x400>; + clocks = <&rcc GPIOD_CK>; + st,bank-name = "GPIOD"; + interrupt-controller; + #interrupt-cells = <2>; + ngpios = <16>; + gpio-ranges = <&pinctrl 0 48 16>; + }; + + gpioe: gpio@58021000 { + gpio-controller; + #gpio-cells = <2>; + reg = <0x1000 0x400>; + clocks = <&rcc GPIOE_CK>; + st,bank-name = "GPIOE"; + interrupt-controller; + #interrupt-cells = <2>; + ngpios = <16>; + gpio-ranges = <&pinctrl 0 64 16>; + }; + + gpiof: gpio@58021400 { + gpio-controller; + #gpio-cells = <2>; + reg = <0x1400 0x400>; + clocks = <&rcc GPIOF_CK>; + st,bank-name = "GPIOF"; + interrupt-controller; + #interrupt-cells = <2>; + ngpios = <16>; + gpio-ranges = <&pinctrl 0 80 16>; + }; + + gpiog: gpio@58021800 { + gpio-controller; + #gpio-cells = <2>; + reg = <0x1800 0x400>; + clocks = <&rcc GPIOG_CK>; + st,bank-name = "GPIOG"; + interrupt-controller; + #interrupt-cells = <2>; + ngpios = <16>; + gpio-ranges = <&pinctrl 0 96 16>; + }; + + gpioh: gpio@58021c00 { + gpio-controller; + #gpio-cells = <2>; + reg = <0x1c00 0x400>; + clocks = <&rcc GPIOH_CK>; + st,bank-name = "GPIOH"; + interrupt-controller; + #interrupt-cells = <2>; + ngpios = <16>; + gpio-ranges = <&pinctrl 0 112 16>; + }; + + gpioi: gpio@58022000 { + gpio-controller; + #gpio-cells = <2>; + reg = <0x2000 0x400>; + clocks = <&rcc GPIOI_CK>; + st,bank-name = "GPIOI"; + interrupt-controller; + #interrupt-cells = <2>; + ngpios = <16>; + gpio-ranges = <&pinctrl 0 128 16>; + }; + + gpioj: gpio@58022400 { + gpio-controller; + #gpio-cells = <2>; + reg = <0x2400 0x400>; + clocks = <&rcc GPIOJ_CK>; + st,bank-name = "GPIOJ"; + interrupt-controller; + #interrupt-cells = <2>; + ngpios = <16>; + gpio-ranges = <&pinctrl 0 144 16>; + }; + + gpiok: gpio@58022800 { + gpio-controller; + #gpio-cells = <2>; + reg = <0x2800 0x400>; + clocks = <&rcc GPIOK_CK>; + 
st,bank-name = "GPIOK"; + interrupt-controller; + #interrupt-cells = <2>; + ngpios = <8>; + gpio-ranges = <&pinctrl 0 160 8>; + }; + }; }; }; diff --git a/arch/arm/boot/dts/stm32h743i-disco.dts b/arch/arm/boot/dts/stm32h743i-disco.dts index e446d311c520..59e01ce10318 100644 --- a/arch/arm/boot/dts/stm32h743i-disco.dts +++ b/arch/arm/boot/dts/stm32h743i-disco.dts @@ -42,7 +42,7 @@ /dts-v1/; #include "stm32h743.dtsi" -#include "stm32h743-pinctrl.dtsi" +#include "stm32h7-pinctrl.dtsi" / { model = "STMicroelectronics STM32H743i-Discovery board"; diff --git a/arch/arm/boot/dts/stm32h743i-eval.dts b/arch/arm/boot/dts/stm32h743i-eval.dts index 8f398178f5e5..38cc7faf6884 100644 --- a/arch/arm/boot/dts/stm32h743i-eval.dts +++ b/arch/arm/boot/dts/stm32h743i-eval.dts @@ -42,7 +42,7 @@ /dts-v1/; #include "stm32h743.dtsi" -#include "stm32h743-pinctrl.dtsi" +#include "stm32h7-pinctrl.dtsi" / { model = "STMicroelectronics STM32H743i-EVAL board"; diff --git a/arch/arm/boot/dts/stm32h750.dtsi b/arch/arm/boot/dts/stm32h750.dtsi new file mode 100644 index 000000000000..41e3b1e3a874 --- /dev/null +++ b/arch/arm/boot/dts/stm32h750.dtsi @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright (C) STMicroelectronics 2021 - All Rights Reserved */ + +#include "stm32h743.dtsi" + + diff --git a/arch/arm/boot/dts/stm32h750i-art-pi.dts b/arch/arm/boot/dts/stm32h750i-art-pi.dts new file mode 100644 index 000000000000..9bb73bb61901 --- /dev/null +++ b/arch/arm/boot/dts/stm32h750i-art-pi.dts @@ -0,0 +1,229 @@ +/* + * Copyright 2021 - Dillon Min <dillon.minfei@gmail.com> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * For art-pi board resources, you can refer to link: + * https://art-pi.gitee.io/website/ + */ + +/dts-v1/; +#include "stm32h750.dtsi" +#include "stm32h7-pinctrl.dtsi" +#include <dt-bindings/interrupt-controller/irq.h> +#include <dt-bindings/gpio/gpio.h> + +/ { + model = "RT-Thread STM32H750i-ART-PI board"; + compatible = "st,stm32h750i-art-pi", "st,stm32h750"; + + chosen { + bootargs = "root=/dev/ram"; + stdout-path = "serial0:2000000n8"; + }; + + memory@c0000000 { + device_type = "memory"; + reg = <0xc0000000 0x2000000>; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + linux,cma { + compatible = "shared-dma-pool"; + no-map; + size = <0x100000>; + linux,dma-default; + }; + }; + + aliases { + serial0 = &uart4; + serial1 = &usart3; + }; + + leds { + compatible = "gpio-leds"; + led-red { + gpios = <&gpioi 8 0>; + }; + led-green { + gpios = <&gpioc 15 0>; + linux,default-trigger = "heartbeat"; + }; + }; + + v3v3: regulator-v3v3 { + compatible = "regulator-fixed"; + regulator-name = "v3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + wlan_pwr: regulator-wlan { + compatible = "regulator-fixed"; + + regulator-name = "wl-reg"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + + gpios = <&gpioc 13 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; +}; + +&clk_hse { + clock-frequency = <25000000>; +}; + +&dma1 { + status = "okay"; +}; + +&dma2 { + status = "okay"; +}; + +&mac { + status = "disabled"; + pinctrl-0 = <&ethernet_rmii>; + pinctrl-names = "default"; + phy-mode = "rmii"; + phy-handle = <&phy0>; + + mdio0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + phy0: ethernet-phy@0 { + reg = <0>; + }; + }; +}; + +&sdmmc1 { + pinctrl-names = "default", "opendrain", "sleep"; + pinctrl-0 = <&sdmmc1_b4_pins_a>; + pinctrl-1 = <&sdmmc1_b4_od_pins_a>; + pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>; + broken-cd; + st,neg-edge; + bus-width = <4>; + vmmc-supply = <&v3v3>; + status = "okay"; +}; + +&sdmmc2 { + pinctrl-names = "default", "opendrain", "sleep"; + pinctrl-0 = <&sdmmc2_b4_pins_a>; + pinctrl-1 = <&sdmmc2_b4_od_pins_a>; + pinctrl-2 = <&sdmmc2_b4_sleep_pins_a>; + broken-cd; + non-removable; + st,neg-edge; + bus-width = <4>; + vmmc-supply = <&wlan_pwr>; + status = "okay"; + + #address-cells = <1>; + #size-cells = <0>; + brcmf: bcrmf@1 { + reg = <1>; + compatible = "brcm,bcm4329-fmac"; + }; +}; + +&spi1 { + status = "okay"; + pinctrl-0 = <&spi1_pins>; + pinctrl-names = "default"; + cs-gpios = <&gpioa 4 GPIO_ACTIVE_LOW>; + dmas = <&dmamux1 37 0x400 0x05>, + <&dmamux1 38 0x400 0x05>; + dma-names = "rx", "tx"; + + flash@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "winbond,w25q128", "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <80000000>; + + partition@0 { + label = "root filesystem"; + reg = <0 0x1000000>; + }; + }; +}; + +&usart2 { + pinctrl-0 = <&usart2_pins>; + pinctrl-names = "default"; + status = "disabled"; +}; + +&usart3 { + pinctrl-names = "default"; + pinctrl-0 = <&usart3_pins>; + dmas = <&dmamux1 45 0x400 0x05>, + <&dmamux1 46 0x400 0x05>; + dma-names = "rx", "tx"; + st,hw-flow-ctrl; + status = "okay"; + + bluetooth { + compatible = "brcm,bcm43438-bt"; + host-wakeup-gpios = <&gpioc 0 GPIO_ACTIVE_HIGH>; + device-wakeup-gpios = <&gpioi 10 GPIO_ACTIVE_HIGH>; + shutdown-gpios = <&gpioi 11 GPIO_ACTIVE_HIGH>; + max-speed = <115200>; + }; +}; + +&uart4 { + pinctrl-0 = <&uart4_pins>; + pinctrl-names = "default"; + status
= "okay"; +}; + + diff --git a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi index 7b4249ed1983..060baa8b7e9d 100644 --- a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi +++ b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi @@ -1891,10 +1891,15 @@ usart2_idle_pins_c: usart2-idle-2 { pins1 { pinmux = <STM32_PINMUX('D', 5, ANALOG)>, /* USART2_TX */ - <STM32_PINMUX('D', 4, ANALOG)>, /* USART2_RTS */ <STM32_PINMUX('D', 3, ANALOG)>; /* USART2_CTS_NSS */ }; pins2 { + pinmux = <STM32_PINMUX('D', 4, AF7)>; /* USART2_RTS */ + bias-disable; + drive-push-pull; + slew-rate = <3>; + }; + pins3 { pinmux = <STM32_PINMUX('D', 6, AF7)>; /* USART2_RX */ bias-disable; }; @@ -1940,10 +1945,15 @@ usart3_idle_pins_b: usart3-idle-1 { pins1 { pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* USART3_TX */ - <STM32_PINMUX('G', 8, ANALOG)>, /* USART3_RTS */ <STM32_PINMUX('I', 10, ANALOG)>; /* USART3_CTS_NSS */ }; pins2 { + pinmux = <STM32_PINMUX('G', 8, AF8)>; /* USART3_RTS */ + bias-disable; + drive-push-pull; + slew-rate = <0>; + }; + pins3 { pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */ bias-disable; }; @@ -1976,10 +1986,15 @@ usart3_idle_pins_c: usart3-idle-2 { pins1 { pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* USART3_TX */ - <STM32_PINMUX('G', 8, ANALOG)>, /* USART3_RTS */ <STM32_PINMUX('B', 13, ANALOG)>; /* USART3_CTS_NSS */ }; pins2 { + pinmux = <STM32_PINMUX('G', 8, AF8)>; /* USART3_RTS */ + bias-disable; + drive-push-pull; + slew-rate = <0>; + }; + pins3 { pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */ bias-disable; }; diff --git a/arch/arm/boot/dts/stm32mp151.dtsi b/arch/arm/boot/dts/stm32mp151.dtsi index 4b8031782555..fcd3230c469b 100644 --- a/arch/arm/boot/dts/stm32mp151.dtsi +++ b/arch/arm/boot/dts/stm32mp151.dtsi @@ -452,32 +452,36 @@ usart2: serial@4000e000 { compatible = "st,stm32h7-uart"; reg = <0x4000e000 0x400>; - interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>; + interrupts-extended = <&exti 27 IRQ_TYPE_LEVEL_HIGH>; clocks = <&rcc USART2_K>; + wakeup-source; status = "disabled"; }; usart3: serial@4000f000 { compatible = "st,stm32h7-uart"; reg = <0x4000f000 0x400>; - interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>; + interrupts-extended = <&exti 28 IRQ_TYPE_LEVEL_HIGH>; clocks = <&rcc USART3_K>; + wakeup-source; status = "disabled"; }; uart4: serial@40010000 { compatible = "st,stm32h7-uart"; reg = <0x40010000 0x400>; - interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>; + interrupts-extended = <&exti 30 IRQ_TYPE_LEVEL_HIGH>; clocks = <&rcc UART4_K>; + wakeup-source; status = "disabled"; }; uart5: serial@40011000 { compatible = "st,stm32h7-uart"; reg = <0x40011000 0x400>; - interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>; + interrupts-extended = <&exti 31 IRQ_TYPE_LEVEL_HIGH>; clocks = <&rcc UART5_K>; + wakeup-source; status = "disabled"; }; @@ -493,6 +497,7 @@ #size-cells = <0>; st,syscfg-fmp = <&syscfg 0x4 0x1>; wakeup-source; + i2c-analog-filter; status = "disabled"; }; @@ -508,6 +513,7 @@ #size-cells = <0>; st,syscfg-fmp = <&syscfg 0x4 0x2>; wakeup-source; + i2c-analog-filter; status = "disabled"; }; @@ -523,6 +529,7 @@ #size-cells = <0>; st,syscfg-fmp = <&syscfg 0x4 0x4>; wakeup-source; + i2c-analog-filter; status = "disabled"; }; @@ -538,6 +545,7 @@ #size-cells = <0>; st,syscfg-fmp = <&syscfg 0x4 0x10>; wakeup-source; + i2c-analog-filter; status = "disabled"; }; @@ -577,16 +585,18 @@ uart7: serial@40018000 { compatible = "st,stm32h7-uart"; reg = <0x40018000 0x400>; - interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>; + interrupts-extended = <&exti 32 
IRQ_TYPE_LEVEL_HIGH>; clocks = <&rcc UART7_K>; + wakeup-source; status = "disabled"; }; uart8: serial@40019000 { compatible = "st,stm32h7-uart"; reg = <0x40019000 0x400>; - interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>; + interrupts-extended = <&exti 33 IRQ_TYPE_LEVEL_HIGH>; clocks = <&rcc UART8_K>; + wakeup-source; status = "disabled"; }; @@ -665,8 +675,9 @@ usart6: serial@44003000 { compatible = "st,stm32h7-uart"; reg = <0x44003000 0x400>; - interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>; + interrupts-extended = <&exti 29 IRQ_TYPE_LEVEL_HIGH>; clocks = <&rcc USART6_K>; + wakeup-source; status = "disabled"; }; @@ -1421,11 +1432,13 @@ "mac-clk-tx", "mac-clk-rx", "eth-ck", + "ptp_ref", "ethstp"; clocks = <&rcc ETHMAC>, <&rcc ETHTX>, <&rcc ETHRX>, <&rcc ETHCK_K>, + <&rcc ETHPTP_K>, <&rcc ETHSTP>; st,syscon = <&syscfg 0x4>; snps,mixed-burst; @@ -1505,8 +1518,9 @@ usart1: serial@5c000000 { compatible = "st,stm32h7-uart"; reg = <0x5c000000 0x400>; - interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>; + interrupts-extended = <&exti 26 IRQ_TYPE_LEVEL_HIGH>; clocks = <&rcc USART1_K>; + wakeup-source; status = "disabled"; }; @@ -1536,6 +1550,7 @@ #size-cells = <0>; st,syscfg-fmp = <&syscfg 0x4 0x8>; wakeup-source; + i2c-analog-filter; status = "disabled"; }; @@ -1573,6 +1588,7 @@ #size-cells = <0>; st,syscfg-fmp = <&syscfg 0x4 0x20>; wakeup-source; + i2c-analog-filter; status = "disabled"; }; diff --git a/arch/arm/boot/dts/stm32mp153c-dhcom-drc02.dts b/arch/arm/boot/dts/stm32mp153c-dhcom-drc02.dts index 02a39132958e..b4e504f026ce 100644 --- a/arch/arm/boot/dts/stm32mp153c-dhcom-drc02.dts +++ b/arch/arm/boot/dts/stm32mp153c-dhcom-drc02.dts @@ -20,6 +20,10 @@ "st,stm32mp153"; }; +&cryp1 { + status = "okay"; +}; + &m_can1 { pinctrl-names = "default", "sleep"; pinctrl-0 = <&m_can1_pins_a>; diff --git a/arch/arm/boot/dts/stm32mp157a-icore-stm32mp1-ctouch2.dts b/arch/arm/boot/dts/stm32mp157a-icore-stm32mp1-ctouch2.dts new file mode 100644 index 000000000000..d3058a036c74 --- /dev/null +++ b/arch/arm/boot/dts/stm32mp157a-icore-stm32mp1-ctouch2.dts @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * Copyright (c) STMicroelectronics 2019 - All Rights Reserved + * Copyright (c) 2020 Engicam srl + * Copyright (c) 2020 Amarula Solutons(India) + */ + +/dts-v1/; +#include "stm32mp157.dtsi" +#include "stm32mp157a-icore-stm32mp1.dtsi" +#include "stm32mp15-pinctrl.dtsi" +#include "stm32mp15xxaa-pinctrl.dtsi" +#include <dt-bindings/gpio/gpio.h> + +/ { + model = "Engicam i.Core STM32MP1 C.TOUCH 2.0"; + compatible = "engicam,icore-stm32mp1-ctouch2", + "engicam,icore-stm32mp1", "st,stm32mp157"; + + aliases { + serial0 = &uart4; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; +}; + +&sdmmc1 { + bus-width = <4>; + disable-wp; + pinctrl-names = "default", "opendrain", "sleep"; + pinctrl-0 = <&sdmmc1_b4_pins_a>; + pinctrl-1 = <&sdmmc1_b4_od_pins_a>; + pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>; + st,neg-edge; + vmmc-supply = <&v3v3>; + status = "okay"; +}; + +&uart4 { + pinctrl-names = "default", "sleep", "idle"; + pinctrl-0 = <&uart4_pins_a>; + pinctrl-1 = <&uart4_sleep_pins_a>; + pinctrl-2 = <&uart4_idle_pins_a>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/stm32mp157a-icore-stm32mp1-edimm2.2.dts b/arch/arm/boot/dts/stm32mp157a-icore-stm32mp1-edimm2.2.dts new file mode 100644 index 000000000000..ec9f1d1cd50f --- /dev/null +++ b/arch/arm/boot/dts/stm32mp157a-icore-stm32mp1-edimm2.2.dts @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * Copyright (c) 
STMicroelectronics 2019 - All Rights Reserved + * Copyright (c) 2020 Engicam srl + * Copyright (c) 2020 Amarula Solutons(India) + */ + +/dts-v1/; +#include "stm32mp157.dtsi" +#include "stm32mp157a-icore-stm32mp1.dtsi" +#include "stm32mp15-pinctrl.dtsi" +#include "stm32mp15xxaa-pinctrl.dtsi" +#include <dt-bindings/gpio/gpio.h> + +/ { + model = "Engicam i.Core STM32MP1 EDIMM2.2 Starter Kit"; + compatible = "engicam,icore-stm32mp1-edimm2.2", + "engicam,icore-stm32mp1", "st,stm32mp157"; + + aliases { + serial0 = &uart4; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; +}; + +&sdmmc1 { + bus-width = <4>; + disable-wp; + pinctrl-names = "default", "opendrain", "sleep"; + pinctrl-0 = <&sdmmc1_b4_pins_a>; + pinctrl-1 = <&sdmmc1_b4_od_pins_a>; + pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>; + st,neg-edge; + vmmc-supply = <&v3v3>; + status = "okay"; +}; + +&uart4 { + pinctrl-names = "default", "sleep", "idle"; + pinctrl-0 = <&uart4_pins_a>; + pinctrl-1 = <&uart4_sleep_pins_a>; + pinctrl-2 = <&uart4_idle_pins_a>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/stm32mp157a-icore-stm32mp1.dtsi b/arch/arm/boot/dts/stm32mp157a-icore-stm32mp1.dtsi new file mode 100644 index 000000000000..01166ccacf2b --- /dev/null +++ b/arch/arm/boot/dts/stm32mp157a-icore-stm32mp1.dtsi @@ -0,0 +1,196 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * Copyright (c) STMicroelectronics 2019 - All Rights Reserved + * Copyright (c) 2020 Engicam srl + * Copyright (c) 2020 Amarula Solutons(India) + */ + +/ { + compatible = "engicam,icore-stm32mp1", "st,stm32mp157"; + + memory@c0000000 { + device_type = "memory"; + reg = <0xc0000000 0x20000000>; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + mcuram2: mcuram2@10000000 { + compatible = "shared-dma-pool"; + reg = <0x10000000 0x40000>; + no-map; + }; + + vdev0vring0: vdev0vring0@10040000 { + compatible = "shared-dma-pool"; + reg = <0x10040000 0x1000>; + no-map; + }; + + vdev0vring1: vdev0vring1@10041000 { + compatible = "shared-dma-pool"; + reg = <0x10041000 0x1000>; + no-map; + }; + + vdev0buffer: vdev0buffer@10042000 { + compatible = "shared-dma-pool"; + reg = <0x10042000 0x4000>; + no-map; + }; + + mcuram: mcuram@30000000 { + compatible = "shared-dma-pool"; + reg = <0x30000000 0x40000>; + no-map; + }; + + retram: retram@38000000 { + compatible = "shared-dma-pool"; + reg = <0x38000000 0x10000>; + no-map; + }; + }; + + vddcore: regulator-vddcore { + compatible = "regulator-fixed"; + regulator-name = "vddcore"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-always-on; + }; + + vdd: regulator-vdd { + compatible = "regulator-fixed"; + regulator-name = "vdd"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + vdd_usb: regulator-vdd-usb { + compatible = "regulator-fixed"; + regulator-name = "vdd_usb"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + vdda: regulator-vdda { + compatible = "regulator-fixed"; + regulator-name = "vdda"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + vdd_ddr: regulator-vdd-ddr { + compatible = "regulator-fixed"; + regulator-name = "vdd_ddr"; + regulator-min-microvolt = <1350000>; + regulator-max-microvolt = <1350000>; + regulator-always-on; + }; + + vtt_ddr: regulator-vtt-ddr { + compatible = "regulator-fixed"; + regulator-name = "vtt_ddr"; + regulator-min-microvolt = 
<675000>; + regulator-max-microvolt = <675000>; + regulator-always-on; + vin-supply = <&vdd>; + }; + + vref_ddr: regulator-vref-ddr { + compatible = "regulator-fixed"; + regulator-name = "vref_ddr"; + regulator-min-microvolt = <675000>; + regulator-max-microvolt = <675000>; + regulator-always-on; + vin-supply = <&vdd>; + }; + + vdd_sd: regulator-vdd-sd { + compatible = "regulator-fixed"; + regulator-name = "vdd_sd"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + v3v3: regulator-v3v3 { + compatible = "regulator-fixed"; + regulator-name = "v3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + v2v8: regulator-v2v8 { + compatible = "regulator-fixed"; + regulator-name = "v2v8"; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + regulator-always-on; + vin-supply = <&v3v3>; + }; + + v1v8: regulator-v1v8 { + compatible = "regulator-fixed"; + regulator-name = "v1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + vin-supply = <&v3v3>; + }; +}; + +&dts { + status = "okay"; +}; + +&i2c2 { + i2c-scl-falling-time-ns = <20>; + i2c-scl-rising-time-ns = <185>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&i2c2_pins_a>; + pinctrl-1 = <&i2c2_sleep_pins_a>; + status = "okay"; +}; + +&ipcc { + status = "okay"; +}; + +&iwdg2{ + timeout-sec = <32>; + status = "okay"; +}; + +&m4_rproc{ + memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>, + <&vdev0vring1>, <&vdev0buffer>; + mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>; + mbox-names = "vq0", "vq1", "shutdown"; + interrupt-parent = <&exti>; + interrupts = <68 1>; + status = "okay"; +}; + +&rng1 { + status = "okay"; +}; + +&rtc{ + status = "okay"; +}; + +&vrefbuf { + regulator-min-microvolt = <2500000>; + regulator-max-microvolt = <2500000>; + vdda-supply = <&vdd>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1-microdev2.0-of7.dts b/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1-microdev2.0-of7.dts new file mode 100644 index 000000000000..674b2d330dc4 --- /dev/null +++ b/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1-microdev2.0-of7.dts @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * Copyright (c) STMicroelectronics 2019 - All Rights Reserved + * Copyright (c) 2020 Engicam srl + * Copyright (c) 2020 Amarula Solutons(India) + */ + +/dts-v1/; +#include "stm32mp157.dtsi" +#include "stm32mp157a-microgea-stm32mp1.dtsi" +#include "stm32mp15-pinctrl.dtsi" +#include "stm32mp15xxaa-pinctrl.dtsi" +#include <dt-bindings/gpio/gpio.h> + +/ { + model = "Engicam MicroGEA STM32MP1 MicroDev 2.0 7\" Open Frame"; + compatible = "engicam,microgea-stm32mp1-microdev2.0-of7", + "engicam,microgea-stm32mp1", "st,stm32mp157"; + + aliases { + serial0 = &uart4; + serial1 = &uart8; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + backlight: backlight { + compatible = "gpio-backlight"; + gpios = <&gpiod 13 GPIO_ACTIVE_HIGH>; + default-on; + }; + + lcd_3v3: regulator-lcd-3v3 { + compatible = "regulator-fixed"; + regulator-name = "lcd_3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpiof 10 GPIO_ACTIVE_HIGH>; + enable-active-high; + regulator-always-on; + power-supply = <&panel_pwr>; + }; + + panel_pwr: regulator-panel-pwr { + compatible = "regulator-fixed"; + regulator-name = "panel_pwr"; + regulator-min-microvolt = <3300000>; + 
regulator-max-microvolt = <3300000>; + gpio = <&gpiob 10 GPIO_ACTIVE_HIGH>; + regulator-always-on; + }; + + panel { + compatible = "auo,b101aw03"; + backlight = <&backlight>; + enable-gpios = <&gpiof 2 GPIO_ACTIVE_HIGH>; + power-supply = <&lcd_3v3>; + + port { + panel_in: endpoint { + remote-endpoint = <&ltdc_ep0_out>; + }; + }; + }; +}; + +&i2c2 { + i2c-scl-falling-time-ns = <20>; + i2c-scl-rising-time-ns = <185>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&i2c2_pins_a>; + pinctrl-1 = <&i2c2_sleep_pins_a>; + status = "okay"; +}; + +&ltdc { + pinctrl-names = "default"; + pinctrl-0 = <&ltdc_pins>; + status = "okay"; + + port { + ltdc_ep0_out: endpoint@0 { + reg = <0>; + remote-endpoint = <&panel_in>; + }; + }; +}; + +&pinctrl { + ltdc_pins: ltdc { + pins { + pinmux = <STM32_PINMUX('G', 10, AF14)>, /* LTDC_B2 */ + <STM32_PINMUX('H', 12, AF14)>, /* LTDC_R6 */ + <STM32_PINMUX('H', 11, AF14)>, /* LTDC_R5 */ + <STM32_PINMUX('D', 10, AF14)>, /* LTDC_B3 */ + <STM32_PINMUX('D', 9, AF14)>, /* LTDC_B0 */ + <STM32_PINMUX('E', 5, AF14)>, /* LTDC_G0 */ + <STM32_PINMUX('E', 6, AF14)>, /* LTDC_G1 */ + <STM32_PINMUX('E', 13, AF14)>, /* LTDC_DE */ + <STM32_PINMUX('E', 15, AF14)>, /* LTDC_R7 */ + <STM32_PINMUX('G', 7, AF14)>, /* LTDC_CLK */ + <STM32_PINMUX('G', 12, AF14)>, /* LTDC_B1 */ + <STM32_PINMUX('H', 2, AF14)>, /* LTDC_R0 */ + <STM32_PINMUX('H', 3, AF14)>, /* LTDC_R1 */ + <STM32_PINMUX('H', 8, AF14)>, /* LTDC_R2 */ + <STM32_PINMUX('H', 9, AF14)>, /* LTDC_R3 */ + <STM32_PINMUX('H', 10, AF14)>, /* LTDC_R4 */ + <STM32_PINMUX('H', 13, AF14)>, /* LTDC_G2 */ + <STM32_PINMUX('H', 14, AF14)>, /* LTDC_G3 */ + <STM32_PINMUX('H', 15, AF14)>, /* LTDC_G4 */ + <STM32_PINMUX('I', 0, AF14)>, /* LTDC_G5 */ + <STM32_PINMUX('I', 1, AF14)>, /* LTDC_G6 */ + <STM32_PINMUX('I', 2, AF14)>, /* LTDC_G7 */ + <STM32_PINMUX('I', 4, AF14)>, /* LTDC_B4 */ + <STM32_PINMUX('I', 5, AF14)>, /* LTDC_B5 */ + <STM32_PINMUX('B', 8, AF14)>, /* LTDC_B6 */ + <STM32_PINMUX('I', 7, AF14)>, /* LTDC_B7 */ + <STM32_PINMUX('I', 9, AF14)>, /* LTDC_VSYNC */ + <STM32_PINMUX('I', 10, AF14)>; /* LTDC_HSYNC */ + bias-disable; + drive-push-pull; + slew-rate = <3>; + }; + }; +}; + +&sdmmc1 { + bus-width = <4>; + disable-wp; + pinctrl-names = "default", "opendrain", "sleep"; + pinctrl-0 = <&sdmmc1_b4_pins_a>; + pinctrl-1 = <&sdmmc1_b4_od_pins_a>; + pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>; + st,neg-edge; + vmmc-supply = <&vdd>; + status = "okay"; +}; + +&uart4 { + pinctrl-names = "default", "sleep", "idle"; + pinctrl-0 = <&uart4_pins_a>; + pinctrl-1 = <&uart4_sleep_pins_a>; + pinctrl-2 = <&uart4_idle_pins_a>; + status = "okay"; +}; + +/* J31: RS323 */ +&uart8 { + pinctrl-names = "default"; + pinctrl-0 = <&uart8_pins_a>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1-microdev2.0.dts b/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1-microdev2.0.dts new file mode 100644 index 000000000000..7a75868164dc --- /dev/null +++ b/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1-microdev2.0.dts @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * Copyright (c) STMicroelectronics 2019 - All Rights Reserved + * Copyright (c) 2020 Engicam srl + * Copyright (c) 2020 Amarula Solutons(India) + */ + +/dts-v1/; +#include "stm32mp157.dtsi" +#include "stm32mp157a-microgea-stm32mp1.dtsi" +#include "stm32mp15-pinctrl.dtsi" +#include "stm32mp15xxaa-pinctrl.dtsi" +#include <dt-bindings/gpio/gpio.h> + +/ { + model = "Engicam MicroGEA STM32MP1 MicroDev 2.0 Carrier Board"; + compatible = 
"engicam,microgea-stm32mp1-microdev2.0", + "engicam,microgea-stm32mp1", "st,stm32mp157"; + + aliases { + serial0 = &uart4; + serial1 = &uart8; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; +}; + +&sdmmc1 { + bus-width = <4>; + disable-wp; + pinctrl-names = "default", "opendrain", "sleep"; + pinctrl-0 = <&sdmmc1_b4_pins_a>; + pinctrl-1 = <&sdmmc1_b4_od_pins_a>; + pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>; + st,neg-edge; + vmmc-supply = <&vdd>; + status = "okay"; +}; + +&uart4 { + pinctrl-names = "default", "sleep", "idle"; + pinctrl-0 = <&uart4_pins_a>; + pinctrl-1 = <&uart4_sleep_pins_a>; + pinctrl-2 = <&uart4_idle_pins_a>; + status = "okay"; +}; + +/* J31: RS323 */ +&uart8 { + pinctrl-names = "default"; + pinctrl-0 = <&uart8_pins_a>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1.dtsi b/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1.dtsi new file mode 100644 index 000000000000..0b85175f151e --- /dev/null +++ b/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1.dtsi @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * Copyright (c) STMicroelectronics 2019 - All Rights Reserved + * Copyright (c) 2020 Engicam srl + * Copyright (c) 2020 Amarula Solutons(India) + */ + +/ { + compatible = "engicam,microgea-stm32mp1", "st,stm32mp157"; + + memory@c0000000 { + device_type = "memory"; + reg = <0xc0000000 0x10000000>; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + mcuram2: mcuram2@10000000 { + compatible = "shared-dma-pool"; + reg = <0x10000000 0x40000>; + no-map; + }; + + vdev0vring0: vdev0vring0@10040000 { + compatible = "shared-dma-pool"; + reg = <0x10040000 0x1000>; + no-map; + }; + + vdev0vring1: vdev0vring1@10041000 { + compatible = "shared-dma-pool"; + reg = <0x10041000 0x1000>; + no-map; + }; + + vdev0buffer: vdev0buffer@10042000 { + compatible = "shared-dma-pool"; + reg = <0x10042000 0x4000>; + no-map; + }; + + mcuram: mcuram@30000000 { + compatible = "shared-dma-pool"; + reg = <0x30000000 0x40000>; + no-map; + }; + + retram: retram@38000000 { + compatible = "shared-dma-pool"; + reg = <0x38000000 0x10000>; + no-map; + }; + }; + + vin: regulator-vin { + compatible = "regulator-fixed"; + regulator-name = "vin"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-always-on; + }; + + vddcore: regulator-vddcore { + compatible = "regulator-fixed"; + regulator-name = "vddcore"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-always-on; + vin-supply = <&vin>; + }; + + vdd: regulator-vdd { + compatible = "regulator-fixed"; + regulator-name = "vdd"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + vin-supply = <&vin>; + }; + + vddq_ddr: regulator-vddq-ddr { + compatible = "regulator-fixed"; + regulator-name = "vddq_ddr"; + regulator-min-microvolt = <1350000>; + regulator-max-microvolt = <1350000>; + regulator-always-on; + vin-supply = <&vin>; + }; +}; + +&dts { + status = "okay"; +}; + +&fmc { + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&fmc_pins_a>; + pinctrl-1 = <&fmc_sleep_pins_a>; + status = "okay"; + + nand-controller@4,0 { + status = "okay"; + + nand@0 { + reg = <0>; + nand-on-flash-bbt; + #address-cells = <1>; + #size-cells = <1>; + }; + }; +}; + +&ipcc { + status = "okay"; +}; + +&iwdg2{ + timeout-sec = <32>; + status = "okay"; +}; + +&m4_rproc{ + memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>, + <&vdev0vring1>, 
<&vdev0buffer>; + mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>; + mbox-names = "vq0", "vq1", "shutdown"; + interrupt-parent = <&exti>; + interrupts = <68 1>; + status = "okay"; +}; + +&rng1 { + status = "okay"; +}; + +&rtc{ + status = "okay"; +}; + +&vrefbuf { + regulator-min-microvolt = <2500000>; + regulator-max-microvolt = <2500000>; + vdda-supply = <&vdd>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/stm32mp157c-dhcom-pdk2.dts b/arch/arm/boot/dts/stm32mp157c-dhcom-pdk2.dts index d3b81382f97c..6dd8216c235e 100644 --- a/arch/arm/boot/dts/stm32mp157c-dhcom-pdk2.dts +++ b/arch/arm/boot/dts/stm32mp157c-dhcom-pdk2.dts @@ -20,6 +20,10 @@ "st,stm32mp157"; }; +&cryp1 { + status = "okay"; +}; + &m_can1 { pinctrl-names = "default", "sleep"; pinctrl-0 = <&m_can1_pins_a>; diff --git a/arch/arm/boot/dts/stm32mp157c-dhcom-picoitx.dts b/arch/arm/boot/dts/stm32mp157c-dhcom-picoitx.dts index cfb8f8a0c82d..7067a860aaff 100644 --- a/arch/arm/boot/dts/stm32mp157c-dhcom-picoitx.dts +++ b/arch/arm/boot/dts/stm32mp157c-dhcom-picoitx.dts @@ -20,6 +20,10 @@ "st,stm32mp157"; }; +&cryp1 { + status = "okay"; +}; + &m_can1 { pinctrl-names = "default", "sleep"; pinctrl-0 = <&m_can1_pins_a>; diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi index fad23d6f69b8..fb45c5aa878d 100644 --- a/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi +++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi @@ -43,15 +43,15 @@ &gpiod { gpio-line-names = "", "", "", "", - "", "", "", "", - "", "", "", "Out1", - "Out2", "", "", ""; + "", "", "DHCOM-B", "", + "", "", "", "DRC02-Out1", + "DRC02-Out2", "", "", ""; }; &gpioi { - gpio-line-names = "In1", "", "", "", - "", "", "", "", - "In2", "", "", "", + gpio-line-names = "DRC02-In1", "DHCOM-O", "DHCOM-H", "DHCOM-I", + "DHCOM-R", "DHCOM-M", "", "", + "DRC02-In2", "", "", "", "", "", "", ""; /* diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-picoitx.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-picoitx.dtsi index cd3a1798ca68..ba816ef8b9b2 100644 --- a/arch/arm/boot/dts/stm32mp15xx-dhcom-picoitx.dtsi +++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-picoitx.dtsi @@ -57,22 +57,22 @@ &gpioc { gpio-line-names = "", "", "", "", - "", "", "In1", "", + "", "", "PicoITX-In1", "", "", "", "", "", "", "", "", ""; }; &gpiod { gpio-line-names = "", "", "", "", - "", "", "", "", - "", "", "", "Out1", - "Out2", "", "", ""; + "", "", "DHCOM-B", "", + "", "", "", "PicoITX-Out1", + "PicoITX-Out2", "", "", ""; }; &gpiog { - gpio-line-names = "In2", "", "", "", - "", "", "", "", + gpio-line-names = "PicoITX-In2", "", "", "", "", "", "", "", + "DHCOM-L", "", "", "", "", "", "", ""; }; diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi index 2617815e42a6..272a1a67a9ad 100644 --- a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi +++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi @@ -92,6 +92,10 @@ }; }; +&crc1 { + status = "okay"; +}; + &dac { pinctrl-names = "default"; pinctrl-0 = <&dac_ch1_pins_a &dac_ch2_pins_a>; @@ -164,10 +168,70 @@ }; }; +&gpioa { + gpio-line-names = "", "", "", "", + "", "", "DHCOM-K", "", + "", "", "", "", + "", "", "", ""; +}; + +&gpiob { + gpio-line-names = "", "", "", "", + "", "", "", "", + "DHCOM-Q", "", "", "", + "", "", "", ""; +}; + &gpioc { + gpio-line-names = "", "", "", "", + "", "", "DHCOM-E", "", + "", "", "", "", + "", "", "", ""; status = "okay"; }; +&gpiod { + gpio-line-names = "", "", "", "", + "", "", "DHCOM-B", "", + "", "", "", "DHCOM-F", + "DHCOM-D", "", "", ""; +}; + 
+&gpioe { + gpio-line-names = "", "", "", "", + "", "", "DHCOM-P", "", + "", "", "", "", + "", "", "", ""; +}; + +&gpiof { + gpio-line-names = "", "", "", "DHCOM-A", + "", "", "", "", + "", "", "", "", + "", "", "", ""; +}; + +&gpiog { + gpio-line-names = "DHCOM-C", "", "", "", + "", "", "", "", + "DHCOM-L", "", "", "", + "", "", "", ""; +}; + +&gpioh { + gpio-line-names = "", "", "", "", + "", "", "", "DHCOM-N", + "DHCOM-J", "DHCOM-W", "DHCOM-V", "DHCOM-U", + "DHCOM-T", "", "DHCOM-S", ""; +}; + +&gpioi { + gpio-line-names = "DHCOM-G", "DHCOM-O", "DHCOM-H", "DHCOM-I", + "DHCOM-R", "DHCOM-M", "", "", + "", "", "", "", + "", "", "", ""; +}; + &i2c4 { pinctrl-names = "default"; pinctrl-0 = <&i2c4_pins_a>; diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi index b09e87fe901a..64dca5b7f748 100644 --- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi +++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi @@ -162,6 +162,41 @@ }; }; +&gpioa { + gpio-line-names = "", "", "", "", + "", "", "", "", + "", "", "", "AV96-K", + "AV96-I", "", "AV96-A", ""; +}; + +&gpiob { + gpio-line-names = "", "", "", "", + "", "AV96-J", "", "", + "", "", "", "AV96-B", + "", "AV96-L", "", ""; +}; + +&gpioc { + gpio-line-names = "", "", "", "AV96-C", + "", "", "", "", + "", "", "", "", + "", "", "", ""; +}; + +&gpiod { + gpio-line-names = "", "", "", "", + "", "", "", "", + "AV96-D", "", "", "", + "", "", "AV96-E", "AV96-F"; +}; + +&gpiof { + gpio-line-names = "", "", "", "", + "", "", "", "", + "", "", "", "", + "AV96-G", "AV96-H", "", ""; +}; + &i2c1 { /* X6 I2C1 */ pinctrl-names = "default"; pinctrl-0 = <&i2c1_pins_b>; diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi index 803eb8bc9c85..013ae369791d 100644 --- a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi +++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi @@ -21,6 +21,10 @@ }; }; +&crc1 { + status = "okay"; +}; + &dts { status = "okay"; }; diff --git a/arch/arm/boot/dts/sun4i-a10-topwise-a721.dts b/arch/arm/boot/dts/sun4i-a10-topwise-a721.dts new file mode 100644 index 000000000000..3628f12d2521 --- /dev/null +++ b/arch/arm/boot/dts/sun4i-a10-topwise-a721.dts @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2020 Pascal Roeleven <dev@pascalroeleven.nl> + */ + +/dts-v1/; +#include "sun4i-a10.dtsi" +#include "sunxi-common-regulators.dtsi" + +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/input/input.h> +#include <dt-bindings/interrupt-controller/irq.h> +#include <dt-bindings/pwm/pwm.h> + +/ { + model = "Topwise A721"; + compatible = "topwise,a721", "allwinner,sun4i-a10"; + + aliases { + serial0 = &uart0; + }; + + backlight: backlight { + compatible = "pwm-backlight"; + pwms = <&pwm 0 100000 PWM_POLARITY_INVERTED>; + power-supply = <®_vbat>; + enable-gpios = <&pio 7 7 GPIO_ACTIVE_HIGH>; /* PH7 */ + brightness-levels = <0 30 40 50 60 70 80 90 100>; + default-brightness-level = <8>; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + panel { + compatible = "starry,kr070pe2t"; + backlight = <&backlight>; + power-supply = <®_lcd_power>; + + port { + panel_input: endpoint { + remote-endpoint = <&tcon0_out_panel>; + }; + }; + }; + + reg_lcd_power: reg-lcd-power { + compatible = "regulator-fixed"; + regulator-name = "reg-lcd-power"; + gpio = <&pio 7 8 GPIO_ACTIVE_HIGH>; /* PH8 */ + enable-active-high; + }; + + reg_vbat: reg-vbat { + compatible = "regulator-fixed"; + regulator-name = "vbat"; + 
regulator-min-microvolt = <3700000>; + regulator-max-microvolt = <3700000>; + }; + +}; + +&codec { + status = "okay"; +}; + +&cpu0 { + cpu-supply = <&reg_dcdc2>; +}; + +&de { + status = "okay"; +}; + +&ehci0 { + status = "okay"; +}; + +&ehci1 { + status = "okay"; +}; + +&i2c0 { + status = "okay"; + + axp209: pmic@34 { + reg = <0x34>; + interrupts = <0>; + }; +}; + +#include "axp209.dtsi" + +&ac_power_supply { + status = "okay"; +}; + +&battery_power_supply { + status = "okay"; +}; + +&i2c1 { + status = "okay"; + + accelerometer@4c { + compatible = "fsl,mma7660"; + reg = <0x4c>; + }; +}; + +&i2c2 { + status = "okay"; + + touchscreen@38 { + compatible = "edt,edt-ft5406"; + reg = <0x38>; + interrupt-parent = <&pio>; + interrupts = <7 21 IRQ_TYPE_EDGE_FALLING>; + touchscreen-size-x = <800>; + touchscreen-size-y = <480>; + vcc-supply = <&reg_vcc3v3>; + }; +}; + +&lradc { + vref-supply = <&reg_ldo2>; + status = "okay"; + + button-571 { + label = "Volume Up"; + linux,code = <KEY_VOLUMEUP>; + channel = <0>; + voltage = <571428>; + }; + + button-761 { + label = "Volume Down"; + linux,code = <KEY_VOLUMEDOWN>; + channel = <0>; + voltage = <761904>; + }; +}; + +&mmc0 { + vmmc-supply = <&reg_vcc3v3>; + bus-width = <4>; + cd-gpios = <&pio 7 1 GPIO_ACTIVE_LOW>; /* PH01 */ + status = "okay"; +}; + +&ohci0 { + status = "okay"; +}; + +&ohci1 { + status = "okay"; +}; + +&otg_sram { + status = "okay"; +}; + +&pio { + vcc-pb-supply = <&reg_vcc3v3>; + vcc-pf-supply = <&reg_vcc3v3>; + vcc-ph-supply = <&reg_vcc3v3>; +}; + +&pwm { + pinctrl-names = "default"; + pinctrl-0 = <&pwm0_pin>; + status = "okay"; +}; + +&reg_dcdc2 { + regulator-always-on; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1400000>; + regulator-name = "vdd-cpu"; +}; + +&reg_dcdc3 { + regulator-always-on; + regulator-min-microvolt = <1250000>; + regulator-max-microvolt = <1250000>; + regulator-name = "vdd-int-dll"; +}; + +&reg_ldo1 { + regulator-name = "vdd-rtc"; +}; + +&reg_ldo2 { + regulator-always-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-name = "avcc"; +}; + +&reg_usb0_vbus { + status = "okay"; +}; + +&reg_usb1_vbus { + status = "okay"; +}; + +&reg_usb2_vbus { + status = "okay"; +}; + +&tcon0_out { + tcon0_out_panel: endpoint@0 { + reg = <0>; + remote-endpoint = <&panel_input>; + }; +}; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_pb_pins>; + status = "okay"; +}; + +&usb_otg { + dr_mode = "otg"; + status = "okay"; +}; + +&usb_power_supply { + status = "okay"; +}; + +&usbphy { + usb0_id_det-gpios = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */ + usb0_vbus_det-gpios = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */ + usb0_vbus-supply = <&reg_usb0_vbus>; + usb1_vbus-supply = <&reg_usb1_vbus>; + usb2_vbus-supply = <&reg_usb2_vbus>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts index 486cec6f71e0..236ebfc06192 100644 --- a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts +++ b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts @@ -227,7 +227,7 @@ compatible = "x-powers,axp221"; reg = <0x68>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; x-powers,drive-vbus-en; }; }; diff --git a/arch/arm/boot/dts/sun6i-a31-m9.dts b/arch/arm/boot/dts/sun6i-a31-m9.dts index e4f3415e6108..7d2eaaf5c33e 100644 --- a/arch/arm/boot/dts/sun6i-a31-m9.dts +++ b/arch/arm/boot/dts/sun6i-a31-m9.dts @@ -116,7 +116,7 @@ compatible = "x-powers,axp221"; reg = <0x68>; interrupt-parent = <&r_intc>; - interrupts = <0 
IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; }; }; diff --git a/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts b/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts index 7bd4bdd66a76..83611434270c 100644 --- a/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts +++ b/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts @@ -116,7 +116,7 @@ compatible = "x-powers,axp221"; reg = <0x68>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; }; }; diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index a75033e85fcb..a31f9072bf79 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi @@ -611,6 +611,7 @@ pio: pinctrl@1c20800 { compatible = "allwinner,sun6i-a31-pinctrl"; reg = <0x01c20800 0x400>; + interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, @@ -802,6 +803,7 @@ lradc: lradc@1c22800 { compatible = "allwinner,sun4i-a10-lradc-keys"; reg = <0x01c22800 0x100>; + interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; }; @@ -1299,6 +1301,7 @@ #clock-cells = <1>; compatible = "allwinner,sun6i-a31-rtc"; reg = <0x01f00000 0x54>; + interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>; clocks = <&osc32k>; @@ -1308,7 +1311,7 @@ r_intc: interrupt-controller@1f00c00 { compatible = "allwinner,sun6i-a31-r-intc"; interrupt-controller; - #interrupt-cells = <2>; + #interrupt-cells = <3>; reg = <0x01f00c00 0x400>; interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>; }; @@ -1383,6 +1386,7 @@ r_pio: pinctrl@1f02c00 { compatible = "allwinner,sun6i-a31-r-pinctrl"; reg = <0x01f02c00 0x400>; + interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>; clocks = <&apb0_gates 0>, <&osc24M>, <&rtc 0>; diff --git a/arch/arm/boot/dts/sun6i-a31s-primo81.dts b/arch/arm/boot/dts/sun6i-a31s-primo81.dts index 66bc6ca77afb..b32b70ada7fd 100644 --- a/arch/arm/boot/dts/sun6i-a31s-primo81.dts +++ b/arch/arm/boot/dts/sun6i-a31s-primo81.dts @@ -159,7 +159,7 @@ compatible = "x-powers,axp221"; reg = <0x68>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; x-powers,drive-vbus-en; }; }; diff --git a/arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi b/arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi index 7455c0db4a8a..227ad489731c 100644 --- a/arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi +++ b/arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi @@ -79,7 +79,7 @@ compatible = "x-powers,axp221"; reg = <0x68>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; }; }; diff --git a/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts b/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts index efb25b949f30..96554ab4f6d3 100644 --- a/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts +++ b/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts @@ -149,7 +149,7 @@ compatible = "x-powers,axp221"; reg = <0x68>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; eldoin-supply = <®_dcdc1>; x-powers,drive-vbus-en; }; diff --git a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts index cadc45255d7b..0b61f5368d44 100644 --- 
a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts +++ b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts @@ -99,7 +99,7 @@ compatible = "x-powers,axp221"; reg = <0x68>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; }; }; diff --git a/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi index 6bf3fbdd738f..f38d19c6be8c 100644 --- a/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi +++ b/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi @@ -80,7 +80,7 @@ compatible = "x-powers,axp221"; reg = <0x68>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; drivevbus-supply = <®_vcc5v0>; x-powers,drive-vbus-en; }; diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi index a42fac676b31..4461d5098b20 100644 --- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi +++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi @@ -338,6 +338,7 @@ pio: pinctrl@1c20800 { /* compatible gets set in SoC specific dtsi file */ reg = <0x01c20800 0x400>; + interrupt-parent = <&r_intc>; /* interrupts get set in SoC specific dtsi file */ clocks = <&ccu CLK_BUS_PIO>, <&osc24M>, <&rtc 0>; clock-names = "apb", "hosc", "losc"; @@ -473,6 +474,7 @@ lradc: lradc@1c22800 { compatible = "allwinner,sun4i-a10-lradc-keys"; reg = <0x01c22800 0x100>; + interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; }; @@ -709,6 +711,7 @@ rtc: rtc@1f00000 { compatible = "allwinner,sun8i-a23-rtc"; reg = <0x01f00000 0x400>; + interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>; clock-output-names = "osc32k", "osc32k-out"; @@ -719,7 +722,7 @@ r_intc: interrupt-controller@1f00c00 { compatible = "allwinner,sun6i-a31-r-intc"; interrupt-controller; - #interrupt-cells = <2>; + #interrupt-cells = <3>; reg = <0x01f00c00 0x400>; interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>; }; @@ -805,6 +808,7 @@ r_pio: pinctrl@1f02c00 { compatible = "allwinner,sun8i-a23-r-pinctrl"; reg = <0x01f02c00 0x400>; + interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>; clocks = <&apb0_gates 0>, <&osc24M>, <&rtc 0>; clock-names = "apb", "hosc", "losc"; diff --git a/arch/arm/boot/dts/sun8i-a33-olinuxino.dts b/arch/arm/boot/dts/sun8i-a33-olinuxino.dts index 8538514c8588..6fee8f133508 100644 --- a/arch/arm/boot/dts/sun8i-a33-olinuxino.dts +++ b/arch/arm/boot/dts/sun8i-a33-olinuxino.dts @@ -99,7 +99,7 @@ compatible = "x-powers,axp223"; reg = <0x3a3>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; eldoin-supply = <®_dcdc1>; x-powers,drive-vbus-en; }; diff --git a/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts b/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts index d54a067fc76e..0c82ff3c7cb4 100644 --- a/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts +++ b/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts @@ -166,7 +166,7 @@ compatible = "x-powers,axp223"; reg = <0x3a3>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; eldoin-supply = <®_dcdc1>; }; }; diff --git a/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts b/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts index 9c006fc18821..c31c97d16024 100644 --- a/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts +++ 
b/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts @@ -122,7 +122,7 @@ compatible = "x-powers,axp818", "x-powers,axp813"; reg = <0x3a3>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; eldoin-supply = <®_dcdc1>; swin-supply = <®_dcdc1>; }; @@ -142,7 +142,7 @@ ac100_rtc: rtc { compatible = "x-powers,ac100-rtc"; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; clocks = <&ac100_codec>; #clock-cells = <1>; clock-output-names = "cko1_rtc", diff --git a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts index b60016a4429c..5a7e1bd5f825 100644 --- a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts +++ b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts @@ -203,7 +203,7 @@ compatible = "x-powers,axp813"; reg = <0x3a3>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; eldoin-supply = <®_dcdc1>; fldoin-supply = <®_dcdc5>; swin-supply = <®_dcdc1>; @@ -225,7 +225,7 @@ ac100_rtc: rtc { compatible = "x-powers,ac100-rtc"; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; clocks = <&ac100_codec>; #clock-cells = <1>; clock-output-names = "cko1_rtc", diff --git a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts index e26af7cf10e0..870993393fc2 100644 --- a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts +++ b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts @@ -239,7 +239,7 @@ compatible = "x-powers,axp818", "x-powers,axp813"; reg = <0x3a3>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; eldoin-supply = <®_dcdc1>; swin-supply = <®_dcdc1>; x-powers,drive-vbus-en; @@ -260,7 +260,7 @@ ac100_rtc: rtc { compatible = "x-powers,ac100-rtc"; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; clocks = <&ac100_codec>; #clock-cells = <1>; clock-output-names = "cko1_rtc", diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts index 83b01b03e08e..7fe2a584ddf9 100644 --- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts +++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts @@ -263,7 +263,7 @@ compatible = "x-powers,axp813"; reg = <0x3a3>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; swin-supply = <®_dcdc1>; x-powers,drive-vbus-en; }; @@ -283,7 +283,7 @@ ac100_rtc: rtc { compatible = "x-powers,ac100-rtc"; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; clocks = <&ac100_codec>; #clock-cells = <1>; clock-output-names = "cko1_rtc", diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi index bd898b250e74..ac97eac91349 100644 --- a/arch/arm/boot/dts/sun8i-a83t.dtsi +++ b/arch/arm/boot/dts/sun8i-a83t.dtsi @@ -708,6 +708,7 @@ pio: pinctrl@1c20800 { compatible = "allwinner,sun8i-a83t-pinctrl"; + interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>; @@ -1111,7 +1112,7 @@ compatible = "allwinner,sun8i-a83t-r-intc", "allwinner,sun6i-a31-r-intc"; interrupt-controller; - #interrupt-cells = <2>; + #interrupt-cells = <3>; reg = <0x01f00c00 0x400>; interrupts = 
<GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>; }; @@ -1147,6 +1148,7 @@ r_lradc: lradc@1f03c00 { compatible = "allwinner,sun8i-a83t-r-lradc"; reg = <0x01f03c00 0x100>; + interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; }; @@ -1154,6 +1156,7 @@ r_pio: pinctrl@1f02c00 { compatible = "allwinner,sun8i-a83t-r-pinctrl"; reg = <0x01f02c00 0x400>; + interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>; clocks = <&r_ccu CLK_APB0_PIO>, <&osc24M>, <&osc16Md512>; diff --git a/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts b/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts index f3f7a2c912ab..8e8634ff2f9d 100644 --- a/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts +++ b/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts @@ -26,6 +26,17 @@ stdout-path = "serial0:115200n8"; }; + connector { + compatible = "hdmi-connector"; + type = "c"; + + port { + hdmi_con_in: endpoint { + remote-endpoint = <&hdmi_out_con>; + }; + }; + }; + leds { compatible = "gpio-leds"; @@ -103,10 +114,24 @@ cpu-supply = <®_vdd_cpux>; }; +&de { + status = "okay"; +}; + &ehci0 { status = "okay"; }; +&hdmi { + status = "okay"; +}; + +&hdmi_out { + hdmi_out_con: endpoint { + remote-endpoint = <&hdmi_con_in>; + }; +}; + &mmc0 { vmmc-supply = <®_vcc3v3>; bus-width = <4>; diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts index 62b5280ec093..f0e591e1c771 100644 --- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts +++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts @@ -111,6 +111,17 @@ #sound-dai-cells = <0>; compatible = "linux,spdif-dit"; }; + + r-gpio-keys { + compatible = "gpio-keys"; + + power { + label = "power"; + linux,code = <KEY_POWER>; + gpios = <&r_pio 0 3 GPIO_ACTIVE_LOW>; + wakeup-source; + }; + }; }; &de { diff --git a/arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts b/arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts index 293016d081cd..bf5b5e2f6168 100644 --- a/arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts +++ b/arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts @@ -164,7 +164,7 @@ compatible = "x-powers,axp223"; reg = <0x3a3>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; eldoin-supply = <®_dcdc1>; x-powers,drive-vbus-en; }; diff --git a/arch/arm/boot/dts/sun8i-r16-parrot.dts b/arch/arm/boot/dts/sun8i-r16-parrot.dts index 2be1b76fe2f6..95543a9c2118 100644 --- a/arch/arm/boot/dts/sun8i-r16-parrot.dts +++ b/arch/arm/boot/dts/sun8i-r16-parrot.dts @@ -165,7 +165,7 @@ compatible = "x-powers,axp223"; reg = <0x3a3>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; drivevbus-supply = <®_vcc5v0>; x-powers,drive-vbus-en; }; diff --git a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi index 797d61cff11e..872d56caa9ce 100644 --- a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi +++ b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi @@ -94,7 +94,7 @@ compatible = "x-powers,axp223"; reg = <0x3a3>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; eldoin-supply = <®_dcdc1>; drivevbus-supply = <®_vcc5v0>; x-powers,drive-vbus-en; diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi index 9be13378d4df..c7428df9469e 100644 --- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi +++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi @@ -395,6 +395,7 @@ pio: 
pinctrl@1c20800 { /* compatible is in per SoC .dtsi file */ reg = <0x01c20800 0x400>; + interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>; clocks = <&ccu CLK_BUS_PIO>, <&osc24M>, <&rtc 0>; @@ -852,6 +853,7 @@ rtc: rtc@1f00000 { /* compatible is in per SoC .dtsi file */ reg = <0x01f00000 0x400>; + interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>; clock-output-names = "osc32k", "osc32k-out", "iosc"; @@ -859,6 +861,15 @@ #clock-cells = <1>; }; + r_intc: interrupt-controller@1f00c00 { + compatible = "allwinner,sun8i-h3-r-intc", + "allwinner,sun6i-a31-r-intc"; + interrupt-controller; + #interrupt-cells = <3>; + reg = <0x01f00c00 0x400>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>; + }; + r_ccu: clock@1f01400 { compatible = "allwinner,sun8i-h3-r-ccu"; reg = <0x01f01400 0x100>; @@ -900,6 +911,7 @@ r_pio: pinctrl@1f02c00 { compatible = "allwinner,sun8i-h3-r-pinctrl"; reg = <0x01f02c00 0x400>; + interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>; clocks = <&r_ccu CLK_APB0_PIO>, <&osc24M>, <&rtc 0>; clock-names = "apb", "hosc", "losc"; diff --git a/arch/arm/boot/dts/tegra124-peripherals-opp.dtsi b/arch/arm/boot/dts/tegra124-peripherals-opp.dtsi index 49d9420a3289..781ac8601030 100644 --- a/arch/arm/boot/dts/tegra124-peripherals-opp.dtsi +++ b/arch/arm/boot/dts/tegra124-peripherals-opp.dtsi @@ -128,24 +128,28 @@ opp-microvolt = <800000 800000 1150000>; opp-hz = /bits/ 64 <204000000>; opp-supported-hw = <0x0003>; + opp-suspend; }; opp@204000000,950 { opp-microvolt = <950000 950000 1150000>; opp-hz = /bits/ 64 <204000000>; opp-supported-hw = <0x0008>; + opp-suspend; }; opp@204000000,1050 { opp-microvolt = <1050000 1050000 1150000>; opp-hz = /bits/ 64 <204000000>; opp-supported-hw = <0x0010>; + opp-suspend; }; opp@204000000,1110 { opp-microvolt = <1110000 1110000 1150000>; opp-hz = /bits/ 64 <204000000>; opp-supported-hw = <0x0004>; + opp-suspend; }; opp@264000000,800 { @@ -360,6 +364,7 @@ opp-hz = /bits/ 64 <204000000>; opp-supported-hw = <0x001F>; opp-peak-kBps = <3264000>; + opp-suspend; }; opp@264000000 { diff --git a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts index d3b99535d755..2298fc034183 100644 --- a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts +++ b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts @@ -448,8 +448,10 @@ reset-gpios = <&gpio TEGRA_GPIO(Q, 7) GPIO_ACTIVE_LOW>; - avdd-supply = <&vdd_3v3_sys>; + vdda-supply = <&vdd_3v3_sys>; vdd-supply = <&vdd_3v3_sys>; + + atmel,wakeup-method = <1>; }; gyroscope@68 { @@ -575,7 +577,7 @@ vdd_core: sm0 { regulator-name = "vdd_sm0,vdd_core"; - regulator-min-microvolt = <1200000>; + regulator-min-microvolt = <950000>; regulator-max-microvolt = <1300000>; regulator-coupled-with = <&rtc_vdd &vdd_cpu>; regulator-coupled-max-spread = <170000 550000>; @@ -616,7 +618,7 @@ rtc_vdd: ldo2 { regulator-name = "vdd_ldo2,vdd_rtc"; - regulator-min-microvolt = <1200000>; + regulator-min-microvolt = <950000>; regulator-max-microvolt = <1300000>; regulator-coupled-with = <&vdd_core &vdd_cpu>; regulator-coupled-max-spread = <170000 550000>; @@ -838,9 +840,10 @@ #cooling-cells = <2>; }; - cpu@1 { + cpu1: cpu@1 { cpu-supply = <&vdd_cpu>; operating-points-v2 = <&cpu0_opp_table>; + #cooling-cells = <2>; }; }; @@ -1055,7 +1058,7 @@ trip0: cpu-alert0 { /* start throttling at 50C */ temperature = <50000>; - hysteresis = <3000>; + hysteresis = <200>; type = "passive"; }; @@ 
-1070,7 +1073,8 @@ cooling-maps { map0 { trip = <&trip0>; - cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; }; }; }; diff --git a/arch/arm/boot/dts/tegra20-cpu-opp.dtsi b/arch/arm/boot/dts/tegra20-cpu-opp.dtsi index 702a635e88e7..135de316383b 100644 --- a/arch/arm/boot/dts/tegra20-cpu-opp.dtsi +++ b/arch/arm/boot/dts/tegra20-cpu-opp.dtsi @@ -9,12 +9,14 @@ clock-latency-ns = <400000>; opp-supported-hw = <0x0F 0x0003>; opp-hz = /bits/ 64 <216000000>; + opp-suspend; }; opp@216000000,800 { clock-latency-ns = <400000>; opp-supported-hw = <0x0F 0x0004>; opp-hz = /bits/ 64 <216000000>; + opp-suspend; }; opp@312000000,750 { diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts index 7e49112cd9a1..940a9f31cd86 100644 --- a/arch/arm/boot/dts/tegra20-paz00.dts +++ b/arch/arm/boot/dts/tegra20-paz00.dts @@ -387,10 +387,10 @@ core_vdd_reg: sm0 { regulator-name = "+1.2vs_sm0,vdd_core"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <1225000>; + regulator-min-microvolt = <950000>; + regulator-max-microvolt = <1300000>; regulator-coupled-with = <&rtc_vdd_reg &cpu_vdd_reg>; - regulator-coupled-max-spread = <170000 450000>; + regulator-coupled-max-spread = <170000 550000>; regulator-always-on; nvidia,tegra-core-regulator; @@ -401,7 +401,7 @@ regulator-min-microvolt = <750000>; regulator-max-microvolt = <1100000>; regulator-coupled-with = <&core_vdd_reg &rtc_vdd_reg>; - regulator-coupled-max-spread = <450000 450000>; + regulator-coupled-max-spread = <550000 550000>; regulator-always-on; nvidia,tegra-cpu-regulator; @@ -425,10 +425,10 @@ rtc_vdd_reg: ldo2 { regulator-name = "+1.2vs_ldo2,vdd_rtc"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <1225000>; + regulator-min-microvolt = <950000>; + regulator-max-microvolt = <1300000>; regulator-coupled-with = <&core_vdd_reg &cpu_vdd_reg>; - regulator-coupled-max-spread = <170000 450000>; + regulator-coupled-max-spread = <170000 550000>; regulator-always-on; nvidia,tegra-rtc-regulator; diff --git a/arch/arm/boot/dts/tegra20-peripherals-opp.dtsi b/arch/arm/boot/dts/tegra20-peripherals-opp.dtsi index b84afecea154..ef3ad2e5f270 100644 --- a/arch/arm/boot/dts/tegra20-peripherals-opp.dtsi +++ b/arch/arm/boot/dts/tegra20-peripherals-opp.dtsi @@ -68,6 +68,7 @@ opp-microvolt = <1000000 1000000 1300000>; opp-hz = /bits/ 64 <216000000>; opp-supported-hw = <0x000F>; + opp-suspend; }; opp@300000000 { diff --git a/arch/arm/boot/dts/tegra20-ventana.dts b/arch/arm/boot/dts/tegra20-ventana.dts index 055334ae3d28..99a356c1ccec 100644 --- a/arch/arm/boot/dts/tegra20-ventana.dts +++ b/arch/arm/boot/dts/tegra20-ventana.dts @@ -2,8 +2,10 @@ /dts-v1/; #include <dt-bindings/input/input.h> +#include <dt-bindings/thermal/thermal.h> #include "tegra20.dtsi" #include "tegra20-cpu-opp.dtsi" +#include "tegra20-cpu-opp-microvolt.dtsi" / { model = "NVIDIA Tegra20 Ventana evaluation board"; @@ -420,18 +422,28 @@ regulator-always-on; }; - sm0 { + vdd_core: sm0 { regulator-name = "vdd_sm0,vdd_core"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <1200000>; + regulator-min-microvolt = <950000>; + regulator-max-microvolt = <1300000>; + regulator-coupled-with = <&rtc_vdd &vdd_cpu>; + regulator-coupled-max-spread = <170000 550000>; regulator-always-on; + regulator-boot-on; + + nvidia,tegra-core-regulator; }; - sm1 { + vdd_cpu: sm1 { regulator-name = "vdd_sm1,vdd_cpu"; - regulator-min-microvolt = 
<1000000>; - regulator-max-microvolt = <1000000>; + regulator-min-microvolt = <750000>; + regulator-max-microvolt = <1125000>; + regulator-coupled-with = <&vdd_core &rtc_vdd>; + regulator-coupled-max-spread = <550000 550000>; regulator-always-on; + regulator-boot-on; + + nvidia,tegra-cpu-regulator; }; sm2_reg: sm2 { @@ -450,10 +462,16 @@ regulator-always-on; }; - ldo2 { + rtc_vdd: ldo2 { regulator-name = "vdd_ldo2,vdd_rtc"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <1200000>; + regulator-min-microvolt = <950000>; + regulator-max-microvolt = <1300000>; + regulator-coupled-with = <&vdd_core &vdd_cpu>; + regulator-coupled-max-spread = <170000 550000>; + regulator-always-on; + regulator-boot-on; + + nvidia,tegra-rtc-regulator; }; ldo3 { @@ -511,9 +529,10 @@ }; }; - temperature-sensor@4c { + nct1008: temperature-sensor@4c { compatible = "onnn,nct1008"; reg = <0x4c>; + #thermal-sensor-cells = <1>; }; }; @@ -595,11 +614,15 @@ cpus { cpu0: cpu@0 { + cpu-supply = <&vdd_cpu>; operating-points-v2 = <&cpu0_opp_table>; + #cooling-cells = <2>; }; - cpu@1 { + cpu1: cpu@1 { + cpu-supply = <&vdd_cpu>; operating-points-v2 = <&cpu0_opp_table>; + #cooling-cells = <2>; }; }; @@ -697,4 +720,37 @@ <&tegra_car TEGRA20_CLK_CDEV1>; clock-names = "pll_a", "pll_a_out0", "mclk"; }; + + thermal-zones { + cpu-thermal { + polling-delay-passive = <1000>; /* milliseconds */ + polling-delay = <5000>; /* milliseconds */ + + thermal-sensors = <&nct1008 1>; + + trips { + trip0: cpu-alert0 { + /* start throttling at 50C */ + temperature = <50000>; + hysteresis = <200>; + type = "passive"; + }; + + trip1: cpu-crit { + /* shut down at 60C */ + temperature = <60000>; + hysteresis = <2000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&trip0>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + }; }; diff --git a/arch/arm/boot/dts/tegra30-apalis.dtsi b/arch/arm/boot/dts/tegra30-apalis.dtsi index 6544ce70b46f..b2ac51fb15b1 100644 --- a/arch/arm/boot/dts/tegra30-apalis.dtsi +++ b/arch/arm/boot/dts/tegra30-apalis.dtsi @@ -860,6 +860,7 @@ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>; #interrupt-cells = <2>; interrupt-controller; + wakeup-source; ti,system-power-controller; diff --git a/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-common.dtsi b/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-common.dtsi index ac1c1a63eb0e..dc773b1bf8ee 100644 --- a/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-common.dtsi +++ b/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-common.dtsi @@ -1056,19 +1056,22 @@ #cooling-cells = <2>; }; - cpu@1 { + cpu1: cpu@1 { cpu-supply = <&vdd_cpu>; operating-points-v2 = <&cpu0_opp_table>; + #cooling-cells = <2>; }; - cpu@2 { + cpu2: cpu@2 { cpu-supply = <&vdd_cpu>; operating-points-v2 = <&cpu0_opp_table>; + #cooling-cells = <2>; }; - cpu@3 { + cpu3: cpu@3 { cpu-supply = <&vdd_cpu>; operating-points-v2 = <&cpu0_opp_table>; + #cooling-cells = <2>; }; }; @@ -1281,7 +1284,10 @@ cooling-maps { map0 { trip = <&trip0>; - cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; }; }; }; diff --git a/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-ti-pmic.dtsi b/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-ti-pmic.dtsi index bfc06b988781..b97da45ebdb4 100644 --- 
a/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-ti-pmic.dtsi +++ b/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-ti-pmic.dtsi @@ -12,6 +12,7 @@ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>; #interrupt-cells = <2>; interrupt-controller; + wakeup-source; ti,en-gpio-sleep = <0 0 1 0 0 0 0 0 0>; ti,system-power-controller; diff --git a/arch/arm/boot/dts/tegra30-beaver.dts b/arch/arm/boot/dts/tegra30-beaver.dts index e0624b74fb50..e159feeedef7 100644 --- a/arch/arm/boot/dts/tegra30-beaver.dts +++ b/arch/arm/boot/dts/tegra30-beaver.dts @@ -1776,6 +1776,7 @@ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>; #interrupt-cells = <2>; interrupt-controller; + wakeup-source; ti,system-power-controller; diff --git a/arch/arm/boot/dts/tegra30-cardhu-a04.dts b/arch/arm/boot/dts/tegra30-cardhu-a04.dts index c1c0ca628af1..a11028b8b67b 100644 --- a/arch/arm/boot/dts/tegra30-cardhu-a04.dts +++ b/arch/arm/boot/dts/tegra30-cardhu-a04.dts @@ -2,8 +2,6 @@ /dts-v1/; #include "tegra30-cardhu.dtsi" -#include "tegra30-cpu-opp.dtsi" -#include "tegra30-cpu-opp-microvolt.dtsi" /* This dts file support the cardhu A04 and later versions of board */ @@ -92,50 +90,4 @@ enable-active-high; gpio = <&gpio TEGRA_GPIO(DD, 0) GPIO_ACTIVE_HIGH>; }; - - i2c@7000d000 { - pmic: tps65911@2d { - regulators { - vddctrl_reg: vddctrl { - regulator-min-microvolt = <800000>; - regulator-max-microvolt = <1125000>; - regulator-coupled-with = <&vddcore_reg>; - regulator-coupled-max-spread = <300000>; - regulator-max-step-microvolt = <100000>; - - nvidia,tegra-cpu-regulator; - }; - }; - }; - - vddcore_reg: tps62361@60 { - regulator-coupled-with = <&vddctrl_reg>; - regulator-coupled-max-spread = <300000>; - regulator-max-step-microvolt = <100000>; - - nvidia,tegra-core-regulator; - }; - }; - - cpus { - cpu0: cpu@0 { - cpu-supply = <&vddctrl_reg>; - operating-points-v2 = <&cpu0_opp_table>; - }; - - cpu@1 { - cpu-supply = <&vddctrl_reg>; - operating-points-v2 = <&cpu0_opp_table>; - }; - - cpu@2 { - cpu-supply = <&vddctrl_reg>; - operating-points-v2 = <&cpu0_opp_table>; - }; - - cpu@3 { - cpu-supply = <&vddctrl_reg>; - operating-points-v2 = <&cpu0_opp_table>; - }; - }; }; diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi index dab9989fa760..2dff14b87f3e 100644 --- a/arch/arm/boot/dts/tegra30-cardhu.dtsi +++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi @@ -1,6 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 #include <dt-bindings/input/input.h> +#include <dt-bindings/thermal/thermal.h> #include "tegra30.dtsi" +#include "tegra30-cpu-opp.dtsi" +#include "tegra30-cpu-opp-microvolt.dtsi" /** * This file contains common DT entry for all fab version of Cardhu. 
@@ -240,6 +243,7 @@ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>; #interrupt-cells = <2>; interrupt-controller; + wakeup-source; ti,system-power-controller; @@ -272,9 +276,14 @@ vddctrl_reg: vddctrl { regulator-name = "vdd_cpu,vdd_sys"; - regulator-min-microvolt = <1000000>; - regulator-max-microvolt = <1000000>; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1250000>; + regulator-coupled-with = <&vdd_core>; + regulator-coupled-max-spread = <300000>; + regulator-max-step-microvolt = <100000>; regulator-always-on; + + nvidia,tegra-cpu-regulator; }; vio_reg: vio { @@ -334,25 +343,31 @@ }; }; - temperature-sensor@4c { + nct1008: temperature-sensor@4c { compatible = "onnn,nct1008"; reg = <0x4c>; vcc-supply = <&sys_3v3_reg>; interrupt-parent = <&gpio>; interrupts = <TEGRA_GPIO(CC, 2) IRQ_TYPE_LEVEL_LOW>; + #thermal-sensor-cells = <1>; }; - tps62361@60 { + vdd_core: tps62361@60 { compatible = "ti,tps62361"; reg = <0x60>; regulator-name = "tps62361-vout"; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1500000>; + regulator-coupled-with = <&vddctrl_reg>; + regulator-coupled-max-spread = <300000>; + regulator-max-step-microvolt = <100000>; regulator-boot-on; regulator-always-on; ti,vsel0-state-high; ti,vsel1-state-high; + + nvidia,tegra-core-regulator; }; }; @@ -424,6 +439,32 @@ #clock-cells = <0>; }; + cpus { + cpu0: cpu@0 { + cpu-supply = <&vddctrl_reg>; + operating-points-v2 = <&cpu0_opp_table>; + #cooling-cells = <2>; + }; + + cpu1: cpu@1 { + cpu-supply = <&vddctrl_reg>; + operating-points-v2 = <&cpu0_opp_table>; + #cooling-cells = <2>; + }; + + cpu2: cpu@2 { + cpu-supply = <&vddctrl_reg>; + operating-points-v2 = <&cpu0_opp_table>; + #cooling-cells = <2>; + }; + + cpu3: cpu@3 { + cpu-supply = <&vddctrl_reg>; + operating-points-v2 = <&cpu0_opp_table>; + #cooling-cells = <2>; + }; + }; + panel: panel { compatible = "chunghwa,claa101wb01"; ddc-i2c-bus = <&panelddc>; @@ -603,6 +644,41 @@ <&tegra_car TEGRA30_CLK_EXTERN1>; }; + thermal-zones { + cpu-thermal { + polling-delay-passive = <1000>; /* milliseconds */ + polling-delay = <5000>; /* milliseconds */ + + thermal-sensors = <&nct1008 1>; + + trips { + trip0: cpu-alert0 { + /* throttle at 57C until temperature drops to 56.8C */ + temperature = <57000>; + hysteresis = <200>; + type = "passive"; + }; + + trip1: cpu-crit { + /* shut down at 60C */ + temperature = <60000>; + hysteresis = <2000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&trip0>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + }; + gpio-keys { compatible = "gpio-keys"; diff --git a/arch/arm/boot/dts/tegra30-colibri.dtsi b/arch/arm/boot/dts/tegra30-colibri.dtsi index e36aa3ce6c3d..413e35215804 100644 --- a/arch/arm/boot/dts/tegra30-colibri.dtsi +++ b/arch/arm/boot/dts/tegra30-colibri.dtsi @@ -737,6 +737,7 @@ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>; #interrupt-cells = <2>; interrupt-controller; + wakeup-source; ti,system-power-controller; diff --git a/arch/arm/boot/dts/tegra30-cpu-opp.dtsi b/arch/arm/boot/dts/tegra30-cpu-opp.dtsi index 0f7135006d19..72f2fe26cc0e 100644 --- a/arch/arm/boot/dts/tegra30-cpu-opp.dtsi +++ b/arch/arm/boot/dts/tegra30-cpu-opp.dtsi @@ -45,18 +45,21 @@ clock-latency-ns = <100000>; opp-supported-hw = <0x1F 0x31FE>; opp-hz = /bits/ 64 <204000000>; + opp-suspend; }; opp@204000000,850 { clock-latency-ns = <100000>; opp-supported-hw = 
<0x1F 0x0C01>; opp-hz = /bits/ 64 <204000000>; + opp-suspend; }; opp@204000000,912 { clock-latency-ns = <100000>; opp-supported-hw = <0x1F 0x0200>; opp-hz = /bits/ 64 <204000000>; + opp-suspend; }; opp@312000000,850 { diff --git a/arch/arm/boot/dts/tegra30-ouya.dts b/arch/arm/boot/dts/tegra30-ouya.dts index 0368b3b816ef..9a10e0d69762 100644 --- a/arch/arm/boot/dts/tegra30-ouya.dts +++ b/arch/arm/boot/dts/tegra30-ouya.dts @@ -139,6 +139,7 @@ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>; #interrupt-cells = <2>; interrupt-controller; + wakeup-source; ti,en-gpio-sleep = <0 1 1 1 1 1 0 0 1>; ti,system-power-controller; @@ -391,19 +392,23 @@ cpu-supply = <&vdd_cpu>; #cooling-cells = <2>; }; - cpu@1 { + + cpu1: cpu@1 { operating-points-v2 = <&cpu0_opp_table>; cpu-supply = <&vdd_cpu>; + #cooling-cells = <2>; }; - cpu@2 { + cpu2: cpu@2 { operating-points-v2 = <&cpu0_opp_table>; cpu-supply = <&vdd_cpu>; + #cooling-cells = <2>; }; - cpu@3 { + cpu3: cpu@3 { operating-points-v2 = <&cpu0_opp_table>; cpu-supply = <&vdd_cpu>; + #cooling-cells = <2>; }; }; @@ -455,7 +460,10 @@ }; map1 { trip = <&cpu_alert1>; - cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; }; }; }; diff --git a/arch/arm/boot/dts/tegra30-peripherals-opp.dtsi b/arch/arm/boot/dts/tegra30-peripherals-opp.dtsi index cbe84d25e726..2c9780319725 100644 --- a/arch/arm/boot/dts/tegra30-peripherals-opp.dtsi +++ b/arch/arm/boot/dts/tegra30-peripherals-opp.dtsi @@ -128,12 +128,14 @@ opp-microvolt = <1000000 1000000 1350000>; opp-hz = /bits/ 64 <204000000>; opp-supported-hw = <0x0007>; + opp-suspend; }; opp@204000000,1250 { opp-microvolt = <1250000 1250000 1350000>; opp-hz = /bits/ 64 <204000000>; opp-supported-hw = <0x0008>; + opp-suspend; }; opp@333500000,1000 { @@ -312,6 +314,7 @@ opp-hz = /bits/ 64 <204000000>; opp-supported-hw = <0x000F>; opp-peak-kBps = <1632000>; + opp-suspend; }; opp@333500000 { diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi index b0b15c97306b..e81e5937a60a 100644 --- a/arch/arm/boot/dts/uniphier-pxs2.dtsi +++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi @@ -583,7 +583,7 @@ clocks = <&sys_clk 6>; reset-names = "ether"; resets = <&sys_rst 6>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; local-mac-address = [00 00 00 00 00 00]; socionext,syscon-phy-mode = <&soc_glue 0>; diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig index 6403b064e8dc..06c888a45eb3 100644 --- a/arch/arm/configs/at91_dt_defconfig +++ b/arch/arm/configs/at91_dt_defconfig @@ -46,6 +46,7 @@ CONFIG_DEVTMPFS_MOUNT=y # CONFIG_STANDALONE is not set # CONFIG_PREVENT_FIRMWARE_BUILD is not set CONFIG_MTD=y +CONFIG_MTD_TESTS=m CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_BLOCK=y CONFIG_MTD_DATAFLASH=y @@ -53,7 +54,7 @@ CONFIG_MTD_RAW_NAND=y CONFIG_MTD_NAND_ATMEL=y CONFIG_MTD_SPI_NOR=y CONFIG_MTD_UBI=y -CONFIG_MTD_UBI_GLUEBI=y +CONFIG_MTD_UBI_FASTMAP=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=4 @@ -131,6 +132,8 @@ CONFIG_MEDIA_SUPPORT_FILTER=y # CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set CONFIG_MEDIA_CAMERA_SUPPORT=y CONFIG_MEDIA_PLATFORM_SUPPORT=y +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_VIDEO_CLASS=m CONFIG_V4L_PLATFORM_DRIVERS=y CONFIG_VIDEO_ATMEL_ISI=y CONFIG_VIDEO_OV2640=m diff --git a/arch/arm/configs/dove_defconfig b/arch/arm/configs/dove_defconfig index 
e70c997d5f4c..b935162a8bba 100644 --- a/arch/arm/configs/dove_defconfig +++ b/arch/arm/configs/dove_defconfig @@ -63,7 +63,6 @@ CONFIG_INPUT_EVDEV=y # CONFIG_MOUSE_PS2 is not set # CONFIG_SERIO is not set CONFIG_LEGACY_PTY_COUNT=16 -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_RUNTIME_UARTS=2 diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig index 81665b7abf83..a49e699e52de 100644 --- a/arch/arm/configs/ezx_defconfig +++ b/arch/arm/configs/ezx_defconfig @@ -213,7 +213,6 @@ CONFIG_POWER_SUPPLY=y CONFIG_EZX_PCAP=y CONFIG_REGULATOR=y CONFIG_REGULATOR_DEBUG=y -CONFIG_REGULATOR_USERSPACE_CONSUMER=y CONFIG_REGULATOR_PCAP=y CONFIG_MEDIA_SUPPORT=y CONFIG_VIDEO_DEV=y diff --git a/arch/arm/configs/footbridge_defconfig b/arch/arm/configs/footbridge_defconfig index 3a7938f244e5..2aa3ebeb89d7 100644 --- a/arch/arm/configs/footbridge_defconfig +++ b/arch/arm/configs/footbridge_defconfig @@ -7,7 +7,6 @@ CONFIG_EXPERT=y CONFIG_MODULES=y CONFIG_ARCH_FOOTBRIDGE=y CONFIG_ARCH_CATS=y -CONFIG_ARCH_PERSONAL_SERVER=y CONFIG_ARCH_EBSA285_HOST=y CONFIG_ARCH_NETWINDER=y CONFIG_LEDS=y diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig index ae15a2a33802..118c4c927f26 100644 --- a/arch/arm/configs/imote2_defconfig +++ b/arch/arm/configs/imote2_defconfig @@ -194,7 +194,6 @@ CONFIG_POWER_SUPPLY=y CONFIG_PMIC_DA903X=y CONFIG_REGULATOR=y CONFIG_REGULATOR_DEBUG=y -CONFIG_REGULATOR_USERSPACE_CONSUMER=y CONFIG_REGULATOR_DA903X=y CONFIG_MEDIA_SUPPORT=y CONFIG_VIDEO_DEV=y diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig index b4670d42f378..abde1fb23b20 100644 --- a/arch/arm/configs/magician_defconfig +++ b/arch/arm/configs/magician_defconfig @@ -72,7 +72,6 @@ CONFIG_INPUT_TOUCHSCREEN=y CONFIG_INPUT_MISC=y CONFIG_INPUT_UINPUT=m # CONFIG_SERIO is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_PXA=y # CONFIG_LEGACY_PTYS is not set # CONFIG_HW_RANDOM is not set diff --git a/arch/arm/configs/moxart_defconfig b/arch/arm/configs/moxart_defconfig index 6834e97af348..eacc089d86c5 100644 --- a/arch/arm/configs/moxart_defconfig +++ b/arch/arm/configs/moxart_defconfig @@ -79,7 +79,6 @@ CONFIG_INPUT_EVBUG=y # CONFIG_SERIO is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=1 diff --git a/arch/arm/configs/mps2_defconfig b/arch/arm/configs/mps2_defconfig index 1d923dbb9928..89f4a6ff30bd 100644 --- a/arch/arm/configs/mps2_defconfig +++ b/arch/arm/configs/mps2_defconfig @@ -69,7 +69,6 @@ CONFIG_SMSC911X=y # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set CONFIG_SERIAL_NONSTANDARD=y -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_MPS2_UART_CONSOLE=y CONFIG_SERIAL_MPS2_UART=y # CONFIG_HW_RANDOM is not set diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig index 9f862b21b40a..80a3ae02d759 100644 --- a/arch/arm/configs/multi_v5_defconfig +++ b/arch/arm/configs/multi_v5_defconfig @@ -33,6 +33,8 @@ CONFIG_SOC_IMX25=y CONFIG_SOC_IMX27=y CONFIG_ARCH_MVEBU=y CONFIG_MACH_KIRKWOOD=y +CONFIG_ARCH_NPCM=y +CONFIG_ARCH_WPCM450=y CONFIG_ARCH_ORION5X=y CONFIG_MACH_DB88F5281=y CONFIG_MACH_RD88F5182=y @@ -178,6 +180,7 @@ CONFIG_THERMAL=y CONFIG_KIRKWOOD_THERMAL=y CONFIG_AT91SAM9X_WATCHDOG=y CONFIG_ORION_WATCHDOG=y +CONFIG_NPCM7XX_WATCHDOG=y CONFIG_IMX2_WDT=y CONFIG_MFD_ATMEL_HLCDC=y # CONFIG_ABX500_CORE is not set diff --git 
a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 3823da605430..52a0400fdd92 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -79,7 +79,7 @@ CONFIG_ARCH_MSM8960=y CONFIG_ARCH_MSM8974=y CONFIG_ARCH_ROCKCHIP=y CONFIG_ARCH_RENESAS=y -CONFIG_ARCH_SOCFPGA=y +CONFIG_ARCH_INTEL_SOCFPGA=y CONFIG_PLAT_SPEAR=y CONFIG_ARCH_SPEAR13XX=y CONFIG_MACH_SPEAR1310=y @@ -791,7 +791,6 @@ CONFIG_USB_XHCI_MVEBU=y CONFIG_USB_XHCI_TEGRA=m CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_HCD_STI=y -CONFIG_USB_EHCI_TEGRA=y CONFIG_USB_EHCI_EXYNOS=m CONFIG_USB_EHCI_MV=m CONFIG_USB_OHCI_HCD=y @@ -817,6 +816,7 @@ CONFIG_USB_DWC2=y CONFIG_USB_CHIPIDEA=y CONFIG_USB_CHIPIDEA_UDC=y CONFIG_USB_CHIPIDEA_HOST=y +CONFIG_USB_CHIPIDEA_TEGRA=y CONFIG_USB_ISP1760=y CONFIG_USB_HSIC_USB3503=y CONFIG_AB8500_USB=y diff --git a/arch/arm/configs/mvebu_v5_defconfig b/arch/arm/configs/mvebu_v5_defconfig index 4f16716bfc32..d57ff30dabff 100644 --- a/arch/arm/configs/mvebu_v5_defconfig +++ b/arch/arm/configs/mvebu_v5_defconfig @@ -100,7 +100,6 @@ CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_GPIO=y # CONFIG_INPUT_MOUSE is not set CONFIG_LEGACY_PTY_COUNT=16 -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_RUNTIME_UARTS=2 diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index f250bf1cc022..77b4bea12feb 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -87,6 +87,155 @@ CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NFT_REJECT_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m 
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_FLOW_TABLE_IPV4=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_MATCH_SRH=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_LOG_BRIDGE=m CONFIG_BRIDGE=m CONFIG_BRIDGE_VLAN_FILTERING=y CONFIG_VLAN_8021Q=m @@ -137,7 +286,6 @@ CONFIG_PCI_EPF_TEST=m CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y CONFIG_OMAP_OCP2SCP=y -CONFIG_SIMPLE_PM_BUS=y CONFIG_CONNECTOR=m CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y @@ -165,6 +313,7 @@ CONFIG_SENSORS_TSL2550=m CONFIG_SRAM=y CONFIG_PCI_ENDPOINT_TEST=m CONFIG_EEPROM_AT24=m +CONFIG_EEPROM_AT25=m CONFIG_BLK_DEV_SD=y CONFIG_SCSI_SCAN_ASYNC=y CONFIG_ATA=y diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig index bd7dd81c9c54..875a3c28a267 100644 --- a/arch/arm/configs/pxa_defconfig +++ b/arch/arm/configs/pxa_defconfig @@ -415,7 +415,6 @@ CONFIG_MFD_TC6393XB=y CONFIG_REGULATOR=y CONFIG_REGULATOR_DEBUG=y CONFIG_REGULATOR_FIXED_VOLTAGE=m -CONFIG_REGULATOR_USERSPACE_CONSUMER=m CONFIG_REGULATOR_ACT8865=m CONFIG_REGULATOR_AS3711=m CONFIG_REGULATOR_AXP20X=m diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig index 3f36887e8333..26353cbfa968 100644 --- a/arch/arm/configs/qcom_defconfig +++ 
b/arch/arm/configs/qcom_defconfig @@ -215,6 +215,8 @@ CONFIG_DMADEVICES=y CONFIG_QCOM_BAM_DMA=y CONFIG_STAGING=y CONFIG_COMMON_CLK_QCOM=y +CONFIG_QCOM_A7PLL=y +CONFIG_QCOM_CLK_APCS_SDX55=y CONFIG_QCOM_CLK_RPM=y CONFIG_QCOM_CLK_RPMH=y CONFIG_QCOM_CLK_SMD_RPM=y @@ -232,11 +234,14 @@ CONFIG_ARM_SMMU=y CONFIG_HWSPINLOCK=y CONFIG_HWSPINLOCK_QCOM=y CONFIG_MAILBOX=y +CONFIG_QCOM_APCS_IPC=y CONFIG_REMOTEPROC=y CONFIG_QCOM_ADSP_PIL=y +CONFIG_QCOM_Q6V5_PAS=y CONFIG_QCOM_Q6V5_PIL=y CONFIG_QCOM_WCNSS_PIL=y CONFIG_RPMSG_CHAR=y +CONFIG_RPMSG_QCOM_GLINK_SMEM=y CONFIG_RPMSG_QCOM_SMD=y CONFIG_QCOM_COMMAND_DB=y CONFIG_QCOM_GSBI=y @@ -273,6 +278,7 @@ CONFIG_QCOM_QFPROM=y CONFIG_INTERCONNECT=y CONFIG_INTERCONNECT_QCOM=y CONFIG_INTERCONNECT_QCOM_MSM8974=m +CONFIG_INTERCONNECT_QCOM_SDX55=m CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT3_FS=y @@ -290,7 +296,7 @@ CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=y CONFIG_DMA_CMA=y -CONFIG_CMA_SIZE_MBYTES=256 +CONFIG_CMA_SIZE_MBYTES=64 CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y CONFIG_DEBUG_INFO=y @@ -299,3 +305,5 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_WATCHDOG=y CONFIG_QCOM_WDT=y CONFIG_ARM_PSCI=y +CONFIG_CPU_FREQ=y +CONFIG_CPUFREQ_DT=y diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig index f4c3c0652432..17db3b3e2dd3 100644 --- a/arch/arm/configs/sama5_defconfig +++ b/arch/arm/configs/sama5_defconfig @@ -8,11 +8,6 @@ CONFIG_CGROUPS=y CONFIG_BLK_DEV_INITRD=y CONFIG_EMBEDDED=y CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODULE_FORCE_LOAD=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set CONFIG_ARCH_AT91=y CONFIG_SOC_SAMA5D2=y CONFIG_SOC_SAMA5D3=y @@ -25,9 +20,14 @@ CONFIG_KEXEC=y CONFIG_VFP=y CONFIG_NEON=y CONFIG_KERNEL_MODE_NEON=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_PM_DEBUG=y CONFIG_PM_ADVANCED_DEBUG=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -55,6 +55,7 @@ CONFIG_DEVTMPFS_MOUNT=y # CONFIG_STANDALONE is not set # CONFIG_PREVENT_FIRMWARE_BUILD is not set CONFIG_MTD=y +CONFIG_MTD_TESTS=m CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y @@ -62,7 +63,7 @@ CONFIG_MTD_RAW_NAND=y CONFIG_MTD_NAND_ATMEL=y CONFIG_MTD_SPI_NOR=y CONFIG_MTD_UBI=y -CONFIG_MTD_UBI_GLUEBI=y +CONFIG_MTD_UBI_FASTMAP=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=4 @@ -75,8 +76,8 @@ CONFIG_BLK_DEV_SD=y CONFIG_NETDEVICES=y CONFIG_NET_DSA_MICROCHIP_KSZ9477=m CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI=m -CONFIG_MACB=y # CONFIG_NET_VENDOR_BROADCOM is not set +CONFIG_MACB=y # CONFIG_NET_VENDOR_CIRRUS is not set # CONFIG_NET_VENDOR_FARADAY is not set # CONFIG_NET_VENDOR_INTEL is not set @@ -89,9 +90,12 @@ CONFIG_MACB=y # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_MICREL_PHY=y +CONFIG_USB_LAN78XX=m CONFIG_LIBERTAS_THINFIRM=m CONFIG_LIBERTAS_THINFIRM_USB=m -CONFIG_RTL8187=m +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_USB=m CONFIG_RT2X00=m CONFIG_RT2500USB=m CONFIG_RT73USB=m @@ -99,10 +103,7 @@ CONFIG_RT2800USB=m CONFIG_RT2800USB_RT53XX=y CONFIG_RT2800USB_RT55XX=y CONFIG_RT2800USB_UNKNOWN=y -CONFIG_MWIFIEX=m -CONFIG_MWIFIEX_SDIO=m -CONFIG_MWIFIEX_USB=m -# CONFIG_INPUT_MOUSEDEV is not set +CONFIG_RTL8187=m CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_ATKBD is not set CONFIG_KEYBOARD_QT1070=y @@ -124,10 +125,10 @@ CONFIG_SPI_ATMEL=y 
CONFIG_SPI_ATMEL_QUADSPI=y CONFIG_SPI_GPIO=y CONFIG_GPIO_SYSFS=y -CONFIG_GPIO_SAMA5D2_PIOBU=m +CONFIG_GPIO_SAMA5D2_PIOBU=y +CONFIG_POWER_RESET=y CONFIG_POWER_SUPPLY=y CONFIG_BATTERY_ACT8945A=y -CONFIG_POWER_RESET=y CONFIG_SENSORS_JC42=m CONFIG_WATCHDOG=y CONFIG_AT91SAM9X_WATCHDOG=y @@ -139,13 +140,15 @@ CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_ACT8865=y CONFIG_REGULATOR_ACT8945A=y -CONFIG_REGULATOR_MCP16502=m +CONFIG_REGULATOR_MCP16502=y CONFIG_REGULATOR_PWM=m CONFIG_MEDIA_SUPPORT=y CONFIG_MEDIA_SUPPORT_FILTER=y # CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set CONFIG_MEDIA_CAMERA_SUPPORT=y CONFIG_MEDIA_PLATFORM_SUPPORT=y +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_VIDEO_CLASS=m CONFIG_V4L_PLATFORM_DRIVERS=y CONFIG_VIDEO_ATMEL_ISC=y CONFIG_VIDEO_ATMEL_ISI=y @@ -166,11 +169,12 @@ CONFIG_SND=y CONFIG_SND_SOC=y CONFIG_SND_ATMEL_SOC=y CONFIG_SND_ATMEL_SOC_WM8904=y -# CONFIG_HID_GENERIC is not set CONFIG_SND_ATMEL_SOC_CLASSD=y CONFIG_SND_ATMEL_SOC_PDMIC=y CONFIG_SND_ATMEL_SOC_TSE850_PCM5142=m CONFIG_SND_ATMEL_SOC_I2S=y +CONFIG_SND_SOC_MIKROE_PROTO=m +# CONFIG_HID_GENERIC is not set CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_EHCI_HCD=y @@ -201,6 +205,9 @@ CONFIG_RTC_DRV_AT91RM9200=y CONFIG_DMADEVICES=y CONFIG_AT_HDMAC=y CONFIG_AT_XDMAC=y +CONFIG_STAGING=y +CONFIG_STAGING_MEDIA=y +CONFIG_VIDEO_HANTRO=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_IIO=y CONFIG_AT91_ADC=y @@ -224,14 +231,14 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_850=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=y +CONFIG_CRYPTO_USER_API_HASH=m +CONFIG_CRYPTO_USER_API_SKCIPHER=m +CONFIG_CRYPTO_DEV_ATMEL_AES=y +CONFIG_CRYPTO_DEV_ATMEL_TDES=y +CONFIG_CRYPTO_DEV_ATMEL_SHA=y CONFIG_STRIP_ASM_SYMS=y CONFIG_DEBUG_FS=y CONFIG_DEBUG_MEMORY_INIT=y # CONFIG_SCHED_DEBUG is not set # CONFIG_FTRACE is not set CONFIG_DEBUG_USER=y -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m -CONFIG_CRYPTO_DEV_ATMEL_AES=y -CONFIG_CRYPTO_DEV_ATMEL_TDES=y -CONFIG_CRYPTO_DEV_ATMEL_SHA=y diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig index 01a6ebdf033a..f25b09efb816 100644 --- a/arch/arm/configs/shmobile_defconfig +++ b/arch/arm/configs/shmobile_defconfig @@ -220,6 +220,5 @@ CONFIG_NLS_ISO8859_1=y CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=64 CONFIG_PRINTK_TIME=y -# CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig index 0c60eb382c80..2d9404ea52c6 100644 --- a/arch/arm/configs/socfpga_defconfig +++ b/arch/arm/configs/socfpga_defconfig @@ -9,7 +9,7 @@ CONFIG_NAMESPACES=y CONFIG_BLK_DEV_INITRD=y CONFIG_EMBEDDED=y CONFIG_PROFILING=y -CONFIG_ARCH_SOCFPGA=y +CONFIG_ARCH_INTEL_SOCFPGA=y CONFIG_ARM_THUMBEE=y CONFIG_SMP=y CONFIG_NR_CPUS=2 diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig index bcedfe1422aa..dbb1ef601762 100644 --- a/arch/arm/configs/u8500_defconfig +++ b/arch/arm/configs/u8500_defconfig @@ -55,6 +55,7 @@ CONFIG_KEYBOARD_GPIO=y CONFIG_KEYBOARD_NOMADIK=y CONFIG_KEYBOARD_STMPE=y CONFIG_KEYBOARD_TC3589X=y +CONFIG_KEYBOARD_TM2_TOUCHKEY=y # CONFIG_INPUT_MOUSE is not set CONFIG_INPUT_TOUCHSCREEN=y CONFIG_TOUCHSCREEN_ATMEL_MXT=y @@ -74,10 +75,12 @@ CONFIG_SERIAL_DEV_BUS=y CONFIG_HW_RANDOM=y CONFIG_I2C_GPIO=y CONFIG_SPI=y +CONFIG_SPI_GPIO=y CONFIG_SPI_PL022=y CONFIG_GPIO_STMPE=y CONFIG_GPIO_TC3589X=y CONFIG_SENSORS_IIO_HWMON=y +CONFIG_SENSORS_NTC_THERMISTOR=y CONFIG_THERMAL=y CONFIG_CPU_THERMAL=y CONFIG_WATCHDOG=y @@ -85,6 +88,10 @@ 
CONFIG_MFD_STMPE=y CONFIG_MFD_TC3589X=y CONFIG_REGULATOR_AB8500=y CONFIG_REGULATOR_GPIO=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_V4L2_FLASH_LED_CLASS=y CONFIG_DRM=y CONFIG_DRM_PANEL_NOVATEK_NT35510=y CONFIG_DRM_PANEL_SAMSUNG_S6D16D0=y @@ -112,10 +119,12 @@ CONFIG_MMC=y CONFIG_MMC_ARMMMCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y +CONFIG_LEDS_CLASS_FLASH=y CONFIG_LEDS_LM3530=y CONFIG_LEDS_GPIO=y CONFIG_LEDS_LP55XX_COMMON=y CONFIG_LEDS_LP5521=y +CONFIG_LEDS_RT8515=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_AB8500=y @@ -124,16 +133,21 @@ CONFIG_DMADEVICES=y CONFIG_STE_DMA40=y CONFIG_HWSPINLOCK=y CONFIG_HSEM_U8500=y +CONFIG_EXTCON_FSA9480=y CONFIG_IIO=y CONFIG_IIO_SW_TRIGGER=y CONFIG_BMA180=y +CONFIG_BMC150_ACCEL=y CONFIG_IIO_ST_ACCEL_3AXIS=y +CONFIG_IIO_RESCALE=y +CONFIG_MPU3050_I2C=y CONFIG_IIO_ST_GYRO_3AXIS=y CONFIG_INV_MPU6050_I2C=y CONFIG_BH1780=y CONFIG_GP2AP002=y CONFIG_AK8974=y CONFIG_IIO_ST_MAGN_3AXIS=y +CONFIG_YAMAHA_YAS530=y CONFIG_IIO_HRTIMER_TRIGGER=y CONFIG_IIO_ST_PRESS=y CONFIG_EXT2_FS=y diff --git a/arch/arm/configs/xcep_defconfig b/arch/arm/configs/xcep_defconfig index f1fbdfc5c8c6..4d8e7f2eaef7 100644 --- a/arch/arm/configs/xcep_defconfig +++ b/arch/arm/configs/xcep_defconfig @@ -53,7 +53,6 @@ CONFIG_NET_ETHERNET=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_PXA=y CONFIG_SERIAL_PXA_CONSOLE=y # CONFIG_LEGACY_PTYS is not set diff --git a/arch/arm/crypto/aes-cipher-core.S b/arch/arm/crypto/aes-cipher-core.S index 472e56d09eea..1da3f41359aa 100644 --- a/arch/arm/crypto/aes-cipher-core.S +++ b/arch/arm/crypto/aes-cipher-core.S @@ -99,28 +99,6 @@ __hround \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0, \sz, \op, \oldcpsr .endm - .macro __rev, out, in - .if __LINUX_ARM_ARCH__ < 6 - lsl t0, \in, #24 - and t1, \in, #0xff00 - and t2, \in, #0xff0000 - orr \out, t0, \in, lsr #24 - orr \out, \out, t1, lsl #8 - orr \out, \out, t2, lsr #8 - .else - rev \out, \in - .endif - .endm - - .macro __adrl, out, sym, c - .if __LINUX_ARM_ARCH__ < 7 - ldr\c \out, =\sym - .else - movw\c \out, #:lower16:\sym - movt\c \out, #:upper16:\sym - .endif - .endm - .macro do_crypt, round, ttab, ltab, bsz push {r3-r11, lr} @@ -133,10 +111,10 @@ ldr r7, [in, #12] #ifdef CONFIG_CPU_BIG_ENDIAN - __rev r4, r4 - __rev r5, r5 - __rev r6, r6 - __rev r7, r7 + rev_l r4, t0 + rev_l r5, t0 + rev_l r6, t0 + rev_l r7, t0 #endif eor r4, r4, r8 @@ -144,7 +122,7 @@ eor r6, r6, r10 eor r7, r7, r11 - __adrl ttab, \ttab + mov_l ttab, \ttab /* * Disable interrupts and prefetch the 1024-byte 'ft' or 'it' table into * L1 cache, assuming cacheline size >= 32. 
This is a hardening measure @@ -180,7 +158,7 @@ 2: .ifb \ltab add ttab, ttab, #1 .else - __adrl ttab, \ltab + mov_l ttab, \ltab // Prefetch inverse S-box for final round; see explanation above .set i, 0 .rept 256 / 64 @@ -194,10 +172,10 @@ \round r4, r5, r6, r7, r8, r9, r10, r11, \bsz, b, rounds #ifdef CONFIG_CPU_BIG_ENDIAN - __rev r4, r4 - __rev r5, r5 - __rev r6, r6 - __rev r7, r7 + rev_l r4, t0 + rev_l r5, t0 + rev_l r6, t0 + rev_l r7, t0 #endif ldr out, [sp] diff --git a/arch/arm/crypto/blake2b-neon-glue.c b/arch/arm/crypto/blake2b-neon-glue.c index 34d73200e7fa..4b59d027ba4a 100644 --- a/arch/arm/crypto/blake2b-neon-glue.c +++ b/arch/arm/crypto/blake2b-neon-glue.c @@ -85,8 +85,8 @@ static int __init blake2b_neon_mod_init(void) static void __exit blake2b_neon_mod_exit(void) { - return crypto_unregister_shashes(blake2b_neon_algs, - ARRAY_SIZE(blake2b_neon_algs)); + crypto_unregister_shashes(blake2b_neon_algs, + ARRAY_SIZE(blake2b_neon_algs)); } module_init(blake2b_neon_mod_init); diff --git a/arch/arm/crypto/blake2s-core.S b/arch/arm/crypto/blake2s-core.S index bed897e9a181..86345751bbf3 100644 --- a/arch/arm/crypto/blake2s-core.S +++ b/arch/arm/crypto/blake2s-core.S @@ -8,6 +8,7 @@ */ #include <linux/linkage.h> +#include <asm/assembler.h> // Registers used to hold message words temporarily. There aren't // enough ARM registers to hold the whole message block, so we have to @@ -38,6 +39,23 @@ #endif .endm +.macro _le32_bswap a, tmp +#ifdef __ARMEB__ + rev_l \a, \tmp +#endif +.endm + +.macro _le32_bswap_8x a, b, c, d, e, f, g, h, tmp + _le32_bswap \a, \tmp + _le32_bswap \b, \tmp + _le32_bswap \c, \tmp + _le32_bswap \d, \tmp + _le32_bswap \e, \tmp + _le32_bswap \f, \tmp + _le32_bswap \g, \tmp + _le32_bswap \h, \tmp +.endm + // Execute a quarter-round of BLAKE2s by mixing two columns or two diagonals. // (a0, b0, c0, d0) and (a1, b1, c1, d1) give the registers containing the two // columns/diagonals. 
s0-s1 are the word offsets to the message words the first @@ -180,8 +198,10 @@ ENTRY(blake2s_compress_arch) tst r1, #3 bne .Lcopy_block_misaligned ldmia r1!, {r2-r9} + _le32_bswap_8x r2, r3, r4, r5, r6, r7, r8, r9, r14 stmia r12!, {r2-r9} ldmia r1!, {r2-r9} + _le32_bswap_8x r2, r3, r4, r5, r6, r7, r8, r9, r14 stmia r12, {r2-r9} .Lcopy_block_done: str r1, [sp, #68] // Update message pointer @@ -268,6 +288,7 @@ ENTRY(blake2s_compress_arch) 1: #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS ldr r3, [r1], #4 + _le32_bswap r3, r4 #else ldrb r3, [r1, #0] ldrb r4, [r1, #1] diff --git a/arch/arm/crypto/chacha-scalar-core.S b/arch/arm/crypto/chacha-scalar-core.S index 2985b80a45b5..083fe1ab96d0 100644 --- a/arch/arm/crypto/chacha-scalar-core.S +++ b/arch/arm/crypto/chacha-scalar-core.S @@ -41,32 +41,15 @@ X14 .req r12 X15 .req r14 -.macro __rev out, in, t0, t1, t2 -.if __LINUX_ARM_ARCH__ >= 6 - rev \out, \in -.else - lsl \t0, \in, #24 - and \t1, \in, #0xff00 - and \t2, \in, #0xff0000 - orr \out, \t0, \in, lsr #24 - orr \out, \out, \t1, lsl #8 - orr \out, \out, \t2, lsr #8 -.endif -.endm - -.macro _le32_bswap x, t0, t1, t2 +.macro _le32_bswap_4x a, b, c, d, tmp #ifdef __ARMEB__ - __rev \x, \x, \t0, \t1, \t2 + rev_l \a, \tmp + rev_l \b, \tmp + rev_l \c, \tmp + rev_l \d, \tmp #endif .endm -.macro _le32_bswap_4x a, b, c, d, t0, t1, t2 - _le32_bswap \a, \t0, \t1, \t2 - _le32_bswap \b, \t0, \t1, \t2 - _le32_bswap \c, \t0, \t1, \t2 - _le32_bswap \d, \t0, \t1, \t2 -.endm - .macro __ldrd a, b, src, offset #if __LINUX_ARM_ARCH__ >= 6 ldrd \a, \b, [\src, #\offset] @@ -200,7 +183,7 @@ add X1, X1, r9 add X2, X2, r10 add X3, X3, r11 - _le32_bswap_4x X0, X1, X2, X3, r8, r9, r10 + _le32_bswap_4x X0, X1, X2, X3, r8 ldmia r12!, {r8-r11} eor X0, X0, r8 eor X1, X1, r9 @@ -216,7 +199,7 @@ ldmia r12!, {X0-X3} add X6, r10, X6, ror #brot add X7, r11, X7, ror #brot - _le32_bswap_4x X4, X5, X6, X7, r8, r9, r10 + _le32_bswap_4x X4, X5, X6, X7, r8 eor X4, X4, X0 eor X5, X5, X1 eor X6, X6, X2 @@ -231,7 +214,7 @@ add r1, r1, r9 // x9 add r6, r6, r10 // x10 add r7, r7, r11 // x11 - _le32_bswap_4x r0, r1, r6, r7, r8, r9, r10 + _le32_bswap_4x r0, r1, r6, r7, r8 ldmia r12!, {r8-r11} eor r0, r0, r8 // x8 eor r1, r1, r9 // x9 @@ -245,7 +228,7 @@ add r3, r9, r3, ror #drot // x13 add r4, r10, r4, ror #drot // x14 add r5, r11, r5, ror #drot // x15 - _le32_bswap_4x r2, r3, r4, r5, r9, r10, r11 + _le32_bswap_4x r2, r3, r4, r5, r9 ldr r9, [sp, #72] // load LEN eor r2, r2, r0 // x12 eor r3, r3, r1 // x13 @@ -301,7 +284,7 @@ add X1, X1, r9 add X2, X2, r10 add X3, X3, r11 - _le32_bswap_4x X0, X1, X2, X3, r8, r9, r10 + _le32_bswap_4x X0, X1, X2, X3, r8 stmia r14!, {X0-X3} // Save keystream for x4-x7 @@ -311,7 +294,7 @@ add X5, r9, X5, ror #brot add X6, r10, X6, ror #brot add X7, r11, X7, ror #brot - _le32_bswap_4x X4, X5, X6, X7, r8, r9, r10 + _le32_bswap_4x X4, X5, X6, X7, r8 add r8, sp, #64 stmia r14!, {X4-X7} @@ -323,7 +306,7 @@ add r1, r1, r9 // x9 add r6, r6, r10 // x10 add r7, r7, r11 // x11 - _le32_bswap_4x r0, r1, r6, r7, r8, r9, r10 + _le32_bswap_4x r0, r1, r6, r7, r8 stmia r14!, {r0,r1,r6,r7} __ldrd r8, r9, sp, 144 __ldrd r10, r11, sp, 152 @@ -331,7 +314,7 @@ add r3, r9, r3, ror #drot // x13 add r4, r10, r4, ror #drot // x14 add r5, r11, r5, ror #drot // x15 - _le32_bswap_4x r2, r3, r4, r5, r9, r10, r11 + _le32_bswap_4x r2, r3, r4, r5, r9 stmia r14, {r2-r5} // Stack: ks0-ks15 unused0-unused7 x0-x15 OUT IN LEN diff --git a/arch/arm/crypto/curve25519-core.S b/arch/arm/crypto/curve25519-core.S index be18af52e7dc..b697fa5d059a 100644 --- 
a/arch/arm/crypto/curve25519-core.S +++ b/arch/arm/crypto/curve25519-core.S @@ -10,8 +10,8 @@ #include <linux/linkage.h> .text -.fpu neon .arch armv7-a +.fpu neon .align 4 ENTRY(curve25519_neon) diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c index 3023c1acfa19..c31bd8f7c092 100644 --- a/arch/arm/crypto/poly1305-glue.c +++ b/arch/arm/crypto/poly1305-glue.c @@ -29,7 +29,7 @@ void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit) static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key) +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) { poly1305_init_arm(&dctx->h, key); dctx->s[0] = get_unaligned_le32(key + 16); diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h index 673c7dd75ab9..ba8d9d7d242b 100644 --- a/arch/arm/include/asm/bug.h +++ b/arch/arm/include/asm/bug.h @@ -88,5 +88,6 @@ extern asmlinkage void c_backtrace(unsigned long fp, int pmode, struct mm_struct; void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr); extern void __show_regs(struct pt_regs *); +extern void __show_regs_alloc_free(struct pt_regs *regs); #endif diff --git a/arch/arm/include/asm/hypervisor.h b/arch/arm/include/asm/hypervisor.h index df8524365637..bd61502b9715 100644 --- a/arch/arm/include/asm/hypervisor.h +++ b/arch/arm/include/asm/hypervisor.h @@ -4,4 +4,7 @@ #include <asm/xen/hypervisor.h> +void kvm_init_hyp_services(void); +bool kvm_arm_hyp_service_available(u32 func_id); + #endif diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index fc748122f1e0..f74944c6fe8d 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -430,11 +430,6 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr); */ #define xlate_dev_mem_ptr(p) __va(p) -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - #include <asm-generic/io.h> #ifdef CONFIG_MMU diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h index 22751b5b5735..e62832dcba76 100644 --- a/arch/arm/include/asm/kexec.h +++ b/arch/arm/include/asm/kexec.h @@ -56,9 +56,6 @@ static inline void crash_setup_regs(struct pt_regs *newregs, } } -/* Function pointer to optional machine-specific reinitialization */ -extern void (*kexec_reinit)(void); - static inline unsigned long phys_to_boot_phys(phys_addr_t phys) { return phys_to_idmap(phys); diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 2f841cb65c30..a711322d9f40 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -150,21 +150,6 @@ extern unsigned long vectors_base; */ #define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET) -#ifdef CONFIG_XIP_KERNEL -/* - * When referencing data in RAM from the XIP region in a relative manner - * with the MMU off, we need the relative offset between the two physical - * addresses. 
The macro below achieves this, which is: - * __pa(v_data) - __xip_pa(v_text) - */ -#define PHYS_RELATIVE(v_data, v_text) \ - (((v_data) - PAGE_OFFSET + PLAT_PHYS_OFFSET) - \ - ((v_text) - XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) + \ - CONFIG_XIP_PHYS_ADDR)) -#else -#define PHYS_RELATIVE(v_data, v_text) ((v_data) - (v_text)) -#endif - #ifndef __ASSEMBLY__ /* diff --git a/arch/arm/include/asm/paravirt.h b/arch/arm/include/asm/paravirt.h index cdbf02d9c1d4..95d5b0d625cd 100644 --- a/arch/arm/include/asm/paravirt.h +++ b/arch/arm/include/asm/paravirt.h @@ -3,23 +3,19 @@ #define _ASM_ARM_PARAVIRT_H #ifdef CONFIG_PARAVIRT +#include <linux/static_call_types.h> + struct static_key; extern struct static_key paravirt_steal_enabled; extern struct static_key paravirt_steal_rq_enabled; -struct pv_time_ops { - unsigned long long (*steal_clock)(int cpu); -}; - -struct paravirt_patch_template { - struct pv_time_ops time; -}; +u64 dummy_steal_clock(int cpu); -extern struct paravirt_patch_template pv_ops; +DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock); static inline u64 paravirt_steal_clock(int cpu) { - return pv_ops.time.steal_clock(cpu); + return static_call(pv_steal_clock)(cpu); } #endif diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 2b85d175e999..d4edab51a77c 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -186,8 +186,6 @@ static inline pte_t pte_mkspecial(pte_t pte) #define pmd_write(pmd) (pmd_isclear((pmd), L_PMD_SECT_RDONLY)) #define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY)) -#define pud_page(pud) pmd_page(__pmd(pud_val(pud))) -#define pud_write(pud) pmd_write(__pmd(pud_val(pud))) #define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd)) #define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd)) diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index c02f24400369..d63a5bb6bd0c 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -166,6 +166,9 @@ extern struct page *empty_zero_page; extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; +#define pud_page(pud) pmd_page(__pmd(pud_val(pud))) +#define pud_write(pud) pmd_write(__pmd(pud_val(pud))) + #define pmd_none(pmd) (!pmd_val(pmd)) static inline pte_t *pmd_page_vaddr(pmd_t pmd) diff --git a/arch/arm/include/asm/set_memory.h b/arch/arm/include/asm/set_memory.h index a1ceff4295d3..ec17fc0fda7a 100644 --- a/arch/arm/include/asm/set_memory.h +++ b/arch/arm/include/asm/set_memory.h @@ -18,12 +18,4 @@ static inline int set_memory_x(unsigned long addr, int numpages) { return 0; } static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } #endif -#ifdef CONFIG_STRICT_KERNEL_RWX -void set_kernel_text_rw(void); -void set_kernel_text_ro(void); -#else -static inline void set_kernel_text_rw(void) { } -static inline void set_kernel_text_ro(void) { } -#endif - #endif diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index 8f009e788ad4..f610a773f2be 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -22,7 +22,7 @@ * assembler to insert a extra (16-bit) IT instruction, depending on the * presence or absence of neighbouring conditional instructions. * - * To avoid this unpredictableness, an approprite IT is inserted explicitly: + * To avoid this unpredictability, an appropriate IT is inserted explicitly: * the assembler won't change IT instructions which are explicitly present * in the input. 
*/ diff --git a/arch/arm/include/asm/xen/swiotlb-xen.h b/arch/arm/include/asm/xen/swiotlb-xen.h new file mode 100644 index 000000000000..455ade5d5320 --- /dev/null +++ b/arch/arm/include/asm/xen/swiotlb-xen.h @@ -0,0 +1 @@ +#include <xen/arm/swiotlb-xen.h> diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild index ce8573157774..63748af8bc9d 100644 --- a/arch/arm/include/uapi/asm/Kbuild +++ b/arch/arm/include/uapi/asm/Kbuild @@ -1,6 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -generated-y += unistd-common.h generated-y += unistd-oabi.h generated-y += unistd-eabi.h generic-y += kvm_para.h diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h index 93ecf8aa4fe5..ae7749e15726 100644 --- a/arch/arm/include/uapi/asm/unistd.h +++ b/arch/arm/include/uapi/asm/unistd.h @@ -24,7 +24,6 @@ #include <asm/unistd-oabi.h> #endif -#include <asm/unistd-common.h> #define __NR_sync_file_range2 __NR_arm_sync_file_range /* diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index be8050b0c3df..70993af22d80 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -24,6 +24,7 @@ #include <asm/vdso_datapage.h> #include <asm/hardware/cache-l2x0.h> #include <linux/kbuild.h> +#include <linux/arm-smccc.h> #include "signal.h" /* @@ -148,6 +149,8 @@ int main(void) DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys)); DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash)); #endif + DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id)); + DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state)); BLANK(); DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index e0d7833a1827..7f0b7aba1498 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -344,20 +344,19 @@ ENTRY(\sym) .size \sym, . - \sym .endm -#define NATIVE(nr, func) syscall nr, func +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) +#define __SYSCALL(nr, func) syscall nr, func /* * This is the syscall table declaration for native ABI syscalls. * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall. */ syscall_table_start sys_call_table -#define COMPAT(nr, native, compat) syscall nr, native #ifdef CONFIG_AEABI #include <calls-eabi.S> #else #include <calls-oabi.S> #endif -#undef COMPAT syscall_table_end sys_call_table /*============================================================================ @@ -455,7 +454,8 @@ ENDPROC(sys_oabi_readahead) * using the compatibility syscall entries. 
*/ syscall_table_start sys_oabi_call_table -#define COMPAT(nr, native, compat) syscall nr, compat +#undef __SYSCALL_WITH_COMPAT +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) #include <calls-oabi.S> syscall_table_end sys_oabi_call_table diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 08660ae9dcbc..b1423fb130ea 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -886,7 +886,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs) info->trigger = addr; pr_debug("breakpoint fired: address = 0x%x\n", addr); perf_bp_event(bp, regs); - if (!bp->overflow_handler) + if (is_default_overflow_handler(bp)) enable_single_step(bp, addr); goto unlock; } diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 2b09dad7935e..f567032a09c0 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -147,11 +147,6 @@ void machine_crash_shutdown(struct pt_regs *regs) pr_info("Loading crashdump kernel...\n"); } -/* - * Function pointer to optional machine-specific reinitialization - */ -void (*kexec_reinit)(void); - void machine_kexec(struct kimage *image) { unsigned long page_list, reboot_entry_phys; @@ -187,9 +182,6 @@ void machine_kexec(struct kimage *image) pr_info("Bye!\n"); - if (kexec_reinit) - kexec_reinit(); - soft_restart(reboot_entry_phys); } diff --git a/arch/arm/kernel/paravirt.c b/arch/arm/kernel/paravirt.c index 4cfed91fe256..7dd9806369fb 100644 --- a/arch/arm/kernel/paravirt.c +++ b/arch/arm/kernel/paravirt.c @@ -9,10 +9,15 @@ #include <linux/export.h> #include <linux/jump_label.h> #include <linux/types.h> +#include <linux/static_call.h> #include <asm/paravirt.h> struct static_key paravirt_steal_enabled; struct static_key paravirt_steal_rq_enabled; -struct paravirt_patch_template pv_ops; -EXPORT_SYMBOL_GPL(pv_ops); +static u64 native_steal_clock(int cpu) +{ + return 0; +} + +DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 5199a2bb4111..6324f4db9b02 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -92,6 +92,17 @@ void arch_cpu_idle_exit(void) ledtrig_cpu(CPU_LED_IDLE_END); } +void __show_regs_alloc_free(struct pt_regs *regs) +{ + int i; + + /* check for r0 - r12 only */ + for (i = 0; i < 13; i++) { + pr_alert("Register r%d information:", i); + mem_dump_obj((void *)regs->uregs[i]); + } +} + void __show_regs(struct pt_regs *regs) { unsigned long flags; diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S index 00664c78faca..931df62a7831 100644 --- a/arch/arm/kernel/smccc-call.S +++ b/arch/arm/kernel/smccc-call.S @@ -3,7 +3,9 @@ * Copyright (c) 2015, Linaro Limited */ #include <linux/linkage.h> +#include <linux/arm-smccc.h> +#include <asm/asm-offsets.h> #include <asm/opcodes-sec.h> #include <asm/opcodes-virt.h> #include <asm/unwind.h> @@ -27,7 +29,14 @@ UNWIND( .fnstart) UNWIND( .save {r4-r7}) ldm r12, {r4-r7} \instr - pop {r4-r7} + ldr r4, [sp, #36] + cmp r4, #0 + beq 1f // No quirk structure + ldr r5, [r4, #ARM_SMCCC_QUIRK_ID_OFFS] + cmp r5, #ARM_SMCCC_QUIRK_QCOM_A6 + bne 1f // No quirk present + str r6, [r4, #ARM_SMCCC_QUIRK_STATE_OFFS] +1: pop {r4-r7} ldr r12, [sp, #(4 * 4)] stm r12, {r0-r3} bx lr diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c index 24bd20564be7..43f0a3ebf390 100644 --- a/arch/arm/kernel/suspend.c +++ b/arch/arm/kernel/suspend.c @@ -1,4 +1,5 @@ // 
SPDX-License-Identifier: GPL-2.0 +#include <linux/ftrace.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/mm_types.h> @@ -26,12 +27,22 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) return -EINVAL; /* + * Function graph tracer state gets incosistent when the kernel + * calls functions that never return (aka suspend finishers) hence + * disable graph tracing during their execution. + */ + pause_graph_tracing(); + + /* * Provide a temporary page table with an identity mapping for * the MMU-enable code, required for resuming. On successful * resume (indicated by a zero return code), we need to switch * back to the correct page tables. */ ret = __cpu_suspend(arg, fn, __mpidr); + + unpause_graph_tracing(); + if (ret == 0) { cpu_switch_mm(mm->pgd, mm); local_flush_bp_all(); @@ -45,7 +56,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) { u32 __mpidr = cpu_logical_map(smp_processor_id()); - return __cpu_suspend(arg, fn, __mpidr); + int ret; + + pause_graph_tracing(); + ret = __cpu_suspend(arg, fn, __mpidr); + unpause_graph_tracing(); + + return ret; } #define idmap_pgd NULL #endif diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 17d5a785df28..64308e3a5d0c 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -287,6 +287,7 @@ static int __die(const char *str, int err, struct pt_regs *regs) print_modules(); __show_regs(regs); + __show_regs_alloc_free(regs); pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n", TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk)); diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 120f9aa6fff3..90dcdfe3b3d0 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -17,6 +17,8 @@ #include <linux/clk/at91_pmc.h> #include <linux/platform_data/atmel.h> +#include <soc/at91/pm.h> + #include <asm/cacheflush.h> #include <asm/fncpy.h> #include <asm/system_misc.h> @@ -25,17 +27,6 @@ #include "generic.h" #include "pm.h" -/* - * FIXME: this is needed to communicate between the pinctrl driver and - * the PM implementation in the machine. Possibly part of the PM - * implementation should be moved down into the pinctrl driver and get - * called as part of the generic suspend/resume path. - */ -#ifdef CONFIG_PINCTRL_AT91 -extern void at91_pinctrl_gpio_suspend(void); -extern void at91_pinctrl_gpio_resume(void); -#endif - struct at91_soc_pm { int (*config_shdwc_ws)(void __iomem *shdwc, u32 *mode, u32 *polarity); int (*config_pmc_ws)(void __iomem *pmc, u32 mode, u32 polarity); @@ -326,6 +317,12 @@ static void at91_pm_suspend(suspend_state_t state) static int at91_pm_enter(suspend_state_t state) { #ifdef CONFIG_PINCTRL_AT91 + /* + * FIXME: this is needed to communicate between the pinctrl driver and + * the PM implementation in the machine. Possibly part of the PM + * implementation should be moved down into the pinctrl driver and get + * called as part of the generic suspend/resume path. 
+ */ at91_pinctrl_gpio_suspend(); #endif diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c index a20ba12d876c..823c9cc98f18 100644 --- a/arch/arm/mach-davinci/board-da830-evm.c +++ b/arch/arm/mach-davinci/board-da830-evm.c @@ -454,6 +454,10 @@ static const struct property_entry da830_evm_i2c_eeprom_properties[] = { { } }; +static const struct software_node da830_evm_i2c_eeprom_node = { + .properties = da830_evm_i2c_eeprom_properties, +}; + static int __init da830_evm_ui_expander_setup(struct i2c_client *client, int gpio, unsigned ngpio, void *context) { @@ -485,7 +489,7 @@ static struct pcf857x_platform_data __initdata da830_evm_ui_expander_info = { static struct i2c_board_info __initdata da830_evm_i2c_devices[] = { { I2C_BOARD_INFO("24c256", 0x50), - .properties = da830_evm_i2c_eeprom_properties, + .swnode = &da830_evm_i2c_eeprom_node, }, { I2C_BOARD_INFO("tlv320aic3x", 0x18), diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index bdf31eb77620..b3bef74c982a 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c @@ -232,10 +232,14 @@ static const struct property_entry eeprom_properties[] = { { } }; +static const struct software_node eeprom_node = { + .properties = eeprom_properties, +}; + static struct i2c_board_info i2c_info[] = { { I2C_BOARD_INFO("24c256", 0x50), - .properties = eeprom_properties, + .swnode = &eeprom_node, }, { I2C_BOARD_INFO("tlv320aic3x", 0x18), diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index 7755cccec550..cce3a621eb20 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c @@ -541,6 +541,10 @@ static const struct property_entry eeprom_properties[] = { { } }; +static const struct software_node eeprom_node = { + .properties = eeprom_properties, +}; + /* * MSP430 supports RTC, card detection, input from IR remote, and * a bit more. 
It triggers interrupts on GPIO(7) from pressing @@ -647,7 +651,7 @@ static struct i2c_board_info __initdata i2c_info[] = { }, { I2C_BOARD_INFO("24c256", 0x50), - .properties = eeprom_properties, + .swnode = &eeprom_node, }, { I2C_BOARD_INFO("tlv320aic33", 0x1b), diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index 952ddabc743e..ee91d81ebbfd 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c @@ -362,6 +362,10 @@ static const struct property_entry eeprom_properties[] = { PROPERTY_ENTRY_U32("pagesize", 64), { } }; + +static const struct software_node eeprom_node = { + .properties = eeprom_properties, +}; #endif static u8 dm646x_iis_serializer_direction[] = { @@ -430,7 +434,7 @@ static void evm_init_cpld(void) static struct i2c_board_info __initdata i2c_info[] = { { I2C_BOARD_INFO("24c256", 0x50), - .properties = eeprom_properties, + .swnode = &eeprom_node, }, { I2C_BOARD_INFO("pcf8574a", 0x38), diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c index 5205008c8061..2127969beb96 100644 --- a/arch/arm/mach-davinci/board-mityomapl138.c +++ b/arch/arm/mach-davinci/board-mityomapl138.c @@ -197,6 +197,10 @@ static const struct property_entry mityomapl138_fd_chip_properties[] = { { } }; +static const struct software_node mityomapl138_fd_chip_node = { + .properties = mityomapl138_fd_chip_properties, +}; + static struct davinci_i2c_platform_data mityomap_i2c_0_pdata = { .bus_freq = 100, /* kHz */ .bus_delay = 0, /* usec */ @@ -323,7 +327,7 @@ static struct i2c_board_info __initdata mityomap_tps65023_info[] = { }, { I2C_BOARD_INFO("24c02", 0x50), - .properties = mityomapl138_fd_chip_properties, + .swnode = &mityomapl138_fd_chip_node, }, }; diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c index 79b47958e992..6930b2f485d1 100644 --- a/arch/arm/mach-davinci/board-sffsdr.c +++ b/arch/arm/mach-davinci/board-sffsdr.c @@ -84,10 +84,14 @@ static const struct property_entry eeprom_properties[] = { { } }; +static const struct software_node eeprom_node = { + .properties = eeprom_properties, +}; + static struct i2c_board_info __initdata i2c_info[] = { { I2C_BOARD_INFO("24c64", 0x50), - .properties = eeprom_properties, + .swnode = &eeprom_node, }, /* Other I2C devices: * MSP430, addr 0x23 (not used) diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c index ea0be59f469a..fb4a394ece3a 100644 --- a/arch/arm/mach-exynos/platsmp.c +++ b/arch/arm/mach-exynos/platsmp.c @@ -78,12 +78,11 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious) #endif /* CONFIG_HOTPLUG_CPU */ /** - * exynos_core_power_down : power down the specified cpu - * @cpu : the cpu to power down + * exynos_cpu_power_down() - power down the specified cpu + * @cpu: the cpu to power down * * Power down the specified cpu. 
The sequence must be finished by a * call to cpu_do_idle() - * */ void exynos_cpu_power_down(int cpu) { @@ -107,8 +106,8 @@ void exynos_cpu_power_down(int cpu) } /** - * exynos_cpu_power_up : power up the specified cpu - * @cpu : the cpu to power up + * exynos_cpu_power_up() - power up the specified cpu + * @cpu: the cpu to power up * * Power up the specified cpu */ @@ -124,9 +123,8 @@ void exynos_cpu_power_up(int cpu) } /** - * exynos_cpu_power_state : returns the power state of the cpu - * @cpu : the cpu to retrieve the power state from - * + * exynos_cpu_power_state() - returns the power state of the cpu + * @cpu: the cpu to retrieve the power state from */ int exynos_cpu_power_state(int cpu) { @@ -135,8 +133,8 @@ int exynos_cpu_power_state(int cpu) } /** - * exynos_cluster_power_down : power down the specified cluster - * @cluster : the cluster to power down + * exynos_cluster_power_down() - power down the specified cluster + * @cluster: the cluster to power down */ void exynos_cluster_power_down(int cluster) { @@ -144,8 +142,8 @@ void exynos_cluster_power_down(int cluster) } /** - * exynos_cluster_power_up : power up the specified cluster - * @cluster : the cluster to power up + * exynos_cluster_power_up() - power up the specified cluster + * @cluster: the cluster to power up */ void exynos_cluster_power_up(int cluster) { @@ -154,8 +152,8 @@ void exynos_cluster_power_up(int cluster) } /** - * exynos_cluster_power_state : returns the power state of the cluster - * @cluster : the cluster to retrieve the power state from + * exynos_cluster_power_state() - returns the power state of the cluster + * @cluster: the cluster to retrieve the power state from * */ int exynos_cluster_power_state(int cluster) @@ -165,7 +163,7 @@ int exynos_cluster_power_state(int cluster) } /** - * exynos_scu_enable : enables SCU for Cortex-A9 based system + * exynos_scu_enable() - enables SCU for Cortex-A9 based system */ void exynos_scu_enable(void) { diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig index 844aa585b966..728aff93fba9 100644 --- a/arch/arm/mach-footbridge/Kconfig +++ b/arch/arm/mach-footbridge/Kconfig @@ -16,27 +16,6 @@ config ARCH_CATS Saying N will reduce the size of the Footbridge kernel. -config ARCH_PERSONAL_SERVER - bool "Compaq Personal Server" - select FOOTBRIDGE_HOST - select ISA - select ISA_DMA - select FORCE_PCI - help - Say Y here if you intend to run this kernel on the Compaq - Personal Server. - - Saying N will reduce the size of the Footbridge kernel. - - The Compaq Personal Server is not available for purchase. - There are no product plans beyond the current research - prototypes at this time. Information is available at: - - <http://www.crl.hpl.hp.com/projects/personalserver/> - - If you have any questions or comments about the Compaq Personal - Server, send e-mail to <skiff@crl.dec.com>. 
- config ARCH_EBSA285_ADDIN bool "EBSA285 (addin mode)" select ARCH_EBSA285 diff --git a/arch/arm/mach-footbridge/Makefile b/arch/arm/mach-footbridge/Makefile index a09f1041f141..6262993c0555 100644 --- a/arch/arm/mach-footbridge/Makefile +++ b/arch/arm/mach-footbridge/Makefile @@ -11,12 +11,10 @@ pci-y += dc21285.o pci-$(CONFIG_ARCH_CATS) += cats-pci.o pci-$(CONFIG_ARCH_EBSA285_HOST) += ebsa285-pci.o pci-$(CONFIG_ARCH_NETWINDER) += netwinder-pci.o -pci-$(CONFIG_ARCH_PERSONAL_SERVER) += personal-pci.o obj-$(CONFIG_ARCH_CATS) += cats-hw.o isa-timer.o obj-$(CONFIG_ARCH_EBSA285) += ebsa285.o dc21285-timer.o obj-$(CONFIG_ARCH_NETWINDER) += netwinder-hw.o isa-timer.o -obj-$(CONFIG_ARCH_PERSONAL_SERVER) += personal.o dc21285-timer.o obj-$(CONFIG_PCI) +=$(pci-y) diff --git a/arch/arm/mach-footbridge/cats-pci.c b/arch/arm/mach-footbridge/cats-pci.c index 0b2fd7e2e9b4..90b1e9be430e 100644 --- a/arch/arm/mach-footbridge/cats-pci.c +++ b/arch/arm/mach-footbridge/cats-pci.c @@ -15,14 +15,14 @@ #include <asm/mach-types.h> /* cats host-specific stuff */ -static int irqmap_cats[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 }; +static int irqmap_cats[] = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 }; static u8 cats_no_swizzle(struct pci_dev *dev, u8 *pin) { return 0; } -static int __init cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +static int cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { if (dev->irq >= 255) return -1; /* not a valid interrupt. */ diff --git a/arch/arm/mach-footbridge/ebsa285-pci.c b/arch/arm/mach-footbridge/ebsa285-pci.c index 6f28aaa9ca79..c3f280d08fa7 100644 --- a/arch/arm/mach-footbridge/ebsa285-pci.c +++ b/arch/arm/mach-footbridge/ebsa285-pci.c @@ -14,9 +14,9 @@ #include <asm/mach/pci.h> #include <asm/mach-types.h> -static int irqmap_ebsa285[] __initdata = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI }; +static int irqmap_ebsa285[] = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI }; -static int __init ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +static int ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { if (dev->vendor == PCI_VENDOR_ID_CONTAQ && dev->device == PCI_DEVICE_ID_CONTAQ_82C693) diff --git a/arch/arm/mach-footbridge/netwinder-pci.c b/arch/arm/mach-footbridge/netwinder-pci.c index 9473aa0305e5..e8304392074b 100644 --- a/arch/arm/mach-footbridge/netwinder-pci.c +++ b/arch/arm/mach-footbridge/netwinder-pci.c @@ -18,7 +18,7 @@ * We now use the slot ID instead of the device identifiers to select * which interrupt is routed where. */ -static int __init netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +static int netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { switch (slot) { case 0: /* host bridge */ diff --git a/arch/arm/mach-footbridge/personal-pci.c b/arch/arm/mach-footbridge/personal-pci.c deleted file mode 100644 index 4391e433a4b2..000000000000 --- a/arch/arm/mach-footbridge/personal-pci.c +++ /dev/null @@ -1,58 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * linux/arch/arm/mach-footbridge/personal-pci.c - * - * PCI bios-type initialisation for PCI machines - * - * Bits taken from various places. 
- */ -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/init.h> - -#include <asm/irq.h> -#include <asm/mach/pci.h> -#include <asm/mach-types.h> - -static int irqmap_personal_server[] __initdata = { - IRQ_IN0, IRQ_IN1, IRQ_IN2, IRQ_IN3, 0, 0, 0, - IRQ_DOORBELLHOST, IRQ_DMA1, IRQ_DMA2, IRQ_PCI -}; - -static int __init personal_server_map_irq(const struct pci_dev *dev, u8 slot, - u8 pin) -{ - unsigned char line; - - pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &line); - - if (line > 0x40 && line <= 0x5f) { - /* line corresponds to the bit controlling this interrupt - * in the footbridge. Ignore the first 8 interrupt bits, - * look up the rest in the map. IN0 is bit number 8 - */ - return irqmap_personal_server[(line & 0x1f) - 8]; - } else if (line == 0) { - /* no interrupt */ - return 0; - } else - return irqmap_personal_server[(line - 1) & 3]; -} - -static struct hw_pci personal_server_pci __initdata = { - .map_irq = personal_server_map_irq, - .nr_controllers = 1, - .ops = &dc21285_ops, - .setup = dc21285_setup, - .preinit = dc21285_preinit, - .postinit = dc21285_postinit, -}; - -static int __init personal_pci_init(void) -{ - if (machine_is_personal_server()) - pci_common_init(&personal_server_pci); - return 0; -} - -subsys_initcall(personal_pci_init); diff --git a/arch/arm/mach-footbridge/personal.c b/arch/arm/mach-footbridge/personal.c deleted file mode 100644 index ca715754fc00..000000000000 --- a/arch/arm/mach-footbridge/personal.c +++ /dev/null @@ -1,25 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * linux/arch/arm/mach-footbridge/personal.c - * - * Personal server (Skiff) machine fixup - */ -#include <linux/init.h> -#include <linux/spinlock.h> - -#include <asm/hardware/dec21285.h> -#include <asm/mach-types.h> - -#include <asm/mach/arch.h> - -#include "common.h" - -MACHINE_START(PERSONAL_SERVER, "Compaq-PersonalServer") - /* Maintainer: Jamey Hicks / George France */ - .atag_offset = 0x100, - .map_io = footbridge_map_io, - .init_irq = footbridge_init_irq, - .init_time = footbridge_timer_init, - .restart = footbridge_restart, -MACHINE_END - diff --git a/arch/arm/mach-hisi/hisilicon.c b/arch/arm/mach-hisi/hisilicon.c index 07ea28b99cd0..b8d14b369cc9 100644 --- a/arch/arm/mach-hisi/hisilicon.c +++ b/arch/arm/mach-hisi/hisilicon.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * (Hisilicon's SoC based) flattened device tree enabled machine + * (HiSilicon's SoC based) flattened device tree enabled machine * - * Copyright (c) 2012-2013 Hisilicon Ltd. + * Copyright (c) 2012-2013 HiSilicon Ltd. * Copyright (c) 2012-2013 Linaro Ltd. * * Author: Haojian Zhuang <haojian.zhuang@linaro.org> diff --git a/arch/arm/mach-hisi/hotplug.c b/arch/arm/mach-hisi/hotplug.c index 5c5f255abc3a..c517941416f1 100644 --- a/arch/arm/mach-hisi/hotplug.c +++ b/arch/arm/mach-hisi/hotplug.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2013 Linaro Ltd. - * Copyright (c) 2013 Hisilicon Limited. + * Copyright (c) 2013 HiSilicon Limited. */ #include <linux/cpu.h> diff --git a/arch/arm/mach-hisi/platmcpm.c b/arch/arm/mach-hisi/platmcpm.c index f155e32f8420..96a484095194 100644 --- a/arch/arm/mach-hisi/platmcpm.c +++ b/arch/arm/mach-hisi/platmcpm.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2013-2014 Linaro Ltd. - * Copyright (c) 2013-2014 Hisilicon Limited. + * Copyright (c) 2013-2014 HiSilicon Limited. 
*/ #include <linux/init.h> #include <linux/smp.h> diff --git a/arch/arm/mach-hisi/platsmp.c b/arch/arm/mach-hisi/platsmp.c index da7a09c1dae5..a56cc64deeb8 100644 --- a/arch/arm/mach-hisi/platsmp.c +++ b/arch/arm/mach-hisi/platsmp.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2013 Linaro Ltd. - * Copyright (c) 2013 Hisilicon Limited. + * Copyright (c) 2013 HiSilicon Limited. * Based on arch/arm/mach-vexpress/platsmp.c, Copyright (C) 2002 ARM Ltd. */ #include <linux/smp.h> diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig index 52902782cc5f..b407b024dde3 100644 --- a/arch/arm/mach-imx/Kconfig +++ b/arch/arm/mach-imx/Kconfig @@ -63,7 +63,7 @@ config SOC_IMX35 select MXC_AVIC select PINCTRL_IMX35 help - This enables support for Freescale i.MX31 processor + This enables support for Freescale i.MX35 processor endif diff --git a/arch/arm/mach-imx/avic.c b/arch/arm/mach-imx/avic.c index 322caa21bcb3..21bce4049cec 100644 --- a/arch/arm/mach-imx/avic.c +++ b/arch/arm/mach-imx/avic.c @@ -7,6 +7,7 @@ #include <linux/module.h> #include <linux/irq.h> #include <linux/irqdomain.h> +#include <linux/irqchip.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> @@ -162,7 +163,7 @@ static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs) * interrupts. It registers the interrupt enable and disable functions * to the kernel for each interrupt source. */ -void __init mxc_init_irq(void __iomem *irqbase) +static void __init mxc_init_irq(void __iomem *irqbase) { struct device_node *np; int irq_base; @@ -220,3 +221,16 @@ void __init mxc_init_irq(void __iomem *irqbase) printk(KERN_INFO "MXC IRQ initialized\n"); } + +static int __init imx_avic_init(struct device_node *node, + struct device_node *parent) +{ + void __iomem *avic_base; + + avic_base = of_iomap(node, 0); + BUG_ON(!avic_base); + mxc_init_irq(avic_base); + return 0; +} + +IRQCHIP_DECLARE(imx_avic, "fsl,avic", imx_avic_init); diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index 2b004cc4f95e..474dedb73bc7 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h @@ -22,7 +22,6 @@ void mx35_map_io(void); void imx21_init_early(void); void imx31_init_early(void); void imx35_init_early(void); -void mxc_init_irq(void __iomem *); void mx31_init_irq(void); void mx35_init_irq(void); void mxc_set_cpu_type(unsigned int type); diff --git a/arch/arm/mach-imx/mach-imx1.c b/arch/arm/mach-imx/mach-imx1.c index 32df3b8012f9..8eca92d66a2e 100644 --- a/arch/arm/mach-imx/mach-imx1.c +++ b/arch/arm/mach-imx/mach-imx1.c @@ -17,16 +17,6 @@ static void __init imx1_init_early(void) mxc_set_cpu_type(MXC_CPU_MX1); } -static void __init imx1_init_irq(void) -{ - void __iomem *avic_addr; - - avic_addr = ioremap(MX1_AVIC_ADDR, SZ_4K); - WARN_ON(!avic_addr); - - mxc_init_irq(avic_addr); -} - static const char * const imx1_dt_board_compat[] __initconst = { "fsl,imx1", NULL @@ -34,7 +24,6 @@ static const char * const imx1_dt_board_compat[] __initconst = { DT_MACHINE_START(IMX1_DT, "Freescale i.MX1 (Device Tree Support)") .init_early = imx1_init_early, - .init_irq = imx1_init_irq, .dt_compat = imx1_dt_board_compat, .restart = mxc_restart, MACHINE_END diff --git a/arch/arm/mach-imx/mach-imx25.c b/arch/arm/mach-imx/mach-imx25.c index 95de48a1aa7d..51927bd08aef 100644 --- a/arch/arm/mach-imx/mach-imx25.c +++ b/arch/arm/mach-imx/mach-imx25.c @@ -22,17 +22,6 @@ static void __init imx25_dt_init(void) imx_aips_allow_unprivileged_access("fsl,imx25-aips"); } -static void __init 
mx25_init_irq(void) -{ - struct device_node *np; - void __iomem *avic_base; - - np = of_find_compatible_node(NULL, NULL, "fsl,avic"); - avic_base = of_iomap(np, 0); - BUG_ON(!avic_base); - mxc_init_irq(avic_base); -} - static const char * const imx25_dt_board_compat[] __initconst = { "fsl,imx25", NULL @@ -42,6 +31,5 @@ DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)") .init_early = imx25_init_early, .init_machine = imx25_dt_init, .init_late = imx25_pm_init, - .init_irq = mx25_init_irq, .dt_compat = imx25_dt_board_compat, MACHINE_END diff --git a/arch/arm/mach-imx/mach-imx27.c b/arch/arm/mach-imx/mach-imx27.c index 262422a9c196..e325c9468105 100644 --- a/arch/arm/mach-imx/mach-imx27.c +++ b/arch/arm/mach-imx/mach-imx27.c @@ -56,17 +56,6 @@ static void __init imx27_init_early(void) mxc_set_cpu_type(MXC_CPU_MX27); } -static void __init mx27_init_irq(void) -{ - void __iomem *avic_base; - struct device_node *np; - - np = of_find_compatible_node(NULL, NULL, "fsl,avic"); - avic_base = of_iomap(np, 0); - BUG_ON(!avic_base); - mxc_init_irq(avic_base); -} - static const char * const imx27_dt_board_compat[] __initconst = { "fsl,imx27", NULL @@ -75,7 +64,6 @@ static const char * const imx27_dt_board_compat[] __initconst = { DT_MACHINE_START(IMX27_DT, "Freescale i.MX27 (Device Tree Support)") .map_io = mx27_map_io, .init_early = imx27_init_early, - .init_irq = mx27_init_irq, .init_late = imx27_pm_init, .dt_compat = imx27_dt_board_compat, MACHINE_END diff --git a/arch/arm/mach-imx/mach-imx31.c b/arch/arm/mach-imx/mach-imx31.c index dc69dfe600df..e9a1092b6093 100644 --- a/arch/arm/mach-imx/mach-imx31.c +++ b/arch/arm/mach-imx/mach-imx31.c @@ -14,6 +14,5 @@ static const char * const imx31_dt_board_compat[] __initconst = { DT_MACHINE_START(IMX31_DT, "Freescale i.MX31 (Device Tree Support)") .map_io = mx31_map_io, .init_early = imx31_init_early, - .init_irq = mx31_init_irq, .dt_compat = imx31_dt_board_compat, MACHINE_END diff --git a/arch/arm/mach-imx/mach-imx35.c b/arch/arm/mach-imx/mach-imx35.c index ec5c3068715c..0fc08218b77d 100644 --- a/arch/arm/mach-imx/mach-imx35.c +++ b/arch/arm/mach-imx/mach-imx35.c @@ -27,6 +27,5 @@ DT_MACHINE_START(IMX35_DT, "Freescale i.MX35 (Device Tree Support)") .l2c_aux_mask = ~0, .map_io = mx35_map_io, .init_early = imx35_init_early, - .init_irq = mx35_init_irq, .dt_compat = imx35_dt_board_compat, MACHINE_END diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c index 5056438e5b42..28db97289ee8 100644 --- a/arch/arm/mach-imx/mm-imx3.c +++ b/arch/arm/mach-imx/mm-imx3.c @@ -109,18 +109,6 @@ void __init imx31_init_early(void) mx3_ccm_base = of_iomap(np, 0); BUG_ON(!mx3_ccm_base); } - -void __init mx31_init_irq(void) -{ - void __iomem *avic_base; - struct device_node *np; - - np = of_find_compatible_node(NULL, NULL, "fsl,imx31-avic"); - avic_base = of_iomap(np, 0); - BUG_ON(!avic_base); - - mxc_init_irq(avic_base); -} #endif /* ifdef CONFIG_SOC_IMX31 */ #ifdef CONFIG_SOC_IMX35 @@ -158,16 +146,4 @@ void __init imx35_init_early(void) mx3_ccm_base = of_iomap(np, 0); BUG_ON(!mx3_ccm_base); } - -void __init mx35_init_irq(void) -{ - void __iomem *avic_base; - struct device_node *np; - - np = of_find_compatible_node(NULL, NULL, "fsl,imx35-avic"); - avic_base = of_iomap(np, 0); - BUG_ON(!avic_base); - - mxc_init_irq(avic_base); -} #endif /* ifdef CONFIG_SOC_IMX35 */ diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c index e9962b48e30c..2e3af2bc7758 100644 --- a/arch/arm/mach-imx/pm-imx5.c +++ b/arch/arm/mach-imx/pm-imx5.c @@ -45,7 
+45,7 @@ * This is also the lowest power state possible without affecting * non-cpu parts of the system. For these reasons, imx5 should default * to always using this state for cpu idling. The PM_SUSPEND_STANDBY also - * uses this state and needs to take no action when registers remain confgiured + * uses this state and needs to take no action when registers remain configured * for this state. */ #define IMX5_DEFAULT_CPU_IDLE_STATE WAIT_UNCLOCKED_POWER_OFF diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c index 78b9a5ee41c9..bf99e718f8b8 100644 --- a/arch/arm/mach-iop32x/n2100.c +++ b/arch/arm/mach-iop32x/n2100.c @@ -116,16 +116,16 @@ static struct hw_pci n2100_pci __initdata = { }; /* - * Both r8169 chips on the n2100 exhibit PCI parity problems. Set - * the ->broken_parity_status flag for both ports so that the r8169 - * driver knows it should ignore error interrupts. + * Both r8169 chips on the n2100 exhibit PCI parity problems. Turn + * off parity reporting for both ports so we don't get error interrupts + * for them. */ static void n2100_fixup_r8169(struct pci_dev *dev) { if (dev->bus->number == 0 && (dev->devfn == PCI_DEVFN(1, 0) || dev->devfn == PCI_DEVFN(2, 0))) - dev->broken_parity_status = 1; + pci_disable_parity(dev); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, PCI_ANY_ID, n2100_fixup_r8169); diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c index cd711bfc591f..2c647bdf8d25 100644 --- a/arch/arm/mach-keystone/keystone.c +++ b/arch/arm/mach-keystone/keystone.c @@ -65,7 +65,7 @@ static void __init keystone_init(void) static long long __init keystone_pv_fixup(void) { long long offset; - phys_addr_t mem_start, mem_end; + u64 mem_start, mem_end; mem_start = memblock_start_of_DRAM(); mem_end = memblock_end_of_DRAM(); @@ -78,7 +78,7 @@ static long long __init keystone_pv_fixup(void) if (mem_start < KEYSTONE_HIGH_PHYS_START || mem_end > KEYSTONE_HIGH_PHYS_END) { pr_crit("Invalid address space for memory (%08llx-%08llx)\n", - (u64)mem_start, (u64)mem_end); + mem_start, mem_end); return 0; } diff --git a/arch/arm/mach-mstar/Kconfig b/arch/arm/mach-mstar/Kconfig index 576d1ab293c8..cd300eeedc20 100644 --- a/arch/arm/mach-mstar/Kconfig +++ b/arch/arm/mach-mstar/Kconfig @@ -4,6 +4,7 @@ menuconfig ARCH_MSTARV7 select ARM_GIC select ARM_HEAVY_MB select MST_IRQ + select MSTAR_MSC313_MPLL help Support for newer MStar/Sigmastar SoC families that are based on Armv7 cores like the Cortex A7 and share the same diff --git a/arch/arm/mach-mvebu/kirkwood.c b/arch/arm/mach-mvebu/kirkwood.c index ceaad6d5927e..06b1706595f4 100644 --- a/arch/arm/mach-mvebu/kirkwood.c +++ b/arch/arm/mach-mvebu/kirkwood.c @@ -84,6 +84,7 @@ static void __init kirkwood_dt_eth_fixup(void) struct device_node *pnp = of_get_parent(np); struct clk *clk; struct property *pmac; + u8 tmpmac[ETH_ALEN]; void __iomem *io; u8 *macaddr; u32 reg; @@ -93,7 +94,7 @@ static void __init kirkwood_dt_eth_fixup(void) /* skip disabled nodes or nodes with valid MAC address*/ if (!of_device_is_available(pnp) || - !IS_ERR(of_get_mac_address(np))) + !of_get_mac_address(np, tmpmac)) goto eth_fixup_skip; clk = of_clk_get(pnp, 0); diff --git a/arch/arm/mach-npcm/Kconfig b/arch/arm/mach-npcm/Kconfig index 7f7002dc2b21..658c8efb4ca1 100644 --- a/arch/arm/mach-npcm/Kconfig +++ b/arch/arm/mach-npcm/Kconfig @@ -1,11 +1,21 @@ # SPDX-License-Identifier: GPL-2.0-only menuconfig ARCH_NPCM bool "Nuvoton NPCM Architecture" - depends on ARCH_MULTI_V7 + depends on ARCH_MULTI_V5 || ARCH_MULTI_V7 select 
PINCTRL if ARCH_NPCM +config ARCH_WPCM450 + bool "Support for WPCM450 BMC (Hermon)" + depends on ARCH_MULTI_V5 + select CPU_ARM926T + select NPCM7XX_TIMER + help + General support for WPCM450 BMC (Hermon). + + Winbond/Nuvoton WPCM450 BMC based on the ARM926EJ-S. + config ARCH_NPCM7XX bool "Support for NPCM7xx BMC (Poleg)" depends on ARCH_MULTI_V7 diff --git a/arch/arm/mach-npcm/Makefile b/arch/arm/mach-npcm/Makefile index 1bc3a70bfab8..8d61fcd42fb1 100644 --- a/arch/arm/mach-npcm/Makefile +++ b/arch/arm/mach-npcm/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only AFLAGS_headsmp.o += -march=armv7-a +obj-$(CONFIG_ARCH_WPCM450) += wpcm450.o obj-$(CONFIG_ARCH_NPCM7XX) += npcm7xx.o obj-$(CONFIG_SMP) += platsmp.o headsmp.o diff --git a/arch/arm/mach-npcm/wpcm450.c b/arch/arm/mach-npcm/wpcm450.c new file mode 100644 index 000000000000..f17b3dab45af --- /dev/null +++ b/arch/arm/mach-npcm/wpcm450.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright 2021 Jonathan Neuschäfer + +#include <asm/mach/arch.h> + +static const char *const wpcm450_dt_match[] = { + "nuvoton,wpcm450", + NULL +}; + +DT_MACHINE_START(WPCM450_DT, "WPCM450 chip") + .dt_compat = wpcm450_dt_match, +MACHINE_END diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S index 14a6c3eb3298..f745a65d3bd7 100644 --- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S +++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S @@ -15,6 +15,7 @@ #include <linux/platform_data/gpio-omap.h> #include <asm/assembler.h> +#include <asm/irq.h> #include "ams-delta-fiq.h" #include "board-ams-delta.h" diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c index 0a4c9b0b13b0..e18b6f13300e 100644 --- a/arch/arm/mach-omap1/board-osk.c +++ b/arch/arm/mach-omap1/board-osk.c @@ -332,11 +332,15 @@ static const struct property_entry mistral_at24_properties[] = { { } }; +static const struct software_node mistral_at24_node = { + .properties = mistral_at24_properties, +}; + static struct i2c_board_info __initdata mistral_i2c_board_info[] = { { /* NOTE: powered from LCD supply */ I2C_BOARD_INFO("24c04", 0x50), - .properties = mistral_at24_properties, + .swnode = &mistral_at24_node, }, /* TODO when driver support is ready: * - optionally ov9640 camera sensor at 0x30 diff --git a/arch/arm/mach-omap1/timer.c b/arch/arm/mach-omap1/timer.c index 97fc2096b970..0411d5508d63 100644 --- a/arch/arm/mach-omap1/timer.c +++ b/arch/arm/mach-omap1/timer.c @@ -1,4 +1,4 @@ -/** +/* * OMAP1 Dual-Mode Timers - platform device registration * * Contains first level initialization routines which internally diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 4178c0ee46eb..7df8f5276ddf 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -34,7 +34,6 @@ config ARCH_OMAP4 select ARM_GIC select HAVE_ARM_SCU if SMP select HAVE_ARM_TWD if SMP - select OMAP_HWMOD select OMAP_INTERCONNECT select OMAP_INTERCONNECT_BARRIER select PL310_ERRATA_588369 if CACHE_L2X0 @@ -54,7 +53,6 @@ config SOC_OMAP5 select HAVE_ARM_SCU if SMP select HAVE_ARM_ARCH_TIMER select ARM_ERRATA_798181 if SMP - select OMAP_HWMOD select OMAP_INTERCONNECT select OMAP_INTERCONNECT_BARRIER select PM_OPP @@ -90,7 +88,6 @@ config SOC_DRA7XX select HAVE_ARM_ARCH_TIMER select IRQ_CROSSBAR select ARM_ERRATA_798181 if SMP - select OMAP_HWMOD select OMAP_INTERCONNECT select OMAP_INTERCONNECT_BARRIER select PM_OPP diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 
9bcfb34a2206..8306ad686bc8 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -20,14 +20,14 @@ secure-common = omap-smc.o omap-secure.o obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common) -obj-$(CONFIG_ARCH_OMAP4) += $(hwmod-common) $(secure-common) +obj-$(CONFIG_ARCH_OMAP4) += $(secure-common) obj-$(CONFIG_SOC_AM33XX) += $(secure-common) -obj-$(CONFIG_SOC_OMAP5) += $(hwmod-common) $(secure-common) +obj-$(CONFIG_SOC_OMAP5) += $(secure-common) obj-$(CONFIG_SOC_AM43XX) += $(secure-common) -obj-$(CONFIG_SOC_DRA7XX) += $(hwmod-common) $(secure-common) +obj-$(CONFIG_SOC_DRA7XX) += $(secure-common) ifneq ($(CONFIG_SND_SOC_OMAP_MCBSP),) -obj-y += mcbsp.o +obj-$(CONFIG_OMAP_HWMOD) += mcbsp.o endif obj-$(CONFIG_TWL4030_CORE) += omap_twl.o @@ -207,9 +207,6 @@ obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2430_data.o obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_2xxx_3xxx_ipblock_data.o obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_3xxx_data.o obj-$(CONFIG_SOC_TI81XX) += omap_hwmod_81xx_data.o -obj-$(CONFIG_ARCH_OMAP4) += omap_hwmod_44xx_data.o -obj-$(CONFIG_SOC_OMAP5) += omap_hwmod_54xx_data.o -obj-$(CONFIG_SOC_DRA7XX) += omap_hwmod_7xx_data.o # OMAP2420 MSDI controller integration support ("MMC") obj-$(CONFIG_SOC_OMAP2420) += msdi.o diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c index 7290f033fd2d..1610c567a6a3 100644 --- a/arch/arm/mach-omap2/board-generic.c +++ b/arch/arm/mach-omap2/board-generic.c @@ -33,7 +33,7 @@ static void __init __maybe_unused omap_generic_init(void) } /* Clocks are needed early, see drivers/clocksource for the rest */ -void __init __maybe_unused omap_init_time_of(void) +static void __init __maybe_unused omap_init_time_of(void) { omap_clk_init(); timer_probe(); diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index 49926eced5f1..db446f271f5d 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -343,15 +343,6 @@ static inline void omap5_secondary_hyp_startup(void) } #endif -#ifdef CONFIG_SOC_DRA7XX -extern int dra7xx_pciess_reset(struct omap_hwmod *oh); -#else -static inline int dra7xx_pciess_reset(struct omap_hwmod *oh) -{ - return 0; -} -#endif - struct omap_system_dma_plat_info; void pdata_quirks_init(const struct of_device_id *); diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 060ba6957b7c..fba0c7aa398c 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -402,6 +402,7 @@ static int __init _omap2_init_reprogram_sdrc(void) return v; } +#ifdef CONFIG_OMAP_HWMOD static int _set_hwmod_postsetup_state(struct omap_hwmod *oh, void *data) { return omap_hwmod_set_postsetup_state(oh, *(u8 *)data); @@ -414,6 +415,11 @@ static void __init __maybe_unused omap_hwmod_init_postsetup(void) /* Set the default postsetup state for all hwmods */ omap_hwmod_for_each(_set_hwmod_postsetup_state, &postsetup_state); } +#else +static inline void omap_hwmod_init_postsetup(void) +{ +} +#endif #ifdef CONFIG_SOC_OMAP2420 void __init omap2420_init_early(void) @@ -615,8 +621,6 @@ void __init omap4430_init_early(void) omap44xx_voltagedomains_init(); omap44xx_powerdomains_init(); omap44xx_clockdomains_init(); - omap44xx_hwmod_init(); - omap_hwmod_init_postsetup(); omap_l2_cache_init(); omap_clk_soc_init = omap4xxx_dt_clk_init; omap_secure_init(); @@ -643,8 +647,6 @@ void __init omap5_init_early(void) omap54xx_voltagedomains_init(); omap54xx_powerdomains_init(); 
omap54xx_clockdomains_init(); - omap54xx_hwmod_init(); - omap_hwmod_init_postsetup(); omap_clk_soc_init = omap5xxx_dt_clk_init; omap_secure_init(); } @@ -667,8 +669,6 @@ void __init dra7xx_init_early(void) dra7xxx_check_revision(); dra7xx_powerdomains_init(); dra7xx_clockdomains_init(); - dra7xx_hwmod_init(); - omap_hwmod_init_postsetup(); omap_clk_soc_init = dra7xx_dt_clk_init; omap_secure_init(); } diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c index f70d561f37f7..0659ab4cb0af 100644 --- a/arch/arm/mach-omap2/omap-secure.c +++ b/arch/arm/mach-omap2/omap-secure.c @@ -9,6 +9,7 @@ */ #include <linux/arm-smccc.h> +#include <linux/cpu_pm.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> @@ -20,6 +21,7 @@ #include "common.h" #include "omap-secure.h" +#include "soc.h" static phys_addr_t omap_secure_memblock_base; @@ -213,3 +215,40 @@ void __init omap_secure_init(void) { omap_optee_init_check(); } + +/* + * Dummy dispatcher call after core OSWR and MPU off. Updates the ROM return + * address after MMU has been re-enabled after CPU1 has been woken up again. + * Otherwise the ROM code will attempt to use the earlier physical return + * address that got set with MMU off when waking up CPU1. Only used on secure + * devices. + */ +static int cpu_notifier(struct notifier_block *nb, unsigned long cmd, void *v) +{ + switch (cmd) { + case CPU_CLUSTER_PM_EXIT: + omap_secure_dispatcher(OMAP4_PPA_SERVICE_0, + FLAG_START_CRITICAL, + 0, 0, 0, 0, 0); + break; + default: + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block secure_notifier_block = { + .notifier_call = cpu_notifier, +}; + +static int __init secure_pm_init(void) +{ + if (omap_type() == OMAP2_DEVICE_TYPE_GP || !soc_is_omap44xx()) + return 0; + + cpu_pm_register_notifier(&secure_notifier_block); + + return 0; +} +omap_arch_initcall(secure_pm_init); diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h index 4aaa95706d39..172069f31616 100644 --- a/arch/arm/mach-omap2/omap-secure.h +++ b/arch/arm/mach-omap2/omap-secure.h @@ -50,6 +50,7 @@ #define OMAP5_DRA7_MON_SET_ACR_INDEX 0x107 /* Secure PPA(Primary Protected Application) APIs */ +#define OMAP4_PPA_SERVICE_0 0x21 #define OMAP4_PPA_L2_POR_INDEX 0x23 #define OMAP4_PPA_CPU_ACTRL_SMP_INDEX 0x25 diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 2310cd56e99b..65934b2924fb 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -2137,6 +2137,7 @@ static int of_dev_hwmod_lookup(struct device_node *np, if (res == 0) { *found = fc; *index = i; + of_node_put(np0); return 0; } } @@ -3495,10 +3496,6 @@ static const struct omap_hwmod_reset omap24xx_reset_quirks[] = { { .match = "msdi", .len = 4, .reset = omap_msdi_reset, }, }; -static const struct omap_hwmod_reset dra7_reset_quirks[] = { - { .match = "pcie", .len = 4, .reset = dra7xx_pciess_reset, }, -}; - static const struct omap_hwmod_reset omap_reset_quirks[] = { { .match = "dss_core", .len = 8, .reset = omap_dss_reset, }, { .match = "hdq1w", .len = 5, .reset = omap_hdq1w_reset, }, @@ -3534,10 +3531,6 @@ omap_hwmod_init_reset_quirks(struct device *dev, struct omap_hwmod *oh, omap24xx_reset_quirks, ARRAY_SIZE(omap24xx_reset_quirks)); - if (soc_is_dra7xx()) - omap_hwmod_init_reset_quirk(dev, oh, data, dra7_reset_quirks, - ARRAY_SIZE(dra7_reset_quirks)); - omap_hwmod_init_reset_quirk(dev, oh, data, omap_reset_quirks, ARRAY_SIZE(omap_reset_quirks)); } diff --git 
a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index eebf2fdf434c..6962a8d267e7 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h @@ -607,6 +607,8 @@ struct omap_hwmod { struct omap_hwmod *parent_hwmod; }; +#ifdef CONFIG_OMAP_HWMOD + struct device_node; struct omap_hwmod *omap_hwmod_lookup(const char *name); @@ -656,6 +658,17 @@ extern void __init omap_hwmod_init(void); const char *omap_hwmod_get_main_clk(struct omap_hwmod *oh); +#else /* CONFIG_OMAP_HWMOD */ + +static inline int +omap_hwmod_for_each_by_class(const char *classname, + int (*fn)(struct omap_hwmod *oh, void *user), + void *user) +{ + return 0; +} +#endif /* CONFIG_OMAP_HWMOD */ + /* * */ @@ -671,7 +684,6 @@ extern int omap2420_hwmod_init(void); extern int omap2430_hwmod_init(void); extern int omap3xxx_hwmod_init(void); extern int omap44xx_hwmod_init(void); -extern int omap54xx_hwmod_init(void); extern int am33xx_hwmod_init(void); extern int dm814x_hwmod_init(void); extern int dm816x_hwmod_init(void); diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c deleted file mode 100644 index 6aa3b8e81a0c..000000000000 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ /dev/null @@ -1,877 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Hardware modules present on the OMAP44xx chips - * - * Copyright (C) 2009-2012 Texas Instruments, Inc. - * Copyright (C) 2009-2010 Nokia Corporation - * - * Paul Walmsley - * Benoit Cousson - * - * This file is automatically generated from the OMAP hardware databases. - * We respectfully ask that any modifications to this file be coordinated - * with the public linux-omap@vger.kernel.org mailing list and the - * authors above to ensure that the autogeneration scripts are kept - * up-to-date with the file contents. - * Note that this file is currently not in sync with autogeneration scripts. - * The above note to be removed, once it is synced up. 
- */ - -#include <linux/io.h> - -#include "omap_hwmod.h" -#include "omap_hwmod_common_data.h" -#include "cm1_44xx.h" -#include "cm2_44xx.h" -#include "prm44xx.h" -#include "prm-regbits-44xx.h" - -/* Base offset for all OMAP4 interrupts external to MPUSS */ -#define OMAP44XX_IRQ_GIC_START 32 - -/* - * IP blocks - */ - -/* - * 'dmm' class - * instance(s): dmm - */ -static struct omap_hwmod_class omap44xx_dmm_hwmod_class = { - .name = "dmm", -}; - -/* dmm */ -static struct omap_hwmod omap44xx_dmm_hwmod = { - .name = "dmm", - .class = &omap44xx_dmm_hwmod_class, - .clkdm_name = "l3_emif_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP4_CM_MEMIF_DMM_CLKCTRL_OFFSET, - .context_offs = OMAP4_RM_MEMIF_DMM_CONTEXT_OFFSET, - }, - }, -}; - -/* - * 'l3' class - * instance(s): l3_instr, l3_main_1, l3_main_2, l3_main_3 - */ -static struct omap_hwmod_class omap44xx_l3_hwmod_class = { - .name = "l3", -}; - -/* l3_instr */ -static struct omap_hwmod omap44xx_l3_instr_hwmod = { - .name = "l3_instr", - .class = &omap44xx_l3_hwmod_class, - .clkdm_name = "l3_instr_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP4_CM_L3INSTR_L3_INSTR_CLKCTRL_OFFSET, - .context_offs = OMAP4_RM_L3INSTR_L3_INSTR_CONTEXT_OFFSET, - .modulemode = MODULEMODE_HWCTRL, - }, - }, -}; - -/* l3_main_1 */ -static struct omap_hwmod omap44xx_l3_main_1_hwmod = { - .name = "l3_main_1", - .class = &omap44xx_l3_hwmod_class, - .clkdm_name = "l3_1_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP4_CM_L3_1_L3_1_CLKCTRL_OFFSET, - .context_offs = OMAP4_RM_L3_1_L3_1_CONTEXT_OFFSET, - }, - }, -}; - -/* l3_main_2 */ -static struct omap_hwmod omap44xx_l3_main_2_hwmod = { - .name = "l3_main_2", - .class = &omap44xx_l3_hwmod_class, - .clkdm_name = "l3_2_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP4_CM_L3_2_L3_2_CLKCTRL_OFFSET, - .context_offs = OMAP4_RM_L3_2_L3_2_CONTEXT_OFFSET, - }, - }, -}; - -/* l3_main_3 */ -static struct omap_hwmod omap44xx_l3_main_3_hwmod = { - .name = "l3_main_3", - .class = &omap44xx_l3_hwmod_class, - .clkdm_name = "l3_instr_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP4_CM_L3INSTR_L3_3_CLKCTRL_OFFSET, - .context_offs = OMAP4_RM_L3INSTR_L3_3_CONTEXT_OFFSET, - .modulemode = MODULEMODE_HWCTRL, - }, - }, -}; - -/* - * 'l4' class - * instance(s): l4_abe, l4_cfg, l4_per, l4_wkup - */ -static struct omap_hwmod_class omap44xx_l4_hwmod_class = { - .name = "l4", -}; - -/* l4_cfg */ -static struct omap_hwmod omap44xx_l4_cfg_hwmod = { - .name = "l4_cfg", - .class = &omap44xx_l4_hwmod_class, - .clkdm_name = "l4_cfg_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP4_CM_L4CFG_L4_CFG_CLKCTRL_OFFSET, - .context_offs = OMAP4_RM_L4CFG_L4_CFG_CONTEXT_OFFSET, - }, - }, -}; - -/* l4_per */ -static struct omap_hwmod omap44xx_l4_per_hwmod = { - .name = "l4_per", - .class = &omap44xx_l4_hwmod_class, - .clkdm_name = "l4_per_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP4_CM_L4PER_L4PER_CLKCTRL_OFFSET, - .context_offs = OMAP4_RM_L4PER_L4_PER_CONTEXT_OFFSET, - }, - }, -}; - -/* l4_wkup */ -static struct omap_hwmod omap44xx_l4_wkup_hwmod = { - .name = "l4_wkup", - .class = &omap44xx_l4_hwmod_class, - .clkdm_name = "l4_wkup_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP4_CM_WKUP_L4WKUP_CLKCTRL_OFFSET, - .context_offs = OMAP4_RM_WKUP_L4WKUP_CONTEXT_OFFSET, - }, - }, -}; - -/* - * 'mpu_bus' class - * instance(s): mpu_private - */ -static struct omap_hwmod_class omap44xx_mpu_bus_hwmod_class = { - .name = "mpu_bus", -}; - -/* mpu_private */ -static struct omap_hwmod omap44xx_mpu_private_hwmod = { - .name 
= "mpu_private", - .class = &omap44xx_mpu_bus_hwmod_class, - .clkdm_name = "mpuss_clkdm", - .prcm = { - .omap4 = { - .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT, - }, - }, -}; - -/* - * 'ocp_wp_noc' class - * instance(s): ocp_wp_noc - */ -static struct omap_hwmod_class omap44xx_ocp_wp_noc_hwmod_class = { - .name = "ocp_wp_noc", -}; - -/* ocp_wp_noc */ -static struct omap_hwmod omap44xx_ocp_wp_noc_hwmod = { - .name = "ocp_wp_noc", - .class = &omap44xx_ocp_wp_noc_hwmod_class, - .clkdm_name = "l3_instr_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP4_CM_L3INSTR_OCP_WP1_CLKCTRL_OFFSET, - .context_offs = OMAP4_RM_L3INSTR_OCP_WP1_CONTEXT_OFFSET, - .modulemode = MODULEMODE_HWCTRL, - }, - }, -}; - -/* - * Modules omap_hwmod structures - * - * The following IPs are excluded for the moment because: - * - They do not need an explicit SW control using omap_hwmod API. - * - They still need to be validated with the driver - * properly adapted to omap_hwmod / omap_device - * - * usim - */ - -/* - * 'ctrl_module' class - * attila core control module + core pad control module + wkup pad control - * module + attila wkup control module - */ - -static struct omap_hwmod_class_sysconfig omap44xx_ctrl_module_sysc = { - .rev_offs = 0x0000, - .sysc_offs = 0x0010, - .sysc_flags = SYSC_HAS_SIDLEMODE, - .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | - SIDLE_SMART_WKUP), - .sysc_fields = &omap_hwmod_sysc_type2, -}; - -static struct omap_hwmod_class omap44xx_ctrl_module_hwmod_class = { - .name = "ctrl_module", - .sysc = &omap44xx_ctrl_module_sysc, -}; - -/* ctrl_module_core */ -static struct omap_hwmod omap44xx_ctrl_module_core_hwmod = { - .name = "ctrl_module_core", - .class = &omap44xx_ctrl_module_hwmod_class, - .clkdm_name = "l4_cfg_clkdm", - .prcm = { - .omap4 = { - .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT, - }, - }, -}; - -/* ctrl_module_pad_core */ -static struct omap_hwmod omap44xx_ctrl_module_pad_core_hwmod = { - .name = "ctrl_module_pad_core", - .class = &omap44xx_ctrl_module_hwmod_class, - .clkdm_name = "l4_cfg_clkdm", - .prcm = { - .omap4 = { - .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT, - }, - }, -}; - -/* ctrl_module_wkup */ -static struct omap_hwmod omap44xx_ctrl_module_wkup_hwmod = { - .name = "ctrl_module_wkup", - .class = &omap44xx_ctrl_module_hwmod_class, - .clkdm_name = "l4_wkup_clkdm", - .prcm = { - .omap4 = { - .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT, - }, - }, -}; - -/* ctrl_module_pad_wkup */ -static struct omap_hwmod omap44xx_ctrl_module_pad_wkup_hwmod = { - .name = "ctrl_module_pad_wkup", - .class = &omap44xx_ctrl_module_hwmod_class, - .clkdm_name = "l4_wkup_clkdm", - .prcm = { - .omap4 = { - .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT, - }, - }, -}; - -/* - * 'debugss' class - * debug and emulation sub system - */ - -static struct omap_hwmod_class omap44xx_debugss_hwmod_class = { - .name = "debugss", -}; - -/* debugss */ -static struct omap_hwmod omap44xx_debugss_hwmod = { - .name = "debugss", - .class = &omap44xx_debugss_hwmod_class, - .clkdm_name = "emu_sys_clkdm", - .main_clk = "trace_clk_div_ck", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP4_CM_EMU_DEBUGSS_CLKCTRL_OFFSET, - .context_offs = OMAP4_RM_EMU_DEBUGSS_CONTEXT_OFFSET, - }, - }, -}; - -/* - * 'emif' class - * external memory interface no1 - */ - -static struct omap_hwmod_class_sysconfig omap44xx_emif_sysc = { - .rev_offs = 0x0000, -}; - -static struct omap_hwmod_class omap44xx_emif_hwmod_class = { - .name = "emif", - .sysc = &omap44xx_emif_sysc, -}; - -/* emif1 */ -static struct omap_hwmod omap44xx_emif1_hwmod = { - .name 
= "emif1", - .class = &omap44xx_emif_hwmod_class, - .clkdm_name = "l3_emif_clkdm", - .flags = HWMOD_INIT_NO_IDLE, - .main_clk = "ddrphy_ck", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP4_CM_MEMIF_EMIF_1_CLKCTRL_OFFSET, - .context_offs = OMAP4_RM_MEMIF_EMIF_1_CONTEXT_OFFSET, - .modulemode = MODULEMODE_HWCTRL, - }, - }, -}; - -/* emif2 */ -static struct omap_hwmod omap44xx_emif2_hwmod = { - .name = "emif2", - .class = &omap44xx_emif_hwmod_class, - .clkdm_name = "l3_emif_clkdm", - .flags = HWMOD_INIT_NO_IDLE, - .main_clk = "ddrphy_ck", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP4_CM_MEMIF_EMIF_2_CLKCTRL_OFFSET, - .context_offs = OMAP4_RM_MEMIF_EMIF_2_CONTEXT_OFFSET, - .modulemode = MODULEMODE_HWCTRL, - }, - }, -}; - -/* - * 'iss' class - * external images sensor pixel data processor - */ - -static struct omap_hwmod_class_sysconfig omap44xx_iss_sysc = { - .rev_offs = 0x0000, - .sysc_offs = 0x0010, - /* - * ISS needs 100 OCP clk cycles delay after a softreset before - * accessing sysconfig again. - * The lowest frequency at the moment for L3 bus is 100 MHz, so - * 1usec delay is needed. Add an x2 margin to be safe (2 usecs). - * - * TODO: Indicate errata when available. - */ - .srst_udelay = 2, - .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS | - SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), - .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | - SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | - MSTANDBY_SMART | MSTANDBY_SMART_WKUP), - .sysc_fields = &omap_hwmod_sysc_type2, -}; - -static struct omap_hwmod_class omap44xx_iss_hwmod_class = { - .name = "iss", - .sysc = &omap44xx_iss_sysc, -}; - -/* iss */ -static struct omap_hwmod_opt_clk iss_opt_clks[] = { - { .role = "ctrlclk", .clk = "iss_ctrlclk" }, -}; - -static struct omap_hwmod omap44xx_iss_hwmod = { - .name = "iss", - .class = &omap44xx_iss_hwmod_class, - .clkdm_name = "iss_clkdm", - .main_clk = "ducati_clk_mux_ck", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP4_CM_CAM_ISS_CLKCTRL_OFFSET, - .context_offs = OMAP4_RM_CAM_ISS_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, - .opt_clks = iss_opt_clks, - .opt_clks_cnt = ARRAY_SIZE(iss_opt_clks), -}; - -/* - * 'mpu' class - * mpu sub-system - */ - -static struct omap_hwmod_class omap44xx_mpu_hwmod_class = { - .name = "mpu", -}; - -/* mpu */ -static struct omap_hwmod omap44xx_mpu_hwmod = { - .name = "mpu", - .class = &omap44xx_mpu_hwmod_class, - .clkdm_name = "mpuss_clkdm", - .flags = HWMOD_INIT_NO_IDLE, - .main_clk = "dpll_mpu_m2_ck", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP4_CM_MPU_MPU_CLKCTRL_OFFSET, - .context_offs = OMAP4_RM_MPU_MPU_CONTEXT_OFFSET, - }, - }, -}; - -/* - * 'ocmc_ram' class - * top-level core on-chip ram - */ - -static struct omap_hwmod_class omap44xx_ocmc_ram_hwmod_class = { - .name = "ocmc_ram", -}; - -/* ocmc_ram */ -static struct omap_hwmod omap44xx_ocmc_ram_hwmod = { - .name = "ocmc_ram", - .class = &omap44xx_ocmc_ram_hwmod_class, - .clkdm_name = "l3_2_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP4_CM_L3_2_OCMC_RAM_CLKCTRL_OFFSET, - .context_offs = OMAP4_RM_L3_2_OCMC_RAM_CONTEXT_OFFSET, - }, - }, -}; - - -/* - * 'prcm' class - * power and reset manager (part of the prcm infrastructure) + clock manager 2 - * + clock manager 1 (in always on power domain) + local prm in mpu - */ - -static struct omap_hwmod_class omap44xx_prcm_hwmod_class = { - .name = "prcm", -}; - -/* prcm_mpu */ -static struct omap_hwmod omap44xx_prcm_mpu_hwmod = { - .name = "prcm_mpu", - .class = &omap44xx_prcm_hwmod_class, - .clkdm_name = "l4_wkup_clkdm", 
- .flags = HWMOD_NO_IDLEST, - .prcm = { - .omap4 = { - .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT, - }, - }, -}; - -/* cm_core_aon */ -static struct omap_hwmod omap44xx_cm_core_aon_hwmod = { - .name = "cm_core_aon", - .class = &omap44xx_prcm_hwmod_class, - .flags = HWMOD_NO_IDLEST, - .prcm = { - .omap4 = { - .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT, - }, - }, -}; - -/* cm_core */ -static struct omap_hwmod omap44xx_cm_core_hwmod = { - .name = "cm_core", - .class = &omap44xx_prcm_hwmod_class, - .flags = HWMOD_NO_IDLEST, - .prcm = { - .omap4 = { - .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT, - }, - }, -}; - -/* prm */ -static struct omap_hwmod_rst_info omap44xx_prm_resets[] = { - { .name = "rst_global_warm_sw", .rst_shift = 0 }, - { .name = "rst_global_cold_sw", .rst_shift = 1 }, -}; - -static struct omap_hwmod omap44xx_prm_hwmod = { - .name = "prm", - .class = &omap44xx_prcm_hwmod_class, - .rst_lines = omap44xx_prm_resets, - .rst_lines_cnt = ARRAY_SIZE(omap44xx_prm_resets), -}; - -/* - * 'scrm' class - * system clock and reset manager - */ - -static struct omap_hwmod_class omap44xx_scrm_hwmod_class = { - .name = "scrm", -}; - -/* scrm */ -static struct omap_hwmod omap44xx_scrm_hwmod = { - .name = "scrm", - .class = &omap44xx_scrm_hwmod_class, - .clkdm_name = "l4_wkup_clkdm", - .prcm = { - .omap4 = { - .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT, - }, - }, -}; - -/* - * 'sl2if' class - * shared level 2 memory interface - */ - -static struct omap_hwmod_class omap44xx_sl2if_hwmod_class = { - .name = "sl2if", -}; - -/* sl2if */ -static struct omap_hwmod omap44xx_sl2if_hwmod = { - .name = "sl2if", - .class = &omap44xx_sl2if_hwmod_class, - .clkdm_name = "ivahd_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP4_CM_IVAHD_SL2_CLKCTRL_OFFSET, - .context_offs = OMAP4_RM_IVAHD_SL2_CONTEXT_OFFSET, - .modulemode = MODULEMODE_HWCTRL, - }, - }, -}; - -/* - * interfaces - */ - -/* l3_main_1 -> dmm */ -static struct omap_hwmod_ocp_if omap44xx_l3_main_1__dmm = { - .master = &omap44xx_l3_main_1_hwmod, - .slave = &omap44xx_dmm_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_SDMA, -}; - -/* mpu -> dmm */ -static struct omap_hwmod_ocp_if omap44xx_mpu__dmm = { - .master = &omap44xx_mpu_hwmod, - .slave = &omap44xx_dmm_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU, -}; - -/* l3_main_3 -> l3_instr */ -static struct omap_hwmod_ocp_if omap44xx_l3_main_3__l3_instr = { - .master = &omap44xx_l3_main_3_hwmod, - .slave = &omap44xx_l3_instr_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* ocp_wp_noc -> l3_instr */ -static struct omap_hwmod_ocp_if omap44xx_ocp_wp_noc__l3_instr = { - .master = &omap44xx_ocp_wp_noc_hwmod, - .slave = &omap44xx_l3_instr_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_2 -> l3_main_1 */ -static struct omap_hwmod_ocp_if omap44xx_l3_main_2__l3_main_1 = { - .master = &omap44xx_l3_main_2_hwmod, - .slave = &omap44xx_l3_main_1_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_cfg -> l3_main_1 */ -static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_1 = { - .master = &omap44xx_l4_cfg_hwmod, - .slave = &omap44xx_l3_main_1_hwmod, - .clk = "l4_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* mpu -> l3_main_1 */ -static struct omap_hwmod_ocp_if omap44xx_mpu__l3_main_1 = { - .master = &omap44xx_mpu_hwmod, - .slave = &omap44xx_l3_main_1_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU, -}; - -/* debugss -> l3_main_2 */ -static struct omap_hwmod_ocp_if omap44xx_debugss__l3_main_2 = { - .master 
= &omap44xx_debugss_hwmod, - .slave = &omap44xx_l3_main_2_hwmod, - .clk = "dbgclk_mux_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* iss -> l3_main_2 */ -static struct omap_hwmod_ocp_if omap44xx_iss__l3_main_2 = { - .master = &omap44xx_iss_hwmod, - .slave = &omap44xx_l3_main_2_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_1 -> l3_main_2 */ -static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l3_main_2 = { - .master = &omap44xx_l3_main_1_hwmod, - .slave = &omap44xx_l3_main_2_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU, -}; - -/* l4_cfg -> l3_main_2 */ -static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_2 = { - .master = &omap44xx_l4_cfg_hwmod, - .slave = &omap44xx_l3_main_2_hwmod, - .clk = "l4_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_1 -> l3_main_3 */ -static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l3_main_3 = { - .master = &omap44xx_l3_main_1_hwmod, - .slave = &omap44xx_l3_main_3_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU, -}; - -/* l3_main_2 -> l3_main_3 */ -static struct omap_hwmod_ocp_if omap44xx_l3_main_2__l3_main_3 = { - .master = &omap44xx_l3_main_2_hwmod, - .slave = &omap44xx_l3_main_3_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_cfg -> l3_main_3 */ -static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_3 = { - .master = &omap44xx_l4_cfg_hwmod, - .slave = &omap44xx_l3_main_3_hwmod, - .clk = "l4_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_1 -> l4_cfg */ -static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l4_cfg = { - .master = &omap44xx_l3_main_1_hwmod, - .slave = &omap44xx_l4_cfg_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_2 -> l4_per */ -static struct omap_hwmod_ocp_if omap44xx_l3_main_2__l4_per = { - .master = &omap44xx_l3_main_2_hwmod, - .slave = &omap44xx_l4_per_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_cfg -> l4_wkup */ -static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l4_wkup = { - .master = &omap44xx_l4_cfg_hwmod, - .slave = &omap44xx_l4_wkup_hwmod, - .clk = "l4_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* mpu -> mpu_private */ -static struct omap_hwmod_ocp_if omap44xx_mpu__mpu_private = { - .master = &omap44xx_mpu_hwmod, - .slave = &omap44xx_mpu_private_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_cfg -> ocp_wp_noc */ -static struct omap_hwmod_ocp_if omap44xx_l4_cfg__ocp_wp_noc = { - .master = &omap44xx_l4_cfg_hwmod, - .slave = &omap44xx_ocp_wp_noc_hwmod, - .clk = "l4_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_cfg -> ctrl_module_core */ -static struct omap_hwmod_ocp_if omap44xx_l4_cfg__ctrl_module_core = { - .master = &omap44xx_l4_cfg_hwmod, - .slave = &omap44xx_ctrl_module_core_hwmod, - .clk = "l4_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_cfg -> ctrl_module_pad_core */ -static struct omap_hwmod_ocp_if omap44xx_l4_cfg__ctrl_module_pad_core = { - .master = &omap44xx_l4_cfg_hwmod, - .slave = &omap44xx_ctrl_module_pad_core_hwmod, - .clk = "l4_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_wkup -> ctrl_module_wkup */ -static struct omap_hwmod_ocp_if omap44xx_l4_wkup__ctrl_module_wkup = { - .master = &omap44xx_l4_wkup_hwmod, - .slave = &omap44xx_ctrl_module_wkup_hwmod, - .clk = "l4_wkup_clk_mux_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_wkup -> ctrl_module_pad_wkup */ -static struct omap_hwmod_ocp_if 
omap44xx_l4_wkup__ctrl_module_pad_wkup = { - .master = &omap44xx_l4_wkup_hwmod, - .slave = &omap44xx_ctrl_module_pad_wkup_hwmod, - .clk = "l4_wkup_clk_mux_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_instr -> debugss */ -static struct omap_hwmod_ocp_if omap44xx_l3_instr__debugss = { - .master = &omap44xx_l3_instr_hwmod, - .slave = &omap44xx_debugss_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_2 -> iss */ -static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iss = { - .master = &omap44xx_l3_main_2_hwmod, - .slave = &omap44xx_iss_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_2 -> ocmc_ram */ -static struct omap_hwmod_ocp_if omap44xx_l3_main_2__ocmc_ram = { - .master = &omap44xx_l3_main_2_hwmod, - .slave = &omap44xx_ocmc_ram_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* mpu_private -> prcm_mpu */ -static struct omap_hwmod_ocp_if omap44xx_mpu_private__prcm_mpu = { - .master = &omap44xx_mpu_private_hwmod, - .slave = &omap44xx_prcm_mpu_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_wkup -> cm_core_aon */ -static struct omap_hwmod_ocp_if omap44xx_l4_wkup__cm_core_aon = { - .master = &omap44xx_l4_wkup_hwmod, - .slave = &omap44xx_cm_core_aon_hwmod, - .clk = "l4_wkup_clk_mux_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_cfg -> cm_core */ -static struct omap_hwmod_ocp_if omap44xx_l4_cfg__cm_core = { - .master = &omap44xx_l4_cfg_hwmod, - .slave = &omap44xx_cm_core_hwmod, - .clk = "l4_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_wkup -> prm */ -static struct omap_hwmod_ocp_if omap44xx_l4_wkup__prm = { - .master = &omap44xx_l4_wkup_hwmod, - .slave = &omap44xx_prm_hwmod, - .clk = "l4_wkup_clk_mux_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_wkup -> scrm */ -static struct omap_hwmod_ocp_if omap44xx_l4_wkup__scrm = { - .master = &omap44xx_l4_wkup_hwmod, - .slave = &omap44xx_scrm_hwmod, - .clk = "l4_wkup_clk_mux_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_2 -> sl2if */ -static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l3_main_2__sl2if = { - .master = &omap44xx_l3_main_2_hwmod, - .slave = &omap44xx_sl2if_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* mpu -> emif1 */ -static struct omap_hwmod_ocp_if omap44xx_mpu__emif1 = { - .master = &omap44xx_mpu_hwmod, - .slave = &omap44xx_emif1_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* mpu -> emif2 */ -static struct omap_hwmod_ocp_if omap44xx_mpu__emif2 = { - .master = &omap44xx_mpu_hwmod, - .slave = &omap44xx_emif2_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = { - &omap44xx_l3_main_1__dmm, - &omap44xx_mpu__dmm, - &omap44xx_l3_main_3__l3_instr, - &omap44xx_ocp_wp_noc__l3_instr, - &omap44xx_l3_main_2__l3_main_1, - &omap44xx_l4_cfg__l3_main_1, - &omap44xx_mpu__l3_main_1, - &omap44xx_debugss__l3_main_2, - &omap44xx_iss__l3_main_2, - &omap44xx_l3_main_1__l3_main_2, - &omap44xx_l4_cfg__l3_main_2, - &omap44xx_l3_main_1__l3_main_3, - &omap44xx_l3_main_2__l3_main_3, - &omap44xx_l4_cfg__l3_main_3, - &omap44xx_l3_main_1__l4_cfg, - &omap44xx_l3_main_2__l4_per, - &omap44xx_l4_cfg__l4_wkup, - &omap44xx_mpu__mpu_private, - &omap44xx_l4_cfg__ocp_wp_noc, - &omap44xx_l4_cfg__ctrl_module_core, - &omap44xx_l4_cfg__ctrl_module_pad_core, - &omap44xx_l4_wkup__ctrl_module_wkup, - 
&omap44xx_l4_wkup__ctrl_module_pad_wkup, - &omap44xx_l3_instr__debugss, - &omap44xx_l3_main_2__iss, - &omap44xx_l3_main_2__ocmc_ram, - &omap44xx_mpu_private__prcm_mpu, - &omap44xx_l4_wkup__cm_core_aon, - &omap44xx_l4_cfg__cm_core, - &omap44xx_l4_wkup__prm, - &omap44xx_l4_wkup__scrm, - /* &omap44xx_l3_main_2__sl2if, */ - &omap44xx_mpu__emif1, - &omap44xx_mpu__emif2, - NULL, -}; - -int __init omap44xx_hwmod_init(void) -{ - omap_hwmod_init(); - return omap_hwmod_register_links(omap44xx_hwmod_ocp_ifs); -} - diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c deleted file mode 100644 index 85b9ab4756ed..000000000000 --- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c +++ /dev/null @@ -1,467 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Hardware modules present on the OMAP54xx chips - * - * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com - * - * Paul Walmsley - * Benoit Cousson - * - * This file is automatically generated from the OMAP hardware databases. - * We respectfully ask that any modifications to this file be coordinated - * with the public linux-omap@vger.kernel.org mailing list and the - * authors above to ensure that the autogeneration scripts are kept - * up-to-date with the file contents. - */ - -#include <linux/io.h> -#include <linux/power/smartreflex.h> - -#include "omap_hwmod.h" -#include "omap_hwmod_common_data.h" -#include "cm1_54xx.h" -#include "cm2_54xx.h" -#include "prm54xx.h" - -/* Base offset for all OMAP5 interrupts external to MPUSS */ -#define OMAP54XX_IRQ_GIC_START 32 - -/* - * IP blocks - */ - -/* - * 'dmm' class - * instance(s): dmm - */ -static struct omap_hwmod_class omap54xx_dmm_hwmod_class = { - .name = "dmm", -}; - -/* dmm */ -static struct omap_hwmod omap54xx_dmm_hwmod = { - .name = "dmm", - .class = &omap54xx_dmm_hwmod_class, - .clkdm_name = "emif_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP54XX_CM_EMIF_DMM_CLKCTRL_OFFSET, - .context_offs = OMAP54XX_RM_EMIF_DMM_CONTEXT_OFFSET, - }, - }, -}; - -/* - * 'l3' class - * instance(s): l3_instr, l3_main_1, l3_main_2, l3_main_3 - */ -static struct omap_hwmod_class omap54xx_l3_hwmod_class = { - .name = "l3", -}; - -/* l3_instr */ -static struct omap_hwmod omap54xx_l3_instr_hwmod = { - .name = "l3_instr", - .class = &omap54xx_l3_hwmod_class, - .clkdm_name = "l3instr_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP54XX_CM_L3INSTR_L3_INSTR_CLKCTRL_OFFSET, - .context_offs = OMAP54XX_RM_L3INSTR_L3_INSTR_CONTEXT_OFFSET, - .modulemode = MODULEMODE_HWCTRL, - }, - }, -}; - -/* l3_main_1 */ -static struct omap_hwmod omap54xx_l3_main_1_hwmod = { - .name = "l3_main_1", - .class = &omap54xx_l3_hwmod_class, - .clkdm_name = "l3main1_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP54XX_CM_L3MAIN1_L3_MAIN_1_CLKCTRL_OFFSET, - .context_offs = OMAP54XX_RM_L3MAIN1_L3_MAIN_1_CONTEXT_OFFSET, - }, - }, -}; - -/* l3_main_2 */ -static struct omap_hwmod omap54xx_l3_main_2_hwmod = { - .name = "l3_main_2", - .class = &omap54xx_l3_hwmod_class, - .clkdm_name = "l3main2_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP54XX_CM_L3MAIN2_L3_MAIN_2_CLKCTRL_OFFSET, - .context_offs = OMAP54XX_RM_L3MAIN2_L3_MAIN_2_CONTEXT_OFFSET, - }, - }, -}; - -/* l3_main_3 */ -static struct omap_hwmod omap54xx_l3_main_3_hwmod = { - .name = "l3_main_3", - .class = &omap54xx_l3_hwmod_class, - .clkdm_name = "l3instr_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP54XX_CM_L3INSTR_L3_MAIN_3_CLKCTRL_OFFSET, - .context_offs = 
OMAP54XX_RM_L3INSTR_L3_MAIN_3_CONTEXT_OFFSET, - .modulemode = MODULEMODE_HWCTRL, - }, - }, -}; - -/* - * 'l4' class - * instance(s): l4_abe, l4_cfg, l4_per, l4_wkup - */ -static struct omap_hwmod_class omap54xx_l4_hwmod_class = { - .name = "l4", -}; - -/* l4_cfg */ -static struct omap_hwmod omap54xx_l4_cfg_hwmod = { - .name = "l4_cfg", - .class = &omap54xx_l4_hwmod_class, - .clkdm_name = "l4cfg_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP54XX_CM_L4CFG_L4_CFG_CLKCTRL_OFFSET, - .context_offs = OMAP54XX_RM_L4CFG_L4_CFG_CONTEXT_OFFSET, - }, - }, -}; - -/* l4_per */ -static struct omap_hwmod omap54xx_l4_per_hwmod = { - .name = "l4_per", - .class = &omap54xx_l4_hwmod_class, - .clkdm_name = "l4per_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP54XX_CM_L4PER_L4_PER_CLKCTRL_OFFSET, - .context_offs = OMAP54XX_RM_L4PER_L4_PER_CONTEXT_OFFSET, - }, - }, -}; - -/* l4_wkup */ -static struct omap_hwmod omap54xx_l4_wkup_hwmod = { - .name = "l4_wkup", - .class = &omap54xx_l4_hwmod_class, - .clkdm_name = "wkupaon_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP54XX_CM_WKUPAON_L4_WKUP_CLKCTRL_OFFSET, - .context_offs = OMAP54XX_RM_WKUPAON_L4_WKUP_CONTEXT_OFFSET, - }, - }, -}; - -/* - * 'mpu_bus' class - * instance(s): mpu_private - */ -static struct omap_hwmod_class omap54xx_mpu_bus_hwmod_class = { - .name = "mpu_bus", -}; - -/* mpu_private */ -static struct omap_hwmod omap54xx_mpu_private_hwmod = { - .name = "mpu_private", - .class = &omap54xx_mpu_bus_hwmod_class, - .clkdm_name = "mpu_clkdm", - .prcm = { - .omap4 = { - .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT, - }, - }, -}; - -/* - * 'emif' class - * external memory interface no1 (wrapper) - */ - -static struct omap_hwmod_class_sysconfig omap54xx_emif_sysc = { - .rev_offs = 0x0000, -}; - -static struct omap_hwmod_class omap54xx_emif_hwmod_class = { - .name = "emif", - .sysc = &omap54xx_emif_sysc, -}; - -/* emif1 */ -static struct omap_hwmod omap54xx_emif1_hwmod = { - .name = "emif1", - .class = &omap54xx_emif_hwmod_class, - .clkdm_name = "emif_clkdm", - .flags = HWMOD_INIT_NO_IDLE, - .main_clk = "dpll_core_h11x2_ck", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP54XX_CM_EMIF_EMIF1_CLKCTRL_OFFSET, - .context_offs = OMAP54XX_RM_EMIF_EMIF1_CONTEXT_OFFSET, - .modulemode = MODULEMODE_HWCTRL, - }, - }, -}; - -/* emif2 */ -static struct omap_hwmod omap54xx_emif2_hwmod = { - .name = "emif2", - .class = &omap54xx_emif_hwmod_class, - .clkdm_name = "emif_clkdm", - .flags = HWMOD_INIT_NO_IDLE, - .main_clk = "dpll_core_h11x2_ck", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP54XX_CM_EMIF_EMIF2_CLKCTRL_OFFSET, - .context_offs = OMAP54XX_RM_EMIF_EMIF2_CONTEXT_OFFSET, - .modulemode = MODULEMODE_HWCTRL, - }, - }, -}; - - - - -/* - * 'mpu' class - * mpu sub-system - */ - -static struct omap_hwmod_class omap54xx_mpu_hwmod_class = { - .name = "mpu", -}; - -/* mpu */ -static struct omap_hwmod omap54xx_mpu_hwmod = { - .name = "mpu", - .class = &omap54xx_mpu_hwmod_class, - .clkdm_name = "mpu_clkdm", - .flags = HWMOD_INIT_NO_IDLE, - .main_clk = "dpll_mpu_m2_ck", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP54XX_CM_MPU_MPU_CLKCTRL_OFFSET, - .context_offs = OMAP54XX_RM_MPU_MPU_CONTEXT_OFFSET, - }, - }, -}; - -/* - * 'sata' class - * sata: serial ata interface gen2 compliant ( 1 rx/ 1 tx) - */ - -static struct omap_hwmod_class_sysconfig omap54xx_sata_sysc = { - .rev_offs = 0x00fc, - .sysc_offs = 0x0000, - .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE), - .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | - SIDLE_SMART_WKUP | MSTANDBY_FORCE | 
MSTANDBY_NO | - MSTANDBY_SMART | MSTANDBY_SMART_WKUP), - .sysc_fields = &omap_hwmod_sysc_type2, -}; - -static struct omap_hwmod_class omap54xx_sata_hwmod_class = { - .name = "sata", - .sysc = &omap54xx_sata_sysc, -}; - -/* sata */ -static struct omap_hwmod omap54xx_sata_hwmod = { - .name = "sata", - .class = &omap54xx_sata_hwmod_class, - .clkdm_name = "l3init_clkdm", - .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY, - .main_clk = "func_48m_fclk", - .mpu_rt_idx = 1, - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP54XX_CM_L3INIT_SATA_CLKCTRL_OFFSET, - .context_offs = OMAP54XX_RM_L3INIT_SATA_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, -}; - -/* l4_cfg -> sata */ -static struct omap_hwmod_ocp_if omap54xx_l4_cfg__sata = { - .master = &omap54xx_l4_cfg_hwmod, - .slave = &omap54xx_sata_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* - * Interfaces - */ - -/* l3_main_1 -> dmm */ -static struct omap_hwmod_ocp_if omap54xx_l3_main_1__dmm = { - .master = &omap54xx_l3_main_1_hwmod, - .slave = &omap54xx_dmm_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_SDMA, -}; - -/* l3_main_3 -> l3_instr */ -static struct omap_hwmod_ocp_if omap54xx_l3_main_3__l3_instr = { - .master = &omap54xx_l3_main_3_hwmod, - .slave = &omap54xx_l3_instr_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_2 -> l3_main_1 */ -static struct omap_hwmod_ocp_if omap54xx_l3_main_2__l3_main_1 = { - .master = &omap54xx_l3_main_2_hwmod, - .slave = &omap54xx_l3_main_1_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_cfg -> l3_main_1 */ -static struct omap_hwmod_ocp_if omap54xx_l4_cfg__l3_main_1 = { - .master = &omap54xx_l4_cfg_hwmod, - .slave = &omap54xx_l3_main_1_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* mpu -> l3_main_1 */ -static struct omap_hwmod_ocp_if omap54xx_mpu__l3_main_1 = { - .master = &omap54xx_mpu_hwmod, - .slave = &omap54xx_l3_main_1_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU, -}; - -/* l3_main_1 -> l3_main_2 */ -static struct omap_hwmod_ocp_if omap54xx_l3_main_1__l3_main_2 = { - .master = &omap54xx_l3_main_1_hwmod, - .slave = &omap54xx_l3_main_2_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU, -}; - -/* l4_cfg -> l3_main_2 */ -static struct omap_hwmod_ocp_if omap54xx_l4_cfg__l3_main_2 = { - .master = &omap54xx_l4_cfg_hwmod, - .slave = &omap54xx_l3_main_2_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_1 -> l3_main_3 */ -static struct omap_hwmod_ocp_if omap54xx_l3_main_1__l3_main_3 = { - .master = &omap54xx_l3_main_1_hwmod, - .slave = &omap54xx_l3_main_3_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU, -}; - -/* l3_main_2 -> l3_main_3 */ -static struct omap_hwmod_ocp_if omap54xx_l3_main_2__l3_main_3 = { - .master = &omap54xx_l3_main_2_hwmod, - .slave = &omap54xx_l3_main_3_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_cfg -> l3_main_3 */ -static struct omap_hwmod_ocp_if omap54xx_l4_cfg__l3_main_3 = { - .master = &omap54xx_l4_cfg_hwmod, - .slave = &omap54xx_l3_main_3_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_1 -> l4_cfg */ -static struct omap_hwmod_ocp_if omap54xx_l3_main_1__l4_cfg = { - .master = &omap54xx_l3_main_1_hwmod, - .slave = &omap54xx_l4_cfg_hwmod, - .clk = "l4_root_clk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_2 -> l4_per */ -static struct omap_hwmod_ocp_if omap54xx_l3_main_2__l4_per = { - 
.master = &omap54xx_l3_main_2_hwmod, - .slave = &omap54xx_l4_per_hwmod, - .clk = "l4_root_clk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_1 -> l4_wkup */ -static struct omap_hwmod_ocp_if omap54xx_l3_main_1__l4_wkup = { - .master = &omap54xx_l3_main_1_hwmod, - .slave = &omap54xx_l4_wkup_hwmod, - .clk = "wkupaon_iclk_mux", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* mpu -> mpu_private */ -static struct omap_hwmod_ocp_if omap54xx_mpu__mpu_private = { - .master = &omap54xx_mpu_hwmod, - .slave = &omap54xx_mpu_private_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* mpu -> emif1 */ -static struct omap_hwmod_ocp_if omap54xx_mpu__emif1 = { - .master = &omap54xx_mpu_hwmod, - .slave = &omap54xx_emif1_hwmod, - .clk = "dpll_core_h11x2_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* mpu -> emif2 */ -static struct omap_hwmod_ocp_if omap54xx_mpu__emif2 = { - .master = &omap54xx_mpu_hwmod, - .slave = &omap54xx_emif2_hwmod, - .clk = "dpll_core_h11x2_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_cfg -> mpu */ -static struct omap_hwmod_ocp_if omap54xx_l4_cfg__mpu = { - .master = &omap54xx_l4_cfg_hwmod, - .slave = &omap54xx_mpu_hwmod, - .clk = "l4_root_clk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -static struct omap_hwmod_ocp_if *omap54xx_hwmod_ocp_ifs[] __initdata = { - &omap54xx_l3_main_1__dmm, - &omap54xx_l3_main_3__l3_instr, - &omap54xx_l3_main_2__l3_main_1, - &omap54xx_l4_cfg__l3_main_1, - &omap54xx_mpu__l3_main_1, - &omap54xx_l3_main_1__l3_main_2, - &omap54xx_l4_cfg__l3_main_2, - &omap54xx_l3_main_1__l3_main_3, - &omap54xx_l3_main_2__l3_main_3, - &omap54xx_l4_cfg__l3_main_3, - &omap54xx_l3_main_1__l4_cfg, - &omap54xx_l3_main_2__l4_per, - &omap54xx_l3_main_1__l4_wkup, - &omap54xx_mpu__mpu_private, - &omap54xx_mpu__emif1, - &omap54xx_mpu__emif2, - &omap54xx_l4_cfg__mpu, - &omap54xx_l4_cfg__sata, - NULL, -}; - -int __init omap54xx_hwmod_init(void) -{ - omap_hwmod_init(); - return omap_hwmod_register_links(omap54xx_hwmod_ocp_ifs); -} diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c deleted file mode 100644 index 48c2a808bd46..000000000000 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ /dev/null @@ -1,719 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Hardware modules present on the DRA7xx chips - * - * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com - * - * Paul Walmsley - * Benoit Cousson - * - * This file is automatically generated from the OMAP hardware databases. - * We respectfully ask that any modifications to this file be coordinated - * with the public linux-omap@vger.kernel.org mailing list and the - * authors above to ensure that the autogeneration scripts are kept - * up-to-date with the file contents. 
- */ - -#include <linux/io.h> - -#include "omap_hwmod.h" -#include "omap_hwmod_common_data.h" -#include "cm1_7xx.h" -#include "cm2_7xx.h" -#include "prm7xx.h" -#include "soc.h" - -/* Base offset for all DRA7XX interrupts external to MPUSS */ -#define DRA7XX_IRQ_GIC_START 32 - -/* - * IP blocks - */ - -/* - * 'dmm' class - * instance(s): dmm - */ -static struct omap_hwmod_class dra7xx_dmm_hwmod_class = { - .name = "dmm", -}; - -/* dmm */ -static struct omap_hwmod dra7xx_dmm_hwmod = { - .name = "dmm", - .class = &dra7xx_dmm_hwmod_class, - .clkdm_name = "emif_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_EMIF_DMM_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_EMIF_DMM_CONTEXT_OFFSET, - }, - }, -}; - -/* - * 'l3' class - * instance(s): l3_instr, l3_main_1, l3_main_2 - */ -static struct omap_hwmod_class dra7xx_l3_hwmod_class = { - .name = "l3", -}; - -/* l3_instr */ -static struct omap_hwmod dra7xx_l3_instr_hwmod = { - .name = "l3_instr", - .class = &dra7xx_l3_hwmod_class, - .clkdm_name = "l3instr_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L3INSTR_L3_INSTR_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_L3INSTR_L3_INSTR_CONTEXT_OFFSET, - .modulemode = MODULEMODE_HWCTRL, - }, - }, -}; - -/* l3_main_1 */ -static struct omap_hwmod dra7xx_l3_main_1_hwmod = { - .name = "l3_main_1", - .class = &dra7xx_l3_hwmod_class, - .clkdm_name = "l3main1_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L3MAIN1_L3_MAIN_1_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_L3MAIN1_L3_MAIN_1_CONTEXT_OFFSET, - }, - }, -}; - -/* l3_main_2 */ -static struct omap_hwmod dra7xx_l3_main_2_hwmod = { - .name = "l3_main_2", - .class = &dra7xx_l3_hwmod_class, - .clkdm_name = "l3instr_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L3INSTR_L3_MAIN_2_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_L3INSTR_L3_MAIN_2_CONTEXT_OFFSET, - .modulemode = MODULEMODE_HWCTRL, - }, - }, -}; - -/* - * 'l4' class - * instance(s): l4_cfg, l4_per1, l4_per2, l4_per3, l4_wkup - */ -static struct omap_hwmod_class dra7xx_l4_hwmod_class = { - .name = "l4", -}; - -/* l4_cfg */ -static struct omap_hwmod dra7xx_l4_cfg_hwmod = { - .name = "l4_cfg", - .class = &dra7xx_l4_hwmod_class, - .clkdm_name = "l4cfg_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L4CFG_L4_CFG_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_L4CFG_L4_CFG_CONTEXT_OFFSET, - }, - }, -}; - -/* l4_per1 */ -static struct omap_hwmod dra7xx_l4_per1_hwmod = { - .name = "l4_per1", - .class = &dra7xx_l4_hwmod_class, - .clkdm_name = "l4per_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L4PER_L4_PER1_CLKCTRL_OFFSET, - .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT, - }, - }, -}; - -/* l4_per2 */ -static struct omap_hwmod dra7xx_l4_per2_hwmod = { - .name = "l4_per2", - .class = &dra7xx_l4_hwmod_class, - .clkdm_name = "l4per2_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L4PER2_L4_PER2_CLKCTRL_OFFSET, - .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT, - }, - }, -}; - -/* l4_per3 */ -static struct omap_hwmod dra7xx_l4_per3_hwmod = { - .name = "l4_per3", - .class = &dra7xx_l4_hwmod_class, - .clkdm_name = "l4per3_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L4PER3_L4_PER3_CLKCTRL_OFFSET, - .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT, - }, - }, -}; - -/* l4_wkup */ -static struct omap_hwmod dra7xx_l4_wkup_hwmod = { - .name = "l4_wkup", - .class = &dra7xx_l4_hwmod_class, - .clkdm_name = "wkupaon_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_WKUPAON_L4_WKUP_CLKCTRL_OFFSET, - .context_offs = 
DRA7XX_RM_WKUPAON_L4_WKUP_CONTEXT_OFFSET, - }, - }, -}; - -/* - * 'atl' class - * - */ - -static struct omap_hwmod_class dra7xx_atl_hwmod_class = { - .name = "atl", -}; - -/* atl */ -static struct omap_hwmod dra7xx_atl_hwmod = { - .name = "atl", - .class = &dra7xx_atl_hwmod_class, - .clkdm_name = "atl_clkdm", - .main_clk = "atl_gfclk_mux", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_ATL_ATL_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_ATL_ATL_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, -}; - -/* - * 'bb2d' class - * - */ - -static struct omap_hwmod_class dra7xx_bb2d_hwmod_class = { - .name = "bb2d", -}; - -/* bb2d */ -static struct omap_hwmod dra7xx_bb2d_hwmod = { - .name = "bb2d", - .class = &dra7xx_bb2d_hwmod_class, - .clkdm_name = "dss_clkdm", - .main_clk = "dpll_core_h24x2_ck", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_DSS_BB2D_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_DSS_BB2D_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, -}; - -/* - * 'ctrl_module' class - * - */ - -static struct omap_hwmod_class dra7xx_ctrl_module_hwmod_class = { - .name = "ctrl_module", -}; - -/* ctrl_module_wkup */ -static struct omap_hwmod dra7xx_ctrl_module_wkup_hwmod = { - .name = "ctrl_module_wkup", - .class = &dra7xx_ctrl_module_hwmod_class, - .clkdm_name = "wkupaon_clkdm", - .prcm = { - .omap4 = { - .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT, - }, - }, -}; - -/* - * 'mpu' class - * - */ - -static struct omap_hwmod_class dra7xx_mpu_hwmod_class = { - .name = "mpu", -}; - -/* mpu */ -static struct omap_hwmod dra7xx_mpu_hwmod = { - .name = "mpu", - .class = &dra7xx_mpu_hwmod_class, - .clkdm_name = "mpu_clkdm", - .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET, - .main_clk = "dpll_mpu_m2_ck", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_MPU_MPU_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_MPU_MPU_CONTEXT_OFFSET, - }, - }, -}; - - -/* - * 'PCIE' class - * - */ - -/* - * As noted in documentation for _reset() in omap_hwmod.c, the stock reset - * functionality of OMAP HWMOD layer does not deassert the hardreset lines - * associated with an IP automatically leaving the driver to handle that - * by itself. This does not work for PCIeSS which needs the reset lines - * deasserted for the driver to start accessing registers. - * - * We use a PCIeSS HWMOD class specific reset handler to deassert the hardreset - * lines after asserting them. 
- */ -int dra7xx_pciess_reset(struct omap_hwmod *oh) -{ - int i; - - for (i = 0; i < oh->rst_lines_cnt; i++) { - omap_hwmod_assert_hardreset(oh, oh->rst_lines[i].name); - omap_hwmod_deassert_hardreset(oh, oh->rst_lines[i].name); - } - - return 0; -} - -static struct omap_hwmod_class dra7xx_pciess_hwmod_class = { - .name = "pcie", - .reset = dra7xx_pciess_reset, -}; - -/* pcie1 */ -static struct omap_hwmod_rst_info dra7xx_pciess1_resets[] = { - { .name = "pcie", .rst_shift = 0 }, -}; - -static struct omap_hwmod dra7xx_pciess1_hwmod = { - .name = "pcie1", - .class = &dra7xx_pciess_hwmod_class, - .clkdm_name = "pcie_clkdm", - .rst_lines = dra7xx_pciess1_resets, - .rst_lines_cnt = ARRAY_SIZE(dra7xx_pciess1_resets), - .main_clk = "l4_root_clk_div", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L3INIT_PCIESS1_CLKCTRL_OFFSET, - .rstctrl_offs = DRA7XX_RM_L3INIT_PCIESS_RSTCTRL_OFFSET, - .context_offs = DRA7XX_RM_L3INIT_PCIESS1_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, -}; - -/* pcie2 */ -static struct omap_hwmod_rst_info dra7xx_pciess2_resets[] = { - { .name = "pcie", .rst_shift = 1 }, -}; - -/* pcie2 */ -static struct omap_hwmod dra7xx_pciess2_hwmod = { - .name = "pcie2", - .class = &dra7xx_pciess_hwmod_class, - .clkdm_name = "pcie_clkdm", - .rst_lines = dra7xx_pciess2_resets, - .rst_lines_cnt = ARRAY_SIZE(dra7xx_pciess2_resets), - .main_clk = "l4_root_clk_div", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L3INIT_PCIESS2_CLKCTRL_OFFSET, - .rstctrl_offs = DRA7XX_RM_L3INIT_PCIESS_RSTCTRL_OFFSET, - .context_offs = DRA7XX_RM_L3INIT_PCIESS2_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, -}; - -/* - * 'qspi' class - * - */ - -static struct omap_hwmod_class_sysconfig dra7xx_qspi_sysc = { - .rev_offs = 0, - .sysc_offs = 0x0010, - .sysc_flags = SYSC_HAS_SIDLEMODE, - .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | - SIDLE_SMART_WKUP), - .sysc_fields = &omap_hwmod_sysc_type2, -}; - -static struct omap_hwmod_class dra7xx_qspi_hwmod_class = { - .name = "qspi", - .sysc = &dra7xx_qspi_sysc, -}; - -/* qspi */ -static struct omap_hwmod dra7xx_qspi_hwmod = { - .name = "qspi", - .class = &dra7xx_qspi_hwmod_class, - .clkdm_name = "l4per2_clkdm", - .main_clk = "qspi_gfclk_div", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L4PER2_QSPI_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_L4PER2_QSPI_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, -}; - -/* - * 'sata' class - * - */ - -static struct omap_hwmod_class_sysconfig dra7xx_sata_sysc = { - .rev_offs = 0x00fc, - .sysc_offs = 0x0000, - .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE), - .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | - SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | - MSTANDBY_SMART | MSTANDBY_SMART_WKUP), - .sysc_fields = &omap_hwmod_sysc_type2, -}; - -static struct omap_hwmod_class dra7xx_sata_hwmod_class = { - .name = "sata", - .sysc = &dra7xx_sata_sysc, -}; - -/* sata */ - -static struct omap_hwmod dra7xx_sata_hwmod = { - .name = "sata", - .class = &dra7xx_sata_hwmod_class, - .clkdm_name = "l3init_clkdm", - .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY, - .main_clk = "func_48m_fclk", - .mpu_rt_idx = 1, - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L3INIT_SATA_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_L3INIT_SATA_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, -}; - -/* - * 'vcp' class - * - */ - -static struct omap_hwmod_class dra7xx_vcp_hwmod_class = { - .name = "vcp", -}; - -/* vcp1 */ -static struct omap_hwmod dra7xx_vcp1_hwmod = { - 
.name = "vcp1", - .class = &dra7xx_vcp_hwmod_class, - .clkdm_name = "l3main1_clkdm", - .main_clk = "l3_iclk_div", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L3MAIN1_VCP1_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_L3MAIN1_VCP1_CONTEXT_OFFSET, - }, - }, -}; - -/* vcp2 */ -static struct omap_hwmod dra7xx_vcp2_hwmod = { - .name = "vcp2", - .class = &dra7xx_vcp_hwmod_class, - .clkdm_name = "l3main1_clkdm", - .main_clk = "l3_iclk_div", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L3MAIN1_VCP2_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_L3MAIN1_VCP2_CONTEXT_OFFSET, - }, - }, -}; - - - -/* - * Interfaces - */ - -/* l3_main_1 -> dmm */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_1__dmm = { - .master = &dra7xx_l3_main_1_hwmod, - .slave = &dra7xx_dmm_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_SDMA, -}; - -/* l3_main_2 -> l3_instr */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_2__l3_instr = { - .master = &dra7xx_l3_main_2_hwmod, - .slave = &dra7xx_l3_instr_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_cfg -> l3_main_1 */ -static struct omap_hwmod_ocp_if dra7xx_l4_cfg__l3_main_1 = { - .master = &dra7xx_l4_cfg_hwmod, - .slave = &dra7xx_l3_main_1_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* mpu -> l3_main_1 */ -static struct omap_hwmod_ocp_if dra7xx_mpu__l3_main_1 = { - .master = &dra7xx_mpu_hwmod, - .slave = &dra7xx_l3_main_1_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU, -}; - -/* l3_main_1 -> l3_main_2 */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_1__l3_main_2 = { - .master = &dra7xx_l3_main_1_hwmod, - .slave = &dra7xx_l3_main_2_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU, -}; - -/* l4_cfg -> l3_main_2 */ -static struct omap_hwmod_ocp_if dra7xx_l4_cfg__l3_main_2 = { - .master = &dra7xx_l4_cfg_hwmod, - .slave = &dra7xx_l3_main_2_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_1 -> l4_cfg */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_1__l4_cfg = { - .master = &dra7xx_l3_main_1_hwmod, - .slave = &dra7xx_l4_cfg_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_1 -> l4_per1 */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_1__l4_per1 = { - .master = &dra7xx_l3_main_1_hwmod, - .slave = &dra7xx_l4_per1_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_1 -> l4_per2 */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_1__l4_per2 = { - .master = &dra7xx_l3_main_1_hwmod, - .slave = &dra7xx_l4_per2_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_1 -> l4_per3 */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_1__l4_per3 = { - .master = &dra7xx_l3_main_1_hwmod, - .slave = &dra7xx_l4_per3_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_1 -> l4_wkup */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_1__l4_wkup = { - .master = &dra7xx_l3_main_1_hwmod, - .slave = &dra7xx_l4_wkup_hwmod, - .clk = "wkupaon_iclk_mux", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_per2 -> atl */ -static struct omap_hwmod_ocp_if dra7xx_l4_per2__atl = { - .master = &dra7xx_l4_per2_hwmod, - .slave = &dra7xx_atl_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_1 -> bb2d */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_1__bb2d = { - .master = &dra7xx_l3_main_1_hwmod, - .slave = &dra7xx_bb2d_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - 
-/* l4_wkup -> ctrl_module_wkup */ -static struct omap_hwmod_ocp_if dra7xx_l4_wkup__ctrl_module_wkup = { - .master = &dra7xx_l4_wkup_hwmod, - .slave = &dra7xx_ctrl_module_wkup_hwmod, - .clk = "wkupaon_iclk_mux", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_cfg -> mpu */ -static struct omap_hwmod_ocp_if dra7xx_l4_cfg__mpu = { - .master = &dra7xx_l4_cfg_hwmod, - .slave = &dra7xx_mpu_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_1 -> pciess1 */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess1 = { - .master = &dra7xx_l3_main_1_hwmod, - .slave = &dra7xx_pciess1_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_cfg -> pciess1 */ -static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess1 = { - .master = &dra7xx_l4_cfg_hwmod, - .slave = &dra7xx_pciess1_hwmod, - .clk = "l4_root_clk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_1 -> pciess2 */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess2 = { - .master = &dra7xx_l3_main_1_hwmod, - .slave = &dra7xx_pciess2_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_cfg -> pciess2 */ -static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess2 = { - .master = &dra7xx_l4_cfg_hwmod, - .slave = &dra7xx_pciess2_hwmod, - .clk = "l4_root_clk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_1 -> qspi */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_1__qspi = { - .master = &dra7xx_l3_main_1_hwmod, - .slave = &dra7xx_qspi_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_cfg -> sata */ -static struct omap_hwmod_ocp_if dra7xx_l4_cfg__sata = { - .master = &dra7xx_l4_cfg_hwmod, - .slave = &dra7xx_sata_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_1 -> vcp1 */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_1__vcp1 = { - .master = &dra7xx_l3_main_1_hwmod, - .slave = &dra7xx_vcp1_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_per2 -> vcp1 */ -static struct omap_hwmod_ocp_if dra7xx_l4_per2__vcp1 = { - .master = &dra7xx_l4_per2_hwmod, - .slave = &dra7xx_vcp1_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_1 -> vcp2 */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_1__vcp2 = { - .master = &dra7xx_l3_main_1_hwmod, - .slave = &dra7xx_vcp2_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_per2 -> vcp2 */ -static struct omap_hwmod_ocp_if dra7xx_l4_per2__vcp2 = { - .master = &dra7xx_l4_per2_hwmod, - .slave = &dra7xx_vcp2_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = { - &dra7xx_l3_main_1__dmm, - &dra7xx_l3_main_2__l3_instr, - &dra7xx_l4_cfg__l3_main_1, - &dra7xx_mpu__l3_main_1, - &dra7xx_l3_main_1__l3_main_2, - &dra7xx_l4_cfg__l3_main_2, - &dra7xx_l3_main_1__l4_cfg, - &dra7xx_l3_main_1__l4_per1, - &dra7xx_l3_main_1__l4_per2, - &dra7xx_l3_main_1__l4_per3, - &dra7xx_l3_main_1__l4_wkup, - &dra7xx_l4_per2__atl, - &dra7xx_l3_main_1__bb2d, - &dra7xx_l4_wkup__ctrl_module_wkup, - &dra7xx_l4_cfg__mpu, - &dra7xx_l3_main_1__pciess1, - &dra7xx_l4_cfg__pciess1, - &dra7xx_l3_main_1__pciess2, - &dra7xx_l4_cfg__pciess2, - &dra7xx_l3_main_1__qspi, - &dra7xx_l4_cfg__sata, - &dra7xx_l3_main_1__vcp1, - &dra7xx_l4_per2__vcp1, - &dra7xx_l3_main_1__vcp2, - &dra7xx_l4_per2__vcp2, - NULL, -}; - -/* SoC variant specific hwmod links */ -static struct 
omap_hwmod_ocp_if *dra72x_hwmod_ocp_ifs[] __initdata = { - NULL, -}; - -static struct omap_hwmod_ocp_if *rtc_hwmod_ocp_ifs[] __initdata = { - NULL, -}; - -int __init dra7xx_hwmod_init(void) -{ - int ret; - - omap_hwmod_init(); - ret = omap_hwmod_register_links(dra7xx_hwmod_ocp_ifs); - - if (!ret && soc_is_dra74x()) { - ret = omap_hwmod_register_links(rtc_hwmod_ocp_ifs); - } else if (!ret && soc_is_dra72x()) { - ret = omap_hwmod_register_links(dra72x_hwmod_ocp_ifs); - if (!ret && !of_machine_is_compatible("ti,dra718")) - ret = omap_hwmod_register_links(rtc_hwmod_ocp_ifs); - } else if (!ret && soc_is_dra76x()) { - if (!ret && soc_is_dra76x_abz()) - ret = omap_hwmod_register_links(rtc_hwmod_ocp_ifs); - } - - return ret; -} diff --git a/arch/arm/mach-omap2/omap_twl.c b/arch/arm/mach-omap2/omap_twl.c index a642d3b39e50..d4dab041324d 100644 --- a/arch/arm/mach-omap2/omap_twl.c +++ b/arch/arm/mach-omap2/omap_twl.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * OMAP and TWL PMIC specific initializations. * * Copyright (C) 2010 Texas Instruments Incorporated. diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 2e3a10914c40..765809b214e7 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -443,7 +443,7 @@ void omap_auxdata_legacy_init(struct device *dev) dev->platform_data = &twl_gpio_auxdata; } -#if IS_ENABLED(CONFIG_SND_SOC_OMAP_MCBSP) +#if defined(CONFIG_ARCH_OMAP3) && IS_ENABLED(CONFIG_SND_SOC_OMAP_MCBSP) static struct omap_mcbsp_platform_data mcbsp_pdata; static void __init omap3_mcbsp_init(void) { @@ -569,10 +569,29 @@ static void pdata_quirks_check(struct pdata_init *quirks) } } -void __init pdata_quirks_init(const struct of_device_id *omap_dt_match_table) +static const char * const pdata_quirks_init_nodes[] = { + "prcm", + "prm", +}; + +static void __init +pdata_quirks_init_clocks(const struct of_device_id *omap_dt_match_table) { struct device_node *np; + int i; + + for (i = 0; i < ARRAY_SIZE(pdata_quirks_init_nodes); i++) { + np = of_find_node_by_name(NULL, pdata_quirks_init_nodes[i]); + if (!np) + continue; + + of_platform_populate(np, omap_dt_match_table, + omap_auxdata_lookup, NULL); + } +} +void __init pdata_quirks_init(const struct of_device_id *omap_dt_match_table) +{ /* * We still need this for omap2420 and omap3 PM to work, others are * using drivers/misc/sram.c already. 
@@ -585,13 +604,7 @@ void __init pdata_quirks_init(const struct of_device_id *omap_dt_match_table) omap3_mcbsp_init(); pdata_quirks_check(auxdata_quirks); - /* Populate always-on PRCM in l4_wkup to probe l4_wkup */ - np = of_find_node_by_name(NULL, "prcm"); - if (!np) - np = of_find_node_by_name(NULL, "prm"); - if (np) - of_platform_populate(np, omap_dt_match_table, - omap_auxdata_lookup, NULL); + pdata_quirks_init_clocks(omap_dt_match_table); of_platform_populate(NULL, omap_dt_match_table, omap_auxdata_lookup, NULL); diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c index 919d35d5b325..b43eab9879d3 100644 --- a/arch/arm/mach-omap2/pm-debug.c +++ b/arch/arm/mach-omap2/pm-debug.c @@ -168,8 +168,8 @@ static int pwrdm_suspend_set(void *data, u64 val) return -EINVAL; } -DEFINE_SIMPLE_ATTRIBUTE(pwrdm_suspend_fops, pwrdm_suspend_get, - pwrdm_suspend_set, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(pwrdm_suspend_fops, pwrdm_suspend_get, + pwrdm_suspend_set, "%llu\n"); static int __init pwrdms_setup(struct powerdomain *pwrdm, void *dir) { diff --git a/arch/arm/mach-omap2/pmic-cpcap.c b/arch/arm/mach-omap2/pmic-cpcap.c index 09076ad0576d..668dc84fd31e 100644 --- a/arch/arm/mach-omap2/pmic-cpcap.c +++ b/arch/arm/mach-omap2/pmic-cpcap.c @@ -246,10 +246,10 @@ int __init omap4_cpcap_init(void) omap_voltage_register_pmic(voltdm, &omap443x_max8952_mpu); if (of_machine_is_compatible("motorola,droid-bionic")) { - voltdm = voltdm_lookup("mpu"); + voltdm = voltdm_lookup("core"); omap_voltage_register_pmic(voltdm, &omap_cpcap_core); - voltdm = voltdm_lookup("mpu"); + voltdm = voltdm_lookup("iva"); omap_voltage_register_pmic(voltdm, &omap_cpcap_iva); } else { voltdm = voltdm_lookup("core"); diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c index 1cbac76136d4..0a5b87e2a4b0 100644 --- a/arch/arm/mach-omap2/powerdomain.c +++ b/arch/arm/mach-omap2/powerdomain.c @@ -1202,26 +1202,26 @@ bool pwrdm_can_ever_lose_context(struct powerdomain *pwrdm) if (!pwrdm) { pr_debug("powerdomain: %s: invalid powerdomain pointer\n", __func__); - return 1; + return true; } if (pwrdm->pwrsts & PWRSTS_OFF) - return 1; + return true; if (pwrdm->pwrsts & PWRSTS_RET) { if (pwrdm->pwrsts_logic_ret & PWRSTS_OFF) - return 1; + return true; for (i = 0; i < pwrdm->banks; i++) if (pwrdm->pwrsts_mem_ret[i] & PWRSTS_OFF) - return 1; + return true; } for (i = 0; i < pwrdm->banks; i++) if (pwrdm->pwrsts_mem_on[i] & PWRSTS_OFF) - return 1; + return true; - return 0; + return false; } /** diff --git a/arch/arm/mach-omap2/sr_device.c b/arch/arm/mach-omap2/sr_device.c index 62df666c2bd0..db672cf19a51 100644 --- a/arch/arm/mach-omap2/sr_device.c +++ b/arch/arm/mach-omap2/sr_device.c @@ -88,34 +88,26 @@ static void __init sr_set_nvalues(struct omap_volt_data *volt_data, extern struct omap_sr_data omap_sr_pdata[]; -static int __init sr_dev_init(struct omap_hwmod *oh, void *user) +static int __init sr_init_by_name(const char *name, const char *voltdm) { struct omap_sr_data *sr_data = NULL; struct omap_volt_data *volt_data; - struct omap_smartreflex_dev_attr *sr_dev_attr; static int i; - if (!strncmp(oh->name, "smartreflex_mpu_iva", 20) || - !strncmp(oh->name, "smartreflex_mpu", 16)) + if (!strncmp(name, "smartreflex_mpu_iva", 20) || + !strncmp(name, "smartreflex_mpu", 16)) sr_data = &omap_sr_pdata[OMAP_SR_MPU]; - else if (!strncmp(oh->name, "smartreflex_core", 17)) + else if (!strncmp(name, "smartreflex_core", 17)) sr_data = &omap_sr_pdata[OMAP_SR_CORE]; - else if (!strncmp(oh->name, "smartreflex_iva", 
16)) + else if (!strncmp(name, "smartreflex_iva", 16)) sr_data = &omap_sr_pdata[OMAP_SR_IVA]; if (!sr_data) { - pr_err("%s: Unknown instance %s\n", __func__, oh->name); + pr_err("%s: Unknown instance %s\n", __func__, name); return -EINVAL; } - sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr; - if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) { - pr_err("%s: No voltage domain specified for %s. Cannot initialize\n", - __func__, oh->name); - goto exit; - } - - sr_data->name = oh->name; + sr_data->name = name; if (cpu_is_omap343x()) sr_data->ip_type = 1; else @@ -136,10 +128,10 @@ static int __init sr_dev_init(struct omap_hwmod *oh, void *user) } } - sr_data->voltdm = voltdm_lookup(sr_dev_attr->sensor_voltdm_name); + sr_data->voltdm = voltdm_lookup(voltdm); if (!sr_data->voltdm) { pr_err("%s: Unable to get voltage domain pointer for VDD %s\n", - __func__, sr_dev_attr->sensor_voltdm_name); + __func__, voltdm); goto exit; } @@ -160,6 +152,27 @@ exit: return 0; } +#ifdef CONFIG_OMAP_HWMOD +static int __init sr_dev_init(struct omap_hwmod *oh, void *user) +{ + struct omap_smartreflex_dev_attr *sr_dev_attr; + + sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr; + if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) { + pr_err("%s: No voltage domain specified for %s. Cannot initialize\n", + __func__, oh->name); + return 0; + } + + return sr_init_by_name(oh->name, sr_dev_attr->sensor_voltdm_name); +} +#else +static int __init sr_dev_init(struct omap_hwmod *oh, void *user) +{ + return -EINVAL; +} +#endif + /* * API to be called from board files to enable smartreflex * autocompensation at init. @@ -169,7 +182,42 @@ void __init omap_enable_smartreflex_on_init(void) sr_enable_on_init = true; } +static const char * const omap4_sr_instances[] = { + "mpu", + "iva", + "core", +}; + +static const char * const dra7_sr_instances[] = { + "mpu", + "core", +}; + int __init omap_devinit_smartreflex(void) { + const char * const *sr_inst = NULL; + int i, nr_sr = 0; + + if (soc_is_omap44xx()) { + sr_inst = omap4_sr_instances; + nr_sr = ARRAY_SIZE(omap4_sr_instances); + + } else if (soc_is_dra7xx()) { + sr_inst = dra7_sr_instances; + nr_sr = ARRAY_SIZE(dra7_sr_instances); + } + + if (nr_sr) { + const char *name, *voltdm; + + for (i = 0; i < nr_sr; i++) { + name = kasprintf(GFP_KERNEL, "smartreflex_%s", sr_inst[i]); + voltdm = sr_inst[i]; + sr_init_by_name(name, voltdm); + } + + return 0; + } + return omap_hwmod_for_each_by_class("smartreflex", sr_dev_init, NULL); } diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c index 151e26ec0696..04a12523cdee 100644 --- a/arch/arm/mach-pxa/icontrol.c +++ b/arch/arm/mach-pxa/icontrol.c @@ -74,13 +74,17 @@ static const struct property_entry mcp251x_properties[] = { {} }; +static const struct software_node mcp251x_node = { + .properties = mcp251x_properties, +}; + static struct spi_board_info mcp251x_board_info[] = { { .modalias = "mcp2515", .max_speed_hz = 6500000, .bus_num = 3, .chip_select = 0, - .properties = mcp251x_properties, + .swnode = &mcp251x_node, .controller_data = &mcp251x_chip_info1, .irq = PXA_GPIO_TO_IRQ(ICONTROL_MCP251x_nIRQ1) }, @@ -89,7 +93,7 @@ static struct spi_board_info mcp251x_board_info[] = { .max_speed_hz = 6500000, .bus_num = 3, .chip_select = 1, - .properties = mcp251x_properties, + .swnode = &mcp251x_node, .controller_data = &mcp251x_chip_info2, .irq = PXA_GPIO_TO_IRQ(ICONTROL_MCP251x_nIRQ2) }, @@ -98,7 +102,7 @@ static struct spi_board_info mcp251x_board_info[] = { .max_speed_hz = 6500000, .bus_num 
= 4, .chip_select = 0, - .properties = mcp251x_properties, + .swnode = &mcp251x_node, .controller_data = &mcp251x_chip_info3, .irq = PXA_GPIO_TO_IRQ(ICONTROL_MCP251x_nIRQ3) }, @@ -107,7 +111,7 @@ static struct spi_board_info mcp251x_board_info[] = { .max_speed_hz = 6500000, .bus_num = 4, .chip_select = 1, - .properties = mcp251x_properties, + .swnode = &mcp251x_node, .controller_data = &mcp251x_chip_info4, .irq = PXA_GPIO_TO_IRQ(ICONTROL_MCP251x_nIRQ4) } diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c index d1010ec26e9f..d237bd030238 100644 --- a/arch/arm/mach-pxa/mainstone.c +++ b/arch/arm/mach-pxa/mainstone.c @@ -502,16 +502,20 @@ static inline void mainstone_init_keypad(void) {} #endif static int mst_pcmcia0_irqs[11] = { - [0 ... 10] = -1, + [0 ... 4] = -1, [5] = MAINSTONE_S0_CD_IRQ, + [6 ... 7] = -1, [8] = MAINSTONE_S0_STSCHG_IRQ, + [9] = -1, [10] = MAINSTONE_S0_IRQ, }; static int mst_pcmcia1_irqs[11] = { - [0 ... 10] = -1, + [0 ... 4] = -1, [5] = MAINSTONE_S1_CD_IRQ, + [6 ... 7] = -1, [8] = MAINSTONE_S1_STSCHG_IRQ, + [9] = -1, [10] = MAINSTONE_S1_IRQ, }; diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c index 45c19ca96f7a..ec0d9b094744 100644 --- a/arch/arm/mach-pxa/pxa_cplds_irqs.c +++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c @@ -147,22 +147,20 @@ static int cplds_probe(struct platform_device *pdev) } irq_set_irq_wake(fpga->irq, 1); - fpga->irqdomain = irq_domain_add_linear(pdev->dev.of_node, - CPLDS_NB_IRQ, - &cplds_irq_domain_ops, fpga); + if (base_irq) + fpga->irqdomain = irq_domain_add_legacy(pdev->dev.of_node, + CPLDS_NB_IRQ, + base_irq, 0, + &cplds_irq_domain_ops, + fpga); + else + fpga->irqdomain = irq_domain_add_linear(pdev->dev.of_node, + CPLDS_NB_IRQ, + &cplds_irq_domain_ops, + fpga); if (!fpga->irqdomain) return -ENODEV; - if (base_irq) { - ret = irq_create_strict_mappings(fpga->irqdomain, base_irq, 0, - CPLDS_NB_IRQ); - if (ret) { - dev_err(&pdev->dev, "couldn't create the irq mapping %d..%d\n", - base_irq, base_irq + CPLDS_NB_IRQ); - return ret; - } - } - return 0; } diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c index e2353f7dcf01..7ad627465768 100644 --- a/arch/arm/mach-pxa/stargate2.c +++ b/arch/arm/mach-pxa/stargate2.c @@ -794,6 +794,10 @@ static const struct property_entry pca9500_eeprom_properties[] = { { } }; +static const struct software_node pca9500_eeprom_node = { + .properties = pca9500_eeprom_properties, +}; + /** * stargate2_reset_bluetooth() reset the bluecore to ensure consistent state **/ @@ -929,7 +933,7 @@ static struct i2c_board_info __initdata stargate2_i2c_board_info[] = { }, { .type = "24c02", .addr = 0x57, - .properties = pca9500_eeprom_properties, + .swnode = &pca9500_eeprom_node, }, { .type = "max1238", .addr = 0x35, diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index b27fc7ac9cea..97700429633e 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c @@ -433,10 +433,14 @@ static const struct property_entry mcp251x_properties[] = { {} }; +static const struct software_node mcp251x_node = { + .properties = mcp251x_properties, +}; + static struct spi_board_info zeus_spi_board_info[] = { [0] = { .modalias = "mcp2515", - .properties = mcp251x_properties, + .swnode = &mcp251x_node, .irq = PXA_GPIO_TO_IRQ(ZEUS_CAN_GPIO), .max_speed_hz = 1*1000*1000, .bus_num = 3, diff --git a/arch/arm/mach-s3c/mach-mini2440.c b/arch/arm/mach-s3c/mach-mini2440.c index 4100905dfbd0..551ec660ab59 100644 --- a/arch/arm/mach-s3c/mach-mini2440.c +++ 
b/arch/arm/mach-s3c/mach-mini2440.c @@ -542,10 +542,14 @@ static const struct property_entry mini2440_at24_properties[] = { { } }; +static const struct software_node mini2440_at24_node = { + .properties = mini2440_at24_properties, +}; + static struct i2c_board_info mini2440_i2c_devs[] __initdata = { { I2C_BOARD_INFO("24c08", 0x50), - .properties = mini2440_at24_properties, + .swnode = &mini2440_at24_node, }, }; diff --git a/arch/arm/mach-s3c/mach-rx1950.c b/arch/arm/mach-s3c/mach-rx1950.c index 6e19add158a9..a3f46aa61c45 100644 --- a/arch/arm/mach-s3c/mach-rx1950.c +++ b/arch/arm/mach-s3c/mach-rx1950.c @@ -384,6 +384,8 @@ static struct s3c2410fb_mach_info rx1950_lcd_cfg = { static struct pwm_lookup rx1950_pwm_lookup[] = { PWM_LOOKUP("samsung-pwm", 0, "pwm-backlight.0", NULL, 48000, PWM_POLARITY_NORMAL), + PWM_LOOKUP("samsung-pwm", 1, "pwm-backlight.0", "RX1950 LCD", LCD_PWM_PERIOD, + PWM_POLARITY_NORMAL), }; static struct pwm_device *lcd_pwm; @@ -498,19 +500,18 @@ static void rx1950_bl_power(int enable) static int rx1950_backlight_init(struct device *dev) { WARN_ON(gpio_request(S3C2410_GPB(0), "Backlight")); - lcd_pwm = pwm_request(1, "RX1950 LCD"); + lcd_pwm = pwm_get(dev, "RX1950 LCD"); if (IS_ERR(lcd_pwm)) { dev_err(dev, "Unable to request PWM for LCD power!\n"); return PTR_ERR(lcd_pwm); } /* - * This is only required to initialize .polarity; all other values are - * fixed in this driver. + * Call pwm_init_state to initialize .polarity and .period. The other + * values are fixed in this driver. */ pwm_init_state(lcd_pwm, &lcd_pwm_state); - lcd_pwm_state.period = LCD_PWM_PERIOD; lcd_pwm_state.duty_cycle = LCD_PWM_DUTY; rx1950_lcd_power(1); @@ -524,7 +525,7 @@ static void rx1950_backlight_exit(struct device *dev) rx1950_bl_power(0); rx1950_lcd_power(0); - pwm_free(lcd_pwm); + pwm_put(lcd_pwm); gpio_free(S3C2410_GPB(0)); } diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig index c3bb68d57cea..43ddec677c0b 100644 --- a/arch/arm/mach-socfpga/Kconfig +++ b/arch/arm/mach-socfpga/Kconfig @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -menuconfig ARCH_SOCFPGA +menuconfig ARCH_INTEL_SOCFPGA bool "Altera SOCFPGA family" depends on ARCH_MULTI_V7 select ARCH_SUPPORTS_BIG_ENDIAN @@ -19,7 +19,7 @@ menuconfig ARCH_SOCFPGA select PL310_ERRATA_753970 if PL310 select PL310_ERRATA_769419 -if ARCH_SOCFPGA +if ARCH_INTEL_SOCFPGA config SOCFPGA_SUSPEND bool "Suspend to RAM on SOCFPGA" help diff --git a/arch/arm/mach-spear/spear320.c b/arch/arm/mach-spear/spear320.c index 0958f68a21e2..926d5a243238 100644 --- a/arch/arm/mach-spear/spear320.c +++ b/arch/arm/mach-spear/spear320.c @@ -195,14 +195,12 @@ static struct pl022_ssp_controller spear320_ssp_data[] = { .dma_filter = pl08x_filter_id, .dma_tx_param = "ssp1_tx", .dma_rx_param = "ssp1_rx", - .num_chipselect = 2, }, { .bus_id = 2, .enable_dma = 1, .dma_filter = pl08x_filter_id, .dma_tx_param = "ssp2_tx", .dma_rx_param = "ssp2_rx", - .num_chipselect = 2, } }; diff --git a/arch/arm/mach-spear/spear3xx.c b/arch/arm/mach-spear/spear3xx.c index 8537fcffe5a8..f83321d5e353 100644 --- a/arch/arm/mach-spear/spear3xx.c +++ b/arch/arm/mach-spear/spear3xx.c @@ -30,16 +30,6 @@ struct pl022_ssp_controller pl022_plat_data = { .dma_filter = pl08x_filter_id, .dma_tx_param = "ssp0_tx", .dma_rx_param = "ssp0_rx", - /* - * This is number of spi devices that can be connected to spi. There are - * two type of chipselects on which slave devices can work. One is chip - * select provided by spi masters other is controlled through external - * gpio's. 
We can't use chipselect provided from spi master (because as - * soon as FIFO becomes empty, CS is disabled and transfer ends). So - * this number now depends on number of gpios available for spi. each - * slave on each master requires a separate gpio pin. - */ - .num_chipselect = 2, }; /* dmac device registration */ diff --git a/arch/arm/mach-stm32/board-dt.c b/arch/arm/mach-stm32/board-dt.c index 011d57b488c2..a766310d8dca 100644 --- a/arch/arm/mach-stm32/board-dt.c +++ b/arch/arm/mach-stm32/board-dt.c @@ -17,6 +17,7 @@ static const char *const stm32_compat[] __initconst = { "st,stm32f746", "st,stm32f769", "st,stm32h743", + "st,stm32h750", "st,stm32mp157", NULL }; diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c index 0810f3abd810..415d8ad2a3c1 100644 --- a/arch/arm/mach-ux500/platsmp.c +++ b/arch/arm/mach-ux500/platsmp.c @@ -86,7 +86,7 @@ static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle) } #ifdef CONFIG_HOTPLUG_CPU -void ux500_cpu_die(unsigned int cpu) +static void ux500_cpu_die(unsigned int cpu) { wfi(); } diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index dc8f152f3556..830bbfb26ca5 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -33,41 +33,41 @@ icache_size: * processor. We fix this by performing an invalidate, rather than a * clean + invalidate, before jumping into the kernel. * - * This function is cloned from arch/arm/mach-tegra/headsmp.S, and needs - * to be called for both secondary cores startup and primary core resume - * procedures. + * This function needs to be called for both secondary cores startup and + * primary core resume procedures. */ ENTRY(v7_invalidate_l1) - mov r0, #0 - mcr p15, 2, r0, c0, c0, 0 - mrc p15, 1, r0, c0, c0, 0 - - movw r1, #0x7fff - and r2, r1, r0, lsr #13 + mov r0, #0 + mcr p15, 2, r0, c0, c0, 0 @ select L1 data cache in CSSELR + isb + mrc p15, 1, r0, c0, c0, 0 @ read cache geometry from CCSIDR - movw r1, #0x3ff + movw r3, #0x3ff + and r3, r3, r0, lsr #3 @ 'Associativity' in CCSIDR[12:3] + clz r1, r3 @ WayShift + mov r2, #1 + mov r3, r3, lsl r1 @ NumWays-1 shifted into bits [31:...] 
+ movs r1, r2, lsl r1 @ #1 shifted left by same amount + moveq r1, #1 @ r1 needs value > 0 even if only 1 way - and r3, r1, r0, lsr #3 @ NumWays - 1 - add r2, r2, #1 @ NumSets + and r2, r0, #0x7 + add r2, r2, #4 @ SetShift - and r0, r0, #0x7 - add r0, r0, #4 @ SetShift +1: movw ip, #0x7fff + and r0, ip, r0, lsr #13 @ 'NumSets' in CCSIDR[27:13] - clz r1, r3 @ WayShift - add r4, r3, #1 @ NumWays -1: sub r2, r2, #1 @ NumSets-- - mov r3, r4 @ Temp = NumWays -2: subs r3, r3, #1 @ Temp-- - mov r5, r3, lsl r1 - mov r6, r2, lsl r0 - orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift) - mcr p15, 0, r5, c7, c6, 2 - bgt 2b - cmp r2, #0 - bgt 1b - dsb st - isb - ret lr +2: mov ip, r0, lsl r2 @ NumSet << SetShift + orr ip, ip, r3 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift) + mcr p15, 0, ip, c7, c6, 2 + subs r0, r0, #1 @ Set-- + bpl 2b + subs r3, r3, r1 @ Way-- + bcc 3f + mrc p15, 1, r0, c0, c0, 0 @ re-read cache geometry from CCSIDR + b 1b +3: dsb st + isb + ret lr ENDPROC(v7_invalidate_l1) /* diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index 44f7292ec27b..f1da3b439b96 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c @@ -13,6 +13,7 @@ #include <linux/init.h> #include <linux/mm.h> #include <linux/highmem.h> +#include <linux/pagemap.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index 6a769a6c314e..d8a115de5507 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c @@ -8,6 +8,7 @@ #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/highmem.h> +#include <linux/pagemap.h> #include <asm/shmparam.h> #include <asm/tlbflush.h> diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index eb5d338657d1..bcb485620a05 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c @@ -13,6 +13,7 @@ #include <linux/init.h> #include <linux/mm.h> #include <linux/highmem.h> +#include <linux/pagemap.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c index 93ff0097f00b..fb688003d156 100644 --- a/arch/arm/mm/dump.c +++ b/arch/arm/mm/dump.c @@ -420,7 +420,7 @@ void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info) note_page(&st, 0, 0, 0, NULL); } -static void ptdump_initialize(void) +static void __init ptdump_initialize(void) { unsigned i, j; @@ -466,7 +466,7 @@ void ptdump_check_wx(void) pr_info("Checked W+X mappings: passed, no W+X pages found\n"); } -static int ptdump_init(void) +static int __init ptdump_init(void) { ptdump_initialize(); ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables"); diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 828a2561b229..9d4744a632c6 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -301,7 +301,11 @@ static void __init free_highpages(void) void __init mem_init(void) { #ifdef CONFIG_ARM_LPAE - swiotlb_init(1); + if (swiotlb_force == SWIOTLB_FORCE || + max_pfn > arm_dma_pfn_limit) + swiotlb_init(1); + else + swiotlb_force = SWIOTLB_NO_FORCE; #endif set_max_mapnr(pfn_to_page(max_pfn) - mem_map); @@ -316,8 +320,6 @@ void __init mem_init(void) free_highpages(); - mem_init_print_info(NULL); - /* * Check boundaries twice: Some fundamental inconsistencies can * be detected at build time already. 
@@ -487,33 +489,12 @@ static int __mark_rodata_ro(void *unused) return 0; } -static int kernel_set_to_readonly __read_mostly; - void mark_rodata_ro(void) { - kernel_set_to_readonly = 1; stop_machine(__mark_rodata_ro, NULL, NULL); debug_checkwx(); } -void set_kernel_text_rw(void) -{ - if (!kernel_set_to_readonly) - return; - - set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false, - current->active_mm); -} - -void set_kernel_text_ro(void) -{ - if (!kernel_set_to_readonly) - return; - - set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true, - current->active_mm); -} - #else static inline void fix_kernmem_perms(void) { } #endif /* CONFIG_STRICT_KERNEL_RWX */ diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index a25b660c3017..c1e12aab67b8 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -387,8 +387,7 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr); /* Make sure fixmap region does not exceed available allocation. */ - BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) > - FIXADDR_END); + BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) < FIXADDR_START); BUG_ON(idx >= __end_of_fixed_addresses); /* we only support device mappings until pgprot_kernel has been set */ diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c index 88950e41a3a9..59d916ccdf25 100644 --- a/arch/arm/mm/pmsa-v7.c +++ b/arch/arm/mm/pmsa-v7.c @@ -235,6 +235,7 @@ void __init pmsav7_adjust_lowmem_bounds(void) phys_addr_t mem_end; phys_addr_t reg_start, reg_end; unsigned int mem_max_regions; + bool first = true; int num; u64 i; @@ -263,7 +264,7 @@ void __init pmsav7_adjust_lowmem_bounds(void) #endif for_each_mem_range(i, ®_start, ®_end) { - if (i == 0) { + if (first) { phys_addr_t phys_offset = PHYS_OFFSET; /* @@ -275,6 +276,7 @@ void __init pmsav7_adjust_lowmem_bounds(void) mem_start = reg_start; mem_end = reg_end; specified_mem_size = mem_end - mem_start; + first = false; } else { /* * memblock auto merges contiguous blocks, remove diff --git a/arch/arm/mm/pmsa-v8.c b/arch/arm/mm/pmsa-v8.c index 2de019f7503e..8359748a19a1 100644 --- a/arch/arm/mm/pmsa-v8.c +++ b/arch/arm/mm/pmsa-v8.c @@ -95,10 +95,11 @@ void __init pmsav8_adjust_lowmem_bounds(void) { phys_addr_t mem_end; phys_addr_t reg_start, reg_end; + bool first = true; u64 i; for_each_mem_range(i, ®_start, ®_end) { - if (i == 0) { + if (first) { phys_addr_t phys_offset = PHYS_OFFSET; /* @@ -107,6 +108,7 @@ void __init pmsav8_adjust_lowmem_bounds(void) if (reg_start != phys_offset) panic("First memory bank must be contiguous from PHYS_OFFSET"); mem_end = reg_end; + first = false; } else { /* * memblock auto merges contiguous blocks, remove diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 28c9d32fa99a..26d726a08a34 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -256,6 +256,20 @@ ENDPROC(cpu_pj4b_do_resume) #endif + @ + @ Invoke the v7_invalidate_l1() function, which adheres to the AAPCS + @ rules, and so it may corrupt registers that we need to preserve. 
+ @ + .macro do_invalidate_l1 + mov r6, r1 + mov r7, r2 + mov r10, lr + bl v7_invalidate_l1 @ corrupts {r0-r3, ip, lr} + mov r1, r6 + mov r2, r7 + mov lr, r10 + .endm + /* * __v7_setup * @@ -277,6 +291,7 @@ __v7_ca5mp_setup: __v7_ca9mp_setup: __v7_cr7mp_setup: __v7_cr8mp_setup: + do_invalidate_l1 mov r10, #(1 << 0) @ Cache/TLB ops broadcasting b 1f __v7_ca7mp_setup: @@ -284,13 +299,9 @@ __v7_ca12mp_setup: __v7_ca15mp_setup: __v7_b15mp_setup: __v7_ca17mp_setup: + do_invalidate_l1 mov r10, #0 -1: adr r0, __v7_setup_stack_ptr - ldr r12, [r0] - add r12, r12, r0 @ the local stack - stmia r12, {r1-r6, lr} @ v7_invalidate_l1 touches r0-r6 - bl v7_invalidate_l1 - ldmia r12, {r1-r6, lr} +1: #ifdef CONFIG_SMP orr r10, r10, #(1 << 6) @ Enable SMP/nAMP mode ALT_SMP(mrc p15, 0, r0, c1, c0, 1) @@ -471,12 +482,7 @@ __v7_pj4b_setup: #endif /* CONFIG_CPU_PJ4B */ __v7_setup: - adr r0, __v7_setup_stack_ptr - ldr r12, [r0] - add r12, r12, r0 @ the local stack - stmia r12, {r1-r6, lr} @ v7_invalidate_l1 touches r0-r6 - bl v7_invalidate_l1 - ldmia r12, {r1-r6, lr} + do_invalidate_l1 __v7_setup_cont: and r0, r9, #0xff000000 @ ARM? @@ -548,17 +554,8 @@ __errata_finish: orr r0, r0, r6 @ set them THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions ret lr @ return to head.S:__ret - - .align 2 -__v7_setup_stack_ptr: - .word PHYS_RELATIVE(__v7_setup_stack, .) ENDPROC(__v7_setup) - .bss - .align 2 -__v7_setup_stack: - .space 4 * 7 @ 7 registers - __INITDATA .weak cpu_v7_bugs_init diff --git a/arch/arm/mm/ptdump_debugfs.c b/arch/arm/mm/ptdump_debugfs.c index 598b636615a2..318de969ae0f 100644 --- a/arch/arm/mm/ptdump_debugfs.c +++ b/arch/arm/mm/ptdump_debugfs.c @@ -11,20 +11,9 @@ static int ptdump_show(struct seq_file *m, void *v) ptdump_walk_pgd(m, info); return 0; } +DEFINE_SHOW_ATTRIBUTE(ptdump); -static int ptdump_open(struct inode *inode, struct file *file) -{ - return single_open(file, ptdump_show, inode->i_private); -} - -static const struct file_operations ptdump_fops = { - .open = ptdump_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -void ptdump_debugfs_register(struct ptdump_info *info, const char *name) +void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name) { debugfs_create_file(name, 0400, NULL, info, &ptdump_fops); } diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c index 1eb59003bdec..9f11de46aaa9 100644 --- a/arch/arm/plat-omap/dma.c +++ b/arch/arm/plat-omap/dma.c @@ -85,7 +85,7 @@ static int dma_lch_count; static int dma_chan_count; static int omap_dma_reserve_channels; -static spinlock_t dma_chan_lock; +static DEFINE_SPINLOCK(dma_chan_lock); static struct omap_dma_lch *dma_chan; static inline void disable_lnk(int lch); @@ -902,7 +902,6 @@ static int omap_system_dma_probe(struct platform_device *pdev) if (!dma_chan) return -ENOMEM; - spin_lock_init(&dma_chan_lock); for (ch = 0; ch < dma_chan_count; ch++) { omap_clear_dma(ch); diff --git a/arch/arm/probes/kprobes/test-arm.c b/arch/arm/probes/kprobes/test-arm.c index 977369f1aa48..a0dae35ffacd 100644 --- a/arch/arm/probes/kprobes/test-arm.c +++ b/arch/arm/probes/kprobes/test-arm.c @@ -55,25 +55,25 @@ void kprobe_arm_test_cases(void) TEST_GROUP("Data-processing (register), (register-shifted register), (immediate)") #define _DATA_PROCESSING_DNM(op,s,val) \ - TEST_RR( op "eq" s " r0, r",1, VAL1,", r",2, val, "") \ - TEST_RR( op "ne" s " r1, r",1, VAL1,", r",2, val, ", lsl #3") \ - TEST_RR( op "cs" s " r2, r",3, VAL1,", r",2, val, ", lsr #4") \ - TEST_RR( op "cc" s " r3, r",3, VAL1,", 
r",2, val, ", asr #5") \ - TEST_RR( op "mi" s " r4, r",5, VAL1,", r",2, N(val),", asr #6") \ - TEST_RR( op "pl" s " r5, r",5, VAL1,", r",2, val, ", ror #7") \ - TEST_RR( op "vs" s " r6, r",7, VAL1,", r",2, val, ", rrx") \ - TEST_R( op "vc" s " r6, r",7, VAL1,", pc, lsl #3") \ - TEST_R( op "vc" s " r6, r",7, VAL1,", sp, lsr #4") \ - TEST_R( op "vc" s " r6, pc, r",7, VAL1,", asr #5") \ - TEST_R( op "vc" s " r6, sp, r",7, VAL1,", ror #6") \ - TEST_RRR( op "hi" s " r8, r",9, VAL1,", r",14,val, ", lsl r",0, 3,"")\ - TEST_RRR( op "ls" s " r9, r",9, VAL1,", r",14,val, ", lsr r",7, 4,"")\ - TEST_RRR( op "ge" s " r10, r",11,VAL1,", r",14,val, ", asr r",7, 5,"")\ - TEST_RRR( op "lt" s " r11, r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")\ - TEST_RR( op "gt" s " r12, r13" ", r",14,val, ", ror r",14,7,"")\ - TEST_RR( op "le" s " r14, r",0, val, ", r13" ", lsl r",14,8,"")\ - TEST_R( op "eq" s " r0, r",11,VAL1,", #0xf5") \ - TEST_R( op "ne" s " r11, r",0, VAL1,", #0xf5000000") \ + TEST_RR( op s "eq r0, r",1, VAL1,", r",2, val, "") \ + TEST_RR( op s "ne r1, r",1, VAL1,", r",2, val, ", lsl #3") \ + TEST_RR( op s "cs r2, r",3, VAL1,", r",2, val, ", lsr #4") \ + TEST_RR( op s "cc r3, r",3, VAL1,", r",2, val, ", asr #5") \ + TEST_RR( op s "mi r4, r",5, VAL1,", r",2, N(val),", asr #6") \ + TEST_RR( op s "pl r5, r",5, VAL1,", r",2, val, ", ror #7") \ + TEST_RR( op s "vs r6, r",7, VAL1,", r",2, val, ", rrx") \ + TEST_R( op s "vc r6, r",7, VAL1,", pc, lsl #3") \ + TEST_R( op s "vc r6, r",7, VAL1,", sp, lsr #4") \ + TEST_R( op s "vc r6, pc, r",7, VAL1,", asr #5") \ + TEST_R( op s "vc r6, sp, r",7, VAL1,", ror #6") \ + TEST_RRR( op s "hi r8, r",9, VAL1,", r",14,val, ", lsl r",0, 3,"")\ + TEST_RRR( op s "ls r9, r",9, VAL1,", r",14,val, ", lsr r",7, 4,"")\ + TEST_RRR( op s "ge r10, r",11,VAL1,", r",14,val, ", asr r",7, 5,"")\ + TEST_RRR( op s "lt r11, r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")\ + TEST_RR( op s "gt r12, r13" ", r",14,val, ", ror r",14,7,"")\ + TEST_RR( op s "le r14, r",0, val, ", r13" ", lsl r",14,8,"")\ + TEST_R( op s "eq r0, r",11,VAL1,", #0xf5") \ + TEST_R( op s "ne r11, r",0, VAL1,", #0xf5000000") \ TEST_R( op s " r7, r",8, VAL2,", #0x000af000") \ TEST( op s " r4, pc" ", #0x00005a00") @@ -104,23 +104,23 @@ void kprobe_arm_test_cases(void) TEST_R( op " r",8, VAL2,", #0x000af000") #define _DATA_PROCESSING_DM(op,s,val) \ - TEST_R( op "eq" s " r0, r",1, val, "") \ - TEST_R( op "ne" s " r1, r",1, val, ", lsl #3") \ - TEST_R( op "cs" s " r2, r",3, val, ", lsr #4") \ - TEST_R( op "cc" s " r3, r",3, val, ", asr #5") \ - TEST_R( op "mi" s " r4, r",5, N(val),", asr #6") \ - TEST_R( op "pl" s " r5, r",5, val, ", ror #7") \ - TEST_R( op "vs" s " r6, r",10,val, ", rrx") \ - TEST( op "vs" s " r7, pc, lsl #3") \ - TEST( op "vs" s " r7, sp, lsr #4") \ - TEST_RR( op "vc" s " r8, r",7, val, ", lsl r",0, 3,"") \ - TEST_RR( op "hi" s " r9, r",9, val, ", lsr r",7, 4,"") \ - TEST_RR( op "ls" s " r10, r",9, val, ", asr r",7, 5,"") \ - TEST_RR( op "ge" s " r11, r",11,N(val),", asr r",7, 6,"") \ - TEST_RR( op "lt" s " r12, r",11,val, ", ror r",14,7,"") \ - TEST_R( op "gt" s " r14, r13" ", lsl r",14,8,"") \ - TEST( op "eq" s " r0, #0xf5") \ - TEST( op "ne" s " r11, #0xf5000000") \ + TEST_R( op s "eq r0, r",1, val, "") \ + TEST_R( op s "ne r1, r",1, val, ", lsl #3") \ + TEST_R( op s "cs r2, r",3, val, ", lsr #4") \ + TEST_R( op s "cc r3, r",3, val, ", asr #5") \ + TEST_R( op s "mi r4, r",5, N(val),", asr #6") \ + TEST_R( op s "pl r5, r",5, val, ", ror #7") \ + TEST_R( op s "vs r6, r",10,val, ", rrx") \ + TEST( op s "vs 
r7, pc, lsl #3") \ + TEST( op s "vs r7, sp, lsr #4") \ + TEST_RR( op s "vc r8, r",7, val, ", lsl r",0, 3,"") \ + TEST_RR( op s "hi r9, r",9, val, ", lsr r",7, 4,"") \ + TEST_RR( op s "ls r10, r",9, val, ", asr r",7, 5,"") \ + TEST_RR( op s "ge r11, r",11,N(val),", asr r",7, 6,"") \ + TEST_RR( op s "lt r12, r",11,val, ", ror r",14,7,"") \ + TEST_R( op s "gt r14, r13" ", lsl r",14,8,"") \ + TEST( op s "eq r0, #0xf5") \ + TEST( op s "ne r11, #0xf5000000") \ TEST( op s " r7, #0x000af000") \ TEST( op s " r4, #0x00005a00") @@ -166,10 +166,10 @@ void kprobe_arm_test_cases(void) /* Data-processing with PC as a target and status registers updated */ TEST_UNSUPPORTED("movs pc, r1") - TEST_UNSUPPORTED("movs pc, r1, lsl r2") + TEST_UNSUPPORTED(__inst_arm(0xe1b0f211) " @movs pc, r1, lsl r2") TEST_UNSUPPORTED("movs pc, #0x10000") TEST_UNSUPPORTED("adds pc, lr, r1") - TEST_UNSUPPORTED("adds pc, lr, r1, lsl r2") + TEST_UNSUPPORTED(__inst_arm(0xe09ef211) " @adds pc, lr, r1, lsl r2") TEST_UNSUPPORTED("adds pc, lr, #4") /* Data-processing with SP as target */ @@ -352,7 +352,7 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe000029f) " @ mul r0, pc, r2") TEST_UNSUPPORTED(__inst_arm(0xe0000f91) " @ mul r0, r1, pc") TEST_RR( "muls r0, r",1, VAL1,", r",2, VAL2,"") - TEST_RR( "mullss r7, r",8, VAL2,", r",9, VAL2,"") + TEST_RR( "mulsls r7, r",8, VAL2,", r",9, VAL2,"") TEST_R( "muls lr, r",4, VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe01f0291) " @ muls pc, r1, r2") @@ -361,7 +361,7 @@ void kprobe_arm_test_cases(void) TEST_RR( "mla lr, r",1, VAL2,", r",2, VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe02f3291) " @ mla pc, r1, r2, r3") TEST_RRR( "mlas r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") - TEST_RRR( "mlahis r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") + TEST_RRR( "mlashi r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RR( "mlas lr, r",1, VAL2,", r",2, VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe03f3291) " @ mlas pc, r1, r2, r3") @@ -394,7 +394,7 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe081f392) " @ umull pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe08f1392) " @ umull r1, pc, r2, r3") TEST_RR( "umulls r0, r1, r",2, VAL1,", r",3, VAL2,"") - TEST_RR( "umulllss r7, r8, r",9, VAL2,", r",10, VAL1,"") + TEST_RR( "umullsls r7, r8, r",9, VAL2,", r",10, VAL1,"") TEST_R( "umulls lr, r12, r",11,VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe091f392) " @ umulls pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe09f1392) " @ umulls r1, pc, r2, r3") @@ -405,7 +405,7 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe0af1392) " @ umlal pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0a1f392) " @ umlal r1, pc, r2, r3") TEST_RRRR( "umlals r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) - TEST_RRRR( "umlalles r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) + TEST_RRRR( "umlalsle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRR( "umlals r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13") TEST_UNSUPPORTED(__inst_arm(0xe0bf1392) " @ umlals pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0b1f392) " @ umlals r1, pc, r2, r3") @@ -416,7 +416,7 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe0c1f392) " @ smull pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0cf1392) " @ smull r1, pc, r2, r3") TEST_RR( "smulls r0, r1, r",2, VAL1,", r",3, VAL2,"") - TEST_RR( "smulllss r7, r8, r",9, VAL2,", r",10, VAL1,"") + TEST_RR( "smullsls r7, r8, r",9, VAL2,", r",10, VAL1,"") TEST_R( "smulls lr, r12, r",11,VAL3,", r13") 
TEST_UNSUPPORTED(__inst_arm(0xe0d1f392) " @ smulls pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0df1392) " @ smulls r1, pc, r2, r3") @@ -427,7 +427,7 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe0ef1392) " @ smlal pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0e1f392) " @ smlal r1, pc, r2, r3") TEST_RRRR( "smlals r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) - TEST_RRRR( "smlalles r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) + TEST_RRRR( "smlalsle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRR( "smlals r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13") TEST_UNSUPPORTED(__inst_arm(0xe0ff1392) " @ smlals pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0f0f392) " @ smlals r0, pc, r2, r3") @@ -450,7 +450,7 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe10f0091) " @ swp r0, r1, [pc]") #if __LINUX_ARM_ARCH__ < 6 TEST_RP("swpb lr, r",7,VAL2,", [r",8,0,"]") - TEST_R( "swpvsb r0, r",1,VAL1,", [sp]") + TEST_R( "swpbvs r0, r",1,VAL1,", [sp]") #else TEST_UNSUPPORTED(__inst_arm(0xe148e097) " @ swpb lr, r7, [r8]") TEST_UNSUPPORTED(__inst_arm(0x614d0091) " @ swpvsb r0, r1, [sp]") @@ -477,11 +477,11 @@ void kprobe_arm_test_cases(void) TEST_GROUP("Extra load/store instructions") TEST_RPR( "strh r",0, VAL1,", [r",1, 48,", -r",2, 24,"]") - TEST_RPR( "streqh r",14,VAL2,", [r",11,0, ", r",12, 48,"]") - TEST_UNSUPPORTED( "streqh r14, [r13, r12]") - TEST_UNSUPPORTED( "streqh r14, [r12, r13]") + TEST_RPR( "strheq r",14,VAL2,", [r",11,0, ", r",12, 48,"]") + TEST_UNSUPPORTED( "strheq r14, [r13, r12]") + TEST_UNSUPPORTED( "strheq r14, [r12, r13]") TEST_RPR( "strh r",1, VAL1,", [r",2, 24,", r",3, 48,"]!") - TEST_RPR( "strneh r",12,VAL2,", [r",11,48,", -r",10,24,"]!") + TEST_RPR( "strhne r",12,VAL2,", [r",11,48,", -r",10,24,"]!") TEST_RPR( "strh r",2, VAL1,", [r",3, 24,"], r",4, 48,"") TEST_RPR( "strh r",10,VAL2,", [r",9, 48,"], -r",11,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1afc0ba) " @ strh r12, [pc, r10]!") @@ -489,9 +489,9 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe089a0bf) " @ strh r10, [r9], pc") TEST_PR( "ldrh r0, [r",0, 48,", -r",2, 24,"]") - TEST_PR( "ldrcsh r14, [r",13,0, ", r",12, 48,"]") + TEST_PR( "ldrhcs r14, [r",13,0, ", r",12, 48,"]") TEST_PR( "ldrh r1, [r",2, 24,", r",3, 48,"]!") - TEST_PR( "ldrcch r12, [r",11,48,", -r",10,24,"]!") + TEST_PR( "ldrhcc r12, [r",11,48,", -r",10,24,"]!") TEST_PR( "ldrh r2, [r",3, 24,"], r",4, 48,"") TEST_PR( "ldrh r10, [r",9, 48,"], -r",11,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1bfc0ba) " @ ldrh r12, [pc, r10]!") @@ -499,9 +499,9 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe099a0bf) " @ ldrh r10, [r9], pc") TEST_RP( "strh r",0, VAL1,", [r",1, 24,", #-2]") - TEST_RP( "strmih r",14,VAL2,", [r",13,0, ", #2]") + TEST_RP( "strhmi r",14,VAL2,", [r",13,0, ", #2]") TEST_RP( "strh r",1, VAL1,", [r",2, 24,", #4]!") - TEST_RP( "strplh r",12,VAL2,", [r",11,24,", #-4]!") + TEST_RP( "strhpl r",12,VAL2,", [r",11,24,", #-4]!") TEST_RP( "strh r",2, VAL1,", [r",3, 24,"], #48") TEST_RP( "strh r",10,VAL2,", [r",9, 64,"], #-48") TEST_RP( "strh r",3, VAL1,", [r",13,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!") @@ -511,9 +511,9 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe0c9f3b0) " @ strh pc, [r9], #48") TEST_P( "ldrh r0, [r",0, 24,", #-2]") - TEST_P( "ldrvsh r14, [r",13,0, ", #2]") + TEST_P( "ldrhvs r14, [r",13,0, ", #2]") TEST_P( "ldrh r1, [r",2, 24,", #4]!") - TEST_P( "ldrvch r12, [r",11,24,", #-4]!") + TEST_P( "ldrhvc r12, [r",11,24,", 
#-4]!") TEST_P( "ldrh r2, [r",3, 24,"], #48") TEST_P( "ldrh r10, [r",9, 64,"], #-48") TEST( "ldrh r0, [pc, #0]") @@ -521,18 +521,18 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe0d9f3b0) " @ ldrh pc, [r9], #48") TEST_PR( "ldrsb r0, [r",0, 48,", -r",2, 24,"]") - TEST_PR( "ldrhisb r14, [r",13,0,", r",12, 48,"]") + TEST_PR( "ldrsbhi r14, [r",13,0,", r",12, 48,"]") TEST_PR( "ldrsb r1, [r",2, 24,", r",3, 48,"]!") - TEST_PR( "ldrlssb r12, [r",11,48,", -r",10,24,"]!") + TEST_PR( "ldrsbls r12, [r",11,48,", -r",10,24,"]!") TEST_PR( "ldrsb r2, [r",3, 24,"], r",4, 48,"") TEST_PR( "ldrsb r10, [r",9, 48,"], -r",11,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1bfc0da) " @ ldrsb r12, [pc, r10]!") TEST_UNSUPPORTED(__inst_arm(0xe099f0db) " @ ldrsb pc, [r9], r11") TEST_P( "ldrsb r0, [r",0, 24,", #-1]") - TEST_P( "ldrgesb r14, [r",13,0, ", #1]") + TEST_P( "ldrsbge r14, [r",13,0, ", #1]") TEST_P( "ldrsb r1, [r",2, 24,", #4]!") - TEST_P( "ldrltsb r12, [r",11,24,", #-4]!") + TEST_P( "ldrsblt r12, [r",11,24,", #-4]!") TEST_P( "ldrsb r2, [r",3, 24,"], #48") TEST_P( "ldrsb r10, [r",9, 64,"], #-48") TEST( "ldrsb r0, [pc, #0]") @@ -540,18 +540,18 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe0d9f3d0) " @ ldrsb pc, [r9], #48") TEST_PR( "ldrsh r0, [r",0, 48,", -r",2, 24,"]") - TEST_PR( "ldrgtsh r14, [r",13,0, ", r",12, 48,"]") + TEST_PR( "ldrshgt r14, [r",13,0, ", r",12, 48,"]") TEST_PR( "ldrsh r1, [r",2, 24,", r",3, 48,"]!") - TEST_PR( "ldrlesh r12, [r",11,48,", -r",10,24,"]!") + TEST_PR( "ldrshle r12, [r",11,48,", -r",10,24,"]!") TEST_PR( "ldrsh r2, [r",3, 24,"], r",4, 48,"") TEST_PR( "ldrsh r10, [r",9, 48,"], -r",11,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1bfc0fa) " @ ldrsh r12, [pc, r10]!") TEST_UNSUPPORTED(__inst_arm(0xe099f0fb) " @ ldrsh pc, [r9], r11") TEST_P( "ldrsh r0, [r",0, 24,", #-1]") - TEST_P( "ldreqsh r14, [r",13,0 ,", #1]") + TEST_P( "ldrsheq r14, [r",13,0 ,", #1]") TEST_P( "ldrsh r1, [r",2, 24,", #4]!") - TEST_P( "ldrnesh r12, [r",11,24,", #-4]!") + TEST_P( "ldrshne r12, [r",11,24,", #-4]!") TEST_P( "ldrsh r2, [r",3, 24,"], #48") TEST_P( "ldrsh r10, [r",9, 64,"], #-48") TEST( "ldrsh r0, [pc, #0]") @@ -571,30 +571,30 @@ void kprobe_arm_test_cases(void) #if __LINUX_ARM_ARCH__ >= 5 TEST_RPR( "strd r",0, VAL1,", [r",1, 48,", -r",2,24,"]") - TEST_RPR( "strccd r",8, VAL2,", [r",11,0, ", r",12,48,"]") - TEST_UNSUPPORTED( "strccd r8, [r13, r12]") - TEST_UNSUPPORTED( "strccd r8, [r12, r13]") + TEST_RPR( "strdcc r",8, VAL2,", [r",11,0, ", r",12,48,"]") + TEST_UNSUPPORTED( "strdcc r8, [r13, r12]") + TEST_UNSUPPORTED( "strdcc r8, [r12, r13]") TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!") - TEST_RPR( "strcsd r",12,VAL2,", [r",11,48,", -r",10,24,"]!") - TEST_RPR( "strd r",2, VAL1,", [r",5, 24,"], r",4,48,"") - TEST_RPR( "strd r",10,VAL2,", [r",9, 48,"], -r",7,24,"") + TEST_RPR( "strdcs r",12,VAL2,", r13, [r",11,48,", -r",10,24,"]!") + TEST_RPR( "strd r",2, VAL1,", r3, [r",5, 24,"], r",4,48,"") + TEST_RPR( "strd r",10,VAL2,", r11, [r",9, 48,"], -r",7,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1afc0fa) " @ strd r12, [pc, r10]!") TEST_PR( "ldrd r0, [r",0, 48,", -r",2,24,"]") - TEST_PR( "ldrmid r8, [r",13,0, ", r",12,48,"]") + TEST_PR( "ldrdmi r8, [r",13,0, ", r",12,48,"]") TEST_PR( "ldrd r4, [r",2, 24,", r",3, 48,"]!") - TEST_PR( "ldrpld r6, [r",11,48,", -r",10,24,"]!") - TEST_PR( "ldrd r2, [r",5, 24,"], r",4,48,"") - TEST_PR( "ldrd r10, [r",9,48,"], -r",7,24,"") + TEST_PR( "ldrdpl r6, [r",11,48,", -r",10,24,"]!") + TEST_PR( "ldrd r2, r3, [r",5, 24,"], r",4,48,"") + TEST_PR( 
"ldrd r10, r11, [r",9,48,"], -r",7,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1afc0da) " @ ldrd r12, [pc, r10]!") TEST_UNSUPPORTED(__inst_arm(0xe089f0db) " @ ldrd pc, [r9], r11") TEST_UNSUPPORTED(__inst_arm(0xe089e0db) " @ ldrd lr, [r9], r11") TEST_UNSUPPORTED(__inst_arm(0xe089c0df) " @ ldrd r12, [r9], pc") TEST_RP( "strd r",0, VAL1,", [r",1, 24,", #-8]") - TEST_RP( "strvsd r",8, VAL2,", [r",13,0, ", #8]") + TEST_RP( "strdvs r",8, VAL2,", [r",13,0, ", #8]") TEST_RP( "strd r",4, VAL1,", [r",2, 24,", #16]!") - TEST_RP( "strvcd r",12,VAL2,", [r",11,24,", #-16]!") + TEST_RP( "strdvc r",12,VAL2,", r13, [r",11,24,", #-16]!") TEST_RP( "strd r",2, VAL1,", [r",4, 24,"], #48") TEST_RP( "strd r",10,VAL2,", [r",9, 64,"], #-48") TEST_RP( "strd r",6, VAL1,", [r",13,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!") @@ -603,9 +603,9 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe1efc3f0) " @ strd r12, [pc, #48]!") TEST_P( "ldrd r0, [r",0, 24,", #-8]") - TEST_P( "ldrhid r8, [r",13,0, ", #8]") + TEST_P( "ldrdhi r8, [r",13,0, ", #8]") TEST_P( "ldrd r4, [r",2, 24,", #16]!") - TEST_P( "ldrlsd r6, [r",11,24,", #-16]!") + TEST_P( "ldrdls r6, [r",11,24,", #-16]!") TEST_P( "ldrd r2, [r",5, 24,"], #48") TEST_P( "ldrd r10, [r",9,6,"], #-48") TEST_UNSUPPORTED(__inst_arm(0xe1efc3d0) " @ ldrd r12, [pc, #48]!") @@ -1084,63 +1084,63 @@ void kprobe_arm_test_cases(void) TEST_GROUP("Branch, branch with link, and block data transfer") TEST_P( "stmda r",0, 16*4,", {r0}") - TEST_P( "stmeqda r",4, 16*4,", {r0-r15}") - TEST_P( "stmneda r",8, 16*4,"!, {r8-r15}") + TEST_P( "stmdaeq r",4, 16*4,", {r0-r15}") + TEST_P( "stmdane r",8, 16*4,"!, {r8-r15}") TEST_P( "stmda r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_P( "stmda r",13,0, "!, {pc}") TEST_P( "ldmda r",0, 16*4,", {r0}") - TEST_BF_P("ldmcsda r",4, 15*4,", {r0-r15}") - TEST_BF_P("ldmccda r",7, 15*4,"!, {r8-r15}") + TEST_BF_P("ldmdacs r",4, 15*4,", {r0-r15}") + TEST_BF_P("ldmdacc r",7, 15*4,"!, {r8-r15}") TEST_P( "ldmda r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_BF_P("ldmda r",14,15*4,"!, {pc}") TEST_P( "stmia r",0, 16*4,", {r0}") - TEST_P( "stmmiia r",4, 16*4,", {r0-r15}") - TEST_P( "stmplia r",8, 16*4,"!, {r8-r15}") + TEST_P( "stmiami r",4, 16*4,", {r0-r15}") + TEST_P( "stmiapl r",8, 16*4,"!, {r8-r15}") TEST_P( "stmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_P( "stmia r",14,0, "!, {pc}") TEST_P( "ldmia r",0, 16*4,", {r0}") - TEST_BF_P("ldmvsia r",4, 0, ", {r0-r15}") - TEST_BF_P("ldmvcia r",7, 8*4, "!, {r8-r15}") + TEST_BF_P("ldmiavs r",4, 0, ", {r0-r15}") + TEST_BF_P("ldmiavc r",7, 8*4, "!, {r8-r15}") TEST_P( "ldmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_BF_P("ldmia r",14,15*4,"!, {pc}") TEST_P( "stmdb r",0, 16*4,", {r0}") - TEST_P( "stmhidb r",4, 16*4,", {r0-r15}") - TEST_P( "stmlsdb r",8, 16*4,"!, {r8-r15}") + TEST_P( "stmdbhi r",4, 16*4,", {r0-r15}") + TEST_P( "stmdbls r",8, 16*4,"!, {r8-r15}") TEST_P( "stmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_P( "stmdb r",13,4, "!, {pc}") TEST_P( "ldmdb r",0, 16*4,", {r0}") - TEST_BF_P("ldmgedb r",4, 16*4,", {r0-r15}") - TEST_BF_P("ldmltdb r",7, 16*4,"!, {r8-r15}") + TEST_BF_P("ldmdbge r",4, 16*4,", {r0-r15}") + TEST_BF_P("ldmdblt r",7, 16*4,"!, {r8-r15}") TEST_P( "ldmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_BF_P("ldmdb r",14,16*4,"!, {pc}") TEST_P( "stmib r",0, 16*4,", {r0}") - TEST_P( "stmgtib r",4, 16*4,", {r0-r15}") - TEST_P( "stmleib r",8, 16*4,"!, {r8-r15}") + TEST_P( "stmibgt r",4, 16*4,", {r0-r15}") + TEST_P( "stmible r",8, 16*4,"!, {r8-r15}") TEST_P( "stmib 
r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_P( "stmib r",13,-4, "!, {pc}") TEST_P( "ldmib r",0, 16*4,", {r0}") - TEST_BF_P("ldmeqib r",4, -4,", {r0-r15}") - TEST_BF_P("ldmneib r",7, 7*4,"!, {r8-r15}") + TEST_BF_P("ldmibeq r",4, -4,", {r0-r15}") + TEST_BF_P("ldmibne r",7, 7*4,"!, {r8-r15}") TEST_P( "ldmib r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_BF_P("ldmib r",14,14*4,"!, {pc}") TEST_P( "stmdb r",13,16*4,"!, {r3-r12,lr}") - TEST_P( "stmeqdb r",13,16*4,"!, {r3-r12}") - TEST_P( "stmnedb r",2, 16*4,", {r3-r12,lr}") + TEST_P( "stmdbeq r",13,16*4,"!, {r3-r12}") + TEST_P( "stmdbne r",2, 16*4,", {r3-r12,lr}") TEST_P( "stmdb r",13,16*4,"!, {r2-r12,lr}") TEST_P( "stmdb r",0, 16*4,", {r0-r12}") TEST_P( "stmdb r",0, 16*4,", {r0-r12,lr}") TEST_BF_P("ldmia r",13,5*4, "!, {r3-r12,pc}") - TEST_P( "ldmccia r",13,5*4, "!, {r3-r12}") - TEST_BF_P("ldmcsia r",2, 5*4, "!, {r3-r12,pc}") + TEST_P( "ldmiacc r",13,5*4, "!, {r3-r12}") + TEST_BF_P("ldmiacs r",2, 5*4, "!, {r3-r12,pc}") TEST_BF_P("ldmia r",13,4*4, "!, {r2-r12,pc}") TEST_P( "ldmia r",0, 16*4,", {r0-r12}") TEST_P( "ldmia r",0, 16*4,", {r0-r12,lr}") @@ -1174,80 +1174,80 @@ void kprobe_arm_test_cases(void) #define TEST_COPROCESSOR(code) TEST_UNSUPPORTED(code) #define COPROCESSOR_INSTRUCTIONS_ST_LD(two,cc) \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #4]") \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #-4]") \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #4]!") \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #-4]!") \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13], #4") \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13], #-4") \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13], {1}") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #4]") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #-4]") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #4]!") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #-4]!") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13], #4") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13], #-4") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13], {1}") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #4]") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #-4]") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #4]!") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #-4]!") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13], #4") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13], #-4") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13], {1}") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #4]") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #-4]") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #4]!") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #-4]!") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13], #4") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13], #-4") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13], {1}") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13, #4]") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13, #-4]") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13, #4]!") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13, #-4]!") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13], #4") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13], #-4") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13], {1}") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13, #4]") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13, #-4]") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13, #4]!") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13, #-4]!") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13], #4") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13], #-4") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13], {1}") \ + TEST_COPROCESSOR("ldc"two" p0, cr0, [r13, #4]") \ + 
TEST_COPROCESSOR("ldc"two" p0, cr0, [r13, #-4]") \ + TEST_COPROCESSOR("ldc"two" p0, cr0, [r13, #4]!") \ + TEST_COPROCESSOR("ldc"two" p0, cr0, [r13, #-4]!") \ + TEST_COPROCESSOR("ldc"two" p0, cr0, [r13], #4") \ + TEST_COPROCESSOR("ldc"two" p0, cr0, [r13], #-4") \ + TEST_COPROCESSOR("ldc"two" p0, cr0, [r13], {1}") \ + TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13, #4]") \ + TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13, #-4]") \ + TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13, #4]!") \ + TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13, #-4]!") \ + TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13], #4") \ + TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13], #-4") \ + TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13], {1}") \ \ - TEST_COPROCESSOR( "stc"two" 0, cr0, [r15, #4]") \ - TEST_COPROCESSOR( "stc"two" 0, cr0, [r15, #-4]") \ + TEST_COPROCESSOR( "stc"two" p0, cr0, [r15, #4]") \ + TEST_COPROCESSOR( "stc"two" p0, cr0, [r15, #-4]") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##daf0001) " @ stc"two" 0, cr0, [r15, #4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##d2f0001) " @ stc"two" 0, cr0, [r15, #-4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##caf0001) " @ stc"two" 0, cr0, [r15], #4") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c2f0001) " @ stc"two" 0, cr0, [r15], #-4") \ - TEST_COPROCESSOR( "stc"two" 0, cr0, [r15], {1}") \ - TEST_COPROCESSOR( "stc"two"l 0, cr0, [r15, #4]") \ - TEST_COPROCESSOR( "stc"two"l 0, cr0, [r15, #-4]") \ + TEST_COPROCESSOR( "stc"two" p0, cr0, [r15], {1}") \ + TEST_COPROCESSOR( "stc"two"l p0, cr0, [r15, #4]") \ + TEST_COPROCESSOR( "stc"two"l p0, cr0, [r15, #-4]") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##def0001) " @ stc"two"l 0, cr0, [r15, #4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##d6f0001) " @ stc"two"l 0, cr0, [r15, #-4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##cef0001) " @ stc"two"l 0, cr0, [r15], #4") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c6f0001) " @ stc"two"l 0, cr0, [r15], #-4") \ - TEST_COPROCESSOR( "stc"two"l 0, cr0, [r15], {1}") \ - TEST_COPROCESSOR( "ldc"two" 0, cr0, [r15, #4]") \ - TEST_COPROCESSOR( "ldc"two" 0, cr0, [r15, #-4]") \ + TEST_COPROCESSOR( "stc"two"l p0, cr0, [r15], {1}") \ + TEST_COPROCESSOR( "ldc"two" p0, cr0, [r15, #4]") \ + TEST_COPROCESSOR( "ldc"two" p0, cr0, [r15, #-4]") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##dbf0001) " @ ldc"two" 0, cr0, [r15, #4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##d3f0001) " @ ldc"two" 0, cr0, [r15, #-4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##cbf0001) " @ ldc"two" 0, cr0, [r15], #4") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c3f0001) " @ ldc"two" 0, cr0, [r15], #-4") \ - TEST_COPROCESSOR( "ldc"two" 0, cr0, [r15], {1}") \ - TEST_COPROCESSOR( "ldc"two"l 0, cr0, [r15, #4]") \ - TEST_COPROCESSOR( "ldc"two"l 0, cr0, [r15, #-4]") \ + TEST_COPROCESSOR( "ldc"two" p0, cr0, [r15], {1}") \ + TEST_COPROCESSOR( "ldc"two"l p0, cr0, [r15, #4]") \ + TEST_COPROCESSOR( "ldc"two"l p0, cr0, [r15, #-4]") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##dff0001) " @ ldc"two"l 0, cr0, [r15, #4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##d7f0001) " @ ldc"two"l 0, cr0, [r15, #-4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##cff0001) " @ ldc"two"l 0, cr0, [r15], #4") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c7f0001) " @ ldc"two"l 0, cr0, [r15], #-4") \ - TEST_COPROCESSOR( "ldc"two"l 0, cr0, [r15], {1}") + TEST_COPROCESSOR( "ldc"two"l p0, cr0, [r15], {1}") #define COPROCESSOR_INSTRUCTIONS_MC_MR(two,cc) \ \ - TEST_COPROCESSOR( "mcrr"two" 0, 15, r0, r14, cr0") \ - TEST_COPROCESSOR( "mcrr"two" 15, 0, r14, r0, cr15") \ + TEST_COPROCESSOR( "mcrr"two" p0, 15, r0, r14, cr0") \ + TEST_COPROCESSOR( "mcrr"two" p15, 0, r14, r0, 
cr15") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c4f00f0) " @ mcrr"two" 0, 15, r0, r15, cr0") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c40ff0f) " @ mcrr"two" 15, 0, r15, r0, cr15") \ - TEST_COPROCESSOR( "mrrc"two" 0, 15, r0, r14, cr0") \ - TEST_COPROCESSOR( "mrrc"two" 15, 0, r14, r0, cr15") \ + TEST_COPROCESSOR( "mrrc"two" p0, 15, r0, r14, cr0") \ + TEST_COPROCESSOR( "mrrc"two" p15, 0, r14, r0, cr15") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c5f00f0) " @ mrrc"two" 0, 15, r0, r15, cr0") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c50ff0f) " @ mrrc"two" 15, 0, r15, r0, cr15") \ - TEST_COPROCESSOR( "cdp"two" 15, 15, cr15, cr15, cr15, 7") \ - TEST_COPROCESSOR( "cdp"two" 0, 0, cr0, cr0, cr0, 0") \ - TEST_COPROCESSOR( "mcr"two" 15, 7, r15, cr15, cr15, 7") \ - TEST_COPROCESSOR( "mcr"two" 0, 0, r0, cr0, cr0, 0") \ - TEST_COPROCESSOR( "mrc"two" 15, 7, r15, cr15, cr15, 7") \ - TEST_COPROCESSOR( "mrc"two" 0, 0, r0, cr0, cr0, 0") + TEST_COPROCESSOR( "cdp"two" p15, 15, cr15, cr15, cr15, 7") \ + TEST_COPROCESSOR( "cdp"two" p0, 0, cr0, cr0, cr0, 0") \ + TEST_COPROCESSOR( "mcr"two" p15, 7, r15, cr15, cr15, 7") \ + TEST_COPROCESSOR( "mcr"two" p0, 0, r0, cr0, cr0, 0") \ + TEST_COPROCESSOR( "mrc"two" p15, 7, r14, cr15, cr15, 7") \ + TEST_COPROCESSOR( "mrc"two" p0, 0, r0, cr0, cr0, 0") COPROCESSOR_INSTRUCTIONS_ST_LD("",e) #if __LINUX_ARM_ARCH__ >= 5 diff --git a/arch/arm/probes/kprobes/test-core.h b/arch/arm/probes/kprobes/test-core.h index 19a5b2add41e..f1d5583e7bbb 100644 --- a/arch/arm/probes/kprobes/test-core.h +++ b/arch/arm/probes/kprobes/test-core.h @@ -108,6 +108,7 @@ struct test_arg_end { #define TESTCASE_START(title) \ __asm__ __volatile__ ( \ + ".syntax unified \n\t" \ "bl __kprobes_test_case_start \n\t" \ ".pushsection .rodata \n\t" \ "10: \n\t" \ diff --git a/arch/arm/probes/uprobes/core.c b/arch/arm/probes/uprobes/core.c index c4b49b322e8a..f5f790c6e5f8 100644 --- a/arch/arm/probes/uprobes/core.c +++ b/arch/arm/probes/uprobes/core.c @@ -204,7 +204,7 @@ unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) static struct undef_hook uprobes_arm_break_hook = { .instr_mask = 0x0fffffff, .instr_val = (UPROBE_SWBP_ARM_INSN & 0x0fffffff), - .cpsr_mask = MODE_MASK, + .cpsr_mask = (PSR_T_BIT | MODE_MASK), .cpsr_val = USR_MODE, .fn = uprobe_trap_handler, }; @@ -212,7 +212,7 @@ static struct undef_hook uprobes_arm_break_hook = { static struct undef_hook uprobes_arm_ss_hook = { .instr_mask = 0x0fffffff, .instr_val = (UPROBE_SS_ARM_INSN & 0x0fffffff), - .cpsr_mask = MODE_MASK, + .cpsr_mask = (PSR_T_BIT | MODE_MASK), .cpsr_val = USR_MODE, .fn = uprobe_trap_handler, }; diff --git a/arch/arm/tools/Makefile b/arch/arm/tools/Makefile index 3654f979851b..87de1f63f649 100644 --- a/arch/arm/tools/Makefile +++ b/arch/arm/tools/Makefile @@ -8,16 +8,15 @@ gen := arch/$(ARCH)/include/generated kapi := $(gen)/asm uapi := $(gen)/uapi/asm -syshdr := $(srctree)/$(src)/syscallhdr.sh +syshdr := $(srctree)/scripts/syscallhdr.sh sysnr := $(srctree)/$(src)/syscallnr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +systbl := $(srctree)/scripts/syscalltbl.sh syscall := $(src)/syscall.tbl gen-y := $(gen)/calls-oabi.S gen-y += $(gen)/calls-eabi.S kapi-hdrs-y := $(kapi)/unistd-nr.h kapi-hdrs-y += $(kapi)/mach-types.h -uapi-hdrs-y := $(uapi)/unistd-common.h uapi-hdrs-y += $(uapi)/unistd-oabi.h uapi-hdrs-y += $(uapi)/unistd-eabi.h @@ -41,28 +40,21 @@ $(kapi)/mach-types.h: $(src)/gen-mach-types $(src)/mach-types FORCE $(call if_changed,gen_mach) quiet_cmd_syshdr = SYSHDR $@ - cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ - 
'$(syshdr_abi_$(basetarget))' \ - '$(syshdr_pfx_$(basetarget))' \ - '__NR_SYSCALL_BASE' + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --abis $(abis) \ + --offset __NR_SYSCALL_BASE $< $@ quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abi_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis $(abis) $< $@ quiet_cmd_sysnr = SYSNR $@ cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \ '$(syshdr_abi_$(basetarget))' -syshdr_abi_unistd-common := common -$(uapi)/unistd-common.h: $(syscall) $(syshdr) FORCE - $(call if_changed,syshdr) - -syshdr_abi_unistd-oabi := oabi +$(uapi)/unistd-oabi.h: abis := common,oabi $(uapi)/unistd-oabi.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -syshdr_abi_unistd-eabi := eabi +$(uapi)/unistd-eabi.h: abis := common,eabi $(uapi)/unistd-eabi.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) @@ -70,10 +62,10 @@ sysnr_abi_unistd-nr := common,oabi,eabi,compat $(kapi)/unistd-nr.h: $(syscall) $(sysnr) FORCE $(call if_changed,sysnr) -systbl_abi_calls-oabi := common,oabi +$(gen)/calls-oabi.S: abis := common,oabi $(gen)/calls-oabi.S: $(syscall) $(systbl) FORCE $(call if_changed,systbl) -systbl_abi_calls-eabi := common,eabi +$(gen)/calls-eabi.S: abis := common,eabi $(gen)/calls-eabi.S: $(syscall) $(systbl) FORCE $(call if_changed,systbl) diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl index dcc1191291a2..c7679d7db98b 100644 --- a/arch/arm/tools/syscall.tbl +++ b/arch/arm/tools/syscall.tbl @@ -456,3 +456,7 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr +443 common quotactl_path sys_quotactl_path +444 common landlock_create_ruleset sys_landlock_create_ruleset +445 common landlock_add_rule sys_landlock_add_rule +446 common landlock_restrict_self sys_landlock_restrict_self diff --git a/arch/arm/tools/syscallhdr.sh b/arch/arm/tools/syscallhdr.sh deleted file mode 100644 index 6b2f25cdd721..000000000000 --- a/arch/arm/tools/syscallhdr.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -prefix="$4" -offset="$5" - -fileguard=_ASM_ARM_`basename "$out" | sed \ - -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ - -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` -if echo $out | grep -q uapi; then - fileguard="_UAPI$fileguard" -fi -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - echo "#ifndef ${fileguard}" - echo "#define ${fileguard} 1" - echo "" - - while read nr abi name entry ; do - if [ -z "$offset" ]; then - echo "#define __NR_${prefix}${name} $nr" - else - echo "#define __NR_${prefix}${name} ($offset + $nr)" - fi - done - - echo "" - echo "#endif /* ${fileguard} */" -) > "$out" diff --git a/arch/arm/tools/syscalltbl.sh b/arch/arm/tools/syscalltbl.sh deleted file mode 100644 index ae7e93cfbfd3..000000000000 --- a/arch/arm/tools/syscalltbl.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - while read nr abi name entry compat; do - if [ "$abi" = "eabi" -a -n "$compat" ]; then - echo "$in: error: a compat entry for an EABI syscall ($name) makes no sense" >&2 - exit 1 - fi - - if [ -n "$entry" ]; then - if [ -z "$compat" ]; then - echo "NATIVE($nr, $entry)" - else - echo "COMPAT($nr, $entry, $compat)" - fi - fi - done -) > "$out" diff --git 
a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index 467fa225c3d0..f8f07469d259 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -135,12 +135,24 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order) return; } +int xen_swiotlb_detect(void) +{ + if (!xen_domain()) + return 0; + if (xen_feature(XENFEAT_direct_mapped)) + return 1; + /* legacy case */ + if (!xen_feature(XENFEAT_not_direct_mapped) && xen_initial_domain()) + return 1; + return 0; +} + static int __init xen_mm_init(void) { struct gnttab_cache_flush cflush; - if (!xen_initial_domain()) + if (!xen_swiotlb_detect()) return 0; - xen_swiotlb_init(1, false); + xen_swiotlb_init(); cflush.op = 0; cflush.a.dev_bus_addr = 0; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 5656e7aacd69..9f1d8566bbf9 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -11,6 +11,12 @@ config ARM64 select ACPI_PPTT if ACPI select ARCH_HAS_DEBUG_WX select ARCH_BINFMT_ELF_STATE + select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION + select ARCH_ENABLE_MEMORY_HOTPLUG + select ARCH_ENABLE_MEMORY_HOTREMOVE + select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2 + select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE + select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DMA_PREP_COHERENT @@ -67,14 +73,17 @@ config ARM64 select ARCH_KEEP_MEMBLOCK select ARCH_USE_CMPXCHG_LOCKREF select ARCH_USE_GNU_PROPERTY + select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS select ARCH_USE_SYM_ANNOTATIONS select ARCH_SUPPORTS_DEBUG_PAGEALLOC + select ARCH_SUPPORTS_HUGETLBFS select ARCH_SUPPORTS_MEMORY_FAILURE select ARCH_SUPPORTS_SHADOW_CALL_STACK if CC_HAVE_SHADOW_CALL_STACK select ARCH_SUPPORTS_LTO_CLANG if CPU_LITTLE_ENDIAN select ARCH_SUPPORTS_LTO_CLANG_THIN + select ARCH_SUPPORTS_CFI_CLANG select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG) select ARCH_SUPPORTS_NUMA_BALANCING @@ -108,9 +117,9 @@ config ARM64 select GENERIC_CPU_AUTOPROBE select GENERIC_CPU_VULNERABILITIES select GENERIC_EARLY_IOREMAP + select GENERIC_FIND_FIRST_BIT select GENERIC_IDLE_POLL_SETUP select GENERIC_IRQ_IPI - select GENERIC_IRQ_MULTI_HANDLER select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW_LEVEL @@ -138,6 +147,7 @@ config ARM64 select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_JUMP_LABEL_RELATIVE select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48) + select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE) select HAVE_ARCH_KFENCE @@ -146,6 +156,7 @@ config ARM64 select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT select HAVE_ARCH_PFN_VALID select HAVE_ARCH_PREL32_RELOCATIONS + select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_STACKLEAK select HAVE_ARCH_THREAD_STRUCT_WHITELIST @@ -159,7 +170,6 @@ config ARM64 select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL select HAVE_CONTEXT_TRACKING - select HAVE_DEBUG_BUGVERBOSE select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS select HAVE_DYNAMIC_FTRACE @@ -194,6 +204,7 @@ config ARM64 select IOMMU_DMA if IOMMU_SUPPORT select IRQ_DOMAIN select IRQ_FORCED_THREADING + select KASAN_VMALLOC if KASAN_GENERIC select MODULES_USE_ELF_RELA select NEED_DMA_MAP_STATE select NEED_SG_DMA_LENGTH @@ -208,6 +219,7 @@ config ARM64 select SWIOTLB select SYSCTL_EXCEPTION_TRACE 
select THREAD_INFO_IN_TASK + select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD help ARM 64-bit (AArch64) Linux support. @@ -303,10 +315,7 @@ config ZONE_DMA32 bool "Support DMA32 zone" if EXPERT default y -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y - -config ARCH_ENABLE_MEMORY_HOTREMOVE +config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE def_bool y config SMP @@ -521,6 +530,9 @@ config ARM64_ERRATUM_843419 If unsure, say Y. +config ARM64_LD_HAS_FIX_ERRATUM_843419 + def_bool $(ld-option,--fix-cortex-a53-843419) + config ARM64_ERRATUM_1024718 bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update" default y @@ -810,6 +822,16 @@ config QCOM_FALKOR_ERRATUM_E1041 If unsure, say Y. +config NVIDIA_CARMEL_CNP_ERRATUM + bool "NVIDIA Carmel CNP: CNP on Carmel semantically different than ARM cores" + default y + help + If CNP is enabled on Carmel cores, non-sharable TLBIs on a core will not + invalidate shared TLB entries installed by a different core, as it would + on standard ARM cores. + + If unsure, say Y. + config SOCIONEXT_SYNQUACER_PREITS bool "Socionext Synquacer: Workaround for GICv3 pre-ITS" default y @@ -1038,29 +1060,15 @@ source "kernel/Kconfig.hz" config ARCH_SPARSEMEM_ENABLE def_bool y select SPARSEMEM_VMEMMAP_ENABLE - -config ARCH_SPARSEMEM_DEFAULT - def_bool ARCH_SPARSEMEM_ENABLE - -config ARCH_SELECT_MEMORY_MODEL - def_bool ARCH_SPARSEMEM_ENABLE - -config ARCH_FLATMEM_ENABLE - def_bool !NUMA + select SPARSEMEM_VMEMMAP config HW_PERF_EVENTS def_bool y depends on ARM_PMU -config SYS_SUPPORTS_HUGETLBFS - def_bool y - -config ARCH_HAS_CACHE_LINE_SIZE +config ARCH_HAS_FILTER_PGPROT def_bool y -config ARCH_ENABLE_SPLIT_PMD_PTLOCK - def_bool y if PGTABLE_LEVELS > 2 - # Supported by clang >= 7.0 config CC_HAVE_SHADOW_CALL_STACK def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18) @@ -1096,6 +1104,7 @@ config KEXEC config KEXEC_FILE bool "kexec file based system call" select KEXEC_CORE + select HAVE_IMA_KEXEC if IMA help This is new version of kexec system call. This system call is file based and takes file descriptors as system call argument @@ -1396,10 +1405,13 @@ config ARM64_PAN config AS_HAS_LDAPR def_bool $(as-instr,.arch_extension rcpc) +config AS_HAS_LSE_ATOMICS + def_bool $(as-instr,.arch_extension lse) + config ARM64_LSE_ATOMICS bool default ARM64_USE_LSE_ATOMICS - depends on $(as-instr,.arch_extension lse) + depends on AS_HAS_LSE_ATOMICS config ARM64_USE_LSE_ATOMICS bool "Atomic instructions" @@ -1416,19 +1428,6 @@ config ARM64_USE_LSE_ATOMICS built with binutils >= 2.25 in order for the new instructions to be used. -config ARM64_VHE - bool "Enable support for Virtualization Host Extensions (VHE)" - default y - help - Virtualization Host Extensions (VHE) allow the kernel to run - directly at EL2 (instead of EL1) on processors that support - it. This leads to better performance for KVM, as they reduce - the cost of the world switch. - - Selecting this option allows the VHE feature to be detected - at runtime, and does not affect processors that do not - implement this feature. 
- endmenu menu "ARMv8.2 architectural features" @@ -1656,6 +1655,7 @@ config ARM64_MTE default y depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI depends on AS_HAS_ARMV8_5 + depends on AS_HAS_LSE_ATOMICS # Required for tag checking in the uaccess routines depends on ARM64_PAN select ARCH_USES_HIGH_VMA_FLAGS @@ -1681,10 +1681,23 @@ config ARM64_MTE endmenu +menu "ARMv8.7 architectural features" + +config ARM64_EPAN + bool "Enable support for Enhanced Privileged Access Never (EPAN)" + default y + depends on ARM64_PAN + help + Enhanced Privileged Access Never (EPAN) allows Privileged + Access Never to be used with Execute-only mappings. + + The feature is detected at runtime, and will remain disabled + if the cpu does not implement the feature. +endmenu + config ARM64_SVE bool "ARM Scalable Vector Extension support" default y - depends on !KVM || ARM64_VHE help The Scalable Vector Extension (SVE) is an extension to the AArch64 execution state which complements and extends the SIMD functionality @@ -1713,12 +1726,6 @@ config ARM64_SVE booting the kernel. If unsure and you are not observing these symptoms, you should assume that it is safe to say Y. - CPUs that support SVE are architecturally required to support the - Virtualization Host Extensions (VHE), so the kernel makes no - provision for supporting SVE alongside KVM without VHE enabled. - Thus, you will need to enable CONFIG_ARM64_VHE if you want to support - KVM in the same kernel image. - config ARM64_MODULE_PLTS bool "Use PLTs to allow module memory to spill over into vmalloc area" depends on MODULES @@ -1903,14 +1910,6 @@ config SYSVIPC_COMPAT def_bool y depends on COMPAT && SYSVIPC -config ARCH_ENABLE_HUGEPAGE_MIGRATION - def_bool y - depends on HUGETLB_PAGE && MIGRATION - -config ARCH_ENABLE_THP_MIGRATION - def_bool y - depends on TRANSPARENT_HUGEPAGE - menu "Power management options" source "kernel/power/Kconfig" diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index cdfd5fed457f..6409b47b73e4 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -8,16 +8,6 @@ config ARCH_ACTIONS help This enables support for the Actions Semiconductor S900 SoC family. -config ARCH_AGILEX - bool "Intel's Agilex SoCFPGA Family" - help - This enables support for Intel's Agilex SoCFPGA Family. - -config ARCH_N5X - bool "Intel's eASIC N5X SoCFPGA Family" - help - This enables support for Intel's eASIC N5X SoCFPGA Family. - config ARCH_SUNXI bool "Allwinner sunxi 64-bit SoC Family" select ARCH_HAS_RESET_CONTROLLER @@ -36,6 +26,13 @@ config ARCH_ALPINE This enables support for the Annapurna Labs Alpine Soc family. +config ARCH_APPLE + bool "Apple Silicon SoC family" + select APPLE_AIC + help + This enables support for Apple's in-house ARM SoC family, starting + with the Apple M1. + config ARCH_BCM2835 bool "Broadcom BCM2835 family" select TIMER_OF @@ -235,9 +232,7 @@ config ARCH_RENESAS config ARCH_ROCKCHIP bool "Rockchip Platforms" select ARCH_HAS_RESET_CONTROLLER - select GPIOLIB select PINCTRL - select PINCTRL_ROCKCHIP select PM select ROCKCHIP_TIMER help @@ -254,10 +249,11 @@ config ARCH_SEATTLE help This enables support for AMD Seattle SOC Family -config ARCH_STRATIX10 - bool "Altera's Stratix 10 SoCFPGA Family" +config ARCH_INTEL_SOCFPGA + bool "Intel's SoCFPGA ARMv8 Families" help - This enables support for Altera's Stratix 10 SoCFPGA Family. + This enables support for Intel's SoCFPGA ARMv8 families: + Stratix 10 (ex. Altera), Agilex and eASIC N5X. 
config ARCH_SYNQUACER bool "Socionext SynQuacer SoC Family" diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 5b84aec31ed3..7ef44478560d 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -21,7 +21,7 @@ LDFLAGS_vmlinux += -shared -Bsymbolic -z notext \ endif ifeq ($(CONFIG_ARM64_ERRATUM_843419),y) - ifeq ($(call ld-option, --fix-cortex-a53-843419),) + ifneq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y) $(warning ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum) else LDFLAGS_vmlinux += --fix-cortex-a53-843419 diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile index f1173cd93594..639e01a4d855 100644 --- a/arch/arm64/boot/dts/Makefile +++ b/arch/arm64/boot/dts/Makefile @@ -6,6 +6,7 @@ subdir-y += amazon subdir-y += amd subdir-y += amlogic subdir-y += apm +subdir-y += apple subdir-y += arm subdir-y += bitmain subdir-y += broadcom diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-amarula-relic.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-amarula-relic.dts index c7bd73f35ed8..f17cc89f472d 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-amarula-relic.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-amarula-relic.dts @@ -173,7 +173,7 @@ compatible = "x-powers,axp803"; reg = <0x3a3>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; x-powers,drive-vbus-en; /* set N_VBUSEN as output pin */ }; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts index f7fe9fa50cb3..997a19372683 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts @@ -191,7 +191,7 @@ compatible = "x-powers,axp803"; reg = <0x3a3>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; x-powers,drive-vbus-en; /* set N_VBUSEN as output pin */ }; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts index 09b3c7fb82c0..e47ff06a6fa9 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts @@ -152,7 +152,7 @@ compatible = "x-powers,axp803"; reg = <0x3a3>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; }; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts index f3f8e177ab61..ec7e2c0e82c1 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts @@ -185,7 +185,7 @@ compatible = "x-powers,axp803"; reg = <0x3a3>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; x-powers,drive-vbus-en; /* set N_VBUSEN as output pin */ }; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts index 70e31743f0ba..097a5511523a 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts @@ -192,7 +192,7 @@ compatible = "x-powers,axp803"; reg = <0x3a3>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; x-powers,drive-vbus-en; /* set N_VBUSEN as 
output pin */ }; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts index 437ffe3628a5..596a25907432 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts @@ -19,3 +19,7 @@ }; }; }; + +&mmc0 { + broken-cd; /* card detect is broken on *some* boards */ +}; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts index 329cf276561e..2accb5ddf783 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts @@ -139,7 +139,7 @@ compatible = "x-powers,axp803"; reg = <0x3a3>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; }; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts index 7ae16541d14f..34e67f5f8297 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts @@ -245,7 +245,7 @@ compatible = "x-powers,axp803"; reg = <0x3a3>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; }; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi index 9f69d489a81d..79adea3f8cc1 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi @@ -296,7 +296,7 @@ compatible = "x-powers,axp803"; reg = <0x3a3>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; }; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts index 422a8507f674..7ef96f9ff7ae 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts @@ -266,7 +266,7 @@ compatible = "x-powers,axp803"; reg = <0x3a3>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; x-powers,drive-vbus-en; }; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi index 3402cec87035..6d78a1c98f10 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi @@ -34,7 +34,7 @@ vmmc-supply = <®_dcdc1>; disable-wp; bus-width = <4>; - cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */ + cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 push-pull switch */ status = "okay"; }; @@ -45,7 +45,7 @@ compatible = "x-powers,axp803"; reg = <0x3a3>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; }; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts index f0a16f355e27..45e1abdf70a0 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts @@ -205,7 +205,7 @@ compatible = "x-powers,axp803"; reg = <0x3a3>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>; wakeup-source; }; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi 
b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi index 57786fc120c3..5b30e6c1fa05 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi @@ -648,6 +648,7 @@ pio: pinctrl@1c20800 { compatible = "allwinner,sun50i-a64-pinctrl"; reg = <0x01c20800 0x400>; + interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>; @@ -818,6 +819,7 @@ compatible = "allwinner,sun50i-a64-lradc", "allwinner,sun8i-a83t-r-lradc"; reg = <0x01c21800 0x400>; + interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; }; @@ -1208,6 +1210,7 @@ compatible = "allwinner,sun50i-a64-rtc", "allwinner,sun8i-h3-rtc"; reg = <0x01f00000 0x400>; + interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>; clock-output-names = "osc32k", "osc32k-out", "iosc"; @@ -1219,7 +1222,7 @@ compatible = "allwinner,sun50i-a64-r-intc", "allwinner,sun6i-a31-r-intc"; interrupt-controller; - #interrupt-cells = <2>; + #interrupt-cells = <3>; reg = <0x01f00c00 0x400>; interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>; }; @@ -1279,6 +1282,7 @@ r_pio: pinctrl@1f02c00 { compatible = "allwinner,sun50i-a64-r-pinctrl"; reg = <0x01f02c00 0x400>; + interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>; clocks = <&r_ccu CLK_APB0_PIO>, <&osc24M>, <&osc32k>; clock-names = "apb", "hosc", "losc"; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts index 4f4755152fce..6249e9e02928 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts @@ -150,14 +150,30 @@ vcc-pg-supply = <&reg_aldo1>; }; -&r_i2c { +&r_ir { + linux,rc-map-name = "rc-beelink-gs1"; + status = "okay"; +}; + +&r_pio { + /* + * FIXME: We can't add that supply for now since it would + * create a circular dependency between pinctrl, the regulator + * and the RSB Bus.
+ * + * vcc-pl-supply = <&reg_aldo1>; + */ + vcc-pm-supply = <&reg_aldo1>; +}; + +&r_rsb { status = "okay"; - axp805: pmic@36 { + axp805: pmic@745 { compatible = "x-powers,axp805", "x-powers,axp806"; - reg = <0x36>; + reg = <0x745>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>; interrupt-controller; #interrupt-cells = <1>; x-powers,self-working-mode; @@ -273,26 +289,6 @@ }; }; -&r_ir { - linux,rc-map-name = "rc-beelink-gs1"; - status = "okay"; -}; - -&r_pio { - /* - * PL0 and PL1 are used for PMIC I2C - * don't enable the pl-supply else - * it will fail at boot - * - * vcc-pl-supply = <&reg_aldo1>; - */ - vcc-pm-supply = <&reg_aldo1>; -}; - -&rtc { - clocks = <&ext_osc32k>; -}; - &spdif { status = "okay"; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts index 7e83f6146f8a..c45d7b7fb39a 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts @@ -175,14 +175,18 @@ vcc-pg-supply = <&reg_vcc_wifi_io>; }; -&r_i2c { +&r_ir { + status = "okay"; +}; + +&r_rsb { status = "okay"; - axp805: pmic@36 { + axp805: pmic@745 { compatible = "x-powers,axp805", "x-powers,axp806"; - reg = <0x36>; + reg = <0x745>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>; interrupt-controller; #interrupt-cells = <1>; x-powers,self-working-mode; @@ -291,10 +295,6 @@ }; }; -&r_ir { - status = "okay"; -}; - &rtc { clocks = <&ext_osc32k>; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi index da0875bd38d4..92745128fcfe 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi @@ -112,14 +112,22 @@ vcc-pg-supply = <&reg_aldo1>; }; -&r_i2c { +&r_ir { + status = "okay"; +}; + +&r_pio { + vcc-pm-supply = <&reg_bldo3>; +}; + +&r_rsb { status = "okay"; - axp805: pmic@36 { + axp805: pmic@745 { compatible = "x-powers,axp805", "x-powers,axp806"; - reg = <0x36>; + reg = <0x745>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>; interrupt-controller; #interrupt-cells = <1>; x-powers,self-working-mode; @@ -232,14 +240,6 @@ }; }; -&r_ir { - status = "okay"; -}; - -&r_pio { - vcc-pm-supply = <&reg_bldo3>; -}; - &rtc { clocks = <&ext_osc32k>; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts index b868ad17af8f..1ffd68f43f87 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts @@ -168,7 +168,7 @@ compatible = "x-powers,axp805", "x-powers,axp806"; reg = <0x36>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>; interrupt-controller; #interrupt-cells = <1>; x-powers,self-working-mode; @@ -281,7 +281,7 @@ compatible = "nxp,pcf8563"; reg = <0x51>; interrupt-parent = <&r_intc>; - interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>; #clock-cells = <0>; }; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi index 49e979794094..50815867ce7b 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi @@ -294,6 +294,7 @@ pio:
pinctrl@300b000 { compatible = "allwinner,sun50i-h6-pinctrl"; reg = <0x0300b000 0x400>; + interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>, @@ -902,6 +903,7 @@ rtc: rtc@7000000 { compatible = "allwinner,sun50i-h6-rtc"; reg = <0x07000000 0x400>; + interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>; clock-output-names = "osc32k", "osc32k-out", "iosc"; @@ -927,10 +929,9 @@ }; r_intc: interrupt-controller@7021000 { - compatible = "allwinner,sun50i-h6-r-intc", - "allwinner,sun6i-a31-r-intc"; + compatible = "allwinner,sun50i-h6-r-intc"; interrupt-controller; - #interrupt-cells = <2>; + #interrupt-cells = <3>; reg = <0x07021000 0x400>; interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>; }; @@ -938,6 +939,7 @@ r_pio: pinctrl@7022000 { compatible = "allwinner,sun50i-h6-r-pinctrl"; reg = <0x07022000 0x400>; + interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>; clocks = <&r_ccu CLK_R_APB1>, <&osc24M>, <&rtc 0>; @@ -995,9 +997,9 @@ compatible = "allwinner,sun8i-a23-rsb"; reg = <0x07083000 0x400>; interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&r_ccu 13>; + clocks = <&r_ccu CLK_R_APB2_RSB>; clock-frequency = <3000000>; - resets = <&r_ccu 7>; + resets = <&r_ccu RST_R_APB2_RSB>; pinctrl-names = "default"; pinctrl-0 = <&r_rsb_pins>; status = "disabled"; diff --git a/arch/arm64/boot/dts/altera/Makefile b/arch/arm64/boot/dts/altera/Makefile index 10119c7ab437..4db83fbeb115 100644 --- a/arch/arm64/boot/dts/altera/Makefile +++ b/arch/arm64/boot/dts/altera/Makefile @@ -1,3 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -dtb-$(CONFIG_ARCH_STRATIX10) += socfpga_stratix10_socdk.dtb \ +dtb-$(CONFIG_ARCH_INTEL_SOCFPGA) += socfpga_stratix10_socdk.dtb \ socfpga_stratix10_socdk_nand.dtb diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile index 78a569d7fa20..a58ccecfcb55 100644 --- a/arch/arm64/boot/dts/amlogic/Makefile +++ b/arch/arm64/boot/dts/amlogic/Makefile @@ -29,6 +29,7 @@ dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-libretech-cc.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-libretech-cc-v2.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-nexbox-a95x.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-p212.dtb +dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-mecool-kii-pro.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-p230.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-p231.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-phicomm-n1.dtb @@ -38,6 +39,8 @@ dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905w-p281.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905w-tx3-mini.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-libretech-pc.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxm-khadas-vim2.dtb +dtb-$(CONFIG_ARCH_MESON) += meson-gxm-mecool-kiii-pro.dtb +dtb-$(CONFIG_ARCH_MESON) += meson-gxm-minix-neo-u9h.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxm-nexbox-a1.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxm-q200.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxm-q201.dtb diff --git a/arch/arm64/boot/dts/amlogic/meson-a1.dtsi b/arch/arm64/boot/dts/amlogic/meson-a1.dtsi index 755b4ad15184..b4000cf65a9a 100644 --- a/arch/arm64/boot/dts/amlogic/meson-a1.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-a1.dtsi @@ -88,13 +88,13 @@ #reset-cells = <1>; }; - periphs_pinctrl: pinctrl@0400 { + periphs_pinctrl: pinctrl@400 { compatible = "amlogic,meson-a1-periphs-pinctrl"; #address-cells = <2>; #size-cells = <2>; 
ranges; - gpio: bank@0400 { + gpio: bank@400 { reg = <0x0 0x0400 0x0 0x003c>, <0x0 0x0480 0x0 0x0118>; reg-names = "mux", "gpio"; diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi index d945c84ab697..895c43c7af9f 100644 --- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi @@ -1731,7 +1731,6 @@ interrupts = <GIC_SPI 150 IRQ_TYPE_EDGE_RISING>; clocks = <&clkc CLKID_VAPB>; resets = <&reset RESET_GE2D>; - reset-names = "core"; }; gic: interrupt-controller@ffc01000 { diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi index b858c5e43cc8..793d48f72390 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi @@ -17,6 +17,12 @@ #address-cells = <2>; #size-cells = <2>; + aliases { + mmc0 = &sd_emmc_b; /* SD card */ + mmc1 = &sd_emmc_c; /* eMMC */ + mmc2 = &sd_emmc_a; /* SDIO */ + }; + chosen { #address-cells = <2>; #size-cells = <2>; @@ -122,9 +128,9 @@ pcie: pcie@fc000000 { compatible = "amlogic,g12a-pcie", "snps,dw-pcie"; - reg = <0x0 0xfc000000 0x0 0x400000 - 0x0 0xff648000 0x0 0x2000 - 0x0 0xfc400000 0x0 0x200000>; + reg = <0x0 0xfc000000 0x0 0x400000>, + <0x0 0xff648000 0x0 0x2000>, + <0x0 0xfc400000 0x0 0x200000>; reg-names = "elbi", "cfg", "config"; interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>; #interrupt-cells = <1>; @@ -134,8 +140,8 @@ #address-cells = <3>; #size-cells = <2>; device_type = "pci"; - ranges = <0x81000000 0 0 0x0 0xfc600000 0 0x00100000 - 0x82000000 0 0xfc700000 0x0 0xfc700000 0 0x1900000>; + ranges = <0x81000000 0 0 0x0 0xfc600000 0 0x00100000>, + <0x82000000 0 0xfc700000 0x0 0xfc700000 0 0x1900000>; clocks = <&clkc CLKID_PCIE_PHY &clkc CLKID_PCIE_COMB @@ -2003,7 +2009,7 @@ }; }; - vrtc: rtc@0a8 { + vrtc: rtc@a8 { compatible = "amlogic,meson-vrtc"; reg = <0x0 0x000a8 0x0 0x4>; }; @@ -2181,7 +2187,7 @@ amlogic,channel-interrupts = <64 65 66 67 68 69 70 71>; }; - watchdog: wdt@f0d0 { + watchdog: watchdog@f0d0 { compatible = "amlogic,meson-gxbb-wdt"; reg = <0x0 0xf0d0 0x0 0x10>; clocks = <&xtal>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts index 211191f66344..6c7bfacbad78 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts @@ -12,7 +12,7 @@ #include <dt-bindings/sound/meson-g12a-tohdmitx.h> / { - compatible = "azw,gsking-x", "amlogic,g12b"; + compatible = "azw,gsking-x", "amlogic,s922x", "amlogic,g12b"; model = "Beelink GS-King X"; aliases { diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts index 0e331aa5a2d7..707daf92787b 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts @@ -11,7 +11,7 @@ #include <dt-bindings/sound/meson-g12a-tohdmitx.h> / { - compatible = "azw,gtking", "amlogic,g12b"; + compatible = "azw,gtking", "amlogic,s922x", "amlogic,g12b"; model = "Beelink GT-King Pro"; aliases { @@ -35,7 +35,7 @@ leds { compatible = "gpio-leds"; - white { + led-white { label = "power:white"; gpios = <&gpio_ao GPIOAO_11 GPIO_ACTIVE_HIGH>; default-state = "on"; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts index a7db84a500bb..5d96c1449050 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts +++ 
b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts @@ -11,7 +11,7 @@ #include <dt-bindings/sound/meson-g12a-tohdmitx.h> / { - compatible = "azw,gtking", "amlogic,g12b"; + compatible = "azw,gtking", "amlogic,s922x", "amlogic,g12b"; model = "Beelink GT-King"; aliases { diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi index 58ce569b2ace..344573e157a7 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi @@ -42,7 +42,7 @@ leds { compatible = "gpio-leds"; - blue { + led-blue { label = "n2:blue"; gpios = <&gpio_ao GPIOAO_11 GPIO_ACTIVE_HIGH>; linux,default-trigger = "heartbeat"; @@ -410,7 +410,7 @@ &ext_mdio { external_phy: ethernet-phy@0 { - /* Realtek RTL8211F (0x001cc916) */ + /* Realtek RTL8211F (0x001cc916) */ reg = <0>; max-speed = <1000>; @@ -446,13 +446,58 @@ }; &gpio { + gpio-line-names = + /* GPIOZ */ + "", "", "", "", "", "", "", "", + "", "", "", "", "", "", "", "", + /* GPIOH */ + "", "", "", "", "", "", "", "", + "", + /* BOOT */ + "", "", "", "", "", "", "", "", + "", "", "", "", "", "", "", "", + /* GPIOC */ + "", "", "", "", "", "", "", "", + /* GPIOA */ + "PIN_44", /* GPIOA_0 */ + "PIN_46", /* GPIOA_1 */ + "PIN_45", /* GPIOA_2 */ + "PIN_47", /* GPIOA_3 */ + "PIN_26", /* GPIOA_4 */ + "", "", "", "", "", "", + "PIN_42", /* GPIOA_11 */ + "PIN_32", /* GPIOA_12 */ + "PIN_7", /* GPIOA_13 */ + "PIN_27", /* GPIOA_14 */ + "PIN_28", /* GPIOA_15 */ + /* GPIOX */ + "PIN_16", /* GPIOX_0 */ + "PIN_18", /* GPIOX_1 */ + "PIN_22", /* GPIOX_2 */ + "PIN_11", /* GPIOX_3 */ + "PIN_13", /* GPIOX_4 */ + "PIN_33", /* GPIOX_5 */ + "PIN_35", /* GPIOX_6 */ + "PIN_15", /* GPIOX_7 */ + "PIN_19", /* GPIOX_8 */ + "PIN_21", /* GPIOX_9 */ + "PIN_24", /* GPIOX_10 */ + "PIN_23", /* GPIOX_11 */ + "PIN_8", /* GPIOX_12 */ + "PIN_10", /* GPIOX_13 */ + "PIN_29", /* GPIOX_14 */ + "PIN_31", /* GPIOX_15 */ + "PIN_12", /* GPIOX_16 */ + "PIN_3", /* GPIOX_17 */ + "PIN_5", /* GPIOX_18 */ + "PIN_36"; /* GPIOX_19 */ /* * WARNING: The USB Hub on the Odroid-N2 needs a reset signal * to be turned high in order to be detected by the USB Controller * This signal should be handled by a USB specific power sequence * in order to reset the Hub when USB bus is powered down. */ - usb-hub { + hog-0 { gpio-hog; gpios = <GPIOH_4 GPIO_ACTIVE_HIGH>; output-high; @@ -508,6 +553,11 @@ status = "okay"; }; +&saradc { + status = "okay"; + vref-supply = <&vddao_1v8>; +}; + /* SD card */ &sd_emmc_b { status = "okay"; diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi index 0edd137151f8..3d00404aae0f 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi @@ -20,6 +20,12 @@ #address-cells = <2>; #size-cells = <2>; + aliases { + mmc0 = &sd_emmc_b; /* SD card */ + mmc1 = &sd_emmc_c; /* eMMC */ + mmc2 = &sd_emmc_a; /* SDIO */ + }; + reserved-memory { #address-cells = <2>; #size-cells = <2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts index bfaf7f41a2d6..201596247fd9 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts @@ -257,7 +257,7 @@ * This signal should be handled by a USB specific power sequence * in order to reset the Hub when USB bus is powered down. 
*/ - usb-hub { + hog-0 { gpio-hog; gpios = <GPIOAO_4 GPIO_ACTIVE_HIGH>; output-high; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-mecool-kii-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-mecool-kii-pro.dts new file mode 100644 index 000000000000..c529b6c860a4 --- /dev/null +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-mecool-kii-pro.dts @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) + +/* + * Author: Christian Hewitt <christianshewitt@gmail.com> + */ + +/dts-v1/; + +#include "meson-gxl-s905d.dtsi" +#include "meson-gx-p23x-q20x.dtsi" +#include <dt-bindings/input/input.h> +#include <dt-bindings/leds/common.h> + +/ { + compatible = "videostrong,gxl-kii-pro", "amlogic,s905d", "amlogic,meson-gxl"; + model = "MeCool KII Pro"; + + adc-keys { + compatible = "adc-keys"; + io-channels = <&saradc 0>; + io-channel-names = "buttons"; + keyup-threshold-microvolt = <1710000>; + + button-function { + label = "Update"; + linux,code = <KEY_VENDOR>; + press-threshold-microvolt = <10000>; + }; + }; + + gpio-keys-polled { + compatible = "gpio-keys-polled"; + #address-cells = <1>; + #size-cells = <0>; + poll-interval = <100>; + + button@0 { + label = "power"; + linux,code = <KEY_POWER>; + gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_LOW>; + }; + }; + + leds { + compatible = "gpio-leds"; + + led-blue { + color = <LED_COLOR_ID_BLUE>; + function = LED_FUNCTION_POWER; + gpios = <&gpio GPIODV_24 GPIO_ACTIVE_HIGH>; + default-state = "on"; + panic-indicator; + }; + }; +}; + +&ethmac { + phy-mode = "rmii"; + phy-handle = <&internal_phy>; +}; + +&ir { + linux,rc-map-name = "rc-mecool-kii-pro"; +}; + +&sd_emmc_a { + brcmf: wifi@1 { + reg = <1>; + compatible = "brcm,bcm4329-fmac"; + }; +}; + +&uart_A { + status = "okay"; + pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>; + pinctrl-names = "default"; + uart-has-rtscts; + + bluetooth { + compatible = "brcm,bcm43438-bt"; + shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>; + max-speed = <2000000>; + clocks = <&wifi32k>; + clock-names = "lpo"; + }; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts index ad6d72254150..b331a013572f 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts @@ -16,28 +16,28 @@ leds { compatible = "gpio-leds"; - yellow { + led-yellow { color = <LED_COLOR_ID_YELLOW>; function = LED_FUNCTION_STATUS; gpios = <&gpio_ao GPIOAO_6 GPIO_ACTIVE_HIGH>; default-state = "off"; }; - blue { + led-blue { color = <LED_COLOR_ID_BLUE>; function = LED_FUNCTION_STATUS; gpios = <&gpio GPIODV_28 GPIO_ACTIVE_HIGH>; default-state = "off"; }; - green { + led-green { color = <LED_COLOR_ID_GREEN>; function = LED_FUNCTION_STATUS; gpios = <&gpio_ao GPIOAO_9 GPIO_ACTIVE_HIGH>; default-state = "on"; }; - red { + led-red { color = <LED_COLOR_ID_RED>; function = LED_FUNCTION_STATUS; gpios = <&gpio GPIODV_27 GPIO_ACTIVE_HIGH>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-mecool-kiii-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-mecool-kiii-pro.dts new file mode 100644 index 000000000000..ebebf344b715 --- /dev/null +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-mecool-kiii-pro.dts @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) + +/* + * Author: Christian Hewitt <christianshewitt@gmail.com> + */ + +/dts-v1/; + +#include "meson-gxm.dtsi" +#include "meson-gx-p23x-q20x.dtsi" +#include <dt-bindings/input/input.h> +#include <dt-bindings/leds/common.h> + +/ { + compatible
= "videostrong,gxm-kiii-pro", "amlogic,s912", "amlogic,meson-gxm"; + model = "MeCool KIII Pro"; + + memory@0 { + device_type = "memory"; + reg = <0x0 0x0 0x0 0xC0000000>; + }; + + adc-keys { + compatible = "adc-keys"; + io-channels = <&saradc 0>; + io-channel-names = "buttons"; + keyup-threshold-microvolt = <1710000>; + + button-function { + label = "Update"; + linux,code = <KEY_VENDOR>; + press-threshold-microvolt = <10000>; + }; + }; + + gpio-keys-polled { + compatible = "gpio-keys-polled"; + #address-cells = <1>; + #size-cells = <0>; + poll-interval = <100>; + + button@0 { + label = "power"; + linux,code = <KEY_POWER>; + gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_LOW>; + }; + }; + + leds { + compatible = "gpio-leds"; + + led-blue { + color = <LED_COLOR_ID_BLUE>; + function = LED_FUNCTION_POWER; + gpios = <&gpio GPIODV_24 GPIO_ACTIVE_HIGH>; + default-state = "on"; + panic-indicator; + }; + }; +}; + +ðmac { + pinctrl-0 = <ð_pins>; + pinctrl-names = "default"; + + phy-handle = <&external_phy>; + + amlogic,tx-delay-ns = <2>; + + phy-mode = "rgmii"; +}; + +&external_mdio { + external_phy: ethernet-phy@0 { + /* Realtek RTL8211F (0x001cc916) */ + reg = <0>; + max-speed = <1000>; + + reset-assert-us = <10000>; + reset-deassert-us = <80000>; + reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; + + interrupt-parent = <&gpio_intc>; + /* MAC_INTR on GPIOZ_15 */ + interrupts = <25 IRQ_TYPE_LEVEL_LOW>; + }; +}; + +&ir { + linux,rc-map-name = "rc-mecool-kiii-pro"; +}; + +&sd_emmc_a { + brcmf: wifi@1 { + reg = <1>; + compatible = "brcm,bcm4329-fmac"; + }; +}; + +&uart_A { + status = "okay"; + pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>; + pinctrl-names = "default"; + uart-has-rtscts; + + bluetooth { + compatible = "brcm,bcm43438-bt"; + shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>; + max-speed = <2000000>; + clocks = <&wifi32k>; + clock-names = "lpo"; + }; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-minix-neo-u9h.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-minix-neo-u9h.dts new file mode 100644 index 000000000000..ea9f234d1fc7 --- /dev/null +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-minix-neo-u9h.dts @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) Christian Hewitt <christianshewitt@gmail.com> + */ + +/dts-v1/; + +#include "meson-gxm.dtsi" +#include "meson-gx-p23x-q20x.dtsi" +#include <dt-bindings/input/input.h> +#include <dt-bindings/leds/common.h> + +/ { + compatible = "minix,neo-u9h", "amlogic,s912", "amlogic,meson-gxm"; + model = "Minix Neo U9-H"; + + leds { + compatible = "gpio-leds"; + + led-white { + color = <LED_COLOR_ID_WHITE>; + function = LED_FUNCTION_POWER; + gpios = <&gpio_ao GPIOAO_9 GPIO_ACTIVE_HIGH>; + default-state = "on"; + panic-indicator; + }; + }; + + adc-keys { + compatible = "adc-keys"; + io-channels = <&saradc 0>; + io-channel-names = "buttons"; + keyup-threshold-microvolt = <1710000>; + + button-function { + label = "update"; + linux,code = <KEY_VENDOR>; + press-threshold-microvolt = <10000>; + }; + }; + + gpio-keys-polled { + compatible = "gpio-keys-polled"; + #address-cells = <1>; + #size-cells = <0>; + poll-interval = <100>; + + button@0 { + label = "power"; + linux,code = <KEY_POWER>; + gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_LOW>; + }; + }; +}; + +ðmac { + pinctrl-0 = <ð_pins>; + pinctrl-names = "default"; + phy-handle = <&external_phy>; + amlogic,tx-delay-ns = <2>; + phy-mode = "rgmii"; +}; + +&external_mdio { + external_phy: ethernet-phy@0 { + /* Realtek RTL8211F (0x001cc916) */ + reg = <0>; + max-speed = <1000>; + + 
reset-assert-us = <10000>; + reset-deassert-us = <80000>; + reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; + + interrupt-parent = <&gpio_intc>; + /* MAC_INTR on GPIOZ_15 */ + interrupts = <25 IRQ_TYPE_LEVEL_LOW>; + }; +}; + +&ir { + linux,rc-map-name = "rc-minix-neo"; +}; + +&i2c_B { + status = "okay"; + pinctrl-0 = <&i2c_b_pins>; + pinctrl-names = "default"; + + rtc: rtc@51 { + status = "okay"; + compatible = "haoyu,hym8563"; + reg = <0x51>; + #clock-cells = <0>; + clock-frequency = <32768>; + clock-output-names = "xin32k"; + wakeup-source; + }; +}; + +&sd_emmc_a { + brcmf: wifi@1 { + reg = <1>; + compatible = "brcm,bcm4329-fmac"; + }; +}; + +&uart_A { + status = "okay"; + pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>; + pinctrl-names = "default"; + uart-has-rtscts; + + bluetooth { + compatible = "brcm,bcm43438-bt"; + shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>; + max-speed = <2000000>; + clocks = <&wifi32k>; + clock-names = "lpo"; + }; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-wetek-core2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-wetek-core2.dts index ec794c134c15..1e7f77f9b533 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-wetek-core2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-wetek-core2.dts @@ -22,7 +22,7 @@ leds { compatible = "gpio-leds"; - blue { + led-blue { color = <LED_COLOR_ID_BLUE>; function = LED_FUNCTION_STATUS; gpios = <&gpio GPIODV_24 GPIO_ACTIVE_HIGH>; diff --git a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi index 877e3b989203..66d67524b031 100644 --- a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi @@ -43,14 +43,14 @@ leds { compatible = "gpio-leds"; - white { + led-white { color = <LED_COLOR_ID_WHITE>; function = LED_FUNCTION_STATUS; gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_HIGH>; linux,default-trigger = "heartbeat"; }; - red { + led-red { color = <LED_COLOR_ID_RED>; function = LED_FUNCTION_STATUS; gpios = <&gpio_expander 5 GPIO_ACTIVE_HIGH>; diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts index b2a4e823c1d8..8c327c03d845 100644 --- a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts +++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts @@ -47,7 +47,7 @@ * This signal should be handled by a USB specific power sequence * in order to reset the Hub when USB bus is powered down. 
*/ - usb-hub { + hog-0 { gpio-hog; gpios = <GPIOH_4 GPIO_ACTIVE_HIGH>; output-high; diff --git a/arch/arm64/boot/dts/apple/Makefile b/arch/arm64/boot/dts/apple/Makefile new file mode 100644 index 000000000000..cbbd701ebf05 --- /dev/null +++ b/arch/arm64/boot/dts/apple/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +dtb-$(CONFIG_ARCH_APPLE) += t8103-j274.dtb diff --git a/arch/arm64/boot/dts/apple/t8103-j274.dts b/arch/arm64/boot/dts/apple/t8103-j274.dts new file mode 100644 index 000000000000..e0f6775b9878 --- /dev/null +++ b/arch/arm64/boot/dts/apple/t8103-j274.dts @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0+ OR MIT +/* + * Apple Mac mini (M1, 2020) + * + * target-type: J274 + * + * Copyright The Asahi Linux Contributors + */ + +/dts-v1/; + +#include "t8103.dtsi" + +/ { + compatible = "apple,j274", "apple,t8103", "apple,arm-platform"; + model = "Apple Mac mini (M1, 2020)"; + + aliases { + serial0 = &serial0; + }; + + chosen { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + stdout-path = "serial0"; + + framebuffer0: framebuffer@0 { + compatible = "apple,simple-framebuffer", "simple-framebuffer"; + reg = <0 0 0 0>; /* To be filled by loader */ + /* Format properties will be added by loader */ + status = "disabled"; + }; + }; + + memory@800000000 { + device_type = "memory"; + reg = <0x8 0 0x2 0>; /* To be filled by loader */ + }; +}; + +&serial0 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/apple/t8103.dtsi b/arch/arm64/boot/dts/apple/t8103.dtsi new file mode 100644 index 000000000000..a1e22a2ea2e5 --- /dev/null +++ b/arch/arm64/boot/dts/apple/t8103.dtsi @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0+ OR MIT +/* + * Apple T8103 "M1" SoC + * + * Other names: H13G, "Tonga" + * + * Copyright The Asahi Linux Contributors + */ + +#include <dt-bindings/interrupt-controller/apple-aic.h> +#include <dt-bindings/interrupt-controller/irq.h> + +/ { + compatible = "apple,t8103", "apple,arm-platform"; + + #address-cells = <2>; + #size-cells = <2>; + + cpus { + #address-cells = <2>; + #size-cells = <0>; + + cpu0: cpu@0 { + compatible = "apple,icestorm"; + device_type = "cpu"; + reg = <0x0 0x0>; + enable-method = "spin-table"; + cpu-release-addr = <0 0>; /* To be filled by loader */ + }; + + cpu1: cpu@1 { + compatible = "apple,icestorm"; + device_type = "cpu"; + reg = <0x0 0x1>; + enable-method = "spin-table"; + cpu-release-addr = <0 0>; /* To be filled by loader */ + }; + + cpu2: cpu@2 { + compatible = "apple,icestorm"; + device_type = "cpu"; + reg = <0x0 0x2>; + enable-method = "spin-table"; + cpu-release-addr = <0 0>; /* To be filled by loader */ + }; + + cpu3: cpu@3 { + compatible = "apple,icestorm"; + device_type = "cpu"; + reg = <0x0 0x3>; + enable-method = "spin-table"; + cpu-release-addr = <0 0>; /* To be filled by loader */ + }; + + cpu4: cpu@10100 { + compatible = "apple,firestorm"; + device_type = "cpu"; + reg = <0x0 0x10100>; + enable-method = "spin-table"; + cpu-release-addr = <0 0>; /* To be filled by loader */ + }; + + cpu5: cpu@10101 { + compatible = "apple,firestorm"; + device_type = "cpu"; + reg = <0x0 0x10101>; + enable-method = "spin-table"; + cpu-release-addr = <0 0>; /* To be filled by loader */ + }; + + cpu6: cpu@10102 { + compatible = "apple,firestorm"; + device_type = "cpu"; + reg = <0x0 0x10102>; + enable-method = "spin-table"; + cpu-release-addr = <0 0>; /* To be filled by loader */ + }; + + cpu7: cpu@10103 { + compatible = "apple,firestorm"; + device_type = "cpu"; + reg = <0x0 0x10103>; + enable-method = "spin-table"; + 
cpu-release-addr = <0 0>; /* To be filled by loader */ + }; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupt-parent = <&aic>; + interrupt-names = "phys", "virt", "hyp-phys", "hyp-virt"; + interrupts = <AIC_FIQ AIC_TMR_GUEST_PHYS IRQ_TYPE_LEVEL_HIGH>, + <AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>, + <AIC_FIQ AIC_TMR_HV_PHYS IRQ_TYPE_LEVEL_HIGH>, + <AIC_FIQ AIC_TMR_HV_VIRT IRQ_TYPE_LEVEL_HIGH>; + }; + + clk24: clock-24m { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <24000000>; + clock-output-names = "clk24"; + }; + + soc { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + + ranges; + nonposted-mmio; + + serial0: serial@235200000 { + compatible = "apple,s5l-uart"; + reg = <0x2 0x35200000 0x0 0x1000>; + reg-io-width = <4>; + interrupt-parent = <&aic>; + interrupts = <AIC_IRQ 605 IRQ_TYPE_LEVEL_HIGH>; + /* + * TODO: figure out the clocking properly, there may + * be a third selectable clock. + */ + clocks = <&clk24>, <&clk24>; + clock-names = "uart", "clk_uart_baud0"; + status = "disabled"; + }; + + aic: interrupt-controller@23b100000 { + compatible = "apple,t8103-aic", "apple,aic"; + #interrupt-cells = <3>; + interrupt-controller; + reg = <0x2 0x3b100000 0x0 0x8000>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi index f6c55877fbd9..1cc7fdcec51b 100644 --- a/arch/arm64/boot/dts/arm/juno-base.dtsi +++ b/arch/arm64/boot/dts/arm/juno-base.dtsi @@ -544,6 +544,10 @@ ranges = <0x01000000 0x00 0x00000000 0x00 0x5f800000 0x0 0x00800000>, <0x02000000 0x00 0x50000000 0x00 0x50000000 0x0 0x08000000>, <0x42000000 0x40 0x00000000 0x40 0x00000000 0x1 0x00000000>; + /* Standard AXI Translation entries as programmed by EDK2 */ + dma-ranges = <0x02000000 0x0 0x2c1c0000 0x0 0x2c1c0000 0x0 0x00040000>, + <0x02000000 0x0 0x80000000 0x0 0x80000000 0x0 0x80000000>, + <0x43000000 0x8 0x00000000 0x8 0x00000000 0x2 0x00000000>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &gic 0 GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>, @@ -640,7 +644,6 @@ #iommu-cells = <1>; #global-interrupts = <1>; dma-coherent; - status = "disabled"; }; smmu_hdlcd1: iommu@7fb10000 { diff --git a/arch/arm64/boot/dts/arm/juno-r1.dts b/arch/arm64/boot/dts/arm/juno-r1.dts index 5f290090b0cf..0e24e29eb9b1 100644 --- a/arch/arm64/boot/dts/arm/juno-r1.dts +++ b/arch/arm64/boot/dts/arm/juno-r1.dts @@ -230,6 +230,10 @@ status = "okay"; }; +&smmu_pcie { + status = "okay"; +}; + &etm0 { cpu = <&A57_0>; }; diff --git a/arch/arm64/boot/dts/arm/juno-r2.dts b/arch/arm64/boot/dts/arm/juno-r2.dts index 305300dd521c..e609420ce3e4 100644 --- a/arch/arm64/boot/dts/arm/juno-r2.dts +++ b/arch/arm64/boot/dts/arm/juno-r2.dts @@ -236,6 +236,10 @@ status = "okay"; }; +&smmu_pcie { + status = "okay"; +}; + &etm0 { cpu = <&A72_0>; }; diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/Makefile b/arch/arm64/boot/dts/broadcom/bcm4908/Makefile index ebebc0cd421f..cc75854519ac 100644 --- a/arch/arm64/boot/dts/broadcom/bcm4908/Makefile +++ b/arch/arm64/boot/dts/broadcom/bcm4908/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 dtb-$(CONFIG_ARCH_BCM4908) += bcm4906-netgear-r8000p.dtb +dtb-$(CONFIG_ARCH_BCM4908) += bcm4906-tplink-archer-c2300-v1.dtb dtb-$(CONFIG_ARCH_BCM4908) += bcm4908-asus-gt-ac5300.dtb diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-netgear-r8000p.dts b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-netgear-r8000p.dts index ee3ed612274c..2dd028438c22 100644 --- 
a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-netgear-r8000p.dts +++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-netgear-r8000p.dts @@ -18,11 +18,106 @@ leds { compatible = "gpio-leds"; - wps { + led-power-white { + function = LED_FUNCTION_POWER; + color = <LED_COLOR_ID_WHITE>; + gpios = <&gpio0 8 GPIO_ACTIVE_LOW>; + }; + + led-power-amber { + function = LED_FUNCTION_POWER; + color = <LED_COLOR_ID_AMBER>; + gpios = <&gpio0 9 GPIO_ACTIVE_LOW>; + }; + + led-wps { function = LED_FUNCTION_WPS; color = <LED_COLOR_ID_WHITE>; gpios = <&gpio0 10 GPIO_ACTIVE_LOW>; }; + + led-2ghz { + function = "2ghz"; + color = <LED_COLOR_ID_WHITE>; + gpios = <&gpio0 14 GPIO_ACTIVE_LOW>; + }; + + led-5ghz-1 { + function = "5ghz-1"; + color = <LED_COLOR_ID_WHITE>; + gpios = <&gpio0 15 GPIO_ACTIVE_LOW>; + }; + + led-5ghz-2 { + function = "5ghz-2"; + color = <LED_COLOR_ID_WHITE>; + gpios = <&gpio0 16 GPIO_ACTIVE_LOW>; + }; + + led-usb2 { + function = "usb2"; + color = <LED_COLOR_ID_WHITE>; + gpios = <&gpio0 17 GPIO_ACTIVE_LOW>; + }; + + led-usb3 { + function = "usb3"; + color = <LED_COLOR_ID_WHITE>; + gpios = <&gpio0 18 GPIO_ACTIVE_LOW>; + }; + + led-wifi { + function = "wifi"; + color = <LED_COLOR_ID_WHITE>; + gpios = <&gpio0 56 GPIO_ACTIVE_LOW>; + }; + }; +}; + +&enet { + nvmem-cells = <&base_mac_addr>; + nvmem-cell-names = "mac-address"; +}; + +&usb_phy { + brcm,ioc = <1>; + status = "okay"; +}; + +&ehci { + status = "okay"; +}; + +&ohci { + status = "okay"; +}; + +&xhci { + status = "okay"; +}; + +&ports { + port@0 { + label = "lan4"; + }; + + port@1 { + label = "lan3"; + }; + + port@2 { + label = "lan2"; + }; + + port@3 { + label = "lan1"; + }; + + port@7 { + reg = <7>; + phy-mode = "internal"; + phy-handle = <&phy12>; + label = "wan"; }; }; @@ -40,11 +135,21 @@ #size-cells = <1>; partition@0 { + compatible = "nvmem-cells"; label = "cferom"; reg = <0x0 0x100000>; + + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x0 0x100000>; + + base_mac_addr: mac@106a0 { + reg = <0x106a0 0x6>; + }; }; partition@100000 { + compatible = "brcm,bcm4908-firmware"; label = "firmware"; reg = <0x100000 0x4400000>; }; diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-tplink-archer-c2300-v1.dts b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-tplink-archer-c2300-v1.dts new file mode 100644 index 000000000000..b63eefab48bd --- /dev/null +++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-tplink-archer-c2300-v1.dts @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0-or-later OR MIT + +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/input/input.h> +#include <dt-bindings/leds/common.h> + +#include "bcm4906.dtsi" + +/ { + compatible = "tplink,archer-c2300-v1", "brcm,bcm4906", "brcm,bcm4908"; + model = "TP-Link Archer C2300 V1"; + + memory@0 { + device_type = "memory"; + reg = <0x00 0x00 0x00 0x20000000>; + }; + + leds { + compatible = "gpio-leds"; + + led-power { + function = LED_FUNCTION_POWER; + color = <LED_COLOR_ID_BLUE>; + gpios = <&gpio0 0 GPIO_ACTIVE_LOW>; + }; + + led-2ghz { + function = "2ghz"; + color = <LED_COLOR_ID_BLUE>; + gpios = <&gpio0 2 GPIO_ACTIVE_LOW>; + }; + + led-5ghz { + function = "5ghz"; + color = <LED_COLOR_ID_BLUE>; + gpios = <&gpio0 3 GPIO_ACTIVE_LOW>; + }; + + led-wan-amber { + function = LED_FUNCTION_WAN; + color = <LED_COLOR_ID_AMBER>; + gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>; + }; + + led-wan-blue { + function = LED_FUNCTION_WAN; + color = <LED_COLOR_ID_BLUE>; + gpios = <&gpio0 10 GPIO_ACTIVE_LOW>; + }; + + led-lan { + function = LED_FUNCTION_LAN; + color = <LED_COLOR_ID_BLUE>; + 
gpios = <&gpio0 12 GPIO_ACTIVE_LOW>; + }; + + led-wps { + function = LED_FUNCTION_WPS; + color = <LED_COLOR_ID_BLUE>; + gpios = <&gpio0 14 GPIO_ACTIVE_LOW>; + }; + + led-usb2 { + function = "usb2"; + color = <LED_COLOR_ID_BLUE>; + gpios = <&gpio0 15 GPIO_ACTIVE_LOW>; + }; + + led-usb3 { + function = "usbd3"; + color = <LED_COLOR_ID_BLUE>; + gpios = <&gpio0 17 GPIO_ACTIVE_LOW>; + }; + + led-brightness { + function = LED_FUNCTION_BACKLIGHT; + color = <LED_COLOR_ID_WHITE>; + gpios = <&gpio0 19 GPIO_ACTIVE_LOW>; + }; + }; + + gpio-keys-polled { + compatible = "gpio-keys-polled"; + poll-interval = <100>; + + brightness { + label = "LEDs"; + linux,code = <KEY_BRIGHTNESS_ZERO>; + gpios = <&gpio0 18 GPIO_ACTIVE_LOW>; + }; + + wps { + label = "WPS"; + linux,code = <KEY_WPS_BUTTON>; + gpios = <&gpio0 21 GPIO_ACTIVE_LOW>; + }; + + wifi { + label = "WiFi"; + linux,code = <KEY_RFKILL>; + gpios = <&gpio0 22 GPIO_ACTIVE_LOW>; + }; + + restart { + label = "Reset"; + linux,code = <KEY_RESTART>; + gpios = <&gpio0 23 GPIO_ACTIVE_LOW>; + }; + }; +}; + +&usb_phy { + brcm,ioc = <1>; + status = "okay"; +}; + +&ehci { + status = "okay"; +}; + +&ohci { + status = "okay"; +}; + +&xhci { + status = "okay"; +}; + +&ports { + port@0 { + label = "lan4"; + }; + + port@1 { + label = "lan3"; + }; + + port@2 { + label = "lan2"; + }; + + port@3 { + label = "lan1"; + }; + + port@7 { + reg = <7>; + phy-mode = "internal"; + phy-handle = <&phy12>; + label = "wan"; + }; +}; + +&nandcs { + nand-ecc-strength = <4>; + nand-ecc-step-size = <512>; + nand-on-flash-bbt; + + #address-cells = <1>; + #size-cells = <0>; + + partitions { + compatible = "brcm,bcm4908-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "cferom"; + reg = <0x0 0x100000>; + }; + + partition@100000 { + compatible = "brcm,bcm4908-firmware"; + reg = <0x100000 0x3900000>; + }; + + partition@5800000 { + compatible = "brcm,bcm4908-firmware"; + reg = <0x3a00000 0x3900000>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts index 6e4ad66ff536..169fbb7cfd34 100644 --- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts +++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts @@ -44,6 +44,28 @@ }; }; +&enet { + nvmem-cells = <&base_mac_addr>; + nvmem-cell-names = "mac-address"; +}; + +&usb_phy { + brcm,ioc = <1>; + status = "okay"; +}; + +&ehci { + status = "okay"; +}; + +&ohci { + status = "okay"; +}; + +&xhci { + status = "okay"; +}; + &ports { port@0 { label = "lan2"; @@ -65,6 +87,7 @@ port@7 { label = "sw"; reg = <7>; + phy-mode = "rgmii"; fixed-link { speed = <1000>; @@ -105,13 +128,32 @@ #size-cells = <0>; partitions { - compatible = "fixed-partitions"; + compatible = "brcm,bcm4908-partitions"; #address-cells = <1>; #size-cells = <1>; partition@0 { + compatible = "nvmem-cells"; label = "cferom"; reg = <0x0 0x100000>; + + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x0 0x100000>; + + base_mac_addr: mac@106a0 { + reg = <0x106a0 0x6>; + }; + }; + + partition@100000 { + compatible = "brcm,bcm4908-firmware"; + reg = <0x100000 0x5700000>; + }; + + partition@5800000 { + compatible = "brcm,bcm4908-firmware"; + reg = <0x5800000 0x5700000>; }; }; }; diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi index 9354077f74cd..8060178b365d 100644 --- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi +++ 
b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi @@ -2,6 +2,8 @@ #include <dt-bindings/interrupt-controller/irq.h> #include <dt-bindings/interrupt-controller/arm-gic.h> +#include <dt-bindings/phy/phy.h> +#include <dt-bindings/soc/bcm-pmb.h> /dts-v1/; @@ -110,28 +112,52 @@ #size-cells = <1>; ranges = <0x00 0x00 0x80000000 0x281000>; - usb@c300 { + enet: ethernet@2000 { + compatible = "brcm,bcm4908-enet"; + reg = <0x2000 0x1000>; + + interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "rx", "tx"; + }; + + usb_phy: usb-phy@c200 { + compatible = "brcm,bcm4908-usb-phy"; + reg = <0xc200 0x100>; + reg-names = "ctrl"; + power-domains = <&pmb BCM_PMB_HOST_USB>; + dr_mode = "host"; + brcm,has-xhci; + brcm,has-eohci; + #phy-cells = <1>; + status = "disabled"; + }; + + ehci: usb@c300 { compatible = "generic-ehci"; reg = <0xc300 0x100>; interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>; + phys = <&usb_phy PHY_TYPE_USB2>; status = "disabled"; }; - usb@c400 { + ohci: usb@c400 { compatible = "generic-ohci"; reg = <0xc400 0x100>; interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>; + phys = <&usb_phy PHY_TYPE_USB2>; status = "disabled"; }; - usb@d000 { + xhci: usb@d000 { compatible = "generic-xhci"; reg = <0xd000 0x8c8>; interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>; + phys = <&usb_phy PHY_TYPE_USB3>; status = "disabled"; }; - ethernet-switch@80000 { + bus@80000 { compatible = "simple-bus"; #size-cells = <1>; #address-cells = <1>; @@ -182,6 +208,17 @@ phy-mode = "internal"; phy-handle = <&phy11>; }; + + port@8 { + reg = <8>; + phy-mode = "internal"; + ethernet = <&enet>; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; }; }; @@ -222,7 +259,7 @@ #address-cells = <1>; #size-cells = <1>; - power-controller@2800c0 { + pmb: power-controller@2800c0 { compatible = "brcm,bcm4908-pmb"; reg = <0x2800c0 0x40>; #power-domain-cells = <1>; diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi index 413cac63a1cb..773d9abe3a44 100644 --- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi @@ -1002,7 +1002,7 @@ ppmu_event0_d1_general: ppmu-event0-d1-general { event-name = "ppmu-event0-d1-general"; }; - }; + }; }; &pinctrl_alive { diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi index 6433f9ee35e1..18a912eee360 100644 --- a/arch/arm64/boot/dts/exynos/exynos5433.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi @@ -564,9 +564,9 @@ compatible = "samsung,exynos5433-slim-sss"; reg = <0x11140000 0x1000>; interrupts = <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>; - clock-names = "aclk", "pclk"; - clocks = <&cmu_imem CLK_ACLK_SLIMSSS>, - <&cmu_imem CLK_PCLK_SLIMSSS>; + clock-names = "pclk", "aclk"; + clocks = <&cmu_imem CLK_PCLK_SLIMSSS>, + <&cmu_imem CLK_ACLK_SLIMSSS>; }; pd_gscl: power-domain@105c4000 { diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile index 6438db3822f8..44890d56c194 100644 --- a/arch/arm64/boot/dts/freescale/Makefile +++ b/arch/arm64/boot/dts/freescale/Makefile @@ -33,6 +33,8 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2162a-qds.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mm-beacon-kit.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mm-evk.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mm-ddr4-evk.dtb +dtb-$(CONFIG_ARCH_MXC) += imx8mm-icore-mx8mm-ctouch2.dtb +dtb-$(CONFIG_ARCH_MXC) += imx8mm-icore-mx8mm-edimm2.2.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mm-kontron-n801x-s.dtb 
dtb-$(CONFIG_ARCH_MXC) += imx8mm-nitrogen-r2.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mm-var-som-symphony.dtb @@ -47,6 +49,7 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mp-evk.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mp-phyboard-pollux-rdk.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mq-evk.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mq-hummingboard-pulse.dtb +dtb-$(CONFIG_ARCH_MXC) += imx8mq-kontron-pitx-imx8m.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mq-librem5-devkit.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mq-librem5-r2.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mq-librem5-r3.dtb @@ -57,6 +60,7 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mq-pico-pi.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mq-thor96.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mq-zii-ultra-rmb3.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mq-zii-ultra-zest.dtb +dtb-$(CONFIG_ARCH_MXC) += imx8qm-mek.dtb dtb-$(CONFIG_ARCH_MXC) += imx8qxp-ai_ml.dtb dtb-$(CONFIG_ARCH_MXC) += imx8qxp-colibri-eval-v3.dtb dtb-$(CONFIG_ARCH_MXC) += imx8qxp-mek.dtb diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi index 7de6b376d792..9058cfa4980f 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi @@ -198,6 +198,7 @@ ranges = <0x0 0x00 0x1700000 0x100000>; reg = <0x00 0x1700000 0x0 0x100000>; interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>; + dma-coherent; sec_jr0: jr@10000 { compatible = "fsl,sec-v5.4-job-ring", diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts index 0516076087ae..a92ecb331cdc 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts @@ -25,6 +25,8 @@ spi1 = &dspi2; mmc0 = &esdhc1; mmc1 = &esdhc; + rtc0 = &rtc; + rtc1 = &ftm_alarm0; }; buttons0 { @@ -115,8 +117,6 @@ status = "okay"; flash@0 { - #address-cells = <1>; - #size-cells = <1>; compatible = "jedec,spi-nor"; m25p,fast-read; spi-max-frequency = <133000000>; @@ -125,49 +125,37 @@ spi-rx-bus-width = <2>; /* 2 SPI Rx lines */ spi-tx-bus-width = <1>; /* 1 SPI Tx line */ - partition@0 { - reg = <0x000000 0x010000>; - label = "rcw"; - read-only; - }; - - partition@10000 { - reg = <0x010000 0x0f0000>; - label = "failsafe bootloader"; - read-only; - }; - - partition@100000 { - reg = <0x100000 0x040000>; - label = "failsafe DP firmware"; - read-only; - }; - - partition@140000 { - reg = <0x140000 0x0a0000>; - label = "failsafe trusted firmware"; - read-only; - }; - - partition@1e0000 { - reg = <0x1e0000 0x020000>; - label = "reserved"; - read-only; - }; - - partition@200000 { - reg = <0x200000 0x010000>; - label = "configuration store"; - }; - - partition@210000 { - reg = <0x210000 0x1d0000>; - label = "bootloader"; - }; - - partition@3e0000 { - reg = <0x3e0000 0x020000>; - label = "bootloader environment"; + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + reg = <0x000000 0x010000>; + label = "rcw"; + read-only; + }; + + partition@10000 { + reg = <0x010000 0x1d0000>; + label = "failsafe bootloader"; + read-only; + }; + + partition@200000 { + reg = <0x200000 0x010000>; + label = "configuration store"; + }; + + partition@210000 { + reg = <0x210000 0x1d0000>; + label = "bootloader"; + }; + + partition@3e0000 { + reg = <0x3e0000 0x020000>; + label = "bootloader environment"; + }; }; }; }; @@ -191,7 +179,7 @@ &i2c0 { status = "okay"; - rtc@32 { + rtc: rtc@32 { compatible = "microcrystal,rv8803"; reg = <0x32>; }; diff --git 
a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts index fbcba9cb8503..bfd14b64567e 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts @@ -25,6 +25,7 @@ serial1 = &duart1; mmc0 = &esdhc; mmc1 = &esdhc1; + rtc1 = &ftm_alarm0; }; chosen { diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts index 41ae6e7675ba..9322c6ad8e4a 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts @@ -21,6 +21,7 @@ serial1 = &duart1; mmc0 = &esdhc; mmc1 = &esdhc1; + rtc1 = &ftm_alarm0; }; chosen { diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi index 262fbad8f0ec..eca06a0c3cf8 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi @@ -18,10 +18,6 @@ #address-cells = <2>; #size-cells = <2>; - aliases { - rtc1 = &ftm_alarm0; - }; - cpus { #address-cells = <1>; #size-cells = <0>; @@ -1027,7 +1023,7 @@ status = "disabled"; fixed-link { - speed = <1000>; + speed = <2500>; full-duplex; }; }; @@ -1114,6 +1110,18 @@ full-duplex; }; }; + + rcec@1f,0 { + reg = <0x00f800 0 0 0 0>; + /* IEP INT_A */ + interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>; + }; + }; + + /* Integrated Endpoint Register Block */ + ierb@1f0800000 { + compatible = "fsl,ls1028a-enetc-ierb"; + reg = <0x01 0xf0800000 0x0 0x10000>; }; rcpm: power-controller@1e34040 { diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi index 5a8a1dc4262d..28c51e521cb2 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi @@ -348,6 +348,7 @@ ranges = <0x0 0x00 0x1700000 0x100000>; reg = <0x00 0x1700000 0x0 0x100000>; interrupts = <0 75 0x4>; + dma-coherent; sec_jr0: jr@10000 { compatible = "fsl,sec-v5.4-job-ring", diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi index 1d6dfd189c7f..39458305e333 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi @@ -354,6 +354,7 @@ ranges = <0x0 0x00 0x1700000 0x100000>; reg = <0x00 0x1700000 0x0 0x100000>; interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>; + dma-coherent; sec_jr0: jr@10000 { compatible = "fsl,sec-v5.4-job-ring", diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi index 459dccad8326..afb455210bd0 100644 --- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi @@ -122,6 +122,30 @@ vcc-supply = <&sb_3v3>; }; }; + + sfp0_i2c: i2c@4 { + #address-cells = <1>; + #size-cells = <0>; + reg = <4>; + }; + + sfp1_i2c: i2c@5 { + #address-cells = <1>; + #size-cells = <0>; + reg = <5>; + }; + + sfp2_i2c: i2c@6 { + #address-cells = <1>; + #size-cells = <0>; + reg = <6>; + }; + + sfp3_i2c: i2c@7 { + #address-cells = <1>; + #size-cells = <0>; + reg = <7>; + }; }; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi index 2b63235ca627..17f8e733972a 100644 --- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi @@ -30,6 +30,54 @@ gpios = <&gpio2 6 GPIO_ACTIVE_LOW>; }; }; + + 
sfp0: sfp-0 { + compatible = "sff,sfp"; + i2c-bus = <&sfp0_i2c>; + mod-def0-gpio = <&gpio2 0 GPIO_ACTIVE_LOW>; + maximum-power-milliwatt = <2000>; + }; + + sfp1: sfp-1 { + compatible = "sff,sfp"; + i2c-bus = <&sfp1_i2c>; + mod-def0-gpio = <&gpio2 9 GPIO_ACTIVE_LOW>; + maximum-power-milliwatt = <2000>; + }; + + sfp2: sfp-2 { + compatible = "sff,sfp"; + i2c-bus = <&sfp2_i2c>; + mod-def0-gpio = <&gpio2 10 GPIO_ACTIVE_LOW>; + maximum-power-milliwatt = <2000>; + }; + + sfp3: sfp-3 { + compatible = "sff,sfp"; + i2c-bus = <&sfp3_i2c>; + mod-def0-gpio = <&gpio2 11 GPIO_ACTIVE_LOW>; + maximum-power-milliwatt = <2000>; + }; +}; + +&dpmac7 { + sfp = <&sfp0>; + managed = "in-band-status"; +}; + +&dpmac8 { + sfp = <&sfp1>; + managed = "in-band-status"; +}; + +&dpmac9 { + sfp = <&sfp2>; + managed = "in-band-status"; +}; + +&dpmac10 { + sfp = <&sfp3>; + managed = "in-band-status"; }; &emdio2 { @@ -44,6 +92,22 @@ status = "okay"; }; +&pcs_mdio7 { + status = "okay"; +}; + +&pcs_mdio8 { + status = "okay"; +}; + +&pcs_mdio9 { + status = "okay"; +}; + +&pcs_mdio10 { + status = "okay"; +}; + &sata0 { status = "okay"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-adma.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-adma.dtsi new file mode 100644 index 000000000000..9386d1a59e82 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8-ss-adma.dtsi @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2018-2020 NXP + * Dong Aisheng <aisheng.dong@nxp.com> + */ + +#include "imx8-ss-audio.dtsi" +#include "imx8-ss-dma.dtsi" diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi new file mode 100644 index 000000000000..6c8d75ef9250 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2018-2019 NXP + * Dong Aisheng <aisheng.dong@nxp.com> + */ + +#include <dt-bindings/clock/imx8-lpcg.h> +#include <dt-bindings/firmware/imx/rsrc.h> + +audio_subsys: bus@59000000 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x59000000 0x0 0x59000000 0x1000000>; + + audio_ipg_clk: clock-audio-ipg { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <120000000>; + clock-output-names = "audio_ipg_clk"; + }; + + dsp_lpcg: clock-controller@59580000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x59580000 0x10000>; + #clock-cells = <1>; + clocks = <&audio_ipg_clk>, + <&audio_ipg_clk>, + <&audio_ipg_clk>; + clock-indices = <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>, + <IMX_LPCG_CLK_7>; + clock-output-names = "dsp_lpcg_adb_clk", + "dsp_lpcg_ipg_clk", + "dsp_lpcg_core_clk"; + power-domains = <&pd IMX_SC_R_DSP>; + }; + + dsp_ram_lpcg: clock-controller@59590000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x59590000 0x10000>; + #clock-cells = <1>; + clocks = <&audio_ipg_clk>; + clock-indices = <IMX_LPCG_CLK_4>; + clock-output-names = "dsp_ram_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_DSP_RAM>; + }; + + dsp: dsp@596e8000 { + compatible = "fsl,imx8qxp-dsp"; + reg = <0x596e8000 0x88000>; + clocks = <&dsp_lpcg IMX_LPCG_CLK_5>, + <&dsp_ram_lpcg IMX_LPCG_CLK_4>, + <&dsp_lpcg IMX_LPCG_CLK_7>; + clock-names = "ipg", "ocram", "core"; + power-domains = <&pd IMX_SC_R_MU_13A>, + <&pd IMX_SC_R_MU_13B>, + <&pd IMX_SC_R_DSP>, + <&pd IMX_SC_R_DSP_RAM>; + mbox-names = "txdb0", "txdb1", + "rxdb0", "rxdb1"; + mboxes = <&lsio_mu13 2 0>, + <&lsio_mu13 2 1>, + <&lsio_mu13 3 0>, + <&lsio_mu13 3 1>; + memory-region = <&dsp_reserved>; + status = 
"disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi new file mode 100644 index 000000000000..e1e81ca0ca69 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2018-2019 NXP + * Dong Aisheng <aisheng.dong@nxp.com> + */ + +#include <dt-bindings/clock/imx8-lpcg.h> +#include <dt-bindings/firmware/imx/rsrc.h> + +conn_subsys: bus@5b000000 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x5b000000 0x0 0x5b000000 0x1000000>; + + conn_axi_clk: clock-conn-axi { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <333333333>; + clock-output-names = "conn_axi_clk"; + }; + + conn_ahb_clk: clock-conn-ahb { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <166666666>; + clock-output-names = "conn_ahb_clk"; + }; + + conn_ipg_clk: clock-conn-ipg { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <83333333>; + clock-output-names = "conn_ipg_clk"; + }; + + usdhc1: mmc@5b010000 { + interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>; + reg = <0x5b010000 0x10000>; + clocks = <&sdhc0_lpcg IMX_LPCG_CLK_4>, + <&sdhc0_lpcg IMX_LPCG_CLK_5>, + <&sdhc0_lpcg IMX_LPCG_CLK_0>; + clock-names = "ipg", "per", "ahb"; + power-domains = <&pd IMX_SC_R_SDHC_0>; + status = "disabled"; + }; + + usdhc2: mmc@5b020000 { + interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>; + reg = <0x5b020000 0x10000>; + clocks = <&sdhc1_lpcg IMX_LPCG_CLK_4>, + <&sdhc1_lpcg IMX_LPCG_CLK_5>, + <&sdhc1_lpcg IMX_LPCG_CLK_0>; + clock-names = "ipg", "per", "ahb"; + power-domains = <&pd IMX_SC_R_SDHC_1>; + fsl,tuning-start-tap = <20>; + fsl,tuning-step= <2>; + status = "disabled"; + }; + + usdhc3: mmc@5b030000 { + interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>; + reg = <0x5b030000 0x10000>; + clocks = <&sdhc2_lpcg IMX_LPCG_CLK_4>, + <&sdhc2_lpcg IMX_LPCG_CLK_5>, + <&sdhc2_lpcg IMX_LPCG_CLK_0>; + clock-names = "ipg", "per", "ahb"; + power-domains = <&pd IMX_SC_R_SDHC_2>; + status = "disabled"; + }; + + fec1: ethernet@5b040000 { + reg = <0x5b040000 0x10000>; + interrupts = <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&enet0_lpcg IMX_LPCG_CLK_4>, + <&enet0_lpcg IMX_LPCG_CLK_2>, + <&enet0_lpcg IMX_LPCG_CLK_1>, + <&enet0_lpcg IMX_LPCG_CLK_0>; + clock-names = "ipg", "ahb", "enet_clk_ref", "ptp"; + fsl,num-tx-queues=<3>; + fsl,num-rx-queues=<3>; + power-domains = <&pd IMX_SC_R_ENET_0>; + status = "disabled"; + }; + + fec2: ethernet@5b050000 { + reg = <0x5b050000 0x10000>; + interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&enet1_lpcg IMX_LPCG_CLK_4>, + <&enet1_lpcg IMX_LPCG_CLK_2>, + <&enet1_lpcg IMX_LPCG_CLK_1>, + <&enet1_lpcg IMX_LPCG_CLK_0>; + clock-names = "ipg", "ahb", "enet_clk_ref", "ptp"; + fsl,num-tx-queues=<3>; + fsl,num-rx-queues=<3>; + power-domains = <&pd IMX_SC_R_ENET_1>; + status = "disabled"; + }; + + /* LPCG clocks */ + sdhc0_lpcg: clock-controller@5b200000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5b200000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_SDHC_0 IMX_SC_PM_CLK_PER>, + <&conn_ipg_clk>, <&conn_axi_clk>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>, + <IMX_LPCG_CLK_5>; + clock-output-names = "sdhc0_lpcg_per_clk", + 
"sdhc0_lpcg_ipg_clk", + "sdhc0_lpcg_ahb_clk"; + power-domains = <&pd IMX_SC_R_SDHC_0>; + }; + + sdhc1_lpcg: clock-controller@5b210000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5b210000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_SDHC_1 IMX_SC_PM_CLK_PER>, + <&conn_ipg_clk>, <&conn_axi_clk>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>, + <IMX_LPCG_CLK_5>; + clock-output-names = "sdhc1_lpcg_per_clk", + "sdhc1_lpcg_ipg_clk", + "sdhc1_lpcg_ahb_clk"; + power-domains = <&pd IMX_SC_R_SDHC_1>; + }; + + sdhc2_lpcg: clock-controller@5b220000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5b220000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_SDHC_2 IMX_SC_PM_CLK_PER>, + <&conn_ipg_clk>, <&conn_axi_clk>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>, + <IMX_LPCG_CLK_5>; + clock-output-names = "sdhc2_lpcg_per_clk", + "sdhc2_lpcg_ipg_clk", + "sdhc2_lpcg_ahb_clk"; + power-domains = <&pd IMX_SC_R_SDHC_2>; + }; + + enet0_lpcg: clock-controller@5b230000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5b230000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_ENET_0 IMX_SC_PM_CLK_PER>, + <&clk IMX_SC_R_ENET_0 IMX_SC_PM_CLK_PER>, + <&conn_axi_clk>, <&conn_ipg_clk>, <&conn_ipg_clk>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>, + <IMX_LPCG_CLK_2>, <IMX_LPCG_CLK_4>, + <IMX_LPCG_CLK_5>; + clock-output-names = "enet0_ipg_root_clk", + "enet0_tx_clk", + "enet0_ahb_clk", + "enet0_ipg_clk", + "enet0_ipg_s_clk"; + power-domains = <&pd IMX_SC_R_ENET_0>; + }; + + enet1_lpcg: clock-controller@5b240000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5b240000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_ENET_1 IMX_SC_PM_CLK_PER>, + <&clk IMX_SC_R_ENET_1 IMX_SC_PM_CLK_PER>, + <&conn_axi_clk>, <&conn_ipg_clk>, <&conn_ipg_clk>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>, + <IMX_LPCG_CLK_2>, <IMX_LPCG_CLK_4>, + <IMX_LPCG_CLK_5>; + clock-output-names = "enet1_ipg_root_clk", + "enet1_tx_clk", + "enet1_ahb_clk", + "enet1_ipg_clk", + "enet1_ipg_s_clk"; + power-domains = <&pd IMX_SC_R_ENET_1>; + }; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-ddr.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-ddr.dtsi new file mode 100644 index 000000000000..8b5cad4e2700 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8-ss-ddr.dtsi @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2019-2020 NXP + * Dong Aisheng <aisheng.dong@nxp.com> + */ + +ddr_subsys: bus@5c000000 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x5c000000 0x0 0x5c000000 0x1000000>; + + ddr-pmu@5c020000 { + compatible = "fsl,imx8-ddr-pmu"; + reg = <0x5c020000 0x10000>; + interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>; + }; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi new file mode 100644 index 000000000000..960a802b8b6e --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2018-2019 NXP + * Dong Aisheng <aisheng.dong@nxp.com> + */ + +#include <dt-bindings/clock/imx8-lpcg.h> +#include <dt-bindings/firmware/imx/rsrc.h> + +dma_subsys: bus@5a000000 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x5a000000 0x0 0x5a000000 0x1000000>; + + dma_ipg_clk: clock-dma-ipg { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <120000000>; + clock-output-names = "dma_ipg_clk"; + }; + + lpuart0: serial@5a060000 { + reg = 
<0x5a060000 0x1000>; + interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&uart0_lpcg IMX_LPCG_CLK_4>, + <&uart0_lpcg IMX_LPCG_CLK_0>; + clock-names = "ipg", "baud"; + power-domains = <&pd IMX_SC_R_UART_0>; + status = "disabled"; + }; + + lpuart1: serial@5a070000 { + reg = <0x5a070000 0x1000>; + interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&uart1_lpcg IMX_LPCG_CLK_4>, + <&uart1_lpcg IMX_LPCG_CLK_0>; + clock-names = "ipg", "baud"; + power-domains = <&pd IMX_SC_R_UART_1>; + status = "disabled"; + }; + + lpuart2: serial@5a080000 { + reg = <0x5a080000 0x1000>; + interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&uart2_lpcg IMX_LPCG_CLK_4>, + <&uart2_lpcg IMX_LPCG_CLK_0>; + clock-names = "ipg", "baud"; + power-domains = <&pd IMX_SC_R_UART_2>; + status = "disabled"; + }; + + lpuart3: serial@5a090000 { + reg = <0x5a090000 0x1000>; + interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&uart3_lpcg IMX_LPCG_CLK_4>, + <&uart3_lpcg IMX_LPCG_CLK_0>; + clock-names = "ipg", "baud"; + power-domains = <&pd IMX_SC_R_UART_3>; + status = "disabled"; + }; + + uart0_lpcg: clock-controller@5a460000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5a460000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_UART_0 IMX_SC_PM_CLK_PER>, + <&dma_ipg_clk>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>; + clock-output-names = "uart0_lpcg_baud_clk", + "uart0_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_UART_0>; + }; + + uart1_lpcg: clock-controller@5a470000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5a470000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_UART_1 IMX_SC_PM_CLK_PER>, + <&dma_ipg_clk>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>; + clock-output-names = "uart1_lpcg_baud_clk", + "uart1_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_UART_1>; + }; + + uart2_lpcg: clock-controller@5a480000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5a480000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_UART_2 IMX_SC_PM_CLK_PER>, + <&dma_ipg_clk>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>; + clock-output-names = "uart2_lpcg_baud_clk", + "uart2_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_UART_2>; + }; + + uart3_lpcg: clock-controller@5a490000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5a490000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_UART_3 IMX_SC_PM_CLK_PER>, + <&dma_ipg_clk>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>; + clock-output-names = "uart3_lpcg_baud_clk", + "uart3_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_UART_3>; + }; + + i2c0: i2c@5a800000 { + reg = <0x5a800000 0x4000>; + interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&i2c0_lpcg IMX_LPCG_CLK_0>; + clock-names = "per"; + assigned-clocks = <&clk IMX_SC_R_I2C_0 IMX_SC_PM_CLK_PER>; + assigned-clock-rates = <24000000>; + power-domains = <&pd IMX_SC_R_I2C_0>; + status = "disabled"; + }; + + i2c1: i2c@5a810000 { + reg = <0x5a810000 0x4000>; + interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&i2c1_lpcg IMX_LPCG_CLK_0>; + clock-names = "per"; + assigned-clocks = <&clk IMX_SC_R_I2C_1 IMX_SC_PM_CLK_PER>; + assigned-clock-rates = <24000000>; + power-domains = <&pd IMX_SC_R_I2C_1>; + status = "disabled"; + }; + + i2c2: i2c@5a820000 { + reg = <0x5a820000 0x4000>; + interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&i2c2_lpcg IMX_LPCG_CLK_0>; + clock-names = "per"; + assigned-clocks = <&clk IMX_SC_R_I2C_2 IMX_SC_PM_CLK_PER>; + assigned-clock-rates = <24000000>; + power-domains = <&pd IMX_SC_R_I2C_2>; + 
status = "disabled"; + }; + + i2c3: i2c@5a830000 { + reg = <0x5a830000 0x4000>; + interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&i2c3_lpcg IMX_LPCG_CLK_0>; + clock-names = "per"; + assigned-clocks = <&clk IMX_SC_R_I2C_3 IMX_SC_PM_CLK_PER>; + assigned-clock-rates = <24000000>; + power-domains = <&pd IMX_SC_R_I2C_3>; + status = "disabled"; + }; + + i2c0_lpcg: clock-controller@5ac00000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5ac00000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_I2C_0 IMX_SC_PM_CLK_PER>, + <&dma_ipg_clk>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>; + clock-output-names = "i2c0_lpcg_clk", + "i2c0_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_I2C_0>; + }; + + i2c1_lpcg: clock-controller@5ac10000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5ac10000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_I2C_1 IMX_SC_PM_CLK_PER>, + <&dma_ipg_clk>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>; + clock-output-names = "i2c1_lpcg_clk", + "i2c1_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_I2C_1>; + }; + + i2c2_lpcg: clock-controller@5ac20000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5ac20000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_I2C_2 IMX_SC_PM_CLK_PER>, + <&dma_ipg_clk>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>; + clock-output-names = "i2c2_lpcg_clk", + "i2c2_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_I2C_2>; + }; + + i2c3_lpcg: clock-controller@5ac30000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5ac30000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_I2C_3 IMX_SC_PM_CLK_PER>, + <&dma_ipg_clk>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>; + clock-output-names = "i2c3_lpcg_clk", + "i2c3_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_I2C_3>; + }; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi new file mode 100644 index 000000000000..ee4e585a9c39 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi @@ -0,0 +1,311 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2018-2020 NXP + * Dong Aisheng <aisheng.dong@nxp.com> + */ + +#include <dt-bindings/clock/imx8-lpcg.h> +#include <dt-bindings/firmware/imx/rsrc.h> + +lsio_subsys: bus@5d000000 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x5d000000 0x0 0x5d000000 0x1000000>; + + lsio_mem_clk: clock-lsio-mem { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <200000000>; + clock-output-names = "lsio_mem_clk"; + }; + + lsio_bus_clk: clock-lsio-bus { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <100000000>; + clock-output-names = "lsio_bus_clk"; + }; + + lsio_gpio0: gpio@5d080000 { + reg = <0x5d080000 0x10000>; + interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + power-domains = <&pd IMX_SC_R_GPIO_0>; + }; + + lsio_gpio1: gpio@5d090000 { + reg = <0x5d090000 0x10000>; + interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + power-domains = <&pd IMX_SC_R_GPIO_1>; + }; + + lsio_gpio2: gpio@5d0a0000 { + reg = <0x5d0a0000 0x10000>; + interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + power-domains = <&pd IMX_SC_R_GPIO_2>; + }; + + lsio_gpio3: gpio@5d0b0000 { + reg = <0x5d0b0000 0x10000>; 
+ interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + power-domains = <&pd IMX_SC_R_GPIO_3>; + }; + + lsio_gpio4: gpio@5d0c0000 { + reg = <0x5d0c0000 0x10000>; + interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + power-domains = <&pd IMX_SC_R_GPIO_4>; + }; + + lsio_gpio5: gpio@5d0d0000 { + reg = <0x5d0d0000 0x10000>; + interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + power-domains = <&pd IMX_SC_R_GPIO_5>; + }; + + lsio_gpio6: gpio@5d0e0000 { + reg = <0x5d0e0000 0x10000>; + interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + power-domains = <&pd IMX_SC_R_GPIO_6>; + }; + + lsio_gpio7: gpio@5d0f0000 { + reg = <0x5d0f0000 0x10000>; + interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + power-domains = <&pd IMX_SC_R_GPIO_7>; + }; + + lsio_mu0: mailbox@5d1b0000 { + reg = <0x5d1b0000 0x10000>; + interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>; + #mbox-cells = <2>; + status = "disabled"; + }; + + lsio_mu1: mailbox@5d1c0000 { + reg = <0x5d1c0000 0x10000>; + interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>; + #mbox-cells = <2>; + }; + + lsio_mu2: mailbox@5d1d0000 { + reg = <0x5d1d0000 0x10000>; + interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>; + #mbox-cells = <2>; + status = "disabled"; + }; + + lsio_mu3: mailbox@5d1e0000 { + reg = <0x5d1e0000 0x10000>; + interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>; + #mbox-cells = <2>; + status = "disabled"; + }; + + lsio_mu4: mailbox@5d1f0000 { + reg = <0x5d1f0000 0x10000>; + interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>; + #mbox-cells = <2>; + status = "disabled"; + }; + + lsio_mu13: mailbox@5d280000 { + reg = <0x5d280000 0x10000>; + interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>; + #mbox-cells = <2>; + power-domains = <&pd IMX_SC_R_MU_13A>; + }; + + /* LPCG clocks */ + pwm0_lpcg: clock-controller@5d400000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5d400000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_PWM_0 IMX_SC_PM_CLK_PER>, + <&clk IMX_SC_R_PWM_0 IMX_SC_PM_CLK_PER>, + <&clk IMX_SC_R_PWM_0 IMX_SC_PM_CLK_PER>, + <&lsio_bus_clk>, + <&clk IMX_SC_R_PWM_0 IMX_SC_PM_CLK_PER>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>, + <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>, + <IMX_LPCG_CLK_6>; + clock-output-names = "pwm0_lpcg_ipg_clk", + "pwm0_lpcg_ipg_hf_clk", + "pwm0_lpcg_ipg_s_clk", + "pwm0_lpcg_ipg_slv_clk", + "pwm0_lpcg_ipg_mstr_clk"; + power-domains = <&pd IMX_SC_R_PWM_0>; + }; + + pwm1_lpcg: clock-controller@5d410000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5d410000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_PWM_1 IMX_SC_PM_CLK_PER>, + <&clk IMX_SC_R_PWM_1 IMX_SC_PM_CLK_PER>, + <&clk IMX_SC_R_PWM_1 IMX_SC_PM_CLK_PER>, + <&lsio_bus_clk>, + <&clk IMX_SC_R_PWM_1 IMX_SC_PM_CLK_PER>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>, + <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>, + <IMX_LPCG_CLK_6>; + clock-output-names = "pwm1_lpcg_ipg_clk", + "pwm1_lpcg_ipg_hf_clk", + "pwm1_lpcg_ipg_s_clk", + "pwm1_lpcg_ipg_slv_clk", + "pwm1_lpcg_ipg_mstr_clk"; + power-domains = <&pd IMX_SC_R_PWM_1>; + }; + + pwm2_lpcg: clock-controller@5d420000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5d420000 0x10000>; + 
#clock-cells = <1>; + clocks = <&clk IMX_SC_R_PWM_2 IMX_SC_PM_CLK_PER>, + <&clk IMX_SC_R_PWM_2 IMX_SC_PM_CLK_PER>, + <&clk IMX_SC_R_PWM_2 IMX_SC_PM_CLK_PER>, + <&lsio_bus_clk>, + <&clk IMX_SC_R_PWM_2 IMX_SC_PM_CLK_PER>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>, + <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>, + <IMX_LPCG_CLK_6>; + clock-output-names = "pwm2_lpcg_ipg_clk", + "pwm2_lpcg_ipg_hf_clk", + "pwm2_lpcg_ipg_s_clk", + "pwm2_lpcg_ipg_slv_clk", + "pwm2_lpcg_ipg_mstr_clk"; + power-domains = <&pd IMX_SC_R_PWM_2>; + }; + + pwm3_lpcg: clock-controller@5d430000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5d430000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_PWM_3 IMX_SC_PM_CLK_PER>, + <&clk IMX_SC_R_PWM_3 IMX_SC_PM_CLK_PER>, + <&clk IMX_SC_R_PWM_3 IMX_SC_PM_CLK_PER>, + <&lsio_bus_clk>, + <&clk IMX_SC_R_PWM_3 IMX_SC_PM_CLK_PER>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>, + <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>, + <IMX_LPCG_CLK_6>; + clock-output-names = "pwm3_lpcg_ipg_clk", + "pwm3_lpcg_ipg_hf_clk", + "pwm3_lpcg_ipg_s_clk", + "pwm3_lpcg_ipg_slv_clk", + "pwm3_lpcg_ipg_mstr_clk"; + power-domains = <&pd IMX_SC_R_PWM_3>; + }; + + pwm4_lpcg: clock-controller@5d440000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5d440000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_PWM_4 IMX_SC_PM_CLK_PER>, + <&clk IMX_SC_R_PWM_4 IMX_SC_PM_CLK_PER>, + <&clk IMX_SC_R_PWM_4 IMX_SC_PM_CLK_PER>, + <&lsio_bus_clk>, + <&clk IMX_SC_R_PWM_4 IMX_SC_PM_CLK_PER>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>, + <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>, + <IMX_LPCG_CLK_6>; + clock-output-names = "pwm4_lpcg_ipg_clk", + "pwm4_lpcg_ipg_hf_clk", + "pwm4_lpcg_ipg_s_clk", + "pwm4_lpcg_ipg_slv_clk", + "pwm4_lpcg_ipg_mstr_clk"; + power-domains = <&pd IMX_SC_R_PWM_4>; + }; + + pwm5_lpcg: clock-controller@5d450000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5d450000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_PWM_5 IMX_SC_PM_CLK_PER>, + <&clk IMX_SC_R_PWM_5 IMX_SC_PM_CLK_PER>, + <&clk IMX_SC_R_PWM_5 IMX_SC_PM_CLK_PER>, + <&lsio_bus_clk>, + <&clk IMX_SC_R_PWM_5 IMX_SC_PM_CLK_PER>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>, + <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>, + <IMX_LPCG_CLK_6>; + clock-output-names = "pwm5_lpcg_ipg_clk", + "pwm5_lpcg_ipg_hf_clk", + "pwm5_lpcg_ipg_s_clk", + "pwm5_lpcg_ipg_slv_clk", + "pwm5_lpcg_ipg_mstr_clk"; + power-domains = <&pd IMX_SC_R_PWM_5>; + }; + + pwm6_lpcg: clock-controller@5d460000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5d460000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_PWM_6 IMX_SC_PM_CLK_PER>, + <&clk IMX_SC_R_PWM_6 IMX_SC_PM_CLK_PER>, + <&clk IMX_SC_R_PWM_6 IMX_SC_PM_CLK_PER>, + <&lsio_bus_clk>, + <&clk IMX_SC_R_PWM_6 IMX_SC_PM_CLK_PER>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>, + <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>, + <IMX_LPCG_CLK_6>; + clock-output-names = "pwm6_lpcg_ipg_clk", + "pwm6_lpcg_ipg_hf_clk", + "pwm6_lpcg_ipg_s_clk", + "pwm6_lpcg_ipg_slv_clk", + "pwm6_lpcg_ipg_mstr_clk"; + power-domains = <&pd IMX_SC_R_PWM_6>; + }; + + pwm7_lpcg: clock-controller@5d470000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5d470000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_PWM_7 IMX_SC_PM_CLK_PER>, + <&clk IMX_SC_R_PWM_7 IMX_SC_PM_CLK_PER>, + <&clk IMX_SC_R_PWM_7 IMX_SC_PM_CLK_PER>, + <&lsio_bus_clk>, + <&clk IMX_SC_R_PWM_7 IMX_SC_PM_CLK_PER>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>, + <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>, + <IMX_LPCG_CLK_6>; + clock-output-names = 
"pwm7_lpcg_ipg_clk", + "pwm7_lpcg_ipg_hf_clk", + "pwm7_lpcg_ipg_s_clk", + "pwm7_lpcg_ipg_slv_clk", + "pwm7_lpcg_ipg_mstr_clk"; + power-domains = <&pd IMX_SC_R_PWM_7>; + }; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm-ctouch2.dts b/arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm-ctouch2.dts new file mode 100644 index 000000000000..5389d6f2beba --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm-ctouch2.dts @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2019 NXP + * Copyright (c) 2019 Engicam srl + * Copyright (c) 2020 Amarula Solutions(India) + */ + +/dts-v1/; +#include "imx8mm.dtsi" +#include "imx8mm-icore-mx8mm.dtsi" + +/ { + model = "Engicam i.Core MX8M Mini C.TOUCH 2.0"; + compatible = "engicam,icore-mx8mm-ctouch2", "engicam,icore-mx8mm", + "fsl,imx8mm"; + + chosen { + stdout-path = &uart2; + }; +}; + +&fec1 { + status = "okay"; +}; + +&i2c2 { + clock-frequency = <400000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c2>; + status = "okay"; +}; + +&i2c4 { + clock-frequency = <100000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c4>; + status = "okay"; +}; + +&iomuxc { + pinctrl_i2c2: i2c2grp { + fsl,pins = < + MX8MM_IOMUXC_I2C2_SCL_I2C2_SCL 0x400001c3 + MX8MM_IOMUXC_I2C2_SDA_I2C2_SDA 0x400001c3 + >; + }; + + pinctrl_i2c4: i2c4grp { + fsl,pins = < + MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x400001c3 + MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x400001c3 + >; + }; + + pinctrl_uart2: uart2grp { + fsl,pins = < + MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x140 + MX8MM_IOMUXC_UART2_TXD_UART2_DCE_TX 0x140 + >; + }; + + pinctrl_usdhc1_gpio: usdhc1gpiogrp { + fsl,pins = < + MX8MM_IOMUXC_GPIO1_IO06_GPIO1_IO6 0x41 + >; + }; + + pinctrl_usdhc1: usdhc1grp { + fsl,pins = < + MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x190 + MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d0 + MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d0 + MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d0 + MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d0 + MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d0 + >; + }; +}; + +&uart2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart2>; + status = "okay"; +}; + +/* SD */ +&usdhc1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usdhc1>, <&pinctrl_usdhc1_gpio>; + cd-gpios = <&gpio1 6 GPIO_ACTIVE_LOW>; + max-frequency = <50000000>; + bus-width = <4>; + no-1-8-v; + pm-ignore-notify; + keep-power-in-suspend; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm-edimm2.2.dts b/arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm-edimm2.2.dts new file mode 100644 index 000000000000..a4a2ada14835 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm-edimm2.2.dts @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2019 NXP + * Copyright (c) 2019 Engicam srl + * Copyright (c) 2020 Amarula Solutions(India) + */ + +/dts-v1/; +#include "imx8mm.dtsi" +#include "imx8mm-icore-mx8mm.dtsi" + +/ { + model = "Engicam i.Core MX8M Mini EDIMM2.2 Starter Kit"; + compatible = "engicam,icore-mx8mm-edimm2.2", "engicam,icore-mx8mm", + "fsl,imx8mm"; + + chosen { + stdout-path = &uart2; + }; +}; + +&fec1 { + status = "okay"; +}; + +&i2c2 { + clock-frequency = <400000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c2>; + status = "okay"; +}; + +&i2c4 { + clock-frequency = <100000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c4>; + status = "okay"; +}; + +&iomuxc { + pinctrl_i2c2: i2c2grp { + fsl,pins = < + MX8MM_IOMUXC_I2C2_SCL_I2C2_SCL 0x400001c3 + MX8MM_IOMUXC_I2C2_SDA_I2C2_SDA 
0x400001c3 + >; + }; + + pinctrl_i2c4: i2c4grp { + fsl,pins = < + MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x400001c3 + MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x400001c3 + >; + }; + + pinctrl_uart2: uart2grp { + fsl,pins = < + MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x140 + MX8MM_IOMUXC_UART2_TXD_UART2_DCE_TX 0x140 + >; + }; + + pinctrl_usdhc1_gpio: usdhc1gpiogrp { + fsl,pins = < + MX8MM_IOMUXC_GPIO1_IO06_GPIO1_IO6 0x41 + >; + }; + + pinctrl_usdhc1: usdhc1grp { + fsl,pins = < + MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x190 + MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d0 + MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d0 + MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d0 + MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d0 + MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d0 + >; + }; +}; + +&uart2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart2>; + status = "okay"; +}; + +/* SD */ +&usdhc1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usdhc1>, <&pinctrl_usdhc1_gpio>; + cd-gpios = <&gpio1 6 GPIO_ACTIVE_LOW>; + max-frequency = <50000000>; + bus-width = <4>; + no-1-8-v; + pm-ignore-notify; + keep-power-in-suspend; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm.dtsi new file mode 100644 index 000000000000..b40148d728ea --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm.dtsi @@ -0,0 +1,232 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2018 NXP + * Copyright (c) 2019 Engicam srl + * Copyright (c) 2020 Amarula Solutons(India) + */ + +/ { + compatible = "engicam,icore-mx8mm", "fsl,imx8mm"; +}; + +&A53_0 { + cpu-supply = <®_buck4>; +}; + +&A53_1 { + cpu-supply = <®_buck4>; +}; + +&A53_2 { + cpu-supply = <®_buck4>; +}; + +&A53_3 { + cpu-supply = <®_buck4>; +}; + +&fec1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_fec1>; + phy-mode = "rgmii-id"; + phy-handle = <ðphy>; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + ethphy: ethernet-phy@3 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <3>; + reset-gpios = <&gpio3 7 GPIO_ACTIVE_LOW>; + reset-assert-us = <10000>; + }; + }; +}; + +&i2c1 { + clock-frequency = <400000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c1>; + status = "okay"; + + pmic@8 { + compatible = "nxp,pf8121a"; + reg = <0x08>; + + regulators { + reg_ldo1: ldo1 { + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <5000000>; + regulator-always-on; + regulator-boot-on; + }; + + reg_ldo2: ldo2 { + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <5000000>; + regulator-always-on; + regulator-boot-on; + }; + + reg_ldo3: ldo3 { + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <5000000>; + regulator-always-on; + regulator-boot-on; + }; + + reg_ldo4: ldo4 { + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <5000000>; + regulator-always-on; + regulator-boot-on; + }; + + reg_buck1: buck1 { + regulator-min-microvolt = <400000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + }; + + reg_buck2: buck2 { + regulator-min-microvolt = <400000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + }; + + reg_buck3: buck3 { + regulator-min-microvolt = <400000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + }; + + reg_buck4: buck4 { + regulator-min-microvolt = <400000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + }; + + reg_buck5: buck5 { + regulator-min-microvolt = 
<400000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + }; + + reg_buck6: buck6 { + regulator-min-microvolt = <400000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + }; + + reg_buck7: buck7 { + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + regulator-boot-on; + }; + + reg_vsnvs: vsnvs { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + regulator-boot-on; + }; + }; + }; +}; + +&iomuxc { + pinctrl_fec1: fec1grp { + fsl,pins = < + MX8MM_IOMUXC_ENET_MDC_ENET1_MDC 0x3 + MX8MM_IOMUXC_ENET_MDIO_ENET1_MDIO 0x3 + MX8MM_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x1f + MX8MM_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x1f + MX8MM_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x1f + MX8MM_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x1f + MX8MM_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x91 + MX8MM_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x91 + MX8MM_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x91 + MX8MM_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x91 + MX8MM_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x1f + MX8MM_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91 + MX8MM_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91 + MX8MM_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f + MX8MM_IOMUXC_NAND_DATA01_GPIO3_IO7 0x19 + >; + }; + + pinctrl_i2c1: i2c1grp { + fsl,pins = < + MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x400001c3 + MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x400001c3 + >; + }; + + pinctrl_usdhc3: usdhc3grp { + fsl,pins = < + MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x190 + MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d0 + MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d0 + MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d0 + MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d0 + MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d0 + MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d0 + MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d0 + MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d0 + MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d0 + MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d0 + MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x190 + >; + }; + + pinctrl_usdhc3_100mhz: usdhc3-100mhzgrp { + fsl,pins = < + MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x194 + MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d4 + MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d4 + MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d4 + MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d4 + MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d4 + MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d4 + MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d4 + MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d4 + MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d4 + MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x194 + >; + }; + + pinctrl_usdhc3_200mhz: usdhc3-200mhzgrp { + fsl,pins = < + MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x196 + MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d6 + MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d6 + MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d6 + MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d6 + MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d6 + MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d6 + MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d6 + MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d6 + MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d6 + MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x196 + >; + }; +}; + +/* eMMC */ +&usdhc3 { + pinctrl-names = "default", "state_100mhz", "state_200mhz"; + pinctrl-0 = <&pinctrl_usdhc3>; + pinctrl-1 = <&pinctrl_usdhc3_100mhz>; + pinctrl-2 = <&pinctrl_usdhc3_200mhz>; + bus-width = <8>; + non-removable; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts b/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts index 
c0c384d76147..74c09891600f 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts @@ -9,6 +9,53 @@ / { model = "Boundary Devices i.MX8MMini Nitrogen8MM Rev2"; compatible = "boundary,imx8mm-nitrogen8mm", "fsl,imx8mm"; + + reg_vref_1v8: regulator-vref-1v8 { + compatible = "regulator-fixed"; + regulator-name = "vref-1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + + reg_vref_3v3: regulator-vref-3v3 { + compatible = "regulator-fixed"; + regulator-name = "vref-3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; + + reg_wlan_vmmc: regulator-wlan-vmmc { + compatible = "regulator-fixed"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_wlan_vmmc>; + regulator-name = "reg_wlan_vmmc"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio3 20 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + sound-wm8960 { + audio-cpu = <&sai1>; + audio-codec = <&wm8960>; + audio-routing = + "Headphone Jack", "HP_L", + "Headphone Jack", "HP_R", + "Ext Spk", "SPK_LP", + "Ext Spk", "SPK_LN", + "Ext Spk", "SPK_RP", + "Ext Spk", "SPK_RN", + "RINPUT1", "Mic Jack", + "Mic Jack", "MICB"; + compatible = "fsl,imx-audio-wm8960"; + /* JD2: hp detect high for headphone*/ + hp-det-gpios = <&gpio4 28 GPIO_ACTIVE_HIGH>; + /* Jack is not stuffed */ + mic-det-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>; + model = "wm8960-audio"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_sound_wm8960>; + }; }; &A53_0 { @@ -27,6 +74,17 @@ cpu-supply = <®_buck3>; }; +/* J15 */ +&ecspi2 { + assigned-clocks = <&clk IMX8MM_CLK_ECSPI2>; + assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_40M>; + assigned-clock-rates = <40000000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ecspi2>; + cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>; + status = "okay"; +}; + &fec1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_fec1>; @@ -47,6 +105,12 @@ }; }; +&flexspi { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_flexspi>; + status = "okay"; +}; + &i2c1 { clock-frequency = <100000>; pinctrl-names = "default"; @@ -156,7 +220,7 @@ #address-cells = <1>; #size-cells = <0>; - i2c3 { + i2c3@0 { reg = <0>; #address-cells = <1>; #size-cells = <0>; @@ -173,12 +237,88 @@ }; }; +&i2c4 { + clock-frequency = <100000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c4>; + status = "okay"; + + wm8960: codec@1a { + compatible = "wlf,wm8960"; + reg = <0x1a>; + clocks = <&clk IMX8MM_CLK_SAI1_ROOT>; + clock-names = "mclk1"; + wlf,shared-lrclk; + #sound-dai-cells = <0>; + }; +}; + +&pwm1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pwm1>; + status = "okay"; +}; + +&pwm2 { + assigned-clocks = <&clk IMX8MM_CLK_PWM2>; + assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_40M>; + assigned-clock-rates = <40000000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pwm2>; + status = "okay"; +}; + +&pwm3 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pwm3>; + status = "okay"; +}; + +&pwm4 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pwm4>; + status = "okay"; +}; + +&sai1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_sai1>; + status = "okay"; +}; + +&sai2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_sai2>; + status = "okay"; +}; + +/* BT */ +&uart1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart1>; + uart-has-rtscts; + status = "okay"; +}; + /* console */ &uart2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart2>; - 
assigned-clocks = <&clk IMX8MM_CLK_UART2>; - assigned-clock-parents = <&clk IMX8MM_CLK_24M>; + status = "okay"; +}; + +/* J15 */ +&uart3 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart3>; + uart-has-rtscts; + status = "okay"; +}; + +/* J9 */ +&uart4 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart4>; status = "okay"; }; @@ -191,6 +331,8 @@ pinctrl-0 = <&pinctrl_usdhc1>; pinctrl-1 = <&pinctrl_usdhc1_100mhz>; pinctrl-2 = <&pinctrl_usdhc1_200mhz>; + vmmc-supply = <®_vref_3v3>; + vqmmc-supply = <®_vref_1v8>; status = "okay"; }; @@ -206,6 +348,48 @@ status = "okay"; }; +/* wlan */ +&usdhc3 { + bus-width = <4>; + sdhci-caps-mask = <0x2 0x0>; + non-removable; + pinctrl-names = "default", "state_100mhz", "state_200mhz"; + pinctrl-0 = <&pinctrl_usdhc3>; + pinctrl-1 = <&pinctrl_usdhc3_100mhz>; + pinctrl-2 = <&pinctrl_usdhc3_200mhz>; + vmmc-supply = <®_wlan_vmmc>; + vqmmc-supply = <®_vref_1v8>; + status = "okay"; +}; + +/* USB OTG port */ +&usbotg1 { + dr_mode = "otg"; + over-current-active-low; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usbotg1>; + power-active-high; + status = "okay"; +}; + +/* USB Host port */ +&usbotg2 { + dr_mode = "host"; + over-current-active-low; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usbotg2>; + power-active-high; + /* + * FIXME: having USB2 enabled hangs the boot just after: + *[ 1.655941] ci_hdrc ci_hdrc.1: EHCI Host Controller + *[ 1.660880] ci_hdrc ci_hdrc.1: new USB bus registered, assigned bus number 2 + *[ 1.681505] ci_hdrc ci_hdrc.1: USB 2.0 started, EHCI 1.00 + *[ 1.687730] hub 2-0:1.0: USB hub found + *[ 1.691528] hub 2-0:1.0: 1 port detected + */ + status = "disabled"; +}; + &wdog1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_wdog>; @@ -217,6 +401,15 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_hog>; + pinctrl_ecspi2: ecspi2grp { + fsl,pins = < + MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x140 + MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x19 + MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x19 + MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x19 + >; + }; + pinctrl_fec1: fec1grp { fsl,pins = < MX8MM_IOMUXC_ENET_MDC_ENET1_MDC 0x3 @@ -237,6 +430,17 @@ >; }; + pinctrl_flexspi: flexspigrp { + fsl,pins = < + MX8MM_IOMUXC_NAND_ALE_QSPI_A_SCLK 0x1c2 + MX8MM_IOMUXC_NAND_CE0_B_QSPI_A_SS0_B 0x82 + MX8MM_IOMUXC_NAND_DATA00_QSPI_A_DATA0 0x82 + MX8MM_IOMUXC_NAND_DATA01_QSPI_A_DATA1 0x82 + MX8MM_IOMUXC_NAND_DATA02_QSPI_A_DATA2 0x82 + MX8MM_IOMUXC_NAND_DATA03_QSPI_A_DATA3 0x82 + >; + }; + pinctrl_hog: hoggrp { fsl,pins = < MX8MM_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x09 @@ -258,12 +462,86 @@ >; }; + pinctrl_i2c4: i2c4grp { + fsl,pins = < + MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x400001c3 + MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x400001c3 + >; + }; + pinctrl_i2c3a_rv4162: i2c3a-rv4162grp { fsl,pins = < MX8MM_IOMUXC_SAI2_RXC_GPIO4_IO22 0x1c0 >; }; + pinctrl_pwm1: pwm1grp { + fsl,pins = < + MX8MM_IOMUXC_SPDIF_EXT_CLK_PWM1_OUT 0x16 + >; + }; + + pinctrl_pwm2: pwm2grp { + fsl,pins = < + MX8MM_IOMUXC_SPDIF_RX_PWM2_OUT 0x16 + >; + }; + + pinctrl_pwm3: pwm3grp { + fsl,pins = < + MX8MM_IOMUXC_SPDIF_TX_PWM3_OUT 0x16 + >; + }; + + pinctrl_pwm4: pwm4grp { + fsl,pins = < + MX8MM_IOMUXC_SAI3_MCLK_PWM4_OUT 0x16 + >; + }; + + pinctrl_reg_wlan_vmmc: reg-wlan-vmmcgrp { + fsl,pins = < + MX8MM_IOMUXC_SAI5_RXC_GPIO3_IO20 0x16 + >; + }; + + pinctrl_sai1: sai1grp { + fsl,pins = < + /* wm8960 */ + MX8MM_IOMUXC_SAI1_MCLK_SAI1_MCLK 0xd6 + MX8MM_IOMUXC_SAI1_TXFS_SAI1_TX_SYNC 0xd6 + MX8MM_IOMUXC_SAI1_TXC_SAI1_TX_BCLK 0xd6 + MX8MM_IOMUXC_SAI1_TXD0_SAI1_TX_DATA0 0xd6 + 
MX8MM_IOMUXC_SAI1_RXD0_SAI1_RX_DATA0 0xd6 + >; + }; + + pinctrl_sai2: sai2grp { + fsl,pins = < + /* Bluetooth PCM */ + MX8MM_IOMUXC_SAI2_TXFS_SAI2_TX_SYNC 0xd6 + MX8MM_IOMUXC_SAI2_TXC_SAI2_TX_BCLK 0xd6 + MX8MM_IOMUXC_SAI2_TXD0_SAI2_TX_DATA0 0xd6 + MX8MM_IOMUXC_SAI2_RXD0_SAI2_RX_DATA0 0xd6 + >; + }; + + pinctrl_sound_wm8960: sound-wm8960grp { + fsl,pins = < + MX8MM_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x80 + MX8MM_IOMUXC_SAI3_RXFS_GPIO4_IO28 0x80 + >; + }; + + pinctrl_uart1: uart1grp { + fsl,pins = < + MX8MM_IOMUXC_UART1_RXD_UART1_DCE_RX 0x140 + MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x140 + MX8MM_IOMUXC_UART3_RXD_UART1_DCE_CTS_B 0x140 + MX8MM_IOMUXC_UART3_TXD_UART1_DCE_RTS_B 0x140 + >; + }; + pinctrl_uart2: uart2grp { fsl,pins = < MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x140 @@ -271,6 +549,36 @@ >; }; + pinctrl_uart3: uart3grp { + fsl,pins = < + MX8MM_IOMUXC_ECSPI1_SCLK_UART3_DCE_RX 0x140 + MX8MM_IOMUXC_ECSPI1_MOSI_UART3_DCE_TX 0x140 + MX8MM_IOMUXC_ECSPI1_SS0_UART3_DCE_RTS_B 0x140 + MX8MM_IOMUXC_ECSPI1_MISO_UART3_DCE_CTS_B 0x140 + >; + }; + + pinctrl_uart4: uart4grp { + fsl,pins = < + MX8MM_IOMUXC_UART4_RXD_UART4_DCE_RX 0x140 + MX8MM_IOMUXC_UART4_TXD_UART4_DCE_TX 0x140 + >; + }; + + pinctrl_usbotg1: usbotg1grp { + fsl,pins = < + MX8MM_IOMUXC_GPIO1_IO12_USB1_OTG_PWR 0x16 + MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x156 + >; + }; + + pinctrl_usbotg2: usbotg2grp { + fsl,pins = < + MX8MM_IOMUXC_GPIO1_IO14_USB2_OTG_PWR 0x16 + MX8MM_IOMUXC_GPIO1_IO15_USB2_OTG_OC 0x15 + >; + }; + pinctrl_usdhc1: usdhc1grp { fsl,pins = < MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x190 diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h index 5ccc4cc91959..a003e6af3353 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h +++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h @@ -124,7 +124,7 @@ #define MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x0A4 0x30C 0x000 0x0 0x0 #define MX8MM_IOMUXC_SD1_CMD_GPIO2_IO1 0x0A4 0x30C 0x000 0x5 0x0 #define MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x0A8 0x310 0x000 0x0 0x0 -#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x31 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x310 0x000 0x5 0x0 #define MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x0AC 0x314 0x000 0x0 0x0 #define MX8MM_IOMUXC_SD1_DATA1_GPIO2_IO3 0x0AC 0x314 0x000 0x5 0x0 #define MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x0B0 0x318 0x000 0x0 0x0 diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi index 6bf1d15ba16a..a27e02bee6b4 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi @@ -887,7 +887,7 @@ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MM_CLK_QSPI_ROOT>, <&clk IMX8MM_CLK_QSPI_ROOT>; - clock-names = "fspi", "fspi_en"; + clock-names = "fspi_en", "fspi"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi index de2cd0e3201c..c35eeaff958f 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi @@ -262,8 +262,12 @@ &usdhc1 { #address-cells = <1>; #size-cells = <0>; - pinctrl-names = "default"; + pinctrl-names = "default", "state_100mhz", "state_200mhz"; pinctrl-0 = <&pinctrl_usdhc1>; + pinctrl-1 = <&pinctrl_usdhc1_100mhz>; + pinctrl-2 = <&pinctrl_usdhc1_200mhz>; + vmmc-supply = <&buck4_reg>; + vqmmc-supply = <&buck5_reg>; bus-width = <4>; non-removable; cap-power-off-card; diff --git 
a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi index 16ea50089567..4dac4da38f4c 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi @@ -898,7 +898,7 @@ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_QSPI_ROOT>, <&clk IMX8MN_CLK_QSPI_ROOT>; - clock-names = "fspi", "fspi_en"; + clock-names = "fspi_en", "fspi"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts index 7db4273cc88b..2c28e589677e 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts @@ -98,6 +98,8 @@ reg = <1>; eee-broken-1000t; reset-gpios = <&gpio4 2 GPIO_ACTIVE_LOW>; + reset-assert-us = <10000>; + reset-deassert-us = <80000>; }; }; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts b/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts index 0e1a6d953389..984a6b9ded8d 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts @@ -16,7 +16,7 @@ "phytec,imx8mp-phycore-som", "fsl,imx8mp"; chosen { - stdout-path = &uart2; + stdout-path = &uart1; }; reg_usdhc2_vmmc: regulator-usdhc2 { @@ -33,9 +33,33 @@ }; }; +&eqos { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_eqos>; + phy-mode = "rgmii-id"; + phy-handle = <ðphy0>; + status = "okay"; + + mdio { + compatible = "snps,dwmac-mdio"; + #address-cells = <1>; + #size-cells = <0>; + + ethphy0: ethernet-phy@1 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <0x1>; + ti,rx-internal-delay = <DP83867_RGMIIDCTL_1_50_NS>; + ti,tx-internal-delay = <DP83867_RGMIIDCTL_1_50_NS>; + ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>; + ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>; + enet-phy-lane-no-swap; + }; + }; +}; + &i2c2 { clock-frequency = <400000>; - pinctrl-names = "default"; + pinctrl-names = "default", "gpio"; pinctrl-0 = <&pinctrl_i2c2>; pinctrl-1 = <&pinctrl_i2c2_gpio>; sda-gpios = <&gpio5 17 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; @@ -71,9 +95,9 @@ }; /* debug console */ -&uart2 { +&uart1 { pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_uart2>; + pinctrl-0 = <&pinctrl_uart1>; status = "okay"; }; @@ -90,6 +114,26 @@ }; &iomuxc { + pinctrl_eqos: eqosgrp { + fsl,pins = < + MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC 0x3 + MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO 0x3 + MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0 0x91 + MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1 0x91 + MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2 0x91 + MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3 0x91 + MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x91 + MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x91 + MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0 0x1f + MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1 0x1f + MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2 0x1f + MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3 0x1f + MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x1f + MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x1f + MX8MP_IOMUXC_SAI1_MCLK__GPIO4_IO20 0x10 + >; + }; + pinctrl_i2c2: i2c2grp { fsl,pins = < MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001c3 @@ -110,10 +154,10 @@ >; }; - pinctrl_uart2: uart2grp { + pinctrl_uart1: uart1grp { fsl,pins = < - MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX 0x49 - MX8MP_IOMUXC_UART2_TXD__UART2_DCE_TX 0x49 + MX8MP_IOMUXC_UART1_RXD__UART1_DCE_RX 0x49 + MX8MP_IOMUXC_UART1_TXD__UART1_DCE_TX 0x49 >; }; diff --git 
a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi index 44a8c2337cee..f3965ec5b31d 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi @@ -67,7 +67,7 @@ &i2c1 { clock-frequency = <400000>; - pinctrl-names = "default"; + pinctrl-names = "default", "gpio"; pinctrl-0 = <&pinctrl_i2c1>; pinctrl-1 = <&pinctrl_i2c1_gpio>; sda-gpios = <&gpio5 15 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi index c7523fd4eae9..c2d51a46cb3c 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi @@ -18,6 +18,7 @@ aliases { ethernet0 = &fec; + ethernet1 = &eqos; gpio0 = &gpio1; gpio1 = &gpio2; gpio2 = &gpio3; @@ -312,6 +313,22 @@ status = "disabled"; }; + wdog2: watchdog@30290000 { + compatible = "fsl,imx8mp-wdt", "fsl,imx21-wdt"; + reg = <0x30290000 0x10000>; + interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk IMX8MP_CLK_WDOG2_ROOT>; + status = "disabled"; + }; + + wdog3: watchdog@302a0000 { + compatible = "fsl,imx8mp-wdt", "fsl,imx21-wdt"; + reg = <0x302a0000 0x10000>; + interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk IMX8MP_CLK_WDOG3_ROOT>; + status = "disabled"; + }; + iomuxc: pinctrl@30330000 { compatible = "fsl,imx8mp-iomuxc"; reg = <0x30330000 0x10000>; @@ -786,6 +803,28 @@ nvmem_macaddr_swap; status = "disabled"; }; + + eqos: ethernet@30bf0000 { + compatible = "nxp,imx8mp-dwmac-eqos", "snps,dwmac-5.10a"; + reg = <0x30bf0000 0x10000>; + interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "eth_wake_irq", "macirq"; + clocks = <&clk IMX8MP_CLK_ENET_QOS_ROOT>, + <&clk IMX8MP_CLK_QOS_ENET_ROOT>, + <&clk IMX8MP_CLK_ENET_QOS_TIMER>, + <&clk IMX8MP_CLK_ENET_QOS>; + clock-names = "stmmaceth", "pclk", "ptp_ref", "tx"; + assigned-clocks = <&clk IMX8MP_CLK_ENET_AXI>, + <&clk IMX8MP_CLK_ENET_QOS_TIMER>, + <&clk IMX8MP_CLK_ENET_QOS>; + assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_266M>, + <&clk IMX8MP_SYS_PLL2_100M>, + <&clk IMX8MP_SYS_PLL2_125M>; + assigned-clock-rates = <0>, <100000000>, <125000000>; + intf_mode = <&gpr 0x4>; + status = "disabled"; + }; }; gic: interrupt-controller@38800000 { @@ -828,7 +867,7 @@ ranges; status = "disabled"; - usb_dwc3_0: dwc3@38100000 { + usb_dwc3_0: usb@38100000 { compatible = "snps,dwc3"; reg = <0x38100000 0x10000>; clocks = <&clk IMX8MP_CLK_HSIO_AXI>, @@ -869,7 +908,7 @@ ranges; status = "disabled"; - usb_dwc3_1: dwc3@38200000 { + usb_dwc3_1: usb@38200000 { compatible = "snps,dwc3"; reg = <0x38200000 0x10000>; clocks = <&clk IMX8MP_CLK_HSIO_AXI>, diff --git a/arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts b/arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts new file mode 100644 index 000000000000..f593e4ff62e1 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts @@ -0,0 +1,613 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Device Tree File for the Kontron pitx-imx8m board. 
+ * + * Copyright (C) 2021 Heiko Thiery <heiko.thiery@gmail.com> + */ + +/dts-v1/; + +#include "imx8mq.dtsi" +#include <dt-bindings/net/ti-dp83867.h> + +/ { + model = "Kontron pITX-imx8m"; + compatible = "kontron,pitx-imx8m", "fsl,imx8mq"; + + aliases { + i2c0 = &i2c1; + i2c1 = &i2c2; + i2c2 = &i2c3; + mmc0 = &usdhc1; + mmc1 = &usdhc2; + serial0 = &uart1; + serial1 = &uart2; + serial2 = &uart3; + spi0 = &qspi0; + spi1 = &ecspi2; + }; + + chosen { + stdout-path = "serial2:115200n8"; + }; + + pcie0_refclk: pcie0-clock { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <100000000>; + }; + + pcie1_refclk: pcie1-clock { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <100000000>; + }; + + reg_usdhc2_vmmc: regulator-usdhc2-vmmc { + compatible = "regulator-fixed"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_usdhc2>; + regulator-name = "V_3V3_SD"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>; + off-on-delay-us = <20000>; + enable-active-high; + }; +}; + +&ecspi2 { + #address-cells = <1>; + #size-cells = <0>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ecspi2 &pinctrl_ecspi2_cs>; + cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>; + status = "okay"; + + tpm@0 { + compatible = "infineon,slb9670"; + reg = <0>; + spi-max-frequency = <43000000>; + }; +}; + +&fec1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_fec1>; + phy-mode = "rgmii-id"; + phy-handle = <ðphy0>; + fsl,magic-packet; + status = "okay"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + ethphy0: ethernet-phy@0 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <0>; + ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>; + ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_75_NS>; + ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>; + reset-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>; + reset-assert-us = <10>; + reset-deassert-us = <280>; + }; + }; +}; + +&i2c1 { + clock-frequency = <400000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c1>; + status = "okay"; + + pmic@8 { + compatible = "fsl,pfuze100"; + fsl,pfuze-support-disable-sw; + reg = <0x8>; + + regulators { + sw1a_reg: sw1ab { + regulator-name = "V_0V9_GPU"; + regulator-min-microvolt = <825000>; + regulator-max-microvolt = <1100000>; + }; + + sw1c_reg: sw1c { + regulator-name = "V_0V9_VPU"; + regulator-min-microvolt = <825000>; + regulator-max-microvolt = <1100000>; + }; + + sw2_reg: sw2 { + regulator-name = "V_1V1_NVCC_DRAM"; + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <1100000>; + regulator-always-on; + }; + + sw3a_reg: sw3ab { + regulator-name = "V_1V0_DRAM"; + regulator-min-microvolt = <825000>; + regulator-max-microvolt = <1100000>; + regulator-always-on; + }; + + sw4_reg: sw4 { + regulator-name = "V_1V8_S0"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + }; + + swbst_reg: swbst { + regulator-name = "NC"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5150000>; + }; + + snvs_reg: vsnvs { + regulator-name = "V_0V9_SNVS"; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <3000000>; + regulator-always-on; + }; + + vref_reg: vrefddr { + regulator-name = "V_0V55_VREF_DDR"; + regulator-always-on; + }; + + vgen1_reg: vgen1 { + regulator-name = "V_1V5_CSI"; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1550000>; + }; + + vgen2_reg: vgen2 { + regulator-name = "V_0V9_PHY"; + regulator-min-microvolt = 
<850000>; + regulator-max-microvolt = <975000>; + regulator-always-on; + }; + + vgen3_reg: vgen3 { + regulator-name = "V_1V8_PHY"; + regulator-min-microvolt = <1675000>; + regulator-max-microvolt = <1975000>; + regulator-always-on; + }; + + vgen4_reg: vgen4 { + regulator-name = "V_1V8_VDDA"; + regulator-min-microvolt = <1625000>; + regulator-max-microvolt = <1875000>; + regulator-always-on; + }; + + vgen5_reg: vgen5 { + regulator-name = "V_3V3_PHY"; + regulator-min-microvolt = <3075000>; + regulator-max-microvolt = <3625000>; + regulator-always-on; + }; + + vgen6_reg: vgen6 { + regulator-name = "V_2V8_CAM"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + }; + }; + + fan-controller@1b { + compatible = "maxim,max6650"; + reg = <0x1b>; + maxim,fan-microvolt = <5000000>; + }; + + rtc@32 { + compatible = "microcrystal,rv8803"; + reg = <0x32>; + }; + + sensor@4b { + compatible = "national,lm75b"; + reg = <0x4b>; + }; + + eeprom@51 { + compatible = "atmel,24c32"; + reg = <0x51>; + pagesize = <32>; + }; +}; + +&i2c2 { + clock-frequency = <100000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c2>; + status = "okay"; +}; + +&i2c3 { + clock-frequency = <100000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c3>; + status = "okay"; +}; + +/* M.2 B-key slot */ +&pcie0 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pcie0>; + reset-gpio = <&gpio1 9 GPIO_ACTIVE_LOW>; + clocks = <&clk IMX8MQ_CLK_PCIE1_ROOT>, + <&clk IMX8MQ_CLK_PCIE1_AUX>, + <&clk IMX8MQ_CLK_PCIE1_PHY>, + <&pcie0_refclk>; + clock-names = "pcie", "pcie_aux", "pcie_phy", "pcie_bus"; + status = "okay"; +}; + +/* Intel Ethernet Controller I210/I211 */ +&pcie1 { + clocks = <&clk IMX8MQ_CLK_PCIE2_ROOT>, + <&clk IMX8MQ_CLK_PCIE2_AUX>, + <&clk IMX8MQ_CLK_PCIE2_PHY>, + <&pcie1_refclk>; + clock-names = "pcie", "pcie_aux", "pcie_phy", "pcie_bus"; + fsl,max-link-speed = <1>; + status = "okay"; +}; + +&pgc_gpu { + power-supply = <&sw1a_reg>; +}; + +&pgc_vpu { + power-supply = <&sw1c_reg>; +}; + +&qspi0 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_qspi>; + status = "okay"; + + flash@0 { + compatible = "jedec,spi-nor"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0>; + spi-tx-bus-width = <4>; + spi-rx-bus-width = <4>; + m25p,fast-read; + spi-max-frequency = <50000000>; + }; +}; + +&snvs_pwrkey { + status = "okay"; +}; + +&uart1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart1>; + assigned-clocks = <&clk IMX8MQ_CLK_UART1>; + assigned-clock-parents = <&clk IMX8MQ_SYS1_PLL_80M>; + status = "okay"; +}; + +&uart2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart2>; + assigned-clocks = <&clk IMX8MQ_CLK_UART2>; + assigned-clock-parents = <&clk IMX8MQ_SYS1_PLL_80M>; + status = "okay"; +}; + +&uart3 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart3>; + fsl,uart-has-rtscts; + assigned-clocks = <&clk IMX8MQ_CLK_UART3>; + assigned-clock-parents = <&clk IMX8MQ_SYS1_PLL_80M>; + status = "okay"; +}; + +&usb3_phy0 { + status = "okay"; +}; + +&usb3_phy1 { + status = "okay"; +}; + +&usb_dwc3_0 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usb0>; + dr_mode = "otg"; + hnp-disable; + srp-disable; + adp-disable; + maximum-speed = "high-speed"; + status = "okay"; +}; + +&usb_dwc3_1 { + dr_mode = "host"; + status = "okay"; +}; + +&usdhc1 { + assigned-clocks = <&clk IMX8MQ_CLK_USDHC1>; + assigned-clock-rates = <400000000>; + pinctrl-names = "default", "state_100mhz", "state_200mhz"; + pinctrl-0 = <&pinctrl_usdhc1>; + pinctrl-1 = 
<&pinctrl_usdhc1_100mhz>; + pinctrl-2 = <&pinctrl_usdhc1_200mhz>; + vqmmc-supply = <&sw4_reg>; + bus-width = <8>; + non-removable; + no-sd; + no-sdio; + status = "okay"; +}; + +&usdhc2 { + assigned-clocks = <&clk IMX8MQ_CLK_USDHC2>; + assigned-clock-rates = <200000000>; + pinctrl-names = "default", "state_100mhz", "state_200mhz"; + pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>; + pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>; + pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>; + bus-width = <4>; + cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio2 20 GPIO_ACTIVE_HIGH>; + vmmc-supply = <®_usdhc2_vmmc>; + status = "okay"; +}; + +&wdog1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_wdog>; + fsl,ext-reset-output; + status = "okay"; +}; + +&iomuxc { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_hog>; + + pinctrl_hog: hoggrp { + fsl,pins = < + MX8MQ_IOMUXC_NAND_CE1_B_GPIO3_IO2 0x19 /* TPM Reset */ + MX8MQ_IOMUXC_NAND_CE3_B_GPIO3_IO4 0x19 /* USB2 Hub Reset */ + >; + }; + + pinctrl_gpio: gpiogrp { + fsl,pins = < + MX8MQ_IOMUXC_NAND_CLE_GPIO3_IO5 0x19 /* GPIO0 */ + MX8MQ_IOMUXC_NAND_RE_B_GPIO3_IO15 0x19 /* GPIO1 */ + MX8MQ_IOMUXC_NAND_WE_B_GPIO3_IO17 0x19 /* GPIO2 */ + MX8MQ_IOMUXC_NAND_WP_B_GPIO3_IO18 0x19 /* GPIO3 */ + MX8MQ_IOMUXC_NAND_READY_B_GPIO3_IO16 0x19 /* GPIO4 */ + MX8MQ_IOMUXC_NAND_DATA04_GPIO3_IO10 0x19 /* GPIO5 */ + MX8MQ_IOMUXC_NAND_DATA05_GPIO3_IO11 0x19 /* GPIO6 */ + MX8MQ_IOMUXC_NAND_DATA06_GPIO3_IO12 0x19 /* GPIO7 */ + >; + }; + + pinctrl_pcie0: pcie0grp { + fsl,pins = < + MX8MQ_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x16 /* PCIE_PERST */ + MX8MQ_IOMUXC_UART4_TXD_GPIO5_IO29 0x16 /* W_DISABLE */ + >; + }; + + pinctrl_reg_usdhc2: regusdhc2gpiogrp { + fsl,pins = < + MX8MQ_IOMUXC_SD2_RESET_B_GPIO2_IO19 0x41 + >; + }; + + pinctrl_fec1: fec1grp { + fsl,pins = < + MX8MQ_IOMUXC_ENET_MDC_ENET1_MDC 0x3 + MX8MQ_IOMUXC_ENET_MDIO_ENET1_MDIO 0x23 + MX8MQ_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x1f + MX8MQ_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x1f + MX8MQ_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x1f + MX8MQ_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x1f + MX8MQ_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x91 + MX8MQ_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x91 + MX8MQ_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x91 + MX8MQ_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x91 + MX8MQ_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x1f + MX8MQ_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91 + MX8MQ_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91 + MX8MQ_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f + MX8MQ_IOMUXC_GPIO1_IO11_GPIO1_IO11 0x16 + MX8MQ_IOMUXC_GPIO1_IO15_GPIO1_IO15 0x16 + >; + }; + + pinctrl_i2c1: i2c1grp { + fsl,pins = < + MX8MQ_IOMUXC_I2C1_SCL_I2C1_SCL 0x4000007f + MX8MQ_IOMUXC_I2C1_SDA_I2C1_SDA 0x4000007f + >; + }; + + pinctrl_i2c2: i2c2grp { + fsl,pins = < + MX8MQ_IOMUXC_I2C2_SCL_I2C2_SCL 0x4000007f + MX8MQ_IOMUXC_I2C2_SDA_I2C2_SDA 0x4000007f + >; + }; + + pinctrl_i2c3: i2c3grp { + fsl,pins = < + MX8MQ_IOMUXC_I2C3_SCL_I2C3_SCL 0x4000007f + MX8MQ_IOMUXC_I2C3_SDA_I2C3_SDA 0x4000007f + >; + }; + + pinctrl_qspi: qspigrp { + fsl,pins = < + MX8MQ_IOMUXC_NAND_ALE_QSPI_A_SCLK 0x82 + MX8MQ_IOMUXC_NAND_CE0_B_QSPI_A_SS0_B 0x82 + MX8MQ_IOMUXC_NAND_DATA00_QSPI_A_DATA0 0x82 + MX8MQ_IOMUXC_NAND_DATA01_QSPI_A_DATA1 0x82 + MX8MQ_IOMUXC_NAND_DATA02_QSPI_A_DATA2 0x82 + MX8MQ_IOMUXC_NAND_DATA03_QSPI_A_DATA3 0x82 + >; + }; + + pinctrl_ecspi2: ecspi2grp { + fsl,pins = < + MX8MQ_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x19 + MX8MQ_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x19 + MX8MQ_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x19 + >; + }; + + pinctrl_ecspi2_cs: ecspi2csgrp { + fsl,pins = < + 
MX8MQ_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x19 + >; + }; + + pinctrl_uart1: uart1grp { + fsl,pins = < + MX8MQ_IOMUXC_UART1_TXD_UART1_DCE_TX 0x49 + MX8MQ_IOMUXC_UART1_RXD_UART1_DCE_RX 0x49 + >; + }; + + pinctrl_uart2: uart2grp { + fsl,pins = < + MX8MQ_IOMUXC_UART2_TXD_UART2_DCE_TX 0x49 + MX8MQ_IOMUXC_UART2_RXD_UART2_DCE_RX 0x49 + >; + }; + + pinctrl_uart3: uart3grp { + fsl,pins = < + MX8MQ_IOMUXC_UART3_TXD_UART3_DCE_TX 0x49 + MX8MQ_IOMUXC_UART3_RXD_UART3_DCE_RX 0x49 + MX8MQ_IOMUXC_ECSPI1_SS0_UART3_DCE_RTS_B 0x49 + MX8MQ_IOMUXC_ECSPI1_MISO_UART3_DCE_CTS_B 0x49 + >; + }; + + pinctrl_usdhc1: usdhc1grp { + fsl,pins = < + MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x83 + MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc3 + MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc3 + MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc3 + MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc3 + MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc3 + MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc3 + MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc3 + MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc3 + MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc3 + MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x83 + MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 + >; + }; + + pinctrl_usdhc1_100mhz: usdhc1-100grp { + fsl,pins = < + MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x8d + MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xcd + MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xcd + MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xcd + MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xcd + MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xcd + MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xcd + MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xcd + MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xcd + MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xcd + MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x8d + MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 + >; + }; + + pinctrl_usdhc1_200mhz: usdhc1-200grp { + fsl,pins = < + MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x9f + MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xdf + MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xdf + MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xdf + MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xdf + MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xdf + MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xdf + MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xdf + MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xdf + MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xdf + MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x9f + MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 + >; + }; + + pinctrl_usdhc2_gpio: usdhc2gpiogrp { + fsl,pins = < + MX8MQ_IOMUXC_SD2_CD_B_GPIO2_IO12 0x41 + MX8MQ_IOMUXC_SD2_WP_GPIO2_IO20 0x19 + >; + }; + + pinctrl_usdhc2: usdhc2grp { + fsl,pins = < + MX8MQ_IOMUXC_SD2_CLK_USDHC2_CLK 0x83 + MX8MQ_IOMUXC_SD2_CMD_USDHC2_CMD 0xc3 + MX8MQ_IOMUXC_SD2_DATA0_USDHC2_DATA0 0xc3 + MX8MQ_IOMUXC_SD2_DATA1_USDHC2_DATA1 0xc3 + MX8MQ_IOMUXC_SD2_DATA2_USDHC2_DATA2 0xc3 + MX8MQ_IOMUXC_SD2_DATA3_USDHC2_DATA3 0xc3 + MX8MQ_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xc1 + >; + }; + + pinctrl_usdhc2_100mhz: usdhc2-100grp { + fsl,pins = < + MX8MQ_IOMUXC_SD2_CLK_USDHC2_CLK 0x8d + MX8MQ_IOMUXC_SD2_CMD_USDHC2_CMD 0xcd + MX8MQ_IOMUXC_SD2_DATA0_USDHC2_DATA0 0xcd + MX8MQ_IOMUXC_SD2_DATA1_USDHC2_DATA1 0xcd + MX8MQ_IOMUXC_SD2_DATA2_USDHC2_DATA2 0xcd + MX8MQ_IOMUXC_SD2_DATA3_USDHC2_DATA3 0xcd + MX8MQ_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xc1 + >; + }; + + pinctrl_usdhc2_200mhz: usdhc2-200grp { + fsl,pins = < + MX8MQ_IOMUXC_SD2_CLK_USDHC2_CLK 0x9f + MX8MQ_IOMUXC_SD2_CMD_USDHC2_CMD 0xdf + MX8MQ_IOMUXC_SD2_DATA0_USDHC2_DATA0 0xdf + MX8MQ_IOMUXC_SD2_DATA1_USDHC2_DATA1 0xdf + MX8MQ_IOMUXC_SD2_DATA2_USDHC2_DATA2 0xdf + MX8MQ_IOMUXC_SD2_DATA3_USDHC2_DATA3 0xdf + MX8MQ_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xc1 + >; + }; + + pinctrl_usb0: usb0grp 
{ + fsl,pins = < + MX8MQ_IOMUXC_GPIO1_IO12_USB1_OTG_PWR 0x19 + MX8MQ_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x19 + >; + }; + + pinctrl_wdog: wdoggrp { + fsl,pins = < + MX8MQ_IOMUXC_GPIO1_IO02_WDOG1_WDOG_B 0xc6 + >; + }; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts index dd217a0760e9..622f3787a186 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts +++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts @@ -50,13 +50,6 @@ linux,code = <KEY_VOLUMEDOWN>; }; - hp-det { - label = "HP_DET"; - gpios = <&gpio3 20 GPIO_ACTIVE_LOW>; - wakeup-source; - linux,code = <KEY_HP>; - }; - wwan-wake { label = "WWAN_WAKE"; gpios = <&gpio3 8 GPIO_ACTIVE_LOW>; @@ -163,21 +156,35 @@ #sound-dai-cells = <0>; }; + mic_mux: mic-mux { + compatible = "simple-audio-mux"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_micsel>; + mux-gpios = <&gpio5 5 GPIO_ACTIVE_LOW>; + sound-name-prefix = "Mic Mux"; + }; + sound { compatible = "simple-audio-card"; - simple-audio-card,name = "sgtl5000"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_hpdet>; + simple-audio-card,aux-devs = <&speaker_amp>, <&mic_mux>; + simple-audio-card,name = "Librem 5 Devkit"; simple-audio-card,format = "i2s"; simple-audio-card,widgets = - "Microphone", "Microphone Jack", - "Headphone", "Headphone Jack", - "Speaker", "Speaker Ext", - "Line", "Line In Jack"; + "Microphone", "Builtin Microphone", + "Microphone", "Headset Microphone", + "Headphone", "Headphones", + "Speaker", "Builtin Speaker"; simple-audio-card,routing = - "MIC_IN", "Microphone Jack", - "Microphone Jack", "Mic Bias", - "LINE_IN", "Line In Jack", - "Headphone Jack", "HP_OUT", - "Speaker Ext", "LINE_OUT"; + "MIC_IN", "Mic Mux OUT", + "Mic Mux IN1", "Headset Microphone", + "Mic Mux IN2", "Builtin Microphone", + "Mic Mux OUT", "Mic Bias", + "Headphones", "HP_OUT", + "Builtin Speaker", "Speaker Amp OUTR", + "Speaker Amp INR", "LINE_OUT"; + simple-audio-card,hp-det-gpio = <&gpio3 20 GPIO_ACTIVE_HIGH>; simple-audio-card,cpu { sound-dai = <&sai2>; @@ -207,6 +214,15 @@ }; }; + speaker_amp: speaker-amp { + compatible = "simple-audio-amplifier"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_spkamp>; + VCC-supply = <®_3v3_p>; + sound-name-prefix = "Speaker Amp"; + enable-gpios = <&gpio5 3 GPIO_ACTIVE_HIGH>; + }; + vibrator { compatible = "gpio-vibrator"; pinctrl-names = "default"; @@ -315,7 +331,6 @@ regulator-min-microvolt = <700000>; regulator-max-microvolt = <1300000>; regulator-boot-on; - regulator-enable-ramp-delay = <200>; rohm,dvs-run-voltage = <900000>; }; @@ -610,7 +625,6 @@ fsl,pins = < MX8MQ_IOMUXC_SAI2_RXFS_GPIO4_IO21 0x16 MX8MQ_IOMUXC_SAI2_RXC_GPIO4_IO22 0x16 - MX8MQ_IOMUXC_SAI5_RXC_GPIO3_IO20 0x180 /* HP_DET */ MX8MQ_IOMUXC_NAND_DATA02_GPIO3_IO8 0x80 /* nWoWWAN */ >; }; @@ -621,6 +635,12 @@ >; }; + pinctrl_hpdet: hpdetgrp { + fsl,pins = < + MX8MQ_IOMUXC_SAI5_RXC_GPIO3_IO20 0xC0 /* HP_DET */ + >; + }; + pinctrl_i2c1: i2c1grp { fsl,pins = < MX8MQ_IOMUXC_I2C1_SCL_I2C1_SCL 0x4000001f @@ -641,6 +661,18 @@ >; }; + pinctrl_micsel: micselgrp { + fsl,pins = < + MX8MQ_IOMUXC_SPDIF_EXT_CLK_GPIO5_IO5 0xc6 /* MIC_SEL */ + >; + }; + + pinctrl_spkamp: spkamp { + fsl,pins = < + MX8MQ_IOMUXC_SPDIF_TX_GPIO5_IO3 0x81 /* MUTE */ + >; + }; + pinctrl_pmic: pmicgrp { fsl,pins = < MX8MQ_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x80 /* PMIC intr */ diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r2.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r2.dts index d77fc5df3f06..73bd431cbd6a 100644 
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r2.dts +++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r2.dts @@ -25,5 +25,5 @@ }; &proximity { - proximity-near-level = <220>; + proximity-near-level = <120>; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts index 0d38327043f8..cd3c3edd48fa 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts +++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts @@ -28,6 +28,10 @@ ti,termination-current = <144000>; /* uA */ }; +&buck3_reg { + regulator-always-on; +}; + &proximity { proximity-near-level = <25>; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi index 06a4799b6aeb..460ef0d86540 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi @@ -258,6 +258,25 @@ compatible = "jedec,spi-nor"; reg = <0>; spi-max-frequency = <1000000>; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "protected0"; + reg = <0x0 0x30000>; + read-only; + }; + + partition@30000 { + label = "protected1"; + reg = <0x30000 0x10000>; + read-only; + }; + + partition@40000 { + label = "rw"; + reg = <0x40000 0x1C0000>; + }; }; }; @@ -267,8 +286,9 @@ pmic-5v-hog { gpio-hog; - gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>; + gpios = <1 GPIO_ACTIVE_HIGH>; input; + lane-mapping = "pmic-5v"; }; }; @@ -1051,8 +1071,6 @@ assigned-clocks = <&clk IMX8MQ_CLK_SAI2>; assigned-clock-parents = <&clk IMX8MQ_AUDIO_PLL1_OUT>; assigned-clock-rates = <24576000>; - assigned-clocks = <&clk IMX8MQ_AUDIO_PLL1>, <&clk IMX8MQ_AUDIO_PLL2>; - assigned-clock-rates = <786432000>, <722534400>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h index b94b02080a34..68e8fa172974 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h +++ b/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h @@ -130,7 +130,7 @@ #define MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0x0A4 0x30C 0x000 0x0 0x0 #define MX8MQ_IOMUXC_SD1_CMD_GPIO2_IO1 0x0A4 0x30C 0x000 0x5 0x0 #define MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x0A8 0x310 0x000 0x0 0x0 -#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x31 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x310 0x000 0x5 0x0 #define MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x0AC 0x314 0x000 0x0 0x0 #define MX8MQ_IOMUXC_SD1_DATA1_GPIO2_IO3 0x0AC 0x314 0x000 0x5 0x0 #define MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x0B0 0x318 0x000 0x0 0x0 diff --git a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts new file mode 100644 index 000000000000..ce9d3f0b98fc --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2018-2019 NXP + * Dong Aisheng <aisheng.dong@nxp.com> + */ + +/dts-v1/; + +#include "imx8qm.dtsi" + +/ { + model = "Freescale i.MX8QM MEK"; + compatible = "fsl,imx8qm-mek", "fsl,imx8qm"; + + chosen { + stdout-path = &lpuart0; + }; + + cpus { + /delete-node/ cpu-map; + /delete-node/ cpu@100; + /delete-node/ cpu@101; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x00000000 0x80000000 0 0x40000000>; + }; + + reg_usdhc2_vmmc: usdhc2-vmmc { + compatible = "regulator-fixed"; + regulator-name = "SD1_SPWR"; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + gpio = <&lsio_gpio4 19 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; +}; + 
+&lpuart0 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lpuart0>; + status = "okay"; +}; + +&fec1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_fec1>; + phy-mode = "rgmii-id"; + phy-handle = <ðphy0>; + fsl,magic-packet; + status = "okay"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + ethphy0: ethernet-phy@0 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <0>; + }; + + ethphy1: ethernet-phy@1 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <1>; + }; + }; +}; + +&usdhc1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usdhc1>; + bus-width = <8>; + no-sd; + no-sdio; + non-removable; + status = "okay"; +}; + +&usdhc2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usdhc2>; + bus-width = <4>; + vmmc-supply = <®_usdhc2_vmmc>; + cd-gpios = <&lsio_gpio4 22 GPIO_ACTIVE_LOW>; + wp-gpios = <&lsio_gpio4 21 GPIO_ACTIVE_HIGH>; + status = "okay"; +}; + +&iomuxc { + pinctrl_fec1: fec1grp { + fsl,pins = < + IMX8QM_ENET0_MDC_CONN_ENET0_MDC 0x06000020 + IMX8QM_ENET0_MDIO_CONN_ENET0_MDIO 0x06000020 + IMX8QM_ENET0_RGMII_TX_CTL_CONN_ENET0_RGMII_TX_CTL 0x06000020 + IMX8QM_ENET0_RGMII_TXC_CONN_ENET0_RGMII_TXC 0x06000020 + IMX8QM_ENET0_RGMII_TXD0_CONN_ENET0_RGMII_TXD0 0x06000020 + IMX8QM_ENET0_RGMII_TXD1_CONN_ENET0_RGMII_TXD1 0x06000020 + IMX8QM_ENET0_RGMII_TXD2_CONN_ENET0_RGMII_TXD2 0x06000020 + IMX8QM_ENET0_RGMII_TXD3_CONN_ENET0_RGMII_TXD3 0x06000020 + IMX8QM_ENET0_RGMII_RXC_CONN_ENET0_RGMII_RXC 0x06000020 + IMX8QM_ENET0_RGMII_RX_CTL_CONN_ENET0_RGMII_RX_CTL 0x06000020 + IMX8QM_ENET0_RGMII_RXD0_CONN_ENET0_RGMII_RXD0 0x06000020 + IMX8QM_ENET0_RGMII_RXD1_CONN_ENET0_RGMII_RXD1 0x06000020 + IMX8QM_ENET0_RGMII_RXD2_CONN_ENET0_RGMII_RXD2 0x06000020 + IMX8QM_ENET0_RGMII_RXD3_CONN_ENET0_RGMII_RXD3 0x06000020 + >; + }; + + pinctrl_lpuart0: lpuart0grp { + fsl,pins = < + IMX8QM_UART0_RX_DMA_UART0_RX 0x06000020 + IMX8QM_UART0_TX_DMA_UART0_TX 0x06000020 + >; + }; + + pinctrl_usdhc1: usdhc1grp { + fsl,pins = < + IMX8QM_EMMC0_CLK_CONN_EMMC0_CLK 0x06000041 + IMX8QM_EMMC0_CMD_CONN_EMMC0_CMD 0x00000021 + IMX8QM_EMMC0_DATA0_CONN_EMMC0_DATA0 0x00000021 + IMX8QM_EMMC0_DATA1_CONN_EMMC0_DATA1 0x00000021 + IMX8QM_EMMC0_DATA2_CONN_EMMC0_DATA2 0x00000021 + IMX8QM_EMMC0_DATA3_CONN_EMMC0_DATA3 0x00000021 + IMX8QM_EMMC0_DATA4_CONN_EMMC0_DATA4 0x00000021 + IMX8QM_EMMC0_DATA5_CONN_EMMC0_DATA5 0x00000021 + IMX8QM_EMMC0_DATA6_CONN_EMMC0_DATA6 0x00000021 + IMX8QM_EMMC0_DATA7_CONN_EMMC0_DATA7 0x00000021 + IMX8QM_EMMC0_STROBE_CONN_EMMC0_STROBE 0x00000041 + >; + }; + + pinctrl_usdhc2: usdhc2grp { + fsl,pins = < + IMX8QM_USDHC1_CLK_CONN_USDHC1_CLK 0x06000041 + IMX8QM_USDHC1_CMD_CONN_USDHC1_CMD 0x00000021 + IMX8QM_USDHC1_DATA0_CONN_USDHC1_DATA0 0x00000021 + IMX8QM_USDHC1_DATA1_CONN_USDHC1_DATA1 0x00000021 + IMX8QM_USDHC1_DATA2_CONN_USDHC1_DATA2 0x00000021 + IMX8QM_USDHC1_DATA3_CONN_USDHC1_DATA3 0x00000021 + IMX8QM_USDHC1_VSELECT_CONN_USDHC1_VSELECT 0x00000021 + >; + }; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-conn.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-conn.dtsi new file mode 100644 index 000000000000..42637a45701c --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-conn.dtsi @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2019-2020 NXP + * Dong Aisheng <aisheng.dong@nxp.com> + */ + +&fec1 { + compatible = "fsl,imx8qm-fec", "fsl,imx6sx-fec"; +}; + +&fec2 { + compatible = "fsl,imx8qm-fec", "fsl,imx6sx-fec"; +}; + +&usdhc1 { + compatible = "fsl,imx8qm-usdhc", "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc"; +}; + +&usdhc2 { + compatible = 
"fsl,imx8qm-usdhc", "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc"; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi new file mode 100644 index 000000000000..bbe5f5ecfb92 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2018-2019 NXP + * Dong Aisheng <aisheng.dong@nxp.com> + */ + +&dma_subsys { + uart4_lpcg: clock-controller@5a4a0000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5a4a0000 0x10000>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_UART_4 IMX_SC_PM_CLK_PER>, + <&dma_ipg_clk>; + clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>; + clock-output-names = "uart4_lpcg_baud_clk", + "uart4_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_UART_4>; + }; +}; + +&lpuart0 { + compatible = "fsl,imx8qm-lpuart", "fsl,imx8qxp-lpuart"; +}; + +&lpuart1 { + compatible = "fsl,imx8qm-lpuart", "fsl,imx8qxp-lpuart"; +}; + +&lpuart2 { + compatible = "fsl,imx8qm-lpuart", "fsl,imx8qxp-lpuart"; +}; + +&lpuart3 { + compatible = "fsl,imx8qm-lpuart", "fsl,imx8qxp-lpuart"; +}; + +&i2c0 { + compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c"; +}; + +&i2c1 { + compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c"; +}; + +&i2c2 { + compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c"; +}; + +&i2c3 { + compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c"; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-lsio.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-lsio.dtsi new file mode 100644 index 000000000000..30896610c654 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-lsio.dtsi @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2019-2020 NXP + * Dong Aisheng <aisheng.dong@nxp.com> + */ + +&lsio_gpio0 { + compatible = "fsl,imx8qm-gpio", "fsl,imx35-gpio"; +}; + +&lsio_gpio1 { + compatible = "fsl,imx8qm-gpio", "fsl,imx35-gpio"; +}; + +&lsio_gpio2 { + compatible = "fsl,imx8qm-gpio", "fsl,imx35-gpio"; +}; + +&lsio_gpio3 { + compatible = "fsl,imx8qm-gpio", "fsl,imx35-gpio"; +}; + +&lsio_gpio4 { + compatible = "fsl,imx8qm-gpio", "fsl,imx35-gpio"; +}; + +&lsio_gpio5 { + compatible = "fsl,imx8qm-gpio", "fsl,imx35-gpio"; +}; + +&lsio_gpio6 { + compatible = "fsl,imx8qm-gpio", "fsl,imx35-gpio"; +}; + +&lsio_gpio7 { + compatible = "fsl,imx8qm-gpio", "fsl,imx35-gpio"; +}; + +&lsio_mu0 { + compatible = "fsl,imx8-mu-scu", "fsl,imx8qm-mu", "fsl,imx6sx-mu"; +}; + +&lsio_mu1 { + compatible = "fsl,imx8-mu-scu", "fsl,imx8qm-mu", "fsl,imx6sx-mu"; +}; + +&lsio_mu2 { + compatible = "fsl,imx8-mu-scu", "fsl,imx8qm-mu", "fsl,imx6sx-mu"; +}; + +&lsio_mu3 { + compatible = "fsl,imx8-mu-scu", "fsl,imx8qm-mu", "fsl,imx6sx-mu"; +}; + +&lsio_mu4 { + compatible = "fsl,imx8-mu-scu", "fsl,imx8qm-mu", "fsl,imx6sx-mu"; +}; + +&lsio_mu13 { + compatible = "fsl,imx8qm-mu", "fsl,imx6sx-mu"; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8qm.dtsi b/arch/arm64/boot/dts/freescale/imx8qm.dtsi new file mode 100644 index 000000000000..12cd059b339b --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8qm.dtsi @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2018-2019 NXP + * Dong Aisheng <aisheng.dong@nxp.com> + */ + +#include <dt-bindings/clock/imx8-lpcg.h> +#include <dt-bindings/firmware/imx/rsrc.h> +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/interrupt-controller/arm-gic.h> +#include <dt-bindings/pinctrl/pads-imx8qm.h> + +/ { + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + + aliases { + mmc0 = 
&usdhc1; + mmc1 = &usdhc2; + mmc2 = &usdhc3; + serial0 = &lpuart0; + }; + + cpus { + #address-cells = <2>; + #size-cells = <0>; + + cpu-map { + cluster0 { + core0 { + cpu = <&A53_0>; + }; + core1 { + cpu = <&A53_1>; + }; + core2 { + cpu = <&A53_2>; + }; + core3 { + cpu = <&A53_3>; + }; + }; + + cluster1 { + core0 { + cpu = <&A72_0>; + }; + core1 { + cpu = <&A72_1>; + }; + }; + }; + + A53_0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a53", "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "psci"; + next-level-cache = <&A53_L2>; + }; + + A53_1: cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a53", "arm,armv8"; + reg = <0x0 0x1>; + enable-method = "psci"; + next-level-cache = <&A53_L2>; + }; + + A53_2: cpu@2 { + device_type = "cpu"; + compatible = "arm,cortex-a53", "arm,armv8"; + reg = <0x0 0x2>; + enable-method = "psci"; + next-level-cache = <&A53_L2>; + }; + + A53_3: cpu@3 { + device_type = "cpu"; + compatible = "arm,cortex-a53", "arm,armv8"; + reg = <0x0 0x3>; + enable-method = "psci"; + next-level-cache = <&A53_L2>; + }; + + A72_0: cpu@100 { + device_type = "cpu"; + compatible = "arm,cortex-a72", "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "psci"; + next-level-cache = <&A72_L2>; + }; + + A72_1: cpu@101 { + device_type = "cpu"; + compatible = "arm,cortex-a72", "arm,armv8"; + reg = <0x0 0x101>; + enable-method = "psci"; + next-level-cache = <&A72_L2>; + }; + + A53_L2: l2-cache0 { + compatible = "cache"; + }; + + A72_L2: l2-cache1 { + compatible = "cache"; + }; + }; + + gic: interrupt-controller@51a00000 { + compatible = "arm,gic-v3"; + reg = <0x0 0x51a00000 0 0x10000>, /* GIC Dist */ + <0x0 0x51b00000 0 0xC0000>, /* GICR */ + <0x0 0x52000000 0 0x2000>, /* GICC */ + <0x0 0x52010000 0 0x1000>, /* GICH */ + <0x0 0x52020000 0 0x20000>; /* GICV */ + #interrupt-cells = <3>; + interrupt-controller; + interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>; + interrupt-parent = <&gic>; + }; + + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>; + }; + + psci { + compatible = "arm,psci-1.0"; + method = "smc"; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, /* Physical Secure */ + <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, /* Physical Non-Secure */ + <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, /* Virtual */ + <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; /* Hypervisor */ + }; + + scu { + compatible = "fsl,imx-scu"; + mbox-names = "tx0", + "rx0", + "gip3"; + mboxes = <&lsio_mu1 0 0 + &lsio_mu1 1 0 + &lsio_mu1 3 3>; + + pd: imx8qx-pd { + compatible = "fsl,imx8qm-scu-pd", "fsl,scu-pd"; + #power-domain-cells = <1>; + }; + + clk: clock-controller { + compatible = "fsl,imx8qxp-clk", "fsl,scu-clk"; + #clock-cells = <2>; + }; + + iomuxc: pinctrl { + compatible = "fsl,imx8qm-iomuxc"; + }; + + }; + + /* sorted in register address */ + #include "imx8-ss-dma.dtsi" + #include "imx8-ss-conn.dtsi" + #include "imx8-ss-lsio.dtsi" +}; + +#include "imx8qm-ss-dma.dtsi" +#include "imx8qm-ss-conn.dtsi" +#include "imx8qm-ss-lsio.dtsi" diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-ai_ml.dts b/arch/arm64/boot/dts/freescale/imx8qxp-ai_ml.dts index a3f8cf195974..47bb68823b24 100644 --- a/arch/arm64/boot/dts/freescale/imx8qxp-ai_ml.dts +++ b/arch/arm64/boot/dts/freescale/imx8qxp-ai_ml.dts @@ -13,13 +13,13 @@ compatible = "einfochips,imx8qxp-ai_ml", "fsl,imx8qxp"; aliases { - serial1 = &adma_lpuart1; - serial2 = &adma_lpuart2; - serial3 = &adma_lpuart3; + serial1 = &lpuart1; + serial2 = &lpuart2; + serial3 = &lpuart3; }; chosen { - stdout-path = 
&adma_lpuart2; + stdout-path = &lpuart2; }; memory@80000000 { @@ -82,7 +82,7 @@ }; /* BT */ -&adma_lpuart0 { +&lpuart0 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_lpuart0>; uart-has-rtscts; @@ -90,21 +90,21 @@ }; /* LS-UART0 */ -&adma_lpuart1 { +&lpuart1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_lpuart1>; status = "okay"; }; /* Debug */ -&adma_lpuart2 { +&lpuart2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_lpuart2>; status = "okay"; }; /* PCI-E UART */ -&adma_lpuart3 { +&lpuart3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_lpuart3>; status = "okay"; @@ -133,7 +133,7 @@ &usdhc1 { #address-cells = <1>; #size-cells = <0>; - assigned-clocks = <&clk IMX_CONN_SDHC0_CLK>; + assigned-clocks = <&clk IMX_SC_R_SDHC_0 IMX_SC_PM_CLK_PER>; assigned-clock-rates = <200000000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc1>; @@ -151,7 +151,7 @@ /* SD */ &usdhc2 { - assigned-clocks = <&clk IMX_CONN_SDHC1_CLK>; + assigned-clocks = <&clk IMX_SC_R_SDHC_1 IMX_SC_PM_CLK_PER>; assigned-clock-rates = <200000000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc2>; diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-colibri-eval-v3.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp-colibri-eval-v3.dtsi index c7336f387605..144fc9e82da7 100644 --- a/arch/arm64/boot/dts/freescale/imx8qxp-colibri-eval-v3.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8qxp-colibri-eval-v3.dtsi @@ -26,7 +26,7 @@ }; }; -&adma_i2c1 { +&i2c1 { status = "okay"; /* M41T0M6 real time clock on carrier board */ @@ -37,17 +37,17 @@ }; /* Colibri UART_B */ -&adma_lpuart0 { +&lpuart0 { status= "okay"; }; /* Colibri UART_C */ -&adma_lpuart2 { +&lpuart2 { status= "okay"; }; /* Colibri UART_A */ -&adma_lpuart3 { +&lpuart3 { status= "okay"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-colibri.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp-colibri.dtsi index f38acff0d25c..89d70e030433 100644 --- a/arch/arm64/boot/dts/freescale/imx8qxp-colibri.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8qxp-colibri.dtsi @@ -10,7 +10,7 @@ compatible = "toradex,colibri-imx8x", "fsl,imx8qxp"; chosen { - stdout-path = &adma_lpuart3; + stdout-path = &lpuart3; }; reg_module_3v3: regulator-module-3v3 { @@ -22,7 +22,7 @@ }; /* On-module I2C */ -&adma_i2c0 { +&i2c0 { #address-cells = <1>; #size-cells = <0>; clock-frequency = <100000>; @@ -49,7 +49,7 @@ }; /* Colibri I2C */ -&adma_i2c1 { +&i2c1 { #address-cells = <1>; #size-cells = <0>; clock-frequency = <100000>; @@ -58,19 +58,19 @@ }; /* Colibri UART_B */ -&adma_lpuart0 { +&lpuart0 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_lpuart0>; }; /* Colibri UART_C */ -&adma_lpuart2 { +&lpuart2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_lpuart2>; }; /* Colibri UART_A */ -&adma_lpuart3 { +&lpuart3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_lpuart3>, <&pinctrl_lpuart3_ctrl>; }; diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts index 46437d3c7a04..863232a47004 100644 --- a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts +++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts @@ -12,7 +12,7 @@ compatible = "fsl,imx8qxp-mek", "fsl,imx8qxp"; chosen { - stdout-path = &adma_lpuart0; + stdout-path = &lpuart0; }; memory@80000000 { @@ -30,11 +30,30 @@ }; }; -&adma_dsp { +&dsp { status = "okay"; }; -&adma_i2c1 { +&fec1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_fec1>; + phy-mode = "rgmii-id"; + phy-handle = <ðphy0>; + fsl,magic-packet; + status = "okay"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + ethphy0: 
ethernet-phy@0 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <0>; + }; + }; +}; + +&i2c1 { #address-cells = <1>; #size-cells = <0>; clock-frequency = <100000>; @@ -110,31 +129,12 @@ }; }; -&adma_lpuart0 { +&lpuart0 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_lpuart0>; status = "okay"; }; -&fec1 { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_fec1>; - phy-mode = "rgmii-id"; - phy-handle = <ðphy0>; - fsl,magic-packet; - status = "okay"; - - mdio { - #address-cells = <1>; - #size-cells = <0>; - - ethphy0: ethernet-phy@0 { - compatible = "ethernet-phy-ieee802.3-c22"; - reg = <0>; - }; - }; -}; - &scu_key { status = "okay"; }; @@ -173,7 +173,7 @@ }; &usdhc1 { - assigned-clocks = <&clk IMX_CONN_SDHC0_CLK>; + assigned-clocks = <&clk IMX_SC_R_SDHC_0 IMX_SC_PM_CLK_PER>; assigned-clock-rates = <200000000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc1>; @@ -185,7 +185,7 @@ }; &usdhc2 { - assigned-clocks = <&clk IMX_CONN_SDHC1_CLK>; + assigned-clocks = <&clk IMX_SC_R_SDHC_1 IMX_SC_PM_CLK_PER>; assigned-clock-rates = <200000000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc2>; diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-ss-adma.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp-ss-adma.dtsi new file mode 100644 index 000000000000..dc1daa8dc72f --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8qxp-ss-adma.dtsi @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2018-2020 NXP + * Dong Aisheng <aisheng.dong@nxp.com> + */ + +&lpuart0 { + compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart"; +}; + +&lpuart1 { + compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart"; +}; + +&lpuart2 { + compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart"; +}; + +&lpuart3 { + compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart"; +}; + +&i2c0 { + compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c"; +}; + +&i2c1 { + compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c"; +}; + +&i2c2 { + compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c"; +}; + +&i2c3 { + compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c"; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-ss-conn.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp-ss-conn.dtsi new file mode 100644 index 000000000000..f5f58959f65c --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8qxp-ss-conn.dtsi @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2018-2020 NXP + * Dong Aisheng <aisheng.dong@nxp.com> + */ + +&usdhc1 { + compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc"; +}; + +&usdhc2 { + compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc"; +}; + +&usdhc3 { + compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc"; +}; + +&fec1 { + compatible = "fsl,imx8qxp-fec", "fsl,imx6sx-fec"; +}; + +&fec2 { + compatible = "fsl,imx8qxp-fec", "fsl,imx6sx-fec"; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-ss-lsio.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp-ss-lsio.dtsi new file mode 100644 index 000000000000..11395479ffc0 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8qxp-ss-lsio.dtsi @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2018-2020 NXP + * Dong Aisheng <aisheng.dong@nxp.com> + */ + +&lsio_gpio0 { + compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; +}; + +&lsio_gpio1 { + compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; +}; + +&lsio_gpio2 { + compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; +}; + +&lsio_gpio3 { + compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; +}; + +&lsio_gpio4 { + compatible = "fsl,imx8qxp-gpio", 
"fsl,imx35-gpio"; +}; + +&lsio_gpio5 { + compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; +}; + +&lsio_gpio6 { + compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; +}; + +&lsio_gpio7 { + compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; +}; + +&lsio_mu0 { + compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu"; +}; + +&lsio_mu1 { + compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu"; +}; + +&lsio_mu2 { + compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu"; +}; + +&lsio_mu3 { + compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu"; +}; + +&lsio_mu4 { + compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu"; +}; + +&lsio_mu13 { + compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu"; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8qxp.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp.dtsi index e46faac1fe71..1e6b4995091e 100644 --- a/arch/arm64/boot/dts/freescale/imx8qxp.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8qxp.dtsi @@ -1,11 +1,12 @@ // SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2016 Freescale Semiconductor, Inc. - * Copyright 2017-2018 NXP + * Copyright 2017-2020 NXP * Dong Aisheng <aisheng.dong@nxp.com> */ #include <dt-bindings/clock/imx8-clock.h> +#include <dt-bindings/clock/imx8-lpcg.h> #include <dt-bindings/firmware/imx/rsrc.h> #include <dt-bindings/gpio/gpio.h> #include <dt-bindings/input/input.h> @@ -29,10 +30,10 @@ gpio5 = &lsio_gpio5; gpio6 = &lsio_gpio6; gpio7 = &lsio_gpio7; - i2c0 = &adma_i2c0; - i2c1 = &adma_i2c1; - i2c2 = &adma_i2c2; - i2c3 = &adma_i2c3; + i2c0 = &i2c0; + i2c1 = &i2c1; + i2c2 = &i2c2; + i2c3 = &i2c3; mmc0 = &usdhc1; mmc1 = &usdhc2; mmc2 = &usdhc3; @@ -41,10 +42,10 @@ mu2 = &lsio_mu2; mu3 = &lsio_mu3; mu4 = &lsio_mu4; - serial0 = &adma_lpuart0; - serial1 = &adma_lpuart1; - serial2 = &adma_lpuart2; - serial3 = &adma_lpuart3; + serial0 = &lpuart0; + serial1 = &lpuart1; + serial2 = &lpuart2; + serial3 = &lpuart3; }; cpus { @@ -58,7 +59,7 @@ reg = <0x0 0x0>; enable-method = "psci"; next-level-cache = <&A35_L2>; - clocks = <&clk IMX_A35_CLK>; + clocks = <&clk IMX_SC_R_A35 IMX_SC_PM_CLK_CPU>; operating-points-v2 = <&a35_opp_table>; #cooling-cells = <2>; }; @@ -69,7 +70,7 @@ reg = <0x0 0x1>; enable-method = "psci"; next-level-cache = <&A35_L2>; - clocks = <&clk IMX_A35_CLK>; + clocks = <&clk IMX_SC_R_A35 IMX_SC_PM_CLK_CPU>; operating-points-v2 = <&a35_opp_table>; #cooling-cells = <2>; }; @@ -80,7 +81,7 @@ reg = <0x0 0x2>; enable-method = "psci"; next-level-cache = <&A35_L2>; - clocks = <&clk IMX_A35_CLK>; + clocks = <&clk IMX_SC_R_A35 IMX_SC_PM_CLK_CPU>; operating-points-v2 = <&a35_opp_table>; #cooling-cells = <2>; }; @@ -91,7 +92,7 @@ reg = <0x0 0x3>; enable-method = "psci"; next-level-cache = <&A35_L2>; - clocks = <&clk IMX_A35_CLK>; + clocks = <&clk IMX_SC_R_A35 IMX_SC_PM_CLK_CPU>; operating-points-v2 = <&a35_opp_table>; #cooling-cells = <2>; }; @@ -158,9 +159,14 @@ &lsio_mu1 1 0 &lsio_mu1 3 3>; + pd: imx8qx-pd { + compatible = "fsl,imx8qxp-scu-pd", "fsl,scu-pd"; + #power-domain-cells = <1>; + }; + clk: clock-controller { compatible = "fsl,imx8qxp-clk"; - #clock-cells = <1>; + #clock-cells = <2>; clocks = <&xtal32k &xtal24m>; clock-names = "xtal_32KHz", "xtal_24Mhz"; }; @@ -175,11 +181,6 @@ #size-cells = <1>; }; - pd: imx8qx-pd { - compatible = "fsl,imx8qxp-scu-pd"; - #power-domain-cells = <1>; - }; - scu_key: scu-key { compatible = "fsl,imx8qxp-sc-key", "fsl,imx-sc-key"; linux,keycodes = <KEY_POWER>; @@ -223,380 +224,6 @@ clock-output-names = "xtal_24MHz"; }; - adma_subsys: bus@59000000 { - compatible = 
"simple-bus"; - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x59000000 0x0 0x59000000 0x2000000>; - - adma_lpcg: clock-controller@59000000 { - compatible = "fsl,imx8qxp-lpcg-adma"; - reg = <0x59000000 0x2000000>; - #clock-cells = <1>; - }; - - adma_dsp: dsp@596e8000 { - compatible = "fsl,imx8qxp-dsp"; - reg = <0x596e8000 0x88000>; - clocks = <&adma_lpcg IMX_ADMA_LPCG_DSP_IPG_CLK>, - <&adma_lpcg IMX_ADMA_LPCG_OCRAM_IPG_CLK>, - <&adma_lpcg IMX_ADMA_LPCG_DSP_CORE_CLK>; - clock-names = "ipg", "ocram", "core"; - power-domains = <&pd IMX_SC_R_MU_13A>, - <&pd IMX_SC_R_MU_13B>, - <&pd IMX_SC_R_DSP>, - <&pd IMX_SC_R_DSP_RAM>; - mbox-names = "txdb0", "txdb1", - "rxdb0", "rxdb1"; - mboxes = <&lsio_mu13 2 0>, - <&lsio_mu13 2 1>, - <&lsio_mu13 3 0>, - <&lsio_mu13 3 1>; - memory-region = <&dsp_reserved>; - status = "disabled"; - }; - - adma_lpuart0: serial@5a060000 { - compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart"; - reg = <0x5a060000 0x1000>; - interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&adma_lpcg IMX_ADMA_LPCG_UART0_IPG_CLK>, - <&adma_lpcg IMX_ADMA_LPCG_UART0_BAUD_CLK>; - clock-names = "ipg", "baud"; - power-domains = <&pd IMX_SC_R_UART_0>; - status = "disabled"; - }; - - adma_lpuart1: serial@5a070000 { - compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart"; - reg = <0x5a070000 0x1000>; - interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&adma_lpcg IMX_ADMA_LPCG_UART1_IPG_CLK>, - <&adma_lpcg IMX_ADMA_LPCG_UART1_BAUD_CLK>; - clock-names = "ipg", "baud"; - power-domains = <&pd IMX_SC_R_UART_1>; - status = "disabled"; - }; - - adma_lpuart2: serial@5a080000 { - compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart"; - reg = <0x5a080000 0x1000>; - interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&adma_lpcg IMX_ADMA_LPCG_UART2_IPG_CLK>, - <&adma_lpcg IMX_ADMA_LPCG_UART2_BAUD_CLK>; - clock-names = "ipg", "baud"; - power-domains = <&pd IMX_SC_R_UART_2>; - status = "disabled"; - }; - - adma_lpuart3: serial@5a090000 { - compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart"; - reg = <0x5a090000 0x1000>; - interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&adma_lpcg IMX_ADMA_LPCG_UART3_IPG_CLK>, - <&adma_lpcg IMX_ADMA_LPCG_UART3_BAUD_CLK>; - clock-names = "ipg", "baud"; - power-domains = <&pd IMX_SC_R_UART_3>; - status = "disabled"; - }; - - adma_i2c0: i2c@5a800000 { - compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c"; - reg = <0x5a800000 0x4000>; - interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&adma_lpcg IMX_ADMA_LPCG_I2C0_CLK>; - clock-names = "per"; - assigned-clocks = <&clk IMX_ADMA_I2C0_CLK>; - assigned-clock-rates = <24000000>; - power-domains = <&pd IMX_SC_R_I2C_0>; - status = "disabled"; - }; - - adma_i2c1: i2c@5a810000 { - compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c"; - reg = <0x5a810000 0x4000>; - interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&adma_lpcg IMX_ADMA_LPCG_I2C1_CLK>; - clock-names = "per"; - assigned-clocks = <&clk IMX_ADMA_I2C1_CLK>; - assigned-clock-rates = <24000000>; - power-domains = <&pd IMX_SC_R_I2C_1>; - status = "disabled"; - }; - - adma_i2c2: i2c@5a820000 { - compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c"; - reg = <0x5a820000 0x4000>; - interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&adma_lpcg IMX_ADMA_LPCG_I2C2_CLK>; - clock-names = "per"; - assigned-clocks = <&clk IMX_ADMA_I2C2_CLK>; - assigned-clock-rates = <24000000>; - power-domains = <&pd IMX_SC_R_I2C_2>; - status = "disabled"; - }; - - adma_i2c3: i2c@5a830000 { - compatible = 
"fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c"; - reg = <0x5a830000 0x4000>; - interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&adma_lpcg IMX_ADMA_LPCG_I2C3_CLK>; - clock-names = "per"; - assigned-clocks = <&clk IMX_ADMA_I2C3_CLK>; - assigned-clock-rates = <24000000>; - power-domains = <&pd IMX_SC_R_I2C_3>; - status = "disabled"; - }; - }; - - conn_subsys: bus@5b000000 { - compatible = "simple-bus"; - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x5b000000 0x0 0x5b000000 0x1000000>; - - conn_lpcg: clock-controller@5b200000 { - compatible = "fsl,imx8qxp-lpcg-conn"; - reg = <0x5b200000 0xb0000>; - #clock-cells = <1>; - }; - - usdhc1: mmc@5b010000 { - compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc"; - interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>; - reg = <0x5b010000 0x10000>; - clocks = <&conn_lpcg IMX_CONN_LPCG_SDHC0_IPG_CLK>, - <&conn_lpcg IMX_CONN_LPCG_SDHC0_PER_CLK>, - <&conn_lpcg IMX_CONN_LPCG_SDHC0_HCLK>; - clock-names = "ipg", "per", "ahb"; - power-domains = <&pd IMX_SC_R_SDHC_0>; - status = "disabled"; - }; - - usdhc2: mmc@5b020000 { - compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc"; - interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>; - reg = <0x5b020000 0x10000>; - clocks = <&conn_lpcg IMX_CONN_LPCG_SDHC1_IPG_CLK>, - <&conn_lpcg IMX_CONN_LPCG_SDHC1_PER_CLK>, - <&conn_lpcg IMX_CONN_LPCG_SDHC1_HCLK>; - clock-names = "ipg", "per", "ahb"; - power-domains = <&pd IMX_SC_R_SDHC_1>; - fsl,tuning-start-tap = <20>; - fsl,tuning-step= <2>; - status = "disabled"; - }; - - usdhc3: mmc@5b030000 { - compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc"; - interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>; - reg = <0x5b030000 0x10000>; - clocks = <&conn_lpcg IMX_CONN_LPCG_SDHC2_IPG_CLK>, - <&conn_lpcg IMX_CONN_LPCG_SDHC2_PER_CLK>, - <&conn_lpcg IMX_CONN_LPCG_SDHC2_HCLK>; - clock-names = "ipg", "per", "ahb"; - power-domains = <&pd IMX_SC_R_SDHC_2>; - status = "disabled"; - }; - - fec1: ethernet@5b040000 { - compatible = "fsl,imx8qxp-fec", "fsl,imx6sx-fec"; - reg = <0x5b040000 0x10000>; - interrupts = <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&conn_lpcg IMX_CONN_LPCG_ENET0_IPG_CLK>, - <&conn_lpcg IMX_CONN_LPCG_ENET0_AHB_CLK>, - <&conn_lpcg IMX_CONN_LPCG_ENET0_TX_CLK>, - <&conn_lpcg IMX_CONN_LPCG_ENET0_ROOT_CLK>; - clock-names = "ipg", "ahb", "enet_clk_ref", "ptp"; - fsl,num-tx-queues=<3>; - fsl,num-rx-queues=<3>; - power-domains = <&pd IMX_SC_R_ENET_0>; - status = "disabled"; - }; - - fec2: ethernet@5b050000 { - compatible = "fsl,imx8qxp-fec", "fsl,imx6sx-fec"; - reg = <0x5b050000 0x10000>; - interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&conn_lpcg IMX_CONN_LPCG_ENET1_IPG_CLK>, - <&conn_lpcg IMX_CONN_LPCG_ENET1_AHB_CLK>, - <&conn_lpcg IMX_CONN_LPCG_ENET1_TX_CLK>, - <&conn_lpcg IMX_CONN_LPCG_ENET1_ROOT_CLK>; - clock-names = "ipg", "ahb", "enet_clk_ref", "ptp"; - fsl,num-tx-queues=<3>; - fsl,num-rx-queues=<3>; - power-domains = <&pd IMX_SC_R_ENET_1>; - status = "disabled"; - }; - }; - - ddr_subsyss: bus@5c000000 { - compatible = "simple-bus"; - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x5c000000 0x0 0x5c000000 0x1000000>; - - ddr-pmu@5c020000 { - compatible = "fsl,imx8-ddr-pmu"; - reg = <0x5c020000 0x10000>; - interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>; - }; - }; - - lsio_subsys: bus@5d000000 { - compatible = "simple-bus"; - 
#address-cells = <1>; - #size-cells = <1>; - ranges = <0x5d000000 0x0 0x5d000000 0x1000000>; - - lsio_gpio0: gpio@5d080000 { - compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; - reg = <0x5d080000 0x10000>; - interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>; - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - power-domains = <&pd IMX_SC_R_GPIO_0>; - }; - - lsio_gpio1: gpio@5d090000 { - compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; - reg = <0x5d090000 0x10000>; - interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>; - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - power-domains = <&pd IMX_SC_R_GPIO_1>; - }; - - lsio_gpio2: gpio@5d0a0000 { - compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; - reg = <0x5d0a0000 0x10000>; - interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>; - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - power-domains = <&pd IMX_SC_R_GPIO_2>; - }; - - lsio_gpio3: gpio@5d0b0000 { - compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; - reg = <0x5d0b0000 0x10000>; - interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>; - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - power-domains = <&pd IMX_SC_R_GPIO_3>; - }; - - lsio_gpio4: gpio@5d0c0000 { - compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; - reg = <0x5d0c0000 0x10000>; - interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>; - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - power-domains = <&pd IMX_SC_R_GPIO_4>; - }; - - lsio_gpio5: gpio@5d0d0000 { - compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; - reg = <0x5d0d0000 0x10000>; - interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>; - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - power-domains = <&pd IMX_SC_R_GPIO_5>; - }; - - lsio_gpio6: gpio@5d0e0000 { - compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; - reg = <0x5d0e0000 0x10000>; - interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>; - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - power-domains = <&pd IMX_SC_R_GPIO_6>; - }; - - lsio_gpio7: gpio@5d0f0000 { - compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; - reg = <0x5d0f0000 0x10000>; - interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>; - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - power-domains = <&pd IMX_SC_R_GPIO_7>; - }; - - lsio_mu0: mailbox@5d1b0000 { - compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu"; - reg = <0x5d1b0000 0x10000>; - interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>; - #mbox-cells = <2>; - status = "disabled"; - }; - - lsio_mu1: mailbox@5d1c0000 { - compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu"; - reg = <0x5d1c0000 0x10000>; - interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>; - #mbox-cells = <2>; - }; - - lsio_mu2: mailbox@5d1d0000 { - compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu"; - reg = <0x5d1d0000 0x10000>; - interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>; - #mbox-cells = <2>; - status = "disabled"; - }; - - lsio_mu3: mailbox@5d1e0000 { - compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu"; - reg = <0x5d1e0000 0x10000>; - interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>; - #mbox-cells = <2>; - status = "disabled"; - }; - - lsio_mu4: mailbox@5d1f0000 { - compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu"; - reg = <0x5d1f0000 0x10000>; - interrupts = <GIC_SPI 180 
IRQ_TYPE_LEVEL_HIGH>; - #mbox-cells = <2>; - status = "disabled"; - }; - - lsio_mu13: mailbox@5d280000 { - compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu"; - reg = <0x5d280000 0x10000>; - interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>; - #mbox-cells = <2>; - power-domains = <&pd IMX_SC_R_MU_13A>; - }; - - lsio_lpcg: clock-controller@5d400000 { - compatible = "fsl,imx8qxp-lpcg-lsio"; - reg = <0x5d400000 0x400000>; - #clock-cells = <1>; - }; - }; - thermal_zones: thermal-zones { cpu-thermal0 { polling-delay-passive = <250>; @@ -629,4 +256,14 @@ }; }; }; + + /* sorted in register address */ + #include "imx8-ss-adma.dtsi" + #include "imx8-ss-conn.dtsi" + #include "imx8-ss-ddr.dtsi" + #include "imx8-ss-lsio.dtsi" }; + +#include "imx8qxp-ss-adma.dtsi" +#include "imx8qxp-ss-conn.dtsi" +#include "imx8qxp-ss-lsio.dtsi" diff --git a/arch/arm64/boot/dts/intel/Makefile b/arch/arm64/boot/dts/intel/Makefile index 3a052540605b..0b5477442263 100644 --- a/arch/arm64/boot/dts/intel/Makefile +++ b/arch/arm64/boot/dts/intel/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -dtb-$(CONFIG_ARCH_AGILEX) += socfpga_agilex_socdk.dtb \ - socfpga_agilex_socdk_nand.dtb +dtb-$(CONFIG_ARCH_INTEL_SOCFPGA) += socfpga_agilex_socdk.dtb \ + socfpga_agilex_socdk_nand.dtb \ + socfpga_n5x_socdk.dtb dtb-$(CONFIG_ARCH_KEEMBAY) += keembay-evm.dtb -dtb-$(CONFIG_ARCH_N5X) += socfpga_n5x_socdk.dtb diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi index 07c099b4ed5b..163f33b46e4f 100644 --- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi +++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi @@ -6,6 +6,7 @@ /dts-v1/; #include <dt-bindings/reset/altr,rst-mgr-s10.h> #include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/interrupt-controller/arm-gic.h> #include <dt-bindings/clock/agilex-clock.h> / { @@ -61,10 +62,10 @@ pmu { compatible = "arm,armv8-pmuv3"; - interrupts = <0 170 4>, - <0 171 4>, - <0 172 4>, - <0 173 4>; + interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>; interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, @@ -77,7 +78,7 @@ method = "smc"; }; - intc: intc@fffc1000 { + intc: interrupt-controller@fffc1000 { compatible = "arm,gic-400", "arm,cortex-a15-gic"; #interrupt-cells = <3>; interrupt-controller; @@ -87,6 +88,48 @@ <0x0 0xfffc6000 0x0 0x2000>; }; + clocks { + cb_intosc_hs_div2_clk: cb-intosc-hs-div2-clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + }; + + cb_intosc_ls_clk: cb-intosc-ls-clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + }; + + f2s_free_clk: f2s-free-clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + }; + + osc1: osc1 { + #clock-cells = <0>; + compatible = "fixed-clock"; + }; + + qspi_clk: qspi-clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <200000000>; + }; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupt-parent = <&intc>; + interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>, + <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>, + <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>, + <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>; + }; + + usbphy0: usbphy { + #phy-cells = <0>; + compatible = "usb-nop-xceiv"; + }; + soc { #address-cells = <1>; #size-cells = <1>; @@ -108,38 +151,10 @@ #clock-cells = <1>; }; - clocks { - cb_intosc_hs_div2_clk: cb-intosc-hs-div2-clk { - #clock-cells = <0>; - compatible = 
"fixed-clock"; - }; - - cb_intosc_ls_clk: cb-intosc-ls-clk { - #clock-cells = <0>; - compatible = "fixed-clock"; - }; - - f2s_free_clk: f2s-free-clk { - #clock-cells = <0>; - compatible = "fixed-clock"; - }; - - osc1: osc1 { - #clock-cells = <0>; - compatible = "fixed-clock"; - }; - - qspi_clk: qspi-clk { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <200000000>; - }; - }; - gmac0: ethernet@ff800000 { compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac"; reg = <0xff800000 0x2000>; - interrupts = <0 90 4>; + interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "macirq"; mac-address = [00 00 00 00 00 00]; resets = <&rst EMAC0_RESET>, <&rst EMAC0_OCP_RESET>; @@ -157,7 +172,7 @@ gmac1: ethernet@ff802000 { compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac"; reg = <0xff802000 0x2000>; - interrupts = <0 91 4>; + interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "macirq"; mac-address = [00 00 00 00 00 00]; resets = <&rst EMAC1_RESET>, <&rst EMAC1_OCP_RESET>; @@ -175,7 +190,7 @@ gmac2: ethernet@ff804000 { compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac"; reg = <0xff804000 0x2000>; - interrupts = <0 92 4>; + interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "macirq"; mac-address = [00 00 00 00 00 00]; resets = <&rst EMAC2_RESET>, <&rst EMAC2_OCP_RESET>; @@ -206,7 +221,7 @@ reg = <0>; interrupt-controller; #interrupt-cells = <2>; - interrupts = <0 110 4>; + interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>; }; }; @@ -226,7 +241,7 @@ reg = <0>; interrupt-controller; #interrupt-cells = <2>; - interrupts = <0 111 4>; + interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>; }; }; @@ -235,7 +250,7 @@ #size-cells = <0>; compatible = "snps,designware-i2c"; reg = <0xffc02800 0x100>; - interrupts = <0 103 4>; + interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>; resets = <&rst I2C0_RESET>; clocks = <&clkmgr AGILEX_L4_SP_CLK>; status = "disabled"; @@ -246,7 +261,7 @@ #size-cells = <0>; compatible = "snps,designware-i2c"; reg = <0xffc02900 0x100>; - interrupts = <0 104 4>; + interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>; resets = <&rst I2C1_RESET>; clocks = <&clkmgr AGILEX_L4_SP_CLK>; status = "disabled"; @@ -257,7 +272,7 @@ #size-cells = <0>; compatible = "snps,designware-i2c"; reg = <0xffc02a00 0x100>; - interrupts = <0 105 4>; + interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>; resets = <&rst I2C2_RESET>; clocks = <&clkmgr AGILEX_L4_SP_CLK>; status = "disabled"; @@ -268,7 +283,7 @@ #size-cells = <0>; compatible = "snps,designware-i2c"; reg = <0xffc02b00 0x100>; - interrupts = <0 106 4>; + interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>; resets = <&rst I2C3_RESET>; clocks = <&clkmgr AGILEX_L4_SP_CLK>; status = "disabled"; @@ -279,7 +294,7 @@ #size-cells = <0>; compatible = "snps,designware-i2c"; reg = <0xffc02c00 0x100>; - interrupts = <0 107 4>; + interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>; resets = <&rst I2C4_RESET>; clocks = <&clkmgr AGILEX_L4_SP_CLK>; status = "disabled"; @@ -290,7 +305,7 @@ #size-cells = <0>; compatible = "altr,socfpga-dw-mshc"; reg = <0xff808000 0x1000>; - interrupts = <0 96 4>; + interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>; fifo-depth = <0x400>; resets = <&rst SDMMC_RESET>; reset-names = "reset"; @@ -301,14 +316,14 @@ status = "disabled"; }; - nand: nand@ffb90000 { + nand: nand-controller@ffb90000 { #address-cells = <1>; #size-cells = <0>; compatible = "altr,socfpga-denali-nand"; reg = <0xffb90000 0x10000>, <0xffb80000 0x1000>; reg-names = "nand_data", 
"denali_reg"; - interrupts = <0 97 4>; + interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clkmgr AGILEX_NAND_CLK>, <&clkmgr AGILEX_NAND_X_CLK>, <&clkmgr AGILEX_NAND_ECC_CLK>; @@ -325,15 +340,15 @@ pdma: pdma@ffda0000 { compatible = "arm,pl330", "arm,primecell"; reg = <0xffda0000 0x1000>; - interrupts = <0 81 4>, - <0 82 4>, - <0 83 4>, - <0 84 4>, - <0 85 4>, - <0 86 4>, - <0 87 4>, - <0 88 4>, - <0 89 4>; + interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>; #dma-cells = <1>; #dma-channels = <8>; #dma-requests = <32>; @@ -355,17 +370,43 @@ #global-interrupts = <2>; #iommu-cells = <1>; interrupt-parent = <&intc>; - interrupts = <0 128 4>, /* Global Secure Fault */ - <0 129 4>, /* Global Non-secure Fault */ + /* Global Secure Fault */ + interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>, + /* Global Non-secure Fault */ + <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>, /* Non-secure Context Interrupts (32) */ - <0 138 4>, <0 139 4>, <0 140 4>, <0 141 4>, - <0 142 4>, <0 143 4>, <0 144 4>, <0 145 4>, - <0 146 4>, <0 147 4>, <0 148 4>, <0 149 4>, - <0 150 4>, <0 151 4>, <0 152 4>, <0 153 4>, - <0 154 4>, <0 155 4>, <0 156 4>, <0 157 4>, - <0 158 4>, <0 159 4>, <0 160 4>, <0 161 4>, - <0 162 4>, <0 163 4>, <0 164 4>, <0 165 4>, - <0 166 4>, <0 167 4>, <0 168 4>, <0 169 4>; + <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>; stream-match-mask = <0x7ff0>; clocks = <&clkmgr AGILEX_MPU_CCU_CLK>, <&clkmgr AGILEX_L3_MAIN_FREE_CLK>, @@ -378,7 +419,7 @@ #address-cells = <1>; #size-cells = <0>; reg = <0xffda4000 0x1000>; - interrupts = <0 99 4>; + interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>; resets = <&rst SPIM0_RESET>; reset-names = "spi"; reg-io-width = <4>; @@ -392,7 +433,7 @@ #address-cells = <1>; #size-cells = <0>; reg = <0xffda5000 0x1000>; - interrupts = <0 100 4>; + interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>; resets = <&rst SPIM1_RESET>; reset-names = "spi"; reg-io-width = <4>; @@ -406,18 +447,9 @@ reg = <0xffd12000 0x500>; }; - /* Local timer */ - timer { - compatible = "arm,armv8-timer"; - interrupts = <1 13 0xf08>, - <1 14 0xf08>, - <1 11 0xf08>, - <1 10 0xf08>; - }; - timer0: timer0@ffc03000 { compatible = 
"snps,dw-apb-timer"; - interrupts = <0 113 4>; + interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>; reg = <0xffc03000 0x100>; clocks = <&clkmgr AGILEX_L4_SP_CLK>; clock-names = "timer"; @@ -425,7 +457,7 @@ timer1: timer1@ffc03100 { compatible = "snps,dw-apb-timer"; - interrupts = <0 114 4>; + interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>; reg = <0xffc03100 0x100>; clocks = <&clkmgr AGILEX_L4_SP_CLK>; clock-names = "timer"; @@ -433,7 +465,7 @@ timer2: timer2@ffd00000 { compatible = "snps,dw-apb-timer"; - interrupts = <0 115 4>; + interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>; reg = <0xffd00000 0x100>; clocks = <&clkmgr AGILEX_L4_SP_CLK>; clock-names = "timer"; @@ -441,16 +473,16 @@ timer3: timer3@ffd00100 { compatible = "snps,dw-apb-timer"; - interrupts = <0 116 4>; + interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>; reg = <0xffd00100 0x100>; clocks = <&clkmgr AGILEX_L4_SP_CLK>; clock-names = "timer"; }; - uart0: serial0@ffc02000 { + uart0: serial@ffc02000 { compatible = "snps,dw-apb-uart"; reg = <0xffc02000 0x100>; - interrupts = <0 108 4>; + interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>; reg-shift = <2>; reg-io-width = <4>; resets = <&rst UART0_RESET>; @@ -458,10 +490,10 @@ clocks = <&clkmgr AGILEX_L4_SP_CLK>; }; - uart1: serial1@ffc02100 { + uart1: serial@ffc02100 { compatible = "snps,dw-apb-uart"; reg = <0xffc02100 0x100>; - interrupts = <0 109 4>; + interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>; reg-shift = <2>; reg-io-width = <4>; resets = <&rst UART1_RESET>; @@ -469,16 +501,10 @@ status = "disabled"; }; - usbphy0: usbphy@0 { - #phy-cells = <0>; - compatible = "usb-nop-xceiv"; - status = "okay"; - }; - usb0: usb@ffb00000 { compatible = "snps,dwc2"; reg = <0xffb00000 0x40000>; - interrupts = <0 93 4>; + interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>; phys = <&usbphy0>; phy-names = "usb2-phy"; resets = <&rst USB0_RESET>, <&rst USB0_OCP_RESET>; @@ -491,7 +517,7 @@ usb1: usb@ffb40000 { compatible = "snps,dwc2"; reg = <0xffb40000 0x40000>; - interrupts = <0 94 4>; + interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>; phys = <&usbphy0>; phy-names = "usb2-phy"; resets = <&rst USB1_RESET>, <&rst USB1_OCP_RESET>; @@ -504,7 +530,7 @@ watchdog0: watchdog@ffd00200 { compatible = "snps,dw-wdt"; reg = <0xffd00200 0x100>; - interrupts = <0 117 4>; + interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>; resets = <&rst WATCHDOG0_RESET>; clocks = <&clkmgr AGILEX_L4_SYS_FREE_CLK>; status = "disabled"; @@ -513,7 +539,7 @@ watchdog1: watchdog@ffd00300 { compatible = "snps,dw-wdt"; reg = <0xffd00300 0x100>; - interrupts = <0 118 4>; + interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>; resets = <&rst WATCHDOG1_RESET>; clocks = <&clkmgr AGILEX_L4_SYS_FREE_CLK>; status = "disabled"; @@ -522,7 +548,7 @@ watchdog2: watchdog@ffd00400 { compatible = "snps,dw-wdt"; reg = <0xffd00400 0x100>; - interrupts = <0 125 4>; + interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>; resets = <&rst WATCHDOG2_RESET>; clocks = <&clkmgr AGILEX_L4_SYS_FREE_CLK>; status = "disabled"; @@ -531,7 +557,7 @@ watchdog3: watchdog@ffd00500 { compatible = "snps,dw-wdt"; reg = <0xffd00500 0x100>; - interrupts = <0 126 4>; + interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>; resets = <&rst WATCHDOG3_RESET>; clocks = <&clkmgr AGILEX_L4_SYS_FREE_CLK>; status = "disabled"; @@ -548,7 +574,7 @@ altr,sysmgr-syscon = <&sysmgr>; #address-cells = <1>; #size-cells = <1>; - interrupts = <0 15 4>; + interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; interrupt-controller; #interrupt-cells = <2>; ranges; @@ -607,7 +633,7 @@ #size-cells = <0>; reg = <0xff8d2000 0x100>, <0xff900000 
0x100000>; - interrupts = <0 3 4>; + interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>; cdns,fifo-depth = <128>; cdns,fifo-width = <4>; cdns,trigger-address = <0x00000000>; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts index a7a83f29f00b..0f7a0ba344be 100644 --- a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts +++ b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts @@ -41,14 +41,6 @@ /* We expect the bootloader to fill in the reg */ reg = <0 0 0 0>; }; - - soc { - clocks { - osc1 { - clock-frequency = <25000000>; - }; - }; - }; }; &gpio1 { @@ -92,6 +84,10 @@ bus-width = <4>; }; +&osc1 { + clock-frequency = <25000000>; +}; + &uart0 { status = "okay"; }; @@ -117,7 +113,7 @@ m25p,fast-read; cdns,page-size = <256>; cdns,block-size = <16>; - cdns,read-delay = <1>; + cdns,read-delay = <2>; cdns,tshsl-ns = <50>; cdns,tsd2d-ns = <50>; cdns,tchsh-ns = <4>; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk_nand.dts b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk_nand.dts index 979aa59a6bd0..cc2dcabf34e3 100644 --- a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk_nand.dts +++ b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk_nand.dts @@ -20,17 +20,17 @@ leds { compatible = "gpio-leds"; - hps0 { + led0 { label = "hps_led0"; gpios = <&portb 20 GPIO_ACTIVE_HIGH>; }; - hps1 { + led1 { label = "hps_led1"; gpios = <&portb 19 GPIO_ACTIVE_HIGH>; }; - hps2 { + led2 { label = "hps_led2"; gpios = <&portb 21 GPIO_ACTIVE_HIGH>; }; @@ -41,14 +41,6 @@ /* We expect the bootloader to fill in the reg */ reg = <0 0 0 0>; }; - - soc { - clocks { - osc1 { - clock-frequency = <25000000>; - }; - }; - }; }; &gpio1 { @@ -121,6 +113,10 @@ }; }; +&osc1 { + clock-frequency = <25000000>; +}; + &uart0 { status = "okay"; }; diff --git a/arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts b/arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts index 5f56e2697fee..01f1307ce4ac 100644 --- a/arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts +++ b/arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts @@ -23,14 +23,6 @@ /* We expect the bootloader to fill in the reg */ reg = <0 0 0 0>; }; - - soc { - clocks { - osc1 { - clock-frequency = <25000000>; - }; - }; - }; }; &clkmgr { @@ -44,6 +36,10 @@ bus-width = <4>; }; +&osc1 { + clock-frequency = <25000000>; +}; + &uart0 { status = "okay"; }; diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts index d239ab70ed99..53e817c5f6f3 100644 --- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts +++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0+ OR MIT) /* * Device Tree file for CZ.NIC Turris Mox Board - * 2019 by Marek Behun <marek.behun@nic.cz> + * 2019 by Marek Behún <kabel@kernel.org> */ /dts-v1/; diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi index 7a2df148c6a3..456dcd4a7793 100644 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi @@ -156,7 +156,8 @@ }; nb_periph_clk: nb-periph-clk@13000 { - compatible = "marvell,armada-3700-periph-clock-nb"; + compatible = "marvell,armada-3700-periph-clock-nb", + "syscon"; reg = <0x13000 0x100>; clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>, <&tbg 3>, <&xtalclk>; diff --git a/arch/arm64/boot/dts/marvell/armada-7040-db.dts b/arch/arm64/boot/dts/marvell/armada-7040-db.dts index a7eb4e7697a2..51f3e2907597 100644 --- 
a/arch/arm64/boot/dts/marvell/armada-7040-db.dts +++ b/arch/arm64/boot/dts/marvell/armada-7040-db.dts @@ -218,6 +218,10 @@ }; }; +&cp0_utmi { + status = "okay"; +}; + &cp0_comphy1 { cp0_usbh0_con: connector { compatible = "usb-a-connector"; @@ -226,8 +230,9 @@ }; &cp0_usb3_0 { - phys = <&cp0_comphy1 0>; - phy-names = "cp0-usb3h0-comphy"; + phys = <&cp0_comphy1 0>, <&cp0_utmi0>; + phy-names = "cp0-usb3h0-comphy", "utmi"; + dr_mode = "host"; status = "okay"; }; @@ -239,8 +244,9 @@ }; &cp0_usb3_1 { - phys = <&cp0_comphy4 1>; - phy-names = "cp0-usb3h1-comphy"; + phys = <&cp0_comphy4 1>, <&cp0_utmi1>; + phy-names = "cp0-usb3h1-comphy", "utmi"; + dr_mode = "host"; status = "okay"; }; diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts index eb01cc96ba7a..0ec0d5625818 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts +++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts @@ -31,6 +31,16 @@ ethernet2 = &cp1_eth2; }; + fan: pwm { + compatible = "pwm-fan"; + /* 20% steps */ + cooling-levels = <0 51 102 153 204 255>; + #cooling-cells = <2>; + pinctrl-names = "default"; + pinctrl-0 = <&cp0_fan_pwm_pins>; + pwms = <&cp0_gpio2 16 40000>; + }; + v_3_3: regulator-3-3v { compatible = "regulator-fixed"; regulator-name = "v_3_3"; @@ -102,6 +112,123 @@ }; }; +&ap_thermal_ic { + polling-delay = <1000>; /* milliseconds */ + trips { + ap_active: trip-active { + temperature = <40000>; /* millicelsius */ + hysteresis = <4000>; /* millicelsius */ + type = "active"; + }; + }; + cooling-maps { + map0 { + trip = <&ap_active>; + cooling-device = <&fan THERMAL_NO_LIMIT 4>; + }; + map1 { + trip = <&ap_crit>; + cooling-device = <&fan 4 5>; + }; + }; +}; + +&cp0_thermal_ic { + polling-delay = <1000>; /* milliseconds */ + trips { + cp0_active0: trip-active0 { + temperature = <40000>; /* millicelsius */ + hysteresis = <2500>; /* millicelsius */ + type = "active"; + }; + cp0_active1: trip-active1 { + temperature = <45000>; /* millicelsius */ + hysteresis = <2500>; /* millicelsius */ + type = "active"; + }; + cp0_active2: trip-active2 { + temperature = <50000>; /* millicelsius */ + hysteresis = <2500>; /* millicelsius */ + type = "active"; + }; + cp0_active3: trip-active3 { + temperature = <60000>; /* millicelsius */ + hysteresis = <2500>; /* millicelsius */ + type = "active"; + }; + }; + cooling-maps { + map0 { + trip = <&cp0_active0>; + cooling-device = <&fan 0 1>; + }; + map1 { + trip = <&cp0_active1>; + cooling-device = <&fan 1 2>; + }; + map2 { + trip = <&cp0_active2>; + cooling-device = <&fan 2 3>; + }; + map3 { + trip = <&cp0_active3>; + cooling-device = <&fan 3 4>; + }; + map4 { + trip = <&cp0_crit>; + cooling-device = <&fan 4 5>; + }; + }; +}; + +&cp1_thermal_ic { + polling-delay = <1000>; /* milliseconds */ + trips { + cp1_active0: trip-active0 { + temperature = <40000>; /* millicelsius */ + hysteresis = <2500>; /* millicelsius */ + type = "active"; + }; + cp1_active1: trip-active1 { + temperature = <45000>; /* millicelsius */ + hysteresis = <2500>; /* millicelsius */ + type = "active"; + }; + cp1_active2: trip-active2 { + temperature = <50000>; /* millicelsius */ + hysteresis = <2500>; /* millicelsius */ + type = "active"; + }; + cp1_active3: trip-active3 { + temperature = <60000>; /* millicelsius */ + hysteresis = <2500>; /* millicelsius */ + type = "active"; + }; + }; + cooling-maps { + map0 { + trip = <&cp1_active0>; + cooling-device = <&fan 0 1>; + }; + map1 { + trip = <&cp1_active1>; + cooling-device 
= <&fan 1 2>; + }; + map2 { + trip = <&cp1_active2>; + cooling-device = <&fan 2 3>; + }; + map3 { + trip = <&cp1_active3>; + cooling-device = <&fan 3 4>; + }; + map4 { + trip = <&cp1_crit>; + cooling-device = <&fan 4 5>; + }; + }; +}; + &uart0 { status = "okay"; pinctrl-0 = <&uart0_pins>; diff --git a/arch/arm64/boot/dts/marvell/armada-8040-db.dts b/arch/arm64/boot/dts/marvell/armada-8040-db.dts index 09fb5256f1db..e39e1efc95b6 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-db.dts +++ b/arch/arm64/boot/dts/marvell/armada-8040-db.dts @@ -154,8 +154,15 @@ }; /* CON9 on CP0 expansion */ +&cp0_utmi { + status = "okay"; +}; + &cp0_usb3_0 { usb-phy = <&cp0_usb3_0_phy>; + phys = <&cp0_utmi0>; + phy-names = "utmi"; + dr_mode = "host"; status = "okay"; }; @@ -168,8 +175,9 @@ /* CON10 on CP0 expansion */ &cp0_usb3_1 { - phys = <&cp0_comphy4 1>; - phy-names = "cp0-usb3h1-comphy"; + phys = <&cp0_comphy4 1>, <&cp0_utmi1>; + phy-names = "usb", "utmi"; + dr_mode = "host"; status = "okay"; }; @@ -306,14 +314,23 @@ }; }; +&cp1_utmi { + status = "okay"; +}; + /* CON9 on CP1 expansion */ &cp1_usb3_0 { usb-phy = <&cp1_usb3_0_phy>; + phys = <&cp1_utmi0>; + phy-names = "utmi"; + dr_mode = "host"; status = "okay"; }; /* CON10 on CP1 expansion */ &cp1_usb3_1 { + phys = <&cp1_utmi1>; + phy-names = "utmi"; status = "okay"; }; diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi index cbcb210cb6d8..adbfecc678b5 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi @@ -259,13 +259,23 @@ vqmmc-supply = <&v_3_3>; }; +&cp0_utmi { + status = "okay"; +}; + &cp0_usb3_0 { /* J38? - USB2.0 only */ + phys = <&cp0_utmi0>; + phy-names = "utmi"; + dr_mode = "host"; status = "okay"; }; &cp0_usb3_1 { /* J38? 
- USB2.0 only */ + phys = <&cp0_utmi1>; + phy-names = "utmi"; + dr_mode = "host"; status = "okay"; }; @@ -364,9 +374,14 @@ }; }; +&cp1_utmi { + status = "okay"; +}; + &cp1_usb3_0 { /* CPS Lane 2 - CON7 */ - phys = <&cp1_comphy2 0>; - phy-names = "cp1-usb3h0-comphy"; + phys = <&cp1_comphy2 0>, <&cp1_utmi0>; + phy-names = "cp1-usb3h0-comphy", "utmi"; + dr_mode = "host"; status = "okay"; }; diff --git a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi index 64179a372ecf..3bd2182817fb 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi @@ -285,6 +285,25 @@ }; }; + CP11X_LABEL(utmi): utmi@580000 { + compatible = "marvell,cp110-utmi-phy"; + reg = <0x580000 0x2000>; + marvell,system-controller = <&CP11X_LABEL(syscon0)>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + CP11X_LABEL(utmi0): usb-phy@0 { + reg = <0>; + #phy-cells = <0>; + }; + + CP11X_LABEL(utmi1): usb-phy@1 { + reg = <1>; + #phy-cells = <0>; + }; + }; + CP11X_LABEL(usb3_0): usb@500000 { compatible = "marvell,armada-8k-xhci", "generic-xhci"; @@ -310,9 +329,11 @@ }; CP11X_LABEL(sata0): sata@540000 { - compatible = "marvell,armada-8k-ahci"; + compatible = "marvell,armada-8k-ahci", + "generic-ahci"; reg = <0x540000 0x30000>; dma-coherent; + interrupts = <107 IRQ_TYPE_LEVEL_HIGH>; clocks = <&CP11X_LABEL(clk) 1 15>, <&CP11X_LABEL(clk) 1 16>; #address-cells = <1>; @@ -320,12 +341,10 @@ status = "disabled"; sata-port@0 { - interrupts = <109 IRQ_TYPE_LEVEL_HIGH>; reg = <0>; }; sata-port@1 { - interrupts = <107 IRQ_TYPE_LEVEL_HIGH>; reg = <1>; }; }; diff --git a/arch/arm64/boot/dts/marvell/cn9130-db.dts b/arch/arm64/boot/dts/marvell/cn9130-db.dts index 79020e6d2792..2c2af001619b 100644 --- a/arch/arm64/boot/dts/marvell/cn9130-db.dts +++ b/arch/arm64/boot/dts/marvell/cn9130-db.dts @@ -392,14 +392,22 @@ }; }; +&cp0_utmi { + status = "okay"; +}; + &cp0_usb3_0 { status = "okay"; usb-phy = <&cp0_usb3_0_phy0>; - phy-names = "usb"; + phys = <&cp0_utmi0>; + phy-names = "utmi"; + dr_mode = "host"; }; &cp0_usb3_1 { status = "okay"; usb-phy = <&cp0_usb3_0_phy1>; - phy-names = "usb"; + phys = <&cp0_utmi1>; + phy-names = "utmi"; + dr_mode = "host"; }; diff --git a/arch/arm64/boot/dts/marvell/cn9131-db.dts b/arch/arm64/boot/dts/marvell/cn9131-db.dts index 3c975f98b2a3..ba2d4e1da159 100644 --- a/arch/arm64/boot/dts/marvell/cn9131-db.dts +++ b/arch/arm64/boot/dts/marvell/cn9131-db.dts @@ -193,10 +193,15 @@ }; /* CON58 */ +&cp1_utmi { + status = "okay"; +}; + &cp1_usb3_1 { status = "okay"; usb-phy = <&cp1_usb3_0_phy0>; /* Generic PHY, providing serdes lanes */ - phys = <&cp1_comphy3 1>; - phy-names = "usb"; + phys = <&cp1_comphy3 1>, <&cp1_utmi1>; + phy-names = "usb", "utmi"; + dr_mode = "host"; }; diff --git a/arch/arm64/boot/dts/marvell/cn9132-db.dts b/arch/arm64/boot/dts/marvell/cn9132-db.dts index 4ef0df3097ca..81fba024b22d 100644 --- a/arch/arm64/boot/dts/marvell/cn9132-db.dts +++ b/arch/arm64/boot/dts/marvell/cn9132-db.dts @@ -205,17 +205,24 @@ }; }; +&cp2_utmi { + status = "okay"; +}; + &cp2_usb3_0 { status = "okay"; usb-phy = <&cp2_usb3_0_phy0>; + phys = <&cp2_utmi0>; phy-names = "usb"; + dr_mode = "host"; }; /* SLM-1521-V2, CON11 */ &cp2_usb3_1 { status = "okay"; usb-phy = <&cp2_usb3_0_phy1>; - phy-names = "usb"; /* Generic PHY, providing serdes lanes */ - phys = <&cp2_comphy3 1>; + phys = <&cp2_comphy3 1>, <&cp2_utmi1>; + phy-names = "usb", "utmi"; + dr_mode = "host"; }; diff --git a/arch/arm64/boot/dts/mediatek/Makefile 
b/arch/arm64/boot/dts/mediatek/Makefile index deba27ab7657..a1c50adc98fa 100644 --- a/arch/arm64/boot/dts/mediatek/Makefile +++ b/arch/arm64/boot/dts/mediatek/Makefile @@ -13,7 +13,15 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-elm-hana.dtb dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-elm-hana-rev7.dtb dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-evb.dtb dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-evb.dtb +dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-jacuzzi-damu.dtb +dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-jacuzzi-juniper-sku16.dtb +dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-kakadu.dtb +dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-kodama-sku16.dtb +dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-kodama-sku272.dtb +dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-kodama-sku288.dtb +dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-kodama-sku32.dtb dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-krane-sku0.dtb dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-krane-sku176.dtb +dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-pumpkin.dtb dtb-$(CONFIG_ARCH_MEDIATEK) += mt8192-evb.dtb dtb-$(CONFIG_ARCH_MEDIATEK) += mt8516-pumpkin.dtb diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi index db17d0a4ed57..a9cca9c146fd 100644 --- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi @@ -805,7 +805,7 @@ ranges; status = "disabled"; - usb_host0: xhci@11270000 { + usb_host0: usb@11270000 { compatible = "mediatek,mt2712-xhci", "mediatek,mtk-xhci"; reg = <0 0x11270000 0 0x1000>; @@ -818,7 +818,7 @@ }; }; - u3phy0: usb-phy@11290000 { + u3phy0: t-phy@11290000 { compatible = "mediatek,mt2712-tphy", "mediatek,generic-tphy-v2"; #address-cells = <1>; @@ -869,7 +869,7 @@ ranges; status = "disabled"; - usb_host1: xhci@112c0000 { + usb_host1: usb@112c0000 { compatible = "mediatek,mt2712-xhci", "mediatek,mtk-xhci"; reg = <0 0x112c0000 0 0x1000>; @@ -882,7 +882,7 @@ }; }; - u3phy1: usb-phy@112e0000 { + u3phy1: t-phy@112e0000 { compatible = "mediatek,mt2712-tphy", "mediatek,generic-tphy-v2"; #address-cells = <1>; diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts index 08ad0ffb24df..f2dc850010f1 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts +++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts @@ -495,6 +495,16 @@ groups = "watchdog"; }; }; + + wmac_pins: wmac-pins { + mux { + function = "antsel"; + groups = "antsel0", "antsel1", "antsel2", "antsel3", + "antsel4", "antsel5", "antsel6", "antsel7", + "antsel8", "antsel9", "antsel12", "antsel13", + "antsel14", "antsel15", "antsel16", "antsel17"; + }; + }; }; &pwm { @@ -559,5 +569,7 @@ }; &wmac { + pinctrl-names = "default"; + pinctrl-0 = <&wmac_pins>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi index 7c6d871538a6..890a942ec608 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi @@ -742,8 +742,8 @@ status = "disabled"; }; - u3phy: usb-phy@1a0c4000 { - compatible = "mediatek,mt7622-u3phy", + u3phy: t-phy@1a0c4000 { + compatible = "mediatek,mt7622-tphy", "mediatek,generic-tphy-v1"; reg = <0 0x1a0c4000 0 0x700>; #address-cells = <2>; @@ -877,8 +877,9 @@ status = "disabled"; }; - sata_phy: sata-phy@1a243000 { - compatible = "mediatek,generic-tphy-v1"; + sata_phy: t-phy@1a243000 { + compatible = "mediatek,mt7622-tphy", + "mediatek,generic-tphy-v1"; #address-cells = <2>; #size-cells = <2>; ranges; diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts 
b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts index 6dffada2e66b..f6a1738dfbaa 100644 --- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts +++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts @@ -294,7 +294,7 @@ &pwrap { /* Only MT8173 E1 needs USB power domain */ - power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>; + power-domains = <&spm MT8173_POWER_DOMAIN_USB>; pmic: mt6397 { compatible = "mediatek,mt6397"; @@ -516,10 +516,8 @@ extcon = <&extcon_usb>; dr_mode = "otg"; wakeup-source; - pinctrl-names = "default", "id_float", "id_ground"; + pinctrl-names = "default"; pinctrl-0 = <&usb_id_pins_float>; - pinctrl-1 = <&usb_id_pins_float>; - pinctrl-2 = <&usb_id_pins_ground>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi index 7fa870e4386a..003a5653c505 100644 --- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi @@ -631,7 +631,7 @@ #mbox-cells = <2>; }; - mipi_tx0: mipi-dphy@10215000 { + mipi_tx0: dsi-phy@10215000 { compatible = "mediatek,mt8173-mipi-tx"; reg = <0 0x10215000 0 0x1000>; clocks = <&clk26m>; @@ -641,7 +641,7 @@ status = "disabled"; }; - mipi_tx1: mipi-dphy@10216000 { + mipi_tx1: dsi-phy@10216000 { compatible = "mediatek,mt8173-mipi-tx"; reg = <0 0x10216000 0 0x1000>; clocks = <&clk26m>; @@ -926,7 +926,7 @@ }; ssusb: usb@11271000 { - compatible = "mediatek,mt8173-mtu3"; + compatible = "mediatek,mt8173-mtu3", "mediatek,mtu3"; reg = <0 0x11271000 0 0x3000>, <0 0x11280700 0 0x0100>; reg-names = "mac", "ippc"; @@ -943,8 +943,9 @@ ranges; status = "disabled"; - usb_host: xhci@11270000 { - compatible = "mediatek,mt8173-xhci"; + usb_host: usb@11270000 { + compatible = "mediatek,mt8173-xhci", + "mediatek,mtk-xhci"; reg = <0 0x11270000 0 0x1000>; reg-names = "mac"; interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>; @@ -955,7 +956,7 @@ }; }; - u3phy: usb-phy@11290000 { + u3phy: t-phy@11290000 { compatible = "mediatek,mt8173-u3phy"; reg = <0 0x11290000 0 0x800>; #address-cells = <2>; @@ -1235,7 +1236,7 @@ <&mmsys CLK_MM_DSI1_DIGITAL>, <&mipi_tx1>; clock-names = "engine", "digital", "hs"; - phy = <&mipi_tx1>; + phys = <&mipi_tx1>; phy-names = "dphy"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts index 3249c959f76f..edff1e03e6fe 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts +++ b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts @@ -352,6 +352,10 @@ }; }; +&mfg { + domain-supply = <&mt6358_vgpu_reg>; +}; + &spi0 { pinctrl-names = "default"; pinctrl-0 = <&spi_pins_0>; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts new file mode 100644 index 000000000000..42ba9c00866c --- /dev/null +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright 2021 Google LLC + */ + +/dts-v1/; +#include "mt8183-kukui-jacuzzi.dtsi" + +/ { + model = "Google damu board"; + compatible = "google,damu", "mediatek,mt8183"; +}; + +&touchscreen { + status = "okay"; + + compatible = "hid-over-i2c"; + reg = <0x10>; + interrupt-parent = <&pio>; + interrupts = <155 IRQ_TYPE_LEVEL_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&touchscreen_pins>; + + post-power-on-delay-ms = <10>; + hid-descr-addr = <0x0001>; +}; + +&qca_wifi { + qcom,ath10k-calibration-variant = "GO_DAMU"; +}; + diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper-sku16.dts 
b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper-sku16.dts new file mode 100644 index 000000000000..36d2c3b3cadf --- /dev/null +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper-sku16.dts @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright 2021 Google LLC + */ + +/dts-v1/; +#include "mt8183-kukui-jacuzzi-juniper.dtsi" + +/ { + model = "Google juniper sku16 board"; + compatible = "google,juniper-sku16", "google,juniper", "mediatek,mt8183"; +}; + diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper.dtsi new file mode 100644 index 000000000000..078bc765646f --- /dev/null +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper.dtsi @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright 2021 Google LLC + */ + +/dts-v1/; +#include "mt8183-kukui-jacuzzi.dtsi" + +&i2c2 { + trackpad@2c { + compatible = "hid-over-i2c"; + reg = <0x2c>; + hid-descr-addr = <0x20>; + + pinctrl-names = "default"; + pinctrl-0 = <&trackpad_pins>; + + interrupts-extended = <&pio 7 IRQ_TYPE_LEVEL_LOW>; + + wakeup-source; + }; +}; + +&qca_wifi { + qcom,ath10k-calibration-variant = "GO_JUNIPER"; +}; + diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi new file mode 100644 index 000000000000..4049dff8464b --- /dev/null +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi @@ -0,0 +1,474 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright 2021 Google LLC + */ + +#include "mt8183-kukui.dtsi" + +/ { + panel: panel { + compatible = "auo,b116xw03"; + power-supply = <&pp3300_panel>; + ddc-i2c-bus = <&i2c4>; + backlight = <&backlight_lcd0>; + + port { + panel_in: endpoint { + remote-endpoint = <&anx7625_out>; + }; + }; + }; + + pp1200_mipibrdg: pp1200-mipibrdg { + compatible = "regulator-fixed"; + regulator-name = "pp1200_mipibrdg"; + pinctrl-names = "default"; + pinctrl-0 = <&pp1200_mipibrdg_en>; + + enable-active-high; + regulator-boot-on; + + gpio = <&pio 54 GPIO_ACTIVE_HIGH>; + }; + + pp1800_mipibrdg: pp1800-mipibrdg { + compatible = "regulator-fixed"; + regulator-name = "pp1800_mipibrdg"; + pinctrl-names = "default"; + pinctrl-0 = <&pp1800_lcd_en>; + + enable-active-high; + regulator-boot-on; + + gpio = <&pio 36 GPIO_ACTIVE_HIGH>; + }; + + pp3300_panel: pp3300-panel { + compatible = "regulator-fixed"; + regulator-name = "pp3300_panel"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + pinctrl-names = "default"; + pinctrl-0 = <&pp3300_panel_pins>; + + enable-active-high; + regulator-boot-on; + + gpio = <&pio 35 GPIO_ACTIVE_HIGH>; + }; + + vddio_mipibrdg: vddio-mipibrdg { + compatible = "regulator-fixed"; + regulator-name = "vddio_mipibrdg"; + pinctrl-names = "default"; + pinctrl-0 = <&vddio_mipibrdg_en>; + + enable-active-high; + regulator-boot-on; + + gpio = <&pio 37 GPIO_ACTIVE_HIGH>; + }; + + volume_buttons: volume-buttons { + compatible = "gpio-keys"; + pinctrl-names = "default"; + pinctrl-0 = <&volume_button_pins>; + + volume_down { + label = "Volume Down"; + linux,code = <KEY_VOLUMEDOWN>; + debounce-interval = <100>; + + gpios = <&pio 6 GPIO_ACTIVE_LOW>; + }; + + volume_up { + label = "Volume Up"; + linux,code = <KEY_VOLUMEUP>; + debounce-interval = <100>; + + gpios = <&pio 5 GPIO_ACTIVE_LOW>; + }; + }; +}; + +&dsi0 { + status = "okay"; + /delete-node/panel@0; + ports { + port { + dsi_out: endpoint { + remote-endpoint = <&anx7625_in>; 
+ }; + }; + }; +}; + +&i2c0 { + status = "okay"; + + touchscreen: touchscreen@10 { + compatible = "elan,ekth3500"; + reg = <0x10>; + + pinctrl-names = "default"; + pinctrl-0 = <&touchscreen_pins>; + + interrupts-extended = <&pio 155 IRQ_TYPE_LEVEL_LOW>; + + reset-gpios = <&pio 156 GPIO_ACTIVE_LOW>; + }; +}; + +&i2c2 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c2_pins>; + status = "okay"; + clock-frequency = <400000>; + + trackpad@15 { + compatible = "elan,ekth3000"; + reg = <0x15>; + + pinctrl-names = "default"; + pinctrl-0 = <&trackpad_pins>; + + interrupts-extended = <&pio 7 IRQ_TYPE_LEVEL_LOW>; + + wakeup-source; + }; +}; + +&i2c4 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c4_pins>; + status = "okay"; + clock-frequency = <100000>; + + anx_bridge: anx7625@58 { + compatible = "analogix,anx7625"; + reg = <0x58>; + pinctrl-names = "default"; + pinctrl-0 = <&anx7625_pins>; + panel_flags = <1>; + enable-gpios = <&pio 45 GPIO_ACTIVE_HIGH>; + reset-gpios = <&pio 73 GPIO_ACTIVE_HIGH>; + vdd10-supply = <&pp1200_mipibrdg>; + vdd18-supply = <&pp1800_mipibrdg>; + vdd33-supply = <&vddio_mipibrdg>; + + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + + anx7625_in: endpoint { + remote-endpoint = <&dsi_out>; + }; + }; + + port@1 { + reg = <1>; + + anx7625_out: endpoint { + remote-endpoint = <&panel_in>; + }; + }; + }; +}; + +&i2c_tunnel { + google,remote-bus = <2>; +}; + +&pio { + /* 192 lines */ + gpio-line-names = + "SPI_AP_EC_CS_L", + "SPI_AP_EC_MOSI", + "SPI_AP_EC_CLK", + "I2S3_DO", + "USB_PD_INT_ODL", + "", + "", + "", + "", + "IT6505_HPD_L", + "I2S3_TDM_D3", + "SOC_I2C6_1V8_SCL", + "SOC_I2C6_1V8_SDA", + "DPI_D0", + "DPI_D1", + "DPI_D2", + "DPI_D3", + "DPI_D4", + "DPI_D5", + "DPI_D6", + "DPI_D7", + "DPI_D8", + "DPI_D9", + "DPI_D10", + "DPI_D11", + "DPI_HSYNC", + "DPI_VSYNC", + "DPI_DE", + "DPI_CK", + "AP_MSDC1_CLK", + "AP_MSDC1_DAT3", + "AP_MSDC1_CMD", + "AP_MSDC1_DAT0", + "AP_MSDC1_DAT2", + "AP_MSDC1_DAT1", + "", + "", + "", + "", + "", + "", + "OTG_EN", + "DRVBUS", + "DISP_PWM", + "DSI_TE", + "LCM_RST_1V8", + "AP_CTS_WIFI_RTS", + "AP_RTS_WIFI_CTS", + "SOC_I2C5_1V8_SCL", + "SOC_I2C5_1V8_SDA", + "SOC_I2C3_1V8_SCL", + "SOC_I2C3_1V8_SDA", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "SOC_I2C1_1V8_SDA", + "SOC_I2C0_1V8_SDA", + "SOC_I2C0_1V8_SCL", + "SOC_I2C1_1V8_SCL", + "AP_SPI_H1_MISO", + "AP_SPI_H1_CS_L", + "AP_SPI_H1_MOSI", + "AP_SPI_H1_CLK", + "I2S5_BCK", + "I2S5_LRCK", + "I2S5_DO", + "BOOTBLOCK_EN_L", + "MT8183_KPCOL0", + "SPI_AP_EC_MISO", + "UART_DBG_TX_AP_RX", + "UART_AP_TX_DBG_RX", + "I2S2_MCK", + "I2S2_BCK", + "CLK_5M_WCAM", + "CLK_2M_UCAM", + "I2S2_LRCK", + "I2S2_DI", + "SOC_I2C2_1V8_SCL", + "SOC_I2C2_1V8_SDA", + "SOC_I2C4_1V8_SCL", + "SOC_I2C4_1V8_SDA", + "", + "SCL8", + "SDA8", + "FCAM_PWDN_L", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "I2S_PMIC", + "I2S_PMIC", + "I2S_PMIC", + "I2S_PMIC", + "I2S_PMIC", + "I2S_PMIC", + "I2S_PMIC", + "I2S_PMIC", + "", + "", + "", + "", + "", + "", + /* + * AP_FLASH_WP_L is crossystem ABI. Rev1 schematics + * call it BIOS_FLASH_WP_R_L. 
+ */ + "AP_FLASH_WP_L", + "EC_AP_INT_ODL", + "IT6505_INT_ODL", + "H1_INT_OD_L", + "", + "", + "", + "", + "", + "", + "", + "AP_SPI_FLASH_MISO", + "AP_SPI_FLASH_CS_L", + "AP_SPI_FLASH_MOSI", + "AP_SPI_FLASH_CLK", + "DA7219_IRQ", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + ""; + + pp1200_mipibrdg_en: pp1200-mipibrdg-en { + pins1 { + pinmux = <PINMUX_GPIO54__FUNC_GPIO54>; + output-low; + }; + }; + + pp1800_lcd_en: pp1800-lcd-en { + pins1 { + pinmux = <PINMUX_GPIO36__FUNC_GPIO36>; + output-low; + }; + }; + + pp3300_panel_pins: pp3300-panel-pins { + panel_3v3_enable: panel-3v3-enable { + pinmux = <PINMUX_GPIO35__FUNC_GPIO35>; + output-low; + }; + }; + + ppvarp_lcd_en: ppvarp-lcd-en { + pins1 { + pinmux = <PINMUX_GPIO66__FUNC_GPIO66>; + output-low; + }; + }; + + ppvarn_lcd_en: ppvarn-lcd-en { + pins1 { + pinmux = <PINMUX_GPIO166__FUNC_GPIO166>; + output-low; + }; + }; + + anx7625_pins: anx7625-pins { + pins1 { + pinmux = <PINMUX_GPIO45__FUNC_GPIO45>, + <PINMUX_GPIO73__FUNC_GPIO73>; + output-low; + }; + pins2 { + pinmux = <PINMUX_GPIO4__FUNC_GPIO4>; + input-enable; + bias-pull-up; + }; + }; + + touchscreen_pins: touchscreen-pins { + touch_int_odl { + pinmux = <PINMUX_GPIO155__FUNC_GPIO155>; + input-enable; + bias-pull-up; + }; + + touch_rst_l { + pinmux = <PINMUX_GPIO156__FUNC_GPIO156>; + output-high; + }; + }; + + trackpad_pins: trackpad-pins { + trackpad_int { + pinmux = <PINMUX_GPIO7__FUNC_GPIO7>; + input-enable; + bias-disable; /* pulled externally */ + }; + }; + + vddio_mipibrdg_en: vddio-mipibrdg-en { + pins1 { + pinmux = <PINMUX_GPIO37__FUNC_GPIO37>; + output-low; + }; + }; + + volume_button_pins: volume-button-pins { + voldn-btn-odl { + pinmux = <PINMUX_GPIO6__FUNC_GPIO6>; + input-enable; + bias-pull-up; + }; + + volup-btn-odl { + pinmux = <PINMUX_GPIO5__FUNC_GPIO5>; + input-enable; + bias-pull-up; + }; + }; +}; + diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dts new file mode 100644 index 000000000000..20eb0dc68f09 --- /dev/null +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dts @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright 2020 Google LLC + */ + +/dts-v1/; +#include "mt8183-kukui-kakadu.dtsi" + +/ { + model = "MediaTek kakadu board"; + compatible = "google,kakadu-rev3", "google,kakadu-rev2", + "google,kakadu", "mediatek,mt8183"; +}; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi new file mode 100644 index 000000000000..b442e38a3156 --- /dev/null +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi @@ -0,0 +1,378 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright 2020 Google LLC + */ + +#include "mt8183-kukui.dtsi" +#include <dt-bindings/input/gpio-keys.h> + +/ { + ppvarn_lcd: ppvarn-lcd { + compatible = "regulator-fixed"; + regulator-name = "ppvarn_lcd"; + pinctrl-names = "default"; + pinctrl-0 = <&ppvarn_lcd_en>; + + enable-active-high; + + gpio = <&pio 66 GPIO_ACTIVE_HIGH>; + }; + + ppvarp_lcd: ppvarp-lcd { + compatible = "regulator-fixed"; + regulator-name = "ppvarp_lcd"; + pinctrl-names = "default"; + pinctrl-0 = <&ppvarp_lcd_en>; + + enable-active-high; + + gpio = <&pio 166 GPIO_ACTIVE_HIGH>; + }; + + pp1800_lcd: pp1800-lcd { + compatible = "regulator-fixed"; + regulator-name = "pp1800_lcd"; + pinctrl-names = "default"; + pinctrl-0 = <&pp1800_lcd_en>; + + 
enable-active-high; + + gpio = <&pio 36 GPIO_ACTIVE_HIGH>; + }; + + gpio-keys { + compatible = "gpio-keys"; + pinctrl-names = "default"; + pinctrl-0 = <&pen_eject>; + + pen-insert { + label = "Pen Insert"; + /* Insert = low, eject = high */ + gpios = <&pio 6 GPIO_ACTIVE_LOW>; + linux,code = <SW_PEN_INSERTED>; + linux,input-type = <EV_SW>; + wakeup-event-action = <EV_ACT_DEASSERTED>; + wakeup-source; + }; + }; +}; + +&bluetooth { + firmware-name = "nvm_00440302_i2s_eu.bin"; +}; + +&i2c0 { + status = "okay"; + + touchscreen: touchscreen@10 { + compatible = "hid-over-i2c"; + reg = <0x10>; + pinctrl-names = "default"; + pinctrl-0 = <&open_touch>; + + interrupt-parent = <&pio>; + interrupts = <155 IRQ_TYPE_EDGE_FALLING>; + + post-power-on-delay-ms = <10>; + hid-descr-addr = <0x0001>; + }; +}; + +&mt6358_vcama2_reg { + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; +}; + +&i2c2 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c2_pins>; + status = "okay"; + clock-frequency = <400000>; + + eeprom@58 { + compatible = "atmel,24c32"; + reg = <0x58>; + pagesize = <32>; + }; +}; + +&i2c4 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c4_pins>; + status = "okay"; + clock-frequency = <400000>; + + eeprom@54 { + compatible = "atmel,24c32"; + reg = <0x54>; + pagesize = <32>; + }; +}; + +&mipi_tx0 { + drive-strength-microamp = <5800>; +}; + +&pio { + /* 192 lines */ + gpio-line-names = + "SPI_AP_EC_CS_L", + "SPI_AP_EC_MOSI", + "SPI_AP_EC_CLK", + "I2S3_DO", + "USB_PD_INT_ODL", + "", + "", + "", + "", + "IT6505_HPD_L", + "I2S3_TDM_D3", + "SOC_I2C6_1V8_SCL", + "SOC_I2C6_1V8_SDA", + "DPI_D0", + "DPI_D1", + "DPI_D2", + "DPI_D3", + "DPI_D4", + "DPI_D5", + "DPI_D6", + "DPI_D7", + "DPI_D8", + "DPI_D9", + "DPI_D10", + "DPI_D11", + "DPI_HSYNC", + "DPI_VSYNC", + "DPI_DE", + "DPI_CK", + "AP_MSDC1_CLK", + "AP_MSDC1_DAT3", + "AP_MSDC1_CMD", + "AP_MSDC1_DAT0", + "AP_MSDC1_DAT2", + "AP_MSDC1_DAT1", + "", + "", + "", + "", + "", + "", + "OTG_EN", + "DRVBUS", + "DISP_PWM", + "DSI_TE", + "LCM_RST_1V8", + "AP_CTS_WIFI_RTS", + "AP_RTS_WIFI_CTS", + "SOC_I2C5_1V8_SCL", + "SOC_I2C5_1V8_SDA", + "SOC_I2C3_1V8_SCL", + "SOC_I2C3_1V8_SDA", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "SOC_I2C1_1V8_SDA", + "SOC_I2C0_1V8_SDA", + "SOC_I2C0_1V8_SCL", + "SOC_I2C1_1V8_SCL", + "AP_SPI_H1_MISO", + "AP_SPI_H1_CS_L", + "AP_SPI_H1_MOSI", + "AP_SPI_H1_CLK", + "I2S5_BCK", + "I2S5_LRCK", + "I2S5_DO", + "BOOTBLOCK_EN_L", + "MT8183_KPCOL0", + "SPI_AP_EC_MISO", + "UART_DBG_TX_AP_RX", + "UART_AP_TX_DBG_RX", + "I2S2_MCK", + "I2S2_BCK", + "CLK_5M_WCAM", + "CLK_2M_UCAM", + "I2S2_LRCK", + "I2S2_DI", + "SOC_I2C2_1V8_SCL", + "SOC_I2C2_1V8_SDA", + "SOC_I2C4_1V8_SCL", + "SOC_I2C4_1V8_SDA", + "", + "SCL8", + "SDA8", + "FCAM_PWDN_L", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "I2S_PMIC", + "I2S_PMIC", + "I2S_PMIC", + "I2S_PMIC", + "I2S_PMIC", + "I2S_PMIC", + "I2S_PMIC", + "I2S_PMIC", + "", + "", + "", + "", + "", + "", + /* + * AP_FLASH_WP_L is crossystem ABI. Rev1 schematics + * call it BIOS_FLASH_WP_R_L. 
+ */ + "AP_FLASH_WP_L", + "EC_AP_INT_ODL", + "IT6505_INT_ODL", + "H1_INT_OD_L", + "", + "", + "", + "", + "", + "", + "", + "AP_SPI_FLASH_MISO", + "AP_SPI_FLASH_CS_L", + "AP_SPI_FLASH_MOSI", + "AP_SPI_FLASH_CLK", + "DA7219_IRQ", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + ""; + + ppvarp_lcd_en: ppvarp-lcd-en { + pins1 { + pinmux = <PINMUX_GPIO66__FUNC_GPIO66>; + output-low; + }; + }; + + ppvarn_lcd_en: ppvarn-lcd-en { + pins1 { + pinmux = <PINMUX_GPIO166__FUNC_GPIO166>; + output-low; + }; + }; + + pp1800_lcd_en: pp1800-lcd-en { + pins1 { + pinmux = <PINMUX_GPIO36__FUNC_GPIO36>; + output-low; + }; + }; + + open_touch: open_touch { + irq_pin { + pinmux = <PINMUX_GPIO155__FUNC_GPIO155>; + input-enable; + bias-pull-up; + }; + + rst_pin { + pinmux = <PINMUX_GPIO156__FUNC_GPIO156>; + + /* + * The pen driver doesn't currently support driving + * this reset line. By specifying output-high here + * we're relying on the fact that this pin has a default + * pulldown at boot (which makes sure the pen was in + * reset if it was powered) and then we set it high here + * to take it out of reset. Better would be if the pen + * driver could control this and we could remove + * "output-high" here. + */ + output-high; + }; + }; + + pen_eject: peneject { + pen_eject { + pinmux = <PINMUX_GPIO6__FUNC_GPIO6>; + input-enable; + /* External pull-up. */ + bias-disable; + }; + }; +}; + +&qca_wifi { + qcom,ath10k-calibration-variant = "GO_KAKADU"; +}; + +&panel { + status = "okay"; + compatible = "boe,tv105wum-nw0"; +}; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku16.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku16.dts new file mode 100644 index 000000000000..e3dd75bdaea4 --- /dev/null +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku16.dts @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright 2021 Google LLC + * + * SKU: 0x10 => 16 + * - bit 8: Camera: 0 (OV5695) + * - bits 7..4: Panel ID: 0x1 (AUO) + */ + +/dts-v1/; +#include "mt8183-kukui-kodama.dtsi" + +/ { + model = "MediaTek kodama sku16 board"; + compatible = "google,kodama-sku16", "google,kodama", "mediatek,mt8183"; +}; + +&panel { + status = "okay"; + compatible = "auo,b101uan08.3"; +}; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku272.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku272.dts new file mode 100644 index 000000000000..d81935ae07bc --- /dev/null +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku272.dts @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright 2020 Google LLC + * + * SKU: 0x110 => 272 + * - bit 8: Camera: 1 (GC5035) + * - bits 7..4: Panel ID: 0x1 (AUO) + */ + +/dts-v1/; +#include "mt8183-kukui-kodama.dtsi" + +/ { + model = "MediaTek kodama sku272 board"; + compatible = "google,kodama-sku272", "google,kodama", "mediatek,mt8183"; +}; + +&panel { + status = "okay"; + compatible = "auo,b101uan08.3"; +}; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku288.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku288.dts new file mode 100644 index 000000000000..f4082fbe0517 --- /dev/null +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku288.dts @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright 2020 Google LLC + * + * SKU: 0x120 => 288 + * - bit 8: Camera: 1 (GC5035) + * - bits 7..4: Panel ID: 0x2 (BOE) + */ + +/dts-v1/; +#include 
"mt8183-kukui-kodama.dtsi" + +/ { + model = "MediaTek kodama sku288 board"; + compatible = "google,kodama-sku288", "google,kodama", "mediatek,mt8183"; +}; + +&panel { + status = "okay"; + compatible = "boe,tv101wum-n53"; +}; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku32.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku32.dts new file mode 100644 index 000000000000..7739358008ee --- /dev/null +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku32.dts @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright 2021 Google LLC + * + * SKU: 0x20 => 32 + * - bit 8: Camera: 0 (OV5695) + * - bits 7..4: Panel ID: 0x2 (BOE) + */ + +/dts-v1/; +#include "mt8183-kukui-kodama.dtsi" + +/ { + model = "MediaTek kodama sku32 board"; + compatible = "google,kodama-sku32", "google,kodama", "mediatek,mt8183"; +}; + +&panel { + status = "okay"; + compatible = "boe,tv101wum-n53"; +}; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi new file mode 100644 index 000000000000..2f5234a16ead --- /dev/null +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi @@ -0,0 +1,343 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright 2021 Google LLC + */ + +/dts-v1/; +#include "mt8183-kukui.dtsi" + +/ { + ppvarn_lcd: ppvarn-lcd { + compatible = "regulator-fixed"; + regulator-name = "ppvarn_lcd"; + pinctrl-names = "default"; + pinctrl-0 = <&ppvarn_lcd_en>; + + enable-active-high; + + gpio = <&pio 66 GPIO_ACTIVE_HIGH>; + }; + + ppvarp_lcd: ppvarp-lcd { + compatible = "regulator-fixed"; + regulator-name = "ppvarp_lcd"; + pinctrl-names = "default"; + pinctrl-0 = <&ppvarp_lcd_en>; + + enable-active-high; + + gpio = <&pio 166 GPIO_ACTIVE_HIGH>; + }; + + pp1800_lcd: pp1800-lcd { + compatible = "regulator-fixed"; + regulator-name = "pp1800_lcd"; + pinctrl-names = "default"; + pinctrl-0 = <&pp1800_lcd_en>; + + enable-active-high; + + gpio = <&pio 36 GPIO_ACTIVE_HIGH>; + }; +}; + +&i2c0 { + status = "okay"; + + touchscreen: touchscreen@10 { + compatible = "hid-over-i2c"; + reg = <0x10>; + interrupt-parent = <&pio>; + interrupts = <155 IRQ_TYPE_LEVEL_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&touch_default>; + + post-power-on-delay-ms = <10>; + hid-descr-addr = <0x0001>; + }; +}; + +&i2c2 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c2_pins>; + status = "okay"; + clock-frequency = <400000>; + + eeprom@58 { + compatible = "atmel,24c64"; + reg = <0x58>; + pagesize = <32>; + }; +}; + +&i2c4 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c4_pins>; + status = "okay"; + clock-frequency = <400000>; + + eeprom@54 { + compatible = "atmel,24c64"; + reg = <0x54>; + pagesize = <32>; + }; +}; + +&mt6358_vcama2_reg { + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; +}; + +&pio { + /* 192 lines */ + gpio-line-names = + "SPI_AP_EC_CS_L", + "SPI_AP_EC_MOSI", + "SPI_AP_EC_CLK", + "I2S3_DO", + "USB_PD_INT_ODL", + "", + "", + "", + "", + "IT6505_HPD_L", + "I2S3_TDM_D3", + "SOC_I2C6_1V8_SCL", + "SOC_I2C6_1V8_SDA", + "DPI_D0", + "DPI_D1", + "DPI_D2", + "DPI_D3", + "DPI_D4", + "DPI_D5", + "DPI_D6", + "DPI_D7", + "DPI_D8", + "DPI_D9", + "DPI_D10", + "DPI_D11", + "DPI_HSYNC", + "DPI_VSYNC", + "DPI_DE", + "DPI_CK", + "AP_MSDC1_CLK", + "AP_MSDC1_DAT3", + "AP_MSDC1_CMD", + "AP_MSDC1_DAT0", + "AP_MSDC1_DAT2", + "AP_MSDC1_DAT1", + "", + "", + "", + "", + "", + "", + "OTG_EN", + "DRVBUS", + "DISP_PWM", + "DSI_TE", + "LCM_RST_1V8", + "AP_CTS_WIFI_RTS", + "AP_RTS_WIFI_CTS", + 
"SOC_I2C5_1V8_SCL", + "SOC_I2C5_1V8_SDA", + "SOC_I2C3_1V8_SCL", + "SOC_I2C3_1V8_SDA", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "SOC_I2C1_1V8_SDA", + "SOC_I2C0_1V8_SDA", + "SOC_I2C0_1V8_SCL", + "SOC_I2C1_1V8_SCL", + "AP_SPI_H1_MISO", + "AP_SPI_H1_CS_L", + "AP_SPI_H1_MOSI", + "AP_SPI_H1_CLK", + "I2S5_BCK", + "I2S5_LRCK", + "I2S5_DO", + "BOOTBLOCK_EN_L", + "MT8183_KPCOL0", + "SPI_AP_EC_MISO", + "UART_DBG_TX_AP_RX", + "UART_AP_TX_DBG_RX", + "I2S2_MCK", + "I2S2_BCK", + "CLK_5M_WCAM", + "CLK_2M_UCAM", + "I2S2_LRCK", + "I2S2_DI", + "SOC_I2C2_1V8_SCL", + "SOC_I2C2_1V8_SDA", + "SOC_I2C4_1V8_SCL", + "SOC_I2C4_1V8_SDA", + "", + "SCL8", + "SDA8", + "FCAM_PWDN_L", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "I2S_PMIC", + "I2S_PMIC", + "I2S_PMIC", + "I2S_PMIC", + "I2S_PMIC", + "I2S_PMIC", + "I2S_PMIC", + "I2S_PMIC", + "", + "", + "", + "", + "", + "", + /* + * AP_FLASH_WP_L is crossystem ABI. Rev1 schematics + * call it BIOS_FLASH_WP_R_L. + */ + "AP_FLASH_WP_L", + "EC_AP_INT_ODL", + "IT6505_INT_ODL", + "H1_INT_OD_L", + "", + "", + "", + "", + "", + "", + "", + "AP_SPI_FLASH_MISO", + "AP_SPI_FLASH_CS_L", + "AP_SPI_FLASH_MOSI", + "AP_SPI_FLASH_CLK", + "DA7219_IRQ", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + ""; + + ppvarp_lcd_en: ppvarp-lcd-en { + pins1 { + pinmux = <PINMUX_GPIO66__FUNC_GPIO66>; + output-low; + }; + }; + + ppvarn_lcd_en: ppvarn-lcd-en { + pins1 { + pinmux = <PINMUX_GPIO166__FUNC_GPIO166>; + output-low; + }; + }; + + pp1800_lcd_en: pp1800-lcd-en { + pins1 { + pinmux = <PINMUX_GPIO36__FUNC_GPIO36>; + output-low; + }; + }; + + touch_default: touchdefault { + pin_irq { + pinmux = <PINMUX_GPIO155__FUNC_GPIO155>; + input-enable; + bias-pull-up; + }; + + touch_pin_reset: pin_reset { + pinmux = <PINMUX_GPIO156__FUNC_GPIO156>; + + /* + * The touchscreen driver doesn't currently support driving + * this reset line. By specifying output-high here + * we're relying on the fact that this pin has a default + * pulldown at boot (which makes sure the controller was in + * reset if it was powered) and then we set it high here + * to take it out of reset. Better would be if the touchscreen + * driver could control this and we could remove + * "output-high" here. + */ + output-high; + }; + }; +}; + +&qca_wifi { + qcom,ath10k-calibration-variant = "GO_KODAMA"; +}; + +&i2c_tunnel { + google,remote-bus = <2>; +}; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts new file mode 100644 index 000000000000..0aff5eb52e88 --- /dev/null +++ b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts @@ -0,0 +1,378 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2021 BayLibre, SAS. 
+ * Author: Fabien Parent <fparent@baylibre.com> + */ + +/dts-v1/; + +#include <dt-bindings/gpio/gpio.h> +#include "mt8183.dtsi" +#include "mt6358.dtsi" + +/ { + model = "Pumpkin MT8183"; + compatible = "mediatek,mt8183-pumpkin", "mediatek,mt8183"; + + aliases { + serial0 = &uart0; + }; + + memory@40000000 { + device_type = "memory"; + reg = <0 0x40000000 0 0x80000000>; + }; + + chosen { + stdout-path = "serial0:921600n8"; + }; + + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + scp_mem_reserved: scp_mem_region@50000000 { + compatible = "shared-dma-pool"; + reg = <0 0x50000000 0 0x2900000>; + no-map; + }; + }; + + leds { + compatible = "gpio-leds"; + + led-red { + label = "red"; + gpios = <&pio 155 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + + led-green { + label = "green"; + gpios = <&pio 156 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + }; + + ntc { + compatible = "murata,ncp03wf104"; + pullup-uv = <1800000>; + pullup-ohm = <390000>; + pulldown-ohm = <0>; + io-channels = <&auxadc 0>; + }; +}; + +&auxadc { + status = "okay"; +}; + +&i2c0 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c_pins_0>; + status = "okay"; + clock-frequency = <100000>; +}; + +&i2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c_pins_1>; + status = "okay"; + clock-frequency = <100000>; +}; + +&i2c2 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c_pins_2>; + status = "okay"; + clock-frequency = <100000>; +}; + +&i2c3 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c_pins_3>; + status = "okay"; + clock-frequency = <100000>; +}; + +&i2c4 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c_pins_4>; + status = "okay"; + clock-frequency = <100000>; +}; + +&i2c5 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c_pins_5>; + status = "okay"; + clock-frequency = <100000>; +}; + +&i2c6 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c6_pins>; + status = "okay"; + clock-frequency = <100000>; +}; + +&mmc0 { + status = "okay"; + pinctrl-names = "default", "state_uhs"; + pinctrl-0 = <&mmc0_pins_default>; + pinctrl-1 = <&mmc0_pins_uhs>; + bus-width = <8>; + max-frequency = <200000000>; + cap-mmc-highspeed; + mmc-hs200-1_8v; + mmc-hs400-1_8v; + cap-mmc-hw-reset; + no-sdio; + no-sd; + hs400-ds-delay = <0x12814>; + vmmc-supply = <&mt6358_vemc_reg>; + vqmmc-supply = <&mt6358_vio18_reg>; + assigned-clocks = <&topckgen CLK_TOP_MUX_MSDC50_0>; + assigned-clock-parents = <&topckgen CLK_TOP_MSDCPLL_CK>; + non-removable; +}; + +&mmc1 { + status = "okay"; + pinctrl-names = "default", "state_uhs"; + pinctrl-0 = <&mmc1_pins_default>; + pinctrl-1 = <&mmc1_pins_uhs>; + bus-width = <4>; + max-frequency = <200000000>; + cap-sd-highspeed; + sd-uhs-sdr50; + sd-uhs-sdr104; + cap-sdio-irq; + no-mmc; + no-sd; + vmmc-supply = <&mt6358_vmch_reg>; + vqmmc-supply = <&mt6358_vmc_reg>; + keep-power-in-suspend; + enable-sdio-wakeup; + non-removable; +}; + +&pio { + i2c_pins_0: i2c0 { + pins_i2c{ + pinmux = <PINMUX_GPIO82__FUNC_SDA0>, + <PINMUX_GPIO83__FUNC_SCL0>; + mediatek,pull-up-adv = <3>; + mediatek,drive-strength-adv = <00>; + }; + }; + + i2c_pins_1: i2c1 { + pins_i2c{ + pinmux = <PINMUX_GPIO81__FUNC_SDA1>, + <PINMUX_GPIO84__FUNC_SCL1>; + mediatek,pull-up-adv = <3>; + mediatek,drive-strength-adv = <00>; + }; + }; + + i2c_pins_2: i2c2 { + pins_i2c{ + pinmux = <PINMUX_GPIO103__FUNC_SCL2>, + <PINMUX_GPIO104__FUNC_SDA2>; + mediatek,pull-up-adv = <3>; + mediatek,drive-strength-adv = <00>; + }; + }; + + i2c_pins_3: i2c3 { + pins_i2c{ + pinmux = <PINMUX_GPIO50__FUNC_SCL3>, + <PINMUX_GPIO51__FUNC_SDA3>; + 
mediatek,pull-up-adv = <3>; + mediatek,drive-strength-adv = <00>; + }; + }; + + i2c_pins_4: i2c4 { + pins_i2c{ + pinmux = <PINMUX_GPIO105__FUNC_SCL4>, + <PINMUX_GPIO106__FUNC_SDA4>; + mediatek,pull-up-adv = <3>; + mediatek,drive-strength-adv = <00>; + }; + }; + + i2c_pins_5: i2c5 { + pins_i2c{ + pinmux = <PINMUX_GPIO48__FUNC_SCL5>, + <PINMUX_GPIO49__FUNC_SDA5>; + mediatek,pull-up-adv = <3>; + mediatek,drive-strength-adv = <00>; + }; + }; + + i2c6_pins: i2c6 { + pins_cmd_dat { + pinmux = <PINMUX_GPIO113__FUNC_SCL6>, + <PINMUX_GPIO114__FUNC_SDA6>; + mediatek,pull-up-adv = <3>; + }; + }; + + mmc0_pins_default: mmc0-pins-default { + pins_cmd_dat { + pinmux = <PINMUX_GPIO123__FUNC_MSDC0_DAT0>, + <PINMUX_GPIO128__FUNC_MSDC0_DAT1>, + <PINMUX_GPIO125__FUNC_MSDC0_DAT2>, + <PINMUX_GPIO132__FUNC_MSDC0_DAT3>, + <PINMUX_GPIO126__FUNC_MSDC0_DAT4>, + <PINMUX_GPIO129__FUNC_MSDC0_DAT5>, + <PINMUX_GPIO127__FUNC_MSDC0_DAT6>, + <PINMUX_GPIO130__FUNC_MSDC0_DAT7>, + <PINMUX_GPIO122__FUNC_MSDC0_CMD>; + input-enable; + drive-strength = <MTK_DRIVE_14mA>; + mediatek,pull-up-adv = <01>; + }; + + pins_clk { + pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>; + drive-strength = <MTK_DRIVE_14mA>; + mediatek,pull-down-adv = <10>; + }; + + pins_rst { + pinmux = <PINMUX_GPIO133__FUNC_MSDC0_RSTB>; + drive-strength = <MTK_DRIVE_14mA>; + mediatek,pull-down-adv = <01>; + }; + }; + + mmc0_pins_uhs: mmc0-pins-uhs { + pins_cmd_dat { + pinmux = <PINMUX_GPIO123__FUNC_MSDC0_DAT0>, + <PINMUX_GPIO128__FUNC_MSDC0_DAT1>, + <PINMUX_GPIO125__FUNC_MSDC0_DAT2>, + <PINMUX_GPIO132__FUNC_MSDC0_DAT3>, + <PINMUX_GPIO126__FUNC_MSDC0_DAT4>, + <PINMUX_GPIO129__FUNC_MSDC0_DAT5>, + <PINMUX_GPIO127__FUNC_MSDC0_DAT6>, + <PINMUX_GPIO130__FUNC_MSDC0_DAT7>, + <PINMUX_GPIO122__FUNC_MSDC0_CMD>; + input-enable; + drive-strength = <MTK_DRIVE_14mA>; + mediatek,pull-up-adv = <01>; + }; + + pins_clk { + pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>; + drive-strength = <MTK_DRIVE_14mA>; + mediatek,pull-down-adv = <10>; + }; + + pins_ds { + pinmux = <PINMUX_GPIO131__FUNC_MSDC0_DSL>; + drive-strength = <MTK_DRIVE_14mA>; + mediatek,pull-down-adv = <10>; + }; + + pins_rst { + pinmux = <PINMUX_GPIO133__FUNC_MSDC0_RSTB>; + drive-strength = <MTK_DRIVE_14mA>; + mediatek,pull-up-adv = <01>; + }; + }; + + mmc1_pins_default: mmc1-pins-default { + pins_cmd_dat { + pinmux = <PINMUX_GPIO31__FUNC_MSDC1_CMD>, + <PINMUX_GPIO32__FUNC_MSDC1_DAT0>, + <PINMUX_GPIO34__FUNC_MSDC1_DAT1>, + <PINMUX_GPIO33__FUNC_MSDC1_DAT2>, + <PINMUX_GPIO30__FUNC_MSDC1_DAT3>; + input-enable; + mediatek,pull-up-adv = <10>; + }; + + pins_clk { + pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>; + input-enable; + mediatek,pull-down-adv = <10>; + }; + + pins_pmu { + pinmux = <PINMUX_GPIO178__FUNC_GPIO178>; + output-high; + }; + }; + + mmc1_pins_uhs: mmc1-pins-uhs { + pins_cmd_dat { + pinmux = <PINMUX_GPIO31__FUNC_MSDC1_CMD>, + <PINMUX_GPIO32__FUNC_MSDC1_DAT0>, + <PINMUX_GPIO34__FUNC_MSDC1_DAT1>, + <PINMUX_GPIO33__FUNC_MSDC1_DAT2>, + <PINMUX_GPIO30__FUNC_MSDC1_DAT3>; + drive-strength = <MTK_DRIVE_6mA>; + input-enable; + mediatek,pull-up-adv = <10>; + }; + + pins_clk { + pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>; + drive-strength = <MTK_DRIVE_8mA>; + mediatek,pull-down-adv = <10>; + input-enable; + }; + }; +}; + +&mfg { + domain-supply = <&mt6358_vgpu_reg>; +}; + +&cpu0 { + proc-supply = <&mt6358_vproc12_reg>; +}; + +&cpu1 { + proc-supply = <&mt6358_vproc12_reg>; +}; + +&cpu2 { + proc-supply = <&mt6358_vproc12_reg>; +}; + +&cpu3 { + proc-supply = <&mt6358_vproc12_reg>; +}; + +&cpu4 { + proc-supply = <&mt6358_vproc11_reg>; +}; + 
+&cpu5 { + proc-supply = <&mt6358_vproc11_reg>; +}; + +&cpu6 { + proc-supply = <&mt6358_vproc11_reg>; +}; + +&cpu7 { + proc-supply = <&mt6358_vproc11_reg>; +}; + +&uart0 { + status = "okay"; +}; + +&scp { + status = "okay"; +}; + +&dsi0 { + status = "disabled"; +}; diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi index 80519a145f13..c5e822b6b77a 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi @@ -13,6 +13,7 @@ #include <dt-bindings/power/mt8183-power.h> #include <dt-bindings/reset-controller/mt8183-resets.h> #include <dt-bindings/phy/phy.h> +#include <dt-bindings/thermal/thermal.h> #include "mt8183-pinfunc.h" / { @@ -657,6 +658,142 @@ status = "disabled"; }; + thermal: thermal@1100b000 { + #thermal-sensor-cells = <1>; + compatible = "mediatek,mt8183-thermal"; + reg = <0 0x1100b000 0 0x1000>; + clocks = <&infracfg CLK_INFRA_THERM>, + <&infracfg CLK_INFRA_AUXADC>; + clock-names = "therm", "auxadc"; + resets = <&infracfg MT8183_INFRACFG_AO_THERM_SW_RST>; + interrupts = <0 76 IRQ_TYPE_LEVEL_LOW>; + mediatek,auxadc = <&auxadc>; + mediatek,apmixedsys = <&apmixedsys>; + nvmem-cells = <&thermal_calibration>; + nvmem-cell-names = "calibration-data"; + }; + + thermal-zones { + cpu_thermal: cpu_thermal { + polling-delay-passive = <100>; + polling-delay = <500>; + thermal-sensors = <&thermal 0>; + sustainable-power = <5000>; + + trips { + threshold: trip-point0 { + temperature = <68000>; + hysteresis = <2000>; + type = "passive"; + }; + + target: trip-point1 { + temperature = <80000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu_crit: cpu-crit { + temperature = <115000>; + hysteresis = <2000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&target>; + cooling-device = <&cpu0 + THERMAL_NO_LIMIT + THERMAL_NO_LIMIT>, + <&cpu1 + THERMAL_NO_LIMIT + THERMAL_NO_LIMIT>, + <&cpu2 + THERMAL_NO_LIMIT + THERMAL_NO_LIMIT>, + <&cpu3 + THERMAL_NO_LIMIT + THERMAL_NO_LIMIT>; + contribution = <3072>; + }; + map1 { + trip = <&target>; + cooling-device = <&cpu4 + THERMAL_NO_LIMIT + THERMAL_NO_LIMIT>, + <&cpu5 + THERMAL_NO_LIMIT + THERMAL_NO_LIMIT>, + <&cpu6 + THERMAL_NO_LIMIT + THERMAL_NO_LIMIT>, + <&cpu7 + THERMAL_NO_LIMIT + THERMAL_NO_LIMIT>; + contribution = <1024>; + }; + }; + }; + + /* The tzts1 ~ tzts6 don't need to polling */ + /* The tzts1 ~ tzts6 don't need to thermal throttle */ + + tzts1: tzts1 { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&thermal 1>; + sustainable-power = <5000>; + trips {}; + cooling-maps {}; + }; + + tzts2: tzts2 { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&thermal 2>; + sustainable-power = <5000>; + trips {}; + cooling-maps {}; + }; + + tzts3: tzts3 { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&thermal 3>; + sustainable-power = <5000>; + trips {}; + cooling-maps {}; + }; + + tzts4: tzts4 { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&thermal 4>; + sustainable-power = <5000>; + trips {}; + cooling-maps {}; + }; + + tzts5: tzts5 { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&thermal 5>; + sustainable-power = <5000>; + trips {}; + cooling-maps {}; + }; + + tztsABB: tztsABB { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&thermal 6>; + sustainable-power = <5000>; + trips {}; + cooling-maps {}; + }; + }; + pwm0: pwm@1100e000 { compatible = "mediatek,mt8183-disp-pwm"; reg = <0 
0x1100e000 0 0x1000>; @@ -874,13 +1011,13 @@ clocks = <&infracfg CLK_INFRA_UNIPRO_SCK>, <&infracfg CLK_INFRA_USB>; clock-names = "sys_ck", "ref_ck"; - mediatek,syscon-wakeup = <&pericfg 0x400 0>; + mediatek,syscon-wakeup = <&pericfg 0x420 101>; #address-cells = <2>; #size-cells = <2>; ranges; status = "disabled"; - usb_host: xhci@11200000 { + usb_host: usb@11200000 { compatible = "mediatek,mt8183-xhci", "mediatek,mtk-xhci"; reg = <0 0x11200000 0 0x1000>; @@ -923,11 +1060,10 @@ status = "disabled"; }; - mipi_tx0: mipi-dphy@11e50000 { + mipi_tx0: dsi-phy@11e50000 { compatible = "mediatek,mt8183-mipi-tx"; reg = <0 0x11e50000 0 0x1000>; clocks = <&apmixedsys CLK_APMIXED_MIPID0_26M>; - clock-names = "ref_clk"; #clock-cells = <0>; #phy-cells = <0>; clock-output-names = "mipi_tx0_pll"; @@ -941,16 +1077,19 @@ reg = <0 0x11f10000 0 0x1000>; #address-cells = <1>; #size-cells = <1>; + thermal_calibration: calib@180 { + reg = <0x180 0xc>; + }; + mipi_tx_calibration: calib@190 { reg = <0x190 0xc>; }; }; - u3phy: usb-phy@11f40000 { + u3phy: t-phy@11f40000 { compatible = "mediatek,mt8183-tphy", "mediatek,generic-tphy-v2"; #address-cells = <1>; - #phy-cells = <1>; #size-cells = <1>; ranges = <0 0 0x11f40000 0x1000>; status = "okay"; @@ -964,7 +1103,7 @@ status = "okay"; }; - u3port0: usb-phy@0700 { + u3port0: usb-phy@700 { reg = <0x0700 0x900>; clocks = <&clk26m>; clock-names = "ref"; @@ -983,6 +1122,9 @@ compatible = "mediatek,mt8183-mmsys", "syscon"; reg = <0 0x14000000 0 0x1000>; #clock-cells = <1>; + mboxes = <&gce 0 CMDQ_THR_PRIO_HIGHEST>, + <&gce 1 CMDQ_THR_PRIO_HIGHEST>; + mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0 0x1000>; }; ovl0: ovl@14008000 { @@ -1058,6 +1200,7 @@ interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_LOW>; power-domains = <&spm MT8183_POWER_DOMAIN_DISP>; clocks = <&mmsys CLK_MM_DISP_CCORR0>; + mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xf000 0x1000>; }; aal0: aal@14010000 { @@ -1067,6 +1210,7 @@ interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_LOW>; power-domains = <&spm MT8183_POWER_DOMAIN_DISP>; clocks = <&mmsys CLK_MM_DISP_AAL0>; + mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0 0x1000>; }; gamma0: gamma@14011000 { @@ -1075,6 +1219,7 @@ interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_LOW>; power-domains = <&spm MT8183_POWER_DOMAIN_DISP>; clocks = <&mmsys CLK_MM_DISP_GAMMA0>; + mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x1000 0x1000>; }; dither0: dither@14012000 { @@ -1083,6 +1228,7 @@ interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_LOW>; power-domains = <&spm MT8183_POWER_DOMAIN_DISP>; clocks = <&mmsys CLK_MM_DISP_DITHER0>; + mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x2000 0x1000>; }; dsi0: dsi@14014000 { diff --git a/arch/arm64/boot/dts/mediatek/mt8516.dtsi b/arch/arm64/boot/dts/mediatek/mt8516.dtsi index b80e95574bef..bbe5a1419eff 100644 --- a/arch/arm64/boot/dts/mediatek/mt8516.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8516.dtsi @@ -480,7 +480,7 @@ }; usb0: usb@11100000 { - compatible = "mediatek,mtk-musb"; + compatible = "mediatek,mt8516-musb", "mediatek,mtk-musb"; reg = <0 0x11100000 0 0x1000>; interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_LOW>; interrupt-names = "mc"; @@ -493,7 +493,7 @@ }; usb1: usb@11190000 { - compatible = "mediatek,mtk-musb"; + compatible = "mediatek,mt8516-musb", "mediatek,mtk-musb"; reg = <0 0x11190000 0 0x1000>; interrupts = <GIC_SPI 210 IRQ_TYPE_LEVEL_LOW>; interrupt-names = "mc"; @@ -506,8 +506,9 @@ status = "disabled"; }; - usb_phy: usb@11110000 { - compatible = "mediatek,generic-tphy-v1"; + usb_phy: t-phy@11110000 { + compatible = 
"mediatek,mt8516-tphy", + "mediatek,generic-tphy-v1"; reg = <0 0x11110000 0 0x800>; #address-cells = <2>; #size-cells = <2>; diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi index 63fd70086bb8..fcddec14738d 100644 --- a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi +++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi @@ -56,7 +56,7 @@ tca6416: gpio@20 { compatible = "ti,tca6416"; reg = <0x20>; - reset-gpios = <&pio 65 GPIO_ACTIVE_HIGH>; + reset-gpios = <&pio 65 GPIO_ACTIVE_LOW>; pinctrl-names = "default"; pinctrl-0 = <&tca6416_pins>; @@ -188,6 +188,7 @@ &usb0 { status = "okay"; dr_mode = "peripheral"; + usb-role-switch; usb_con: connector { compatible = "usb-c-connector"; diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts index 9f5f5e1fa82e..683743f81849 100644 --- a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts +++ b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts @@ -10,7 +10,7 @@ model = "NVIDIA Jetson TX2 Developer Kit"; compatible = "nvidia,p2771-0000", "nvidia,tegra186"; - aconnect { + aconnect@2900000 { status = "okay"; dma-controller@2930000 { diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi index fd9177447711..fcd71bfc6707 100644 --- a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi @@ -23,7 +23,7 @@ }; chosen { - bootargs = "earlycon console=ttyS0,115200n8"; + bootargs = "earlycon console=ttyS0,115200n8 fw_devlink=on"; stdout-path = "serial0:115200n8"; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi index 02b26b39cedc..9f75bbf00cf7 100644 --- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi @@ -73,7 +73,7 @@ snps,rxpbl = <8>; }; - aconnect { + aconnect@2900000 { compatible = "nvidia,tegra186-aconnect", "nvidia,tegra210-aconnect"; clocks = <&bpmp TEGRA186_CLK_APE>, diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts index 2888efc42ba1..d618f197a1d3 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts +++ b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts @@ -651,6 +651,8 @@ reg = <0x1a>; interrupt-parent = <&gpio>; interrupts = <TEGRA194_MAIN_GPIO(S, 5) GPIO_ACTIVE_HIGH>; + clocks = <&bpmp TEGRA194_CLK_AUD_MCLK>; + clock-names = "mclk"; realtek,jd-src = <2>; sound-name-prefix = "CVB-RT"; @@ -658,7 +660,6 @@ rt5658_ep: endpoint { remote-endpoint = <&i2s1_dap_ep>; mclk-fs = <256>; - clocks = <&bpmp TEGRA194_CLK_AUD_MCLK>; }; }; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi index 7da3d48cb410..14da4206ea66 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi @@ -5,6 +5,10 @@ model = "NVIDIA Jetson Xavier NX (SD-card)"; compatible = "nvidia,p3668-0000", "nvidia,tegra194"; + aliases { + mmc0 = "/bus@0/mmc@3400000"; + }; + bus@0 { /* SDMMC1 (SD/MMC) */ mmc@3400000 { diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p3668-0001.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p3668-0001.dtsi index b7808648cfe4..f5a9ebbfb12f 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194-p3668-0001.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194-p3668-0001.dtsi @@ -5,6 +5,10 @@ model = "NVIDIA Jetson Xavier NX (eMMC)"; compatible = 
"nvidia,p3668-0001", "nvidia,tegra194"; + aliases { + mmc0 = "/bus@0/mmc@3460000"; + }; + bus@0 { /* SDMMC4 (eMMC) */ mmc@3460000 { diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi index 4f12721c332b..f16b0aa8a374 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi @@ -14,7 +14,6 @@ i2c5 = "/bus@0/i2c@31c0000"; i2c6 = "/bus@0/i2c@c250000"; i2c7 = "/bus@0/i2c@31e0000"; - mmc0 = "/bus@0/mmc@3460000"; rtc0 = "/bpmp/i2c/pmic@3c"; rtc1 = "/bus@0/rtc@c2a0000"; serial0 = &tcu; diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile index 549a7a2151d4..456502aeee49 100644 --- a/arch/arm64/boot/dts/qcom/Makefile +++ b/arch/arm64/boot/dts/qcom/Makefile @@ -27,10 +27,16 @@ dtb-$(CONFIG_ARCH_QCOM) += msm8998-asus-novago-tp370ql.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8998-hp-envy-x2.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8998-lenovo-miix-630.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8998-mtp.dtb +dtb-$(CONFIG_ARCH_QCOM) += msm8998-oneplus-cheeseburger.dtb +dtb-$(CONFIG_ARCH_QCOM) += msm8998-oneplus-dumpling.dtb dtb-$(CONFIG_ARCH_QCOM) += qcs404-evb-1000.dtb dtb-$(CONFIG_ARCH_QCOM) += qcs404-evb-4000.dtb dtb-$(CONFIG_ARCH_QCOM) += qrb5165-rb5.dtb dtb-$(CONFIG_ARCH_QCOM) += sc7180-idp.dtb +dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-coachz-r1.dtb +dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-coachz-r1-lte.dtb +dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-coachz-r2.dtb +dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-coachz-r2-lte.dtb dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-r0.dtb dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-r1.dtb dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-r1-kb.dtb @@ -38,8 +44,16 @@ dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-r1-lte.dtb dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-r3.dtb dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-r3-kb.dtb dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-r3-lte.dtb +dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-limozeen.dtb +dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-limozeen-nots.dtb +dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-limozeen-nots-r4.dtb +dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-pompom-r1.dtb +dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-pompom-r1-lte.dtb +dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-pompom-r2.dtb +dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-pompom-r2-lte.dtb dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-r1.dtb dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-r1-lte.dtb +dtb-$(CONFIG_ARCH_QCOM) += sc7280-idp.dtb dtb-$(CONFIG_ARCH_QCOM) += sdm630-sony-xperia-ganges-kirin.dtb dtb-$(CONFIG_ARCH_QCOM) += sdm630-sony-xperia-nile-discovery.dtb dtb-$(CONFIG_ARCH_QCOM) += sdm630-sony-xperia-nile-pioneer.dtb @@ -59,4 +73,5 @@ dtb-$(CONFIG_ARCH_QCOM) += sm8150-hdk.dtb dtb-$(CONFIG_ARCH_QCOM) += sm8150-mtp.dtb dtb-$(CONFIG_ARCH_QCOM) += sm8250-hdk.dtb dtb-$(CONFIG_ARCH_QCOM) += sm8250-mtp.dtb +dtb-$(CONFIG_ARCH_QCOM) += sm8350-hdk.dtb dtb-$(CONFIG_ARCH_QCOM) += sm8350-mtp.dtb diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts index 48bd1c2874de..f3c0dbfd0a23 100644 --- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts +++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts @@ -9,5 +9,5 @@ / { model = "Qualcomm Technologies, Inc. 
APQ 8016 SBC"; - compatible = "qcom,apq8016-sbc", "qcom,apq8016", "qcom,sbc"; + compatible = "qcom,apq8016-sbc", "qcom,apq8016"; }; diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index 5353da521974..4f06c0a9c425 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -25,10 +25,10 @@ chosen { }; - memory { + memory@80000000 { device_type = "memory"; /* We expect the bootloader to fill in the reg */ - reg = <0 0 0 0>; + reg = <0 0x80000000 0 0>; }; reserved-memory { @@ -1766,7 +1766,9 @@ compatible = "qcom,msm-qgic2"; interrupt-controller; #interrupt-cells = <3>; - reg = <0x0b000000 0x1000>, <0x0b002000 0x1000>; + reg = <0x0b000000 0x1000>, <0x0b002000 0x2000>, + <0x0b001000 0x1000>, <0x0b004000 0x2000>; + interrupts = <GIC_PPI 0 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>; }; apcs: mailbox@b011000 { diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi index f49d442d2edf..f9f0b5aa6a26 100644 --- a/arch/arm64/boot/dts/qcom/msm8994.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi @@ -149,10 +149,10 @@ }; }; - memory { + memory@80000000 { device_type = "memory"; /* We expect the bootloader to fill in the reg */ - reg = <0 0 0 0>; + reg = <0 0x80000000 0 0>; }; tcsr_mutex: hwlock { diff --git a/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi b/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi index b500f24d47bc..125d7923d713 100644 --- a/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi @@ -281,6 +281,10 @@ }; }; +&remoteproc_mss { + status = "okay"; +}; + &tlmm { gpio-reserved-ranges = <0 4>, <81 4>; diff --git a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi index c1ef0c71d5f5..a1d15eab8553 100644 --- a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi @@ -328,6 +328,10 @@ status = "okay"; }; +&remoteproc_mss { + status = "okay"; +}; + &remoteproc_slpi { status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/msm8998-oneplus-cheeseburger.dts b/arch/arm64/boot/dts/qcom/msm8998-oneplus-cheeseburger.dts new file mode 100644 index 000000000000..66b9297588ab --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8998-oneplus-cheeseburger.dts @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * OnePlus 5 (cheeseburger) device tree + * + * Copyright (c) 2021, Jami Kettunen <jamipkettunen@gmail.com> + */ + +#include <dt-bindings/leds/common.h> +#include "msm8998-oneplus-common.dtsi" + +/ { + model = "OnePlus 5"; + compatible = "oneplus,cheeseburger", "qcom,msm8998"; + /* Required for bootloader to select correct board */ + qcom,board-id = <8 0 16859 23>; + + /* Capacitive keypad button backlight */ + leds { + compatible = "gpio-leds"; + + pinctrl-names = "default"; + pinctrl-0 = <&button_backlight_default>; + + button-backlight { + gpios = <&pmi8998_gpio 5 GPIO_ACTIVE_HIGH>; + color = <LED_COLOR_ID_WHITE>; + function = LED_FUNCTION_KBD_BACKLIGHT; + default-state = "off"; + }; + }; +}; + +&pmi8998_gpio { + button_backlight_default: button-backlight-default { + pinconf { + pins = "gpio5"; + function = "normal"; + bias-pull-down; + qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8998-oneplus-common.dtsi b/arch/arm64/boot/dts/qcom/msm8998-oneplus-common.dtsi new file mode 100644 index 000000000000..0f5c7828a901 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8998-oneplus-common.dtsi @@ -0,0 
+1,514 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * OnePlus 5(T) (cheeseburger / dumpling) common device tree source based on msm8998-mtp.dtsi + * + * Copyright (c) 2021, Jami Kettunen <jamipkettunen@gmail.com> + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + */ + +/dts-v1/; + +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/pinctrl/qcom,pmic-gpio.h> +#include "msm8998.dtsi" +#include "pm8998.dtsi" +#include "pmi8998.dtsi" +#include "pm8005.dtsi" + +/ { + /* Required for bootloader to select correct board */ + qcom,msm-id = <292 0x20001>; /* 8998 v2.1 */ + + chosen { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + /* Use display framebuffer setup by the UEFI XBL bootloader for simplefb */ + framebuffer0: framebuffer@9d400000 { + compatible = "simple-framebuffer"; + reg = <0x0 0x9d400000 0x0 0x2400000>; + width = <1080>; + height = <1920>; + stride = <(1080 * 4)>; + format = "a8r8g8b8"; + }; + }; + + reserved-memory { + /* Bootloader display framebuffer region */ + cont_splash_mem: memory@9d400000 { + reg = <0x0 0x9d400000 0x0 0x2400000>; + no-map; + }; + + /* For getting crash logs using Android downstream kernels */ + ramoops@ac000000 { + compatible = "ramoops"; + reg = <0x0 0xac000000 0x0 0x200000>; + console-size = <0x80000>; + pmsg-size = <0x40000>; + record-size = <0x8000>; + ftrace-size = <0x20000>; + }; + + /* + * The following memory regions on downstream are "dynamically allocated" + * but given the same addresses every time. Hard code them as these addresses + * are where the OnePlus signed firmware expects them to be. + */ + ipa_fws_region: ipa@f6800000 { + compatible = "shared-dma-pool"; + reg = <0x0 0xf6800000 0x0 0x5000>; + no-map; + }; + zap_shader_region: gpu@f6900000 { + compatible = "shared-dma-pool"; + reg = <0x0 0xf6900000 0x0 0x2000>; + no-map; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + label = "Volume buttons"; + autorepeat; + + pinctrl-names = "default"; + pinctrl-0 = <&vol_keys_default>; + + vol-down { + label = "Volume down"; + gpios = <&pm8998_gpio 5 GPIO_ACTIVE_LOW>; + linux,code = <KEY_VOLUMEDOWN>; + debounce-interval = <15>; + wakeup-source; + }; + + vol-up { + label = "Volume up"; + gpios = <&pm8998_gpio 6 GPIO_ACTIVE_LOW>; + linux,code = <KEY_VOLUMEUP>; + debounce-interval = <15>; + wakeup-source; + }; + }; + + gpio-hall-sensor { + compatible = "gpio-keys"; + label = "Hall effect sensor"; + + pinctrl-names = "default"; + pinctrl-0 = <&hall_sensor_default>; + + hall-sensor { + label = "Hall Effect Sensor"; + gpios = <&tlmm 124 GPIO_ACTIVE_LOW>; + linux,input-type = <EV_SW>; + linux,code = <SW_LID>; + linux,can-disable; + wakeup-source; + }; + }; + + vph_pwr: vph-pwr-regulator { + compatible = "regulator-fixed"; + regulator-name = "vph_pwr"; + regulator-always-on; + regulator-boot-on; + }; +}; + +/* + * OnePlus' ADSP firmware requires 30 MiB in total, so increase the adsp_mem + * region by 4 MiB to account for this while relocating the other now + * conflicting memory nodes accordingly. 
+ */ +&adsp_mem { + reg = <0x0 0x8b200000 0x0 0x1e00000>; +}; +&mpss_mem { + reg = <0x0 0x8d000000 0x0 0x7000000>; +}; +&venus_mem { + reg = <0x0 0x94000000 0x0 0x500000>; +}; +&mba_mem { + reg = <0x0 0x94500000 0x0 0x200000>; +}; +&slpi_mem { + reg = <0x0 0x94700000 0x0 0xf00000>; +}; +&ipa_fw_mem { + reg = <0x0 0x95600000 0x0 0x10000>; +}; +&ipa_gsi_mem { + reg = <0x0 0x95610000 0x0 0x5000>; +}; +&gpu_mem { + reg = <0x0 0x95615000 0x0 0x100000>; +}; +&wlan_msa_mem { + reg = <0x0 0x95715000 0x0 0x100000>; +}; + +&blsp1_i2c5 { + status = "okay"; + + touchscreen@20 { + compatible = "syna,rmi4-i2c"; + reg = <0x20>; + #address-cells = <1>; + #size-cells = <0>; + + interrupt-parent = <&tlmm>; + interrupts = <125 IRQ_TYPE_EDGE_FALLING>; + + pinctrl-names = "default"; + pinctrl-0 = <&ts_int_active &ts_reset_active>; + + vdd-supply = <&vreg_l28_3p0>; + vio-supply = <&vreg_l6a_1p8>; + + syna,reset-delay-ms = <20>; + syna,startup-delay-ms = <20>; + + rmi4-f01@1 { + reg = <0x01>; + syna,nosleep-mode = <1>; + }; + + rmi4_f12: rmi4-f12@12 { + reg = <0x12>; + syna,rezero-wait-ms = <20>; + syna,sensor-type = <1>; + touchscreen-x-mm = <68>; + touchscreen-y-mm = <122>; + }; + }; +}; + +&blsp1_uart3 { + status = "okay"; + + bluetooth { + compatible = "qcom,wcn3990-bt"; + + vddio-supply = <&vreg_s4a_1p8>; + vddxo-supply = <&vreg_l7a_1p8>; + vddrf-supply = <&vreg_l17a_1p3>; + vddch0-supply = <&vreg_l25a_3p3>; + max-speed = <3200000>; + }; +}; + +&blsp1_uart3_on { + rx { + /delete-property/ bias-disable; + /* + * Configure a pull-up on 46 (RX). This is needed to + * avoid garbage data when the TX pin of the Bluetooth + * module is in tri-state (module powered off or not + * driving the signal yet). + */ + bias-pull-up; + }; + + cts { + /delete-property/ bias-disable; + /* + * Configure a pull-down on 47 (CTS) to match the pull + * of the Bluetooth module. 
+ */ + bias-pull-down; + }; +}; + +&blsp2_uart1 { + status = "okay"; +}; + +&pm8005_lsid1 { + pm8005-regulators { + compatible = "qcom,pm8005-regulators"; + + vdd_s1-supply = <&vph_pwr>; + + pm8005_s1: s1 { /* VDD_GFX supply */ + regulator-min-microvolt = <524000>; + regulator-max-microvolt = <1100000>; + regulator-enable-ramp-delay = <500>; + + /* hack until we rig up the gpu consumer */ + regulator-always-on; + }; + }; +}; + +&pm8998_gpio { + vol_keys_default: vol-keys-default { + pinconf { + pins = "gpio5", "gpio6"; + function = "normal"; + bias-pull-up; + input-enable; + qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>; + }; + }; +}; + +&qusb2phy { + status = "okay"; + + vdda-pll-supply = <&vreg_l12a_1p8>; + vdda-phy-dpdm-supply = <&vreg_l24a_3p075>; +}; + +&rpm_requests { + pm8998-regulators { + compatible = "qcom,rpm-pm8998-regulators"; + + vdd_s1-supply = <&vph_pwr>; + vdd_s2-supply = <&vph_pwr>; + vdd_s3-supply = <&vph_pwr>; + vdd_s4-supply = <&vph_pwr>; + vdd_s5-supply = <&vph_pwr>; + vdd_s6-supply = <&vph_pwr>; + vdd_s7-supply = <&vph_pwr>; + vdd_s8-supply = <&vph_pwr>; + vdd_s9-supply = <&vph_pwr>; + vdd_s10-supply = <&vph_pwr>; + vdd_s11-supply = <&vph_pwr>; + vdd_s12-supply = <&vph_pwr>; + vdd_s13-supply = <&vph_pwr>; + vdd_l1_l27-supply = <&vreg_s7a_1p025>; + vdd_l2_l8_l17-supply = <&vreg_s3a_1p35>; + vdd_l3_l11-supply = <&vreg_s7a_1p025>; + vdd_l4_l5-supply = <&vreg_s7a_1p025>; + vdd_l6-supply = <&vreg_s5a_2p04>; + vdd_l7_l12_l14_l15-supply = <&vreg_s5a_2p04>; + vdd_l9-supply = <&vreg_bob>; + vdd_l10_l23_l25-supply = <&vreg_bob>; + vdd_l13_l19_l21-supply = <&vreg_bob>; + vdd_l16_l28-supply = <&vreg_bob>; + vdd_l18_l22-supply = <&vreg_bob>; + vdd_l20_l24-supply = <&vreg_bob>; + vdd_l26-supply = <&vreg_s3a_1p35>; + vdd_lvs1_lvs2-supply = <&vreg_s4a_1p8>; + + vreg_s3a_1p35: s3 { + regulator-min-microvolt = <1352000>; + regulator-max-microvolt = <1352000>; + }; + vreg_s4a_1p8: s4 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-allow-set-load; + }; + vreg_s5a_2p04: s5 { + regulator-min-microvolt = <1904000>; + regulator-max-microvolt = <2040000>; + }; + vreg_s7a_1p025: s7 { + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <1028000>; + }; + vreg_l1a_0p875: l1 { + regulator-min-microvolt = <880000>; + regulator-max-microvolt = <880000>; + }; + vreg_l2a_1p2: l2 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + }; + vreg_l3a_1p0: l3 { + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1000000>; + }; + vreg_l5a_0p8: l5 { + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <800000>; + }; + vreg_l6a_1p8: l6 { + regulator-min-microvolt = <1808000>; + regulator-max-microvolt = <1808000>; + }; + vreg_l7a_1p8: l7 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + vreg_l8a_1p2: l8 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + }; + vreg_l9a_1p8: l9 { + regulator-min-microvolt = <1808000>; + regulator-max-microvolt = <2960000>; + }; + vreg_l10a_1p8: l10 { + regulator-min-microvolt = <1808000>; + regulator-max-microvolt = <2960000>; + }; + vreg_l11a_1p0: l11 { + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1000000>; + }; + vreg_l12a_1p8: l12 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + vreg_l13a_2p95: l13 { + regulator-min-microvolt = <1808000>; + regulator-max-microvolt = <2960000>; + }; + vreg_l14a_1p88: l14 { + regulator-min-microvolt = 
<1880000>; + regulator-max-microvolt = <1880000>; + }; + vreg_l15a_1p8: l15 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + vreg_l16a_2p7: l16 { + regulator-min-microvolt = <2704000>; + regulator-max-microvolt = <2704000>; + }; + vreg_l17a_1p3: l17 { + regulator-min-microvolt = <1304000>; + regulator-max-microvolt = <1304000>; + }; + vreg_l18a_2p7: l18 { + regulator-min-microvolt = <2704000>; + regulator-max-microvolt = <2704000>; + }; + vreg_l19a_3p0: l19 { + regulator-min-microvolt = <3008000>; + regulator-max-microvolt = <3008000>; + }; + vreg_l20a_2p95: l20 { + regulator-min-microvolt = <2960000>; + regulator-max-microvolt = <2960000>; + regulator-allow-set-load; + }; + vreg_l21a_2p95: l21 { + regulator-min-microvolt = <2960000>; + regulator-max-microvolt = <2960000>; + regulator-allow-set-load; + regulator-system-load = <800000>; + }; + vreg_l22a_2p85: l22 { + regulator-min-microvolt = <2864000>; + regulator-max-microvolt = <2864000>; + }; + vreg_l23a_3p3: l23 { + regulator-min-microvolt = <3312000>; + regulator-max-microvolt = <3312000>; + }; + vreg_l24a_3p075: l24 { + regulator-min-microvolt = <3088000>; + regulator-max-microvolt = <3088000>; + }; + vreg_l25a_3p3: l25 { + regulator-min-microvolt = <3104000>; + regulator-max-microvolt = <3312000>; + }; + vreg_l26a_1p2: l26 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-allow-set-load; + }; + vreg_l28_3p0: l28 { + regulator-min-microvolt = <3008000>; + regulator-max-microvolt = <3008000>; + }; + vreg_lvs1a_1p8: lvs1 { }; + vreg_lvs2a_1p8: lvs2 { }; + }; + + pmi8998-regulators { + compatible = "qcom,rpm-pmi8998-regulators"; + + vdd_bob-supply = <&vph_pwr>; + + vreg_bob: bob { + regulator-min-microvolt = <3312000>; + regulator-max-microvolt = <3600000>; + }; + }; +}; + +&tlmm { + gpio-reserved-ranges = <0 4>, <81 4>; + + hall_sensor_default: hall-sensor-default { + pins = "gpio124"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + input-enable; + }; + + ts_int_active: ts-int-active { + pins = "gpio125"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; + + ts_reset_active: ts-reset-active { + pins = "gpio89"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; +}; + +&ufshc { + status = "okay"; + + vcc-supply = <&vreg_l20a_2p95>; + vccq-supply = <&vreg_l26a_1p2>; + vccq2-supply = <&vreg_s4a_1p8>; + vcc-max-microamp = <750000>; + vccq-max-microamp = <560000>; + vccq2-max-microamp = <750000>; +}; + +&ufsphy { + status = "okay"; + + vdda-phy-supply = <&vreg_l1a_0p875>; + vdda-pll-supply = <&vreg_l2a_1p2>; + vddp-ref-clk-supply = <&vreg_l26a_1p2>; + vdda-phy-max-microamp = <51400>; + vdda-pll-max-microamp = <14600>; + vddp-ref-clk-max-microamp = <100>; + vddp-ref-clk-always-on; +}; + +&usb3 { + status = "okay"; + + /* Disable USB3 clock requirement as the device only supports USB2 */ + qcom,select-utmi-as-pipe-clk; +}; + +&usb3_dwc3 { + /* Drop the unused USB 3 PHY */ + phys = <&qusb2phy>; + phy-names = "usb2-phy"; + + /* Fastest mode for USB 2 */ + maximum-speed = "high-speed"; + + /* Force to peripheral until we can switch modes */ + dr_mode = "peripheral"; +}; + +&wifi { + /* Leave disabled until MSS is functional */ + vdd-0.8-cx-mx-supply = <&vreg_l5a_0p8>; + vdd-1.8-xo-supply = <&vreg_l7a_1p8>; + vdd-1.3-rfa-supply = <&vreg_l17a_1p3>; + vdd-3.3-ch0-supply = <&vreg_l25a_3p3>; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8998-oneplus-dumpling.dts b/arch/arm64/boot/dts/qcom/msm8998-oneplus-dumpling.dts new file 
mode 100644 index 000000000000..544b9b0ae44b --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8998-oneplus-dumpling.dts @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * OnePlus 5T (dumpling) device tree + * + * Copyright (c) 2021, Jami Kettunen <jamipkettunen@gmail.com> + */ + +#include "msm8998-oneplus-common.dtsi" + +/ { + model = "OnePlus 5T"; + compatible = "oneplus,dumpling", "qcom,msm8998"; + /* Required for bootloader to select correct board */ + qcom,board-id = <8 0 17801 43>; +}; + +/* Update the screen height values from 1920 to 2160 on the 5T */ +&framebuffer0 { + height = <2160>; +}; + +/* Adjust digitizer area height to match the 5T's taller panel */ +&rmi4_f12 { + touchscreen-y-mm = <137>; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi index 1f2e93aa6553..e9d3ce29937c 100644 --- a/arch/arm64/boot/dts/qcom/msm8998.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi @@ -1398,6 +1398,8 @@ <&rpmpd MSM8998_VDDMX>; power-domain-names = "cx", "mx"; + status = "disabled"; + mba { memory-region = <&mba_mem>; }; diff --git a/arch/arm64/boot/dts/qcom/pm8150.dtsi b/arch/arm64/boot/dts/qcom/pm8150.dtsi index bdc76d504b78..fa4ea7ded0ab 100644 --- a/arch/arm64/boot/dts/qcom/pm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/pm8150.dtsi @@ -107,13 +107,11 @@ status = "disabled"; }; - pm8150_rtc: rtc@6000 { + rtc@6000 { compatible = "qcom,pm8941-rtc"; reg = <0x6000>; reg-names = "rtc", "alarm"; interrupts = <0x0 0x61 0x1 IRQ_TYPE_NONE>; - - status = "disabled"; }; pm8150_gpios: gpio@c000 { diff --git a/arch/arm64/boot/dts/qcom/pm8350.dtsi b/arch/arm64/boot/dts/qcom/pm8350.dtsi new file mode 100644 index 000000000000..308f9ca7c744 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/pm8350.dtsi @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2021, Linaro Limited + */ + +#include <dt-bindings/interrupt-controller/irq.h> +#include <dt-bindings/spmi/spmi.h> + +&spmi_bus { + pm8350: pmic@1 { + compatible = "qcom,pm8350", "qcom,spmi-pmic"; + reg = <0x1 SPMI_USID>; + #address-cells = <1>; + #size-cells = <0>; + + pm8350_gpios: gpio@8800 { + compatible = "qcom,pm8350-gpio"; + reg = <0x8800>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/pm8350b.dtsi b/arch/arm64/boot/dts/qcom/pm8350b.dtsi new file mode 100644 index 000000000000..b23bb1d49a4d --- /dev/null +++ b/arch/arm64/boot/dts/qcom/pm8350b.dtsi @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2021, Linaro Limited + */ + +#include <dt-bindings/interrupt-controller/irq.h> +#include <dt-bindings/spmi/spmi.h> + +&spmi_bus { + pm8350b: pmic@3 { + compatible = "qcom,pm8350b", "qcom,spmi-pmic"; + reg = <0x3 SPMI_USID>; + #address-cells = <1>; + #size-cells = <0>; + + pm8350b_gpios: gpio@8800 { + compatible = "qcom,pm8350b-gpio"; + reg = <0x8800>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/pm8350c.dtsi b/arch/arm64/boot/dts/qcom/pm8350c.dtsi new file mode 100644 index 000000000000..2b9b75ecec60 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/pm8350c.dtsi @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2021, Linaro Limited + */ + +#include <dt-bindings/interrupt-controller/irq.h> +#include <dt-bindings/spmi/spmi.h> + +&spmi_bus { + pm8350c: pmic@2 { + compatible = "qcom,pm8350c", "qcom,spmi-pmic"; + reg = <0x2 SPMI_USID>; + 
#address-cells = <1>; + #size-cells = <0>; + + pm8350c_gpios: gpio@8800 { + compatible = "qcom,pm8350c-gpio"; + reg = <0x8800>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/pmk8350.dtsi b/arch/arm64/boot/dts/qcom/pmk8350.dtsi new file mode 100644 index 000000000000..1530b8ff270f --- /dev/null +++ b/arch/arm64/boot/dts/qcom/pmk8350.dtsi @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2021, Linaro Limited + */ + +#include <dt-bindings/interrupt-controller/irq.h> +#include <dt-bindings/spmi/spmi.h> + +&spmi_bus { + pmk8350: pmic@0 { + compatible = "qcom,pmk8350", "qcom,spmi-pmic"; + reg = <0x0 SPMI_USID>; + #address-cells = <1>; + #size-cells = <0>; + + pmk8350_gpios: gpio@b000 { + compatible = "qcom,pmk8350-gpio"; + reg = <0xb000>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/pmr735a.dtsi b/arch/arm64/boot/dts/qcom/pmr735a.dtsi new file mode 100644 index 000000000000..1c675af13cbf --- /dev/null +++ b/arch/arm64/boot/dts/qcom/pmr735a.dtsi @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2021, Linaro Limited + */ + +#include <dt-bindings/interrupt-controller/irq.h> +#include <dt-bindings/spmi/spmi.h> + +&spmi_bus { + pmr735a: pmic@4 { + compatible = "qcom,pmr735a", "qcom,spmi-pmic"; + reg = <0x4 SPMI_USID>; + #address-cells = <1>; + #size-cells = <0>; + + pmr735a_gpios: gpio@8800 { + compatible = "qcom,pmr735a-gpio"; + reg = <0x8800>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/pmr735b.dtsi b/arch/arm64/boot/dts/qcom/pmr735b.dtsi new file mode 100644 index 000000000000..1144086280f5 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/pmr735b.dtsi @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2021, Linaro Limited + */ + +#include <dt-bindings/interrupt-controller/irq.h> +#include <dt-bindings/spmi/spmi.h> + +&spmi_bus { + pmr735b: pmic@5 { + compatible = "qcom,pmr735b", "qcom,spmi-pmic"; + reg = <0x5 SPMI_USID>; + #address-cells = <1>; + #size-cells = <0>; + + pmr735b_gpios: gpio@8800 { + compatible = "qcom,pmr735b-gpio"; + reg = <0x8800>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts index 2f0528d01299..5f41de20aa22 100644 --- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts +++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts @@ -811,10 +811,6 @@ }; }; -&pm8150_rtc { - status = "okay"; -}; - &qupv3_id_0 { status = "okay"; }; @@ -952,6 +948,9 @@ /* CAN */ &spi0 { status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&qup_spi0_data_clk>, <&qup_spi0_cs_gpio>; + cs-gpios = <&tlmm 31 GPIO_ACTIVE_LOW>; can@0 { compatible = "microchip,mcp2518fd"; @@ -1352,3 +1351,14 @@ vdd-micb-supply = <&vreg_s4a_1p8>; qcom,dmic-sample-rate = <600000>; }; + +/* PINCTRL - additions to nodes defined in sm8250.dtsi */ +&qup_spi0_cs_gpio { + drive-strength = <6>; + bias-disable; +}; + +&qup_spi0_data_clk { + drive-strength = <6>; + bias-disable; +}; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1-lte.dts new file mode 100644 index 000000000000..533c048903ea --- /dev/null +++ 
b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1-lte.dts @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Google CoachZ board device tree source + * + * Copyright 2020 Google LLC. + */ + +#include "sc7180-trogdor-coachz-r1.dts" +#include "sc7180-trogdor-lte-sku.dtsi" + +/ { + model = "Google CoachZ (rev1) with LTE"; + compatible = "google,coachz-rev1-sku0", "qcom,sc7180"; +}; + +&cros_ec_proximity { + label = "proximity-wifi-lte"; +}; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1.dts new file mode 100644 index 000000000000..1b1dbdb2a82f --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1.dts @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Google CoachZ board device tree source + * + * Copyright 2020 Google LLC. + */ + +/dts-v1/; + +#include "sc7180-trogdor-coachz.dtsi" + +/ { + model = "Google CoachZ (rev1)"; + compatible = "google,coachz-rev1", "qcom,sc7180"; +}; + +&tlmm { + gpio-line-names = "HUB_RST_L", + "AP_RAM_ID0", + "AP_SKU_ID2", + "AP_RAM_ID1", + "FP_TO_AP_IRQ_L", + "AP_RAM_ID2", + "UF_CAM_EN", + "WF_CAM_EN", + "TS_RESET_L", + "TS_INT_L", + "FPMCU_BOOT0", + "EDP_BRIJ_IRQ", + "AP_EDP_BKLTEN", + "UF_CAM_MCLK", + "WF_CAM_CLK", + "EDP_BRIJ_I2C_SDA", + "EDP_BRIJ_I2C_SCL", + "UF_CAM_SDA", + "UF_CAM_SCL", + "WF_CAM_SDA", + "WF_CAM_SCL", + "WLC_IRQ", + "FP_RST_L", + "AMP_EN", + "WLC_NRST", + "AP_SAR_SENSOR_SDA", + "AP_SAR_SENSOR_SCL", + "", + "", + "WF_CAM_RST_L", + "UF_CAM_RST_L", + "AP_BRD_ID2", + "BRIJ_SUSPEND", + "AP_BRD_ID0", + "AP_H1_SPI_MISO", + "AP_H1_SPI_MOSI", + "AP_H1_SPI_CLK", + "AP_H1_SPI_CS_L", + "", + "", + "", + "", + "H1_AP_INT_ODL", + "", + "UART_AP_TX_DBG_RX", + "UART_DBG_TX_AP_RX", + "", + "", + "FORCED_USB_BOOT", + "AMP_BCLK", + "AMP_LRCLK", + "AMP_DIN", + "EN_PP3300_DX_EDP", + "HP_BCLK", + "HP_LRCLK", + "HP_DOUT", + "HP_DIN", + "HP_MCLK", + "AP_SKU_ID0", + "AP_EC_SPI_MISO", + "AP_EC_SPI_MOSI", + "AP_EC_SPI_CLK", + "AP_EC_SPI_CS_L", + "AP_SPI_CLK", + "AP_SPI_MOSI", + "AP_SPI_MISO", + /* + * AP_FLASH_WP_L is crossystem ABI. Schematics + * call it BIOS_FLASH_WP_L. + */ + "AP_FLASH_WP_L", + "", + "AP_SPI_CS0_L", + "SD_CD_ODL", + "", + "", + "", + "", + "FPMCU_SEL", + "UIM2_DATA", + "UIM2_CLK", + "UIM2_RST", + "UIM2_PRESENT_L", + "UIM1_DATA", + "UIM1_CLK", + "UIM1_RST", + "", + "DMIC_CLK_EN", + "HUB_EN", + "", + "AP_SPI_FP_MISO", + "AP_SPI_FP_MOSI", + "AP_SPI_FP_CLK", + "AP_SPI_FP_CS_L", + "AP_SKU_ID1", + "AP_RST_REQ", + "", + "AP_BRD_ID1", + "AP_EC_INT_L", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "EDP_BRIJ_EN", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "AP_TS_PEN_I2C_SDA", + "AP_TS_PEN_I2C_SCL", + "DP_HOT_PLUG_DET", + "EC_IN_RW_ODL"; +}; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r2-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r2-lte.dts new file mode 100644 index 000000000000..6e7745801fae --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r2-lte.dts @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Google CoachZ board device tree source + * + * Copyright 2020 Google LLC. 
+ */ + +#include "sc7180-trogdor-coachz-r2.dts" +#include "sc7180-trogdor-lte-sku.dtsi" + +/ { + model = "Google CoachZ (rev2+) with LTE"; + compatible = "google,coachz-sku0", "qcom,sc7180"; +}; + +&cros_ec_proximity { + label = "proximity-wifi-lte"; +}; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r2.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r2.dts new file mode 100644 index 000000000000..4f69b6ba299f --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r2.dts @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Google CoachZ board device tree source + * + * Copyright 2020 Google LLC. + */ + +/dts-v1/; + +#include "sc7180-trogdor-coachz.dtsi" + +/ { + model = "Google CoachZ (rev2+)"; + compatible = "google,coachz", "qcom,sc7180"; +}; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi new file mode 100644 index 000000000000..4c6e433c8226 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi @@ -0,0 +1,266 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Google CoachZ board device tree source + * + * Copyright 2020 Google LLC. + */ + +#include "sc7180.dtsi" + +ap_ec_spi: &spi6 {}; +ap_h1_spi: &spi0 {}; + +#include "sc7180-trogdor.dtsi" + +/* Deleted nodes from trogdor.dtsi */ + +/delete-node/ &alc5682; +/delete-node/ &pp3300_codec; + +/ { + /* BOARD-SPECIFIC TOP LEVEL NODES */ + + adau7002: audio-codec-1 { + compatible = "adi,adau7002"; + IOVDD-supply = <&pp1800_l15a>; + #sound-dai-cells = <0>; + }; +}; + +&ap_spi_fp { + status = "okay"; +}; + +&backlight { + pwms = <&cros_ec_pwm 0>; +}; + +&camcc { + status = "okay"; +}; + +&cros_ec { + cros_ec_proximity: proximity { + compatible = "google,cros-ec-mkbp-proximity"; + label = "proximity-wifi"; + }; +}; + +ap_ts_pen_1v8: &i2c4 { + status = "okay"; + clock-frequency = <400000>; + + ap_ts: touchscreen@5d { + compatible = "goodix,gt7375p"; + reg = <0x5d>; + pinctrl-names = "default"; + pinctrl-0 = <&ts_int_l>, <&ts_reset_l>; + + interrupt-parent = <&tlmm>; + interrupts = <9 IRQ_TYPE_LEVEL_LOW>; + + reset-gpios = <&tlmm 8 GPIO_ACTIVE_LOW>; + + vdd-supply = <&pp3300_ts>; + }; +}; + +&i2c7 { + status = "disabled"; +}; + +&i2c9 { + status = "disabled"; +}; + +&panel { + compatible = "boe,nv110wtm-n61"; +}; + +&pp3300_dx_edp { + gpio = <&tlmm 67 GPIO_ACTIVE_HIGH>; +}; + +&sdhc_2 { + status = "okay"; +}; + +&sn65dsi86_out { + data-lanes = <0 1 2 3>; +}; + +&sound { + compatible = "google,sc7180-coachz"; + model = "sc7180-adau7002-max98357a"; + audio-routing = "PDM_DAT", "DMIC"; + + pinctrl-names = "default"; + pinctrl-0 = <&dmic_clk_en>; +}; + +&sound_multimedia0_codec { + sound-dai = <&adau7002>; +}; + +/* PINCTRL - modifications to sc7180-trogdor.dtsi */ + +&en_pp3300_dx_edp { + pinmux { + pins = "gpio67"; + }; + + pinconf { + pins = "gpio67"; + }; +}; + +&ts_reset_l { + pinconf { + /* + * We want reset state by default and it will be up to the + * driver to disable this when it's ready. 
+ */ + output-low; + }; +}; + +/* PINCTRL - board-specific pinctrl */ + +&tlmm { + gpio-line-names = "HUB_RST_L", + "AP_RAM_ID0", + "AP_SKU_ID2", + "AP_RAM_ID1", + "FP_TO_AP_IRQ_L", + "AP_RAM_ID2", + "UF_CAM_EN", + "WF_CAM_EN", + "TS_RESET_L", + "TS_INT_L", + "FPMCU_BOOT0", + "EDP_BRIJ_IRQ", + "AP_EDP_BKLTEN", + "UF_CAM_MCLK", + "WF_CAM_CLK", + "EDP_BRIJ_I2C_SDA", + "EDP_BRIJ_I2C_SCL", + "UF_CAM_SDA", + "UF_CAM_SCL", + "WF_CAM_SDA", + "WF_CAM_SCL", + "WLC_IRQ", + "FP_RST_L", + "AMP_EN", + "WLC_NRST", + "AP_SAR_SENSOR_SDA", + "AP_SAR_SENSOR_SCL", + "", + "", + "WF_CAM_RST_L", + "UF_CAM_RST_L", + "AP_BRD_ID2", + "BRIJ_SUSPEND", + "AP_BRD_ID0", + "AP_H1_SPI_MISO", + "AP_H1_SPI_MOSI", + "AP_H1_SPI_CLK", + "AP_H1_SPI_CS_L", + "", + "", + "", + "", + "H1_AP_INT_ODL", + "", + "UART_AP_TX_DBG_RX", + "UART_DBG_TX_AP_RX", + "", + "", + "FORCED_USB_BOOT", + "AMP_BCLK", + "AMP_LRCLK", + "AMP_DIN", + "", + "HP_BCLK", + "HP_LRCLK", + "HP_DOUT", + "HP_DIN", + "HP_MCLK", + "AP_SKU_ID0", + "AP_EC_SPI_MISO", + "AP_EC_SPI_MOSI", + "AP_EC_SPI_CLK", + "AP_EC_SPI_CS_L", + "AP_SPI_CLK", + "AP_SPI_MOSI", + "AP_SPI_MISO", + /* + * AP_FLASH_WP_L is crossystem ABI. Schematics + * call it BIOS_FLASH_WP_L. + */ + "AP_FLASH_WP_L", + "EN_PP3300_DX_EDP", + "AP_SPI_CS0_L", + "SD_CD_ODL", + "", + "", + "", + "", + "EN_FP_RAILS", + "UIM2_DATA", + "UIM2_CLK", + "UIM2_RST", + "UIM2_PRESENT_L", + "UIM1_DATA", + "UIM1_CLK", + "UIM1_RST", + "", + "", + "HUB_EN", + "", + "AP_SPI_FP_MISO", + "AP_SPI_FP_MOSI", + "AP_SPI_FP_CLK", + "AP_SPI_FP_CS_L", + "AP_SKU_ID1", + "AP_RST_REQ", + "", + "AP_BRD_ID1", + "AP_EC_INT_L", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "EDP_BRIJ_EN", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "AP_TS_PEN_I2C_SDA", + "AP_TS_PEN_I2C_SCL", + "DP_HOT_PLUG_DET", + "EC_IN_RW_ODL"; + + dmic_clk_en: dmic_clk_en { + pinmux { + pins = "gpio83"; + function = "gpio"; + }; + + pinconf { + pins = "gpio83"; + drive-strength = <8>; + bias-pull-up; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts new file mode 100644 index 000000000000..6ebde0828550 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Google Lazor Limozeen board device tree source + * + * Copyright 2020 Google LLC. + */ + +#include "sc7180-trogdor-lazor-limozeen-nots.dts" + +/ { + model = "Google Lazor Limozeen without Touchscreen (rev4)"; + compatible = "google,lazor-rev4-sku5", "qcom,sc7180"; +}; + +/* + * rev4-sku5 was built with a different trackpad. + */ +/delete-node/&trackpad; +&ap_tp_i2c { + trackpad: trackpad@2c { + compatible = "hid-over-i2c"; + reg = <0x2c>; + pinctrl-names = "default"; + pinctrl-0 = <&tp_int_odl>; + + interrupt-parent = <&tlmm>; + interrupts = <58 IRQ_TYPE_EDGE_FALLING>; + + vcc-supply = <&pp3300_fp_tp>; + hid-descr-addr = <0x20>; + + wakeup-source; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots.dts new file mode 100644 index 000000000000..0456c7e05d00 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots.dts @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Google Lazor Limozeen board device tree source + * + * Copyright 2020 Google LLC. 
+ */ + +/dts-v1/; + +#include "sc7180-trogdor-lazor.dtsi" +#include "sc7180-trogdor-lte-sku.dtsi" + +/ { + model = "Google Lazor Limozeen without Touchscreen"; + compatible = "google,lazor-sku6", "google,lazor-sku5", "qcom,sc7180"; +}; + +/delete-node/&ap_ts; + +&panel { + compatible = "innolux,n116bca-ea1", "innolux,n116bge"; +}; + +&sdhc_2 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen.dts new file mode 100644 index 000000000000..e6ad6dae4e60 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen.dts @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Google Lazor Limozeen board device tree source + * + * Copyright 2020 Google LLC. + */ + +/dts-v1/; + +#include "sc7180-trogdor-lazor.dtsi" +#include "sc7180-trogdor-lte-sku.dtsi" + +/ { + model = "Google Lazor Limozeen"; + compatible = "google,lazor-sku4", "qcom,sc7180"; +}; + +/delete-node/&ap_ts; + +&ap_ts_pen_1v8 { + ap_ts: touchscreen@10 { + compatible = "elan,ekth3500"; + reg = <0x10>; + pinctrl-names = "default"; + pinctrl-0 = <&ts_int_l>, <&ts_reset_l>; + + interrupt-parent = <&tlmm>; + interrupts = <9 IRQ_TYPE_LEVEL_LOW>; + + vcc33-supply = <&pp3300_ts>; + + reset-gpios = <&tlmm 8 GPIO_ACTIVE_LOW>; + }; +}; + +&panel { + compatible = "auo,b116xa01"; +}; + +&sdhc_2 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r0.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r0.dts index 30e3e769d2b4..5c997cd90069 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r0.dts +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r0.dts @@ -14,6 +14,15 @@ compatible = "google,lazor-rev0", "qcom,sc7180"; }; +/* + * Lazor is stuffed with a 47k NTC as charger thermistor which currently is + * not supported by the PM6150 ADC driver. Disable the charger thermal zone + * to avoid using bogus temperature values. + */ +&charger_thermal { + status = "disabled"; +}; + &pp3300_hub { /* pp3300_l7c is used to power the USB hub */ /delete-property/regulator-always-on; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1.dts index c2ef06367baf..d9fbcc7bc5bd 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1.dts +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1.dts @@ -14,6 +14,15 @@ compatible = "google,lazor-rev1", "google,lazor-rev2", "qcom,sc7180"; }; +/* + * Lazor is stuffed with a 47k NTC as charger thermistor which currently is + * not supported by the PM6150 ADC driver. Disable the charger thermal zone + * to avoid using bogus temperature values. + */ +&charger_thermal { + status = "disabled"; +}; + &pp3300_hub { /* pp3300_l7c is used to power the USB hub */ /delete-property/regulator-always-on; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-kb.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-kb.dts index 6985beb97e53..dcb41afdc82a 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-kb.dts +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-kb.dts @@ -5,7 +5,10 @@ * Copyright 2020 Google LLC. 
*/ -#include "sc7180-trogdor-lazor-r3.dts" +/dts-v1/; + +#include "sc7180-trogdor-lazor.dtsi" +#include "sc7180-lite.dtsi" / { model = "Google Lazor (rev3+) with KB Backlight"; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-lte.dts index 0881f8dd02c9..be44900602d7 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-lte.dts +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-lte.dts @@ -5,7 +5,9 @@ * Copyright 2020 Google LLC. */ -#include "sc7180-trogdor-lazor-r3.dts" +/dts-v1/; + +#include "sc7180-trogdor-lazor.dtsi" #include "sc7180-trogdor-lte-sku.dtsi" / { diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3.dts index 1b9d2f46359e..ea8c2ee09741 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3.dts +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3.dts @@ -8,8 +8,18 @@ /dts-v1/; #include "sc7180-trogdor-lazor.dtsi" +#include "sc7180-lite.dtsi" / { model = "Google Lazor (rev3+)"; compatible = "google,lazor", "qcom,sc7180"; }; + +/* + * Lazor is stuffed with a 47k NTC as charger thermistor which currently is + * not supported by the PM6150 ADC driver. Disable the charger thermal zone + * to avoid using bogus temperature values. + */ +&charger_thermal { + status = "disabled"; +}; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi index 89e5cd29ec09..6b10b96173e8 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi @@ -12,23 +12,6 @@ ap_h1_spi: &spi0 {}; #include "sc7180-trogdor.dtsi" -/ { - panel: panel { - compatible = "boe,nv133fhm-n62"; - power-supply = <&pp3300_dx_edp>; - backlight = <&backlight>; - hpd-gpios = <&sn65dsi86_bridge 2 GPIO_ACTIVE_HIGH>; - - ports { - port { - panel_in_edp: endpoint { - remote-endpoint = <&sn65dsi86_out>; - }; - }; - }; - }; -}; - &ap_sar_sensor { semtech,cs0-ground; semtech,combined-sensors = <3>; @@ -58,8 +41,30 @@ ap_ts_pen_1v8: &i2c4 { }; }; +&panel { + compatible = "boe,nv133fhm-n62"; +}; + +&trackpad { + interrupts = <58 IRQ_TYPE_EDGE_FALLING>; +}; + +&wifi { + qcom,ath10k-calibration-variant = "GO_LAZOR"; +}; + /* PINCTRL - modifications to sc7180-trogdor.dtsi */ +&trackpad_int_1v8_odl { + pinmux { + pins = "gpio58"; + }; + + pinconf { + pins = "gpio58"; + }; +}; + &ts_reset_l { pinconf { /* This pin is not connected on -rev0, pull up to park. */ diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r1-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r1-lte.dts new file mode 100644 index 000000000000..0202f03eafe6 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r1-lte.dts @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Google Pompom board device tree source + * + * Copyright 2020 Google LLC. + */ + +#include "sc7180-trogdor-pompom-r1.dts" +#include "sc7180-trogdor-lte-sku.dtsi" + +/ { + model = "Google Pompom (rev1) with LTE"; + compatible = "google,pompom-rev1-sku0", "qcom,sc7180"; +}; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r1.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r1.dts new file mode 100644 index 000000000000..e720e7bd0d70 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r1.dts @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Google Pompom board device tree source + * + * Copyright 2020 Google LLC. 
+ */ + +/dts-v1/; + +#include "sc7180-trogdor-pompom.dtsi" + +/ { + model = "Google Pompom (rev1)"; + compatible = "google,pompom-rev1", "qcom,sc7180"; +}; + +&pp3300_hub { + /* pp3300_l7c is used to power the USB hub */ + /delete-property/regulator-always-on; + /delete-property/regulator-boot-on; +}; + +&pp3300_l7c { + regulator-always-on; + regulator-boot-on; +}; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2-lte.dts new file mode 100644 index 000000000000..791d496ad046 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2-lte.dts @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Google Pompom board device tree source + * + * Copyright 2020 Google LLC. + */ + +#include "sc7180-trogdor-pompom-r2.dts" +#include "sc7180-trogdor-lte-sku.dtsi" + +/ { + model = "Google Pompom (rev2+) with LTE"; + compatible = "google,pompom-sku0", "qcom,sc7180"; +}; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2.dts new file mode 100644 index 000000000000..984d7337da78 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2.dts @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Google Pompom board device tree source + * + * Copyright 2020 Google LLC. + */ + +/dts-v1/; + +#include "sc7180-trogdor-pompom.dtsi" + +/ { + model = "Google Pompom (rev2+)"; + compatible = "google,pompom", "qcom,sc7180"; +}; + +&keyboard_controller { + function-row-physmap = < + MATRIX_KEY(0x00, 0x02, 0) /* T1 */ + MATRIX_KEY(0x03, 0x02, 0) /* T2 */ + MATRIX_KEY(0x02, 0x02, 0) /* T3 */ + MATRIX_KEY(0x01, 0x02, 0) /* T4 */ + MATRIX_KEY(0x03, 0x04, 0) /* T5 */ + MATRIX_KEY(0x02, 0x04, 0) /* T6 */ + MATRIX_KEY(0x01, 0x04, 0) /* T7 */ + MATRIX_KEY(0x02, 0x09, 0) /* T8 */ + MATRIX_KEY(0x01, 0x09, 0) /* T9 */ + MATRIX_KEY(0x00, 0x04, 0) /* T10 */ + >; + linux,keymap = < + MATRIX_KEY(0x00, 0x02, KEY_BACK) + MATRIX_KEY(0x03, 0x02, KEY_REFRESH) + MATRIX_KEY(0x02, 0x02, KEY_ZOOM) + MATRIX_KEY(0x01, 0x02, KEY_SCALE) + MATRIX_KEY(0x03, 0x04, KEY_SYSRQ) + MATRIX_KEY(0x02, 0x04, KEY_BRIGHTNESSDOWN) + MATRIX_KEY(0x01, 0x04, KEY_BRIGHTNESSUP) + MATRIX_KEY(0x02, 0x09, KEY_MUTE) + MATRIX_KEY(0x01, 0x09, KEY_VOLUMEDOWN) + MATRIX_KEY(0x00, 0x04, KEY_VOLUMEUP) + + CROS_STD_MAIN_KEYMAP + >; +}; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi new file mode 100644 index 000000000000..622b5f1b88a2 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi @@ -0,0 +1,295 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Google Pompom board device tree source + * + * Copyright 2020 Google LLC. 
+ */ + +#include "sc7180.dtsi" + +ap_ec_spi: &spi6 {}; +ap_h1_spi: &spi0 {}; + +#include "sc7180-trogdor.dtsi" + +/ { + thermal-zones { + 5v-choke-thermal { + polling-delay-passive = <0>; + polling-delay = <250>; + + thermal-sensors = <&pm6150_adc_tm 1>; + + trips { + 5v-choke-crit { + temperature = <125000>; + hysteresis = <1000>; + type = "critical"; + }; + }; + }; + }; +}; + +&alc5682 { + realtek,dmic-clk-driving-high = "true"; +}; + +&cpu6_alert0 { + temperature = <60000>; +}; + +&cpu6_alert1 { + temperature = <65000>; +}; + +&cpu6_thermal { + sustainable-power = <948>; +}; + +&cpu7_alert0 { + temperature = <60000>; +}; + +&cpu7_alert1 { + temperature = <65000>; +}; + +&cpu7_thermal { + sustainable-power = <948>; +}; + +&cpu8_alert0 { + temperature = <60000>; +}; + +&cpu8_alert1 { + temperature = <65000>; +}; + +&cpu8_thermal { + sustainable-power = <948>; +}; + +&cpu9_alert0 { + temperature = <60000>; +}; + +&cpu9_alert1 { + temperature = <65000>; +}; + +&cpu9_thermal { + sustainable-power = <948>; +}; + +&gpio_keys { + status = "okay"; +}; + +ap_ts_pen_1v8: &i2c4 { + status = "okay"; + clock-frequency = <400000>; + + ap_ts: touchscreen@10 { + compatible = "hid-over-i2c"; + reg = <0x10>; + pinctrl-names = "default"; + pinctrl-0 = <&ts_int_l>, <&ts_reset_l>; + + interrupt-parent = <&tlmm>; + interrupts = <9 IRQ_TYPE_LEVEL_LOW>; + + post-power-on-delay-ms = <20>; + hid-descr-addr = <0x0001>; + + vdd-supply = <&pp3300_ts>; + }; +}; + +&panel { + compatible = "kingdisplay,kd116n21-30nv-a010"; +}; + +&pen_insert { + /* Insert = high, eject = low */ + gpios = <&tlmm 52 GPIO_ACTIVE_HIGH>; +}; + +&pm6150_adc { + 5v-choke-thermistor@4e { + reg = <ADC5_AMUX_THM2_100K_PU>; + qcom,ratiometric; + qcom,hw-settle-time = <200>; + }; +}; + +&pm6150_adc_tm { + status = "okay"; + + 5v-choke-thermistor@1 { + reg = <1>; + io-channels = <&pm6150_adc ADC5_AMUX_THM2_100K_PU>; + qcom,ratiometric; + qcom,hw-settle-time-us = <200>; + }; +}; + +&sdhc_2 { + status = "okay"; +}; + +&sound { + model = "sc7180-rt5682-max98357a-2mic"; + pinctrl-names = "default"; + pinctrl-0 = <&dmic_sel>; + dmic-gpios = <&tlmm 86 GPIO_ACTIVE_HIGH>; +}; + +&usb_c1 { + status = "disabled"; +}; + +&wifi { + qcom,ath10k-calibration-variant = "GO_POMPOM"; +}; + +/* PINCTRL - board-specific pinctrl */ + +&tlmm { + gpio-line-names = "TP_INT_ODL", + "AP_RAM_ID0", + "AP_SKU_ID2", + "AP_RAM_ID1", + "", + "AP_RAM_ID2", + "AP_TP_I2C_SDA", + "AP_TP_I2C_SCL", + "TS_RESET_L", + "TS_INT_L", + "", + "EDP_BRIJ_IRQ", + "AP_EDP_BKLTEN", + "", + "", + "EDP_BRIJ_I2C_SDA", + "EDP_BRIJ_I2C_SCL", + "HUB_RST_L", + "", + "", + "", + "", + "", + "AMP_EN", + "P_SENSOR_INT_L", + "AP_SAR_SENSOR_SDA", + "AP_SAR_SENSOR_SCL", + "", + "HP_IRQ", + "", + "EN_PP3300_DX_EDP", + "AP_BRD_ID2", + "BRIJ_SUSPEND", + "AP_BRD_ID0", + "AP_H1_SPI_MISO", + "AP_H1_SPI_MOSI", + "AP_H1_SPI_CLK", + "AP_H1_SPI_CS_L", + "", + "", + "", + "", + "H1_AP_INT_ODL", + "", + "UART_AP_TX_DBG_RX", + "UART_DBG_TX_AP_RX", + "HP_I2C_SDA", + "HP_I2C_SCL", + "FORCED_USB_BOOT", + "AMP_BCLK", + "AMP_LRCLK", + "AMP_DIN", + "PEN_PDCT_L", + "HP_BCLK", + "HP_LRCLK", + "HP_DOUT", + "HP_DIN", + "HP_MCLK", + "AP_SKU_ID0", + "AP_EC_SPI_MISO", + "AP_EC_SPI_MOSI", + "AP_EC_SPI_CLK", + "AP_EC_SPI_CS_L", + "AP_SPI_CLK", + "AP_SPI_MOSI", + "AP_SPI_MISO", + /* + * AP_FLASH_WP_L is crossystem ABI. Schematics + * call it BIOS_FLASH_WP_L. 
+ */ + "AP_FLASH_WP_L", + "", + "AP_SPI_CS0_L", + "SD_CD_ODL", + "", + "", + "", + "", + "", + "UIM2_DATA", + "UIM2_CLK", + "UIM2_RST", + "UIM2_PRESENT", + "UIM1_DATA", + "UIM1_CLK", + "UIM1_RST", + "", + "EN_PP3300_CODEC", + "EN_PP3300_HUB", + "", + "DMIC_SEL", + "", + "", + "", + "AP_SKU_ID1", + "AP_RST_REQ", + "", + "AP_BRD_ID1", + "AP_EC_INT_R_L", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "EDP_BRIJ_EN", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "AP_TS_PEN_I2C_SDA", + "AP_TS_PEN_I2C_SCL", + "DP_HOT_PLUG_DET", + "EC_IN_RW_ODL"; + + dmic_sel: dmic-sel { + pinmux { + pins = "gpio86"; + function = "gpio"; + }; + + pinconf { + pins = "gpio86"; + bias-pull-down; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-r1.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-r1.dts index 2cb522d6962e..2b522f9e0d8f 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-r1.dts +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-r1.dts @@ -17,21 +17,6 @@ ap_h1_spi: &spi0 {}; / { model = "Google Trogdor (rev1+)"; compatible = "google,trogdor", "qcom,sc7180"; - - panel: panel { - compatible = "auo,b116xa01"; - power-supply = <&pp3300_dx_edp>; - backlight = <&backlight>; - hpd-gpios = <&sn65dsi86_bridge 2 GPIO_ACTIVE_HIGH>; - - ports { - port { - panel_in_edp: endpoint { - remote-endpoint = <&sn65dsi86_out>; - }; - }; - }; - }; }; ap_ts_pen_1v8: &i2c4 { @@ -53,6 +38,10 @@ ap_ts_pen_1v8: &i2c4 { }; }; +&panel { + compatible = "auo,b116xa01"; +}; + &pp3300_hub { /* pp3300_l7c is used to power the USB hub */ /delete-property/regulator-always-on; @@ -68,6 +57,22 @@ ap_ts_pen_1v8: &i2c4 { status = "okay"; }; +&trackpad { + interrupts = <58 IRQ_TYPE_EDGE_FALLING>; +}; + +/* PINCTRL - modifications to sc7180-trogdor.dtsi */ + +&trackpad_int_1v8_odl { + pinmux { + pins = "gpio58"; + }; + + pinconf { + pins = "gpio58"; + }; +}; + /* PINCTRL - board-specific pinctrl */ &tlmm { diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi index 07c8b2c926c0..24d293ef56d7 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi @@ -6,8 +6,10 @@ */ #include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/input/gpio-keys.h> #include <dt-bindings/input/input.h> #include <dt-bindings/regulator/qcom,rpmh-regulator.h> +#include <dt-bindings/sound/sc7180-lpass.h> /* PMICs depend on spmi_bus label and so must come after SoC */ #include "pm6150.dtsi" @@ -15,16 +17,18 @@ / { thermal-zones { - charger-thermal { + charger_thermal: charger-thermal { polling-delay-passive = <0>; polling-delay = <0>; - thermal-sensors = <&pm6150_adc_tm 1>; + thermal-sensors = <&pm6150_adc_tm 0>; trips { - temperature = <125000>; - hysteresis = <1000>; - type = "critical"; + charger-crit { + temperature = <125000>; + hysteresis = <1000>; + type = "critical"; + }; }; }; }; @@ -45,7 +49,7 @@ /* Increase the size from 2MB to 8MB */ &rmtfs_mem { - reg = <0x0 0x84400000 0x0 0x800000>; + reg = <0x0 0x94600000 0x0 0x800000>; }; / { @@ -243,6 +247,7 @@ gpios = <&tlmm 52 GPIO_ACTIVE_LOW>; linux,code = <SW_PEN_INSERTED>; linux,input-type = <EV_SW>; + wakeup-event-action = <EV_ACT_DEASSERTED>; wakeup-source; }; }; @@ -255,6 +260,21 @@ #sound-dai-cells = <0>; }; + panel: panel { + /* Compatible will be filled in per-board */ + power-supply = <&pp3300_dx_edp>; + backlight = <&backlight>; + hpd-gpios = <&sn65dsi86_bridge 2 GPIO_ACTIVE_HIGH>; + + ports { + port { + panel_in_edp: endpoint { + remote-endpoint = <&sn65dsi86_out>; + }; + }; 
+ }; + }; + pwmleds { compatible = "pwm-leds"; keyboard_backlight: keyboard-backlight { @@ -264,6 +284,42 @@ max-brightness = <1023>; }; }; + + sound: sound { + compatible = "google,sc7180-trogdor"; + model = "sc7180-rt5682-max98357a-1mic"; + + audio-routing = + "Headphone Jack", "HPOL", + "Headphone Jack", "HPOR"; + + #address-cells = <1>; + #size-cells = <0>; + + dai-link@0 { + link-name = "MultiMedia0"; + reg = <MI2S_PRIMARY>; + cpu { + sound-dai = <&lpass_cpu MI2S_PRIMARY>; + }; + + sound_multimedia0_codec: codec { + sound-dai = <&alc5682 0 /* aif1 */>; + }; + }; + + dai-link@1 { + link-name = "MultiMedia1"; + reg = <MI2S_SECONDARY>; + cpu { + sound-dai = <&lpass_cpu MI2S_SECONDARY>; + }; + + sound_multimedia1_codec: codec { + sound-dai = <&max98357a>; + }; + }; + }; }; &qfprom { @@ -551,6 +607,10 @@ }; }; +&camcc { + status = "disabled"; +}; + &dsi0 { status = "okay"; vdda-supply = <&vdda_mipi_dsi0_1p2>; @@ -642,14 +702,14 @@ ap_tp_i2c: &i2c7 { status = "okay"; clock-frequency = <400000>; - trackpad@15 { + trackpad: trackpad@15 { compatible = "elan,ekth3000"; reg = <0x15>; pinctrl-names = "default"; - pinctrl-0 = <&trackpad_int_1v8_odl>; + pinctrl-0 = <&tp_int_odl>; interrupt-parent = <&tlmm>; - interrupts = <58 IRQ_TYPE_EDGE_FALLING>; + interrupts = <0 IRQ_TYPE_EDGE_FALLING>; vcc-supply = <&pp3300_fp_tp>; @@ -697,6 +757,27 @@ hp_i2c: &i2c9 { modem-init; }; +&lpass_cpu { + status = "okay"; + + pinctrl-names = "default"; + pinctrl-0 = <&sec_mi2s_active>, <&pri_mi2s_active>, <&pri_mi2s_mclk_active>; + + #address-cells = <1>; + #size-cells = <0>; + + mi2s@0 { + reg = <MI2S_PRIMARY>; + qcom,playback-sd-lines = <1>; + qcom,capture-sd-lines = <0>; + }; + + mi2s@1 { + reg = <MI2S_SECONDARY>; + qcom,playback-sd-lines = <0>; + }; +}; + &mdp { status = "okay"; }; @@ -716,8 +797,8 @@ hp_i2c: &i2c9 { &pm6150_adc_tm { status = "okay"; - charger-thermistor@1 { - reg = <1>; + charger-thermistor@0 { + reg = <0>; io-channels = <&pm6150_adc ADC5_AMUX_THM3_100K_PU>; qcom,ratiometric; qcom,hw-settle-time-us = <200>; @@ -768,17 +849,17 @@ hp_i2c: &i2c9 { }; &spi0 { - pinctrl-0 = <&qup_spi0_cs_gpio>; + pinctrl-0 = <&qup_spi0_cs_gpio_init_high>, <&qup_spi0_cs_gpio>; cs-gpios = <&tlmm 37 GPIO_ACTIVE_LOW>; }; &spi6 { - pinctrl-0 = <&qup_spi6_cs_gpio>; + pinctrl-0 = <&qup_spi6_cs_gpio_init_high>, <&qup_spi6_cs_gpio>; cs-gpios = <&tlmm 62 GPIO_ACTIVE_LOW>; }; ap_spi_fp: &spi10 { - pinctrl-0 = <&qup_spi10_cs_gpio>; + pinctrl-0 = <&qup_spi10_cs_gpio_init_high>, <&qup_spi10_cs_gpio>; cs-gpios = <&tlmm 89 GPIO_ACTIVE_LOW>; cros_ec_fp: ec@0 { @@ -787,7 +868,7 @@ ap_spi_fp: &spi10 { interrupt-parent = <&tlmm>; interrupts = <4 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; - pinctrl-0 = <&fp_to_ap_irq_l>, <&fp_rst_l>, <&fpmcu_boot0>, <&fpmcu_sel>; + pinctrl-0 = <&fp_to_ap_irq_l>; spi-max-frequency = <3000000>; }; }; @@ -812,7 +893,6 @@ ap_spi_fp: &spi10 { vddrf-supply = <&pp1300_l2c>; vddch0-supply = <&pp3300_l10c>; max-speed = <3200000>; - clocks = <&rpmhcc RPMH_RF_CLK2>; }; }; @@ -875,6 +955,22 @@ ap_spi_fp: &spi10 { }; }; +&pri_mi2s_active { + pinconf { + pins = "gpio53", "gpio54", "gpio55", "gpio56"; + drive-strength = <2>; + bias-pull-down; + }; +}; + +&pri_mi2s_mclk_active { + pinconf { + pins = "gpio57"; + drive-strength = <2>; + bias-pull-down; + }; +}; + &qspi_cs0 { pinconf { pins = "gpio68"; @@ -1015,6 +1111,14 @@ ap_spi_fp: &spi10 { }; }; +&sec_mi2s_active { + pinconf { + pins = "gpio49", "gpio50", "gpio51"; + drive-strength = <2>; + bias-pull-down; + }; +}; + /* PINCTRL - board-specific pinctrl */ 
&pm6150_gpio { @@ -1109,20 +1213,6 @@ ap_spi_fp: &spi10 { }; }; - dp_hot_plug_det: dp-hot-plug-det { - pinmux { - pins = "gpio117"; - function = "dp_hot"; - }; - - config { - pins = "gpio117"; - bias-disable; - input-enable; - drive-strength = <2>; - }; - }; - edp_brij_en: edp-brij-en { pinmux { pins = "gpio104"; @@ -1188,48 +1278,6 @@ ap_spi_fp: &spi10 { }; }; - fpmcu_boot0: fpmcu-boot0 { - pinmux { - pins = "gpio10"; - function = "gpio"; - }; - - pinconf { - pins = "gpio10"; - bias-disable; - drive-strength = <2>; - output-low; - }; - }; - - fpmcu_sel: fpmcu-sel { - pinmux { - pins = "gpio22"; - function = "gpio"; - }; - - pinconf { - pins = "gpio22"; - bias-disable; - drive-strength = <2>; - output-high; - }; - }; - - fp_rst_l: fp-rst-l { - pinmux { - pins = "gpio5"; - function = "gpio"; - }; - - pinconf { - pins = "gpio5"; - bias-disable; - drive-strength = <2>; - output-high; - }; - }; - fp_to_ap_irq_l: fp-to-ap-irq-l { pinmux { pins = "gpio4"; @@ -1245,7 +1293,6 @@ ap_spi_fp: &spi10 { }; }; - h1_ap_int_odl: h1-ap-int-odl { pinmux { pins = "gpio42"; @@ -1339,6 +1386,27 @@ ap_spi_fp: &spi10 { }; }; + qup_spi0_cs_gpio_init_high: qup-spi0-cs-gpio-init-high { + pinconf { + pins = "gpio37"; + output-high; + }; + }; + + qup_spi6_cs_gpio_init_high: qup-spi6-cs-gpio-init-high { + pinconf { + pins = "gpio62"; + output-high; + }; + }; + + qup_spi10_cs_gpio_init_high: qup-spi10-cs-gpio-init-high { + pinconf { + pins = "gpio89"; + output-high; + }; + }; + qup_uart3_sleep: qup-uart3-sleep { pinmux { pins = "gpio38", "gpio39", @@ -1386,14 +1454,16 @@ ap_spi_fp: &spi10 { }; }; - trackpad_int_1v8_odl: trackpad-int-1v8-odl { + /* Named trackpad_int_1v8_odl on earlier revision schematics */ + trackpad_int_1v8_odl: + tp_int_odl: tp-int-odl { pinmux { - pins = "gpio58"; + pins = "gpio0"; function = "gpio"; }; pinconf { - pins = "gpio58"; + pins = "gpio0"; /* Has external pullup */ bias-disable; diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi index 1ea3344ab62c..6228ba2d8513 100644 --- a/arch/arm64/boot/dts/qcom/sc7180.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi @@ -110,9 +110,9 @@ no-map; }; - rmtfs_mem: memory@84400000 { + rmtfs_mem: memory@94600000 { compatible = "qcom,rmtfs-mem"; - reg = <0x0 0x84400000 0x0 0x200000>; + reg = <0x0 0x94600000 0x0 0x200000>; no-map; qcom,client-id = <1>; @@ -1856,12 +1856,6 @@ pins = "gpio49", "gpio50", "gpio51"; function = "mi2s_1"; }; - - pinconf { - pins = "gpio49", "gpio50", "gpio51"; - drive-strength = <8>; - bias-pull-up; - }; }; pri_mi2s_active: pri-mi2s-active { @@ -1869,12 +1863,6 @@ pins = "gpio53", "gpio54", "gpio55", "gpio56"; function = "mi2s_0"; }; - - pinconf { - pins = "gpio53", "gpio54", "gpio55", "gpio56"; - drive-strength = <8>; - bias-pull-up; - }; }; pri_mi2s_mclk_active: pri-mi2s-mclk-active { @@ -1882,12 +1870,6 @@ pins = "gpio57"; function = "lpass_ext"; }; - - pinconf { - pins = "gpio57"; - drive-strength = <8>; - bias-pull-up; - }; }; sdc1_on: sdc1-on { @@ -2770,12 +2752,11 @@ }; usb_1_qmpphy: phy-wrapper@88e9000 { - compatible = "qcom,sc7180-qmp-usb3-phy"; + compatible = "qcom,sc7180-qmp-usb3-dp-phy"; reg = <0 0x088e9000 0 0x18c>, - <0 0x088e8000 0 0x38>; - reg-names = "reg-base", "dp_com"; + <0 0x088e8000 0 0x38>, + <0 0x088ea000 0 0x40>; status = "disabled"; - #clock-cells = <1>; #address-cells = <2>; #size-cells = <2>; ranges; @@ -2790,7 +2771,7 @@ <&gcc GCC_USB3_DP_PHY_PRIM_BCR>; reset-names = "phy", "common"; - usb_1_ssphy: phy@88e9200 { + usb_1_ssphy: usb3-phy@88e9200 { reg = <0 0x088e9200 0 
0x128>, <0 0x088e9400 0 0x200>, <0 0x088e9c00 0 0x218>, @@ -2803,6 +2784,16 @@ clock-names = "pipe0"; clock-output-names = "usb3_phy_pipe_clk_src"; }; + + dp_phy: dp-phy@88ea200 { + reg = <0 0x088ea200 0 0x200>, + <0 0x088ea400 0 0x200>, + <0 0x088eaa00 0 0x200>, + <0 0x088ea600 0 0x200>, + <0 0x088ea800 0 0x200>; + #clock-cells = <1>; + #phy-cells = <0>; + }; }; dc_noc: interconnect@9160000 { @@ -2854,10 +2845,10 @@ <&gcc GCC_USB30_PRIM_MASTER_CLK>; assigned-clock-rates = <19200000>, <150000000>; - interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>; + interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>, + <&pdc 6 IRQ_TYPE_LEVEL_HIGH>, + <&pdc 8 IRQ_TYPE_LEVEL_HIGH>, + <&pdc 9 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "hs_phy_irq", "ss_phy_irq", "dm_hs_phy_irq", "dp_hs_phy_irq"; @@ -3166,8 +3157,8 @@ <&gcc GCC_DISP_GPLL0_CLK_SRC>, <&dsi_phy 0>, <&dsi_phy 1>, - <0>, - <0>; + <&dp_phy 0>, + <&dp_phy 1>; clock-names = "bi_tcxo", "gcc_disp_gpll0_clk_src", "dsi0_phy_pll_out_byteclk", @@ -3222,7 +3213,7 @@ #reset-cells = <1>; }; - aoss_qmp: qmp@c300000 { + aoss_qmp: power-controller@c300000 { compatible = "qcom,sc7180-aoss-qmp"; reg = <0 0x0c300000 0 0x100000>; interrupts = <GIC_SPI 389 IRQ_TYPE_EDGE_RISING>; @@ -3575,7 +3566,8 @@ reg = <0 0x62f00000 0 0x29000>; reg-names = "lpass-lpaif"; - iommus = <&apps_smmu 0x1020 0>; + iommus = <&apps_smmu 0x1020 0>, + <&apps_smmu 0x1021 0>; power-domains = <&lpass_hm LPASS_CORE_HM_GDSCR>; diff --git a/arch/arm64/boot/dts/qcom/sc7280-idp.dts b/arch/arm64/boot/dts/qcom/sc7280-idp.dts new file mode 100644 index 000000000000..54d2cb365b71 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sc7280-idp.dts @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * sc7280 IDP board device tree source + * + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +/dts-v1/; + +#include "sc7280.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. 
sc7280 IDP platform"; + compatible = "qcom,sc7280-idp", "qcom,sc7280"; + + aliases { + serial0 = &uart5; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; +}; + +&apps_rsc { + pm7325-regulators { + compatible = "qcom,pm7325-rpmh-regulators"; + qcom,pmic-id = "b"; + + vreg_s1b_1p8: smps1 { + regulator-min-microvolt = <1856000>; + regulator-max-microvolt = <2040000>; + }; + + vreg_s7b_0p9: smps7 { + regulator-min-microvolt = <535000>; + regulator-max-microvolt = <1120000>; + }; + + vreg_s8b_1p2: smps8 { + regulator-min-microvolt = <1256000>; + regulator-max-microvolt = <1500000>; + }; + + vreg_l1b_0p8: ldo1 { + regulator-min-microvolt = <825000>; + regulator-max-microvolt = <925000>; + }; + + vreg_l2b_3p0: ldo2 { + regulator-min-microvolt = <2700000>; + regulator-max-microvolt = <3544000>; + }; + + vreg_l6b_1p2: ldo6 { + regulator-min-microvolt = <1140000>; + regulator-max-microvolt = <1260000>; + }; + + vreg_l7b_2p9: ldo7 { + regulator-min-microvolt = <2960000>; + regulator-max-microvolt = <2960000>; + }; + + vreg_l8b_0p9: ldo8 { + regulator-min-microvolt = <870000>; + regulator-max-microvolt = <970000>; + }; + + vreg_l9b_1p2: ldo9 { + regulator-min-microvolt = <1080000>; + regulator-max-microvolt = <1304000>; + }; + + vreg_l11b_1p7: ldo11 { + regulator-min-microvolt = <1504000>; + regulator-max-microvolt = <2000000>; + }; + + vreg_l12b_0p8: ldo12 { + regulator-min-microvolt = <751000>; + regulator-max-microvolt = <824000>; + }; + + vreg_l13b_0p8: ldo13 { + regulator-min-microvolt = <530000>; + regulator-max-microvolt = <824000>; + }; + + vreg_l14b_1p2: ldo14 { + regulator-min-microvolt = <1080000>; + regulator-max-microvolt = <1304000>; + }; + + vreg_l15b_0p8: ldo15 { + regulator-min-microvolt = <765000>; + regulator-max-microvolt = <1020000>; + }; + + vreg_l16b_1p2: ldo16 { + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <1300000>; + }; + + vreg_l17b_1p8: ldo17 { + regulator-min-microvolt = <1700000>; + regulator-max-microvolt = <1900000>; + }; + + vreg_l18b_1p8: ldo18 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <2000000>; + }; + + vreg_l19b_1p8: ldo19 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + }; + + pm8350c-regulators { + compatible = "qcom,pm8350c-rpmh-regulators"; + qcom,pmic-id = "c"; + + vreg_s1c_2p2: smps1 { + regulator-min-microvolt = <2190000>; + regulator-max-microvolt = <2210000>; + }; + + vreg_s9c_1p0: smps9 { + regulator-min-microvolt = <1010000>; + regulator-max-microvolt = <1170000>; + }; + + vreg_l1c_1p8: ldo1 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1980000>; + }; + + vreg_l2c_1p8: ldo2 { + regulator-min-microvolt = <1620000>; + regulator-max-microvolt = <1980000>; + }; + + vreg_l3c_3p0: ldo3 { + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <3540000>; + }; + + vreg_l4c_1p8: ldo4 { + regulator-min-microvolt = <1620000>; + regulator-max-microvolt = <3300000>; + }; + + vreg_l5c_1p8: ldo5 { + regulator-min-microvolt = <1620000>; + regulator-max-microvolt = <3300000>; + }; + + vreg_l6c_2p9: ldo6 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <2950000>; + }; + + vreg_l7c_3p0: ldo7 { + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3544000>; + }; + + vreg_l8c_1p8: ldo8 { + regulator-min-microvolt = <1620000>; + regulator-max-microvolt = <2000000>; + }; + + vreg_l9c_2p9: ldo9 { + regulator-min-microvolt = <2960000>; + regulator-max-microvolt = <2960000>; + }; + + vreg_l10c_0p8: ldo10 { + 
regulator-min-microvolt = <720000>; + regulator-max-microvolt = <1050000>; + }; + + vreg_l11c_2p8: ldo11 { + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <3544000>; + }; + + vreg_l12c_1p8: ldo12 { + regulator-min-microvolt = <1650000>; + regulator-max-microvolt = <2000000>; + }; + + vreg_l13c_3p0: ldo13 { + regulator-min-microvolt = <2700000>; + regulator-max-microvolt = <3544000>; + }; + + vreg_bob: bob { + regulator-min-microvolt = <3008000>; + regulator-max-microvolt = <3960000>; + }; + }; + + pmr735a-regulators { + compatible = "qcom,pmr735a-rpmh-regulators"; + qcom,pmic-id = "e"; + + vreg_l2e_1p2: ldo2 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + }; + + vreg_l3e_0p9: ldo3 { + regulator-min-microvolt = <912000>; + regulator-max-microvolt = <1020000>; + }; + + vreg_l4e_1p7: ldo4 { + regulator-min-microvolt = <1776000>; + regulator-max-microvolt = <1890000>; + }; + + vreg_l5e_0p8: ldo5 { + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <800000>; + }; + + vreg_l6e_0p8: ldo6 { + regulator-min-microvolt = <480000>; + regulator-max-microvolt = <904000>; + }; + }; +}; + +&qupv3_id_0 { + status = "okay"; +}; + +&uart5 { + status = "okay"; +}; + +/* PINCTRL - additions to nodes defined in sc7280.dtsi */ + +&qup_uart5_default { + tx { + pins = "gpio46"; + drive-strength = <2>; + bias-disable; + }; + + rx { + pins = "gpio47"; + drive-strength = <2>; + bias-pull-up; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi new file mode 100644 index 000000000000..2cc478553935 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi @@ -0,0 +1,1128 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * sc7280 SoC device tree source + * + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. 
+ */ + +#include <dt-bindings/clock/qcom,gcc-sc7280.h> +#include <dt-bindings/clock/qcom,rpmh.h> +#include <dt-bindings/interrupt-controller/arm-gic.h> +#include <dt-bindings/mailbox/qcom-ipcc.h> +#include <dt-bindings/power/qcom-aoss-qmp.h> +#include <dt-bindings/power/qcom-rpmpd.h> +#include <dt-bindings/soc/qcom,rpmh-rsc.h> + +/ { + interrupt-parent = <&intc>; + + #address-cells = <2>; + #size-cells = <2>; + + chosen { }; + + clocks { + xo_board: xo-board { + compatible = "fixed-clock"; + clock-frequency = <76800000>; + #clock-cells = <0>; + }; + + sleep_clk: sleep-clk { + compatible = "fixed-clock"; + clock-frequency = <32000>; + #clock-cells = <0>; + }; + }; + + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + aop_mem: memory@80800000 { + reg = <0x0 0x80800000 0x0 0x60000>; + no-map; + }; + + aop_cmd_db_mem: memory@80860000 { + reg = <0x0 0x80860000 0x0 0x20000>; + compatible = "qcom,cmd-db"; + no-map; + }; + + cpucp_mem: memory@80b00000 { + no-map; + reg = <0x0 0x80b00000 0x0 0x100000>; + }; + }; + + cpus { + #address-cells = <2>; + #size-cells = <0>; + + CPU0: cpu@0 { + device_type = "cpu"; + compatible = "arm,kryo"; + reg = <0x0 0x0>; + enable-method = "psci"; + cpu-idle-states = <&LITTLE_CPU_SLEEP_0 + &LITTLE_CPU_SLEEP_1 + &CLUSTER_SLEEP_0>; + next-level-cache = <&L2_0>; + L2_0: l2-cache { + compatible = "cache"; + next-level-cache = <&L3_0>; + L3_0: l3-cache { + compatible = "cache"; + }; + }; + }; + + CPU1: cpu@100 { + device_type = "cpu"; + compatible = "arm,kryo"; + reg = <0x0 0x100>; + enable-method = "psci"; + cpu-idle-states = <&LITTLE_CPU_SLEEP_0 + &LITTLE_CPU_SLEEP_1 + &CLUSTER_SLEEP_0>; + next-level-cache = <&L2_100>; + L2_100: l2-cache { + compatible = "cache"; + next-level-cache = <&L3_0>; + }; + }; + + CPU2: cpu@200 { + device_type = "cpu"; + compatible = "arm,kryo"; + reg = <0x0 0x200>; + enable-method = "psci"; + cpu-idle-states = <&LITTLE_CPU_SLEEP_0 + &LITTLE_CPU_SLEEP_1 + &CLUSTER_SLEEP_0>; + next-level-cache = <&L2_200>; + L2_200: l2-cache { + compatible = "cache"; + next-level-cache = <&L3_0>; + }; + }; + + CPU3: cpu@300 { + device_type = "cpu"; + compatible = "arm,kryo"; + reg = <0x0 0x300>; + enable-method = "psci"; + cpu-idle-states = <&LITTLE_CPU_SLEEP_0 + &LITTLE_CPU_SLEEP_1 + &CLUSTER_SLEEP_0>; + next-level-cache = <&L2_300>; + L2_300: l2-cache { + compatible = "cache"; + next-level-cache = <&L3_0>; + }; + }; + + CPU4: cpu@400 { + device_type = "cpu"; + compatible = "arm,kryo"; + reg = <0x0 0x400>; + enable-method = "psci"; + cpu-idle-states = <&BIG_CPU_SLEEP_0 + &BIG_CPU_SLEEP_1 + &CLUSTER_SLEEP_0>; + next-level-cache = <&L2_400>; + L2_400: l2-cache { + compatible = "cache"; + next-level-cache = <&L3_0>; + }; + }; + + CPU5: cpu@500 { + device_type = "cpu"; + compatible = "arm,kryo"; + reg = <0x0 0x500>; + enable-method = "psci"; + cpu-idle-states = <&BIG_CPU_SLEEP_0 + &BIG_CPU_SLEEP_1 + &CLUSTER_SLEEP_0>; + next-level-cache = <&L2_500>; + L2_500: l2-cache { + compatible = "cache"; + next-level-cache = <&L3_0>; + }; + }; + + CPU6: cpu@600 { + device_type = "cpu"; + compatible = "arm,kryo"; + reg = <0x0 0x600>; + enable-method = "psci"; + cpu-idle-states = <&BIG_CPU_SLEEP_0 + &BIG_CPU_SLEEP_1 + &CLUSTER_SLEEP_0>; + next-level-cache = <&L2_600>; + L2_600: l2-cache { + compatible = "cache"; + next-level-cache = <&L3_0>; + }; + }; + + CPU7: cpu@700 { + device_type = "cpu"; + compatible = "arm,kryo"; + reg = <0x0 0x700>; + enable-method = "psci"; + cpu-idle-states = <&BIG_CPU_SLEEP_0 + &BIG_CPU_SLEEP_1 + &CLUSTER_SLEEP_0>; + 
next-level-cache = <&L2_700>; + L2_700: l2-cache { + compatible = "cache"; + next-level-cache = <&L3_0>; + }; + }; + + idle-states { + entry-method = "psci"; + + LITTLE_CPU_SLEEP_0: cpu-sleep-0-0 { + compatible = "arm,idle-state"; + idle-state-name = "little-power-down"; + arm,psci-suspend-param = <0x40000003>; + entry-latency-us = <549>; + exit-latency-us = <901>; + min-residency-us = <1774>; + local-timer-stop; + }; + + LITTLE_CPU_SLEEP_1: cpu-sleep-0-1 { + compatible = "arm,idle-state"; + idle-state-name = "little-rail-power-down"; + arm,psci-suspend-param = <0x40000004>; + entry-latency-us = <702>; + exit-latency-us = <915>; + min-residency-us = <4001>; + local-timer-stop; + }; + + BIG_CPU_SLEEP_0: cpu-sleep-1-0 { + compatible = "arm,idle-state"; + idle-state-name = "big-power-down"; + arm,psci-suspend-param = <0x40000003>; + entry-latency-us = <523>; + exit-latency-us = <1244>; + min-residency-us = <2207>; + local-timer-stop; + }; + + BIG_CPU_SLEEP_1: cpu-sleep-1-1 { + compatible = "arm,idle-state"; + idle-state-name = "big-rail-power-down"; + arm,psci-suspend-param = <0x40000004>; + entry-latency-us = <526>; + exit-latency-us = <1854>; + min-residency-us = <5555>; + local-timer-stop; + }; + + CLUSTER_SLEEP_0: cluster-sleep-0 { + compatible = "arm,idle-state"; + idle-state-name = "cluster-power-down"; + arm,psci-suspend-param = <0x40003444>; + entry-latency-us = <3263>; + exit-latency-us = <6562>; + min-residency-us = <9926>; + local-timer-stop; + }; + }; + }; + + memory@80000000 { + device_type = "memory"; + /* We expect the bootloader to fill in the size */ + reg = <0 0x80000000 0 0>; + }; + + firmware { + scm { + compatible = "qcom,scm-sc7280", "qcom,scm"; + }; + }; + + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>; + }; + + psci { + compatible = "arm,psci-1.0"; + method = "smc"; + }; + + soc: soc@0 { + #address-cells = <2>; + #size-cells = <2>; + ranges = <0 0 0 0 0x10 0>; + dma-ranges = <0 0 0 0 0x10 0>; + compatible = "simple-bus"; + + gcc: clock-controller@100000 { + compatible = "qcom,gcc-sc7280"; + reg = <0 0x00100000 0 0x1f0000>; + clocks = <&rpmhcc RPMH_CXO_CLK>, + <&rpmhcc RPMH_CXO_CLK_A>, <&sleep_clk>, + <0>, <0>, <0>, <0>, <0>, <0>; + clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk", + "pcie_0_pipe_clk", "pcie_1_pipe-clk", + "ufs_phy_rx_symbol_0_clk", "ufs_phy_rx_symbol_1_clk", + "ufs_phy_tx_symbol_0_clk", + "usb3_phy_wrapper_gcc_usb30_pipe_clk"; + #clock-cells = <1>; + #reset-cells = <1>; + #power-domain-cells = <1>; + }; + + ipcc: mailbox@408000 { + compatible = "qcom,sc7280-ipcc", "qcom,ipcc"; + reg = <0 0x00408000 0 0x1000>; + interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>; + interrupt-controller; + #interrupt-cells = <3>; + #mbox-cells = <2>; + }; + + qupv3_id_0: geniqup@9c0000 { + compatible = "qcom,geni-se-qup"; + reg = <0 0x009c0000 0 0x2000>; + clock-names = "m-ahb", "s-ahb"; + clocks = <&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + status = "disabled"; + + uart5: serial@994000 { + compatible = "qcom,geni-debug-uart"; + reg = <0 0x00994000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>; + pinctrl-names = "default"; + pinctrl-0 = <&qup_uart5_default>; + interrupts = <GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>; + status = "disabled"; + }; + }; + + stm@6002000 { + compatible = "arm,coresight-stm", "arm,primecell"; + reg = <0 0x06002000 0 0x1000>, + <0 0x16280000 0 0x180000>; + reg-names = "stm-base", "stm-stimulus-base"; + + 
clocks = <&aoss_qmp>; + clock-names = "apb_pclk"; + + out-ports { + port { + stm_out: endpoint { + remote-endpoint = <&funnel0_in7>; + }; + }; + }; + }; + + funnel@6041000 { + compatible = "arm,coresight-dynamic-funnel", "arm,primecell"; + reg = <0 0x06041000 0 0x1000>; + + clocks = <&aoss_qmp>; + clock-names = "apb_pclk"; + + out-ports { + port { + funnel0_out: endpoint { + remote-endpoint = <&merge_funnel_in0>; + }; + }; + }; + + in-ports { + #address-cells = <1>; + #size-cells = <0>; + + port@7 { + reg = <7>; + funnel0_in7: endpoint { + remote-endpoint = <&stm_out>; + }; + }; + }; + }; + + funnel@6042000 { + compatible = "arm,coresight-dynamic-funnel", "arm,primecell"; + reg = <0 0x06042000 0 0x1000>; + + clocks = <&aoss_qmp>; + clock-names = "apb_pclk"; + + out-ports { + port { + funnel1_out: endpoint { + remote-endpoint = <&merge_funnel_in1>; + }; + }; + }; + + in-ports { + #address-cells = <1>; + #size-cells = <0>; + + port@4 { + reg = <4>; + funnel1_in4: endpoint { + remote-endpoint = <&apss_merge_funnel_out>; + }; + }; + }; + }; + + funnel@6045000 { + compatible = "arm,coresight-dynamic-funnel", "arm,primecell"; + reg = <0 0x06045000 0 0x1000>; + + clocks = <&aoss_qmp>; + clock-names = "apb_pclk"; + + out-ports { + port { + merge_funnel_out: endpoint { + remote-endpoint = <&swao_funnel_in>; + }; + }; + }; + + in-ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + merge_funnel_in0: endpoint { + remote-endpoint = <&funnel0_out>; + }; + }; + + port@1 { + reg = <1>; + merge_funnel_in1: endpoint { + remote-endpoint = <&funnel1_out>; + }; + }; + }; + }; + + replicator@6046000 { + compatible = "arm,coresight-dynamic-replicator", "arm,primecell"; + reg = <0 0x06046000 0 0x1000>; + + clocks = <&aoss_qmp>; + clock-names = "apb_pclk"; + + out-ports { + port { + replicator_out: endpoint { + remote-endpoint = <&etr_in>; + }; + }; + }; + + in-ports { + port { + replicator_in: endpoint { + remote-endpoint = <&swao_replicator_out>; + }; + }; + }; + }; + + etr@6048000 { + compatible = "arm,coresight-tmc", "arm,primecell"; + reg = <0 0x06048000 0 0x1000>; + iommus = <&apps_smmu 0x04c0 0>; + + clocks = <&aoss_qmp>; + clock-names = "apb_pclk"; + arm,scatter-gather; + + in-ports { + port { + etr_in: endpoint { + remote-endpoint = <&replicator_out>; + }; + }; + }; + }; + + funnel@6b04000 { + compatible = "arm,coresight-dynamic-funnel", "arm,primecell"; + reg = <0 0x06b04000 0 0x1000>; + + clocks = <&aoss_qmp>; + clock-names = "apb_pclk"; + + out-ports { + port { + swao_funnel_out: endpoint { + remote-endpoint = <&etf_in>; + }; + }; + }; + + in-ports { + #address-cells = <1>; + #size-cells = <0>; + + port@7 { + reg = <7>; + swao_funnel_in: endpoint { + remote-endpoint = <&merge_funnel_out>; + }; + }; + }; + }; + + etf@6b05000 { + compatible = "arm,coresight-tmc", "arm,primecell"; + reg = <0 0x06b05000 0 0x1000>; + + clocks = <&aoss_qmp>; + clock-names = "apb_pclk"; + + out-ports { + port { + etf_out: endpoint { + remote-endpoint = <&swao_replicator_in>; + }; + }; + }; + + in-ports { + port { + etf_in: endpoint { + remote-endpoint = <&swao_funnel_out>; + }; + }; + }; + }; + + replicator@6b06000 { + compatible = "arm,coresight-dynamic-replicator", "arm,primecell"; + reg = <0 0x06b06000 0 0x1000>; + + clocks = <&aoss_qmp>; + clock-names = "apb_pclk"; + qcom,replicator-loses-context; + + out-ports { + port { + swao_replicator_out: endpoint { + remote-endpoint = <&replicator_in>; + }; + }; + }; + + in-ports { + port { + swao_replicator_in: endpoint { + remote-endpoint = 
<&etf_out>; + }; + }; + }; + }; + + etm@7040000 { + compatible = "arm,coresight-etm4x", "arm,primecell"; + reg = <0 0x07040000 0 0x1000>; + + cpu = <&CPU0>; + + clocks = <&aoss_qmp>; + clock-names = "apb_pclk"; + arm,coresight-loses-context-with-cpu; + qcom,skip-power-up; + + out-ports { + port { + etm0_out: endpoint { + remote-endpoint = <&apss_funnel_in0>; + }; + }; + }; + }; + + etm@7140000 { + compatible = "arm,coresight-etm4x", "arm,primecell"; + reg = <0 0x07140000 0 0x1000>; + + cpu = <&CPU1>; + + clocks = <&aoss_qmp>; + clock-names = "apb_pclk"; + arm,coresight-loses-context-with-cpu; + qcom,skip-power-up; + + out-ports { + port { + etm1_out: endpoint { + remote-endpoint = <&apss_funnel_in1>; + }; + }; + }; + }; + + etm@7240000 { + compatible = "arm,coresight-etm4x", "arm,primecell"; + reg = <0 0x07240000 0 0x1000>; + + cpu = <&CPU2>; + + clocks = <&aoss_qmp>; + clock-names = "apb_pclk"; + arm,coresight-loses-context-with-cpu; + qcom,skip-power-up; + + out-ports { + port { + etm2_out: endpoint { + remote-endpoint = <&apss_funnel_in2>; + }; + }; + }; + }; + + etm@7340000 { + compatible = "arm,coresight-etm4x", "arm,primecell"; + reg = <0 0x07340000 0 0x1000>; + + cpu = <&CPU3>; + + clocks = <&aoss_qmp>; + clock-names = "apb_pclk"; + arm,coresight-loses-context-with-cpu; + qcom,skip-power-up; + + out-ports { + port { + etm3_out: endpoint { + remote-endpoint = <&apss_funnel_in3>; + }; + }; + }; + }; + + etm@7440000 { + compatible = "arm,coresight-etm4x", "arm,primecell"; + reg = <0 0x07440000 0 0x1000>; + + cpu = <&CPU4>; + + clocks = <&aoss_qmp>; + clock-names = "apb_pclk"; + arm,coresight-loses-context-with-cpu; + qcom,skip-power-up; + + out-ports { + port { + etm4_out: endpoint { + remote-endpoint = <&apss_funnel_in4>; + }; + }; + }; + }; + + etm@7540000 { + compatible = "arm,coresight-etm4x", "arm,primecell"; + reg = <0 0x07540000 0 0x1000>; + + cpu = <&CPU5>; + + clocks = <&aoss_qmp>; + clock-names = "apb_pclk"; + arm,coresight-loses-context-with-cpu; + qcom,skip-power-up; + + out-ports { + port { + etm5_out: endpoint { + remote-endpoint = <&apss_funnel_in5>; + }; + }; + }; + }; + + etm@7640000 { + compatible = "arm,coresight-etm4x", "arm,primecell"; + reg = <0 0x07640000 0 0x1000>; + + cpu = <&CPU6>; + + clocks = <&aoss_qmp>; + clock-names = "apb_pclk"; + arm,coresight-loses-context-with-cpu; + qcom,skip-power-up; + + out-ports { + port { + etm6_out: endpoint { + remote-endpoint = <&apss_funnel_in6>; + }; + }; + }; + }; + + etm@7740000 { + compatible = "arm,coresight-etm4x", "arm,primecell"; + reg = <0 0x07740000 0 0x1000>; + + cpu = <&CPU7>; + + clocks = <&aoss_qmp>; + clock-names = "apb_pclk"; + arm,coresight-loses-context-with-cpu; + qcom,skip-power-up; + + out-ports { + port { + etm7_out: endpoint { + remote-endpoint = <&apss_funnel_in7>; + }; + }; + }; + }; + + funnel@7800000 { /* APSS Funnel */ + compatible = "arm,coresight-dynamic-funnel", "arm,primecell"; + reg = <0 0x07800000 0 0x1000>; + + clocks = <&aoss_qmp>; + clock-names = "apb_pclk"; + + out-ports { + port { + apss_funnel_out: endpoint { + remote-endpoint = <&apss_merge_funnel_in>; + }; + }; + }; + + in-ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + apss_funnel_in0: endpoint { + remote-endpoint = <&etm0_out>; + }; + }; + + port@1 { + reg = <1>; + apss_funnel_in1: endpoint { + remote-endpoint = <&etm1_out>; + }; + }; + + port@2 { + reg = <2>; + apss_funnel_in2: endpoint { + remote-endpoint = <&etm2_out>; + }; + }; + + port@3 { + reg = <3>; + apss_funnel_in3: endpoint { + 
remote-endpoint = <&etm3_out>; + }; + }; + + port@4 { + reg = <4>; + apss_funnel_in4: endpoint { + remote-endpoint = <&etm4_out>; + }; + }; + + port@5 { + reg = <5>; + apss_funnel_in5: endpoint { + remote-endpoint = <&etm5_out>; + }; + }; + + port@6 { + reg = <6>; + apss_funnel_in6: endpoint { + remote-endpoint = <&etm6_out>; + }; + }; + + port@7 { + reg = <7>; + apss_funnel_in7: endpoint { + remote-endpoint = <&etm7_out>; + }; + }; + }; + }; + + funnel@7810000 { + compatible = "arm,coresight-dynamic-funnel", "arm,primecell"; + reg = <0 0x07810000 0 0x1000>; + + clocks = <&aoss_qmp>; + clock-names = "apb_pclk"; + + out-ports { + port { + apss_merge_funnel_out: endpoint { + remote-endpoint = <&funnel1_in4>; + }; + }; + }; + + in-ports { + port { + apss_merge_funnel_in: endpoint { + remote-endpoint = <&apss_funnel_out>; + }; + }; + }; + }; + + system-cache-controller@9200000 { + compatible = "qcom,sc7280-llcc"; + reg = <0 0x09200000 0 0xd0000>, <0 0x09600000 0 0x50000>; + reg-names = "llcc_base", "llcc_broadcast_base"; + interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>; + }; + + pdc: interrupt-controller@b220000 { + compatible = "qcom,sc7280-pdc", "qcom,pdc"; + reg = <0 0x0b220000 0 0x30000>; + qcom,pdc-ranges = <0 480 40>, <40 140 14>, <54 263 1>, + <55 306 4>, <59 312 3>, <62 374 2>, + <64 434 2>, <66 438 3>, <69 86 1>, + <70 520 54>, <124 609 31>, <155 63 1>, + <156 716 12>; + #interrupt-cells = <2>; + interrupt-parent = <&intc>; + interrupt-controller; + }; + + aoss_qmp: power-controller@c300000 { + compatible = "qcom,sc7280-aoss-qmp"; + reg = <0 0x0c300000 0 0x100000>; + interrupts-extended = <&ipcc IPCC_CLIENT_AOP + IPCC_MPROC_SIGNAL_GLINK_QMP + IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc IPCC_CLIENT_AOP + IPCC_MPROC_SIGNAL_GLINK_QMP>; + + #clock-cells = <0>; + #power-domain-cells = <1>; + }; + + spmi_bus: spmi@c440000 { + compatible = "qcom,spmi-pmic-arb"; + reg = <0 0x0c440000 0 0x1100>, + <0 0x0c600000 0 0x2000000>, + <0 0x0e600000 0 0x100000>, + <0 0x0e700000 0 0xa0000>, + <0 0x0c40a000 0 0x26000>; + reg-names = "core", "chnls", "obsrvr", "intr", "cnfg"; + interrupt-names = "periph_irq"; + interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>; + qcom,ee = <0>; + qcom,channel = <0>; + #address-cells = <1>; + #size-cells = <1>; + interrupt-controller; + #interrupt-cells = <4>; + }; + + tlmm: pinctrl@f100000 { + compatible = "qcom,sc7280-pinctrl"; + reg = <0 0x0f100000 0 0x300000>; + interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + gpio-ranges = <&tlmm 0 0 175>; + wakeup-parent = <&pdc>; + + qup_uart5_default: qup-uart5-default { + pins = "gpio46", "gpio47"; + function = "qup13"; + }; + }; + + apps_smmu: iommu@15000000 { + compatible = "qcom,sc7280-smmu-500", "arm,mmu-500"; + reg = <0 0x15000000 0 0x100000>; + #iommu-cells = <2>; + #global-interrupts = <1>; + dma-coherent; + interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>, + 
<GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>; + }; + + intc: interrupt-controller@17a00000 { + compatible = "arm,gic-v3"; + #address-cells = <2>; + #size-cells = <2>; + ranges; + #interrupt-cells = <3>; + interrupt-controller; + reg = <0 0x17a00000 0 0x10000>, /* GICD */ + <0 0x17a60000 0 0x100000>; /* GICR * 8 */ + interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>; + + gic-its@17a40000 { + compatible = "arm,gic-v3-its"; + msi-controller; + #msi-cells = <1>; + reg = <0 0x17a40000 0 0x20000>; + status = "disabled"; + }; + }; + + watchdog@17c10000 { + compatible = "qcom,apss-wdt-sc7280", "qcom,kpss-wdt"; + reg = <0 0x17c10000 0 0x1000>; + clocks = <&sleep_clk>; + interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>; + }; + + timer@17c20000 { + #address-cells = <2>; + #size-cells = <2>; + ranges; + compatible = "arm,armv7-timer-mem"; + reg = <0 0x17c20000 0 0x1000>; + + frame@17c21000 { + frame-number = <0>; + interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>; + reg = <0 0x17c21000 0 0x1000>, + <0 0x17c22000 0 0x1000>; + }; + + frame@17c23000 { + frame-number = <1>; + interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>; + reg = <0 0x17c23000 0 0x1000>; + status = "disabled"; + }; + + frame@17c25000 { + 
frame-number = <2>; + interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; + reg = <0 0x17c25000 0 0x1000>; + status = "disabled"; + }; + + frame@17c27000 { + frame-number = <3>; + interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>; + reg = <0 0x17c27000 0 0x1000>; + status = "disabled"; + }; + + frame@17c29000 { + frame-number = <4>; + interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; + reg = <0 0x17c29000 0 0x1000>; + status = "disabled"; + }; + + frame@17c2b000 { + frame-number = <5>; + interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>; + reg = <0 0x17c2b000 0 0x1000>; + status = "disabled"; + }; + + frame@17c2d000 { + frame-number = <6>; + interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>; + reg = <0 0x17c2d000 0 0x1000>; + status = "disabled"; + }; + }; + + apps_rsc: rsc@18200000 { + compatible = "qcom,rpmh-rsc"; + reg = <0 0x18200000 0 0x10000>, + <0 0x18210000 0 0x10000>, + <0 0x18220000 0 0x10000>; + reg-names = "drv-0", "drv-1", "drv-2"; + interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>; + qcom,tcs-offset = <0xd00>; + qcom,drv-id = <2>; + qcom,tcs-config = <ACTIVE_TCS 2>, + <SLEEP_TCS 3>, + <WAKE_TCS 3>, + <CONTROL_TCS 1>; + + rpmhpd: power-controller { + compatible = "qcom,sc7280-rpmhpd"; + #power-domain-cells = <1>; + operating-points-v2 = <&rpmhpd_opp_table>; + + rpmhpd_opp_table: opp-table { + compatible = "operating-points-v2"; + + rpmhpd_opp_ret: opp1 { + opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>; + }; + + rpmhpd_opp_low_svs: opp2 { + opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>; + }; + + rpmhpd_opp_svs: opp3 { + opp-level = <RPMH_REGULATOR_LEVEL_SVS>; + }; + + rpmhpd_opp_svs_l1: opp4 { + opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>; + }; + + rpmhpd_opp_svs_l2: opp5 { + opp-level = <RPMH_REGULATOR_LEVEL_SVS_L2>; + }; + + rpmhpd_opp_nom: opp6 { + opp-level = <RPMH_REGULATOR_LEVEL_NOM>; + }; + + rpmhpd_opp_nom_l1: opp7 { + opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>; + }; + + rpmhpd_opp_turbo: opp8 { + opp-level = <RPMH_REGULATOR_LEVEL_TURBO>; + }; + + rpmhpd_opp_turbo_l1: opp9 { + opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>; + }; + }; + }; + + rpmhcc: clock-controller { + compatible = "qcom,sc7280-rpmh-clk"; + clocks = <&xo_board>; + clock-names = "xo"; + #clock-cells = <1>; + }; + }; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, + <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, + <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, + <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts index c4ac6f5dc008..2d5533dd4ec2 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts @@ -244,7 +244,7 @@ &adsp_pas { status = "okay"; - firmware-name = "qcom/sdm845/adsp.mdt"; + firmware-name = "qcom/sdm845/adsp.mbn"; }; &apps_rsc { @@ -390,7 +390,7 @@ &cdsp_pas { status = "okay"; - firmware-name = "qcom/sdm845/cdsp.mdt"; + firmware-name = "qcom/sdm845/cdsp.mbn"; }; &dsi0 { @@ -1015,7 +1015,7 @@ left_spkr: wsa8810-left{ compatible = "sdw10217201000"; reg = <0 1>; - powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>; + powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>; #thermal-sensor-cells = <0>; sound-name-prefix = "SpkrLeft"; #sound-dai-cells = <0>; @@ -1023,7 +1023,7 @@ right_spkr: wsa8810-right{ compatible = "sdw10217201000"; - powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>; + powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>; reg = <0 2>; #thermal-sensor-cells = <0>; sound-name-prefix = 
"SpkrRight"; @@ -1108,6 +1108,25 @@ status = "okay"; }; +&camss { + vdda-supply = <&vreg_l1a_0p875>; + + status = "ok"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + csiphy0_ep: endpoint { + clock-lanes = <7>; + data-lanes = <0 1 2 3>; + remote-endpoint = <&ov8856_ep>; + }; + }; + }; +}; + &cci_i2c0 { camera@10 { compatible = "ovti,ov8856"; @@ -1137,7 +1156,7 @@ avdd-supply = <&cam0_avdd_2v8>; dvdd-supply = <&cam0_dvdd_1v2>; - status = "disable"; + status = "ok"; port { ov8856_ep: endpoint { @@ -1145,7 +1164,7 @@ link-frequencies = /bits/ 64 <360000000 180000000>; data-lanes = <1 2 3 4>; -// remote-endpoint = <&csiphy0_ep>; + remote-endpoint = <&csiphy0_ep>; }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts index 86cbae63eaf7..7d029425336e 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts @@ -157,6 +157,14 @@ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; }; + vreg_l14a_1p8: ldo14 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; + regulator-boot-on; + regulator-always-on; + }; + vreg_l17a_1p3: ldo17 { regulator-min-microvolt = <1304000>; regulator-max-microvolt = <1304000>; @@ -191,6 +199,7 @@ regulator-min-microvolt = <1200000>; regulator-max-microvolt = <1200000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; + regulator-boot-on; }; }; }; @@ -200,6 +209,43 @@ firmware-name = "qcom/sdm845/cdsp.mdt"; }; +&dsi0 { + status = "okay"; + vdda-supply = <&vreg_l26a_1p2>; + + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "tianma,fhd-video"; + reg = <0>; + vddi0-supply = <&vreg_l14a_1p8>; + vddpos-supply = <&lab>; + vddneg-supply = <&ibb>; + + #address-cells = <1>; + #size-cells = <0>; + + reset-gpios = <&tlmm 6 GPIO_ACTIVE_LOW>; + + port { + tianma_nt36672a_in_0: endpoint { + remote-endpoint = <&dsi0_out>; + }; + }; + }; +}; + +&dsi0_out { + remote-endpoint = <&tianma_nt36672a_in_0>; + data-lanes = <0 1 2 3>; +}; + +&dsi0_phy { + status = "okay"; + vdds-supply = <&vreg_l1a_0p875>; +}; + &gcc { protected-clocks = <GCC_QSPI_CORE_CLK>, <GCC_QSPI_CORE_CLK_SRC>, @@ -215,6 +261,31 @@ }; }; +&ibb { + regulator-min-microvolt = <4600000>; + regulator-max-microvolt = <6000000>; + regulator-over-current-protection; + regulator-pull-down; + regulator-soft-start; + qcom,discharge-resistor-kohms = <300>; +}; + +&lab { + regulator-min-microvolt = <4600000>; + regulator-max-microvolt = <6000000>; + regulator-over-current-protection; + regulator-pull-down; + regulator-soft-start; +}; + +&mdss { + status = "okay"; +}; + +&mdss_mdp { + status = "okay"; +}; + &mss_pil { status = "okay"; firmware-name = "qcom/sdm845/mba.mbn", "qcom/sdm845/modem.mdt"; diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index 454f794af547..0a86fe71a66d 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -1061,6 +1061,16 @@ gcc: clock-controller@100000 { compatible = "qcom,gcc-sdm845"; reg = <0 0x00100000 0 0x1f0000>; + clocks = <&rpmhcc RPMH_CXO_CLK>, + <&rpmhcc RPMH_CXO_CLK_A>, + <&sleep_clk>, + <&pcie0_lane>, + <&pcie1_lane>; + clock-names = "bi_tcxo", + "bi_tcxo_ao", + "sleep_clk", + "pcie_0_pipe_clk", + "pcie_1_pipe_clk"; #clock-cells = <1>; #reset-cells = <1>; #power-domain-cells = <1>; @@ -2062,6 +2072,7 @@ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>; 
clock-names = "pipe0"; + #clock-cells = <0>; #phy-cells = <0>; clock-output-names = "pcie_0_pipe_clk"; }; @@ -2170,6 +2181,7 @@ clocks = <&gcc GCC_PCIE_1_PIPE_CLK>; clock-names = "pipe0"; + #clock-cells = <0>; #phy-cells = <0>; clock-output-names = "pcie_1_pipe_clk"; }; @@ -2382,7 +2394,7 @@ #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; - gpio-ranges = <&tlmm 0 0 150>; + gpio-ranges = <&tlmm 0 0 151>; wakeup-parent = <&pdc_intc>; cci0_default: cci0-default { @@ -3673,7 +3685,6 @@ <0 0x088e8000 0 0x10>; reg-names = "reg-base", "dp_com"; status = "disabled"; - #clock-cells = <1>; #address-cells = <2>; #size-cells = <2>; ranges; @@ -3695,6 +3706,7 @@ <0 0x088e9600 0 0x128>, <0 0x088e9800 0 0x200>, <0 0x088e9a00 0 0x100>; + #clock-cells = <0>; #phy-cells = <0>; clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; clock-names = "pipe0"; @@ -3706,7 +3718,6 @@ compatible = "qcom,sdm845-qmp-usb3-uni-phy"; reg = <0 0x088eb000 0 0x18c>; status = "disabled"; - #clock-cells = <1>; #address-cells = <2>; #size-cells = <2>; ranges; @@ -3726,6 +3737,7 @@ <0 0x088eb400 0 0x1fc>, <0 0x088eb800 0 0x218>, <0 0x088eb600 0 0x70>; + #clock-cells = <0>; #phy-cells = <0>; clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>; clock-names = "pipe0"; @@ -3909,6 +3921,141 @@ #reset-cells = <1>; }; + camss: camss@a00000 { + compatible = "qcom,sdm845-camss"; + + reg = <0 0xacb3000 0 0x1000>, + <0 0xacba000 0 0x1000>, + <0 0xacc8000 0 0x1000>, + <0 0xac65000 0 0x1000>, + <0 0xac66000 0 0x1000>, + <0 0xac67000 0 0x1000>, + <0 0xac68000 0 0x1000>, + <0 0xacaf000 0 0x4000>, + <0 0xacb6000 0 0x4000>, + <0 0xacc4000 0 0x4000>; + reg-names = "csid0", + "csid1", + "csid2", + "csiphy0", + "csiphy1", + "csiphy2", + "csiphy3", + "vfe0", + "vfe1", + "vfe_lite"; + + interrupts = <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 478 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 479 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "csid0", + "csid1", + "csid2", + "csiphy0", + "csiphy1", + "csiphy2", + "csiphy3", + "vfe0", + "vfe1", + "vfe_lite"; + + power-domains = <&clock_camcc IFE_0_GDSC>, + <&clock_camcc IFE_1_GDSC>, + <&clock_camcc TITAN_TOP_GDSC>; + + clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>, + <&clock_camcc CAM_CC_CPAS_AHB_CLK>, + <&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>, + <&clock_camcc CAM_CC_IFE_0_CSID_CLK>, + <&clock_camcc CAM_CC_IFE_0_CSID_CLK_SRC>, + <&clock_camcc CAM_CC_IFE_1_CSID_CLK>, + <&clock_camcc CAM_CC_IFE_1_CSID_CLK_SRC>, + <&clock_camcc CAM_CC_IFE_LITE_CSID_CLK>, + <&clock_camcc CAM_CC_IFE_LITE_CSID_CLK_SRC>, + <&clock_camcc CAM_CC_CSIPHY0_CLK>, + <&clock_camcc CAM_CC_CSI0PHYTIMER_CLK>, + <&clock_camcc CAM_CC_CSI0PHYTIMER_CLK_SRC>, + <&clock_camcc CAM_CC_CSIPHY1_CLK>, + <&clock_camcc CAM_CC_CSI1PHYTIMER_CLK>, + <&clock_camcc CAM_CC_CSI1PHYTIMER_CLK_SRC>, + <&clock_camcc CAM_CC_CSIPHY2_CLK>, + <&clock_camcc CAM_CC_CSI2PHYTIMER_CLK>, + <&clock_camcc CAM_CC_CSI2PHYTIMER_CLK_SRC>, + <&clock_camcc CAM_CC_CSIPHY3_CLK>, + <&clock_camcc CAM_CC_CSI3PHYTIMER_CLK>, + <&clock_camcc CAM_CC_CSI3PHYTIMER_CLK_SRC>, + <&gcc GCC_CAMERA_AHB_CLK>, + <&gcc GCC_CAMERA_AXI_CLK>, + <&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>, + <&clock_camcc CAM_CC_SOC_AHB_CLK>, + <&clock_camcc CAM_CC_IFE_0_AXI_CLK>, + <&clock_camcc CAM_CC_IFE_0_CLK>, + <&clock_camcc CAM_CC_IFE_0_CPHY_RX_CLK>, + <&clock_camcc 
CAM_CC_IFE_0_CLK_SRC>, + <&clock_camcc CAM_CC_IFE_1_AXI_CLK>, + <&clock_camcc CAM_CC_IFE_1_CLK>, + <&clock_camcc CAM_CC_IFE_1_CPHY_RX_CLK>, + <&clock_camcc CAM_CC_IFE_1_CLK_SRC>, + <&clock_camcc CAM_CC_IFE_LITE_CLK>, + <&clock_camcc CAM_CC_IFE_LITE_CPHY_RX_CLK>, + <&clock_camcc CAM_CC_IFE_LITE_CLK_SRC>; + clock-names = "camnoc_axi", + "cpas_ahb", + "cphy_rx_src", + "csi0", + "csi0_src", + "csi1", + "csi1_src", + "csi2", + "csi2_src", + "csiphy0", + "csiphy0_timer", + "csiphy0_timer_src", + "csiphy1", + "csiphy1_timer", + "csiphy1_timer_src", + "csiphy2", + "csiphy2_timer", + "csiphy2_timer_src", + "csiphy3", + "csiphy3_timer", + "csiphy3_timer_src", + "gcc_camera_ahb", + "gcc_camera_axi", + "slow_ahb_src", + "soc_ahb", + "vfe0_axi", + "vfe0", + "vfe0_cphy_rx", + "vfe0_src", + "vfe1_axi", + "vfe1", + "vfe1_cphy_rx", + "vfe1_src", + "vfe_lite", + "vfe_lite_cphy_rx", + "vfe_lite_src"; + + iommus = <&apps_smmu 0x0808 0x0>, + <&apps_smmu 0x0810 0x8>, + <&apps_smmu 0x0c08 0x0>, + <&apps_smmu 0x0c10 0x8>; + + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + }; + }; + cci: cci@ac4a000 { compatible = "qcom,sdm845-cci"; #address-cells = <1>; @@ -4428,7 +4575,7 @@ #reset-cells = <1>; }; - aoss_qmp: qmp@c300000 { + aoss_qmp: power-controller@c300000 { compatible = "qcom,sdm845-aoss-qmp"; reg = <0 0x0c300000 0 0x100000>; interrupts = <GIC_SPI 389 IRQ_TYPE_EDGE_RISING>; diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi index e5bb17bc2f46..51235a9521c2 100644 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi @@ -577,17 +577,188 @@ <&sleep_clk>; }; + qupv3_id_0: geniqup@8c0000 { + compatible = "qcom,geni-se-qup"; + reg = <0x0 0x008c0000 0x0 0x6000>; + clock-names = "m-ahb", "s-ahb"; + clocks = <&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + iommus = <&apps_smmu 0xc3 0x0>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + status = "disabled"; + + i2c0: i2c@880000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x00880000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>; + pinctrl-names = "default"; + pinctrl-0 = <&qup_i2c0_default>; + interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c1: i2c@884000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x00884000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>; + pinctrl-names = "default"; + pinctrl-0 = <&qup_i2c1_default>; + interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c2: i2c@888000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x00888000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>; + pinctrl-names = "default"; + pinctrl-0 = <&qup_i2c2_default>; + interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c3: i2c@88c000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x0088c000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>; + pinctrl-names = "default"; + pinctrl-0 = <&qup_i2c3_default>; + interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c4: i2c@890000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x00890000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>; + pinctrl-names = "default"; + pinctrl-0 = 
<&qup_i2c4_default>; + interrupts = <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c5: i2c@894000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x00894000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>; + pinctrl-names = "default"; + pinctrl-0 = <&qup_i2c5_default>; + interrupts = <GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c6: i2c@898000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x00898000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>; + pinctrl-names = "default"; + pinctrl-0 = <&qup_i2c6_default>; + interrupts = <GIC_SPI 607 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c7: i2c@89c000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x0089c000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>; + pinctrl-names = "default"; + pinctrl-0 = <&qup_i2c7_default>; + interrupts = <GIC_SPI 607 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + }; + qupv3_id_1: geniqup@ac0000 { compatible = "qcom,geni-se-qup"; reg = <0x0 0x00ac0000 0x0 0x6000>; clock-names = "m-ahb", "s-ahb"; clocks = <&gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, <&gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + iommus = <&apps_smmu 0x603 0x0>; #address-cells = <2>; #size-cells = <2>; ranges; status = "disabled"; + i2c8: i2c@a80000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x00a80000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>; + pinctrl-names = "default"; + pinctrl-0 = <&qup_i2c8_default>; + interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c9: i2c@a84000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x00a84000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>; + pinctrl-names = "default"; + pinctrl-0 = <&qup_i2c9_default>; + interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c10: i2c@a88000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x00a88000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>; + pinctrl-names = "default"; + pinctrl-0 = <&qup_i2c10_default>; + interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c11: i2c@a8c000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x00a8c000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>; + pinctrl-names = "default"; + pinctrl-0 = <&qup_i2c11_default>; + interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + uart2: serial@a90000 { compatible = "qcom,geni-debug-uart"; reg = <0x0 0x00a90000 0x0 0x4000>; @@ -596,6 +767,124 @@ interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; }; + + i2c12: i2c@a90000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x00a90000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>; + pinctrl-names = "default"; + pinctrl-0 = <&qup_i2c12_default>; + interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c16: i2c@94000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x0094000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>; + pinctrl-names = "default"; + 
pinctrl-0 = <&qup_i2c16_default>; + interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + }; + + qupv3_id_2: geniqup@cc0000 { + compatible = "qcom,geni-se-qup"; + reg = <0x0 0x00cc0000 0x0 0x6000>; + + clock-names = "m-ahb", "s-ahb"; + clocks = <&gcc GCC_QUPV3_WRAP_2_M_AHB_CLK>, + <&gcc GCC_QUPV3_WRAP_2_S_AHB_CLK>; + iommus = <&apps_smmu 0x7a3 0x0>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + status = "disabled"; + + i2c17: i2c@c80000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x00c80000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP2_S0_CLK>; + pinctrl-names = "default"; + pinctrl-0 = <&qup_i2c17_default>; + interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c18: i2c@c84000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x00c84000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP2_S1_CLK>; + pinctrl-names = "default"; + pinctrl-0 = <&qup_i2c18_default>; + interrupts = <GIC_SPI 583 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c19: i2c@c88000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x00c88000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP2_S2_CLK>; + pinctrl-names = "default"; + pinctrl-0 = <&qup_i2c19_default>; + interrupts = <GIC_SPI 584 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c13: i2c@c8c000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x00c8c000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP2_S3_CLK>; + pinctrl-names = "default"; + pinctrl-0 = <&qup_i2c13_default>; + interrupts = <GIC_SPI 585 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c14: i2c@c90000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x00c90000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP2_S4_CLK>; + pinctrl-names = "default"; + pinctrl-0 = <&qup_i2c14_default>; + interrupts = <GIC_SPI 586 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c15: i2c@c94000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x00c94000 0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP2_S5_CLK>; + pinctrl-names = "default"; + pinctrl-0 = <&qup_i2c15_default>; + interrupts = <GIC_SPI 587 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; }; config_noc: interconnect@1500000 { @@ -914,11 +1203,271 @@ <0x0 0x03D00000 0x0 0x300000>; reg-names = "west", "east", "north", "south"; interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>; - gpio-ranges = <&tlmm 0 0 175>; + gpio-ranges = <&tlmm 0 0 176>; gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; + + qup_i2c0_default: qup-i2c0-default { + mux { + pins = "gpio0", "gpio1"; + function = "qup0"; + }; + + config { + pins = "gpio0", "gpio1"; + drive-strength = <0x02>; + bias-disable; + }; + }; + + qup_i2c1_default: qup-i2c1-default { + mux { + pins = "gpio114", "gpio115"; + function = "qup1"; + }; + + config { + pins = "gpio114", "gpio115"; + drive-strength = <0x02>; + bias-disable; + }; + }; + + qup_i2c2_default: qup-i2c2-default { + mux { + pins = "gpio126", "gpio127"; + function = "qup2"; + }; + + config { + pins = "gpio126", "gpio127"; + drive-strength = <0x02>; + bias-disable; + }; + }; + + qup_i2c3_default: qup-i2c3-default { + mux { + pins = "gpio144", 
"gpio145"; + function = "qup3"; + }; + + config { + pins = "gpio144", "gpio145"; + drive-strength = <0x02>; + bias-disable; + }; + }; + + qup_i2c4_default: qup-i2c4-default { + mux { + pins = "gpio51", "gpio52"; + function = "qup4"; + }; + + config { + pins = "gpio51", "gpio52"; + drive-strength = <0x02>; + bias-disable; + }; + }; + + qup_i2c5_default: qup-i2c5-default { + mux { + pins = "gpio121", "gpio122"; + function = "qup5"; + }; + + config { + pins = "gpio121", "gpio122"; + drive-strength = <0x02>; + bias-disable; + }; + }; + + qup_i2c6_default: qup-i2c6-default { + mux { + pins = "gpio6", "gpio7"; + function = "qup6"; + }; + + config { + pins = "gpio6", "gpio7"; + drive-strength = <0x02>; + bias-disable; + }; + }; + + qup_i2c7_default: qup-i2c7-default { + mux { + pins = "gpio98", "gpio99"; + function = "qup7"; + }; + + config { + pins = "gpio98", "gpio99"; + drive-strength = <0x02>; + bias-disable; + }; + }; + + qup_i2c8_default: qup-i2c8-default { + mux { + pins = "gpio88", "gpio89"; + function = "qup8"; + }; + + config { + pins = "gpio88", "gpio89"; + drive-strength = <0x02>; + bias-disable; + }; + }; + + qup_i2c9_default: qup-i2c9-default { + mux { + pins = "gpio39", "gpio40"; + function = "qup9"; + }; + + config { + pins = "gpio39", "gpio40"; + drive-strength = <0x02>; + bias-disable; + }; + }; + + qup_i2c10_default: qup-i2c10-default { + mux { + pins = "gpio9", "gpio10"; + function = "qup10"; + }; + + config { + pins = "gpio9", "gpio10"; + drive-strength = <0x02>; + bias-disable; + }; + }; + + qup_i2c11_default: qup-i2c11-default { + mux { + pins = "gpio94", "gpio95"; + function = "qup11"; + }; + + config { + pins = "gpio94", "gpio95"; + drive-strength = <0x02>; + bias-disable; + }; + }; + + qup_i2c12_default: qup-i2c12-default { + mux { + pins = "gpio83", "gpio84"; + function = "qup12"; + }; + + config { + pins = "gpio83", "gpio84"; + drive-strength = <0x02>; + bias-disable; + }; + }; + + qup_i2c13_default: qup-i2c13-default { + mux { + pins = "gpio43", "gpio44"; + function = "qup13"; + }; + + config { + pins = "gpio43", "gpio44"; + drive-strength = <0x02>; + bias-disable; + }; + }; + + qup_i2c14_default: qup-i2c14-default { + mux { + pins = "gpio47", "gpio48"; + function = "qup14"; + }; + + config { + pins = "gpio47", "gpio48"; + drive-strength = <0x02>; + bias-disable; + }; + }; + + qup_i2c15_default: qup-i2c15-default { + mux { + pins = "gpio27", "gpio28"; + function = "qup15"; + }; + + config { + pins = "gpio27", "gpio28"; + drive-strength = <0x02>; + bias-disable; + }; + }; + + qup_i2c16_default: qup-i2c16-default { + mux { + pins = "gpio86", "gpio85"; + function = "qup16"; + }; + + config { + pins = "gpio86", "gpio85"; + drive-strength = <0x02>; + bias-disable; + }; + }; + + qup_i2c17_default: qup-i2c17-default { + mux { + pins = "gpio55", "gpio56"; + function = "qup17"; + }; + + config { + pins = "gpio55", "gpio56"; + drive-strength = <0x02>; + bias-disable; + }; + }; + + qup_i2c18_default: qup-i2c18-default { + mux { + pins = "gpio23", "gpio24"; + function = "qup18"; + }; + + config { + pins = "gpio23", "gpio24"; + drive-strength = <0x02>; + bias-disable; + }; + }; + + qup_i2c19_default: qup-i2c19-default { + mux { + pins = "gpio57", "gpio58"; + function = "qup19"; + }; + + config { + pins = "gpio57", "gpio58"; + drive-strength = <0x02>; + bias-disable; + }; + }; }; remoteproc_mpss: remoteproc@4080000 { @@ -1612,7 +2161,6 @@ <0 0x088e8000 0 0x10>; reg-names = "reg-base", "dp_com"; status = "disabled"; - #clock-cells = <1>; #address-cells = <2>; #size-cells = <2>; 
ranges; @@ -1634,6 +2182,7 @@ <0 0x088e9600 0 0x200>, <0 0x088e9800 0 0x200>, <0 0x088e9a00 0 0x100>; + #clock-cells = <0>; #phy-cells = <0>; clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; clock-names = "pipe0"; @@ -1659,7 +2208,6 @@ compatible = "qcom,sm8150-qmp-usb3-uni-phy"; reg = <0 0x088eb000 0 0x200>; status = "disabled"; - #clock-cells = <1>; #address-cells = <2>; #size-cells = <2>; ranges; @@ -1679,6 +2227,7 @@ <0 0x088eb400 0 0x200>, <0 0x088eb800 0 0x800>, <0 0x088eb600 0 0x200>; + #clock-cells = <0>; #phy-cells = <0>; clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>; clock-names = "pipe0"; diff --git a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts index 5b4c5b08434c..cfc4d1febe0f 100644 --- a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts +++ b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts @@ -601,10 +601,6 @@ }; }; -&pm8150_rtc { - status = "okay"; -}; - &qupv3_id_0 { status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi index 947e1accae3a..4c0de12aaba6 100644 --- a/arch/arm64/boot/dts/qcom/sm8250.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi @@ -17,6 +17,7 @@ #include <dt-bindings/soc/qcom,rpmh-rsc.h> #include <dt-bindings/sound/qcom,q6afe.h> #include <dt-bindings/thermal/thermal.h> +#include <dt-bindings/clock/qcom,videocc-sm8250.h> / { interrupt-parent = <&intc>; @@ -279,7 +280,7 @@ pmu { compatible = "arm,armv8-pmuv3"; - interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>; }; psci { @@ -548,8 +549,6 @@ reg = <0 0x00880000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP2_S0_CLK>; - pinctrl-names = "default"; - pinctrl-0 = <&qup_spi14_default>; interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; @@ -576,8 +575,6 @@ reg = <0 0x00884000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP2_S1_CLK>; - pinctrl-names = "default"; - pinctrl-0 = <&qup_spi15_default>; interrupts = <GIC_SPI 583 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; @@ -604,8 +601,6 @@ reg = <0 0x00888000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP2_S2_CLK>; - pinctrl-names = "default"; - pinctrl-0 = <&qup_spi16_default>; interrupts = <GIC_SPI 584 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; @@ -632,8 +627,6 @@ reg = <0 0x0088c000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP2_S3_CLK>; - pinctrl-names = "default"; - pinctrl-0 = <&qup_spi17_default>; interrupts = <GIC_SPI 585 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; @@ -673,8 +666,6 @@ reg = <0 0x00890000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP2_S4_CLK>; - pinctrl-names = "default"; - pinctrl-0 = <&qup_spi18_default>; interrupts = <GIC_SPI 586 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; @@ -714,8 +705,6 @@ reg = <0 0x00894000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP2_S5_CLK>; - pinctrl-names = "default"; - pinctrl-0 = <&qup_spi19_default>; interrupts = <GIC_SPI 587 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; @@ -755,8 +744,6 @@ reg = <0 0x00980000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>; - pinctrl-names = "default"; - pinctrl-0 = <&qup_spi0_default>; interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; @@ -783,8 +770,6 @@ reg = <0 0x00984000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>; - pinctrl-names = "default"; - pinctrl-0 = 
<&qup_spi1_default>; interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; @@ -811,8 +796,6 @@ reg = <0 0x00988000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>; - pinctrl-names = "default"; - pinctrl-0 = <&qup_spi2_default>; interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; @@ -852,8 +835,6 @@ reg = <0 0x0098c000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>; - pinctrl-names = "default"; - pinctrl-0 = <&qup_spi3_default>; interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; @@ -880,8 +861,6 @@ reg = <0 0x00990000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>; - pinctrl-names = "default"; - pinctrl-0 = <&qup_spi4_default>; interrupts = <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; @@ -908,8 +887,6 @@ reg = <0 0x00994000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>; - pinctrl-names = "default"; - pinctrl-0 = <&qup_spi5_default>; interrupts = <GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; @@ -936,8 +913,6 @@ reg = <0 0x00998000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>; - pinctrl-names = "default"; - pinctrl-0 = <&qup_spi6_default>; interrupts = <GIC_SPI 607 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; @@ -977,8 +952,6 @@ reg = <0 0x0099c000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>; - pinctrl-names = "default"; - pinctrl-0 = <&qup_spi7_default>; interrupts = <GIC_SPI 608 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; @@ -1018,8 +991,6 @@ reg = <0 0x00a80000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>; - pinctrl-names = "default"; - pinctrl-0 = <&qup_spi8_default>; interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; @@ -1046,8 +1017,6 @@ reg = <0 0x00a84000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>; - pinctrl-names = "default"; - pinctrl-0 = <&qup_spi9_default>; interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; @@ -1074,8 +1043,6 @@ reg = <0 0x00a88000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>; - pinctrl-names = "default"; - pinctrl-0 = <&qup_spi10_default>; interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; @@ -1102,8 +1069,6 @@ reg = <0 0x00a8c000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>; - pinctrl-names = "default"; - pinctrl-0 = <&qup_spi11_default>; interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; @@ -1130,8 +1095,6 @@ reg = <0 0x00a90000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>; - pinctrl-names = "default"; - pinctrl-0 = <&qup_spi12_default>; interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; @@ -1171,8 +1134,6 @@ reg = <0 0x00a94000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>; - pinctrl-names = "default"; - pinctrl-0 = <&qup_spi13_default>; interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; @@ -2097,12 +2058,11 @@ }; usb_1_qmpphy: phy@88e9000 { - compatible = "qcom,sm8250-qmp-usb3-phy"; + compatible = "qcom,sm8250-qmp-usb3-dp-phy"; reg = <0 0x088e9000 0 0x200>, - <0 0x088e8000 0 0x20>; - reg-names = "reg-base", "dp_com"; + <0 0x088e8000 0 0x40>, + 
<0 0x088ea000 0 0x200>; status = "disabled"; - #clock-cells = <1>; #address-cells = <2>; #size-cells = <2>; ranges; @@ -2116,25 +2076,39 @@ <&gcc GCC_USB3_PHY_PRIM_BCR>; reset-names = "phy", "common"; - usb_1_ssphy: lanes@88e9200 { + usb_1_ssphy: usb3-phy@88e9200 { reg = <0 0x088e9200 0 0x200>, <0 0x088e9400 0 0x200>, <0 0x088e9c00 0 0x400>, <0 0x088e9600 0 0x200>, <0 0x088e9800 0 0x200>, <0 0x088e9a00 0 0x100>; + #clock-cells = <0>; #phy-cells = <0>; clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; clock-names = "pipe0"; clock-output-names = "usb3_phy_pipe_clk_src"; }; + + dp_phy: dp-phy@88ea200 { + reg = <0 0x088ea200 0 0x200>, + <0 0x088ea400 0 0x200>, + <0 0x088eac00 0 0x400>, + <0 0x088ea600 0 0x200>, + <0 0x088ea800 0 0x200>, + <0 0x088eaa00 0 0x100>; + #phy-cells = <0>; + #clock-cells = <1>; + clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; + clock-names = "pipe0"; + clock-output-names = "usb3_phy_pipe_clk_src"; + }; }; usb_2_qmpphy: phy@88eb000 { compatible = "qcom,sm8250-qmp-usb3-uni-phy"; reg = <0 0x088eb000 0 0x200>; status = "disabled"; - #clock-cells = <1>; #address-cells = <2>; #size-cells = <2>; ranges; @@ -2153,6 +2127,7 @@ reg = <0 0x088eb200 0 0x200>, <0 0x088eb400 0 0x200>, <0 0x088eb800 0 0x800>; + #clock-cells = <0>; #phy-cells = <0>; clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>; clock-names = "pipe0"; @@ -2322,15 +2297,86 @@ }; }; + venus: video-codec@aa00000 { + compatible = "qcom,sm8250-venus"; + reg = <0 0x0aa00000 0 0x100000>; + interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>; + power-domains = <&videocc MVS0C_GDSC>, + <&videocc MVS0_GDSC>, + <&rpmhpd SM8250_MX>; + power-domain-names = "venus", "vcodec0", "mx"; + operating-points-v2 = <&venus_opp_table>; + + clocks = <&gcc GCC_VIDEO_AXI0_CLK>, + <&videocc VIDEO_CC_MVS0C_CLK>, + <&videocc VIDEO_CC_MVS0_CLK>; + clock-names = "iface", "core", "vcodec0_core"; + + interconnects = <&gem_noc MASTER_AMPSS_M0 &config_noc SLAVE_VENUS_CFG>, + <&mmss_noc MASTER_VIDEO_P0 &mc_virt SLAVE_EBI_CH0>; + interconnect-names = "cpu-cfg", "video-mem"; + + iommus = <&apps_smmu 0x2100 0x0400>; + memory-region = <&video_mem>; + + resets = <&gcc GCC_VIDEO_AXI0_CLK_ARES>, + <&videocc VIDEO_CC_MVS0C_CLK_ARES>; + reset-names = "bus", "core"; + + video-decoder { + compatible = "venus-decoder"; + }; + + video-encoder { + compatible = "venus-encoder"; + }; + + venus_opp_table: venus-opp-table { + compatible = "operating-points-v2"; + + opp-720000000 { + opp-hz = /bits/ 64 <720000000>; + required-opps = <&rpmhpd_opp_low_svs>; + }; + + opp-1014000000 { + opp-hz = /bits/ 64 <1014000000>; + required-opps = <&rpmhpd_opp_svs>; + }; + + opp-1098000000 { + opp-hz = /bits/ 64 <1098000000>; + required-opps = <&rpmhpd_opp_svs_l1>; + }; + + opp-1332000000 { + opp-hz = /bits/ 64 <1332000000>; + required-opps = <&rpmhpd_opp_nom>; + }; + }; + }; + + videocc: clock-controller@abf0000 { + compatible = "qcom,sm8250-videocc"; + reg = <0 0x0abf0000 0 0x10000>; + clocks = <&gcc GCC_VIDEO_AHB_CLK>, + <&rpmhcc RPMH_CXO_CLK>, + <&rpmhcc RPMH_CXO_CLK_A>; + mmcx-supply = <&mmcx_reg>; + clock-names = "iface", "bi_tcxo", "bi_tcxo_ao"; + #clock-cells = <1>; + #reset-cells = <1>; + #power-domain-cells = <1>; + }; + mdss: mdss@ae00000 { compatible = "qcom,sdm845-mdss"; reg = <0 0x0ae00000 0 0x1000>; reg-names = "mdss"; - interconnects = <&gem_noc MASTER_AMPSS_M0 &config_noc SLAVE_DISPLAY_CFG>, - <&mmss_noc MASTER_MDP_PORT0 &mc_virt SLAVE_EBI_CH0>, + interconnects = <&mmss_noc MASTER_MDP_PORT0 &mc_virt SLAVE_EBI_CH0>, <&mmss_noc MASTER_MDP_PORT1 &mc_virt SLAVE_EBI_CH0>; - interconnect-names 
= "notused", "mdp0-mem", "mdp1-mem"; + interconnect-names = "mdp0-mem", "mdp1-mem"; power-domains = <&dispcc MDSS_GDSC>; @@ -2580,36 +2626,22 @@ dispcc: clock-controller@af00000 { compatible = "qcom,sm8250-dispcc"; - reg = <0 0x0af00000 0 0x20000>; + reg = <0 0x0af00000 0 0x10000>; mmcx-supply = <&mmcx_reg>; clocks = <&rpmhcc RPMH_CXO_CLK>, <&dsi0_phy 0>, <&dsi0_phy 1>, <&dsi1_phy 0>, <&dsi1_phy 1>, - <0>, - <0>, - <0>, - <0>, - <0>, - <0>, - <0>, - <0>, - <&sleep_clk>; + <&dp_phy 0>, + <&dp_phy 1>; clock-names = "bi_tcxo", "dsi0_phy_pll_out_byteclk", "dsi0_phy_pll_out_dsiclk", "dsi1_phy_pll_out_byteclk", "dsi1_phy_pll_out_dsiclk", - "dp_link_clk_divsel_ten", - "dp_vco_divided_clk_src_mux", - "dptx1_phy_pll_link_clk", - "dptx1_phy_pll_vco_div_clk", - "dptx2_phy_pll_link_clk", - "dptx2_phy_pll_vco_div_clk", - "edp_phy_pll_link_clk", - "edp_phy_pll_vco_div_clk", - "sleep_clk"; + "dp_phy_pll_link_clk", + "dp_phy_pll_vco_div_clk"; #clock-cells = <1>; #reset-cells = <1>; #power-domain-cells = <1>; @@ -2647,7 +2679,7 @@ #thermal-sensor-cells = <1>; }; - aoss_qmp: qmp@c300000 { + aoss_qmp: power-controller@c300000 { compatible = "qcom,sm8250-aoss-qmp"; reg = <0 0x0c300000 0 0x100000>; interrupts-extended = <&ipcc IPCC_CLIENT_AOP @@ -2689,7 +2721,7 @@ #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; - gpio-ranges = <&tlmm 0 0 180>; + gpio-ranges = <&tlmm 0 0 181>; wakeup-parent = <&pdc>; pri_mi2s_active: pri-mi2s-active { @@ -2983,304 +3015,324 @@ }; }; - qup_spi0_default: qup-spi0-default { - mux { - pins = "gpio28", "gpio29", - "gpio30", "gpio31"; - function = "qup0"; - }; + qup_spi0_cs: qup-spi0-cs { + pins = "gpio31"; + function = "qup0"; + }; - config { - pins = "gpio28", "gpio29", - "gpio30", "gpio31"; - drive-strength = <6>; - bias-disable; - }; + qup_spi0_cs_gpio: qup-spi0-cs-gpio { + pins = "gpio31"; + function = "gpio"; }; - qup_spi1_default: qup-spi1-default { - mux { - pins = "gpio4", "gpio5", - "gpio6", "gpio7"; - function = "qup1"; - }; + qup_spi0_data_clk: qup-spi0-data-clk { + pins = "gpio28", "gpio29", + "gpio30"; + function = "qup0"; + }; - config { - pins = "gpio4", "gpio5", - "gpio6", "gpio7"; - drive-strength = <6>; - bias-disable; - }; + qup_spi1_cs: qup-spi1-cs { + pins = "gpio7"; + function = "qup1"; }; - qup_spi2_default: qup-spi2-default { - mux { - pins = "gpio115", "gpio116", - "gpio117", "gpio118"; - function = "qup2"; - }; + qup_spi1_cs_gpio: qup-spi1-cs-gpio { + pins = "gpio7"; + function = "gpio"; + }; - config { - pins = "gpio115", "gpio116", - "gpio117", "gpio118"; - drive-strength = <6>; - bias-disable; - }; + qup_spi1_data_clk: qup-spi1-data-clk { + pins = "gpio4", "gpio5", + "gpio6"; + function = "qup1"; }; - qup_spi3_default: qup-spi3-default { - mux { - pins = "gpio119", "gpio120", - "gpio121", "gpio122"; - function = "qup3"; - }; + qup_spi2_cs: qup-spi2-cs { + pins = "gpio118"; + function = "qup2"; + }; - config { - pins = "gpio119", "gpio120", - "gpio121", "gpio122"; - drive-strength = <6>; - bias-disable; - }; + qup_spi2_cs_gpio: qup-spi2-cs-gpio { + pins = "gpio118"; + function = "gpio"; }; - qup_spi4_default: qup-spi4-default { - mux { - pins = "gpio8", "gpio9", - "gpio10", "gpio11"; - function = "qup4"; - }; + qup_spi2_data_clk: qup-spi2-data-clk { + pins = "gpio115", "gpio116", + "gpio117"; + function = "qup2"; + }; - config { - pins = "gpio8", "gpio9", - "gpio10", "gpio11"; - drive-strength = <6>; - bias-disable; - }; + qup_spi3_cs: qup-spi3-cs { + pins = "gpio122"; + function = "qup3"; }; - qup_spi5_default: qup-spi5-default { - mux { 
- pins = "gpio12", "gpio13", - "gpio14", "gpio15"; - function = "qup5"; - }; + qup_spi3_cs_gpio: qup-spi3-cs-gpio { + pins = "gpio122"; + function = "gpio"; + }; - config { - pins = "gpio12", "gpio13", - "gpio14", "gpio15"; - drive-strength = <6>; - bias-disable; - }; + qup_spi3_data_clk: qup-spi3-data-clk { + pins = "gpio119", "gpio120", + "gpio121"; + function = "qup3"; }; - qup_spi6_default: qup-spi6-default { - mux { - pins = "gpio16", "gpio17", - "gpio18", "gpio19"; - function = "qup6"; - }; + qup_spi4_cs: qup-spi4-cs { + pins = "gpio11"; + function = "qup4"; + }; - config { - pins = "gpio16", "gpio17", - "gpio18", "gpio19"; - drive-strength = <6>; - bias-disable; - }; + qup_spi4_cs_gpio: qup-spi4-cs-gpio { + pins = "gpio11"; + function = "gpio"; }; - qup_spi7_default: qup-spi7-default { - mux { - pins = "gpio20", "gpio21", - "gpio22", "gpio23"; - function = "qup7"; - }; + qup_spi4_data_clk: qup-spi4-data-clk { + pins = "gpio8", "gpio9", + "gpio10"; + function = "qup4"; + }; - config { - pins = "gpio20", "gpio21", - "gpio22", "gpio23"; - drive-strength = <6>; - bias-disable; - }; + qup_spi5_cs: qup-spi5-cs { + pins = "gpio15"; + function = "qup5"; }; - qup_spi8_default: qup-spi8-default { - mux { - pins = "gpio24", "gpio25", - "gpio26", "gpio27"; - function = "qup8"; - }; + qup_spi5_cs_gpio: qup-spi5-cs-gpio { + pins = "gpio15"; + function = "gpio"; + }; - config { - pins = "gpio24", "gpio25", - "gpio26", "gpio27"; - drive-strength = <6>; - bias-disable; - }; + qup_spi5_data_clk: qup-spi5-data-clk { + pins = "gpio12", "gpio13", + "gpio14"; + function = "qup5"; }; - qup_spi9_default: qup-spi9-default { - mux { - pins = "gpio125", "gpio126", - "gpio127", "gpio128"; - function = "qup9"; - }; + qup_spi6_cs: qup-spi6-cs { + pins = "gpio19"; + function = "qup6"; + }; - config { - pins = "gpio125", "gpio126", - "gpio127", "gpio128"; - drive-strength = <6>; - bias-disable; - }; + qup_spi6_cs_gpio: qup-spi6-cs-gpio { + pins = "gpio19"; + function = "gpio"; }; - qup_spi10_default: qup-spi10-default { - mux { - pins = "gpio129", "gpio130", - "gpio131", "gpio132"; - function = "qup10"; - }; + qup_spi6_data_clk: qup-spi6-data-clk { + pins = "gpio16", "gpio17", + "gpio18"; + function = "qup6"; + }; - config { - pins = "gpio129", "gpio130", - "gpio131", "gpio132"; - drive-strength = <6>; - bias-disable; - }; + qup_spi7_cs: qup-spi7-cs { + pins = "gpio23"; + function = "qup7"; }; - qup_spi11_default: qup-spi11-default { - mux { - pins = "gpio60", "gpio61", - "gpio62", "gpio63"; - function = "qup11"; - }; + qup_spi7_cs_gpio: qup-spi7-cs-gpio { + pins = "gpio23"; + function = "gpio"; + }; - config { - pins = "gpio60", "gpio61", - "gpio62", "gpio63"; - drive-strength = <6>; - bias-disable; - }; + qup_spi7_data_clk: qup-spi7-data-clk { + pins = "gpio20", "gpio21", + "gpio22"; + function = "qup7"; }; - qup_spi12_default: qup-spi12-default { - mux { - pins = "gpio32", "gpio33", - "gpio34", "gpio35"; - function = "qup12"; - }; + qup_spi8_cs: qup-spi8-cs { + pins = "gpio27"; + function = "qup8"; + }; - config { - pins = "gpio32", "gpio33", - "gpio34", "gpio35"; - drive-strength = <6>; - bias-disable; - }; + qup_spi8_cs_gpio: qup-spi8-cs-gpio { + pins = "gpio27"; + function = "gpio"; }; - qup_spi13_default: qup-spi13-default { - mux { - pins = "gpio36", "gpio37", - "gpio38", "gpio39"; - function = "qup13"; - }; + qup_spi8_data_clk: qup-spi8-data-clk { + pins = "gpio24", "gpio25", + "gpio26"; + function = "qup8"; + }; - config { - pins = "gpio36", "gpio37", - "gpio38", "gpio39"; - drive-strength = <6>; - 
bias-disable; - }; + qup_spi9_cs: qup-spi9-cs { + pins = "gpio128"; + function = "qup9"; }; - qup_spi14_default: qup-spi14-default { - mux { - pins = "gpio40", "gpio41", - "gpio42", "gpio43"; - function = "qup14"; - }; + qup_spi9_cs_gpio: qup-spi9-cs-gpio { + pins = "gpio128"; + function = "gpio"; + }; - config { - pins = "gpio40", "gpio41", - "gpio42", "gpio43"; - drive-strength = <6>; - bias-disable; - }; + qup_spi9_data_clk: qup-spi9-data-clk { + pins = "gpio125", "gpio126", + "gpio127"; + function = "qup9"; }; - qup_spi15_default: qup-spi15-default { - mux { - pins = "gpio44", "gpio45", - "gpio46", "gpio47"; - function = "qup15"; - }; + qup_spi10_cs: qup-spi10-cs { + pins = "gpio132"; + function = "qup10"; + }; - config { - pins = "gpio44", "gpio45", - "gpio46", "gpio47"; - drive-strength = <6>; - bias-disable; - }; + qup_spi10_cs_gpio: qup-spi10-cs-gpio { + pins = "gpio132"; + function = "gpio"; }; - qup_spi16_default: qup-spi16-default { - mux { - pins = "gpio48", "gpio49", - "gpio50", "gpio51"; - function = "qup16"; - }; + qup_spi10_data_clk: qup-spi10-data-clk { + pins = "gpio129", "gpio130", + "gpio131"; + function = "qup10"; + }; - config { - pins = "gpio48", "gpio49", - "gpio50", "gpio51"; - drive-strength = <6>; - bias-disable; - }; + qup_spi11_cs: qup-spi11-cs { + pins = "gpio63"; + function = "qup11"; }; - qup_spi17_default: qup-spi17-default { - mux { - pins = "gpio52", "gpio53", - "gpio54", "gpio55"; - function = "qup17"; - }; + qup_spi11_cs_gpio: qup-spi11-cs-gpio { + pins = "gpio63"; + function = "gpio"; + }; - config { - pins = "gpio52", "gpio53", - "gpio54", "gpio55"; - drive-strength = <6>; - bias-disable; - }; + qup_spi11_data_clk: qup-spi11-data-clk { + pins = "gpio60", "gpio61", + "gpio62"; + function = "qup11"; }; - qup_spi18_default: qup-spi18-default { - mux { - pins = "gpio56", "gpio57", - "gpio58", "gpio59"; - function = "qup18"; - }; + qup_spi12_cs: qup-spi12-cs { + pins = "gpio35"; + function = "qup12"; + }; - config { - pins = "gpio56", "gpio57", - "gpio58", "gpio59"; - drive-strength = <6>; - bias-disable; - }; + qup_spi12_cs_gpio: qup-spi12-cs-gpio { + pins = "gpio35"; + function = "gpio"; }; - qup_spi19_default: qup-spi19-default { - mux { - pins = "gpio0", "gpio1", - "gpio2", "gpio3"; - function = "qup19"; - }; + qup_spi12_data_clk: qup-spi12-data-clk { + pins = "gpio32", "gpio33", + "gpio34"; + function = "qup12"; + }; - config { - pins = "gpio0", "gpio1", - "gpio2", "gpio3"; - drive-strength = <6>; - bias-disable; - }; + qup_spi13_cs: qup-spi13-cs { + pins = "gpio39"; + function = "qup13"; + }; + + qup_spi13_cs_gpio: qup-spi13-cs-gpio { + pins = "gpio39"; + function = "gpio"; + }; + + qup_spi13_data_clk: qup-spi13-data-clk { + pins = "gpio36", "gpio37", + "gpio38"; + function = "qup13"; + }; + + qup_spi14_cs: qup-spi14-cs { + pins = "gpio43"; + function = "qup14"; + }; + + qup_spi14_cs_gpio: qup-spi14-cs-gpio { + pins = "gpio43"; + function = "gpio"; + }; + + qup_spi14_data_clk: qup-spi14-data-clk { + pins = "gpio40", "gpio41", + "gpio42"; + function = "qup14"; + }; + + qup_spi15_cs: qup-spi15-cs { + pins = "gpio47"; + function = "qup15"; + }; + + qup_spi15_cs_gpio: qup-spi15-cs-gpio { + pins = "gpio47"; + function = "gpio"; + }; + + qup_spi15_data_clk: qup-spi15-data-clk { + pins = "gpio44", "gpio45", + "gpio46"; + function = "qup15"; + }; + + qup_spi16_cs: qup-spi16-cs { + pins = "gpio51"; + function = "qup16"; + }; + + qup_spi16_cs_gpio: qup-spi16-cs-gpio { + pins = "gpio51"; + function = "gpio"; + }; + + qup_spi16_data_clk: qup-spi16-data-clk { + 
pins = "gpio48", "gpio49", + "gpio50"; + function = "qup16"; + }; + + qup_spi17_cs: qup-spi17-cs { + pins = "gpio55"; + function = "qup17"; + }; + + qup_spi17_cs_gpio: qup-spi17-cs-gpio { + pins = "gpio55"; + function = "gpio"; + }; + + qup_spi17_data_clk: qup-spi17-data-clk { + pins = "gpio52", "gpio53", + "gpio54"; + function = "qup17"; + }; + + qup_spi18_cs: qup-spi18-cs { + pins = "gpio59"; + function = "qup18"; + }; + + qup_spi18_cs_gpio: qup-spi18-cs-gpio { + pins = "gpio59"; + function = "gpio"; + }; + + qup_spi18_data_clk: qup-spi18-data-clk { + pins = "gpio56", "gpio57", + "gpio58"; + function = "qup18"; + }; + + qup_spi19_cs: qup-spi19-cs { + pins = "gpio3"; + function = "qup19"; + }; + + qup_spi19_cs_gpio: qup-spi19-cs-gpio { + pins = "gpio3"; + function = "gpio"; + }; + + qup_spi19_data_clk: qup-spi19-data-clk { + pins = "gpio0", "gpio1", + "gpio2"; + function = "qup19"; }; qup_uart2_default: qup-uart2-default { @@ -3754,7 +3806,7 @@ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>, <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>, - <GIC_PPI 12 + <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>; }; diff --git a/arch/arm64/boot/dts/qcom/sm8350-hdk.dts b/arch/arm64/boot/dts/qcom/sm8350-hdk.dts new file mode 100644 index 000000000000..f23a0cf3f7b7 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sm8350-hdk.dts @@ -0,0 +1,319 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2020-2021, Linaro Limited + */ + +/dts-v1/; + +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/regulator/qcom,rpmh-regulator.h> +#include "sm8350.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. SM8350 HDK"; + compatible = "qcom,sm8350-hdk", "qcom,sm8350"; + + aliases { + serial0 = &uart2; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + vph_pwr: vph-pwr-regulator { + compatible = "regulator-fixed"; + regulator-name = "vph_pwr"; + regulator-min-microvolt = <3700000>; + regulator-max-microvolt = <3700000>; + + regulator-always-on; + regulator-boot-on; + }; +}; + +&adsp { + status = "okay"; + firmware-name = "qcom/sm8350/adsp.mbn"; +}; + +&apps_rsc { + pm8350-rpmh-regulators { + compatible = "qcom,pm8350-rpmh-regulators"; + qcom,pmic-id = "b"; + + vdd-s1-supply = <&vph_pwr>; + vdd-s2-supply = <&vph_pwr>; + vdd-s3-supply = <&vph_pwr>; + vdd-s4-supply = <&vph_pwr>; + vdd-s5-supply = <&vph_pwr>; + vdd-s6-supply = <&vph_pwr>; + vdd-s7-supply = <&vph_pwr>; + vdd-s8-supply = <&vph_pwr>; + vdd-s9-supply = <&vph_pwr>; + vdd-s10-supply = <&vph_pwr>; + vdd-s11-supply = <&vph_pwr>; + vdd-s12-supply = <&vph_pwr>; + + vdd-l1-l4-supply = <&vreg_s11b_0p95>; + vdd-l2-l7-supply = <&vreg_bob>; + vdd-l3-l5-supply = <&vreg_bob>; + vdd-l6-l9-l10-supply = <&vreg_s11b_0p95>; + + vreg_s10b_1p8: smps10 { + regulator-name = "vreg_s10b_1p8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; + }; + + vreg_s11b_0p95: smps11 { + regulator-name = "vreg_s11b_0p95"; + regulator-min-microvolt = <952000>; + regulator-max-microvolt = <952000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; + }; + + vreg_s12b_1p25: smps12 { + regulator-name = "vreg_s12b_1p25"; + regulator-min-microvolt = <1256000>; + regulator-max-microvolt = <1256000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; + }; + + vreg_l1b_0p88: ldo1 { + regulator-name = "vreg_l1b_0p88"; + regulator-min-microvolt = <912000>; + regulator-max-microvolt = <920000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; + }; + + 
vreg_l2b_3p07: ldo2 { + regulator-name = "vreg_l2b_3p07"; + regulator-min-microvolt = <3072000>; + regulator-max-microvolt = <3072000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; + }; + + vreg_l3b_0p9: ldo3 { + regulator-name = "vreg_l3b_0p9"; + regulator-min-microvolt = <904000>; + regulator-max-microvolt = <904000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; + }; + + vreg_l5b_0p88: ldo5 { + regulator-name = "vreg_l5b_0p88"; + regulator-min-microvolt = <880000>; + regulator-max-microvolt = <888000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; + regulator-allow-set-load; + }; + + vreg_l6b_1p2: ldo6 { + regulator-name = "vreg_l6b_1p2"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1208000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; + regulator-allow-set-load; + }; + + vreg_l7b_2p96: ldo7 { + regulator-name = "vreg_l7b_2p96"; + regulator-min-microvolt = <2504000>; + regulator-max-microvolt = <2504000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; + regulator-allow-set-load; + }; + + vreg_l9b_1p2: ldo9 { + regulator-name = "vreg_l9b_1p2"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; + regulator-allow-set-load; + }; + }; + + pm8350c-rpmh-regulators { + compatible = "qcom,pm8350c-rpmh-regulators"; + qcom,pmic-id = "c"; + + vdd-s1-supply = <&vph_pwr>; + vdd-s2-supply = <&vph_pwr>; + vdd-s3-supply = <&vph_pwr>; + vdd-s4-supply = <&vph_pwr>; + vdd-s5-supply = <&vph_pwr>; + vdd-s6-supply = <&vph_pwr>; + vdd-s7-supply = <&vph_pwr>; + vdd-s8-supply = <&vph_pwr>; + vdd-s9-supply = <&vph_pwr>; + vdd-s10-supply = <&vph_pwr>; + + vdd-l1-l12-supply = <&vreg_s1c_1p86>; + vdd-l2-l8-supply = <&vreg_s1c_1p86>; + vdd-l3-l4-l5-l7-l13-supply = <&vreg_bob>; + vdd-l6-l9-l11-supply = <&vreg_bob>; + vdd-l10-supply = <&vreg_s12b_1p25>; + + vdd-bob-supply = <&vph_pwr>; + + vreg_s1c_1p86: smps1 { + regulator-name = "vreg_s1c_1p86"; + regulator-min-microvolt = <1856000>; + regulator-max-microvolt = <1880000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; + }; + + vreg_bob: bob { + regulator-name = "vreg_bob"; + regulator-min-microvolt = <3008000>; + regulator-max-microvolt = <3960000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>; + }; + + vreg_l1c_1p8: ldo1 { + regulator-name = "vreg_l1c_1p8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; + }; + + vreg_l2c_1p8: ldo2 { + regulator-name = "vreg_l2c_1p8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; + }; + + vreg_l6c_1p8: ldo6 { + regulator-name = "vreg_l6c_1p8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <2960000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; + }; + + vreg_l9c_2p96: ldo9 { + regulator-name = "vreg_l9c_2p96"; + regulator-min-microvolt = <2960000>; + regulator-max-microvolt = <3008000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; + }; + + vreg_l10c_1p2: ldo10 { + regulator-name = "vreg_l10c_1p2"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; + }; + }; +}; + +&cdsp { + status = "okay"; + firmware-name = "qcom/sm8350/cdsp.mbn"; +}; + +&mpss { + status = "okay"; + firmware-name = "qcom/sm8350/modem.mbn"; +}; + +&qupv3_id_1 { + status = "okay"; +}; + +&slpi { + status = "okay"; + 
firmware-name = "qcom/sm8350/slpi.mbn"; +}; + +&tlmm { + gpio-reserved-ranges = <52 8>; +}; + +&uart2 { + status = "okay"; +}; + +&ufs_mem_hc { + status = "okay"; + + reset-gpios = <&tlmm 203 GPIO_ACTIVE_LOW>; + + vcc-supply = <&vreg_l7b_2p96>; + vcc-max-microamp = <800000>; + vccq-supply = <&vreg_l9b_1p2>; + vccq-max-microamp = <900000>; +}; + +&ufs_mem_phy { + status = "okay"; + + vdda-phy-supply = <&vreg_l5b_0p88>; + vdda-max-microamp = <91600>; + vdda-pll-supply = <&vreg_l6b_1p2>; + vdda-pll-max-microamp = <19000>; +}; + +&usb_1 { + status = "okay"; +}; + +&usb_1_dwc3 { + /* TODO: Define USB-C connector properly */ + dr_mode = "peripheral"; +}; + +&usb_1_hsphy { + status = "okay"; + + vdda-pll-supply = <&vreg_l5b_0p88>; + vdda18-supply = <&vreg_l1c_1p8>; + vdda33-supply = <&vreg_l2b_3p07>; +}; + +&usb_1_qmpphy { + status = "okay"; + + vdda-phy-supply = <&vreg_l6b_1p2>; + vdda-pll-supply = <&vreg_l1b_0p88>; +}; + +&usb_2 { + status = "okay"; +}; + +&usb_2_dwc3 { + dr_mode = "host"; + + pinctrl-names = "default"; + pinctrl-0 = <&usb_hub_enabled_state>; +}; + +&usb_2_hsphy { + status = "okay"; + + vdda-pll-supply = <&vreg_l5b_0p88>; + vdda18-supply = <&vreg_l1c_1p8>; + vdda33-supply = <&vreg_l2b_3p07>; +}; + +&usb_2_qmpphy { + status = "okay"; + + vdda-phy-supply = <&vreg_l6b_1p2>; + vdda-pll-supply = <&vreg_l5b_0p88>; +}; + +/* PINCTRL - additions to nodes defined in sm8350.dtsi */ + +&tlmm { + usb_hub_enabled_state: usb-hub-enabled-state { + pins = "gpio42"; + function = "gpio"; + + drive-strength = <2>; + output-low; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8350-mtp.dts b/arch/arm64/boot/dts/qcom/sm8350-mtp.dts index 8923657579fb..6ca638b4e321 100644 --- a/arch/arm64/boot/dts/qcom/sm8350-mtp.dts +++ b/arch/arm64/boot/dts/qcom/sm8350-mtp.dts @@ -5,8 +5,15 @@ /dts-v1/; +#include <dt-bindings/gpio/gpio.h> #include <dt-bindings/regulator/qcom,rpmh-regulator.h> #include "sm8350.dtsi" +#include "pm8350.dtsi" +#include "pm8350b.dtsi" +#include "pm8350c.dtsi" +#include "pmk8350.dtsi" +#include "pmr735a.dtsi" +#include "pmr735b.dtsi" / { model = "Qualcomm Technologies, Inc. 
sm8350 MTP"; @@ -31,6 +38,11 @@ }; }; +&adsp { + status = "okay"; + firmware-name = "qcom/sm8350/adsp.mbn"; +}; + &apps_rsc { pm8350-rpmh-regulators { compatible = "qcom,pm8350-rpmh-regulators"; @@ -56,57 +68,67 @@ vdd-l8-supply = <&vreg_s2c_0p8>; vreg_s10b_1p8: smps10 { + regulator-name = "vreg_s10b_1p8"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; }; vreg_s11b_0p95: smps11 { + regulator-name = "vreg_s11b_0p95"; regulator-min-microvolt = <752000>; regulator-max-microvolt = <1000000>; }; vreg_s12b_1p25: smps12 { + regulator-name = "vreg_s12b_1p25"; regulator-min-microvolt = <1224000>; regulator-max-microvolt = <1360000>; }; vreg_l1b_0p88: ldo1 { + regulator-name = "vreg_l1b_0p88"; regulator-min-microvolt = <912000>; regulator-max-microvolt = <920000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; }; vreg_l2b_3p07: ldo2 { + regulator-name = "vreg_l2b_3p07"; regulator-min-microvolt = <3072000>; regulator-max-microvolt = <3072000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; }; vreg_l3b_0p9: ldo3 { + regulator-name = "vreg_l3b_0p9"; regulator-min-microvolt = <904000>; regulator-max-microvolt = <904000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; }; vreg_l5b_0p88: ldo5 { + regulator-name = "vreg_l3b_0p9"; regulator-min-microvolt = <880000>; regulator-max-microvolt = <888000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; }; vreg_l6b_1p2: ldo6 { + regulator-name = "vreg_l6b_1p2"; regulator-min-microvolt = <1200000>; regulator-max-microvolt = <1208000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; }; vreg_l7b_2p96: ldo7 { + regulator-name = "vreg_l7b_2p96"; regulator-min-microvolt = <2400000>; regulator-max-microvolt = <3008000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; }; vreg_l9b_1p2: ldo9 { + regulator-name = "vreg_l9b_1p2"; regulator-min-microvolt = <1200000>; regulator-max-microvolt = <1200000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; @@ -137,99 +159,116 @@ vdd-bob-supply = <&vph_pwr>; vreg_s1c_1p86: smps1 { + regulator-name = "vreg_s1c_1p86"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1952000>; }; vreg_s2c_0p8: smps2 { + regulator-name = "vreg_s2c_0p8"; regulator-min-microvolt = <640000>; regulator-max-microvolt = <1000000>; }; vreg_s10c_1p05: smps10 { + regulator-name = "vreg_s10c_1p05"; regulator-min-microvolt = <1048000>; regulator-max-microvolt = <1128000>; }; vreg_bob: bob { + regulator-name = "vreg_bob"; regulator-min-microvolt = <3008000>; regulator-max-microvolt = <3960000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>; }; vreg_l1c_1p8: ldo1 { + regulator-name = "vreg_l1c_1p8"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; }; vreg_l2c_1p8: ldo2 { + regulator-name = "vreg_l2c_1p8"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; }; vreg_l3c_3p0: ldo3 { + regulator-name = "vreg_l3c_3p0"; regulator-min-microvolt = <3008000>; regulator-max-microvolt = <3008000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; }; vreg_l4c_uim1: ldo4 { + regulator-name = "vreg_l4c_uim1"; regulator-min-microvolt = <1704000>; regulator-max-microvolt = <3000000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; }; vreg_l5c_uim2: ldo5 { + regulator-name = "vreg_l5c_uim2"; regulator-min-microvolt = <1704000>; regulator-max-microvolt = <3000000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; }; vreg_l6c_1p8: ldo6 { + 
regulator-name = "vreg_l6c_1p8"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <2960000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; }; vreg_l7c_3p0: ldo7 { + regulator-name = "vreg_l7c_3p0"; regulator-min-microvolt = <3008000>; regulator-max-microvolt = <3008000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; }; vreg_l8c_1p8: ldo8 { + regulator-name = "vreg_l8c_1p8"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; }; vreg_l9c_2p96: ldo9 { + regulator-name = "vreg_l9c_2p96"; regulator-min-microvolt = <2960000>; regulator-max-microvolt = <3008000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; }; vreg_l10c_1p2: ldo10 { + regulator-name = "vreg_l10c_1p2"; regulator-min-microvolt = <1200000>; regulator-max-microvolt = <1200000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; }; vreg_l11c_2p96: ldo11 { + regulator-name = "vreg_l11c_2p96"; regulator-min-microvolt = <2400000>; regulator-max-microvolt = <3008000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; }; vreg_l12c_1p8: ldo12 { + regulator-name = "vreg_l12c_1p8"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <2000000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; }; vreg_l13c_3p0: ldo13 { + regulator-name = "vreg_l13c_3p0"; regulator-min-microvolt = <3000000>; regulator-max-microvolt = <3000000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; @@ -237,10 +276,25 @@ }; }; +&cdsp { + status = "okay"; + firmware-name = "qcom/sm8350/cdsp.mbn"; +}; + +&mpss { + status = "okay"; + firmware-name = "qcom/sm8350/modem.mbn"; +}; + &qupv3_id_1 { status = "okay"; }; +&slpi { + status = "okay"; + firmware-name = "qcom/sm8350/slpi.mbn"; +}; + &tlmm { gpio-reserved-ranges = <52 8>; }; @@ -248,3 +302,65 @@ &uart2 { status = "okay"; }; + +&ufs_mem_hc { + status = "okay"; + + reset-gpios = <&tlmm 203 GPIO_ACTIVE_LOW>; + + vcc-supply = <&vreg_l7b_2p96>; + vcc-max-microamp = <800000>; + vccq-supply = <&vreg_l9b_1p2>; + vccq-max-microamp = <900000>; +}; + +&ufs_mem_phy { + status = "okay"; + + vdda-phy-supply = <&vreg_l5b_0p88>; + vdda-max-microamp = <91600>; + vdda-pll-supply = <&vreg_l6b_1p2>; + vdda-pll-max-microamp = <19000>; +}; + +&usb_1 { + status = "okay"; +}; + +&usb_1_dwc3 { + dr_mode = "peripheral"; +}; + +&usb_1_hsphy { + status = "okay"; + + vdda-pll-supply = <&vreg_l5b_0p88>; + vdda18-supply = <&vreg_l1c_1p8>; + vdda33-supply = <&vreg_l2b_3p07>; +}; + +&usb_1_qmpphy { + status = "okay"; + + vdda-phy-supply = <&vreg_l6b_1p2>; + vdda-pll-supply = <&vreg_l1b_0p88>; +}; + +&usb_2 { + status = "okay"; +}; + +&usb_2_hsphy { + status = "okay"; + + vdda-pll-supply = <&vreg_l5b_0p88>; + vdda18-supply = <&vreg_l1c_1p8>; + vdda33-supply = <&vreg_l2b_3p07>; +}; + +&usb_2_qmpphy { + status = "okay"; + + vdda-phy-supply = <&vreg_l6b_1p2>; + vdda-pll-supply = <&vreg_l5b_0p88>; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi index 5ef460458f5c..ed0b51bc03ea 100644 --- a/arch/arm64/boot/dts/qcom/sm8350.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi @@ -1,14 +1,16 @@ // SPDX-License-Identifier: BSD-3-Clause /* - * Copyright (c) 2020, Linaro Limaited + * Copyright (c) 2020, Linaro Limited */ #include <dt-bindings/interrupt-controller/arm-gic.h> +#include <dt-bindings/clock/qcom,gcc-sm8350.h> #include <dt-bindings/clock/qcom,rpmh.h> #include <dt-bindings/mailbox/qcom-ipcc.h> #include <dt-bindings/power/qcom-aoss-qmp.h> #include <dt-bindings/power/qcom-rpmpd.h> #include 
<dt-bindings/soc/qcom,rpmh-rsc.h> +#include <dt-bindings/thermal/thermal.h> / { interrupt-parent = <&intc>; @@ -43,6 +45,8 @@ reg = <0x0 0x0>; enable-method = "psci"; next-level-cache = <&L2_0>; + qcom,freq-domain = <&cpufreq_hw 0>; + #cooling-cells = <2>; L2_0: l2-cache { compatible = "cache"; next-level-cache = <&L3_0>; @@ -58,6 +62,8 @@ reg = <0x0 0x100>; enable-method = "psci"; next-level-cache = <&L2_100>; + qcom,freq-domain = <&cpufreq_hw 0>; + #cooling-cells = <2>; L2_100: l2-cache { compatible = "cache"; next-level-cache = <&L3_0>; @@ -70,6 +76,8 @@ reg = <0x0 0x200>; enable-method = "psci"; next-level-cache = <&L2_200>; + qcom,freq-domain = <&cpufreq_hw 0>; + #cooling-cells = <2>; L2_200: l2-cache { compatible = "cache"; next-level-cache = <&L3_0>; @@ -82,6 +90,8 @@ reg = <0x0 0x300>; enable-method = "psci"; next-level-cache = <&L2_300>; + qcom,freq-domain = <&cpufreq_hw 0>; + #cooling-cells = <2>; L2_300: l2-cache { compatible = "cache"; next-level-cache = <&L3_0>; @@ -94,6 +104,8 @@ reg = <0x0 0x400>; enable-method = "psci"; next-level-cache = <&L2_400>; + qcom,freq-domain = <&cpufreq_hw 1>; + #cooling-cells = <2>; L2_400: l2-cache { compatible = "cache"; next-level-cache = <&L3_0>; @@ -106,6 +118,8 @@ reg = <0x0 0x500>; enable-method = "psci"; next-level-cache = <&L2_500>; + qcom,freq-domain = <&cpufreq_hw 1>; + #cooling-cells = <2>; L2_500: l2-cache { compatible = "cache"; next-level-cache = <&L3_0>; @@ -119,6 +133,8 @@ reg = <0x0 0x600>; enable-method = "psci"; next-level-cache = <&L2_600>; + qcom,freq-domain = <&cpufreq_hw 1>; + #cooling-cells = <2>; L2_600: l2-cache { compatible = "cache"; next-level-cache = <&L3_0>; @@ -131,6 +147,8 @@ reg = <0x0 0x700>; enable-method = "psci"; next-level-cache = <&L2_700>; + qcom,freq-domain = <&cpufreq_hw 2>; + #cooling-cells = <2>; L2_700: l2-cache { compatible = "cache"; next-level-cache = <&L3_0>; @@ -153,7 +171,7 @@ pmu { compatible = "arm,armv8-pmuv3"; - interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>; }; psci { @@ -257,6 +275,15 @@ no-map; }; + rmtfs_mem: memory@9b800000 { + compatible = "qcom,rmtfs-mem"; + reg = <0x0 0x9b800000 0x0 0x280000>; + no-map; + + qcom,client-id = <1>; + qcom,vmid = <15>; + }; + hyp_reserved_mem: memory@d0000000 { reg = <0x0 0xd0000000 0x0 0x800000>; no-map; @@ -294,6 +321,102 @@ hwlocks = <&tcsr_mutex 3>; }; + smp2p-adsp { + compatible = "qcom,smp2p"; + qcom,smem = <443>, <429>; + interrupts-extended = <&ipcc IPCC_CLIENT_LPASS + IPCC_MPROC_SIGNAL_SMP2P + IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc IPCC_CLIENT_LPASS + IPCC_MPROC_SIGNAL_SMP2P>; + + qcom,local-pid = <0>; + qcom,remote-pid = <2>; + + smp2p_adsp_out: master-kernel { + qcom,entry-name = "master-kernel"; + #qcom,smem-state-cells = <1>; + }; + + smp2p_adsp_in: slave-kernel { + qcom,entry-name = "slave-kernel"; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; + + smp2p-cdsp { + compatible = "qcom,smp2p"; + qcom,smem = <94>, <432>; + interrupts-extended = <&ipcc IPCC_CLIENT_CDSP + IPCC_MPROC_SIGNAL_SMP2P + IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc IPCC_CLIENT_CDSP + IPCC_MPROC_SIGNAL_SMP2P>; + + qcom,local-pid = <0>; + qcom,remote-pid = <5>; + + smp2p_cdsp_out: master-kernel { + qcom,entry-name = "master-kernel"; + #qcom,smem-state-cells = <1>; + }; + + smp2p_cdsp_in: slave-kernel { + qcom,entry-name = "slave-kernel"; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; + + smp2p-modem { + compatible = "qcom,smp2p"; + qcom,smem = <435>, <428>; + interrupts-extended = <&ipcc IPCC_CLIENT_MPSS 
+ IPCC_MPROC_SIGNAL_SMP2P + IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc IPCC_CLIENT_MPSS + IPCC_MPROC_SIGNAL_SMP2P>; + + qcom,local-pid = <0>; + qcom,remote-pid = <1>; + + smp2p_modem_out: master-kernel { + qcom,entry-name = "master-kernel"; + #qcom,smem-state-cells = <1>; + }; + + smp2p_modem_in: slave-kernel { + qcom,entry-name = "slave-kernel"; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; + + smp2p-slpi { + compatible = "qcom,smp2p"; + qcom,smem = <481>, <430>; + interrupts-extended = <&ipcc IPCC_CLIENT_SLPI + IPCC_MPROC_SIGNAL_SMP2P + IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc IPCC_CLIENT_SLPI + IPCC_MPROC_SIGNAL_SMP2P>; + + qcom,local-pid = <0>; + qcom,remote-pid = <3>; + + smp2p_slpi_out: master-kernel { + qcom,entry-name = "master-kernel"; + #qcom,smem-state-cells = <1>; + }; + + smp2p_slpi_in: slave-kernel { + qcom,entry-name = "slave-kernel"; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; + soc: soc@0 { #address-cells = <2>; #size-cells = <2>; @@ -324,8 +447,8 @@ compatible = "qcom,geni-se-qup"; reg = <0x0 0x009c0000 0x0 0x6000>; clock-names = "m-ahb", "s-ahb"; - clocks = <&gcc 121>, - <&gcc 122>; + clocks = <&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; #address-cells = <2>; #size-cells = <2>; ranges; @@ -335,7 +458,7 @@ compatible = "qcom,geni-debug-uart"; reg = <0 0x0098c000 0 0x4000>; clock-names = "se"; - clocks = <&gcc 83>; + clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>; pinctrl-names = "default"; pinctrl-0 = <&qup_uart3_default_state>; interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>; @@ -345,12 +468,215 @@ }; }; + apps_smmu: iommu@15000000 { + compatible = "qcom,sm8350-smmu-500", "arm,mmu-500"; + reg = <0 0x15000000 0 0x100000>; + #iommu-cells = <2>; + #global-interrupts = <2>; + interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>, + 
<GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 690 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 691 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 692 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 693 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 694 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 695 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 696 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 707 IRQ_TYPE_LEVEL_HIGH>; + }; + + config_noc: interconnect@1500000 { + compatible = "qcom,sm8350-config-noc"; + reg = <0 0x01500000 0 0xa580>; + #interconnect-cells = <1>; + qcom,bcm-voters = <&apps_bcm_voter>; + }; + + mc_virt: interconnect@1580000 { + compatible = "qcom,sm8350-mc-virt"; + reg = <0 0x01580000 0 0x1000>; + #interconnect-cells = <1>; + qcom,bcm-voters = <&apps_bcm_voter>; + }; + + system_noc: interconnect@1680000 { + compatible = "qcom,sm8350-system-noc"; + reg = <0 0x01680000 0 0x1c200>; + #interconnect-cells = <1>; + qcom,bcm-voters = <&apps_bcm_voter>; + }; + + aggre1_noc: interconnect@16e0000 { + compatible = "qcom,sm8350-aggre1-noc"; + reg = <0 0x016e0000 0 0x1f180>; + #interconnect-cells = <1>; + qcom,bcm-voters = <&apps_bcm_voter>; + }; + + aggre2_noc: interconnect@1700000 { + compatible = "qcom,sm8350-aggre2-noc"; + reg = <0 0x01700000 0 0x33000>; + #interconnect-cells = <1>; + qcom,bcm-voters = <&apps_bcm_voter>; + }; + + mmss_noc: interconnect@1740000 { + compatible = "qcom,sm8350-mmss-noc"; + reg = <0 0x01740000 0 0x1f080>; + #interconnect-cells = <1>; + qcom,bcm-voters = <&apps_bcm_voter>; + }; + + lpass_ag_noc: interconnect@3c40000 { + compatible = "qcom,sm8350-lpass-ag-noc"; + reg = <0 0x03c40000 0 0xf080>; + #interconnect-cells = <1>; + qcom,bcm-voters = <&apps_bcm_voter>; + }; + + compute_noc: interconnect@a0c0000{ + compatible = "qcom,sm8350-compute-noc"; + reg = <0 0x0a0c0000 0 0xa180>; + #interconnect-cells = <1>; + qcom,bcm-voters = <&apps_bcm_voter>; + }; + tcsr_mutex: hwlock@1f40000 { compatible = "qcom,tcsr-mutex"; reg = <0x0 0x01f40000 0x0 0x40000>; #hwlock-cells = <1>; }; + mpss: remoteproc@4080000 { + compatible = "qcom,sm8350-mpss-pas"; + reg = <0x0 0x04080000 
0x0 0x4040>; + + interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>, + <&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>, + <&smp2p_modem_in 1 IRQ_TYPE_EDGE_RISING>, + <&smp2p_modem_in 2 IRQ_TYPE_EDGE_RISING>, + <&smp2p_modem_in 3 IRQ_TYPE_EDGE_RISING>, + <&smp2p_modem_in 7 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "wdog", "fatal", "ready", "handover", + "stop-ack", "shutdown-ack"; + + clocks = <&rpmhcc RPMH_CXO_CLK>; + clock-names = "xo"; + + power-domains = <&aoss_qmp AOSS_QMP_LS_MODEM>, + <&rpmhpd 0>, + <&rpmhpd 12>; + power-domain-names = "load_state", "cx", "mss"; + + interconnects = <&mc_virt 0 &mc_virt 1>; + + memory-region = <&pil_modem_mem>; + + qcom,smem-states = <&smp2p_modem_out 0>; + qcom,smem-state-names = "stop"; + + status = "disabled"; + + glink-edge { + interrupts-extended = <&ipcc IPCC_CLIENT_MPSS + IPCC_MPROC_SIGNAL_GLINK_QMP + IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc IPCC_CLIENT_MPSS + IPCC_MPROC_SIGNAL_GLINK_QMP>; + interrupts = <GIC_SPI 449 IRQ_TYPE_EDGE_RISING>; + label = "modem"; + qcom,remote-pid = <1>; + }; + }; + pdc: interrupt-controller@b220000 { compatible = "qcom,sm8350-pdc", "qcom,pdc"; reg = <0 0x0b220000 0 0x30000>, <0 0x17c000f0 0 0x60>; @@ -363,7 +689,29 @@ interrupt-controller; }; - aoss_qmp: qmp@c300000 { + tsens0: thermal-sensor@c222000 { + compatible = "qcom,sm8350-tsens", "qcom,tsens-v2"; + reg = <0 0x0c263000 0 0x1ff>, /* TM */ + <0 0x0c222000 0 0x8>; /* SROT */ + #qcom,sensors = <15>; + interrupts = <&pdc 26 IRQ_TYPE_LEVEL_HIGH>, + <&pdc 28 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "uplow", "critical"; + #thermal-sensor-cells = <1>; + }; + + tsens1: thermal-sensor@c223000 { + compatible = "qcom,sm8350-tsens", "qcom,tsens-v2"; + reg = <0 0x0c265000 0 0x1ff>, /* TM */ + <0 0x0c223000 0 0x8>; /* SROT */ + #qcom,sensors = <14>; + interrupts = <&pdc 27 IRQ_TYPE_LEVEL_HIGH>, + <&pdc 29 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "uplow", "critical"; + #thermal-sensor-cells = <1>; + }; + + aoss_qmp: power-controller@c300000 { compatible = "qcom,sm8350-aoss-qmp"; reg = <0 0x0c300000 0 0x100000>; interrupts-extended = <&ipcc IPCC_CLIENT_AOP IPCC_MPROC_SIGNAL_GLINK_QMP @@ -374,6 +722,24 @@ #power-domain-cells = <1>; }; + spmi_bus: spmi@c440000 { + compatible = "qcom,spmi-pmic-arb"; + reg = <0x0 0xc440000 0x0 0x1100>, + <0x0 0xc600000 0x0 0x2000000>, + <0x0 0xe600000 0x0 0x100000>, + <0x0 0xe700000 0x0 0xa0000>, + <0x0 0xc40a000 0x0 0x26000>; + reg-names = "core", "chnls", "obsrvr", "intr", "cnfg"; + interrupt-names = "periph_irq"; + interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>; + qcom,ee = <0>; + qcom,channel = <0>; + #address-cells = <2>; + #size-cells = <0>; + interrupt-controller; + #interrupt-cells = <4>; + }; + tlmm: pinctrl@f100000 { compatible = "qcom,sm8350-tlmm"; reg = <0 0x0f100000 0 0x300000>; @@ -382,7 +748,7 @@ #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; - gpio-ranges = <&tlmm 0 0 203>; + gpio-ranges = <&tlmm 0 0 204>; qup_uart3_default_state: qup-uart3-default-state { rx { @@ -396,6 +762,13 @@ }; }; + rng: rng@10d3000 { + compatible = "qcom,prng-ee"; + reg = <0 0x010d3000 0 0x1000>; + clocks = <&rpmhcc RPMH_HWKM_CLK>; + clock-names = "core"; + }; + intc: interrupt-controller@17a00000 { compatible = "arm,gic-v3"; #interrupt-cells = <3>; @@ -486,6 +859,1256 @@ clocks = <&xo_board>; }; + rpmhpd: power-controller { + compatible = "qcom,sm8350-rpmhpd"; + #power-domain-cells = <1>; + operating-points-v2 = <&rpmhpd_opp_table>; + + rpmhpd_opp_table: opp-table { + compatible = "operating-points-v2"; + + rpmhpd_opp_ret: opp1 
{ + opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>; + }; + + rpmhpd_opp_min_svs: opp2 { + opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>; + }; + + rpmhpd_opp_low_svs: opp3 { + opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>; + }; + + rpmhpd_opp_svs: opp4 { + opp-level = <RPMH_REGULATOR_LEVEL_SVS>; + }; + + rpmhpd_opp_svs_l1: opp5 { + opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>; + }; + + rpmhpd_opp_nom: opp6 { + opp-level = <RPMH_REGULATOR_LEVEL_NOM>; + }; + + rpmhpd_opp_nom_l1: opp7 { + opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>; + }; + + rpmhpd_opp_nom_l2: opp8 { + opp-level = <RPMH_REGULATOR_LEVEL_NOM_L2>; + }; + + rpmhpd_opp_turbo: opp9 { + opp-level = <RPMH_REGULATOR_LEVEL_TURBO>; + }; + + rpmhpd_opp_turbo_l1: opp10 { + opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>; + }; + }; + }; + + apps_bcm_voter: bcm_voter { + compatible = "qcom,bcm-voter"; + }; + }; + + cpufreq_hw: cpufreq@18591000 { + compatible = "qcom,sm8350-cpufreq-epss", "qcom,cpufreq-epss"; + reg = <0 0x18591000 0 0x1000>, + <0 0x18592000 0 0x1000>, + <0 0x18593000 0 0x1000>; + reg-names = "freq-domain0", "freq-domain1", "freq-domain2"; + + clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>; + clock-names = "xo", "alternate"; + + #freq-domain-cells = <1>; + }; + + ufs_mem_hc: ufshc@1d84000 { + compatible = "qcom,sm8350-ufshc", "qcom,ufshc", + "jedec,ufs-2.0"; + reg = <0 0x01d84000 0 0x3000>; + interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>; + phys = <&ufs_mem_phy_lanes>; + phy-names = "ufsphy"; + lanes-per-direction = <2>; + #reset-cells = <1>; + resets = <&gcc GCC_UFS_PHY_BCR>; + reset-names = "rst"; + + power-domains = <&gcc UFS_PHY_GDSC>; + + iommus = <&apps_smmu 0xe0 0x0>; + + clock-names = + "ref_clk", + "core_clk", + "bus_aggr_clk", + "iface_clk", + "core_clk_unipro", + "ref_clk", + "tx_lane0_sync_clk", + "rx_lane0_sync_clk", + "rx_lane1_sync_clk"; + clocks = + <&rpmhcc RPMH_CXO_CLK>, + <&gcc GCC_UFS_PHY_AXI_CLK>, + <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>, + <&gcc GCC_UFS_PHY_AHB_CLK>, + <&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>, + <&rpmhcc RPMH_CXO_CLK>, + <&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>, + <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>, + <&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>; + freq-table-hz = + <75000000 300000000>, + <75000000 300000000>, + <0 0>, + <0 0>, + <75000000 300000000>, + <0 0>, + <0 0>, + <75000000 300000000>, + <75000000 300000000>; + status = "disabled"; + }; + + ufs_mem_phy: phy@1d87000 { + compatible = "qcom,sm8350-qmp-ufs-phy"; + reg = <0 0x01d87000 0 0xe10>; + #address-cells = <2>; + #size-cells = <2>; + #clock-cells = <1>; + ranges; + clock-names = "ref", + "ref_aux"; + clocks = <&rpmhcc RPMH_CXO_CLK>, + <&gcc GCC_UFS_PHY_PHY_AUX_CLK>; + + resets = <&ufs_mem_hc 0>; + reset-names = "ufsphy"; + status = "disabled"; + + ufs_mem_phy_lanes: lanes@1d87400 { + reg = <0 0x01d87400 0 0x108>, + <0 0x01d87600 0 0x1e0>, + <0 0x01d87c00 0 0x1dc>, + <0 0x01d87800 0 0x108>, + <0 0x01d87a00 0 0x1e0>; + #phy-cells = <0>; + #clock-cells = <0>; + }; + }; + + slpi: remoteproc@5c00000 { + compatible = "qcom,sm8350-slpi-pas"; + reg = <0 0x05c00000 0 0x4000>; + + interrupts-extended = <&pdc 9 IRQ_TYPE_LEVEL_HIGH>, + <&smp2p_slpi_in 0 IRQ_TYPE_EDGE_RISING>, + <&smp2p_slpi_in 1 IRQ_TYPE_EDGE_RISING>, + <&smp2p_slpi_in 2 IRQ_TYPE_EDGE_RISING>, + <&smp2p_slpi_in 3 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "wdog", "fatal", "ready", + "handover", "stop-ack"; + + clocks = <&rpmhcc RPMH_CXO_CLK>; + clock-names = "xo"; + + power-domains = <&aoss_qmp AOSS_QMP_LS_SLPI>, + <&rpmhpd 4>, + <&rpmhpd 5>; + power-domain-names = "load_state", "lcx", "lmx"; + + memory-region = 
<&pil_slpi_mem>; + + qcom,smem-states = <&smp2p_slpi_out 0>; + qcom,smem-state-names = "stop"; + + status = "disabled"; + + glink-edge { + interrupts-extended = <&ipcc IPCC_CLIENT_SLPI + IPCC_MPROC_SIGNAL_GLINK_QMP + IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc IPCC_CLIENT_SLPI + IPCC_MPROC_SIGNAL_GLINK_QMP>; + + label = "slpi"; + qcom,remote-pid = <3>; + + }; + }; + + cdsp: remoteproc@98900000 { + compatible = "qcom,sm8350-cdsp-pas"; + reg = <0 0x098900000 0 0x1400000>; + + interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_LEVEL_HIGH>, + <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>, + <&smp2p_cdsp_in 1 IRQ_TYPE_EDGE_RISING>, + <&smp2p_cdsp_in 2 IRQ_TYPE_EDGE_RISING>, + <&smp2p_cdsp_in 3 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "wdog", "fatal", "ready", + "handover", "stop-ack"; + + clocks = <&rpmhcc RPMH_CXO_CLK>; + clock-names = "xo"; + + power-domains = <&aoss_qmp AOSS_QMP_LS_CDSP>, + <&rpmhpd 0>, + <&rpmhpd 10>; + power-domain-names = "load_state", "cx", "mxc"; + + interconnects = <&compute_noc 1 &mc_virt 1>; + + memory-region = <&pil_cdsp_mem>; + + qcom,smem-states = <&smp2p_cdsp_out 0>; + qcom,smem-state-names = "stop"; + + status = "disabled"; + + glink-edge { + interrupts-extended = <&ipcc IPCC_CLIENT_CDSP + IPCC_MPROC_SIGNAL_GLINK_QMP + IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc IPCC_CLIENT_CDSP + IPCC_MPROC_SIGNAL_GLINK_QMP>; + + label = "cdsp"; + qcom,remote-pid = <5>; + }; + }; + + usb_1_hsphy: phy@88e3000 { + compatible = "qcom,sm8350-usb-hs-phy", + "qcom,usb-snps-hs-7nm-phy"; + reg = <0 0x088e3000 0 0x400>; + status = "disabled"; + #phy-cells = <0>; + + clocks = <&rpmhcc RPMH_CXO_CLK>; + clock-names = "ref"; + + resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>; + }; + + usb_2_hsphy: phy@88e4000 { + compatible = "qcom,sm8250-usb-hs-phy", + "qcom,usb-snps-hs-7nm-phy"; + reg = <0 0x088e4000 0 0x400>; + status = "disabled"; + #phy-cells = <0>; + + clocks = <&rpmhcc RPMH_CXO_CLK>; + clock-names = "ref"; + + resets = <&gcc GCC_QUSB2PHY_SEC_BCR>; + }; + + usb_1_qmpphy: phy-wrapper@88e9000 { + compatible = "qcom,sm8350-qmp-usb3-phy"; + reg = <0 0x088e9000 0 0x200>, + <0 0x088e8000 0 0x20>; + reg-names = "reg-base", "dp_com"; + status = "disabled"; + #clock-cells = <1>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + + clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>, + <&rpmhcc RPMH_CXO_CLK>, + <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>; + clock-names = "aux", "ref_clk_src", "com_aux"; + + resets = <&gcc GCC_USB3_DP_PHY_PRIM_BCR>, + <&gcc GCC_USB3_PHY_PRIM_BCR>; + reset-names = "phy", "common"; + + usb_1_ssphy: phy@88e9200 { + reg = <0 0x088e9200 0 0x200>, + <0 0x088e9400 0 0x200>, + <0 0x088e9c00 0 0x400>, + <0 0x088e9600 0 0x200>, + <0 0x088e9800 0 0x200>, + <0 0x088e9a00 0 0x100>; + #phy-cells = <0>; + #clock-cells = <1>; + clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; + clock-names = "pipe0"; + clock-output-names = "usb3_phy_pipe_clk_src"; + }; + }; + + usb_2_qmpphy: phy-wrapper@88eb000 { + compatible = "qcom,sm8350-qmp-usb3-uni-phy"; + reg = <0 0x088eb000 0 0x200>; + status = "disabled"; + #clock-cells = <1>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + + clocks = <&gcc GCC_USB3_SEC_PHY_AUX_CLK>, + <&rpmhcc RPMH_CXO_CLK>, + <&gcc GCC_USB3_SEC_CLKREF_EN>, + <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>; + clock-names = "aux", "ref_clk_src", "ref", "com_aux"; + + resets = <&gcc GCC_USB3PHY_PHY_SEC_BCR>, + <&gcc GCC_USB3_PHY_SEC_BCR>; + reset-names = "phy", "common"; + + usb_2_ssphy: phy@88ebe00 { + reg = <0 0x088ebe00 0 0x200>, + <0 0x088ec000 0 0x200>, + <0 0x088eb200 0 0x1100>; + #phy-cells = <0>; + 
#clock-cells = <1>; + clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>; + clock-names = "pipe0"; + clock-output-names = "usb3_uni_phy_pipe_clk_src"; + }; + }; + + dc_noc: interconnect@90e0000 { + compatible = "qcom,sm8350-dc-noc"; + reg = <0 0x090c0000 0 0x4200>; + #interconnect-cells = <1>; + qcom,bcm-voters = <&apps_bcm_voter>; + }; + + gem_noc: interconnect@9100000 { + compatible = "qcom,sm8350-gem-noc"; + reg = <0 0x09100000 0 0xb4000>; + #interconnect-cells = <1>; + qcom,bcm-voters = <&apps_bcm_voter>; + }; + + usb_1: usb@a6f8800 { + compatible = "qcom,sm8350-dwc3", "qcom,dwc3"; + reg = <0 0x0a6f8800 0 0x400>; + status = "disabled"; + #address-cells = <2>; + #size-cells = <2>; + ranges; + + clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>, + <&gcc GCC_USB30_PRIM_MASTER_CLK>, + <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>, + <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>, + <&gcc GCC_USB30_PRIM_SLEEP_CLK>; + clock-names = "cfg_noc", "core", "iface", "mock_utmi", + "sleep"; + + assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>, + <&gcc GCC_USB30_PRIM_MASTER_CLK>; + assigned-clock-rates = <19200000>, <200000000>; + + interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>, + <&pdc 14 IRQ_TYPE_EDGE_BOTH>, + <&pdc 15 IRQ_TYPE_EDGE_BOTH>, + <&pdc 17 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "hs_phy_irq", "dp_hs_phy_irq", + "dm_hs_phy_irq", "ss_phy_irq"; + + power-domains = <&gcc USB30_PRIM_GDSC>; + + resets = <&gcc GCC_USB30_PRIM_BCR>; + + usb_1_dwc3: dwc3@a600000 { + compatible = "snps,dwc3"; + reg = <0 0x0a600000 0 0xcd00>; + interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>; + iommus = <&apps_smmu 0x0 0x0>; + snps,dis_u2_susphy_quirk; + snps,dis_enblslpm_quirk; + phys = <&usb_1_hsphy>, <&usb_1_ssphy>; + phy-names = "usb2-phy", "usb3-phy"; + }; + }; + + usb_2: usb@a8f8800 { + compatible = "qcom,sm8350-dwc3", "qcom,dwc3"; + reg = <0 0x0a8f8800 0 0x400>; + status = "disabled"; + #address-cells = <2>; + #size-cells = <2>; + ranges; + + clocks = <&gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>, + <&gcc GCC_USB30_SEC_MASTER_CLK>, + <&gcc GCC_AGGRE_USB3_SEC_AXI_CLK>, + <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>, + <&gcc GCC_USB30_SEC_SLEEP_CLK>, + <&gcc GCC_USB3_SEC_CLKREF_EN>; + clock-names = "cfg_noc", "core", "iface", "mock_utmi", + "sleep", "xo"; + + assigned-clocks = <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>, + <&gcc GCC_USB30_SEC_MASTER_CLK>; + assigned-clock-rates = <19200000>, <200000000>; + + interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>, + <&pdc 12 IRQ_TYPE_EDGE_BOTH>, + <&pdc 13 IRQ_TYPE_EDGE_BOTH>, + <&pdc 16 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "hs_phy_irq", "dp_hs_phy_irq", + "dm_hs_phy_irq", "ss_phy_irq"; + + power-domains = <&gcc USB30_SEC_GDSC>; + + resets = <&gcc GCC_USB30_SEC_BCR>; + + usb_2_dwc3: dwc3@a800000 { + compatible = "snps,dwc3"; + reg = <0 0x0a800000 0 0xcd00>; + interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>; + iommus = <&apps_smmu 0x20 0x0>; + snps,dis_u2_susphy_quirk; + snps,dis_enblslpm_quirk; + phys = <&usb_2_hsphy>, <&usb_2_ssphy>; + phy-names = "usb2-phy", "usb3-phy"; + }; + }; + + adsp: remoteproc@17300000 { + compatible = "qcom,sm8350-adsp-pas"; + reg = <0 0x17300000 0 0x100>; + + interrupts-extended = <&pdc 6 IRQ_TYPE_LEVEL_HIGH>, + <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>, + <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>, + <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>, + <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "wdog", "fatal", "ready", + "handover", "stop-ack"; + + clocks = <&rpmhcc RPMH_CXO_CLK>; + clock-names = "xo"; + + power-domains = <&aoss_qmp AOSS_QMP_LS_LPASS>, + 
<&rpmhpd 4>, + <&rpmhpd 5>; + power-domain-names = "load_state", "lcx", "lmx"; + + memory-region = <&pil_adsp_mem>; + + qcom,smem-states = <&smp2p_adsp_out 0>; + qcom,smem-state-names = "stop"; + + status = "disabled"; + + glink-edge { + interrupts-extended = <&ipcc IPCC_CLIENT_LPASS + IPCC_MPROC_SIGNAL_GLINK_QMP + IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc IPCC_CLIENT_LPASS + IPCC_MPROC_SIGNAL_GLINK_QMP>; + + label = "lpass"; + qcom,remote-pid = <2>; + }; + }; + }; + + thermal-zones { + cpu0-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens0 1>; + + trips { + cpu0_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu0_alert1: trip-point1 { + temperature = <95000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu0_crit: cpu_crit { + temperature = <110000>; + hysteresis = <1000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu0_alert0>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu0_alert1>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + cpu1-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens0 2>; + + trips { + cpu1_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu1_alert1: trip-point1 { + temperature = <95000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu1_crit: cpu_crit { + temperature = <110000>; + hysteresis = <1000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu1_alert0>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu1_alert1>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + cpu2-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens0 3>; + + trips { + cpu2_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu2_alert1: trip-point1 { + temperature = <95000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu2_crit: cpu_crit { + temperature = <110000>; + hysteresis = <1000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu2_alert0>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu2_alert1>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + cpu3-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens0 4>; + + trips { + cpu3_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu3_alert1: 
trip-point1 { + temperature = <95000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu3_crit: cpu_crit { + temperature = <110000>; + hysteresis = <1000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu3_alert0>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu3_alert1>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + cpu4-top-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens0 7>; + + trips { + cpu4_top_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu4_top_alert1: trip-point1 { + temperature = <95000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu4_top_crit: cpu_crit { + temperature = <110000>; + hysteresis = <1000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu4_top_alert0>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu4_top_alert1>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + cpu5-top-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens0 8>; + + trips { + cpu5_top_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu5_top_alert1: trip-point1 { + temperature = <95000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu5_top_crit: cpu_crit { + temperature = <110000>; + hysteresis = <1000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu5_top_alert0>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu5_top_alert1>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + cpu6-top-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens0 9>; + + trips { + cpu6_top_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu6_top_alert1: trip-point1 { + temperature = <95000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu6_top_crit: cpu_crit { + temperature = <110000>; + hysteresis = <1000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu6_top_alert0>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu6_top_alert1>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT 
THERMAL_NO_LIMIT>; + }; + }; + }; + + cpu7-top-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens0 10>; + + trips { + cpu7_top_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu7_top_alert1: trip-point1 { + temperature = <95000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu7_top_crit: cpu_crit { + temperature = <110000>; + hysteresis = <1000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu7_top_alert0>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu7_top_alert1>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + cpu4-bottom-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens0 11>; + + trips { + cpu4_bottom_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu4_bottom_alert1: trip-point1 { + temperature = <95000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu4_bottom_crit: cpu_crit { + temperature = <110000>; + hysteresis = <1000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu4_bottom_alert0>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu4_bottom_alert1>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + cpu5-bottom-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens0 12>; + + trips { + cpu5_bottom_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu5_bottom_alert1: trip-point1 { + temperature = <95000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu5_bottom_crit: cpu_crit { + temperature = <110000>; + hysteresis = <1000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu5_bottom_alert0>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu5_bottom_alert1>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + cpu6-bottom-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens0 13>; + + trips { + cpu6_bottom_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu6_bottom_alert1: trip-point1 { + temperature = <95000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu6_bottom_crit: cpu_crit { + temperature = <110000>; + hysteresis = <1000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu6_bottom_alert0>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + 
<&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu6_bottom_alert1>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + cpu7-bottom-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens0 14>; + + trips { + cpu7_bottom_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu7_bottom_alert1: trip-point1 { + temperature = <95000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu7_bottom_crit: cpu_crit { + temperature = <110000>; + hysteresis = <1000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu7_bottom_alert0>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu7_bottom_alert1>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + aoss0-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens0 0>; + + trips { + aoss0_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "hot"; + }; + }; + }; + + cluster0-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens0 5>; + + trips { + cluster0_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "hot"; + }; + cluster0_crit: cluster0_crit { + temperature = <110000>; + hysteresis = <2000>; + type = "critical"; + }; + }; + }; + + cluster1-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens0 6>; + + trips { + cluster1_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "hot"; + }; + cluster1_crit: cluster1_crit { + temperature = <110000>; + hysteresis = <2000>; + type = "critical"; + }; + }; + }; + + aoss1-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens1 0>; + + trips { + aoss1_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "hot"; + }; + }; + }; + + gpu-thermal-top { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens1 1>; + + trips { + gpu1_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <1000>; + type = "hot"; + }; + }; + }; + + gpu-thermal-bottom { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens1 2>; + + trips { + gpu2_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <1000>; + type = "hot"; + }; + }; + }; + + nspss1-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens1 3>; + + trips { + nspss1_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <1000>; + type = "hot"; + }; + }; + }; + + nspss2-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens1 4>; + + trips { + nspss2_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <1000>; + type = "hot"; + }; + }; + }; + + nspss3-thermal { + polling-delay-passive = <250>; + 
polling-delay = <1000>; + + thermal-sensors = <&tsens1 5>; + + trips { + nspss3_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <1000>; + type = "hot"; + }; + }; + }; + + video-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens1 6>; + + trips { + video_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "hot"; + }; + }; + }; + + mem-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens1 7>; + + trips { + mem_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "hot"; + }; + }; + }; + + modem1-thermal-top { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens1 8>; + + trips { + modem1_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "hot"; + }; + }; + }; + + modem2-thermal-top { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens1 9>; + + trips { + modem2_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "hot"; + }; + }; + }; + + modem3-thermal-top { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens1 10>; + + trips { + modem3_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "hot"; + }; + }; + }; + + modem4-thermal-top { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens1 11>; + + trips { + modem4_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "hot"; + }; + }; + }; + + camera-thermal-top { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens1 12>; + + trips { + camera1_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "hot"; + }; + }; + }; + + camera-thermal-bottom { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens1 13>; + + trips { + camera2_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "hot"; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi index 30c169b08536..d8046fedf9c1 100644 --- a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi +++ b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi @@ -615,7 +615,7 @@ }; &rcar_sound { - pinctrl-0 = <&sound_pins &sound_clk_pins>; + pinctrl-0 = <&sound_pins>, <&sound_clk_pins>; pinctrl-names = "default"; /* Single DAI */ @@ -639,7 +639,7 @@ bitclock-master = <&rsnd_endpoint0>; frame-master = <&rsnd_endpoint0>; - playback = <&ssi1 &dvc1 &src1>; + playback = <&ssi1>, <&dvc1>, <&src1>; capture = <&ssi0>; }; }; diff --git a/arch/arm64/boot/dts/renesas/hihope-common.dtsi b/arch/arm64/boot/dts/renesas/hihope-common.dtsi index 7a3da9b06f67..0c7e6f790590 100644 --- a/arch/arm64/boot/dts/renesas/hihope-common.dtsi +++ b/arch/arm64/boot/dts/renesas/hihope-common.dtsi @@ -12,6 +12,9 @@ aliases { serial0 = &scif2; serial1 = &hscif0; + mmc0 = &sdhi3; + mmc1 = &sdhi0; + mmc2 = &sdhi2; }; chosen { diff --git a/arch/arm64/boot/dts/renesas/hihope-rev4.dtsi b/arch/arm64/boot/dts/renesas/hihope-rev4.dtsi index 929f4a1d3f90..7fc0339a3ac9 100644 --- a/arch/arm64/boot/dts/renesas/hihope-rev4.dtsi +++ b/arch/arm64/boot/dts/renesas/hihope-rev4.dtsi @@ -80,7 +80,7 @@ }; &rcar_sound { - pinctrl-0 = <&sound_pins &sound_clk_pins>; + pinctrl-0 = <&sound_pins>, <&sound_clk_pins>; pinctrl-names = "default"; 
status = "okay"; diff --git a/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts b/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts index 501cb05da228..3cf2e076940f 100644 --- a/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts +++ b/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts @@ -21,6 +21,9 @@ serial4 = &hscif2; serial5 = &scif5; ethernet0 = &avb; + mmc0 = &sdhi3; + mmc1 = &sdhi0; + mmc2 = &sdhi2; }; chosen { diff --git a/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts b/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts index 71763f4402a7..3c0d59def8ee 100644 --- a/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts +++ b/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts @@ -22,6 +22,9 @@ serial5 = &scif5; serial6 = &scif4; ethernet0 = &avb; + mmc0 = &sdhi3; + mmc1 = &sdhi0; + mmc2 = &sdhi2; }; chosen { diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts index ea87cb5a459c..4e72e4f2bab0 100644 --- a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts +++ b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts @@ -17,6 +17,8 @@ aliases { serial0 = &scif2; serial1 = &hscif2; + mmc0 = &sdhi0; + mmc1 = &sdhi3; }; chosen { @@ -351,7 +353,7 @@ }; &rcar_sound { - pinctrl-0 = <&sound_pins &sound_clk_pins>; + pinctrl-0 = <&sound_pins>, <&sound_clk_pins>; pinctrl-names = "default"; /* Single DAI */ @@ -365,7 +367,7 @@ rcar_sound,dai { dai0 { - playback = <&ssi0 &src0 &dvc0>; + playback = <&ssi0>, <&src0>, <&dvc0>; }; }; }; diff --git a/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts b/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts index 273f062f2909..7b6649a3ded0 100644 --- a/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts +++ b/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts @@ -22,6 +22,9 @@ serial5 = &scif5; serial6 = &scif4; ethernet0 = &avb; + mmc0 = &sdhi3; + mmc1 = &sdhi0; + mmc2 = &sdhi2; }; chosen { diff --git a/arch/arm64/boot/dts/renesas/r8a77950-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a77950-salvator-x.dts index 2438825c9b22..3e3b954a4a9d 100644 --- a/arch/arm64/boot/dts/renesas/r8a77950-salvator-x.dts +++ b/arch/arm64/boot/dts/renesas/r8a77950-salvator-x.dts @@ -52,29 +52,6 @@ status = "okay"; }; -&hdmi0 { - status = "okay"; - - ports { - port@1 { - reg = <1>; - rcar_dw_hdmi0_out: endpoint { - remote-endpoint = <&hdmi0_con>; - }; - }; - port@2 { - reg = <2>; - dw_hdmi0_snd_in: endpoint { - remote-endpoint = <&rsnd_endpoint1>; - }; - }; - }; -}; - -&hdmi0_con { - remote-endpoint = <&rcar_dw_hdmi0_out>; -}; - &hdmi1 { status = "okay"; @@ -111,19 +88,7 @@ &rcar_sound { ports { - /* rsnd_port0 is on salvator-common */ - rsnd_port1: port@1 { - reg = <1>; - rsnd_endpoint1: endpoint { - remote-endpoint = <&dw_hdmi0_snd_in>; - - dai-format = "i2s"; - bitclock-master = <&rsnd_endpoint1>; - frame-master = <&rsnd_endpoint1>; - - playback = <&ssi2>; - }; - }; + /* rsnd_port0/1 are described in salvator-common.dtsi */ rsnd_port2: port@2 { reg = <2>; rsnd_endpoint2: endpoint { diff --git a/arch/arm64/boot/dts/renesas/r8a77950.dtsi b/arch/arm64/boot/dts/renesas/r8a77950.dtsi index d716c4386ae9..25b87da32eeb 100644 --- a/arch/arm64/boot/dts/renesas/r8a77950.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77950.dtsi @@ -29,6 +29,11 @@ <&ipmmu_mp1 30>, <&ipmmu_mp1 31>; }; +&cluster0_opp { + /delete-node/ opp-1600000000; + /delete-node/ opp-1700000000; +}; + &du { renesas,vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>, <&vspd3 0>; }; diff --git 
a/arch/arm64/boot/dts/renesas/r8a77951-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a77951-salvator-x.dts index a402a2fb6e3c..cf2165bdf625 100644 --- a/arch/arm64/boot/dts/renesas/r8a77951-salvator-x.dts +++ b/arch/arm64/boot/dts/renesas/r8a77951-salvator-x.dts @@ -52,29 +52,6 @@ status = "okay"; }; -&hdmi0 { - status = "okay"; - - ports { - port@1 { - reg = <1>; - rcar_dw_hdmi0_out: endpoint { - remote-endpoint = <&hdmi0_con>; - }; - }; - port@2 { - reg = <2>; - dw_hdmi0_snd_in: endpoint { - remote-endpoint = <&rsnd_endpoint1>; - }; - }; - }; -}; - -&hdmi0_con { - remote-endpoint = <&rcar_dw_hdmi0_out>; -}; - &hdmi1 { status = "okay"; @@ -111,19 +88,7 @@ &rcar_sound { ports { - /* rsnd_port0 is on salvator-common */ - rsnd_port1: port@1 { - reg = <1>; - rsnd_endpoint1: endpoint { - remote-endpoint = <&dw_hdmi0_snd_in>; - - dai-format = "i2s"; - bitclock-master = <&rsnd_endpoint1>; - frame-master = <&rsnd_endpoint1>; - - playback = <&ssi2>; - }; - }; + /* rsnd_port0/1 are described in salvator-common.dtsi */ rsnd_port2: port@2 { reg = <2>; rsnd_endpoint2: endpoint { diff --git a/arch/arm64/boot/dts/renesas/r8a77951-salvator-xs.dts b/arch/arm64/boot/dts/renesas/r8a77951-salvator-xs.dts index e5922329a4b8..37202fcdc35b 100644 --- a/arch/arm64/boot/dts/renesas/r8a77951-salvator-xs.dts +++ b/arch/arm64/boot/dts/renesas/r8a77951-salvator-xs.dts @@ -57,29 +57,6 @@ status = "okay"; }; -&hdmi0 { - status = "okay"; - - ports { - port@1 { - reg = <1>; - rcar_dw_hdmi0_out: endpoint { - remote-endpoint = <&hdmi0_con>; - }; - }; - port@2 { - reg = <2>; - dw_hdmi0_snd_in: endpoint { - remote-endpoint = <&rsnd_endpoint1>; - }; - }; - }; -}; - -&hdmi0_con { - remote-endpoint = <&rcar_dw_hdmi0_out>; -}; - &hdmi1 { status = "okay"; @@ -152,19 +129,7 @@ &rcar_sound { ports { - /* rsnd_port0 is on salvator-common */ - rsnd_port1: port@1 { - reg = <1>; - rsnd_endpoint1: endpoint { - remote-endpoint = <&dw_hdmi0_snd_in>; - - dai-format = "i2s"; - bitclock-master = <&rsnd_endpoint1>; - frame-master = <&rsnd_endpoint1>; - - playback = <&ssi2>; - }; - }; + /* rsnd_port0/1 are described in salvator-common.dtsi */ rsnd_port2: port@2 { reg = <2>; rsnd_endpoint2: endpoint { diff --git a/arch/arm64/boot/dts/renesas/r8a77960-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a77960-salvator-x.dts index ecfbeafeaf36..d5543f26c472 100644 --- a/arch/arm64/boot/dts/renesas/r8a77960-salvator-x.dts +++ b/arch/arm64/boot/dts/renesas/r8a77960-salvator-x.dts @@ -35,49 +35,3 @@ clock-names = "du.0", "du.1", "du.2", "dclkin.0", "dclkin.1", "dclkin.2"; }; - -&hdmi0 { - status = "okay"; - - ports { - port@1 { - reg = <1>; - rcar_dw_hdmi0_out: endpoint { - remote-endpoint = <&hdmi0_con>; - }; - }; - port@2 { - reg = <2>; - dw_hdmi0_snd_in: endpoint { - remote-endpoint = <&rsnd_endpoint1>; - }; - }; - }; -}; - -&hdmi0_con { - remote-endpoint = <&rcar_dw_hdmi0_out>; -}; - -&rcar_sound { - ports { - /* rsnd_port0 is on salvator-common */ - rsnd_port1: port@1 { - reg = <1>; - rsnd_endpoint1: endpoint { - remote-endpoint = <&dw_hdmi0_snd_in>; - - dai-format = "i2s"; - bitclock-master = <&rsnd_endpoint1>; - frame-master = <&rsnd_endpoint1>; - - playback = <&ssi2>; - }; - }; - }; -}; - -&sound_card { - dais = <&rsnd_port0 /* ak4613 */ - &rsnd_port1>; /* HDMI0 */ -}; diff --git a/arch/arm64/boot/dts/renesas/r8a77960-salvator-xs.dts b/arch/arm64/boot/dts/renesas/r8a77960-salvator-xs.dts index 249896a38fdc..9ebb47b6bf2d 100644 --- a/arch/arm64/boot/dts/renesas/r8a77960-salvator-xs.dts +++ b/arch/arm64/boot/dts/renesas/r8a77960-salvator-xs.dts @@ 
-35,49 +35,3 @@ clock-names = "du.0", "du.1", "du.2", "dclkin.0", "dclkin.1", "dclkin.2"; }; - -&hdmi0 { - status = "okay"; - - ports { - port@1 { - reg = <1>; - rcar_dw_hdmi0_out: endpoint { - remote-endpoint = <&hdmi0_con>; - }; - }; - port@2 { - reg = <2>; - dw_hdmi0_snd_in: endpoint { - remote-endpoint = <&rsnd_endpoint1>; - }; - }; - }; -}; - -&hdmi0_con { - remote-endpoint = <&rcar_dw_hdmi0_out>; -}; - -&rcar_sound { - ports { - /* rsnd_port0 is on salvator-common */ - rsnd_port1: port@1 { - reg = <1>; - rsnd_endpoint1: endpoint { - remote-endpoint = <&dw_hdmi0_snd_in>; - - dai-format = "i2s"; - bitclock-master = <&rsnd_endpoint1>; - frame-master = <&rsnd_endpoint1>; - - playback = <&ssi2>; - }; - }; - }; -}; - -&sound_card { - dais = <&rsnd_port0 /* ak4613 */ - &rsnd_port1>; /* HDMI0 */ -}; diff --git a/arch/arm64/boot/dts/renesas/r8a77961-salvator-xs.dts b/arch/arm64/boot/dts/renesas/r8a77961-salvator-xs.dts index 1e7603365106..c7f14177f7b9 100644 --- a/arch/arm64/boot/dts/renesas/r8a77961-salvator-xs.dts +++ b/arch/arm64/boot/dts/renesas/r8a77961-salvator-xs.dts @@ -40,49 +40,3 @@ clock-names = "du.0", "du.1", "du.2", "dclkin.0", "dclkin.1", "dclkin.2"; }; - -&hdmi0 { - status = "okay"; - - ports { - port@1 { - reg = <1>; - rcar_dw_hdmi0_out: endpoint { - remote-endpoint = <&hdmi0_con>; - }; - }; - port@2 { - reg = <2>; - dw_hdmi0_snd_in: endpoint { - remote-endpoint = <&rsnd_endpoint1>; - }; - }; - }; -}; - -&hdmi0_con { - remote-endpoint = <&rcar_dw_hdmi0_out>; -}; - -&rcar_sound { - ports { - /* rsnd_port0 is on salvator-common */ - rsnd_port1: port@1 { - reg = <1>; - rsnd_endpoint1: endpoint { - remote-endpoint = <&dw_hdmi0_snd_in>; - - dai-format = "i2s"; - bitclock-master = <&rsnd_endpoint1>; - frame-master = <&rsnd_endpoint1>; - - playback = <&ssi2>; - }; - }; - }; -}; - -&sound_card { - dais = <&rsnd_port0 /* ak4613 */ - &rsnd_port1>; /* HDMI0 */ -}; diff --git a/arch/arm64/boot/dts/renesas/r8a77961-ulcb.dts b/arch/arm64/boot/dts/renesas/r8a77961-ulcb.dts index 7c6e60f6f32d..294a055f117e 100644 --- a/arch/arm64/boot/dts/renesas/r8a77961-ulcb.dts +++ b/arch/arm64/boot/dts/renesas/r8a77961-ulcb.dts @@ -30,3 +30,14 @@ reg = <0x6 0x00000000 0x1 0x00000000>; }; }; + +&du { + clocks = <&cpg CPG_MOD 724>, + <&cpg CPG_MOD 723>, + <&cpg CPG_MOD 722>, + <&versaclock5 1>, + <&versaclock5 3>, + <&versaclock5 2>; + clock-names = "du.0", "du.1", "du.2", + "dclkin.0", "dclkin.1", "dclkin.2"; +}; diff --git a/arch/arm64/boot/dts/renesas/r8a77961.dtsi b/arch/arm64/boot/dts/renesas/r8a77961.dtsi index e8c31ebec097..ab081f14af9a 100644 --- a/arch/arm64/boot/dts/renesas/r8a77961.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77961.dtsi @@ -1155,13 +1155,35 @@ }; can0: can@e6c30000 { + compatible = "renesas,can-r8a77961", + "renesas,rcar-gen3-can"; reg = <0 0xe6c30000 0 0x1000>; - /* placeholder */ + interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 916>, + <&cpg CPG_CORE R8A77961_CLK_CANFD>, + <&can_clk>; + clock-names = "clkp1", "clkp2", "can_clk"; + assigned-clocks = <&cpg CPG_CORE R8A77961_CLK_CANFD>; + assigned-clock-rates = <40000000>; + power-domains = <&sysc R8A77961_PD_ALWAYS_ON>; + resets = <&cpg 916>; + status = "disabled"; }; can1: can@e6c38000 { + compatible = "renesas,can-r8a77961", + "renesas,rcar-gen3-can"; reg = <0 0xe6c38000 0 0x1000>; - /* placeholder */ + interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 915>, + <&cpg CPG_CORE R8A77961_CLK_CANFD>, + <&can_clk>; + clock-names = "clkp1", "clkp2", "can_clk"; + assigned-clocks = <&cpg 
CPG_CORE R8A77961_CLK_CANFD>; + assigned-clock-rates = <40000000>; + power-domains = <&sysc R8A77961_PD_ALWAYS_ON>; + resets = <&cpg 915>; + status = "disabled"; }; pwm0: pwm@e6e30000 { @@ -1397,43 +1419,259 @@ }; vin0: video@e6ef0000 { + compatible = "renesas,vin-r8a77961"; reg = <0 0xe6ef0000 0 0x1000>; - /* placeholder */ + interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 811>; + power-domains = <&sysc R8A77961_PD_ALWAYS_ON>; + resets = <&cpg 811>; + renesas,id = <0>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + #address-cells = <1>; + #size-cells = <0>; + + reg = <1>; + + vin0csi20: endpoint@0 { + reg = <0>; + remote-endpoint = <&csi20vin0>; + }; + vin0csi40: endpoint@2 { + reg = <2>; + remote-endpoint = <&csi40vin0>; + }; + }; + }; }; vin1: video@e6ef1000 { + compatible = "renesas,vin-r8a77961"; reg = <0 0xe6ef1000 0 0x1000>; - /* placeholder */ + interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 810>; + power-domains = <&sysc R8A77961_PD_ALWAYS_ON>; + resets = <&cpg 810>; + renesas,id = <1>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + #address-cells = <1>; + #size-cells = <0>; + + reg = <1>; + + vin1csi20: endpoint@0 { + reg = <0>; + remote-endpoint = <&csi20vin1>; + }; + vin1csi40: endpoint@2 { + reg = <2>; + remote-endpoint = <&csi40vin1>; + }; + }; + }; }; vin2: video@e6ef2000 { + compatible = "renesas,vin-r8a77961"; reg = <0 0xe6ef2000 0 0x1000>; - /* placeholder */ + interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 809>; + power-domains = <&sysc R8A77961_PD_ALWAYS_ON>; + resets = <&cpg 809>; + renesas,id = <2>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + #address-cells = <1>; + #size-cells = <0>; + + reg = <1>; + + vin2csi20: endpoint@0 { + reg = <0>; + remote-endpoint = <&csi20vin2>; + }; + vin2csi40: endpoint@2 { + reg = <2>; + remote-endpoint = <&csi40vin2>; + }; + }; + }; }; vin3: video@e6ef3000 { + compatible = "renesas,vin-r8a77961"; reg = <0 0xe6ef3000 0 0x1000>; - /* placeholder */ + interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 808>; + power-domains = <&sysc R8A77961_PD_ALWAYS_ON>; + resets = <&cpg 808>; + renesas,id = <3>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + #address-cells = <1>; + #size-cells = <0>; + + reg = <1>; + + vin3csi20: endpoint@0 { + reg = <0>; + remote-endpoint = <&csi20vin3>; + }; + vin3csi40: endpoint@2 { + reg = <2>; + remote-endpoint = <&csi40vin3>; + }; + }; + }; }; vin4: video@e6ef4000 { + compatible = "renesas,vin-r8a77961"; reg = <0 0xe6ef4000 0 0x1000>; - /* placeholder */ + interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 807>; + power-domains = <&sysc R8A77961_PD_ALWAYS_ON>; + resets = <&cpg 807>; + renesas,id = <4>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + #address-cells = <1>; + #size-cells = <0>; + + reg = <1>; + + vin4csi20: endpoint@0 { + reg = <0>; + remote-endpoint = <&csi20vin4>; + }; + vin4csi40: endpoint@2 { + reg = <2>; + remote-endpoint = <&csi40vin4>; + }; + }; + }; }; vin5: video@e6ef5000 { + compatible = "renesas,vin-r8a77961"; reg = <0 0xe6ef5000 0 0x1000>; - /* placeholder */ + interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 806>; + power-domains = <&sysc R8A77961_PD_ALWAYS_ON>; + resets = <&cpg 806>; + renesas,id = <5>; + 
status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + #address-cells = <1>; + #size-cells = <0>; + + reg = <1>; + + vin5csi20: endpoint@0 { + reg = <0>; + remote-endpoint = <&csi20vin5>; + }; + vin5csi40: endpoint@2 { + reg = <2>; + remote-endpoint = <&csi40vin5>; + }; + }; + }; }; vin6: video@e6ef6000 { + compatible = "renesas,vin-r8a77961"; reg = <0 0xe6ef6000 0 0x1000>; - /* placeholder */ + interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 805>; + power-domains = <&sysc R8A77961_PD_ALWAYS_ON>; + resets = <&cpg 805>; + renesas,id = <6>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + #address-cells = <1>; + #size-cells = <0>; + + reg = <1>; + + vin6csi20: endpoint@0 { + reg = <0>; + remote-endpoint = <&csi20vin6>; + }; + vin6csi40: endpoint@2 { + reg = <2>; + remote-endpoint = <&csi40vin6>; + }; + }; + }; }; vin7: video@e6ef7000 { + compatible = "renesas,vin-r8a77961"; reg = <0 0xe6ef7000 0 0x1000>; - /* placeholder */ + interrupts = <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 804>; + power-domains = <&sysc R8A77961_PD_ALWAYS_ON>; + resets = <&cpg 804>; + renesas,id = <7>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + #address-cells = <1>; + #size-cells = <0>; + + reg = <1>; + + vin7csi20: endpoint@0 { + reg = <0>; + remote-endpoint = <&csi20vin7>; + }; + vin7csi40: endpoint@2 { + reg = <2>; + remote-endpoint = <&csi40vin7>; + }; + }; + }; }; rcar_sound: sound@ec500000 { @@ -2249,8 +2487,13 @@ }; csi20: csi2@fea80000 { + compatible = "renesas,r8a77961-csi2"; reg = <0 0xfea80000 0 0x10000>; - /* placeholder */ + interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 714>; + power-domains = <&sysc R8A77961_PD_ALWAYS_ON>; + resets = <&cpg 714>; + status = "disabled"; ports { #address-cells = <1>; @@ -2259,14 +2502,53 @@ port@1 { #address-cells = <1>; #size-cells = <0>; + reg = <1>; + + csi20vin0: endpoint@0 { + reg = <0>; + remote-endpoint = <&vin0csi20>; + }; + csi20vin1: endpoint@1 { + reg = <1>; + remote-endpoint = <&vin1csi20>; + }; + csi20vin2: endpoint@2 { + reg = <2>; + remote-endpoint = <&vin2csi20>; + }; + csi20vin3: endpoint@3 { + reg = <3>; + remote-endpoint = <&vin3csi20>; + }; + csi20vin4: endpoint@4 { + reg = <4>; + remote-endpoint = <&vin4csi20>; + }; + csi20vin5: endpoint@5 { + reg = <5>; + remote-endpoint = <&vin5csi20>; + }; + csi20vin6: endpoint@6 { + reg = <6>; + remote-endpoint = <&vin6csi20>; + }; + csi20vin7: endpoint@7 { + reg = <7>; + remote-endpoint = <&vin7csi20>; + }; }; }; }; csi40: csi2@feaa0000 { + compatible = "renesas,r8a77961-csi2"; reg = <0 0xfeaa0000 0 0x10000>; - /* placeholder */ + interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 716>; + power-domains = <&sysc R8A77961_PD_ALWAYS_ON>; + resets = <&cpg 716>; + status = "disabled"; ports { #address-cells = <1>; @@ -2277,7 +2559,41 @@ #size-cells = <0>; reg = <1>; + + csi40vin0: endpoint@0 { + reg = <0>; + remote-endpoint = <&vin0csi40>; + }; + csi40vin1: endpoint@1 { + reg = <1>; + remote-endpoint = <&vin1csi40>; + }; + csi40vin2: endpoint@2 { + reg = <2>; + remote-endpoint = <&vin2csi40>; + }; + csi40vin3: endpoint@3 { + reg = <3>; + remote-endpoint = <&vin3csi40>; + }; + csi40vin4: endpoint@4 { + reg = <4>; + remote-endpoint = <&vin4csi40>; + }; + csi40vin5: endpoint@5 { + reg = <5>; + remote-endpoint = <&vin5csi40>; + }; + csi40vin6: endpoint@6 { + reg = <6>; + remote-endpoint = 
<&vin6csi40>; + }; + csi40vin7: endpoint@7 { + reg = <7>; + remote-endpoint = <&vin7csi40>; + }; }; + }; }; diff --git a/arch/arm64/boot/dts/renesas/r8a77965-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a77965-salvator-x.dts index 660a0240eec5..f84c64ed4df7 100644 --- a/arch/arm64/boot/dts/renesas/r8a77965-salvator-x.dts +++ b/arch/arm64/boot/dts/renesas/r8a77965-salvator-x.dts @@ -30,48 +30,3 @@ clock-names = "du.0", "du.1", "du.3", "dclkin.0", "dclkin.1", "dclkin.3"; }; - -&hdmi0 { - status = "okay"; - - ports { - port@1 { - reg = <1>; - rcar_dw_hdmi0_out: endpoint { - remote-endpoint = <&hdmi0_con>; - }; - }; - port@2 { - reg = <2>; - dw_hdmi0_snd_in: endpoint { - remote-endpoint = <&rsnd_endpoint1>; - }; - }; - }; -}; - -&hdmi0_con { - remote-endpoint = <&rcar_dw_hdmi0_out>; -}; - -&rcar_sound { - ports { - rsnd_port1: port@1 { - reg = <1>; - rsnd_endpoint1: endpoint { - remote-endpoint = <&dw_hdmi0_snd_in>; - - dai-format = "i2s"; - bitclock-master = <&rsnd_endpoint1>; - frame-master = <&rsnd_endpoint1>; - - playback = <&ssi2>; - }; - }; - }; -}; - -&sound_card { - dais = <&rsnd_port0 /* ak4613 */ - &rsnd_port1>; /* HDMI0 */ -}; diff --git a/arch/arm64/boot/dts/renesas/r8a77965-salvator-xs.dts b/arch/arm64/boot/dts/renesas/r8a77965-salvator-xs.dts index d7e621101af7..729756c24c74 100644 --- a/arch/arm64/boot/dts/renesas/r8a77965-salvator-xs.dts +++ b/arch/arm64/boot/dts/renesas/r8a77965-salvator-xs.dts @@ -31,29 +31,6 @@ "dclkin.0", "dclkin.1", "dclkin.3"; }; -&hdmi0 { - status = "okay"; - - ports { - port@1 { - reg = <1>; - rcar_dw_hdmi0_out: endpoint { - remote-endpoint = <&hdmi0_con>; - }; - }; - port@2 { - reg = <2>; - dw_hdmi0_snd_in: endpoint { - remote-endpoint = <&rsnd_endpoint1>; - }; - }; - }; -}; - -&hdmi0_con { - remote-endpoint = <&rcar_dw_hdmi0_out>; -}; - &pca9654 { pcie-sata-switch-hog { gpio-hog; @@ -63,29 +40,7 @@ }; }; -&rcar_sound { - ports { - rsnd_port1: port@1 { - reg = <1>; - rsnd_endpoint1: endpoint { - remote-endpoint = <&dw_hdmi0_snd_in>; - - dai-format = "i2s"; - bitclock-master = <&rsnd_endpoint1>; - frame-master = <&rsnd_endpoint1>; - - playback = <&ssi2>; - }; - }; - }; -}; - /* SW12-7 must be set 'Off' (MD12 set to 1) which is not the default! 
*/ &sata { status = "okay"; }; - -&sound_card { - dais = <&rsnd_port0 /* ak4613 */ - &rsnd_port1>; /* HDMI0 */ -}; diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi index ec7ca72399ec..1ffa4a995a7a 100644 --- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi @@ -992,8 +992,8 @@ reg = <1>; - vin4csi41: endpoint@2 { - reg = <2>; + vin4csi41: endpoint@3 { + reg = <3>; remote-endpoint = <&csi41vin4>; }; }; @@ -1020,8 +1020,8 @@ reg = <1>; - vin5csi41: endpoint@2 { - reg = <2>; + vin5csi41: endpoint@3 { + reg = <3>; remote-endpoint = <&csi41vin5>; }; }; @@ -1048,8 +1048,8 @@ reg = <1>; - vin6csi41: endpoint@2 { - reg = <2>; + vin6csi41: endpoint@3 { + reg = <3>; remote-endpoint = <&csi41vin6>; }; }; @@ -1076,8 +1076,8 @@ reg = <1>; - vin7csi41: endpoint@2 { - reg = <2>; + vin7csi41: endpoint@3 { + reg = <3>; remote-endpoint = <&csi41vin7>; }; }; diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts index f74f8b9993f1..295d34f1d216 100644 --- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts +++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts @@ -16,6 +16,9 @@ aliases { serial0 = &scif2; ethernet0 = &avb; + mmc0 = &sdhi3; + mmc1 = &sdhi0; + mmc2 = &sdhi1; }; chosen { @@ -621,7 +624,7 @@ }; &rcar_sound { - pinctrl-0 = <&sound_pins &sound_clk_pins>; + pinctrl-0 = <&sound_pins>, <&sound_clk_pins>; pinctrl-names = "default"; /* Single DAI */ @@ -653,8 +656,8 @@ rcar_sound,dai { dai0 { - playback = <&ssi0 &src0 &dvc0>; - capture = <&ssi1 &src1 &dvc1>; + playback = <&ssi0>, <&src0>, <&dvc0>; + capture = <&ssi1>, <&src1>, <&dvc1>; }; }; diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi index fa284a7260d6..a0a1a1da0d87 100644 --- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi @@ -6,12 +6,45 @@ */ #include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/leds/common.h> + #include "r8a779a0.dtsi" / { model = "Renesas Falcon CPU board"; compatible = "renesas,falcon-cpu", "renesas,r8a779a0"; + aliases { + serial0 = &scif0; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + leds { + compatible = "gpio-leds"; + + led-1 { + gpios = <&gpio4 18 GPIO_ACTIVE_HIGH>; + color = <LED_COLOR_ID_GREEN>; + function = LED_FUNCTION_INDICATOR; + function-enumerator = <1>; + }; + led-2 { + gpios = <&gpio4 19 GPIO_ACTIVE_HIGH>; + color = <LED_COLOR_ID_GREEN>; + function = LED_FUNCTION_INDICATOR; + function-enumerator = <2>; + }; + led-3 { + gpios = <&gpio4 20 GPIO_ACTIVE_HIGH>; + color = <LED_COLOR_ID_GREEN>; + function = LED_FUNCTION_INDICATOR; + function-enumerator = <3>; + }; + }; + memory@48000000 { device_type = "memory"; /* first 128MB is reserved for secure area. 
*/ @@ -52,22 +85,6 @@ }; }; -&avb0 { - pinctrl-0 = <&avb0_pins>; - pinctrl-names = "default"; - phy-handle = <&phy0>; - tx-internal-delay-ps = <2000>; - status = "okay"; - - phy0: ethernet-phy@0 { - rxc-skew-ps = <1500>; - reg = <0>; - interrupt-parent = <&gpio4>; - interrupts = <16 IRQ_TYPE_LEVEL_LOW>; - reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; - }; -}; - &extal_clk { clock-frequency = <16666666>; }; @@ -82,6 +99,13 @@ status = "okay"; clock-frequency = <400000>; + + eeprom@50 { + compatible = "rohm,br24g01", "atmel,24c01"; + label = "cpu-board"; + reg = <0x50>; + pagesize = <8>; + }; }; &i2c1 { @@ -121,24 +145,6 @@ pinctrl-0 = <&scif_clk_pins>; pinctrl-names = "default"; - avb0_pins: avb0 { - mux { - groups = "avb0_link", "avb0_mdio", "avb0_rgmii", "avb0_txcrefclk"; - function = "avb0"; - }; - - pins_mdio { - groups = "avb0_mdio"; - drive-strength = <21>; - }; - - pins_mii { - groups = "avb0_rgmii"; - drive-strength = <21>; - }; - - }; - i2c0_pins: i2c0 { groups = "i2c0"; function = "i2c0"; @@ -171,6 +177,11 @@ }; }; +&rwdt { + timeout-sec = <60>; + status = "okay"; +}; + &scif0 { pinctrl-0 = <&scif0_pins>; pinctrl-names = "default"; diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-csi-dsi.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-csi-dsi.dtsi new file mode 100644 index 000000000000..14d3db5d6c16 --- /dev/null +++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-csi-dsi.dtsi @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device Tree Source for the Falcon CSI/DSI sub-board + * + * Copyright (C) 2021 Glider bv + */ + +&i2c0 { + eeprom@52 { + compatible = "rohm,br24g01", "atmel,24c01"; + label = "csi-dsi-sub-board-id"; + reg = <0x52>; + pagesize = <8>; + }; +}; diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-ethernet.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-ethernet.dtsi new file mode 100644 index 000000000000..e11bf9ace776 --- /dev/null +++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-ethernet.dtsi @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device Tree Source for the Falcon Ethernet sub-board + * + * Copyright (C) 2021 Glider bv + */ + +&i2c0 { + eeprom@53 { + compatible = "rohm,br24g01", "atmel,24c01"; + label = "ethernet-sub-board-id"; + reg = <0x53>; + pagesize = <8>; + }; +}; diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts b/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts index 5617b81dd7dc..687f019e79f0 100644 --- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts +++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts @@ -7,6 +7,8 @@ /dts-v1/; #include "r8a779a0-falcon-cpu.dtsi" +#include "r8a779a0-falcon-csi-dsi.dtsi" +#include "r8a779a0-falcon-ethernet.dtsi" / { model = "Renesas Falcon CPU and Breakout boards based on r8a779a0"; @@ -14,15 +16,51 @@ aliases { ethernet0 = &avb0; - serial0 = &scif0; }; +}; + +&avb0 { + pinctrl-0 = <&avb0_pins>; + pinctrl-names = "default"; + phy-handle = <&phy0>; + tx-internal-delay-ps = <2000>; + status = "okay"; - chosen { - stdout-path = "serial0:115200n8"; + phy0: ethernet-phy@0 { + rxc-skew-ps = <1500>; + reg = <0>; + interrupt-parent = <&gpio4>; + interrupts = <16 IRQ_TYPE_LEVEL_LOW>; + reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; }; }; -&rwdt { - timeout-sec = <60>; - status = "okay"; +&i2c0 { + eeprom@51 { + compatible = "rohm,br24g01", "atmel,24c01"; + label = "breakout-board"; + reg = <0x51>; + pagesize = <8>; + }; +}; + +&pfc { + avb0_pins: avb0 { + mux { + groups = "avb0_link", "avb0_mdio", "avb0_rgmii", + "avb0_txcrefclk"; + function = "avb0"; + }; + + pins_mdio 
{ + groups = "avb0_mdio"; + drive-strength = <21>; + }; + + pins_mii { + groups = "avb0_rgmii"; + drive-strength = <21>; + }; + + }; }; diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi index dfd6ae8b564f..70b3604e56cd 100644 --- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi @@ -60,10 +60,7 @@ pmu_a76 { compatible = "arm,cortex-a76-pmu"; - interrupts-extended = <&gic GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>, - <&gic GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>, - <&gic GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>, - <&gic GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>; + interrupts-extended = <&gic GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>; }; /* External SCIF clock - to be overridden by boards that provide it */ @@ -239,6 +236,76 @@ #interrupt-cells = <2>; }; + cmt0: timer@e60f0000 { + compatible = "renesas,r8a779a0-cmt0", + "renesas,rcar-gen3-cmt0"; + reg = <0 0xe60f0000 0 0x1004>; + interrupts = <GIC_SPI 500 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 501 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 910>; + clock-names = "fck"; + power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; + resets = <&cpg 910>; + status = "disabled"; + }; + + cmt1: timer@e6130000 { + compatible = "renesas,r8a779a0-cmt1", + "renesas,rcar-gen3-cmt1"; + reg = <0 0xe6130000 0 0x1004>; + interrupts = <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 450 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 451 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 452 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 453 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 454 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 455 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 911>; + clock-names = "fck"; + power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; + resets = <&cpg 911>; + status = "disabled"; + }; + + cmt2: timer@e6140000 { + compatible = "renesas,r8a779a0-cmt1", + "renesas,rcar-gen3-cmt1"; + reg = <0 0xe6140000 0 0x1004>; + interrupts = <GIC_SPI 456 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 457 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 458 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 459 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 460 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 461 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 462 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 463 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 912>; + clock-names = "fck"; + power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; + resets = <&cpg 912>; + status = "disabled"; + }; + + cmt3: timer@e6148000 { + compatible = "renesas,r8a779a0-cmt1", + "renesas,rcar-gen3-cmt1"; + reg = <0 0xe6148000 0 0x1004>; + interrupts = <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 913>; + clock-names = "fck"; + power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; + resets = <&cpg 913>; + status = "disabled"; + }; + cpg: clock-controller@e6150000 { compatible = "renesas,r8a779a0-cpg-mssr"; reg = <0 0xe6150000 0 0x4000>; @@ -260,6 +327,84 @@ #power-domain-cells = <1>; }; + tmu0: timer@e61e0000 { + compatible = "renesas,tmu-r8a779a0", "renesas,tmu"; + reg = <0 0xe61e0000 0 0x30>; + interrupts = <GIC_SPI 512 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 513 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 514 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 713>; + clock-names = "fck"; + power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; + resets = <&cpg 713>; + status = "disabled"; + }; + + tmu1: timer@e6fc0000 { + compatible = 
"renesas,tmu-r8a779a0", "renesas,tmu"; + reg = <0 0xe6fc0000 0 0x30>; + interrupts = <GIC_SPI 504 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 505 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 506 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 714>; + clock-names = "fck"; + power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; + resets = <&cpg 714>; + status = "disabled"; + }; + + tmu2: timer@e6fd0000 { + compatible = "renesas,tmu-r8a779a0", "renesas,tmu"; + reg = <0 0xe6fd0000 0 0x30>; + interrupts = <GIC_SPI 508 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 509 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 510 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 715>; + clock-names = "fck"; + power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; + resets = <&cpg 715>; + status = "disabled"; + }; + + tmu3: timer@e6fe0000 { + compatible = "renesas,tmu-r8a779a0", "renesas,tmu"; + reg = <0 0xe6fe0000 0 0x30>; + interrupts = <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 716>; + clock-names = "fck"; + power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; + resets = <&cpg 716>; + status = "disabled"; + }; + + tmu4: timer@ffc00000 { + compatible = "renesas,tmu-r8a779a0", "renesas,tmu"; + reg = <0 0xffc00000 0 0x30>; + interrupts = <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 478 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 717>; + clock-names = "fck"; + power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; + resets = <&cpg 717>; + status = "disabled"; + }; + + tsc: thermal@e6190000 { + compatible = "renesas,r8a779a0-thermal"; + reg = <0 0xe6190000 0 0x200>, + <0 0xe6198000 0 0x200>, + <0 0xe61a0000 0 0x200>, + <0 0xe61a8000 0 0x200>, + <0 0xe61b0000 0 0x200>; + clocks = <&cpg CPG_MOD 919>; + power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; + resets = <&cpg 919>; + #thermal-sensor-cells = <1>; + }; + i2c0: i2c@e6500000 { compatible = "renesas,i2c-r8a779a0", "renesas,rcar-gen3-i2c"; @@ -954,12 +1099,122 @@ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; }; + fcpvd0: fcp@fea10000 { + compatible = "renesas,fcpv"; + reg = <0 0xfea10000 0 0x200>; + clocks = <&cpg CPG_MOD 508>; + power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; + resets = <&cpg 508>; + }; + + fcpvd1: fcp@fea11000 { + compatible = "renesas,fcpv"; + reg = <0 0xfea11000 0 0x200>; + clocks = <&cpg CPG_MOD 509>; + power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; + resets = <&cpg 509>; + }; + + vspd0: vsp@fea20000 { + compatible = "renesas,vsp2"; + reg = <0 0xfea20000 0 0x5000>; + interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 830>; + power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; + resets = <&cpg 830>; + + renesas,fcp = <&fcpvd0>; + }; + + vspd1: vsp@fea28000 { + compatible = "renesas,vsp2"; + reg = <0 0xfea28000 0 0x5000>; + interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 831>; + power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; + resets = <&cpg 831>; + + renesas,fcp = <&fcpvd1>; + }; + prr: chipid@fff00044 { compatible = "renesas,prr"; reg = <0 0xfff00044 0 4>; }; }; + thermal-zones { + sensor_thermal1: sensor-thermal1 { + polling-delay-passive = <250>; + polling-delay = <1000>; + thermal-sensors = <&tsc 0>; + + trips { + sensor1_crit: sensor1-crit { + temperature = <120000>; + hysteresis = <1000>; + type = "critical"; + }; + }; + }; + + sensor_thermal2: sensor-thermal2 { + polling-delay-passive = <250>; + polling-delay = <1000>; + thermal-sensors = <&tsc 1>; + + trips { + sensor2_crit: sensor2-crit { + temperature = <120000>; + hysteresis = <1000>; + type 
= "critical"; + }; + }; + }; + + sensor_thermal3: sensor-thermal3 { + polling-delay-passive = <250>; + polling-delay = <1000>; + thermal-sensors = <&tsc 2>; + + trips { + sensor3_crit: sensor3-crit { + temperature = <120000>; + hysteresis = <1000>; + type = "critical"; + }; + }; + }; + + sensor_thermal4: sensor-thermal4 { + polling-delay-passive = <250>; + polling-delay = <1000>; + thermal-sensors = <&tsc 3>; + + trips { + sensor4_crit: sensor4-crit { + temperature = <120000>; + hysteresis = <1000>; + type = "critical"; + }; + }; + }; + + sensor_thermal5: sensor-thermal5 { + polling-delay-passive = <250>; + polling-delay = <1000>; + thermal-sensors = <&tsc 4>; + + trips { + sensor5_crit: sensor5-crit { + temperature = <120000>; + hysteresis = <1000>; + type = "critical"; + }; + }; + }; + }; + timer { compatible = "arm,armv8-timer"; interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>, diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi index c22bb38994e8..e18747df219f 100644 --- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi +++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi @@ -36,6 +36,9 @@ serial0 = &scif2; serial1 = &hscif1; ethernet0 = &avb; + mmc0 = &sdhi2; + mmc1 = &sdhi0; + mmc2 = &sdhi3; }; chosen { @@ -198,7 +201,8 @@ label = "rcar-sound"; - dais = <&rsnd_port0>; + dais = <&rsnd_port0 /* ak4613 */ + &rsnd_port1>; /* HDMI0 */ }; vbus0_usb2: regulator-vbus0-usb2 { @@ -316,6 +320,10 @@ }; }; +&a57_0 { + cpu-supply = <&dvfs>; +}; + &audio_clk_a { clock-frequency = <22579200>; }; @@ -394,6 +402,29 @@ clock-frequency = <32768>; }; +&hdmi0 { + status = "okay"; + + ports { + port@1 { + reg = <1>; + rcar_dw_hdmi0_out: endpoint { + remote-endpoint = <&hdmi0_con>; + }; + }; + port@2 { + reg = <2>; + dw_hdmi0_snd_in: endpoint { + remote-endpoint = <&rsnd_endpoint1>; + }; + }; + }; +}; + +&hdmi0_con { + remote-endpoint = <&rcar_dw_hdmi0_out>; +}; + &hscif1 { pinctrl-0 = <&hscif1_pins>; pinctrl-names = "default"; @@ -730,7 +761,7 @@ }; &rcar_sound { - pinctrl-0 = <&sound_pins &sound_clk_pins>; + pinctrl-0 = <&sound_pins>, <&sound_clk_pins>; pinctrl-names = "default"; /* Single DAI */ @@ -773,8 +804,21 @@ bitclock-master = <&rsnd_endpoint0>; frame-master = <&rsnd_endpoint0>; - playback = <&ssi0 &src0 &dvc0>; - capture = <&ssi1 &src1 &dvc1>; + playback = <&ssi0>, <&src0>, <&dvc0>; + capture = <&ssi1>, <&src1>, <&dvc1>; + }; + }; + + rsnd_port1: port@1 { + reg = <1>; + rsnd_endpoint1: endpoint { + remote-endpoint = <&dw_hdmi0_snd_in>; + + dai-format = "i2s"; + bitclock-master = <&rsnd_endpoint1>; + frame-master = <&rsnd_endpoint1>; + + playback = <&ssi2>; }; }; }; diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi index e9ed2597f1c2..61bd4df09df0 100644 --- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi +++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi @@ -16,6 +16,7 @@ aliases { serial1 = &hscif0; serial2 = &scif1; + mmc2 = &sdhi3; }; clksndsel: clksndsel { diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi index a04eae55dd6c..1f177af3eb9d 100644 --- a/arch/arm64/boot/dts/renesas/ulcb.dtsi +++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi @@ -23,6 +23,8 @@ aliases { serial0 = &scif2; ethernet0 = &avb; + mmc0 = &sdhi2; + mmc1 = &sdhi0; }; chosen { @@ -136,6 +138,10 @@ }; }; +&a57_0 { + cpu-supply = <&dvfs>; +}; + &audio_clk_a { clock-frequency = <22579200>; }; @@ -365,7 +371,7 @@ }; &rcar_sound { - pinctrl-0 = <&sound_pins 
&sound_clk_pins>; + pinctrl-0 = <&sound_pins>, <&sound_clk_pins>; pinctrl-names = "default"; /* Single DAI */ @@ -408,8 +414,8 @@ bitclock-master = <&rsnd_for_ak4613>; frame-master = <&rsnd_for_ak4613>; - playback = <&ssi0 &src0 &dvc0>; - capture = <&ssi1 &src1 &dvc1>; + playback = <&ssi0>, <&src0>, <&dvc0>; + capture = <&ssi1>, <&src1>, <&dvc1>; }; }; rsnd_port1: port@1 { diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile index 62d3abc17a24..c3e00c0e2db7 100644 --- a/arch/arm64/boot/dts/rockchip/Makefile +++ b/arch/arm64/boot/dts/rockchip/Makefile @@ -36,6 +36,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopc-t4.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopi-m4.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopi-m4b.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopi-neo4.dtb +dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopi-r4s.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-orangepi.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-pinebook-pro.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-puma-haikou.dtb diff --git a/arch/arm64/boot/dts/rockchip/px30-engicam-common.dtsi b/arch/arm64/boot/dts/rockchip/px30-engicam-common.dtsi index 08b0b9fbcbc9..3429e124d95a 100644 --- a/arch/arm64/boot/dts/rockchip/px30-engicam-common.dtsi +++ b/arch/arm64/boot/dts/rockchip/px30-engicam-common.dtsi @@ -6,6 +6,11 @@ */ / { + aliases { + mmc1 = &sdmmc; + mmc2 = &sdio; + }; + vcc5v0_sys: vcc5v0-sys { compatible = "regulator-fixed"; regulator-name = "vcc5v0_sys"; /* +5V */ diff --git a/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi b/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi index cdacd3483600..7249871530ab 100644 --- a/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi +++ b/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi @@ -11,6 +11,10 @@ / { compatible = "engicam,px30-core", "rockchip,px30"; + + aliases { + mmc0 = &emmc; + }; }; &cpu0 { diff --git a/arch/arm64/boot/dts/rockchip/px30-evb.dts b/arch/arm64/boot/dts/rockchip/px30-evb.dts index 5fe905fae9a8..c1ce9c295e5b 100644 --- a/arch/arm64/boot/dts/rockchip/px30-evb.dts +++ b/arch/arm64/boot/dts/rockchip/px30-evb.dts @@ -13,6 +13,12 @@ model = "Rockchip PX30 EVB"; compatible = "rockchip,px30-evb", "rockchip,px30"; + aliases { + mmc0 = &sdmmc; + mmc1 = &sdio; + mmc2 = &emmc; + }; + chosen { stdout-path = "serial5:115200n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi index c45b0cfcae09..09baa8a167ce 100644 --- a/arch/arm64/boot/dts/rockchip/px30.dtsi +++ b/arch/arm64/boot/dts/rockchip/px30.dtsi @@ -25,9 +25,6 @@ i2c1 = &i2c1; i2c2 = &i2c2; i2c3 = &i2c3; - mmc0 = &sdmmc; - mmc1 = &sdio; - mmc2 = &emmc; serial0 = &uart0; serial1 = &uart1; serial2 = &uart2; @@ -603,7 +600,7 @@ }; wdt: watchdog@ff1e0000 { - compatible = "snps,dw-wdt"; + compatible = "rockchip,px30-wdt", "snps,dw-wdt"; reg = <0x0 0xff1e0000 0x0 0x100>; clocks = <&cru PCLK_WDT_NS>; interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>; @@ -984,6 +981,27 @@ status = "disabled"; }; + gpu_opp_table: opp-table2 { + compatible = "operating-points-v2"; + + opp-200000000 { + opp-hz = /bits/ 64 <200000000>; + opp-microvolt = <950000>; + }; + opp-300000000 { + opp-hz = /bits/ 64 <300000000>; + opp-microvolt = <975000>; + }; + opp-400000000 { + opp-hz = /bits/ 64 <400000000>; + opp-microvolt = <1050000>; + }; + opp-480000000 { + opp-hz = /bits/ 64 <480000000>; + opp-microvolt = <1125000>; + }; + }; + gpu: gpu@ff400000 { compatible = "rockchip,px30-mali", "arm,mali-bifrost"; reg = <0x0 0xff400000 0x0 
0x4000>; @@ -994,6 +1012,7 @@ clocks = <&cru SCLK_GPU>; #cooling-cells = <2>; power-domains = <&power PX30_PD_GPU>; + operating-points-v2 = <&gpu_opp_table>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts index 7a96be10eaf0..3dddd4742c3a 100644 --- a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts +++ b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts @@ -9,6 +9,12 @@ / { model = "Firefly ROC-RK3308-CC board"; compatible = "firefly,roc-rk3308-cc", "rockchip,rk3308"; + + aliases { + mmc0 = &sdmmc; + mmc1 = &emmc; + }; + chosen { stdout-path = "serial2:1500000n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3308.dtsi b/arch/arm64/boot/dts/rockchip/rk3308.dtsi index 3a035a189450..0c5fa9801e6f 100644 --- a/arch/arm64/boot/dts/rockchip/rk3308.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3308.dtsi @@ -24,9 +24,6 @@ i2c1 = &i2c1; i2c2 = &i2c2; i2c3 = &i2c3; - mmc0 = &sdmmc; - mmc1 = &emmc; - mmc2 = &sdio; serial0 = &uart0; serial1 = &uart1; serial2 = &uart2; @@ -247,7 +244,7 @@ }; wdt: watchdog@ff080000 { - compatible = "snps,dw-wdt"; + compatible = "rockchip,rk3308-wdt", "snps,dw-wdt"; reg = <0x0 0xff080000 0x0 0x100>; clocks = <&cru PCLK_WDT>; interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts b/arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts index 30c73ef25370..763cf9b4620e 100644 --- a/arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts +++ b/arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts @@ -8,6 +8,12 @@ model = "A95X Z2"; compatible = "zkmagic,a95x-z2", "rockchip,rk3318"; + aliases { + mmc0 = &sdmmc; + mmc1 = &sdio; + mmc2 = &emmc; + }; + chosen { stdout-path = "serial2:1500000n8"; }; @@ -357,6 +363,11 @@ status = "okay"; }; +&usbdrd3 { + dr_mode = "host"; + status = "okay"; +}; + &usb_host0_ehci { status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts index 97fb93e1cc00..49c97f76df77 100644 --- a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts +++ b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts @@ -14,6 +14,10 @@ model = "ODROID-GO Advance"; compatible = "hardkernel,rk3326-odroid-go2", "rockchip,rk3326"; + aliases { + mmc0 = &sdmmc; + }; + chosen { stdout-path = "serial2:115200n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-a1.dts b/arch/arm64/boot/dts/rockchip/rk3328-a1.dts index 37f307cfa4cc..de2d3e88e27f 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-a1.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-a1.dts @@ -8,6 +8,11 @@ model = "Beelink A1"; compatible = "azw,beelink-a1", "rockchip,rk3328"; + aliases { + mmc0 = &sdmmc; + mmc1 = &emmc; + }; + /* * UART pins, as viewed with bottom of case removed: * diff --git a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts index a48767931af6..ff6b466e0e07 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts @@ -10,6 +10,12 @@ model = "Rockchip RK3328 EVB"; compatible = "rockchip,rk3328-evb", "rockchip,rk3328"; + aliases { + mmc0 = &sdmmc; + mmc1 = &sdio; + mmc2 = &emmc; + }; + chosen { stdout-path = "serial2:1500000n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts index faf496d789cf..f807bc066ccb 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts @@ -13,6 +13,10 @@ model = 
"FriendlyElec NanoPi R2S"; compatible = "friendlyarm,nanopi-r2s", "rockchip,rk3328"; + aliases { + mmc0 = &sdmmc; + }; + chosen { stdout-path = "serial2:1500000n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts index 19959bfba451..a05732b59f38 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts @@ -10,6 +10,11 @@ model = "Firefly roc-rk3328-cc"; compatible = "firefly,roc-rk3328-cc", "rockchip,rk3328"; + aliases { + mmc0 = &sdmmc; + mmc1 = &emmc; + }; + chosen { stdout-path = "serial2:1500000n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts index 2d71ca7e429c..c7e31efdd2e1 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts @@ -20,6 +20,11 @@ model = "Radxa ROCK Pi E"; compatible = "radxa,rockpi-e", "rockchip,rk3328"; + aliases { + mmc0 = &sdmmc; + mmc1 = &emmc; + }; + chosen { stdout-path = "serial2:1500000n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts index c984662043da..3bef1f39bc6e 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts @@ -10,6 +10,11 @@ model = "Pine64 Rock64"; compatible = "pine64,rock64", "rockchip,rk3328"; + aliases { + mmc0 = &sdmmc; + mmc1 = &emmc; + }; + chosen { stdout-path = "serial2:1500000n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index 063ed0adbec4..3ed69ecbcf3c 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi @@ -27,9 +27,6 @@ i2c1 = &i2c1; i2c2 = &i2c2; i2c3 = &i2c3; - mmc0 = &sdmmc; - mmc1 = &sdio; - mmc2 = &emmc; ethernet0 = &gmac2io; ethernet1 = &gmac2phy; }; @@ -438,7 +435,7 @@ }; wdt: watchdog@ff1a0000 { - compatible = "snps,dw-wdt"; + compatible = "rockchip,rk3328-wdt", "snps,dw-wdt"; reg = <0x0 0xff1a0000 0x0 0x100>; interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>; clocks = <&cru PCLK_WDT>; @@ -919,8 +916,8 @@ "mac_clk_tx", "clk_mac_ref", "aclk_mac", "pclk_mac", "clk_macphy"; - resets = <&cru SRST_GMAC2PHY_A>, <&cru SRST_MACPHY>; - reset-names = "stmmaceth", "mac-phy"; + resets = <&cru SRST_GMAC2PHY_A>; + reset-names = "stmmaceth"; phy-mode = "rmii"; phy-handle = <&phy>; snps,txpbl = <0x4>; @@ -980,6 +977,25 @@ status = "disabled"; }; + usbdrd3: usb@ff600000 { + compatible = "rockchip,rk3328-dwc3", "snps,dwc3"; + reg = <0x0 0xff600000 0x0 0x100000>; + interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cru SCLK_USB3OTG_REF>, <&cru SCLK_USB3OTG_SUSPEND>, + <&cru ACLK_USB3OTG>; + clock-names = "ref_clk", "suspend_clk", + "bus_clk"; + dr_mode = "otg"; + phy_type = "utmi_wide"; + snps,dis-del-phy-power-chg-quirk; + snps,dis_enblslpm_quirk; + snps,dis-tx-ipgap-linecheck-quirk; + snps,dis-u2-freeclk-exists-quirk; + snps,dis_u2_susphy_quirk; + snps,dis_u3_susphy_quirk; + status = "disabled"; + }; + gic: interrupt-controller@ff811000 { compatible = "arm,gic-400"; #interrupt-cells = <3>; diff --git a/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi index 87fabc64cc39..15d1fc541c38 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi @@ -8,6 +8,10 @@ #include "rk3368.dtsi" / { + aliases { + mmc0 = &emmc; + }; + chosen { stdout-path = "serial2:115200n8"; }; 
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts index 46357d1d77cd..62aa97a0b8c9 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts +++ b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts @@ -11,6 +11,10 @@ model = "GeekBox"; compatible = "geekbuying,geekbox", "rockchip,rk3368"; + aliases { + mmc0 = &emmc; + }; + chosen { stdout-path = "serial2:115200n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3368-lion-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3368-lion-haikou.dts index 7fcb1eacea8a..cae01d35b93d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-lion-haikou.dts +++ b/arch/arm64/boot/dts/rockchip/rk3368-lion-haikou.dts @@ -10,6 +10,10 @@ model = "Theobroma Systems RK3368-uQ7 Baseboard"; compatible = "tsd,rk3368-lion-haikou", "rockchip,rk3368"; + aliases { + mmc1 = &sdmmc; + }; + chosen { stdout-path = "serial0:115200n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi index 24d28be4736c..bcd7977fb0f8 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi @@ -7,6 +7,10 @@ #include "rk3368.dtsi" / { + aliases { + mmc0 = &emmc; + }; + chosen { stdout-path = "serial0:115200n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts index ecce16ecc9c3..3ebe15e03cf4 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts +++ b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts @@ -11,6 +11,11 @@ model = "Rockchip Orion R68"; compatible = "tronsmart,orion-r68-meta", "rockchip,rk3368"; + aliases { + mmc0 = &sdmmc; + mmc1 = &emmc; + }; + chosen { stdout-path = "serial2:115200n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts b/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts index 5ffd7b4d3036..5ccaa5f7a370 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts +++ b/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts @@ -11,6 +11,11 @@ model = "Rockchip PX5 EVB"; compatible = "rockchip,px5-evb", "rockchip,px5", "rockchip,rk3368"; + aliases { + mmc0 = &sdmmc; + mmc1 = &emmc; + }; + chosen { stdout-path = "serial4:115200n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts index 2582fa4b90e2..959d3cc801f2 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts +++ b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts @@ -11,6 +11,11 @@ model = "Rockchip R88"; compatible = "rockchip,r88", "rockchip,rk3368"; + aliases { + mmc0 = &sdio0; + mmc1 = &emmc; + }; + chosen { stdout-path = "serial2:115200n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi index 7af68ec3feae..dfc6376171d0 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi @@ -25,9 +25,6 @@ i2c3 = &i2c3; i2c4 = &i2c4; i2c5 = &i2c5; - mmc0 = &sdmmc; - mmc1 = &sdio0; - mmc2 = &emmc; serial0 = &uart0; serial1 = &uart1; serial2 = &uart2; @@ -561,7 +558,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm0_pin>; clocks = <&cru PCLK_PWM1>; - clock-names = "pwm"; status = "disabled"; }; @@ -572,7 +568,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm1_pin>; clocks = <&cru PCLK_PWM1>; - clock-names = "pwm"; status = "disabled"; }; @@ -581,7 +576,6 @@ reg = <0x0 0xff680020 0x0 0x10>; #pwm-cells = <3>; clocks = <&cru PCLK_PWM1>; - clock-names = "pwm"; status = "disabled"; }; @@ -592,7 +586,6 @@ 
pinctrl-names = "default"; pinctrl-0 = <&pwm3_pin>; clocks = <&cru PCLK_PWM1>; - clock-names = "pwm"; status = "disabled"; }; @@ -667,7 +660,7 @@ status = "disabled"; }; - timer@ff810000 { + timer0: timer@ff810000 { compatible = "rockchip,rk3368-timer", "rockchip,rk3288-timer"; reg = <0x0 0xff810000 0x0 0x20>; interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts index 694b0d08d644..7b717ebec8ff 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts @@ -11,6 +11,10 @@ model = "Rockchip RK3399 Evaluation Board"; compatible = "rockchip,rk3399-evb", "rockchip,rk3399"; + aliases { + mmc0 = &sdhci; + }; + backlight: backlight { compatible = "pwm-backlight"; brightness-levels = < diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts index 6db18808b9c5..45254be1350d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts @@ -13,6 +13,12 @@ model = "Firefly-RK3399 Board"; compatible = "firefly,firefly-rk3399", "rockchip,rk3399"; + aliases { + mmc0 = &sdio0; + mmc1 = &sdmmc; + mmc2 = &sdhci; + }; + chosen { stdout-path = "serial2:1500000n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi index 32dcaf210085..4002742fed4c 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi @@ -10,6 +10,11 @@ #include "rk3399-op1-opp.dtsi" / { + aliases { + mmc0 = &sdmmc; + mmc1 = &sdhci; + }; + chosen { stdout-path = "serial2:115200n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts b/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts index 341d074ed996..bee45c17e2ca 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts @@ -9,6 +9,12 @@ model = "Hugsun X99 TV BOX"; compatible = "hugsun,x99", "rockchip,rk3399"; + aliases { + mmc0 = &sdio0; + mmc1 = &sdmmc; + mmc2 = &sdhci; + }; + chosen { stdout-path = "serial2:1500000n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi index 635afdd99122..d5c7648c841d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi @@ -11,6 +11,12 @@ #include "rk3399-opp.dtsi" / { + aliases { + mmc0 = &sdio0; + mmc1 = &sdmmc; + mmc2 = &sdhci; + }; + chosen { stdout-path = "serial2:1500000n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts index 66c725a34220..19485b552bc4 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts @@ -18,6 +18,11 @@ model = "Kobol Helios64"; compatible = "kobol,helios64", "rockchip,rk3399"; + aliases { + mmc0 = &sdmmc; + mmc1 = &sdhci; + }; + avdd_1v8_s0: avdd-1v8-s0 { compatible = "regulator-fixed"; regulator-name = "avdd_1v8_s0"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts b/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts index 1fa80ac15464..7c93f840bc64 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts @@ -13,6 +13,12 @@ model = "Leez RK3399 P710"; compatible = "leez,p710", "rockchip,rk3399"; + aliases { + mmc0 = 
&sdio0; + mmc1 = &sdmmc; + mmc2 = &sdhci; + }; + chosen { stdout-path = "serial2:1500000n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts new file mode 100644 index 000000000000..fa5809887643 --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * FriendlyElec NanoPC-T4 board device tree source + * + * Copyright (c) 2020 FriendlyElec Computer Tech. Co., Ltd. + * (http://www.friendlyarm.com) + * + * Copyright (c) 2018 Collabora Ltd. + * + * Copyright (c) 2020 Jensen Huang <jensenhuang@friendlyarm.com> + * Copyright (c) 2020 Marty Jones <mj8263788@gmail.com> + * Copyright (c) 2021 Tianling Shen <cnsztl@gmail.com> + */ + +/dts-v1/; +#include "rk3399-nanopi4.dtsi" + +/ { + model = "FriendlyElec NanoPi R4S"; + compatible = "friendlyarm,nanopi-r4s", "rockchip,rk3399"; + + /delete-node/ display-subsystem; + + gpio-leds { + pinctrl-0 = <&lan_led_pin>, <&sys_led_pin>, <&wan_led_pin>; + + /delete-node/ led-0; + + lan_led: led-lan { + gpios = <&gpio1 RK_PA1 GPIO_ACTIVE_HIGH>; + label = "green:lan"; + }; + + sys_led: led-sys { + gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>; + label = "red:sys"; + default-state = "on"; + }; + + wan_led: led-wan { + gpios = <&gpio1 RK_PA0 GPIO_ACTIVE_HIGH>; + label = "green:wan"; + }; + }; + + gpio-keys { + pinctrl-0 = <&reset_button_pin>; + + /delete-node/ power; + + reset { + debounce-interval = <50>; + gpios = <&gpio1 RK_PC6 GPIO_ACTIVE_LOW>; + label = "reset"; + linux,code = <KEY_RESTART>; + }; + }; + + vdd_5v: vdd-5v { + compatible = "regulator-fixed"; + regulator-name = "vdd_5v"; + regulator-always-on; + regulator-boot-on; + }; +}; + +&emmc_phy { + status = "disabled"; +}; + +&i2c4 { + status = "disabled"; +}; + +&pcie0 { + max-link-speed = <1>; + num-lanes = <1>; + vpcie3v3-supply = <&vcc3v3_sys>; +}; + +&pinctrl { + gpio-leds { + /delete-node/ status-led-pin; + + lan_led_pin: lan-led-pin { + rockchip,pins = <1 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + sys_led_pin: sys-led-pin { + rockchip,pins = <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + wan_led_pin: wan-led-pin { + rockchip,pins = <1 RK_PA0 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + rockchip-key { + /delete-node/ power-key; + + reset_button_pin: reset-button-pin { + rockchip,pins = <1 RK_PC6 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; +}; + +&sdhci { + status = "disabled"; +}; + +&sdio0 { + status = "disabled"; +}; + +&u2phy0_host { + phy-supply = <&vdd_5v>; +}; + +&u2phy1_host { + status = "disabled"; +}; + +&uart0 { + status = "disabled"; +}; + +&usbdrd_dwc3_0 { + dr_mode = "host"; +}; + +&vcc3v3_sys { + vin-supply = <&vcc5v0_sys>; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi index 48ed4aaa37f3..16fd58c4a80f 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi @@ -17,6 +17,12 @@ #include "rk3399-opp.dtsi" / { + aliases { + mmc0 = &sdio0; + mmc1 = &sdmmc; + mmc2 = &sdhci; + }; + chosen { stdout-path = "serial2:1500000n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi index d6f1095abb04..da41cd81ebb7 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi @@ -10,28 +10,28 @@ opp00 { opp-hz = /bits/ 64 <408000000>; - opp-microvolt = <800000>; + opp-microvolt = <825000 825000 1250000>; clock-latency-ns 
= <40000>; }; opp01 { opp-hz = /bits/ 64 <600000000>; - opp-microvolt = <800000>; + opp-microvolt = <825000 825000 1250000>; }; opp02 { opp-hz = /bits/ 64 <816000000>; - opp-microvolt = <850000>; + opp-microvolt = <850000 850000 1250000>; }; opp03 { opp-hz = /bits/ 64 <1008000000>; - opp-microvolt = <925000>; + opp-microvolt = <925000 925000 1250000>; }; opp04 { opp-hz = /bits/ 64 <1200000000>; - opp-microvolt = <1000000>; + opp-microvolt = <1000000 1000000 1250000>; }; opp05 { opp-hz = /bits/ 64 <1416000000>; - opp-microvolt = <1125000>; + opp-microvolt = <1125000 1125000 1250000>; }; }; @@ -41,36 +41,36 @@ opp00 { opp-hz = /bits/ 64 <408000000>; - opp-microvolt = <800000>; + opp-microvolt = <825000 825000 1250000>; clock-latency-ns = <40000>; }; opp01 { opp-hz = /bits/ 64 <600000000>; - opp-microvolt = <800000>; + opp-microvolt = <825000 825000 1250000>; }; opp02 { opp-hz = /bits/ 64 <816000000>; - opp-microvolt = <825000>; + opp-microvolt = <825000 825000 1250000>; }; opp03 { opp-hz = /bits/ 64 <1008000000>; - opp-microvolt = <875000>; + opp-microvolt = <875000 875000 1250000>; }; opp04 { opp-hz = /bits/ 64 <1200000000>; - opp-microvolt = <950000>; + opp-microvolt = <950000 950000 1250000>; }; opp05 { opp-hz = /bits/ 64 <1416000000>; - opp-microvolt = <1025000>; + opp-microvolt = <1025000 1025000 1250000>; }; opp06 { opp-hz = /bits/ 64 <1608000000>; - opp-microvolt = <1100000>; + opp-microvolt = <1100000 1100000 1250000>; }; opp07 { opp-hz = /bits/ 64 <1800000000>; - opp-microvolt = <1200000>; + opp-microvolt = <1200000 1200000 1250000>; }; }; @@ -79,27 +79,27 @@ opp00 { opp-hz = /bits/ 64 <200000000>; - opp-microvolt = <800000>; + opp-microvolt = <825000 825000 1150000>; }; opp01 { opp-hz = /bits/ 64 <297000000>; - opp-microvolt = <800000>; + opp-microvolt = <825000 825000 1150000>; }; opp02 { opp-hz = /bits/ 64 <400000000>; - opp-microvolt = <825000>; + opp-microvolt = <825000 825000 1150000>; }; opp03 { opp-hz = /bits/ 64 <500000000>; - opp-microvolt = <875000>; + opp-microvolt = <875000 875000 1150000>; }; opp04 { opp-hz = /bits/ 64 <600000000>; - opp-microvolt = <925000>; + opp-microvolt = <925000 925000 1150000>; }; opp05 { opp-hz = /bits/ 64 <800000000>; - opp-microvolt = <1100000>; + opp-microvolt = <1100000 1100000 1150000>; }; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts index ad7c4d00888f..04b54abea3cc 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts @@ -15,6 +15,12 @@ model = "Orange Pi RK3399 Board"; compatible = "rockchip,rk3399-orangepi", "rockchip,rk3399"; + aliases { + mmc0 = &sdio0; + mmc1 = &sdmmc; + mmc2 = &sdhci; + }; + chosen { stdout-path = "serial2:1500000n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts index 219b7507a10f..2b5f001ff4a6 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts @@ -18,6 +18,12 @@ model = "Pine64 Pinebook Pro"; compatible = "pine64,pinebook-pro", "rockchip,rk3399"; + aliases { + mmc0 = &sdio0; + mmc1 = &sdmmc; + mmc2 = &sdhci; + }; + chosen { stdout-path = "serial2:1500000n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts index a8d363568fd6..292bb7e80cf3 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts 
@@ -10,6 +10,10 @@ model = "Theobroma Systems RK3399-Q7 SoM"; compatible = "tsd,rk3399-puma-haikou", "rockchip,rk3399"; + aliases { + mmc1 = &sdmmc; + }; + chosen { stdout-path = "serial0:115200n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi index 4660416c8f38..fb67db4619ea 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi @@ -8,6 +8,10 @@ #include "rk3399-opp.dtsi" / { + aliases { + mmc0 = &sdhci; + }; + leds { compatible = "gpio-leds"; pinctrl-names = "default"; @@ -21,57 +25,6 @@ }; }; - /* - * Overwrite the opp-table for CPUB as this board uses a different - * regulator (FAN53555) that only allows 10mV steps and therefore - * can't reach the operation point target voltages from rk3399-opp.dtsi - */ - /delete-node/ opp-table1; - cluster1_opp: opp-table1 { - compatible = "operating-points-v2"; - opp-shared; - - opp00 { - opp-hz = /bits/ 64 <408000000>; - opp-microvolt = <800000>; - clock-latency-ns = <40000>; - }; - opp01 { - opp-hz = /bits/ 64 <600000000>; - opp-microvolt = <800000>; - }; - opp02 { - opp-hz = /bits/ 64 <816000000>; - opp-microvolt = <830000>; - opp-suspend; - }; - opp03 { - opp-hz = /bits/ 64 <1008000000>; - opp-microvolt = <880000>; - }; - opp04 { - opp-hz = /bits/ 64 <1200000000>; - opp-microvolt = <950000>; - }; - opp05 { - opp-hz = /bits/ 64 <1416000000>; - opp-microvolt = <1030000>; - }; - opp06 { - opp-hz = /bits/ 64 <1608000000>; - opp-microvolt = <1100000>; - }; - opp07 { - opp-hz = /bits/ 64 <1800000000>; - opp-microvolt = <1200000>; - }; - opp08 { - opp-hz = /bits/ 64 <1992000000>; - opp-microvolt = <1230000>; - turbo-mode; - }; - }; - clkin_gmac: external-gmac-clock { compatible = "fixed-clock"; clock-frequency = <125000000>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-mezzanine.dts b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-mezzanine.dts index 754627d97144..9447c8724b65 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-mezzanine.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-mezzanine.dts @@ -11,6 +11,10 @@ model = "Firefly ROC-RK3399-PC Mezzanine Board"; compatible = "firefly,roc-rk3399-pc-mezzanine", "rockchip,rk3399"; + aliases { + mmc2 = &sdio0; + }; + /* MP8009 PoE PD */ poe_12v: poe-12v { compatible = "regulator-fixed"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi index 20309076dbac..c172f5a803e7 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi @@ -13,6 +13,11 @@ model = "Firefly ROC-RK3399-PC Board"; compatible = "firefly,roc-rk3399-pc", "rockchip,rk3399"; + aliases { + mmc0 = &sdmmc; + mmc1 = &sdhci; + }; + chosen { stdout-path = "serial2:1500000n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi index fb7599f07af4..7d0a7c697703 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi @@ -11,6 +11,11 @@ #include "rk3399-opp.dtsi" / { + aliases { + mmc0 = &sdmmc; + mmc1 = &sdhci; + }; + chosen { stdout-path = "serial2:1500000n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b.dts index f0055ce2fda0..6c63e617063c 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b.dts @@ -10,6 +10,10 @@ / { model = "Radxa 
ROCK Pi 4B"; compatible = "radxa,rockpi4b", "radxa,rockpi4", "rockchip,rk3399"; + + aliases { + mmc2 = &sdio0; + }; }; &sdio0 { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4c.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4c.dts index 4c7ebb1c5d2d..99169bcd51c0 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4c.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4c.dts @@ -11,6 +11,10 @@ / { model = "Radxa ROCK Pi 4C"; compatible = "radxa,rockpi4c", "radxa,rockpi4", "rockchip,rk3399"; + + aliases { + mmc2 = &sdio0; + }; }; &sdio0 { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi index 5e3ac589bc54..25dc61c26a94 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi @@ -9,6 +9,12 @@ #include "rk3399-opp.dtsi" / { + aliases { + mmc0 = &sdio0; + mmc1 = &sdmmc; + mmc2 = &sdhci; + }; + sdio_pwrseq: sdio-pwrseq { compatible = "mmc-pwrseq-simple"; clocks = <&rk808 1>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi index 5ab0b9edfc88..6bff8db7d33e 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi @@ -10,6 +10,12 @@ #include "rk3399-opp.dtsi" / { + aliases { + mmc0 = &sdio0; + mmc1 = &sdmmc; + mmc2 = &sdhci; + }; + chosen { stdout-path = "serial2:1500000n8"; }; @@ -36,6 +42,13 @@ }; }; + ir-receiver { + compatible = "gpio-ir-receiver"; + gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_LOW>; + pinctrl-0 = <&ir_int>; + pinctrl-names = "default"; + }; + leds { compatible = "gpio-leds"; pinctrl-names = "default"; @@ -604,6 +617,12 @@ }; }; + ir { + ir_int: ir-int { + rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + leds { work_led_pin: work-led-pin { rockchip,pins = <0 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts index 73e269a8ae0c..f6b2199a42bd 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts @@ -10,6 +10,10 @@ model = "Excavator-RK3399 Board"; compatible = "rockchip,rk3399-sapphire-excavator", "rockchip,rk3399"; + aliases { + mmc2 = &sdio0; + }; + adc-keys { compatible = "adc-keys"; io-channels = <&saradc 1>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi index 701a567d7638..46b0f97a0b1c 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi @@ -11,6 +11,11 @@ / { compatible = "rockchip,rk3399-sapphire", "rockchip,rk3399"; + aliases { + mmc0 = &sdmmc; + mmc1 = &sdhci; + }; + chosen { stdout-path = "serial2:1500000n8"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi index edbbf35fe19e..634a91af8e83 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi @@ -29,9 +29,6 @@ i2c6 = &i2c6; i2c7 = &i2c7; i2c8 = &i2c8; - mmc0 = &sdio0; - mmc1 = &sdmmc; - mmc2 = &sdhci; serial0 = &uart0; serial1 = &uart1; serial2 = &uart2; @@ -1185,7 +1182,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm0_pin>; clocks = <&pmucru PCLK_RKPWM_PMU>; - clock-names = "pwm"; status = "disabled"; }; @@ -1196,7 +1192,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm1_pin>; clocks = <&pmucru 
PCLK_RKPWM_PMU>; - clock-names = "pwm"; status = "disabled"; }; @@ -1207,7 +1202,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm2_pin>; clocks = <&pmucru PCLK_RKPWM_PMU>; - clock-names = "pwm"; status = "disabled"; }; @@ -1218,7 +1212,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm3a_pin>; clocks = <&pmucru PCLK_RKPWM_PMU>; - clock-names = "pwm"; status = "disabled"; }; @@ -1531,7 +1524,7 @@ }; watchdog@ff848000 { - compatible = "snps,dw-wdt"; + compatible = "rockchip,rk3399-wdt", "snps,dw-wdt"; reg = <0x0 0xff848000 0x0 0x100>; clocks = <&cru PCLK_WDT>; interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH 0>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi b/arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi index 7257494d2831..c0074b3ed4af 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi @@ -12,6 +12,11 @@ / { compatible = "vamrs,rk3399pro-vmarc-som", "rockchip,rk3399pro"; + aliases { + mmc0 = &sdmmc; + mmc1 = &sdhci; + }; + vcc3v3_pcie: vcc-pcie-regulator { compatible = "regulator-fixed"; enable-active-high; diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi index a87b8a678719..8f2c1c1e2c64 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi +++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi @@ -734,7 +734,7 @@ clocks = <&sys_clk 6>; reset-names = "ether"; resets = <&sys_rst 6>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; local-mac-address = [00 00 00 00 00 00]; socionext,syscon-phy-mode = <&soc_glue 0>; diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi index 0e52dadf54b3..be97da132258 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi +++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi @@ -564,7 +564,7 @@ clocks = <&sys_clk 6>; reset-names = "ether"; resets = <&sys_rst 6>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; local-mac-address = [00 00 00 00 00 00]; socionext,syscon-phy-mode = <&soc_glue 0>; @@ -585,7 +585,7 @@ clocks = <&sys_clk 7>; reset-names = "ether"; resets = <&sys_rst 7>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; local-mac-address = [00 00 00 00 00 00]; socionext,syscon-phy-mode = <&soc_glue 1>; diff --git a/arch/arm64/boot/dts/ti/Makefile b/arch/arm64/boot/dts/ti/Makefile index 65506f21ba30..d56c742f5a10 100644 --- a/arch/arm64/boot/dts/ti/Makefile +++ b/arch/arm64/boot/dts/ti/Makefile @@ -3,11 +3,17 @@ # Make file to build device tree binaries for boards based on # Texas Instruments Inc processors # -# Copyright (C) 2016-2020 Texas Instruments Incorporated - https://www.ti.com/ +# Copyright (C) 2016-2021 Texas Instruments Incorporated - https://www.ti.com/ # dtb-$(CONFIG_ARCH_K3) += k3-am654-base-board.dtb +dtb-$(CONFIG_ARCH_K3) += k3-am6528-iot2050-basic.dtb +dtb-$(CONFIG_ARCH_K3) += k3-am6548-iot2050-advanced.dtb dtb-$(CONFIG_ARCH_K3) += k3-j721e-common-proc-board.dtb dtb-$(CONFIG_ARCH_K3) += k3-j7200-common-proc-board.dtb + +dtb-$(CONFIG_ARCH_K3) += k3-am642-evm.dtb + +dtb-$(CONFIG_ARCH_K3) += k3-am642-sk.dtb diff --git a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi new file mode 100644 index 000000000000..b2bcbf23eefd --- /dev/null +++ b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi @@ -0,0 +1,675 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device Tree Source for AM642 SoC Family Main Domain peripherals + * + * Copyright (C) 2020-2021 Texas Instruments Incorporated - 
https://www.ti.com/ + */ + +&cbass_main { + oc_sram: sram@70000000 { + compatible = "mmio-sram"; + reg = <0x00 0x70000000 0x00 0x200000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x00 0x70000000 0x200000>; + + atf-sram@0 { + reg = <0x0 0x1a000>; + }; + }; + + gic500: interrupt-controller@1800000 { + compatible = "arm,gic-v3"; + #address-cells = <2>; + #size-cells = <2>; + ranges; + #interrupt-cells = <3>; + interrupt-controller; + reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */ + <0x00 0x01840000 0x00 0xC0000>; /* GICR */ + /* + * vcpumntirq: + * virtual CPU interface maintenance interrupt + */ + interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>; + + gic_its: msi-controller@1820000 { + compatible = "arm,gic-v3-its"; + reg = <0x00 0x01820000 0x00 0x10000>; + socionext,synquacer-pre-its = <0x1000000 0x400000>; + msi-controller; + #msi-cells = <1>; + }; + }; + + dmss: dmss { + compatible = "simple-mfd"; + #address-cells = <2>; + #size-cells = <2>; + dma-ranges; + ranges; + + ti,sci-dev-id = <25>; + + secure_proxy_main: mailbox@4d000000 { + compatible = "ti,am654-secure-proxy"; + #mbox-cells = <1>; + reg-names = "target_data", "rt", "scfg"; + reg = <0x00 0x4d000000 0x00 0x80000>, + <0x00 0x4a600000 0x00 0x80000>, + <0x00 0x4a400000 0x00 0x80000>; + interrupt-names = "rx_012"; + interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>; + }; + + inta_main_dmss: interrupt-controller@48000000 { + compatible = "ti,sci-inta"; + reg = <0x00 0x48000000 0x00 0x100000>; + #interrupt-cells = <0>; + interrupt-controller; + interrupt-parent = <&gic500>; + msi-controller; + ti,sci = <&dmsc>; + ti,sci-dev-id = <28>; + ti,interrupt-ranges = <4 68 36>; + ti,unmapped-event-sources = <&main_bcdma>, <&main_pktdma>; + }; + + main_bcdma: dma-controller@485c0100 { + compatible = "ti,am64-dmss-bcdma"; + reg = <0x00 0x485c0100 0x00 0x100>, + <0x00 0x4c000000 0x00 0x20000>, + <0x00 0x4a820000 0x00 0x20000>, + <0x00 0x4aa40000 0x00 0x20000>, + <0x00 0x4bc00000 0x00 0x100000>; + reg-names = "gcfg", "bchanrt", "rchanrt", "tchanrt", "ringrt"; + msi-parent = <&inta_main_dmss>; + #dma-cells = <3>; + + ti,sci = <&dmsc>; + ti,sci-dev-id = <26>; + ti,sci-rm-range-bchan = <0x20>; /* BLOCK_COPY_CHAN */ + ti,sci-rm-range-rchan = <0x21>; /* SPLIT_TR_RX_CHAN */ + ti,sci-rm-range-tchan = <0x22>; /* SPLIT_TR_TX_CHAN */ + }; + + main_pktdma: dma-controller@485c0000 { + compatible = "ti,am64-dmss-pktdma"; + reg = <0x00 0x485c0000 0x00 0x100>, + <0x00 0x4a800000 0x00 0x20000>, + <0x00 0x4aa00000 0x00 0x40000>, + <0x00 0x4b800000 0x00 0x400000>; + reg-names = "gcfg", "rchanrt", "tchanrt", "ringrt"; + msi-parent = <&inta_main_dmss>; + #dma-cells = <2>; + + ti,sci = <&dmsc>; + ti,sci-dev-id = <30>; + ti,sci-rm-range-tchan = <0x23>, /* UNMAPPED_TX_CHAN */ + <0x24>, /* CPSW_TX_CHAN */ + <0x25>, /* SAUL_TX_0_CHAN */ + <0x26>, /* SAUL_TX_1_CHAN */ + <0x27>, /* ICSSG_0_TX_CHAN */ + <0x28>; /* ICSSG_1_TX_CHAN */ + ti,sci-rm-range-tflow = <0x10>, /* RING_UNMAPPED_TX_CHAN */ + <0x11>, /* RING_CPSW_TX_CHAN */ + <0x12>, /* RING_SAUL_TX_0_CHAN */ + <0x13>, /* RING_SAUL_TX_1_CHAN */ + <0x14>, /* RING_ICSSG_0_TX_CHAN */ + <0x15>; /* RING_ICSSG_1_TX_CHAN */ + ti,sci-rm-range-rchan = <0x29>, /* UNMAPPED_RX_CHAN */ + <0x2b>, /* CPSW_RX_CHAN */ + <0x2d>, /* SAUL_RX_0_CHAN */ + <0x2f>, /* SAUL_RX_1_CHAN */ + <0x31>, /* SAUL_RX_2_CHAN */ + <0x33>, /* SAUL_RX_3_CHAN */ + <0x35>, /* ICSSG_0_RX_CHAN */ + <0x37>; /* ICSSG_1_RX_CHAN */ + ti,sci-rm-range-rflow = <0x2a>, /* FLOW_UNMAPPED_RX_CHAN */ + <0x2c>, /* FLOW_CPSW_RX_CHAN */ + <0x2e>, /* 
FLOW_SAUL_RX_0/1_CHAN */ + <0x32>, /* FLOW_SAUL_RX_2/3_CHAN */ + <0x36>, /* FLOW_ICSSG_0_RX_CHAN */ + <0x38>; /* FLOW_ICSSG_1_RX_CHAN */ + }; + }; + + dmsc: dmsc@44043000 { + compatible = "ti,k2g-sci"; + ti,host-id = <12>; + mbox-names = "rx", "tx"; + mboxes= <&secure_proxy_main 12>, + <&secure_proxy_main 13>; + reg-names = "debug_messages"; + reg = <0x00 0x44043000 0x00 0xfe0>; + + k3_pds: power-controller { + compatible = "ti,sci-pm-domain"; + #power-domain-cells = <2>; + }; + + k3_clks: clocks { + compatible = "ti,k2g-sci-clk"; + #clock-cells = <2>; + }; + + k3_reset: reset-controller { + compatible = "ti,sci-reset"; + #reset-cells = <2>; + }; + }; + + main_pmx0: pinctrl@f4000 { + compatible = "pinctrl-single"; + reg = <0x00 0xf4000 0x00 0x2d0>; + #pinctrl-cells = <1>; + pinctrl-single,register-width = <32>; + pinctrl-single,function-mask = <0xffffffff>; + }; + + main_conf: syscon@43000000 { + compatible = "syscon", "simple-mfd"; + reg = <0x00 0x43000000 0x00 0x20000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x00 0x00 0x43000000 0x20000>; + + chipid@14 { + compatible = "ti,am654-chipid"; + reg = <0x00000014 0x4>; + }; + + phy_gmii_sel: phy@4044 { + compatible = "ti,am654-phy-gmii-sel"; + reg = <0x4044 0x8>; + #phy-cells = <1>; + }; + }; + + main_uart0: serial@2800000 { + compatible = "ti,am64-uart", "ti,am654-uart"; + reg = <0x00 0x02800000 0x00 0x100>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>; + clock-frequency = <48000000>; + current-speed = <115200>; + power-domains = <&k3_pds 146 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 146 0>; + clock-names = "fclk"; + }; + + main_uart1: serial@2810000 { + compatible = "ti,am64-uart", "ti,am654-uart"; + reg = <0x00 0x02810000 0x00 0x100>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>; + clock-frequency = <48000000>; + current-speed = <115200>; + power-domains = <&k3_pds 152 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 152 0>; + clock-names = "fclk"; + }; + + main_uart2: serial@2820000 { + compatible = "ti,am64-uart", "ti,am654-uart"; + reg = <0x00 0x02820000 0x00 0x100>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>; + clock-frequency = <48000000>; + current-speed = <115200>; + power-domains = <&k3_pds 153 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 153 0>; + clock-names = "fclk"; + }; + + main_uart3: serial@2830000 { + compatible = "ti,am64-uart", "ti,am654-uart"; + reg = <0x00 0x02830000 0x00 0x100>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>; + clock-frequency = <48000000>; + current-speed = <115200>; + power-domains = <&k3_pds 154 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 154 0>; + clock-names = "fclk"; + }; + + main_uart4: serial@2840000 { + compatible = "ti,am64-uart", "ti,am654-uart"; + reg = <0x00 0x02840000 0x00 0x100>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>; + clock-frequency = <48000000>; + current-speed = <115200>; + power-domains = <&k3_pds 155 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 155 0>; + clock-names = "fclk"; + }; + + main_uart5: serial@2850000 { + compatible = "ti,am64-uart", "ti,am654-uart"; + reg = <0x00 0x02850000 0x00 0x100>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>; + clock-frequency = <48000000>; + current-speed = <115200>; + power-domains = <&k3_pds 156 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 156 0>; + clock-names = 
"fclk"; + }; + + main_uart6: serial@2860000 { + compatible = "ti,am64-uart", "ti,am654-uart"; + reg = <0x00 0x02860000 0x00 0x100>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>; + clock-frequency = <48000000>; + current-speed = <115200>; + power-domains = <&k3_pds 158 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 158 0>; + clock-names = "fclk"; + }; + + main_i2c0: i2c@20000000 { + compatible = "ti,am64-i2c", "ti,omap4-i2c"; + reg = <0x00 0x20000000 0x00 0x100>; + interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + power-domains = <&k3_pds 102 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 102 2>; + clock-names = "fck"; + }; + + main_i2c1: i2c@20010000 { + compatible = "ti,am64-i2c", "ti,omap4-i2c"; + reg = <0x00 0x20010000 0x00 0x100>; + interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + power-domains = <&k3_pds 103 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 103 2>; + clock-names = "fck"; + }; + + main_i2c2: i2c@20020000 { + compatible = "ti,am64-i2c", "ti,omap4-i2c"; + reg = <0x00 0x20020000 0x00 0x100>; + interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + power-domains = <&k3_pds 104 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 104 2>; + clock-names = "fck"; + }; + + main_i2c3: i2c@20030000 { + compatible = "ti,am64-i2c", "ti,omap4-i2c"; + reg = <0x00 0x20030000 0x00 0x100>; + interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + power-domains = <&k3_pds 105 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 105 2>; + clock-names = "fck"; + }; + + main_spi0: spi@20100000 { + compatible = "ti,am654-mcspi", "ti,omap4-mcspi"; + reg = <0x00 0x20100000 0x00 0x400>; + interrupts = <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + power-domains = <&k3_pds 141 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 141 0>; + dmas = <&main_pktdma 0xc300 0>, <&main_pktdma 0x4300 0>; + dma-names = "tx0", "rx0"; + }; + + main_spi1: spi@20110000 { + compatible = "ti,am654-mcspi","ti,omap4-mcspi"; + reg = <0x00 0x20110000 0x00 0x400>; + interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + power-domains = <&k3_pds 142 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 142 0>; + }; + + main_spi2: spi@20120000 { + compatible = "ti,am654-mcspi","ti,omap4-mcspi"; + reg = <0x00 0x20120000 0x00 0x400>; + interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + power-domains = <&k3_pds 143 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 143 0>; + }; + + main_spi3: spi@20130000 { + compatible = "ti,am654-mcspi","ti,omap4-mcspi"; + reg = <0x00 0x20130000 0x00 0x400>; + interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + power-domains = <&k3_pds 144 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 144 0>; + }; + + main_spi4: spi@20140000 { + compatible = "ti,am654-mcspi","ti,omap4-mcspi"; + reg = <0x00 0x20140000 0x00 0x400>; + interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + power-domains = <&k3_pds 145 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 145 0>; + }; + + main_gpio_intr: interrupt-controller0 { + compatible = "ti,sci-intr"; + ti,intr-trigger-type = <1>; + interrupt-controller; + interrupt-parent = <&gic500>; + #interrupt-cells = <1>; + ti,sci = <&dmsc>; + ti,sci-dev-id = <3>; + ti,interrupt-ranges = <0 32 16>; + }; + + main_gpio0: gpio@600000 
{ + compatible = "ti,am64-gpio", "ti,keystone-gpio"; + reg = <0x0 0x00600000 0x0 0x100>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&main_gpio_intr>; + interrupts = <190>, <191>, <192>, + <193>, <194>, <195>; + interrupt-controller; + #interrupt-cells = <2>; + ti,ngpio = <87>; + ti,davinci-gpio-unbanked = <0>; + power-domains = <&k3_pds 77 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 77 0>; + clock-names = "gpio"; + }; + + main_gpio1: gpio@601000 { + compatible = "ti,am64-gpio", "ti,keystone-gpio"; + reg = <0x0 0x00601000 0x0 0x100>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&main_gpio_intr>; + interrupts = <180>, <181>, <182>, + <183>, <184>, <185>; + interrupt-controller; + #interrupt-cells = <2>; + ti,ngpio = <88>; + ti,davinci-gpio-unbanked = <0>; + power-domains = <&k3_pds 78 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 78 0>; + clock-names = "gpio"; + }; + + sdhci0: mmc@fa10000 { + compatible = "ti,am64-sdhci-8bit"; + reg = <0x00 0xfa10000 0x00 0x260>, <0x00 0xfa18000 0x00 0x134>; + interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>; + power-domains = <&k3_pds 57 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 57 0>, <&k3_clks 57 1>; + clock-names = "clk_ahb", "clk_xin"; + mmc-ddr-1_8v; + mmc-hs200-1_8v; + mmc-hs400-1_8v; + ti,trm-icp = <0x2>; + ti,otap-del-sel-legacy = <0x0>; + ti,otap-del-sel-mmc-hs = <0x0>; + ti,otap-del-sel-ddr52 = <0x6>; + ti,otap-del-sel-hs200 = <0x7>; + ti,otap-del-sel-hs400 = <0x4>; + }; + + sdhci1: mmc@fa00000 { + compatible = "ti,am64-sdhci-4bit"; + reg = <0x00 0xfa00000 0x00 0x260>, <0x00 0xfa08000 0x00 0x134>; + interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>; + power-domains = <&k3_pds 58 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 58 3>, <&k3_clks 58 4>; + clock-names = "clk_ahb", "clk_xin"; + ti,trm-icp = <0x2>; + ti,otap-del-sel-legacy = <0x0>; + ti,otap-del-sel-sd-hs = <0xf>; + ti,otap-del-sel-sdr12 = <0xf>; + ti,otap-del-sel-sdr25 = <0xf>; + ti,otap-del-sel-sdr50 = <0xc>; + ti,otap-del-sel-sdr104 = <0x6>; + ti,otap-del-sel-ddr50 = <0x9>; + ti,clkbuf-sel = <0x7>; + }; + + cpsw3g: ethernet@8000000 { + compatible = "ti,am642-cpsw-nuss"; + #address-cells = <2>; + #size-cells = <2>; + reg = <0x0 0x8000000 0x0 0x200000>; + reg-names = "cpsw_nuss"; + ranges = <0x0 0x0 0x0 0x8000000 0x0 0x200000>; + clocks = <&k3_clks 13 0>; + assigned-clocks = <&k3_clks 13 1>; + assigned-clock-parents = <&k3_clks 13 9>; + clock-names = "fck"; + power-domains = <&k3_pds 13 TI_SCI_PD_EXCLUSIVE>; + + dmas = <&main_pktdma 0xC500 15>, + <&main_pktdma 0xC501 15>, + <&main_pktdma 0xC502 15>, + <&main_pktdma 0xC503 15>, + <&main_pktdma 0xC504 15>, + <&main_pktdma 0xC505 15>, + <&main_pktdma 0xC506 15>, + <&main_pktdma 0xC507 15>, + <&main_pktdma 0x4500 15>; + dma-names = "tx0", "tx1", "tx2", "tx3", "tx4", "tx5", "tx6", + "tx7", "rx"; + + ethernet-ports { + #address-cells = <1>; + #size-cells = <0>; + + cpsw_port1: port@1 { + reg = <1>; + ti,mac-only; + label = "port1"; + phys = <&phy_gmii_sel 1>; + mac-address = [00 00 de ad be ef]; + }; + + cpsw_port2: port@2 { + reg = <2>; + ti,mac-only; + label = "port2"; + phys = <&phy_gmii_sel 2>; + mac-address = [00 01 de ad be ef]; + }; + }; + + cpsw3g_mdio: mdio@f00 { + compatible = "ti,cpsw-mdio","ti,davinci_mdio"; + reg = <0x0 0xf00 0x0 0x100>; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&k3_clks 13 0>; + clock-names = "fck"; + bus_freq = <1000000>; + }; + + cpts@3d000 { + compatible = "ti,j721e-cpts"; + reg = <0x0 0x3d000 0x0 0x400>; + clocks = <&k3_clks 13 1>; + clock-names = "cpts"; + 
interrupts-extended = <&gic500 GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "cpts"; + ti,cpts-ext-ts-inputs = <4>; + ti,cpts-periodic-outputs = <2>; + }; + }; + + cpts@39000000 { + compatible = "ti,j721e-cpts"; + reg = <0x0 0x39000000 0x0 0x400>; + reg-names = "cpts"; + power-domains = <&k3_pds 84 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 84 0>; + clock-names = "cpts"; + assigned-clocks = <&k3_clks 84 0>; + assigned-clock-parents = <&k3_clks 84 8>; + interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "cpts"; + ti,cpts-periodic-outputs = <6>; + ti,cpts-ext-ts-inputs = <8>; + }; + + usbss0: cdns-usb@f900000{ + compatible = "ti,am64-usb"; + reg = <0x00 0xf900000 0x00 0x100>; + power-domains = <&k3_pds 161 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 161 9>, <&k3_clks 161 1>; + clock-names = "ref", "lpm"; + assigned-clocks = <&k3_clks 161 9>; /* USB2_REFCLK */ + assigned-clock-parents = <&k3_clks 161 10>; /* HF0SC0 */ + #address-cells = <2>; + #size-cells = <2>; + ranges; + usb0: usb@f400000{ + compatible = "cdns,usb3"; + reg = <0x00 0xf400000 0x00 0x10000>, + <0x00 0xf410000 0x00 0x10000>, + <0x00 0xf420000 0x00 0x10000>; + reg-names = "otg", + "xhci", + "dev"; + interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>, /* irq.0 */ + <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>, /* irq.6 */ + <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>; /* otgirq */ + interrupt-names = "host", + "peripheral", + "otg"; + maximum-speed = "super-speed"; + dr_mode = "otg"; + }; + }; + + tscadc0: tscadc@28001000 { + compatible = "ti,am654-tscadc", "ti,am3359-tscadc"; + reg = <0x00 0x28001000 0x00 0x1000>; + interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>; + power-domains = <&k3_pds 0 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 0 0>; + assigned-clocks = <&k3_clks 0 0>; + assigned-clock-parents = <&k3_clks 0 3>; + assigned-clock-rates = <60000000>; + clock-names = "adc_tsc_fck"; + + adc { + #io-channel-cells = <1>; + compatible = "ti,am654-adc", "ti,am3359-adc"; + }; + }; + + fss: bus@fc00000 { + compatible = "simple-bus"; + reg = <0x00 0x0fc00000 0x00 0x70000>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + + ospi0: spi@fc40000 { + compatible = "ti,am654-ospi", "cdns,qspi-nor"; + reg = <0x00 0x0fc40000 0x00 0x100>, + <0x05 0x00000000 0x01 0x00000000>; + interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>; + cdns,fifo-depth = <256>; + cdns,fifo-width = <4>; + cdns,trigger-address = <0x0>; + #address-cells = <0x1>; + #size-cells = <0x0>; + clocks = <&k3_clks 75 6>; + assigned-clocks = <&k3_clks 75 6>; + assigned-clock-parents = <&k3_clks 75 7>; + assigned-clock-rates = <166666666>; + power-domains = <&k3_pds 75 TI_SCI_PD_EXCLUSIVE>; + }; + }; + + hwspinlock: spinlock@2a000000 { + compatible = "ti,am64-hwspinlock"; + reg = <0x00 0x2a000000 0x00 0x1000>; + #hwlock-cells = <1>; + }; + + mailbox0_cluster2: mailbox@29020000 { + compatible = "ti,am64-mailbox"; + reg = <0x00 0x29020000 0x00 0x200>; + interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>; + #mbox-cells = <1>; + ti,mbox-num-users = <4>; + ti,mbox-num-fifos = <16>; + }; + + mailbox0_cluster3: mailbox@29030000 { + compatible = "ti,am64-mailbox"; + reg = <0x00 0x29030000 0x00 0x200>; + interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>; + #mbox-cells = <1>; + ti,mbox-num-users = <4>; + ti,mbox-num-fifos = <16>; + }; + + mailbox0_cluster4: mailbox@29040000 { + compatible = "ti,am64-mailbox"; + reg = <0x00 0x29040000 0x00 0x200>; + interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 85 
IRQ_TYPE_LEVEL_HIGH>; + #mbox-cells = <1>; + ti,mbox-num-users = <4>; + ti,mbox-num-fifos = <16>; + }; + + mailbox0_cluster5: mailbox@29050000 { + compatible = "ti,am64-mailbox"; + reg = <0x00 0x29050000 0x00 0x200>; + interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>; + #mbox-cells = <1>; + ti,mbox-num-users = <4>; + ti,mbox-num-fifos = <16>; + }; + + mailbox0_cluster6: mailbox@29060000 { + compatible = "ti,am64-mailbox"; + reg = <0x00 0x29060000 0x00 0x200>; + interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>; + #mbox-cells = <1>; + ti,mbox-num-users = <4>; + ti,mbox-num-fifos = <16>; + }; + + mailbox0_cluster7: mailbox@29070000 { + compatible = "ti,am64-mailbox"; + reg = <0x00 0x29070000 0x00 0x200>; + interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>; + #mbox-cells = <1>; + ti,mbox-num-users = <4>; + ti,mbox-num-fifos = <16>; + }; +}; diff --git a/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi new file mode 100644 index 000000000000..99e94dee1bd4 --- /dev/null +++ b/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device Tree Source for AM64 SoC Family MCU Domain peripherals + * + * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com/ + */ + +&cbass_mcu { + mcu_uart0: serial@4a00000 { + compatible = "ti,am64-uart", "ti,am654-uart"; + reg = <0x00 0x04a00000 0x00 0x100>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>; + clock-frequency = <48000000>; + current-speed = <115200>; + power-domains = <&k3_pds 149 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 149 0>; + clock-names = "fclk"; + }; + + mcu_uart1: serial@4a10000 { + compatible = "ti,am64-uart", "ti,am654-uart"; + reg = <0x00 0x04a10000 0x00 0x100>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>; + clock-frequency = <48000000>; + current-speed = <115200>; + power-domains = <&k3_pds 160 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 160 0>; + clock-names = "fclk"; + }; + + mcu_i2c0: i2c@4900000 { + compatible = "ti,am64-i2c", "ti,omap4-i2c"; + reg = <0x00 0x04900000 0x00 0x100>; + interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + power-domains = <&k3_pds 106 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 106 2>; + clock-names = "fck"; + }; + + mcu_i2c1: i2c@4910000 { + compatible = "ti,am64-i2c", "ti,omap4-i2c"; + reg = <0x00 0x04910000 0x00 0x100>; + interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + power-domains = <&k3_pds 107 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 107 2>; + clock-names = "fck"; + }; + + mcu_spi0: spi@4b00000 { + compatible = "ti,am654-mcspi", "ti,omap4-mcspi"; + reg = <0x00 0x04b00000 0x00 0x400>; + interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + power-domains = <&k3_pds 147 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 147 0>; + }; + + mcu_spi1: spi@4b10000 { + compatible = "ti,am654-mcspi","ti,omap4-mcspi"; + reg = <0x00 0x04b10000 0x00 0x400>; + interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + power-domains = <&k3_pds 148 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 148 0>; + }; + + mcu_gpio_intr: interrupt-controller1 { + compatible = "ti,sci-intr"; + ti,intr-trigger-type = <1>; + interrupt-controller; + interrupt-parent = <&gic500>; + #interrupt-cells = <1>; + ti,sci = <&dmsc>; + ti,sci-dev-id = <5>; + ti,interrupt-ranges 
= <0 104 4>; + }; + + mcu_gpio0: gpio@4201000 { + compatible = "ti,am64-gpio", "keystone-gpio"; + reg = <0x0 0x4201000 0x0 0x100>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&mcu_gpio_intr>; + interrupts = <30>, <31>; + interrupt-controller; + #interrupt-cells = <2>; + ti,ngpio = <23>; + ti,davinci-gpio-unbanked = <0>; + power-domains = <&k3_pds 79 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 79 0>; + clock-names = "gpio"; + }; +}; diff --git a/arch/arm64/boot/dts/ti/k3-am64.dtsi b/arch/arm64/boot/dts/ti/k3-am64.dtsi new file mode 100644 index 000000000000..de6805b0c72c --- /dev/null +++ b/arch/arm64/boot/dts/ti/k3-am64.dtsi @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device Tree Source for AM642 SoC Family + * + * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com/ + */ + +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/interrupt-controller/irq.h> +#include <dt-bindings/interrupt-controller/arm-gic.h> +#include <dt-bindings/pinctrl/k3.h> +#include <dt-bindings/soc/ti,sci_pm_domain.h> + +/ { + model = "Texas Instruments K3 AM642 SoC"; + compatible = "ti,am642"; + interrupt-parent = <&gic500>; + #address-cells = <2>; + #size-cells = <2>; + + aliases { + serial0 = &mcu_uart0; + serial1 = &mcu_uart1; + serial2 = &main_uart0; + serial3 = &main_uart1; + serial4 = &main_uart2; + serial5 = &main_uart3; + serial6 = &main_uart4; + serial7 = &main_uart5; + serial8 = &main_uart6; + ethernet0 = &cpsw_port1; + ethernet1 = &cpsw_port2; + }; + + chosen { }; + + firmware { + optee { + compatible = "linaro,optee-tz"; + method = "smc"; + }; + + psci: psci { + compatible = "arm,psci-1.0"; + method = "smc"; + }; + }; + + a53_timer0: timer-cl0-cpu0 { + compatible = "arm,armv8-timer"; + interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, /* cntpsirq */ + <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, /* cntpnsirq */ + <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, /* cntvirq */ + <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; /* cnthpirq */ + }; + + pmu: pmu { + compatible = "arm,cortex-a53-pmu"; + interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>; + }; + + cbass_main: bus@f4000 { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + ranges = <0x00 0x000f4000 0x00 0x000f4000 0x00 0x000002d0>, /* PINCTRL */ + <0x00 0x00600000 0x00 0x00600000 0x00 0x00001100>, /* GPIO */ + <0x00 0x00a40000 0x00 0x00a40000 0x00 0x00000800>, /* Timesync router */ + <0x00 0x01000000 0x00 0x01000000 0x00 0x02330400>, /* First peripheral window */ + <0x00 0x08000000 0x00 0x08000000 0x00 0x00200000>, /* Main CPSW */ + <0x00 0x0d000000 0x00 0x0d000000 0x00 0x00800000>, /* PCIE_CORE */ + <0x00 0x0f000000 0x00 0x0f000000 0x00 0x00c44200>, /* Second peripheral window */ + <0x00 0x20000000 0x00 0x20000000 0x00 0x0a008000>, /* Third peripheral window */ + <0x00 0x30000000 0x00 0x30000000 0x00 0x000bc100>, /* ICSSG0/1 */ + <0x00 0x37000000 0x00 0x37000000 0x00 0x00040000>, /* TIMERMGR0 TIMERS */ + <0x00 0x39000000 0x00 0x39000000 0x00 0x00000400>, /* CPTS0 */ + <0x00 0x3b000000 0x00 0x3b000000 0x00 0x00000400>, /* GPMC0_CFG */ + <0x00 0x3cd00000 0x00 0x3cd00000 0x00 0x00000200>, /* TIMERMGR0_CONFIG */ + <0x00 0x3f004000 0x00 0x3f004000 0x00 0x00000400>, /* GICSS0_REGS */ + <0x00 0x43000000 0x00 0x43000000 0x00 0x00020000>, /* CTRL_MMR0 */ + <0x00 0x44043000 0x00 0x44043000 0x00 0x00000fe0>, /* TI SCI DEBUG */ + <0x00 0x48000000 0x00 0x48000000 0x00 0x06400000>, /* DMASS */ + <0x00 0x50000000 0x00 0x50000000 0x00 0x08000000>, /* GPMC0 DATA */ + <0x00 0x60000000 0x00 0x60000000 0x00 0x08000000>, /* FSS0 
DAT1 */ + <0x00 0x68000000 0x00 0x68000000 0x00 0x08000000>, /* PCIe DAT0 */ + <0x00 0x70000000 0x00 0x70000000 0x00 0x00200000>, /* OC SRAM */ + <0x00 0x78000000 0x00 0x78000000 0x00 0x00800000>, /* Main R5FSS */ + <0x06 0x00000000 0x06 0x00000000 0x01 0x00000000>, /* PCIe DAT1 */ + <0x05 0x00000000 0x05 0x00000000 0x01 0x00000000>, /* FSS0 DAT3 */ + + /* MCU Domain Range */ + <0x00 0x04000000 0x00 0x04000000 0x00 0x01ff1400>; + + cbass_mcu: bus@4000000 { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + ranges = <0x00 0x04000000 0x00 0x04000000 0x00 0x01ff1400>; /* Peripheral window */ + }; + }; +}; + +/* Now include the peripherals for each bus segments */ +#include "k3-am64-main.dtsi" +#include "k3-am64-mcu.dtsi" diff --git a/arch/arm64/boot/dts/ti/k3-am642-evm.dts b/arch/arm64/boot/dts/ti/k3-am642-evm.dts new file mode 100644 index 000000000000..dad0efa961ed --- /dev/null +++ b/arch/arm64/boot/dts/ti/k3-am642-evm.dts @@ -0,0 +1,468 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com/ + */ + +/dts-v1/; + +#include <dt-bindings/leds/common.h> +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/net/ti-dp83867.h> +#include "k3-am642.dtsi" + +/ { + compatible = "ti,am642-evm", "ti,am642"; + model = "Texas Instruments AM642 EVM"; + + chosen { + stdout-path = "serial2:115200n8"; + bootargs = "console=ttyS2,115200n8 earlycon=ns16550a,mmio32,0x02800000"; + }; + + memory@80000000 { + device_type = "memory"; + /* 2G RAM */ + reg = <0x00000000 0x80000000 0x00000000 0x80000000>; + + }; + + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + secure_ddr: optee@9e800000 { + reg = <0x00 0x9e800000 0x00 0x01800000>; /* for OP-TEE */ + alignment = <0x1000>; + no-map; + }; + }; + + evm_12v0: fixedregulator-evm12v0 { + /* main DC jack */ + compatible = "regulator-fixed"; + regulator-name = "evm_12v0"; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + regulator-always-on; + regulator-boot-on; + }; + + vsys_5v0: fixedregulator-vsys5v0 { + /* output of LM5140 */ + compatible = "regulator-fixed"; + regulator-name = "vsys_5v0"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&evm_12v0>; + regulator-always-on; + regulator-boot-on; + }; + + vsys_3v3: fixedregulator-vsys3v3 { + /* output of LM5140 */ + compatible = "regulator-fixed"; + regulator-name = "vsys_3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&evm_12v0>; + regulator-always-on; + regulator-boot-on; + }; + + vdd_mmc1: fixed-regulator-sd { + /* TPS2051BD */ + compatible = "regulator-fixed"; + regulator-name = "vdd_mmc1"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + enable-active-high; + vin-supply = <&vsys_3v3>; + gpio = <&exp1 6 GPIO_ACTIVE_HIGH>; + }; + + vddb: fixedregulator-vddb { + compatible = "regulator-fixed"; + regulator-name = "vddb_3v3_display"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vsys_3v3>; + regulator-always-on; + regulator-boot-on; + }; + + leds { + compatible = "gpio-leds"; + + led-0 { + label = "am64-evm:red:heartbeat"; + gpios = <&exp1 16 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "heartbeat"; + function = LED_FUNCTION_HEARTBEAT; + default-state = "off"; + }; + }; + + mdio_mux: mux-controller { + compatible = "gpio-mux"; + #mux-control-cells = <0>; + + mux-gpios = 
<&exp1 12 GPIO_ACTIVE_HIGH>; + }; + + mdio-mux-1 { + compatible = "mdio-mux-multiplexer"; + mux-controls = <&mdio_mux>; + mdio-parent-bus = <&cpsw3g_mdio>; + #address-cells = <1>; + #size-cells = <0>; + + mdio@1 { + reg = <0x1>; + #address-cells = <1>; + #size-cells = <0>; + + cpsw3g_phy3: ethernet-phy@3 { + reg = <3>; + }; + }; + }; +}; + +&main_pmx0 { + main_mmc1_pins_default: main-mmc1-pins-default { + pinctrl-single,pins = < + AM64X_IOPAD(0x0294, PIN_INPUT_PULLUP, 0) /* (J19) MMC1_CMD */ + AM64X_IOPAD(0x028c, PIN_INPUT_PULLDOWN, 0) /* (L20) MMC1_CLK */ + AM64X_IOPAD(0x0288, PIN_INPUT_PULLUP, 0) /* (K21) MMC1_DAT0 */ + AM64X_IOPAD(0x0284, PIN_INPUT_PULLUP, 0) /* (L21) MMC1_DAT1 */ + AM64X_IOPAD(0x0280, PIN_INPUT_PULLUP, 0) /* (K19) MMC1_DAT2 */ + AM64X_IOPAD(0x027c, PIN_INPUT_PULLUP, 0) /* (K18) MMC1_DAT3 */ + AM64X_IOPAD(0x0298, PIN_INPUT_PULLUP, 0) /* (D19) MMC1_SDCD */ + AM64X_IOPAD(0x029c, PIN_INPUT, 0) /* (C20) MMC1_SDWP */ + AM64X_IOPAD(0x0290, PIN_INPUT, 0) /* MMC1_CLKLB */ + >; + }; + + main_uart0_pins_default: main-uart0-pins-default { + pinctrl-single,pins = < + AM64X_IOPAD(0x0238, PIN_INPUT, 0) /* (B16) UART0_CTSn */ + AM64X_IOPAD(0x023c, PIN_OUTPUT, 0) /* (A16) UART0_RTSn */ + AM64X_IOPAD(0x0230, PIN_INPUT, 0) /* (D15) UART0_RXD */ + AM64X_IOPAD(0x0234, PIN_OUTPUT, 0) /* (C16) UART0_TXD */ + >; + }; + + main_spi0_pins_default: main-spi0-pins-default { + pinctrl-single,pins = < + AM64X_IOPAD(0x0210, PIN_INPUT, 0) /* (D13) SPI0_CLK */ + AM64X_IOPAD(0x0208, PIN_OUTPUT, 0) /* (D12) SPI0_CS0 */ + AM64X_IOPAD(0x0214, PIN_OUTPUT, 0) /* (A13) SPI0_D0 */ + AM64X_IOPAD(0x0218, PIN_INPUT, 0) /* (A14) SPI0_D1 */ + >; + }; + + main_i2c1_pins_default: main-i2c1-pins-default { + pinctrl-single,pins = < + AM64X_IOPAD(0x0268, PIN_INPUT_PULLUP, 0) /* (C18) I2C1_SCL */ + AM64X_IOPAD(0x026c, PIN_INPUT_PULLUP, 0) /* (B19) I2C1_SDA */ + >; + }; + + mdio1_pins_default: mdio1-pins-default { + pinctrl-single,pins = < + AM64X_IOPAD(0x01fc, PIN_OUTPUT, 4) /* (R2) PRG0_PRU1_GPO19.MDIO0_MDC */ + AM64X_IOPAD(0x01f8, PIN_INPUT, 4) /* (P5) PRG0_PRU1_GPO18.MDIO0_MDIO */ + >; + }; + + rgmii1_pins_default: rgmii1-pins-default { + pinctrl-single,pins = < + AM64X_IOPAD(0x01cc, PIN_INPUT, 4) /* (W5) PRG0_PRU1_GPO7.RGMII1_RD0 */ + AM64X_IOPAD(0x01d4, PIN_INPUT, 4) /* (Y5) PRG0_PRU1_GPO9.RGMII1_RD1 */ + AM64X_IOPAD(0x01d8, PIN_INPUT, 4) /* (V6) PRG0_PRU1_GPO10.RGMII1_RD2 */ + AM64X_IOPAD(0x01f4, PIN_INPUT, 4) /* (V5) PRG0_PRU1_GPO17.RGMII1_RD3 */ + AM64X_IOPAD(0x0188, PIN_INPUT, 4) /* (AA5) PRG0_PRU0_GPO10.RGMII1_RXC */ + AM64X_IOPAD(0x0184, PIN_INPUT, 4) /* (W6) PRG0_PRU0_GPO9.RGMII1_RX_CTL */ + AM64X_IOPAD(0x0124, PIN_OUTPUT, 4) /* (V15) PRG1_PRU1_GPO7.RGMII1_TD0 */ + AM64X_IOPAD(0x012c, PIN_OUTPUT, 4) /* (V14) PRG1_PRU1_GPO9.RGMII1_TD1 */ + AM64X_IOPAD(0x0130, PIN_OUTPUT, 4) /* (W14) PRG1_PRU1_GPO10.RGMII1_TD2 */ + AM64X_IOPAD(0x014c, PIN_OUTPUT, 4) /* (AA14) PRG1_PRU1_GPO17.RGMII1_TD3 */ + AM64X_IOPAD(0x00e0, PIN_OUTPUT, 4) /* (U14) PRG1_PRU0_GPO10.RGMII1_TXC */ + AM64X_IOPAD(0x00dc, PIN_OUTPUT, 4) /* (U15) PRG1_PRU0_GPO9.RGMII1_TX_CTL */ + >; + }; + + rgmii2_pins_default: rgmii2-pins-default { + pinctrl-single,pins = < + AM64X_IOPAD(0x0108, PIN_INPUT, 4) /* (W11) PRG1_PRU1_GPO0.RGMII2_RD0 */ + AM64X_IOPAD(0x010c, PIN_INPUT, 4) /* (V11) PRG1_PRU1_GPO1.RGMII2_RD1 */ + AM64X_IOPAD(0x0110, PIN_INPUT, 4) /* (AA12) PRG1_PRU1_GPO2.RGMII2_RD2 */ + AM64X_IOPAD(0x0114, PIN_INPUT, 4) /* (Y12) PRG1_PRU1_GPO3.RGMII2_RD3 */ + AM64X_IOPAD(0x0120, PIN_INPUT, 4) /* (U11) PRG1_PRU1_GPO6.RGMII2_RXC */ + AM64X_IOPAD(0x0118, 
PIN_INPUT, 4) /* (W12) PRG1_PRU1_GPO4.RGMII2_RX_CTL */ + AM64X_IOPAD(0x0134, PIN_OUTPUT, 4) /* (AA10) PRG1_PRU1_GPO11.RGMII2_TD0 */ + AM64X_IOPAD(0x0138, PIN_OUTPUT, 4) /* (V10) PRG1_PRU1_GPO12.RGMII2_TD1 */ + AM64X_IOPAD(0x013c, PIN_OUTPUT, 4) /* (U10) PRG1_PRU1_GPO13.RGMII2_TD2 */ + AM64X_IOPAD(0x0140, PIN_OUTPUT, 4) /* (AA11) PRG1_PRU1_GPO14.RGMII2_TD3 */ + AM64X_IOPAD(0x0148, PIN_OUTPUT, 4) /* (Y10) PRG1_PRU1_GPO16.RGMII2_TXC */ + AM64X_IOPAD(0x0144, PIN_OUTPUT, 4) /* (Y11) PRG1_PRU1_GPO15.RGMII2_TX_CTL */ + >; + }; + + main_usb0_pins_default: main-usb0-pins-default { + pinctrl-single,pins = < + AM64X_IOPAD(0x02a8, PIN_OUTPUT, 0) /* (E19) USB0_DRVVBUS */ + >; + }; + + ospi0_pins_default: ospi0-pins-default { + pinctrl-single,pins = < + AM64X_IOPAD(0x0000, PIN_OUTPUT, 0) /* (N20) OSPI0_CLK */ + AM64X_IOPAD(0x002c, PIN_OUTPUT, 0) /* (L19) OSPI0_CSn0 */ + AM64X_IOPAD(0x000c, PIN_INPUT, 0) /* (M19) OSPI0_D0 */ + AM64X_IOPAD(0x0010, PIN_INPUT, 0) /* (M18) OSPI0_D1 */ + AM64X_IOPAD(0x0014, PIN_INPUT, 0) /* (M20) OSPI0_D2 */ + AM64X_IOPAD(0x0018, PIN_INPUT, 0) /* (M21) OSPI0_D3 */ + AM64X_IOPAD(0x001c, PIN_INPUT, 0) /* (P21) OSPI0_D4 */ + AM64X_IOPAD(0x0020, PIN_INPUT, 0) /* (P20) OSPI0_D5 */ + AM64X_IOPAD(0x0024, PIN_INPUT, 0) /* (N18) OSPI0_D6 */ + AM64X_IOPAD(0x0028, PIN_INPUT, 0) /* (M17) OSPI0_D7 */ + AM64X_IOPAD(0x0008, PIN_INPUT, 0) /* (N19) OSPI0_DQS */ + >; + }; +}; + +&main_uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&main_uart0_pins_default>; +}; + +/* main_uart1 is reserved for firmware usage */ +&main_uart1 { + status = "reserved"; +}; + +&main_uart2 { + status = "disabled"; +}; + +&main_uart3 { + status = "disabled"; +}; + +&main_uart4 { + status = "disabled"; +}; + +&main_uart5 { + status = "disabled"; +}; + +&main_uart6 { + status = "disabled"; +}; + +&mcu_uart0 { + status = "disabled"; +}; + +&mcu_uart1 { + status = "disabled"; +}; + +&main_i2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&main_i2c1_pins_default>; + clock-frequency = <400000>; + + exp1: gpio@22 { + compatible = "ti,tca6424"; + reg = <0x22>; + gpio-controller; + #gpio-cells = <2>; + gpio-line-names = "GPIO_eMMC_RSTn", "CAN_MUX_SEL", + "GPIO_CPSW1_RST", "GPIO_RGMII1_RST", + "GPIO_RGMII2_RST", "GPIO_PCIe_RST_OUT", + "MMC1_SD_EN", "FSI_FET_SEL", + "MCAN0_STB_3V3", "MCAN1_STB_3V3", + "CPSW_FET_SEL", "CPSW_FET2_SEL", + "PRG1_RGMII2_FET_SEL", "TEST_GPIO2", + "GPIO_OLED_RESETn", "VPP_LDO_EN", + "TEST_LED1", "TP92", "TP90", "TP88", + "TP87", "TP86", "TP89", "TP91"; + }; + + /* osd9616p0899-10 */ + display@3c { + compatible = "solomon,ssd1306fb-i2c"; + reg = <0x3c>; + reset-gpios = <&exp1 14 GPIO_ACTIVE_LOW>; + vbat-supply = <&vddb>; + solomon,height = <16>; + solomon,width = <96>; + solomon,com-seq; + solomon,com-invdir; + solomon,page-offset = <0>; + solomon,prechargep1 = <2>; + solomon,prechargep2 = <13>; + }; +}; + +/* mcu_gpio0 is reserved for mcu firmware usage */ +&mcu_gpio0 { + status = "reserved"; +}; + +&mcu_i2c0 { + status = "disabled"; +}; + +&mcu_i2c1 { + status = "disabled"; +}; + +&mcu_spi0 { + status = "disabled"; +}; + +&mcu_spi1 { + status = "disabled"; +}; + +&main_spi0 { + pinctrl-names = "default"; + pinctrl-0 = <&main_spi0_pins_default>; + ti,pindir-d0-out-d1-in = <1>; + eeprom@0 { + compatible = "microchip,93lc46b"; + reg = <0>; + spi-max-frequency = <1000000>; + spi-cs-high; + data-size = <16>; + }; +}; + +&sdhci0 { + /* emmc */ + bus-width = <8>; + non-removable; + ti,driver-strength-ohm = <50>; + disable-wp; +}; + +&sdhci1 { + /* SD/MMC */ + vmmc-supply = <&vdd_mmc1>; + pinctrl-names 
= "default"; + bus-width = <4>; + pinctrl-0 = <&main_mmc1_pins_default>; + ti,driver-strength-ohm = <50>; + disable-wp; +}; + +&usbss0 { + ti,vbus-divider; + ti,usb2-only; +}; + +&usb0 { + dr_mode = "otg"; + maximum-speed = "high-speed"; + pinctrl-names = "default"; + pinctrl-0 = <&main_usb0_pins_default>; +}; + +&cpsw3g { + pinctrl-names = "default"; + pinctrl-0 = <&mdio1_pins_default + &rgmii1_pins_default + &rgmii2_pins_default>; +}; + +&cpsw_port1 { + phy-mode = "rgmii-rxid"; + phy-handle = <&cpsw3g_phy0>; +}; + +&cpsw_port2 { + phy-mode = "rgmii-rxid"; + phy-handle = <&cpsw3g_phy3>; +}; + +&cpsw3g_mdio { + cpsw3g_phy0: ethernet-phy@0 { + reg = <0>; + ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>; + ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>; + }; +}; + +&tscadc0 { + /* ADC is reserved for R5 usage */ + status = "reserved"; +}; + +&ospi0 { + pinctrl-names = "default"; + pinctrl-0 = <&ospi0_pins_default>; + + flash@0{ + compatible = "jedec,spi-nor"; + reg = <0x0>; + spi-tx-bus-width = <8>; + spi-rx-bus-width = <8>; + spi-max-frequency = <25000000>; + cdns,tshsl-ns = <60>; + cdns,tsd2d-ns = <60>; + cdns,tchsh-ns = <60>; + cdns,tslch-ns = <60>; + cdns,read-delay = <4>; + #address-cells = <1>; + #size-cells = <1>; + }; +}; + +&mailbox0_cluster2 { + mbox_main_r5fss0_core0: mbox-main-r5fss0-core0 { + ti,mbox-rx = <0 0 2>; + ti,mbox-tx = <1 0 2>; + }; + + mbox_main_r5fss0_core1: mbox-main-r5fss0-core1 { + ti,mbox-rx = <2 0 2>; + ti,mbox-tx = <3 0 2>; + }; +}; + +&mailbox0_cluster3 { + status = "disabled"; +}; + +&mailbox0_cluster4 { + mbox_main_r5fss1_core0: mbox-main-r5fss1-core0 { + ti,mbox-rx = <0 0 2>; + ti,mbox-tx = <1 0 2>; + }; + + mbox_main_r5fss1_core1: mbox-main-r5fss1-core1 { + ti,mbox-rx = <2 0 2>; + ti,mbox-tx = <3 0 2>; + }; +}; + +&mailbox0_cluster5 { + status = "disabled"; +}; + +&mailbox0_cluster6 { + mbox_m4_0: mbox-m4-0 { + ti,mbox-rx = <0 0 2>; + ti,mbox-tx = <1 0 2>; + }; +}; + +&mailbox0_cluster7 { + status = "disabled"; +}; diff --git a/arch/arm64/boot/dts/ti/k3-am642-sk.dts b/arch/arm64/boot/dts/ti/k3-am642-sk.dts new file mode 100644 index 000000000000..8424cd071955 --- /dev/null +++ b/arch/arm64/boot/dts/ti/k3-am642-sk.dts @@ -0,0 +1,334 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com/ + */ + +/dts-v1/; + +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/net/ti-dp83867.h> +#include "k3-am642.dtsi" + +/ { + compatible = "ti,am642-sk", "ti,am642"; + model = "Texas Instruments AM642 SK"; + + chosen { + stdout-path = "serial2:115200n8"; + bootargs = "console=ttyS2,115200n8 earlycon=ns16550a,mmio32,0x02800000"; + }; + + memory@80000000 { + device_type = "memory"; + /* 2G RAM */ + reg = <0x00000000 0x80000000 0x00000000 0x80000000>; + + }; + + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + secure_ddr: optee@9e800000 { + reg = <0x00 0x9e800000 0x00 0x01800000>; /* for OP-TEE */ + alignment = <0x1000>; + no-map; + }; + }; + + vusb_main: fixed-regulator-vusb-main5v0 { + /* USB MAIN INPUT 5V DC */ + compatible = "regulator-fixed"; + regulator-name = "vusb_main5v0"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-always-on; + regulator-boot-on; + }; + + vcc_3v3_sys: fixedregulator-vcc-3v3-sys { + /* output of LP8733xx */ + compatible = "regulator-fixed"; + regulator-name = "vcc_3v3_sys"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vusb_main>; + regulator-always-on; 
+ regulator-boot-on; + }; + + vdd_mmc1: fixed-regulator-sd { + /* TPS2051BD */ + compatible = "regulator-fixed"; + regulator-name = "vdd_mmc1"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + enable-active-high; + vin-supply = <&vcc_3v3_sys>; + gpio = <&exp1 3 GPIO_ACTIVE_HIGH>; + }; +}; + +&main_pmx0 { + main_mmc1_pins_default: main-mmc1-pins-default { + pinctrl-single,pins = < + AM64X_IOPAD(0x0294, PIN_INPUT, 0) /* (J19) MMC1_CMD */ + AM64X_IOPAD(0x0290, PIN_INPUT, 0) /* (#N/A) MMC1_CLKLB */ + AM64X_IOPAD(0x028c, PIN_INPUT, 0) /* (L20) MMC1_CLK */ + AM64X_IOPAD(0x0288, PIN_INPUT, 0) /* (K21) MMC1_DAT0 */ + AM64X_IOPAD(0x0284, PIN_INPUT, 0) /* (L21) MMC1_DAT1 */ + AM64X_IOPAD(0x0280, PIN_INPUT, 0) /* (K19) MMC1_DAT2 */ + AM64X_IOPAD(0x027c, PIN_INPUT, 0) /* (K18) MMC1_DAT3 */ + AM64X_IOPAD(0x0298, PIN_INPUT, 0) /* (D19) MMC1_SDCD */ + >; + }; + + main_i2c1_pins_default: main-i2c1-pins-default { + pinctrl-single,pins = < + AM64X_IOPAD(0x0268, PIN_INPUT_PULLUP, 0) /* (C18) I2C1_SCL */ + AM64X_IOPAD(0x026c, PIN_INPUT_PULLUP, 0) /* (B19) I2C1_SDA */ + >; + }; + + mdio1_pins_default: mdio1-pins-default { + pinctrl-single,pins = < + AM64X_IOPAD(0x01fc, PIN_OUTPUT, 4) /* (R2) PRG0_PRU1_GPO19.MDIO0_MDC */ + AM64X_IOPAD(0x01f8, PIN_INPUT, 4) /* (P5) PRG0_PRU1_GPO18.MDIO0_MDIO */ + >; + }; + + rgmii1_pins_default: rgmii1-pins-default { + pinctrl-single,pins = < + AM64X_IOPAD(0x011c, PIN_INPUT, 4) /* (AA13) PRG1_PRU1_GPO5.RGMII1_RD0 */ + AM64X_IOPAD(0x0128, PIN_INPUT, 4) /* (U12) PRG1_PRU1_GPO8.RGMII1_RD1 */ + AM64X_IOPAD(0x0150, PIN_INPUT, 4) /* (Y13) PRG1_PRU1_GPO18.RGMII1_RD2 */ + AM64X_IOPAD(0x0154, PIN_INPUT, 4) /* (V12) PRG1_PRU1_GPO19.RGMII1_RD3 */ + AM64X_IOPAD(0x00d8, PIN_INPUT, 4) /* (W13) PRG1_PRU0_GPO8.RGMII1_RXC */ + AM64X_IOPAD(0x00cc, PIN_INPUT, 4) /* (V13) PRG1_PRU0_GPO5.RGMII1_RX_CTL */ + AM64X_IOPAD(0x0124, PIN_OUTPUT, 4) /* (V15) PRG1_PRU1_GPO7.RGMII1_TD0 */ + AM64X_IOPAD(0x012c, PIN_OUTPUT, 4) /* (V14) PRG1_PRU1_GPO9.RGMII1_TD1 */ + AM64X_IOPAD(0x0130, PIN_OUTPUT, 4) /* (W14) PRG1_PRU1_GPO10.RGMII1_TD2 */ + AM64X_IOPAD(0x014c, PIN_OUTPUT, 4) /* (AA14) PRG1_PRU1_GPO17.RGMII1_TD3 */ + AM64X_IOPAD(0x00e0, PIN_OUTPUT, 4) /* (U14) PRG1_PRU0_GPO10.RGMII1_TXC */ + AM64X_IOPAD(0x00dc, PIN_OUTPUT, 4) /* (U15) PRG1_PRU0_GPO9.RGMII1_TX_CTL */ + >; + }; + + rgmii2_pins_default: rgmii2-pins-default { + pinctrl-single,pins = < + AM64X_IOPAD(0x0108, PIN_INPUT, 4) /* (W11) PRG1_PRU1_GPO0.RGMII2_RD0 */ + AM64X_IOPAD(0x010c, PIN_INPUT, 4) /* (V11) PRG1_PRU1_GPO1.RGMII2_RD1 */ + AM64X_IOPAD(0x0110, PIN_INPUT, 4) /* (AA12) PRG1_PRU1_GPO2.RGMII2_RD2 */ + AM64X_IOPAD(0x0114, PIN_INPUT, 4) /* (Y12) PRG1_PRU1_GPO3.RGMII2_RD3 */ + AM64X_IOPAD(0x0120, PIN_INPUT, 4) /* (U11) PRG1_PRU1_GPO6.RGMII2_RXC */ + AM64X_IOPAD(0x0118, PIN_INPUT, 4) /* (W12) PRG1_PRU1_GPO4.RGMII2_RX_CTL */ + AM64X_IOPAD(0x0134, PIN_OUTPUT, 4) /* (AA10) PRG1_PRU1_GPO11.RGMII2_TD0 */ + AM64X_IOPAD(0x0138, PIN_OUTPUT, 4) /* (V10) PRG1_PRU1_GPO12.RGMII2_TD1 */ + AM64X_IOPAD(0x013c, PIN_OUTPUT, 4) /* (U10) PRG1_PRU1_GPO13.RGMII2_TD2 */ + AM64X_IOPAD(0x0140, PIN_OUTPUT, 4) /* (AA11) PRG1_PRU1_GPO14.RGMII2_TD3 */ + AM64X_IOPAD(0x0148, PIN_OUTPUT, 4) /* (Y10) PRG1_PRU1_GPO16.RGMII2_TXC */ + AM64X_IOPAD(0x0144, PIN_OUTPUT, 4) /* (Y11) PRG1_PRU1_GPO15.RGMII2_TX_CTL */ + >; + }; + + ospi0_pins_default: ospi0-pins-default { + pinctrl-single,pins = < + AM64X_IOPAD(0x0000, PIN_OUTPUT, 0) /* (N20) OSPI0_CLK */ + AM64X_IOPAD(0x002c, PIN_OUTPUT, 0) /* (L19) OSPI0_CSn0 */ + AM64X_IOPAD(0x000c, 
PIN_INPUT, 0) /* (M19) OSPI0_D0 */ + AM64X_IOPAD(0x0010, PIN_INPUT, 0) /* (M18) OSPI0_D1 */ + AM64X_IOPAD(0x0014, PIN_INPUT, 0) /* (M20) OSPI0_D2 */ + AM64X_IOPAD(0x0018, PIN_INPUT, 0) /* (M21) OSPI0_D3 */ + AM64X_IOPAD(0x001c, PIN_INPUT, 0) /* (P21) OSPI0_D4 */ + AM64X_IOPAD(0x0020, PIN_INPUT, 0) /* (P20) OSPI0_D5 */ + AM64X_IOPAD(0x0024, PIN_INPUT, 0) /* (N18) OSPI0_D6 */ + AM64X_IOPAD(0x0028, PIN_INPUT, 0) /* (M17) OSPI0_D7 */ + AM64X_IOPAD(0x0008, PIN_INPUT, 0) /* (N19) OSPI0_DQS */ + >; + }; +}; + +&mcu_uart0 { + status = "disabled"; +}; + +&mcu_uart1 { + status = "disabled"; +}; + +&main_uart1 { + /* main_uart1 is reserved for firmware usage */ + status = "reserved"; +}; + +&main_uart2 { + status = "disabled"; +}; + +&main_uart3 { + status = "disabled"; +}; + +&main_uart4 { + status = "disabled"; +}; + +&main_uart5 { + status = "disabled"; +}; + +&main_uart6 { + status = "disabled"; +}; + +&mcu_i2c0 { + status = "disabled"; +}; + +&mcu_i2c1 { + status = "disabled"; +}; + +&main_i2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&main_i2c1_pins_default>; + clock-frequency = <400000>; + + exp1: gpio@70 { + compatible = "nxp,pca9538"; + reg = <0x70>; + gpio-controller; + #gpio-cells = <2>; + gpio-line-names = "GPIO_CPSW2_RST", "GPIO_CPSW1_RST", + "PRU_DETECT", "MMC1_SD_EN", + "VPP_LDO_EN", "RPI_PS_3V3_En", + "RPI_PS_5V0_En", "RPI_HAT_DETECT"; + }; +}; + +&main_i2c3 { + status = "disabled"; +}; + +&mcu_spi0 { + status = "disabled"; +}; + +&mcu_spi1 { + status = "disabled"; +}; + +/* mcu_gpio0 is reserved for mcu firmware usage */ +&mcu_gpio0 { + status = "reserved"; +}; + +&sdhci1 { + /* SD/MMC */ + vmmc-supply = <&vdd_mmc1>; + pinctrl-names = "default"; + bus-width = <4>; + pinctrl-0 = <&main_mmc1_pins_default>; + ti,driver-strength-ohm = <50>; + disable-wp; +}; + +&cpsw3g { + pinctrl-names = "default"; + pinctrl-0 = <&mdio1_pins_default + &rgmii1_pins_default + &rgmii2_pins_default>; +}; + +&cpsw_port1 { + phy-mode = "rgmii-rxid"; + phy-handle = <&cpsw3g_phy0>; +}; + +&cpsw_port2 { + phy-mode = "rgmii-rxid"; + phy-handle = <&cpsw3g_phy1>; +}; + +&cpsw3g_mdio { + cpsw3g_phy0: ethernet-phy@0 { + reg = <0>; + ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>; + ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>; + }; + + cpsw3g_phy1: ethernet-phy@1 { + reg = <1>; + ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>; + ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>; + }; +}; + +&tscadc0 { + status = "disabled"; +}; + +&ospi0 { + pinctrl-names = "default"; + pinctrl-0 = <&ospi0_pins_default>; + + flash@0{ + compatible = "jedec,spi-nor"; + reg = <0x0>; + spi-tx-bus-width = <8>; + spi-rx-bus-width = <8>; + spi-max-frequency = <25000000>; + cdns,tshsl-ns = <60>; + cdns,tsd2d-ns = <60>; + cdns,tchsh-ns = <60>; + cdns,tslch-ns = <60>; + cdns,read-delay = <4>; + #address-cells = <1>; + #size-cells = <1>; + }; +}; + +&mailbox0_cluster2 { + mbox_main_r5fss0_core0: mbox-main-r5fss0-core0 { + ti,mbox-rx = <0 0 2>; + ti,mbox-tx = <1 0 2>; + }; + + mbox_main_r5fss0_core1: mbox-main-r5fss0-core1 { + ti,mbox-rx = <2 0 2>; + ti,mbox-tx = <3 0 2>; + }; +}; + +&mailbox0_cluster3 { + status = "disabled"; +}; + +&mailbox0_cluster4 { + mbox_main_r5fss1_core0: mbox-main-r5fss1-core0 { + ti,mbox-rx = <0 0 2>; + ti,mbox-tx = <1 0 2>; + }; + + mbox_main_r5fss1_core1: mbox-main-r5fss1-core1 { + ti,mbox-rx = <2 0 2>; + ti,mbox-tx = <3 0 2>; + }; +}; + +&mailbox0_cluster5 { + status = "disabled"; +}; + +&mailbox0_cluster6 { + mbox_m4_0: mbox-m4-0 { + ti,mbox-rx = <0 0 2>; + ti,mbox-tx = <1 0 2>; + }; +}; + 
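The mailbox clusters configured above expose per-core sub-mailboxes through the TI OMAP mailbox binding: each ti,mbox-rx / ti,mbox-tx value is a <fifo user irq> triplet, and because the clusters declare #mbox-cells = <1>, a client selects a channel by pairing the cluster phandle with the phandle of one of these labelled sub-mailbox children. The consumers themselves (typically the R5F and M4F remoteproc nodes) are not part of this diff; a minimal sketch of how one would reference these channels, assuming the usual remoteproc wiring and using a hypothetical &main_r5fss0_core0 label and memory-region phandles that are not defined in this series, looks roughly like:

&main_r5fss0_core0 {
	/* phandle pair: mailbox cluster + sub-mailbox child node */
	mboxes = <&mailbox0_cluster2 &mbox_main_r5fss0_core0>;
	/* carveouts for vrings and firmware; reserved-memory nodes defined elsewhere */
	memory-region = <&main_r5fss0_core0_dma_memory_region>,
			<&main_r5fss0_core0_memory_region>;
};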
+&mailbox0_cluster7 { + status = "disabled"; +}; diff --git a/arch/arm64/boot/dts/ti/k3-am642.dtsi b/arch/arm64/boot/dts/ti/k3-am642.dtsi new file mode 100644 index 000000000000..e2b397c88401 --- /dev/null +++ b/arch/arm64/boot/dts/ti/k3-am642.dtsi @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device Tree Source for AM642 SoC family in Dual core configuration + * + * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com/ + */ + +/dts-v1/; + +#include "k3-am64.dtsi" + +/ { + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu-map { + cluster0: cluster0 { + core0 { + cpu = <&cpu0>; + }; + + core1 { + cpu = <&cpu1>; + }; + }; + }; + + cpu0: cpu@0 { + compatible = "arm,cortex-a53"; + reg = <0x000>; + device_type = "cpu"; + enable-method = "psci"; + i-cache-size = <0x8000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <128>; + next-level-cache = <&L2_0>; + }; + + cpu1: cpu@1 { + compatible = "arm,cortex-a53"; + reg = <0x001>; + device_type = "cpu"; + enable-method = "psci"; + i-cache-size = <0x8000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <128>; + next-level-cache = <&L2_0>; + }; + }; + + L2_0: l2-cache0 { + compatible = "cache"; + cache-level = <2>; + cache-size = <0x40000>; + cache-line-size = <64>; + cache-sets = <512>; + }; +}; diff --git a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi new file mode 100644 index 000000000000..de763ca9251c --- /dev/null +++ b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi @@ -0,0 +1,655 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Siemens AG, 2018-2021 + * + * Authors: + * Le Jin <le.jin@siemens.com> + * Jan Kiszka <jan.kiszk@siemens.com> + * + * Common bits of the IOT2050 Basic and Advanced variants + */ + +/dts-v1/; + +#include "k3-am654.dtsi" +#include <dt-bindings/phy/phy.h> + +/ { + aliases { + spi0 = &mcu_spi0; + }; + + chosen { + stdout-path = "serial3:115200n8"; + bootargs = "earlycon=ns16550a,mmio32,0x02810000"; + }; + + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + secure_ddr: secure-ddr@9e800000 { + reg = <0 0x9e800000 0 0x01800000>; /* for OP-TEE */ + alignment = <0x1000>; + no-map; + }; + + mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@a0000000 { + compatible = "shared-dma-pool"; + reg = <0 0xa0000000 0 0x100000>; + no-map; + }; + + mcu_r5fss0_core0_memory_region: r5f-memory@a0100000 { + compatible = "shared-dma-pool"; + reg = <0 0xa0100000 0 0xf00000>; + no-map; + }; + + mcu_r5fss0_core1_dma_memory_region: r5f-dma-memory@a1000000 { + compatible = "shared-dma-pool"; + reg = <0 0xa1000000 0 0x100000>; + no-map; + }; + + mcu_r5fss0_core1_memory_region: r5f-memory@a1100000 { + compatible = "shared-dma-pool"; + reg = <0 0xa1100000 0 0xf00000>; + no-map; + }; + + rtos_ipc_memory_region: ipc-memories@a2000000 { + reg = <0x00 0xa2000000 0x00 0x00200000>; + alignment = <0x1000>; + no-map; + }; + }; + + leds { + compatible = "gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&leds_pins_default>; + + status-led-red { + gpios = <&wkup_gpio0 32 GPIO_ACTIVE_HIGH>; + panic-indicator; + }; + + status-led-green { + gpios = <&wkup_gpio0 24 GPIO_ACTIVE_HIGH>; + }; + + user-led1-red { + gpios = <&pcal9535_3 14 GPIO_ACTIVE_HIGH>; + }; + + user-led1-green { + gpios = <&pcal9535_2 15 GPIO_ACTIVE_HIGH>; + }; + + user-led2-red { + gpios = 
<&wkup_gpio0 17 GPIO_ACTIVE_HIGH>; + }; + + user-led2-green { + gpios = <&wkup_gpio0 22 GPIO_ACTIVE_HIGH>; + }; + }; + + dp_refclk: clock { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <19200000>; + }; +}; + +&wkup_pmx0 { + wkup_i2c0_pins_default: wkup-i2c0-pins-default { + pinctrl-single,pins = < + /* (AC7) WKUP_I2C0_SCL */ + AM65X_WKUP_IOPAD(0x00e0, PIN_INPUT, 0) + /* (AD6) WKUP_I2C0_SDA */ + AM65X_WKUP_IOPAD(0x00e4, PIN_INPUT, 0) + >; + }; + + mcu_i2c0_pins_default: mcu-i2c0-pins-default { + pinctrl-single,pins = < + /* (AD8) MCU_I2C0_SCL */ + AM65X_WKUP_IOPAD(0x00e8, PIN_INPUT, 0) + /* (AD7) MCU_I2C0_SDA */ + AM65X_WKUP_IOPAD(0x00ec, PIN_INPUT, 0) + >; + }; + + arduino_i2c_aio_switch_pins_default: arduino-i2c-aio-switch-pins-default { + pinctrl-single,pins = < + /* (R2) WKUP_GPIO0_21 */ + AM65X_WKUP_IOPAD(0x0024, PIN_OUTPUT, 7) + >; + }; + + push_button_pins_default: push-button-pins-default { + pinctrl-single,pins = < + /* (T1) MCU_OSPI1_CLK.WKUP_GPIO0_25 */ + AM65X_WKUP_IOPAD(0x0034, PIN_INPUT, 7) + >; + }; + + arduino_uart_pins_default: arduino-uart-pins-default { + pinctrl-single,pins = < + /* (P4) MCU_UART0_RXD */ + AM65X_WKUP_IOPAD(0x0044, PIN_INPUT, 4) + /* (P5) MCU_UART0_TXD */ + AM65X_WKUP_IOPAD(0x0048, PIN_OUTPUT, 4) + >; + }; + + arduino_io_d2_to_d3_pins_default: arduino-io-d2-to-d3-pins-default { + pinctrl-single,pins = < + /* (P1) WKUP_GPIO0_31 */ + AM65X_WKUP_IOPAD(0x004C, PIN_OUTPUT, 7) + /* (N3) WKUP_GPIO0_33 */ + AM65X_WKUP_IOPAD(0x0054, PIN_OUTPUT, 7) + >; + }; + + arduino_io_oe_pins_default: arduino-io-oe-pins-default { + pinctrl-single,pins = < + /* (N4) WKUP_GPIO0_34 */ + AM65X_WKUP_IOPAD(0x0058, PIN_OUTPUT, 7) + /* (M2) WKUP_GPIO0_36 */ + AM65X_WKUP_IOPAD(0x0060, PIN_OUTPUT, 7) + /* (M3) WKUP_GPIO0_37 */ + AM65X_WKUP_IOPAD(0x0064, PIN_OUTPUT, 7) + /* (M4) WKUP_GPIO0_38 */ + AM65X_WKUP_IOPAD(0x0068, PIN_OUTPUT, 7) + /* (M1) WKUP_GPIO0_41 */ + AM65X_WKUP_IOPAD(0x0074, PIN_OUTPUT, 7) + >; + }; + + mcu_fss0_ospi0_pins_default: mcu-fss0-ospi0-pins-default { + pinctrl-single,pins = < + /* (V1) MCU_OSPI0_CLK */ + AM65X_WKUP_IOPAD(0x0000, PIN_OUTPUT, 0) + /* (U2) MCU_OSPI0_DQS */ + AM65X_WKUP_IOPAD(0x0008, PIN_INPUT, 0) + /* (U4) MCU_OSPI0_D0 */ + AM65X_WKUP_IOPAD(0x000c, PIN_INPUT, 0) + /* (U5) MCU_OSPI0_D1 */ + AM65X_WKUP_IOPAD(0x0010, PIN_INPUT, 0) + /* (R4) MCU_OSPI0_CSn0 */ + AM65X_WKUP_IOPAD(0x002c, PIN_OUTPUT, 0) + >; + }; + + db9_com_mode_pins_default: db9-com-mode-pins-default { + pinctrl-single,pins = < + /* (AD3) WKUP_GPIO0_5, used as uart0 mode 0 */ + AM65X_WKUP_IOPAD(0x00c4, PIN_OUTPUT, 7) + /* (AC3) WKUP_GPIO0_4, used as uart0 mode 1 */ + AM65X_WKUP_IOPAD(0x00c0, PIN_OUTPUT, 7) + /* (AC1) WKUP_GPIO0_7, used as uart0 term */ + AM65X_WKUP_IOPAD(0x00cc, PIN_OUTPUT, 7) + /* (AC2) WKUP_GPIO0_6, used as uart0 en */ + AM65X_WKUP_IOPAD(0x00c8, PIN_OUTPUT, 7) + >; + }; + + leds_pins_default: leds-pins-default { + pinctrl-single,pins = < + /* (T2) WKUP_GPIO0_17, used as user led1 red */ + AM65X_WKUP_IOPAD(0x0014, PIN_OUTPUT, 7) + /* (R3) WKUP_GPIO0_22, used as user led1 green */ + AM65X_WKUP_IOPAD(0x0028, PIN_OUTPUT, 7) + /* (R5) WKUP_GPIO0_24, used as status led red */ + AM65X_WKUP_IOPAD(0x0030, PIN_OUTPUT, 7) + /* (N2) WKUP_GPIO0_32, used as status led green */ + AM65X_WKUP_IOPAD(0x0050, PIN_OUTPUT, 7) + >; + }; + + mcu_spi0_pins_default: mcu-spi0-pins-default { + pinctrl-single,pins = < + /* (Y1) MCU_SPI0_CLK */ + AM65X_WKUP_IOPAD(0x0090, PIN_INPUT, 0) + /* (Y3) MCU_SPI0_D0 */ + AM65X_WKUP_IOPAD(0x0094, PIN_INPUT, 0) + /* (Y2) MCU_SPI0_D1 
*/ + AM65X_WKUP_IOPAD(0x0098, PIN_INPUT, 0) + /* (Y4) MCU_SPI0_CS0 */ + AM65X_WKUP_IOPAD(0x009c, PIN_OUTPUT, 0) + >; + }; + + minipcie_pins_default: minipcie-pins-default { + pinctrl-single,pins = < + /* (P2) MCU_OSPI1_DQS.WKUP_GPIO0_27 */ + AM65X_WKUP_IOPAD(0x003C, PIN_OUTPUT, 7) + >; + }; +}; + +&main_pmx0 { + main_uart1_pins_default: main-uart1-pins-default { + pinctrl-single,pins = < + AM65X_IOPAD(0x0174, PIN_INPUT, 6) /* (AE23) UART1_RXD */ + AM65X_IOPAD(0x014c, PIN_OUTPUT, 6) /* (AD23) UART1_TXD */ + AM65X_IOPAD(0x0178, PIN_INPUT, 6) /* (AD22) UART1_CTSn */ + AM65X_IOPAD(0x017c, PIN_OUTPUT, 6) /* (AC21) UART1_RTSn */ + >; + }; + + main_i2c3_pins_default: main-i2c3-pins-default { + pinctrl-single,pins = < + AM65X_IOPAD(0x01c0, PIN_INPUT, 2) /* (AF13) I2C3_SCL */ + AM65X_IOPAD(0x01d4, PIN_INPUT, 2) /* (AG12) I2C3_SDA */ + >; + }; + + main_mmc1_pins_default: main-mmc1-pins-default { + pinctrl-single,pins = < + AM65X_IOPAD(0x02d4, PIN_INPUT_PULLDOWN, 0) /* (C27) MMC1_CLK */ + AM65X_IOPAD(0x02d8, PIN_INPUT_PULLUP, 0) /* (C28) MMC1_CMD */ + AM65X_IOPAD(0x02d0, PIN_INPUT_PULLUP, 0) /* (D28) MMC1_DAT0 */ + AM65X_IOPAD(0x02cc, PIN_INPUT_PULLUP, 0) /* (E27) MMC1_DAT1 */ + AM65X_IOPAD(0x02c8, PIN_INPUT_PULLUP, 0) /* (D26) MMC1_DAT2 */ + AM65X_IOPAD(0x02c4, PIN_INPUT_PULLUP, 0) /* (D27) MMC1_DAT3 */ + AM65X_IOPAD(0x02dc, PIN_INPUT_PULLUP, 0) /* (B24) MMC1_SDCD */ + AM65X_IOPAD(0x02e0, PIN_INPUT_PULLUP, 0) /* (C24) MMC1_SDWP */ + >; + }; + + usb0_pins_default: usb0-pins-default { + pinctrl-single,pins = < + AM65X_IOPAD(0x02bc, PIN_OUTPUT, 0) /* (AD9) USB0_DRVVBUS */ + >; + }; + + usb1_pins_default: usb1-pins-default { + pinctrl-single,pins = < + AM65X_IOPAD(0x02c0, PIN_OUTPUT, 0) /* (AC8) USB1_DRVVBUS */ + >; + }; + + arduino_io_d4_to_d9_pins_default: arduino-io-d4-to-d9-pins-default { + pinctrl-single,pins = < + AM65X_IOPAD(0x0084, PIN_OUTPUT, 7) /* (AG18) GPIO0_33 */ + AM65X_IOPAD(0x008C, PIN_OUTPUT, 7) /* (AF17) GPIO0_35 */ + AM65X_IOPAD(0x0098, PIN_OUTPUT, 7) /* (AH16) GPIO0_38 */ + AM65X_IOPAD(0x00AC, PIN_OUTPUT, 7) /* (AH15) GPIO0_43 */ + AM65X_IOPAD(0x00C0, PIN_OUTPUT, 7) /* (AG15) GPIO0_48 */ + AM65X_IOPAD(0x00CC, PIN_OUTPUT, 7) /* (AD15) GPIO0_51 */ + >; + }; + + dss_vout1_pins_default: dss-vout1-pins-default { + pinctrl-single,pins = < + AM65X_IOPAD(0x0000, PIN_OUTPUT, 1) /* VOUT1_DATA0 */ + AM65X_IOPAD(0x0004, PIN_OUTPUT, 1) /* VOUT1_DATA1 */ + AM65X_IOPAD(0x0008, PIN_OUTPUT, 1) /* VOUT1_DATA2 */ + AM65X_IOPAD(0x000c, PIN_OUTPUT, 1) /* VOUT1_DATA3 */ + AM65X_IOPAD(0x0010, PIN_OUTPUT, 1) /* VOUT1_DATA4 */ + AM65X_IOPAD(0x0014, PIN_OUTPUT, 1) /* VOUT1_DATA5 */ + AM65X_IOPAD(0x0018, PIN_OUTPUT, 1) /* VOUT1_DATA6 */ + AM65X_IOPAD(0x001c, PIN_OUTPUT, 1) /* VOUT1_DATA7 */ + AM65X_IOPAD(0x0020, PIN_OUTPUT, 1) /* VOUT1_DATA8 */ + AM65X_IOPAD(0x0024, PIN_OUTPUT, 1) /* VOUT1_DATA9 */ + AM65X_IOPAD(0x0028, PIN_OUTPUT, 1) /* VOUT1_DATA10 */ + AM65X_IOPAD(0x002c, PIN_OUTPUT, 1) /* VOUT1_DATA11 */ + AM65X_IOPAD(0x0030, PIN_OUTPUT, 1) /* VOUT1_DATA12 */ + AM65X_IOPAD(0x0034, PIN_OUTPUT, 1) /* VOUT1_DATA13 */ + AM65X_IOPAD(0x0038, PIN_OUTPUT, 1) /* VOUT1_DATA14 */ + AM65X_IOPAD(0x003c, PIN_OUTPUT, 1) /* VOUT1_DATA15 */ + AM65X_IOPAD(0x0040, PIN_OUTPUT, 1) /* VOUT1_DATA16 */ + AM65X_IOPAD(0x0044, PIN_OUTPUT, 1) /* VOUT1_DATA17 */ + AM65X_IOPAD(0x0048, PIN_OUTPUT, 1) /* VOUT1_DATA18 */ + AM65X_IOPAD(0x004c, PIN_OUTPUT, 1) /* VOUT1_DATA19 */ + AM65X_IOPAD(0x0050, PIN_OUTPUT, 1) /* VOUT1_DATA20 */ + AM65X_IOPAD(0x0054, PIN_OUTPUT, 1) /* VOUT1_DATA21 */ + AM65X_IOPAD(0x0058, PIN_OUTPUT, 1) /* VOUT1_DATA22 
*/ + AM65X_IOPAD(0x005c, PIN_OUTPUT, 1) /* VOUT1_DATA23 */ + AM65X_IOPAD(0x0060, PIN_OUTPUT, 1) /* VOUT1_VSYNC */ + AM65X_IOPAD(0x0064, PIN_OUTPUT, 1) /* VOUT1_HSYNC */ + AM65X_IOPAD(0x0068, PIN_OUTPUT, 1) /* VOUT1_PCLK */ + AM65X_IOPAD(0x006c, PIN_OUTPUT, 1) /* VOUT1_DE */ + >; + }; + + dp_pins_default: dp-pins-default { + pinctrl-single,pins = < + AM65X_IOPAD(0x0078, PIN_OUTPUT, 7) /* (AF18) DP rst_n */ + >; + }; + + main_i2c2_pins_default: main-i2c2-pins-default { + pinctrl-single,pins = < + AM65X_IOPAD(0x0074, PIN_INPUT, 5) /* (T27) I2C2_SCL */ + AM65X_IOPAD(0x0070, PIN_INPUT, 5) /* (R25) I2C2_SDA */ + >; + }; +}; + +&main_pmx1 { + main_i2c0_pins_default: main-i2c0-pins-default { + pinctrl-single,pins = < + AM65X_IOPAD(0x0000, PIN_INPUT, 0) /* (D20) I2C0_SCL */ + AM65X_IOPAD(0x0004, PIN_INPUT, 0) /* (C21) I2C0_SDA */ + >; + }; + + main_i2c1_pins_default: main-i2c1-pins-default { + pinctrl-single,pins = < + AM65X_IOPAD(0x0008, PIN_INPUT, 0) /* (B21) I2C1_SCL */ + AM65X_IOPAD(0x000c, PIN_INPUT, 0) /* (E21) I2C1_SDA */ + >; + }; + + ecap0_pins_default: ecap0-pins-default { + pinctrl-single,pins = < + AM65X_IOPAD(0x0010, PIN_INPUT, 0) /* (D21) ECAP0_IN_APWM_OUT */ + >; + }; +}; + +&wkup_uart0 { + /* Wakeup UART is used by System firmware */ + status = "reserved"; +}; + +&main_uart1 { + pinctrl-names = "default"; + pinctrl-0 = <&main_uart1_pins_default>; +}; + +&main_uart2 { + status = "disabled"; +}; + +&mcu_uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&arduino_uart_pins_default>; +}; + +&main_gpio0 { + pinctrl-names = "default"; + pinctrl-0 = <&arduino_io_d4_to_d9_pins_default>; + gpio-line-names = + "main_gpio0-base", "", "", "", "", "", "", "", "", "", + "", "", "", "", "", "", "", "", "", "", + "", "", "", "", "", "", "", "", "", "", + "", "", "", "IO4", "", "IO5", "", "", "IO6", "", + "", "", "", "IO7", "", "", "", "", "IO8", "", + "", "IO9"; +}; + +&wkup_gpio0 { + pinctrl-names = "default"; + pinctrl-0 = < + &arduino_io_d2_to_d3_pins_default + &arduino_i2c_aio_switch_pins_default + &arduino_io_oe_pins_default + &push_button_pins_default + &db9_com_mode_pins_default + >; + gpio-line-names = + /* 0..9 */ + "wkup_gpio0-base", "", "", "", "UART0-mode1", "UART0-mode0", + "UART0-enable", "UART0-terminate", "", "WIFI-disable", + /* 10..19 */ + "", "", "", "", "", "", "", "", "", "", + /* 20..29 */ + "", "A4A5-I2C-mux", "", "", "", "USER-button", "", "", "","IO0", + /* 30..39 */ + "IO1", "IO2", "", "IO3", "IO17-direction", "A5", + "IO16-direction", "IO15-direction", "IO14-direction", "A3", + /* 40..49 */ + "", "IO18-direction", "A4", "A2", "A1", "A0", "", "", "IO13", + "IO11", + /* 50..51 */ + "IO12", "IO10"; +}; + +&wkup_i2c0 { + pinctrl-names = "default"; + pinctrl-0 = <&wkup_i2c0_pins_default>; + clock-frequency = <400000>; +}; + +&mcu_i2c0 { + pinctrl-names = "default"; + pinctrl-0 = <&mcu_i2c0_pins_default>; + clock-frequency = <400000>; + + psu: regulator@60 { + compatible = "ti,tps62363"; + reg = <0x60>; + regulator-name = "tps62363-vout"; + regulator-min-microvolt = <500000>; + regulator-max-microvolt = <1500000>; + regulator-boot-on; + ti,vsel0-state-high; + ti,vsel1-state-high; + ti,enable-vout-discharge; + }; + + /* D4200 */ + pcal9535_1: gpio@20 { + compatible = "nxp,pcal9535"; + reg = <0x20>; + #gpio-cells = <2>; + gpio-controller; + gpio-line-names = + "A0-pull", "A1-pull", "A2-pull", "A3-pull", "A4-pull", + "A5-pull", "", "", + "IO14-enable", "IO15-enable", "IO16-enable", + "IO17-enable", "IO18-enable", "IO19-enable"; + }; + + /* D4201 */ + pcal9535_2: gpio@21 { + 
compatible = "nxp,pcal9535"; + reg = <0x21>; + #gpio-cells = <2>; + gpio-controller; + gpio-line-names = + "IO0-direction", "IO1-direction", "IO2-direction", + "IO3-direction", "IO4-direction", "IO5-direction", + "IO6-direction", "IO7-direction", + "IO8-direction", "IO9-direction", "IO10-direction", + "IO11-direction", "IO12-direction", "IO13-direction", + "IO19-direction"; + }; + + /* D4202 */ + pcal9535_3: gpio@25 { + compatible = "nxp,pcal9535"; + reg = <0x25>; + #gpio-cells = <2>; + gpio-controller; + gpio-line-names = + "IO0-pull", "IO1-pull", "IO2-pull", "IO3-pull", + "IO4-pull", "IO5-pull", "IO6-pull", "IO7-pull", + "IO8-pull", "IO9-pull", "IO10-pull", "IO11-pull", + "IO12-pull", "IO13-pull"; + }; +}; + +&main_i2c0 { + pinctrl-names = "default"; + pinctrl-0 = <&main_i2c0_pins_default>; + clock-frequency = <400000>; + + rtc: rtc8564@51 { + compatible = "nxp,pcf8563"; + reg = <0x51>; + }; + + eeprom: eeprom@54 { + compatible = "atmel,24c08"; + reg = <0x54>; + pagesize = <16>; + }; +}; + +&main_i2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&main_i2c1_pins_default>; + clock-frequency = <400000>; +}; + +&main_i2c2 { + pinctrl-names = "default"; + pinctrl-0 = <&main_i2c2_pins_default>; + clock-frequency = <400000>; +}; + +&main_i2c3 { + pinctrl-names = "default"; + pinctrl-0 = <&main_i2c3_pins_default>; + clock-frequency = <400000>; + + #address-cells = <1>; + #size-cells = <0>; + + edp-bridge@f { + compatible = "toshiba,tc358767"; + reg = <0x0f>; + pinctrl-names = "default"; + pinctrl-0 = <&dp_pins_default>; + reset-gpios = <&main_gpio0 30 GPIO_ACTIVE_HIGH>; + + clock-names = "ref"; + clocks = <&dp_refclk>; + + toshiba,hpd-pin = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + reg = <1>; + + bridge_in: endpoint { + remote-endpoint = <&dpi_out>; + }; + }; + }; + }; +}; + +&mcu_cpsw { + status = "disabled"; +}; + +&ecap0 { + pinctrl-names = "default"; + pinctrl-0 = <&ecap0_pins_default>; +}; + +&sdhci1 { + pinctrl-names = "default"; + pinctrl-0 = <&main_mmc1_pins_default>; + ti,driver-strength-ohm = <50>; + disable-wp; +}; + +&usb0 { + pinctrl-names = "default"; + pinctrl-0 = <&usb0_pins_default>; + dr_mode = "host"; +}; + +&usb1 { + pinctrl-names = "default"; + pinctrl-0 = <&usb1_pins_default>; + dr_mode = "host"; +}; + +&mcu_spi0 { + pinctrl-names = "default"; + pinctrl-0 = <&mcu_spi0_pins_default>; + + #address-cells = <1>; + #size-cells= <0>; + ti,pindir-d0-out-d1-in = <1>; +}; + +&tscadc0 { + status = "disabled"; +}; + +&tscadc1 { + adc { + ti,adc-channels = <0 1 2 3 4 5>; + }; +}; + +&ospi0 { + pinctrl-names = "default"; + pinctrl-0 = <&mcu_fss0_ospi0_pins_default>; + + flash@0 { + compatible = "jedec,spi-nor"; + reg = <0x0>; + spi-tx-bus-width = <1>; + spi-rx-bus-width = <1>; + spi-max-frequency = <50000000>; + cdns,tshsl-ns = <60>; + cdns,tsd2d-ns = <60>; + cdns,tchsh-ns = <60>; + cdns,tslch-ns = <60>; + cdns,read-delay = <2>; + #address-cells = <1>; + #size-cells = <1>; + }; +}; + +&dss { + pinctrl-names = "default"; + pinctrl-0 = <&dss_vout1_pins_default>; + + assigned-clocks = <&k3_clks 67 2>; + assigned-clock-parents = <&k3_clks 67 5>; +}; + +&dss_ports { + #address-cells = <1>; + #size-cells = <0>; + port@1 { + reg = <1>; + + dpi_out: endpoint { + remote-endpoint = <&bridge_in>; + }; + }; +}; + +&serdes0 { + status = "disabled"; +}; + +&pcie0_rc { + status = "disabled"; +}; + +&pcie0_ep { + status = "disabled"; +}; + +&pcie1_rc { + pinctrl-names = "default"; + pinctrl-0 = <&minipcie_pins_default>; + + num-lanes = <1>; + phys = <&serdes1 
PHY_TYPE_PCIE 0>; + phy-names = "pcie-phy0"; + reset-gpios = <&wkup_gpio0 27 GPIO_ACTIVE_HIGH>; +}; + +&pcie1_ep { + status = "disabled"; +}; diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi index ceb579fb427d..cb340d1b401f 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi @@ -707,6 +707,7 @@ dma-coherent; interrupts = <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>; msi-map = <0x0 &gic_its 0x0 0x10000>; + device_type = "pci"; }; pcie0_ep: pcie-ep@5500000 { @@ -739,6 +740,7 @@ dma-coherent; interrupts = <GIC_SPI 355 IRQ_TYPE_EDGE_RISING>; msi-map = <0x0 &gic_its 0x10000 0x10000>; + device_type = "pci"; }; pcie1_ep: pcie-ep@5600000 { @@ -919,4 +921,397 @@ clocks = <&ehrpwm_tbclk 5>, <&k3_clks 45 0>; clock-names = "tbclk", "fck"; }; + + icssg0: icssg@b000000 { + compatible = "ti,am654-icssg"; + reg = <0x00 0xb000000 0x00 0x80000>; + power-domains = <&k3_pds 62 TI_SCI_PD_EXCLUSIVE>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x00 0xb000000 0x80000>; + + icssg0_mem: memories@0 { + reg = <0x0 0x2000>, + <0x2000 0x2000>, + <0x10000 0x10000>; + reg-names = "dram0", "dram1", + "shrdram2"; + }; + + icssg0_cfg: cfg@26000 { + compatible = "ti,pruss-cfg", "syscon"; + reg = <0x26000 0x200>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x26000 0x2000>; + + clocks { + #address-cells = <1>; + #size-cells = <0>; + + icssg0_coreclk_mux: coreclk-mux@3c { + reg = <0x3c>; + #clock-cells = <0>; + clocks = <&k3_clks 62 19>, /* icssg0_core_clk */ + <&k3_clks 62 3>; /* icssg0_iclk */ + assigned-clocks = <&icssg0_coreclk_mux>; + assigned-clock-parents = <&k3_clks 62 3>; + }; + + icssg0_iepclk_mux: iepclk-mux@30 { + reg = <0x30>; + #clock-cells = <0>; + clocks = <&k3_clks 62 10>, /* icssg0_iep_clk */ + <&icssg0_coreclk_mux>; /* core_clk */ + assigned-clocks = <&icssg0_iepclk_mux>; + assigned-clock-parents = <&icssg0_coreclk_mux>; + }; + }; + }; + + icssg0_mii_rt: mii-rt@32000 { + compatible = "ti,pruss-mii", "syscon"; + reg = <0x32000 0x100>; + }; + + icssg0_mii_g_rt: mii-g-rt@33000 { + compatible = "ti,pruss-mii-g", "syscon"; + reg = <0x33000 0x1000>; + }; + + icssg0_intc: interrupt-controller@20000 { + compatible = "ti,icssg-intc"; + reg = <0x20000 0x2000>; + interrupt-controller; + #interrupt-cells = <3>; + interrupts = <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "host_intr0", "host_intr1", + "host_intr2", "host_intr3", + "host_intr4", "host_intr5", + "host_intr6", "host_intr7"; + }; + + pru0_0: pru@34000 { + compatible = "ti,am654-pru"; + reg = <0x34000 0x4000>, + <0x22000 0x100>, + <0x22400 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "am65x-pru0_0-fw"; + }; + + rtu0_0: rtu@4000 { + compatible = "ti,am654-rtu"; + reg = <0x4000 0x2000>, + <0x23000 0x100>, + <0x23400 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "am65x-rtu0_0-fw"; + }; + + tx_pru0_0: txpru@a000 { + compatible = "ti,am654-tx-pru"; + reg = <0xa000 0x1800>, + <0x25000 0x100>, + <0x25400 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "am65x-txpru0_0-fw"; + }; + + pru0_1: pru@38000 { + compatible = "ti,am654-pru"; + reg = <0x38000 0x4000>, + <0x24000 0x100>, + <0x24400 0x100>; + reg-names = "iram", 
"control", "debug"; + firmware-name = "am65x-pru0_1-fw"; + }; + + rtu0_1: rtu@6000 { + compatible = "ti,am654-rtu"; + reg = <0x6000 0x2000>, + <0x23800 0x100>, + <0x23c00 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "am65x-rtu0_1-fw"; + }; + + tx_pru0_1: txpru@c000 { + compatible = "ti,am654-tx-pru"; + reg = <0xc000 0x1800>, + <0x25800 0x100>, + <0x25c00 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "am65x-txpru0_1-fw"; + }; + }; + + icssg1: icssg@b100000 { + compatible = "ti,am654-icssg"; + reg = <0x00 0xb100000 0x00 0x80000>; + power-domains = <&k3_pds 63 TI_SCI_PD_EXCLUSIVE>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x00 0xb100000 0x80000>; + + icssg1_mem: memories@0 { + reg = <0x0 0x2000>, + <0x2000 0x2000>, + <0x10000 0x10000>; + reg-names = "dram0", "dram1", + "shrdram2"; + }; + + icssg1_cfg: cfg@26000 { + compatible = "ti,pruss-cfg", "syscon"; + reg = <0x26000 0x200>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x26000 0x2000>; + + clocks { + #address-cells = <1>; + #size-cells = <0>; + + icssg1_coreclk_mux: coreclk-mux@3c { + reg = <0x3c>; + #clock-cells = <0>; + clocks = <&k3_clks 63 19>, /* icssg1_core_clk */ + <&k3_clks 63 3>; /* icssg1_iclk */ + assigned-clocks = <&icssg1_coreclk_mux>; + assigned-clock-parents = <&k3_clks 63 3>; + }; + + icssg1_iepclk_mux: iepclk-mux@30 { + reg = <0x30>; + #clock-cells = <0>; + clocks = <&k3_clks 63 10>, /* icssg1_iep_clk */ + <&icssg1_coreclk_mux>; /* core_clk */ + assigned-clocks = <&icssg1_iepclk_mux>; + assigned-clock-parents = <&icssg1_coreclk_mux>; + }; + }; + }; + + icssg1_mii_rt: mii-rt@32000 { + compatible = "ti,pruss-mii", "syscon"; + reg = <0x32000 0x100>; + }; + + icssg1_mii_g_rt: mii-g-rt@33000 { + compatible = "ti,pruss-mii-g", "syscon"; + reg = <0x33000 0x1000>; + }; + + icssg1_intc: interrupt-controller@20000 { + compatible = "ti,icssg-intc"; + reg = <0x20000 0x2000>; + interrupt-controller; + #interrupt-cells = <3>; + interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "host_intr0", "host_intr1", + "host_intr2", "host_intr3", + "host_intr4", "host_intr5", + "host_intr6", "host_intr7"; + }; + + pru1_0: pru@34000 { + compatible = "ti,am654-pru"; + reg = <0x34000 0x4000>, + <0x22000 0x100>, + <0x22400 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "am65x-pru1_0-fw"; + }; + + rtu1_0: rtu@4000 { + compatible = "ti,am654-rtu"; + reg = <0x4000 0x2000>, + <0x23000 0x100>, + <0x23400 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "am65x-rtu1_0-fw"; + }; + + tx_pru1_0: txpru@a000 { + compatible = "ti,am654-tx-pru"; + reg = <0xa000 0x1800>, + <0x25000 0x100>, + <0x25400 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "am65x-txpru1_0-fw"; + }; + + pru1_1: pru@38000 { + compatible = "ti,am654-pru"; + reg = <0x38000 0x4000>, + <0x24000 0x100>, + <0x24400 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "am65x-pru1_1-fw"; + }; + + rtu1_1: rtu@6000 { + compatible = "ti,am654-rtu"; + reg = <0x6000 0x2000>, + <0x23800 0x100>, + <0x23c00 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "am65x-rtu1_1-fw"; + }; + + tx_pru1_1: txpru@c000 { + compatible = "ti,am654-tx-pru"; + reg = <0xc000 0x1800>, + 
<0x25800 0x100>, + <0x25c00 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "am65x-txpru1_1-fw"; + }; + }; + + icssg2: icssg@b200000 { + compatible = "ti,am654-icssg"; + reg = <0x00 0xb200000 0x00 0x80000>; + power-domains = <&k3_pds 64 TI_SCI_PD_EXCLUSIVE>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x00 0xb200000 0x80000>; + + icssg2_mem: memories@0 { + reg = <0x0 0x2000>, + <0x2000 0x2000>, + <0x10000 0x10000>; + reg-names = "dram0", "dram1", + "shrdram2"; + }; + + icssg2_cfg: cfg@26000 { + compatible = "ti,pruss-cfg", "syscon"; + reg = <0x26000 0x200>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x26000 0x2000>; + + clocks { + #address-cells = <1>; + #size-cells = <0>; + + icssg2_coreclk_mux: coreclk-mux@3c { + reg = <0x3c>; + #clock-cells = <0>; + clocks = <&k3_clks 64 19>, /* icssg1_core_clk */ + <&k3_clks 64 3>; /* icssg1_iclk */ + assigned-clocks = <&icssg2_coreclk_mux>; + assigned-clock-parents = <&k3_clks 64 3>; + }; + + icssg2_iepclk_mux: iepclk-mux@30 { + reg = <0x30>; + #clock-cells = <0>; + clocks = <&k3_clks 64 10>, /* icssg1_iep_clk */ + <&icssg2_coreclk_mux>; /* core_clk */ + assigned-clocks = <&icssg2_iepclk_mux>; + assigned-clock-parents = <&icssg2_coreclk_mux>; + }; + }; + }; + + icssg2_mii_rt: mii-rt@32000 { + compatible = "ti,pruss-mii", "syscon"; + reg = <0x32000 0x100>; + }; + + icssg2_mii_g_rt: mii-g-rt@33000 { + compatible = "ti,pruss-mii-g", "syscon"; + reg = <0x33000 0x1000>; + }; + + icssg2_intc: interrupt-controller@20000 { + compatible = "ti,icssg-intc"; + reg = <0x20000 0x2000>; + interrupt-controller; + #interrupt-cells = <3>; + interrupts = <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 273 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 275 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 276 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 277 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "host_intr0", "host_intr1", + "host_intr2", "host_intr3", + "host_intr4", "host_intr5", + "host_intr6", "host_intr7"; + }; + + pru2_0: pru@34000 { + compatible = "ti,am654-pru"; + reg = <0x34000 0x4000>, + <0x22000 0x100>, + <0x22400 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "am65x-pru2_0-fw"; + }; + + rtu2_0: rtu@4000 { + compatible = "ti,am654-rtu"; + reg = <0x4000 0x2000>, + <0x23000 0x100>, + <0x23400 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "am65x-rtu2_0-fw"; + }; + + tx_pru2_0: txpru@a000 { + compatible = "ti,am654-tx-pru"; + reg = <0xa000 0x1800>, + <0x25000 0x100>, + <0x25400 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "am65x-txpru2_0-fw"; + }; + + pru2_1: pru@38000 { + compatible = "ti,am654-pru"; + reg = <0x38000 0x4000>, + <0x24000 0x100>, + <0x24400 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "am65x-pru2_1-fw"; + }; + + rtu2_1: rtu@6000 { + compatible = "ti,am654-rtu"; + reg = <0x6000 0x2000>, + <0x23800 0x100>, + <0x23c00 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "am65x-rtu2_1-fw"; + }; + + tx_pru2_1: txpru@c000 { + compatible = "ti,am654-tx-pru"; + reg = <0xc000 0x1800>, + <0x25800 0x100>, + <0x25c00 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "am65x-txpru2_1-fw"; + }; + }; }; diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi index 7454c8cec0cc..0388c02c2203 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi +++ 
b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi @@ -308,4 +308,13 @@ ti,loczrama = <1>; }; }; + + mcu_rti1: watchdog@40610000 { + compatible = "ti,j7-rti-wdt"; + reg = <0x0 0x40610000 0x0 0x100>; + clocks = <&k3_clks 135 0>; + power-domains = <&k3_pds 135 TI_SCI_PD_SHARED>; + assigned-clocks = <&k3_clks 135 0>; + assigned-clock-parents = <&k3_clks 135 4>; + }; }; diff --git a/arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts b/arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts new file mode 100644 index 000000000000..4f7e3f2a6265 --- /dev/null +++ b/arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Siemens AG, 2018-2021 + * + * Authors: + * Le Jin <le.jin@siemens.com> + * Jan Kiszka <jan.kiszk@siemens.com> + * + * AM6528-based (dual-core) IOT2050 Basic variant + * 1 GB RAM, no eMMC, main_uart0 on connector X30 + */ + +/dts-v1/; + +#include "k3-am65-iot2050-common.dtsi" + +/ { + compatible = "siemens,iot2050-basic", "ti,am654"; + model = "SIMATIC IOT2050 Basic"; + + memory@80000000 { + device_type = "memory"; + /* 1G RAM */ + reg = <0x00000000 0x80000000 0x00000000 0x40000000>; + }; + + cpus { + cpu-map { + /delete-node/ cluster1; + }; + /delete-node/ cpu@100; + /delete-node/ cpu@101; + }; + + /delete-node/ l2-cache1; +}; + +/* eMMC */ +&sdhci0 { + status = "disabled"; +}; + +&main_pmx0 { + main_uart0_pins_default: main-uart0-pins-default { + pinctrl-single,pins = < + AM65X_IOPAD(0x01e4, PIN_INPUT, 0) /* (AF11) UART0_RXD */ + AM65X_IOPAD(0x01e8, PIN_OUTPUT, 0) /* (AE11) UART0_TXD */ + AM65X_IOPAD(0x01ec, PIN_INPUT, 0) /* (AG11) UART0_CTSn */ + AM65X_IOPAD(0x01f0, PIN_OUTPUT, 0) /* (AD11) UART0_RTSn */ + AM65X_IOPAD(0x0188, PIN_INPUT, 1) /* (D25) UART0_DCDn */ + AM65X_IOPAD(0x018c, PIN_INPUT, 1) /* (B26) UART0_DSRn */ + AM65X_IOPAD(0x0190, PIN_OUTPUT, 1) /* (A24) UART0_DTRn */ + AM65X_IOPAD(0x0194, PIN_INPUT, 1) /* (E24) UART0_RIN */ + >; + }; +}; + +&main_uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&main_uart0_pins_default>; +}; diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts index fe3043943906..9e87fb313a54 100644 --- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts +++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts @@ -483,9 +483,9 @@ flash@0{ compatible = "jedec,spi-nor"; reg = <0x0>; - spi-tx-bus-width = <1>; + spi-tx-bus-width = <8>; spi-rx-bus-width = <8>; - spi-max-frequency = <40000000>; + spi-max-frequency = <25000000>; cdns,tshsl-ns = <60>; cdns,tsd2d-ns = <60>; cdns,tchsh-ns = <60>; diff --git a/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced.dts b/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced.dts new file mode 100644 index 000000000000..ec9617c13cdb --- /dev/null +++ b/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced.dts @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Siemens AG, 2018-2021 + * + * Authors: + * Le Jin <le.jin@siemens.com> + * Jan Kiszka <jan.kiszk@siemens.com> + * + * AM6548-based (quad-core) IOT2050 Advanced variant + * 2 GB RAM, 16 GB eMMC, USB-serial converter on connector X30 + */ + +/dts-v1/; + +#include "k3-am65-iot2050-common.dtsi" + +/ { + compatible = "siemens,iot2050-advanced", "ti,am654"; + model = "SIMATIC IOT2050 Advanced"; + + memory@80000000 { + device_type = "memory"; + /* 2G RAM */ + reg = <0x00000000 0x80000000 0x00000000 0x80000000>; + }; +}; + +&main_pmx0 { + main_mmc0_pins_default: main-mmc0-pins-default { + pinctrl-single,pins = < + AM65X_IOPAD(0x01a8, 
PIN_INPUT_PULLDOWN, 0) /* (B25) MMC0_CLK */ + AM65X_IOPAD(0x01ac, PIN_INPUT_PULLUP, 0) /* (B27) MMC0_CMD */ + AM65X_IOPAD(0x01a4, PIN_INPUT_PULLUP, 0) /* (A26) MMC0_DAT0 */ + AM65X_IOPAD(0x01a0, PIN_INPUT_PULLUP, 0) /* (E25) MMC0_DAT1 */ + AM65X_IOPAD(0x019c, PIN_INPUT_PULLUP, 0) /* (C26) MMC0_DAT2 */ + AM65X_IOPAD(0x0198, PIN_INPUT_PULLUP, 0) /* (A25) MMC0_DAT3 */ + AM65X_IOPAD(0x0194, PIN_INPUT_PULLUP, 0) /* (E24) MMC0_DAT4 */ + AM65X_IOPAD(0x0190, PIN_INPUT_PULLUP, 0) /* (A24) MMC0_DAT5 */ + AM65X_IOPAD(0x018c, PIN_INPUT_PULLUP, 0) /* (B26) MMC0_DAT6 */ + AM65X_IOPAD(0x0188, PIN_INPUT_PULLUP, 0) /* (D25) MMC0_DAT7 */ + AM65X_IOPAD(0x01b8, PIN_OUTPUT_PULLUP, 7) /* (B23) MMC0_SDWP */ + AM65X_IOPAD(0x01b4, PIN_INPUT_PULLUP, 0) /* (A23) MMC0_SDCD */ + AM65X_IOPAD(0x01b0, PIN_INPUT, 0) /* (C25) MMC0_DS */ + >; + }; +}; + +/* eMMC */ +&sdhci0 { + pinctrl-names = "default"; + pinctrl-0 = <&main_mmc0_pins_default>; + bus-width = <8>; + non-removable; + ti,driver-strength-ohm = <50>; + disable-wp; +}; + +&main_uart0 { + status = "disabled"; +}; diff --git a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts index 4a7182abccf5..bedd01b7a32c 100644 --- a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts +++ b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts @@ -16,6 +16,65 @@ stdout-path = "serial2:115200n8"; bootargs = "console=ttyS2,115200n8 earlycon=ns16550a,mmio32,0x02800000"; }; + + evm_12v0: fixedregulator-evm12v0 { + /* main supply */ + compatible = "regulator-fixed"; + regulator-name = "evm_12v0"; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + regulator-always-on; + regulator-boot-on; + }; + + vsys_3v3: fixedregulator-vsys3v3 { + /* Output of LM5140 */ + compatible = "regulator-fixed"; + regulator-name = "vsys_3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&evm_12v0>; + regulator-always-on; + regulator-boot-on; + }; + + vsys_5v0: fixedregulator-vsys5v0 { + /* Output of LM5140 */ + compatible = "regulator-fixed"; + regulator-name = "vsys_5v0"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&evm_12v0>; + regulator-always-on; + regulator-boot-on; + }; + + vdd_mmc1: fixedregulator-sd { + /* Output of TPS22918 */ + compatible = "regulator-fixed"; + regulator-name = "vdd_mmc1"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + enable-active-high; + vin-supply = <&vsys_3v3>; + gpio = <&exp2 2 GPIO_ACTIVE_HIGH>; + }; + + vdd_sd_dv: gpio-regulator-TLV71033 { + /* Output of TLV71033 */ + compatible = "regulator-gpio"; + regulator-name = "tlv71033"; + pinctrl-names = "default"; + pinctrl-0 = <&vdd_sd_dv_pins_default>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + vin-supply = <&vsys_5v0>; + gpios = <&main_gpio0 55 GPIO_ACTIVE_HIGH>; + states = <1800000 0x0>, + <3300000 0x1>; + }; }; &wkup_pmx0 { @@ -45,6 +104,13 @@ }; &main_pmx0 { + main_i2c0_pins_default: main-i2c0-pins-default { + pinctrl-single,pins = < + J721E_IOPAD(0xd4, PIN_INPUT_PULLUP, 0) /* (V3) I2C0_SCL */ + J721E_IOPAD(0xd8, PIN_INPUT_PULLUP, 0) /* (W2) I2C0_SDA */ + >; + }; + main_i2c1_pins_default: main-i2c1-pins-default { pinctrl-single,pins = < J721E_IOPAD(0xdc, PIN_INPUT_PULLUP, 3) /* (U3) ECAP0_IN_APWM_OUT.I2C1_SCL */ @@ -70,6 +136,12 @@ J721E_IOPAD(0x120, PIN_OUTPUT, 0) /* (T4) USB0_DRVVBUS */ >; }; + + vdd_sd_dv_pins_default: 
vdd-sd-dv-pins-default { + pinctrl-single,pins = < + J721E_IOPAD(0xd0, PIN_OUTPUT, 7) /* (T5) SPI0_D1.GPIO0_55 */ + >; + }; }; &wkup_uart0 { @@ -122,6 +194,22 @@ status = "disabled"; }; +&main_gpio2 { + status = "disabled"; +}; + +&main_gpio4 { + status = "disabled"; +}; + +&main_gpio6 { + status = "disabled"; +}; + +&wkup_gpio1 { + status = "disabled"; +}; + &mcu_cpsw { pinctrl-names = "default"; pinctrl-0 = <&mcu_cpsw_pins_default &mcu_mdio_pins_default>; @@ -141,6 +229,10 @@ }; &main_i2c0 { + pinctrl-names = "default"; + pinctrl-0 = <&main_i2c0_pins_default>; + clock-frequency = <400000>; + exp1: gpio@20 { compatible = "ti,tca6416"; reg = <0x20>; @@ -190,6 +282,8 @@ /* SD card */ pinctrl-0 = <&main_mmc1_pins_default>; pinctrl-names = "default"; + vmmc-supply = <&vdd_mmc1>; + vqmmc-supply = <&vdd_sd_dv>; ti,driver-strength-ohm = <50>; disable-wp; }; diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi index 17477ab0fd8e..f86c493a44f1 100644 --- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi @@ -512,11 +512,16 @@ ti,otap-del-sel-mmc-hs = <0x0>; ti,otap-del-sel-ddr52 = <0x6>; ti,otap-del-sel-hs200 = <0x8>; - ti,otap-del-sel-hs400 = <0x0>; + ti,otap-del-sel-hs400 = <0x5>; + ti,itap-del-sel-legacy = <0x10>; + ti,itap-del-sel-mmc-hs = <0xa>; ti,strobe-sel = <0x77>; + ti,clkbuf-sel = <0x7>; ti,trm-icp = <0x8>; bus-width = <8>; mmc-ddr-1_8v; + mmc-hs200-1_8v; + mmc-hs400-1_8v; dma-coherent; }; @@ -534,7 +539,12 @@ ti,otap-del-sel-sdr50 = <0xc>; ti,otap-del-sel-sdr104 = <0x5>; ti,otap-del-sel-ddr50 = <0xc>; - no-1-8-v; + ti,itap-del-sel-legacy = <0x0>; + ti,itap-del-sel-sd-hs = <0x0>; + ti,itap-del-sel-sdr12 = <0x0>; + ti,itap-del-sel-sdr25 = <0x0>; + ti,clkbuf-sel = <0x7>; + ti,trm-icp = <0x8>; dma-coherent; }; @@ -672,6 +682,78 @@ }; }; + main_gpio0: gpio@600000 { + compatible = "ti,j721e-gpio", "ti,keystone-gpio"; + reg = <0x00 0x00600000 0x00 0x100>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&main_gpio_intr>; + interrupts = <145>, <146>, <147>, <148>, + <149>; + interrupt-controller; + #interrupt-cells = <2>; + #address-cells = <0>; + ti,ngpio = <69>; + ti,davinci-gpio-unbanked = <0>; + power-domains = <&k3_pds 105 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 105 0>; + clock-names = "gpio"; + }; + + main_gpio2: gpio@610000 { + compatible = "ti,j721e-gpio", "ti,keystone-gpio"; + reg = <0x00 0x00610000 0x00 0x100>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&main_gpio_intr>; + interrupts = <154>, <155>, <156>, <157>, + <158>; + interrupt-controller; + #interrupt-cells = <2>; + #address-cells = <0>; + ti,ngpio = <69>; + ti,davinci-gpio-unbanked = <0>; + power-domains = <&k3_pds 107 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 107 0>; + clock-names = "gpio"; + }; + + main_gpio4: gpio@620000 { + compatible = "ti,j721e-gpio", "ti,keystone-gpio"; + reg = <0x00 0x00620000 0x00 0x100>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&main_gpio_intr>; + interrupts = <163>, <164>, <165>, <166>, + <167>; + interrupt-controller; + #interrupt-cells = <2>; + #address-cells = <0>; + ti,ngpio = <69>; + ti,davinci-gpio-unbanked = <0>; + power-domains = <&k3_pds 109 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 109 0>; + clock-names = "gpio"; + }; + + main_gpio6: gpio@630000 { + compatible = "ti,j721e-gpio", "ti,keystone-gpio"; + reg = <0x00 0x00630000 0x00 0x100>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&main_gpio_intr>; + interrupts = <172>, <173>, 
<174>, <175>, + <176>; + interrupt-controller; + #interrupt-cells = <2>; + #address-cells = <0>; + ti,ngpio = <69>; + ti,davinci-gpio-unbanked = <0>; + power-domains = <&k3_pds 111 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 111 0>; + clock-names = "gpio"; + }; + main_r5fss0: r5fss@5c00000 { compatible = "ti,j7200-r5fss"; ti,cluster-mode = <1>; diff --git a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi index 359e3e8a8cd0..5e74e43822c3 100644 --- a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi @@ -107,6 +107,40 @@ ti,interrupt-ranges = <16 960 16>; }; + wkup_gpio0: gpio@42110000 { + compatible = "ti,j721e-gpio", "ti,keystone-gpio"; + reg = <0x00 0x42110000 0x00 0x100>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&wkup_gpio_intr>; + interrupts = <103>, <104>, <105>, <106>, <107>, <108>; + interrupt-controller; + #interrupt-cells = <2>; + #address-cells = <0>; + ti,ngpio = <85>; + ti,davinci-gpio-unbanked = <0>; + power-domains = <&k3_pds 113 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 113 0>; + clock-names = "gpio"; + }; + + wkup_gpio1: gpio@42100000 { + compatible = "ti,j721e-gpio", "ti,keystone-gpio"; + reg = <0x00 0x42100000 0x00 0x100>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&wkup_gpio_intr>; + interrupts = <112>, <113>, <114>, <115>, <116>, <117>; + interrupt-controller; + #interrupt-cells = <2>; + #address-cells = <0>; + ti,ngpio = <85>; + ti,davinci-gpio-unbanked = <0>; + power-domains = <&k3_pds 114 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 114 0>; + clock-names = "gpio"; + }; + mcu_navss: bus@28380000 { compatible = "simple-mfd"; #address-cells = <2>; @@ -269,6 +303,23 @@ #size-cells = <1>; mux-controls = <&hbmc_mux 0>; }; + + ospi0: spi@47040000 { + compatible = "ti,am654-ospi", "cdns,qspi-nor"; + reg = <0x0 0x47040000 0x0 0x100>, + <0x5 0x00000000 0x1 0x0000000>; + interrupts = <GIC_SPI 840 IRQ_TYPE_LEVEL_HIGH>; + cdns,fifo-depth = <256>; + cdns,fifo-width = <4>; + cdns,trigger-address = <0x0>; + clocks = <&k3_clks 103 0>; + assigned-clocks = <&k3_clks 103 0>; + assigned-clock-parents = <&k3_clks 103 2>; + assigned-clock-rates = <166666666>; + power-domains = <&k3_pds 103 TI_SCI_PD_EXCLUSIVE>; + #address-cells = <1>; + #size-cells = <0>; + }; }; tscadc0: tscadc@40200000 { diff --git a/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi index a988e2ab2ba1..34724440171a 100644 --- a/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi @@ -100,6 +100,22 @@ J721E_WKUP_IOPAD(0x28, PIN_INPUT, 1) /* (A7) MCU_OSPI0_D7.MCU_HYPERBUS0_DQ7 */ >; }; + + mcu_fss0_ospi0_pins_default: mcu-fss0-ospi0-pins-default { + pinctrl-single,pins = < + J721E_WKUP_IOPAD(0x0000, PIN_OUTPUT, 0) /* MCU_OSPI0_CLK */ + J721E_WKUP_IOPAD(0x002c, PIN_OUTPUT, 0) /* MCU_OSPI0_CSn0 */ + J721E_WKUP_IOPAD(0x000c, PIN_INPUT, 0) /* MCU_OSPI0_D0 */ + J721E_WKUP_IOPAD(0x0010, PIN_INPUT, 0) /* MCU_OSPI0_D1 */ + J721E_WKUP_IOPAD(0x0014, PIN_INPUT, 0) /* MCU_OSPI0_D2 */ + J721E_WKUP_IOPAD(0x0018, PIN_INPUT, 0) /* MCU_OSPI0_D3 */ + J721E_WKUP_IOPAD(0x001c, PIN_INPUT, 0) /* MCU_OSPI0_D4 */ + J721E_WKUP_IOPAD(0x0020, PIN_INPUT, 0) /* MCU_OSPI0_D5 */ + J721E_WKUP_IOPAD(0x0024, PIN_INPUT, 0) /* MCU_OSPI0_D6 */ + J721E_WKUP_IOPAD(0x0028, PIN_INPUT, 0) /* MCU_OSPI0_D7 */ + J721E_WKUP_IOPAD(0x0008, PIN_INPUT, 0) /* MCU_OSPI0_DQS */ + >; + }; }; &main_pmx0 { @@ -235,3 +251,23 @@ "GPIO_LIN_EN", "CAN_STB"; }; }; + 
+&ospi0 { + pinctrl-names = "default"; + pinctrl-0 = <&mcu_fss0_ospi0_pins_default>; + + flash@0{ + compatible = "jedec,spi-nor"; + reg = <0x0>; + spi-tx-bus-width = <8>; + spi-rx-bus-width = <8>; + spi-max-frequency = <25000000>; + cdns,tshsl-ns = <60>; + cdns,tsd2d-ns = <60>; + cdns,tchsh-ns = <60>; + cdns,tslch-ns = <60>; + cdns,read-delay = <4>; + #address-cells = <1>; + #size-cells = <1>; + }; +}; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi index 8c84dafb7125..c2aa45a3ac79 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi @@ -1042,13 +1042,16 @@ assigned-clocks = <&k3_clks 91 1>; assigned-clock-parents = <&k3_clks 91 2>; bus-width = <8>; - mmc-hs400-1_8v; + mmc-hs200-1_8v; mmc-ddr-1_8v; ti,otap-del-sel-legacy = <0xf>; ti,otap-del-sel-mmc-hs = <0xf>; ti,otap-del-sel-ddr52 = <0x5>; ti,otap-del-sel-hs200 = <0x6>; ti,otap-del-sel-hs400 = <0x0>; + ti,itap-del-sel-legacy = <0x10>; + ti,itap-del-sel-mmc-hs = <0xa>; + ti,itap-del-sel-ddr52 = <0x3>; ti,trm-icp = <0x8>; ti,strobe-sel = <0x77>; dma-coherent; @@ -1069,9 +1072,15 @@ ti,otap-del-sel-sdr25 = <0xf>; ti,otap-del-sel-sdr50 = <0xc>; ti,otap-del-sel-ddr50 = <0xc>; + ti,itap-del-sel-legacy = <0x0>; + ti,itap-del-sel-sd-hs = <0x0>; + ti,itap-del-sel-sdr12 = <0x0>; + ti,itap-del-sel-sdr25 = <0x0>; + ti,itap-del-sel-ddr50 = <0x2>; ti,trm-icp = <0x8>; ti,clkbuf-sel = <0x7>; dma-coherent; + sdhci-caps-mask = <0x2 0x0>; }; main_sdhci2: mmc@4f98000 { @@ -1089,9 +1098,15 @@ ti,otap-del-sel-sdr25 = <0xf>; ti,otap-del-sel-sdr50 = <0xc>; ti,otap-del-sel-ddr50 = <0xc>; + ti,itap-del-sel-legacy = <0x0>; + ti,itap-del-sel-sd-hs = <0x0>; + ti,itap-del-sel-sdr12 = <0x0>; + ti,itap-del-sel-sdr25 = <0x0>; + ti,itap-del-sel-ddr50 = <0x2>; ti,trm-icp = <0x8>; ti,clkbuf-sel = <0x7>; dma-coherent; + sdhci-caps-mask = <0x2 0x0>; }; usbss0: cdns-usb@4104000 { @@ -1647,4 +1662,266 @@ resets = <&k3_reset 15 1>; firmware-name = "j7-c71_0-fw"; }; + + icssg0: icssg@b000000 { + compatible = "ti,j721e-icssg"; + reg = <0x00 0xb000000 0x00 0x80000>; + power-domains = <&k3_pds 119 TI_SCI_PD_EXCLUSIVE>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x00 0x0b000000 0x100000>; + + icssg0_mem: memories@0 { + reg = <0x0 0x2000>, + <0x2000 0x2000>, + <0x10000 0x10000>; + reg-names = "dram0", "dram1", + "shrdram2"; + }; + + icssg0_cfg: cfg@26000 { + compatible = "ti,pruss-cfg", "syscon"; + reg = <0x26000 0x200>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x26000 0x2000>; + + clocks { + #address-cells = <1>; + #size-cells = <0>; + + icssg0_coreclk_mux: coreclk-mux@3c { + reg = <0x3c>; + #clock-cells = <0>; + clocks = <&k3_clks 119 24>, /* icssg0_core_clk */ + <&k3_clks 119 1>; /* icssg0_iclk */ + assigned-clocks = <&icssg0_coreclk_mux>; + assigned-clock-parents = <&k3_clks 119 1>; + }; + + icssg0_iepclk_mux: iepclk-mux@30 { + reg = <0x30>; + #clock-cells = <0>; + clocks = <&k3_clks 119 3>, /* icssg0_iep_clk */ + <&icssg0_coreclk_mux>; /* core_clk */ + assigned-clocks = <&icssg0_iepclk_mux>; + assigned-clock-parents = <&icssg0_coreclk_mux>; + }; + }; + }; + + icssg0_mii_rt: mii-rt@32000 { + compatible = "ti,pruss-mii", "syscon"; + reg = <0x32000 0x100>; + }; + + icssg0_mii_g_rt: mii-g-rt@33000 { + compatible = "ti,pruss-mii-g", "syscon"; + reg = <0x33000 0x1000>; + }; + + icssg0_intc: interrupt-controller@20000 { + compatible = "ti,icssg-intc"; + reg = <0x20000 0x2000>; + interrupt-controller; + #interrupt-cells = <3>; + interrupts = <GIC_SPI 
254 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "host_intr0", "host_intr1", + "host_intr2", "host_intr3", + "host_intr4", "host_intr5", + "host_intr6", "host_intr7"; + }; + + pru0_0: pru@34000 { + compatible = "ti,j721e-pru"; + reg = <0x34000 0x3000>, + <0x22000 0x100>, + <0x22400 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "j7-pru0_0-fw"; + }; + + rtu0_0: rtu@4000 { + compatible = "ti,j721e-rtu"; + reg = <0x4000 0x2000>, + <0x23000 0x100>, + <0x23400 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "j7-rtu0_0-fw"; + }; + + tx_pru0_0: txpru@a000 { + compatible = "ti,j721e-tx-pru"; + reg = <0xa000 0x1800>, + <0x25000 0x100>, + <0x25400 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "j7-txpru0_0-fw"; + }; + + pru0_1: pru@38000 { + compatible = "ti,j721e-pru"; + reg = <0x38000 0x3000>, + <0x24000 0x100>, + <0x24400 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "j7-pru0_1-fw"; + }; + + rtu0_1: rtu@6000 { + compatible = "ti,j721e-rtu"; + reg = <0x6000 0x2000>, + <0x23800 0x100>, + <0x23c00 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "j7-rtu0_1-fw"; + }; + + tx_pru0_1: txpru@c000 { + compatible = "ti,j721e-tx-pru"; + reg = <0xc000 0x1800>, + <0x25800 0x100>, + <0x25c00 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "j7-txpru0_1-fw"; + }; + }; + + icssg1: icssg@b100000 { + compatible = "ti,j721e-icssg"; + reg = <0x00 0xb100000 0x00 0x80000>; + power-domains = <&k3_pds 120 TI_SCI_PD_EXCLUSIVE>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x00 0x0b100000 0x100000>; + + icssg1_mem: memories@b100000 { + reg = <0x0 0x2000>, + <0x2000 0x2000>, + <0x10000 0x10000>; + reg-names = "dram0", "dram1", + "shrdram2"; + }; + + icssg1_cfg: cfg@26000 { + compatible = "ti,pruss-cfg", "syscon"; + reg = <0x26000 0x200>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x26000 0x2000>; + + clocks { + #address-cells = <1>; + #size-cells = <0>; + + icssg1_coreclk_mux: coreclk-mux@3c { + reg = <0x3c>; + #clock-cells = <0>; + clocks = <&k3_clks 120 54>, /* icssg1_core_clk */ + <&k3_clks 120 4>; /* icssg1_iclk */ + assigned-clocks = <&icssg1_coreclk_mux>; + assigned-clock-parents = <&k3_clks 120 4>; + }; + + icssg1_iepclk_mux: iepclk-mux@30 { + reg = <0x30>; + #clock-cells = <0>; + clocks = <&k3_clks 120 9>, /* icssg1_iep_clk */ + <&icssg1_coreclk_mux>; /* core_clk */ + assigned-clocks = <&icssg1_iepclk_mux>; + assigned-clock-parents = <&icssg1_coreclk_mux>; + }; + }; + }; + + icssg1_mii_rt: mii-rt@32000 { + compatible = "ti,pruss-mii", "syscon"; + reg = <0x32000 0x100>; + }; + + icssg1_mii_g_rt: mii-g-rt@33000 { + compatible = "ti,pruss-mii-g", "syscon"; + reg = <0x33000 0x1000>; + }; + + icssg1_intc: interrupt-controller@20000 { + compatible = "ti,icssg-intc"; + reg = <0x20000 0x2000>; + interrupt-controller; + #interrupt-cells = <3>; + interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "host_intr0", "host_intr1", + "host_intr2", 
"host_intr3", + "host_intr4", "host_intr5", + "host_intr6", "host_intr7"; + }; + + pru1_0: pru@34000 { + compatible = "ti,j721e-pru"; + reg = <0x34000 0x4000>, + <0x22000 0x100>, + <0x22400 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "j7-pru1_0-fw"; + }; + + rtu1_0: rtu@4000 { + compatible = "ti,j721e-rtu"; + reg = <0x4000 0x2000>, + <0x23000 0x100>, + <0x23400 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "j7-rtu1_0-fw"; + }; + + tx_pru1_0: txpru@a000 { + compatible = "ti,j721e-tx-pru"; + reg = <0xa000 0x1800>, + <0x25000 0x100>, + <0x25400 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "j7-txpru1_0-fw"; + }; + + pru1_1: pru@38000 { + compatible = "ti,j721e-pru"; + reg = <0x38000 0x4000>, + <0x24000 0x100>, + <0x24400 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "j7-pru1_1-fw"; + }; + + rtu1_1: rtu@6000 { + compatible = "ti,j721e-rtu"; + reg = <0x6000 0x2000>, + <0x23800 0x100>, + <0x23c00 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "j7-rtu1_1-fw"; + }; + + tx_pru1_1: txpru@c000 { + compatible = "ti,j721e-tx-pru"; + reg = <0xc000 0x1800>, + <0x25800 0x100>, + <0x25c00 0x100>; + reg-names = "iram", "control", "debug"; + firmware-name = "j7-txpru1_1-fw"; + }; + }; }; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi index 6c44afae9187..d56e3475aee7 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi @@ -180,7 +180,7 @@ ranges; ospi0: spi@47040000 { - compatible = "ti,am654-ospi"; + compatible = "ti,am654-ospi", "cdns,qspi-nor"; reg = <0x0 0x47040000 0x0 0x100>, <0x5 0x00000000 0x1 0x0000000>; interrupts = <GIC_SPI 840 IRQ_TYPE_LEVEL_HIGH>; @@ -197,7 +197,7 @@ }; ospi1: spi@47050000 { - compatible = "ti,am654-ospi"; + compatible = "ti,am654-ospi", "cdns,qspi-nor"; reg = <0x0 0x47050000 0x0 0x100>, <0x7 0x00000000 0x1 0x00000000>; interrupts = <GIC_SPI 841 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi index 57720e6a04c5..2fee2906183d 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi @@ -174,9 +174,9 @@ flash@0{ compatible = "jedec,spi-nor"; reg = <0x0>; - spi-tx-bus-width = <1>; + spi-tx-bus-width = <8>; spi-rx-bus-width = <8>; - spi-max-frequency = <40000000>; + spi-max-frequency = <25000000>; cdns,tshsl-ns = <60>; cdns,tsd2d-ns = <60>; cdns,tchsh-ns = <60>; diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts index 12e8bd48dc8c..eca6c2de84a7 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts +++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts @@ -580,25 +580,7 @@ #address-cells = <1>; #size-cells = <0>; reg = <4>; - si5328: clock-generator@69 {/* SI5328 - u20 */ - reg = <0x69>; - /* - * Chip has interrupt present connected to PL - * interrupt-parent = <&>; - * interrupts = <>; - */ - #address-cells = <1>; - #size-cells = <0>; - #clock-cells = <1>; - clocks = <&refhdmi>; - clock-names = "xtal"; - clock-output-names = "si5328"; - - si5328_clk: clk0@0 { - reg = <0>; - clock-frequency = <27000000>; - }; - }; + /* SI5328 - u20 */ }; /* 5 - 7 unconnected */ }; diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts index 18771e868399..eff7c6447087 100644 --- 
a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts +++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts @@ -581,25 +581,7 @@ #address-cells = <1>; #size-cells = <0>; reg = <4>; - si5328: clock-generator@69 {/* SI5328 - u20 */ - reg = <0x69>; - /* - * Chip has interrupt present connected to PL - * interrupt-parent = <&>; - * interrupts = <>; - */ - #address-cells = <1>; - #size-cells = <0>; - #clock-cells = <1>; - clocks = <&refhdmi>; - clock-names = "xtal"; - clock-output-names = "si5328"; - - si5328_clk: clk0@0 { - reg = <0>; - clock-frequency = <27000000>; - }; - }; + /* SI5328 - u20 */ }; i2c@5 { #address-cells = <1>; diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi index a3b391d18787..28dccb891a53 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi +++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi @@ -856,6 +856,7 @@ interrupts = <0 122 4>; interrupt-parent = <&gic>; clock-names = "axi_clk"; + power-domains = <&zynqmp_firmware PD_DP>; #dma-cells = <1>; }; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index d612f633b771..08c6f769df9a 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -29,8 +29,10 @@ CONFIG_KALLSYMS_ALL=y CONFIG_PROFILING=y CONFIG_ARCH_ACTIONS=y CONFIG_ARCH_AGILEX=y +CONFIG_ARCH_N5X=y CONFIG_ARCH_SUNXI=y CONFIG_ARCH_ALPINE=y +CONFIG_ARCH_APPLE=y CONFIG_ARCH_BCM2835=y CONFIG_ARCH_BCM4908=y CONFIG_ARCH_BCM_IPROC=y @@ -41,6 +43,7 @@ CONFIG_ARCH_K3=y CONFIG_ARCH_LAYERSCAPE=y CONFIG_ARCH_LG1K=y CONFIG_ARCH_HISI=y +CONFIG_ARCH_KEEMBAY=y CONFIG_ARCH_MEDIATEK=y CONFIG_ARCH_MESON=y CONFIG_ARCH_MVEBU=y @@ -50,7 +53,7 @@ CONFIG_ARCH_RENESAS=y CONFIG_ARCH_ROCKCHIP=y CONFIG_ARCH_S32=y CONFIG_ARCH_SEATTLE=y -CONFIG_ARCH_STRATIX10=y +CONFIG_ARCH_INTEL_SOCFPGA=y CONFIG_ARCH_SYNQUACER=y CONFIG_ARCH_TEGRA=y CONFIG_ARCH_SPRD=y @@ -225,6 +228,7 @@ CONFIG_PCI_HOST_THUNDER_PEM=y CONFIG_PCI_HOST_THUNDER_ECAM=y CONFIG_PCIE_ROCKCHIP_HOST=m CONFIG_PCIE_BRCMSTB=m +CONFIG_PCI_IMX6=y CONFIG_PCI_LAYERSCAPE=y CONFIG_PCIE_LAYERSCAPE_GEN4=y CONFIG_PCI_HISI=y @@ -359,7 +363,7 @@ CONFIG_MESON_GXL_PHY=m CONFIG_MICREL_PHY=y CONFIG_MICROSEMI_PHY=y CONFIG_AT803X_PHY=y -CONFIG_REALTEK_PHY=m +CONFIG_REALTEK_PHY=y CONFIG_ROCKCHIP_PHY=y CONFIG_VITESSE_PHY=y CONFIG_USB_PEGASUS=m @@ -448,6 +452,7 @@ CONFIG_I2C_GPIO=m CONFIG_I2C_IMX=y CONFIG_I2C_IMX_LPI2C=y CONFIG_I2C_MESON=y +CONFIG_I2C_MT65XX=y CONFIG_I2C_MV64XXX=y CONFIG_I2C_OMAP=y CONFIG_I2C_OWL=y @@ -465,6 +470,9 @@ CONFIG_SPI=y CONFIG_SPI_ARMADA_3700=y CONFIG_SPI_BCM2835=m CONFIG_SPI_BCM2835AUX=m +CONFIG_SPI_DESIGNWARE=m +CONFIG_SPI_DW_DMA=y +CONFIG_SPI_DW_MMIO=m CONFIG_SPI_FSL_LPSPI=y CONFIG_SPI_FSL_QUADSPI=y CONFIG_SPI_NXP_FLEXSPI=y @@ -494,6 +502,7 @@ CONFIG_PINCTRL_IMX8MM=y CONFIG_PINCTRL_IMX8MN=y CONFIG_PINCTRL_IMX8MP=y CONFIG_PINCTRL_IMX8MQ=y +CONFIG_PINCTRL_IMX8QM=y CONFIG_PINCTRL_IMX8QXP=y CONFIG_PINCTRL_IMX8DXL=y CONFIG_PINCTRL_MSM=y @@ -510,6 +519,7 @@ CONFIG_PINCTRL_SC7180=y CONFIG_PINCTRL_SDM845=y CONFIG_PINCTRL_SM8150=y CONFIG_PINCTRL_SM8250=y +CONFIG_PINCTRL_SM8350=y CONFIG_PINCTRL_LPASS_LPI=m CONFIG_GPIO_ALTERA=m CONFIG_GPIO_DAVINCI=y @@ -594,6 +604,7 @@ CONFIG_MFD_EXYNOS_LPASS=m CONFIG_MFD_HI6421_PMIC=y CONFIG_MFD_HI655X_PMIC=y CONFIG_MFD_MAX77620=y +CONFIG_MFD_MT6397=y CONFIG_MFD_SPMI_PMIC=y CONFIG_MFD_RK808=y CONFIG_MFD_SEC_CORE=y @@ -611,6 +622,8 @@ CONFIG_REGULATOR_HI655X=y CONFIG_REGULATOR_MAX77620=y CONFIG_REGULATOR_MAX8973=y CONFIG_REGULATOR_MP8859=y +CONFIG_REGULATOR_MT6358=y +CONFIG_REGULATOR_MT6397=y CONFIG_REGULATOR_PCA9450=y 
CONFIG_REGULATOR_PF8X00=y CONFIG_REGULATOR_PFUZE100=y @@ -682,6 +695,7 @@ CONFIG_DRM_MSM=m CONFIG_DRM_TEGRA=m CONFIG_DRM_PANEL_LVDS=m CONFIG_DRM_PANEL_SIMPLE=m +CONFIG_DRM_PANEL_BOE_TV101WUM_NL6=m CONFIG_DRM_PANEL_MANTIX_MLAF057WE51=m CONFIG_DRM_PANEL_RAYDIUM_RM67191=m CONFIG_DRM_PANEL_SITRONIX_ST7703=m @@ -689,6 +703,7 @@ CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA=m CONFIG_DRM_DISPLAY_CONNECTOR=m CONFIG_DRM_NWL_MIPI_DSI=m CONFIG_DRM_LONTIUM_LT9611=m +CONFIG_DRM_PARADE_PS8640=m CONFIG_DRM_SII902X=m CONFIG_DRM_SIMPLE_BRIDGE=m CONFIG_DRM_THINE_THC63LVD1024=m @@ -703,6 +718,8 @@ CONFIG_DRM_VC4=m CONFIG_DRM_ETNAVIV=m CONFIG_DRM_HISI_HIBMC=m CONFIG_DRM_HISI_KIRIN=m +CONFIG_DRM_MEDIATEK=m +CONFIG_DRM_MEDIATEK_HDMI=m CONFIG_DRM_MXSFB=m CONFIG_DRM_MESON=m CONFIG_DRM_PL111=m @@ -760,9 +777,11 @@ CONFIG_SND_SOC_GTM601=m CONFIG_SND_SOC_PCM3168A_I2C=m CONFIG_SND_SOC_RT5659=m CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m +CONFIG_SND_SOC_SIMPLE_MUX=m CONFIG_SND_SOC_TAS571X=m CONFIG_SND_SOC_WCD934X=m CONFIG_SND_SOC_WM8904=m +CONFIG_SND_SOC_WM8960=m CONFIG_SND_SOC_WM8962=m CONFIG_SND_SOC_WSA881X=m CONFIG_SND_SOC_LPASS_WSA_MACRO=m @@ -787,6 +806,7 @@ CONFIG_USB_RENESAS_USBHS_HCD=m CONFIG_USB_RENESAS_USBHS=m CONFIG_USB_ACM=m CONFIG_USB_STORAGE=y +CONFIG_USB_MTU3=y CONFIG_USB_MUSB_HDRC=y CONFIG_USB_MUSB_SUNXI=y CONFIG_USB_DWC3=y @@ -879,6 +899,7 @@ CONFIG_RTC_DRV_DS3232=y CONFIG_RTC_DRV_PCF2127=m CONFIG_RTC_DRV_EFI=y CONFIG_RTC_DRV_CROS_EC=y +CONFIG_RTC_DRV_FSL_FTM_ALARM=m CONFIG_RTC_DRV_S3C=y CONFIG_RTC_DRV_PL031=y CONFIG_RTC_DRV_SUN6I=y @@ -915,6 +936,9 @@ CONFIG_VIRTIO_MMIO=y CONFIG_XEN_GNTDEV=y CONFIG_XEN_GRANT_DEV_ALLOC=y CONFIG_MFD_CROS_EC_DEV=y +CONFIG_STAGING=y +CONFIG_STAGING_MEDIA=y +CONFIG_VIDEO_HANTRO=m CONFIG_CHROME_PLATFORMS=y CONFIG_CROS_EC=y CONFIG_CROS_EC_I2C=y @@ -957,6 +981,7 @@ CONFIG_SDM_VIDEOCC_845=y CONFIG_SDM_DISPCC_845=y CONFIG_SM_GCC_8150=y CONFIG_SM_GCC_8250=y +CONFIG_SM_GCC_8350=y CONFIG_SM_GPUCC_8150=y CONFIG_SM_GPUCC_8250=y CONFIG_SM_DISPCC_8250=y @@ -974,6 +999,7 @@ CONFIG_ROCKCHIP_IOMMU=y CONFIG_TEGRA_IOMMU_SMMU=y CONFIG_ARM_SMMU=y CONFIG_ARM_SMMU_V3=y +CONFIG_MTK_IOMMU=y CONFIG_QCOM_IOMMU=y CONFIG_REMOTEPROC=y CONFIG_QCOM_Q6V5_MSS=m @@ -988,6 +1014,8 @@ CONFIG_OWL_PM_DOMAINS=y CONFIG_RASPBERRYPI_POWER=y CONFIG_FSL_DPAA=y CONFIG_FSL_MC_DPIO=y +CONFIG_FSL_RCPM=y +CONFIG_MTK_PMIC_WRAP=y CONFIG_QCOM_AOSS_QMP=y CONFIG_QCOM_COMMAND_DB=y CONFIG_QCOM_GENI_SE=y @@ -1022,6 +1050,8 @@ CONFIG_ARCH_TEGRA_186_SOC=y CONFIG_ARCH_TEGRA_194_SOC=y CONFIG_ARCH_TEGRA_234_SOC=y CONFIG_TI_SCI_PM_DOMAINS=y +CONFIG_ARM_IMX_BUS_DEVFREQ=m +CONFIG_ARM_IMX8M_DDRC_DEVFREQ=m CONFIG_EXTCON_PTN5150=m CONFIG_EXTCON_USB_GPIO=y CONFIG_EXTCON_USBC_CROS_EC=y @@ -1045,6 +1075,8 @@ CONFIG_PWM_BCM2835=m CONFIG_PWM_CROS_EC=m CONFIG_PWM_IMX27=m CONFIG_PWM_MESON=m +CONFIG_PWM_MTK_DISP=m +CONFIG_PWM_MEDIATEK=m CONFIG_PWM_RCAR=m CONFIG_PWM_ROCKCHIP=y CONFIG_PWM_SAMSUNG=y @@ -1064,6 +1096,7 @@ CONFIG_PHY_HI6220_USB=y CONFIG_PHY_HISTB_COMBPHY=y CONFIG_PHY_HISI_INNO_USB2=y CONFIG_PHY_MVEBU_CP110_COMPHY=y +CONFIG_PHY_MTK_TPHY=y CONFIG_PHY_QCOM_QMP=m CONFIG_PHY_QCOM_QUSB2=m CONFIG_PHY_QCOM_USB_HS=y @@ -1088,6 +1121,7 @@ CONFIG_QCOM_L3_PMU=y CONFIG_NVMEM_IMX_OCOTP=y CONFIG_NVMEM_IMX_OCOTP_SCU=y CONFIG_QCOM_QFPROM=y +CONFIG_MTK_EFUSE=y CONFIG_ROCKCHIP_EFUSE=y CONFIG_NVMEM_SUNXI_SID=y CONFIG_UNIPHIER_EFUSE=y @@ -1156,6 +1190,7 @@ CONFIG_CRYPTO_DEV_HISI_TRNG=m CONFIG_CMA_SIZE_MBYTES=32 CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_REDUCED=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y diff --git 
a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S index bbdb54702aa7..b495de22bb38 100644 --- a/arch/arm64/crypto/aes-modes.S +++ b/arch/arm64/crypto/aes-modes.S @@ -359,6 +359,7 @@ ST5( mov v4.16b, vctr.16b ) ins vctr.d[0], x8 /* apply carry to N counter blocks for N := x12 */ + cbz x12, 2f adr x16, 1f sub x16, x16, x12, lsl #3 br x16 @@ -700,7 +701,7 @@ AES_FUNC_START(aes_mac_update) cbz w5, .Lmacout encrypt_block v0, w2, x1, x7, w8 st1 {v0.16b}, [x4] /* return dg */ - cond_yield .Lmacout, x7 + cond_yield .Lmacout, x7, x8 b .Lmacloop4x .Lmac1x: add w3, w3, #4 diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c index 683de671741a..9c3d86e397bf 100644 --- a/arch/arm64/crypto/poly1305-glue.c +++ b/arch/arm64/crypto/poly1305-glue.c @@ -25,7 +25,7 @@ asmlinkage void poly1305_emit(void *state, u8 *digest, const u32 *nonce); static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key) +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) { poly1305_init_arm64(&dctx->h, key); dctx->s[0] = get_unaligned_le32(key + 16); diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S index 8c02bbc2684e..889ca0f8972b 100644 --- a/arch/arm64/crypto/sha1-ce-core.S +++ b/arch/arm64/crypto/sha1-ce-core.S @@ -121,7 +121,7 @@ CPU_LE( rev32 v11.16b, v11.16b ) add dgav.4s, dgav.4s, dg0v.4s cbz w2, 2f - cond_yield 3f, x5 + cond_yield 3f, x5, x6 b 0b /* diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S index 6cdea7d56059..491179922f49 100644 --- a/arch/arm64/crypto/sha2-ce-core.S +++ b/arch/arm64/crypto/sha2-ce-core.S @@ -129,7 +129,7 @@ CPU_LE( rev32 v19.16b, v19.16b ) /* handled all input blocks? */ cbz w2, 2f - cond_yield 3f, x5 + cond_yield 3f, x5, x6 b 0b /* diff --git a/arch/arm64/crypto/sha3-ce-core.S b/arch/arm64/crypto/sha3-ce-core.S index 6f5208414fe3..9c77313f5a60 100644 --- a/arch/arm64/crypto/sha3-ce-core.S +++ b/arch/arm64/crypto/sha3-ce-core.S @@ -184,11 +184,11 @@ SYM_FUNC_START(sha3_ce_transform) eor v0.16b, v0.16b, v31.16b cbnz w8, 3b - cond_yield 3f, x8 + cond_yield 4f, x8, x9 cbnz w2, 0b /* save state */ -3: st1 { v0.1d- v3.1d}, [x0], #32 +4: st1 { v0.1d- v3.1d}, [x0], #32 st1 { v4.1d- v7.1d}, [x0], #32 st1 { v8.1d-v11.1d}, [x0], #32 st1 {v12.1d-v15.1d}, [x0], #32 diff --git a/arch/arm64/crypto/sha512-ce-core.S b/arch/arm64/crypto/sha512-ce-core.S index d6e7f6c95fa6..b6a3a36e15f5 100644 --- a/arch/arm64/crypto/sha512-ce-core.S +++ b/arch/arm64/crypto/sha512-ce-core.S @@ -195,7 +195,7 @@ CPU_LE( rev64 v19.16b, v19.16b ) add v10.2d, v10.2d, v2.2d add v11.2d, v11.2d, v3.2d - cond_yield 3f, x4 + cond_yield 3f, x4, x5 /* handled all input blocks? */ cbnz w2, 0b diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h index 5df500dcc627..8a078fc662ac 100644 --- a/arch/arm64/include/asm/alternative-macros.h +++ b/arch/arm64/include/asm/alternative-macros.h @@ -97,9 +97,9 @@ .popsection .subsection 1 663: \insn2 -664: .previous - .org . - (664b-663b) + (662b-661b) +664: .org . - (664b-663b) + (662b-661b) .org . - (662b-661b) + (664b-663b) + .previous .endif .endm @@ -169,11 +169,11 @@ */ .macro alternative_endif 664: + .org . - (664b-663b) + (662b-661b) + .org . - (662b-661b) + (664b-663b) .if .Lasm_alt_mode==0 .previous .endif - .org . - (664b-663b) + (662b-661b) - .org . 
- (662b-661b) + (664b-663b) .endm /* diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 880b9054d75c..934b9be582d2 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -173,7 +173,7 @@ static inline void gic_pmr_mask_irqs(void) static inline void gic_arch_enable_irqs(void) { - asm volatile ("msr daifclr, #2" : : : "memory"); + asm volatile ("msr daifclr, #3" : : : "memory"); } #endif /* __ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h index 9f0ec21d6327..88d20f04c64a 100644 --- a/arch/arm64/include/asm/arch_timer.h +++ b/arch/arm64/include/asm/arch_timer.h @@ -165,25 +165,6 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl) isb(); } -/* - * Ensure that reads of the counter are treated the same as memory reads - * for the purposes of ordering by subsequent memory barriers. - * - * This insanity brought to you by speculative system register reads, - * out-of-order memory accesses, sequence locks and Thomas Gleixner. - * - * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html - */ -#define arch_counter_enforce_ordering(val) do { \ - u64 tmp, _val = (val); \ - \ - asm volatile( \ - " eor %0, %1, %1\n" \ - " add %0, sp, %0\n" \ - " ldr xzr, [%0]" \ - : "=r" (tmp) : "r" (_val)); \ -} while (0) - static __always_inline u64 __arch_counter_get_cntpct_stable(void) { u64 cnt; @@ -224,8 +205,6 @@ static __always_inline u64 __arch_counter_get_cntvct(void) return cnt; } -#undef arch_counter_enforce_ordering - static inline int arch_timer_arch_init(void) { return 0; diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h index 52dead2a8640..8ca2dc0661ee 100644 --- a/arch/arm64/include/asm/asm_pointer_auth.h +++ b/arch/arm64/include/asm/asm_pointer_auth.h @@ -13,30 +13,12 @@ * so use the base value of ldp as thread.keys_user and offset as * thread.keys_user.ap*. 
*/ - .macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3 + .macro __ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3 mov \tmp1, #THREAD_KEYS_USER add \tmp1, \tsk, \tmp1 -alternative_if_not ARM64_HAS_ADDRESS_AUTH - b .Laddr_auth_skip_\@ -alternative_else_nop_endif ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA] msr_s SYS_APIAKEYLO_EL1, \tmp2 msr_s SYS_APIAKEYHI_EL1, \tmp3 - ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIB] - msr_s SYS_APIBKEYLO_EL1, \tmp2 - msr_s SYS_APIBKEYHI_EL1, \tmp3 - ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDA] - msr_s SYS_APDAKEYLO_EL1, \tmp2 - msr_s SYS_APDAKEYHI_EL1, \tmp3 - ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDB] - msr_s SYS_APDBKEYLO_EL1, \tmp2 - msr_s SYS_APDBKEYHI_EL1, \tmp3 -.Laddr_auth_skip_\@: -alternative_if ARM64_HAS_GENERIC_AUTH - ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APGA] - msr_s SYS_APGAKEYLO_EL1, \tmp2 - msr_s SYS_APGAKEYHI_EL1, \tmp3 -alternative_else_nop_endif .endm .macro __ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3 diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index ca31594d3d6c..8418c1bd8f04 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -15,6 +15,8 @@ #include <asm-generic/export.h> #include <asm/asm-offsets.h> +#include <asm/alternative.h> +#include <asm/asm-bug.h> #include <asm/cpufeature.h> #include <asm/cputype.h> #include <asm/debug-monitors.h> @@ -23,6 +25,14 @@ #include <asm/ptrace.h> #include <asm/thread_info.h> + /* + * Provide a wxN alias for each wN register so what we can paste a xN + * reference after a 'w' to obtain the 32-bit version. + */ + .irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30 + wx\n .req w\n + .endr + .macro save_and_disable_daif, flags mrs \flags, daif msr daifset, #0xf @@ -40,9 +50,9 @@ msr daif, \flags .endm - /* IRQ is the lowest priority flag, unconditionally unmask the rest. */ - .macro enable_da_f - msr daifclr, #(8 | 4 | 1) + /* IRQ/FIQ are the lowest priority flags, unconditionally unmask the rest. */ + .macro enable_da + msr daifclr, #(8 | 4) .endm /* @@ -50,7 +60,7 @@ */ .macro save_and_disable_irq, flags mrs \flags, daif - msr daifset, #2 + msr daifset, #3 .endm .macro restore_irq, flags @@ -270,12 +280,24 @@ alternative_endif * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val */ .macro read_ctr, reg +#ifndef __KVM_NVHE_HYPERVISOR__ alternative_if_not ARM64_MISMATCHED_CACHE_TYPE mrs \reg, ctr_el0 // read CTR nop alternative_else ldr_l \reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL alternative_endif +#else +alternative_if_not ARM64_KVM_PROTECTED_MODE + ASM_BUG() +alternative_else_nop_endif +alternative_cb kvm_compute_final_ctr_el0 + movz \reg, #0 + movk \reg, #0, lsl #16 + movk \reg, #0, lsl #32 + movk \reg, #0, lsl #48 +alternative_cb_end +#endif .endm @@ -676,11 +698,11 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU .endm /* - * Set SCTLR_EL1 to the passed value, and invalidate the local icache + * Set SCTLR_ELx to the @reg value, and invalidate the local icache * in the process. This is called when setting the MMU on. */ -.macro set_sctlr_el1, reg - msr sctlr_el1, \reg +.macro set_sctlr, sreg, reg + msr \sreg, \reg isb /* * Invalidate the local I-cache so that any instructions fetched @@ -692,90 +714,41 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU isb .endm -/* - * Check whether to yield to another runnable task from kernel mode NEON code - * (which runs with preemption disabled). 
- * - * if_will_cond_yield_neon - * // pre-yield patchup code - * do_cond_yield_neon - * // post-yield patchup code - * endif_yield_neon <label> - * - * where <label> is optional, and marks the point where execution will resume - * after a yield has been performed. If omitted, execution resumes right after - * the endif_yield_neon invocation. Note that the entire sequence, including - * the provided patchup code, will be omitted from the image if - * CONFIG_PREEMPTION is not defined. - * - * As a convenience, in the case where no patchup code is required, the above - * sequence may be abbreviated to - * - * cond_yield_neon <label> - * - * Note that the patchup code does not support assembler directives that change - * the output section, any use of such directives is undefined. - * - * The yield itself consists of the following: - * - Check whether the preempt count is exactly 1 and a reschedule is also - * needed. If so, calling of preempt_enable() in kernel_neon_end() will - * trigger a reschedule. If it is not the case, yielding is pointless. - * - Disable and re-enable kernel mode NEON, and branch to the yield fixup - * code. - * - * This macro sequence may clobber all CPU state that is not guaranteed by the - * AAPCS to be preserved across an ordinary function call. - */ - - .macro cond_yield_neon, lbl - if_will_cond_yield_neon - do_cond_yield_neon - endif_yield_neon \lbl - .endm - - .macro if_will_cond_yield_neon -#ifdef CONFIG_PREEMPTION - get_current_task x0 - ldr x0, [x0, #TSK_TI_PREEMPT] - sub x0, x0, #PREEMPT_DISABLE_OFFSET - cbz x0, .Lyield_\@ - /* fall through to endif_yield_neon */ - .subsection 1 -.Lyield_\@ : -#else - .section ".discard.cond_yield_neon", "ax" -#endif - .endm - - .macro do_cond_yield_neon - bl kernel_neon_end - bl kernel_neon_begin - .endm +.macro set_sctlr_el1, reg + set_sctlr sctlr_el1, \reg +.endm - .macro endif_yield_neon, lbl - .ifnb \lbl - b \lbl - .else - b .Lyield_out_\@ - .endif - .previous -.Lyield_out_\@ : - .endm +.macro set_sctlr_el2, reg + set_sctlr sctlr_el2, \reg +.endm /* - * Check whether preempt-disabled code should yield as soon as it - * is able. This is the case if re-enabling preemption a single - * time results in a preempt count of zero, and the TIF_NEED_RESCHED - * flag is set. (Note that the latter is stored negated in the - * top word of the thread_info::preempt_count field) + * Check whether preempt/bh-disabled asm code should yield as soon as + * it is able. This is the case if we are currently running in task + * context, and either a softirq is pending, or the TIF_NEED_RESCHED + * flag is set and re-enabling preemption a single time would result in + * a preempt count of zero. (Note that the TIF_NEED_RESCHED flag is + * stored negated in the top word of the thread_info::preempt_count + * field) */ - .macro cond_yield, lbl:req, tmp:req -#ifdef CONFIG_PREEMPTION + .macro cond_yield, lbl:req, tmp:req, tmp2:req get_current_task \tmp ldr \tmp, [\tmp, #TSK_TI_PREEMPT] + /* + * If we are serving a softirq, there is no point in yielding: the + * softirq will not be preempted no matter what we do, so we should + * run to completion as quickly as we can. 
+ */ + tbnz \tmp, #SOFTIRQ_SHIFT, .Lnoyield_\@ +#ifdef CONFIG_PREEMPTION sub \tmp, \tmp, #PREEMPT_DISABLE_OFFSET cbz \tmp, \lbl #endif + adr_l \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING + this_cpu_offset \tmp2 + ldr w\tmp, [\tmp, \tmp2] + cbnz w\tmp, \lbl // yield on pending softirq in task context +.Lnoyield_\@: .endm /* diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index c3009b0e5239..2175ec0004ed 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -23,12 +23,9 @@ #define dsb(opt) asm volatile("dsb " #opt : : : "memory") #define psb_csync() asm volatile("hint #17" : : : "memory") +#define tsb_csync() asm volatile("hint #18" : : : "memory") #define csdb() asm volatile("hint #20" : : : "memory") -#define spec_bar() asm volatile(ALTERNATIVE("dsb nsh\nisb\n", \ - SB_BARRIER_INSN"nop\n", \ - ARM64_HAS_SB)) - #ifdef CONFIG_ARM64_PSEUDO_NMI #define pmr_sync() \ do { \ @@ -70,6 +67,25 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx, return mask; } +/* + * Ensure that reads of the counter are treated the same as memory reads + * for the purposes of ordering by subsequent memory barriers. + * + * This insanity brought to you by speculative system register reads, + * out-of-order memory accesses, sequence locks and Thomas Gleixner. + * + * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html + */ +#define arch_counter_enforce_ordering(val) do { \ + u64 tmp, _val = (val); \ + \ + asm volatile( \ + " eor %0, %1, %1\n" \ + " add %0, sp, %0\n" \ + " ldr xzr, [%0]" \ + : "=r" (tmp) : "r" (_val)); \ +} while (0) + #define __smp_mb() dmb(ish) #define __smp_rmb() dmb(ishld) #define __smp_wmb() dmb(ishst) diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h index 93a161b3bf3f..dc52b733675d 100644 --- a/arch/arm64/include/asm/checksum.h +++ b/arch/arm64/include/asm/checksum.h @@ -37,7 +37,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) } while (--n > 0); sum += ((sum >> 32) | (sum << 32)); - return csum_fold((__force u32)(sum >> 32)); + return csum_fold((__force __wsum)(sum >> 32)); } #define ip_fast_csum ip_fast_csum diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index b77d997b173b..b0c5eda0498f 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -66,7 +66,9 @@ #define ARM64_WORKAROUND_1508412 58 #define ARM64_HAS_LDAPR 59 #define ARM64_KVM_PROTECTED_MODE 60 +#define ARM64_WORKAROUND_NVIDIA_CARMEL_CNP 61 +#define ARM64_HAS_EPAN 62 -#define ARM64_NCAPS 61 +#define ARM64_NCAPS 63 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 61177bac49fa..338840c00e8e 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -63,6 +63,23 @@ struct arm64_ftr_bits { s64 safe_val; /* safe value for FTR_EXACT features */ }; +/* + * Describe the early feature override to the core override code: + * + * @val Values that are to be merged into the final + * sanitised value of the register. Only the bitfields + * set to 1 in @mask are valid + * @mask Mask of the features that are overridden by @val + * + * A @mask field set to full-1 indicates that the corresponding field + * in @val is a valid override. 
+ * + * A @mask field set to full-0 with the corresponding @val field set + * to full-0 denotes that this field has no override + * + * A @mask field set to full-0 with the corresponding @val field set + * to full-1 denotes thath this field has an invalid override. + */ struct arm64_ftr_override { u64 val; u64 mask; diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index ef5b040dee44..6231e1f0abe7 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -59,6 +59,7 @@ #define ARM_CPU_IMP_NVIDIA 0x4E #define ARM_CPU_IMP_FUJITSU 0x46 #define ARM_CPU_IMP_HISI 0x48 +#define ARM_CPU_IMP_APPLE 0x61 #define ARM_CPU_PART_AEM_V8 0xD0F #define ARM_CPU_PART_FOUNDATION 0xD00 @@ -99,6 +100,9 @@ #define HISI_CPU_PART_TSV110 0xD01 +#define APPLE_CPU_PART_M1_ICESTORM 0x022 +#define APPLE_CPU_PART_M1_FIRESTORM 0x023 + #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) @@ -127,6 +131,8 @@ #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL) #define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX) #define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110) +#define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM) +#define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM) /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ #define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h index 1c26d7baa67f..55f57dfa8e2f 100644 --- a/arch/arm64/include/asm/daifflags.h +++ b/arch/arm64/include/asm/daifflags.h @@ -13,8 +13,8 @@ #include <asm/ptrace.h> #define DAIF_PROCCTX 0 -#define DAIF_PROCCTX_NOIRQ PSR_I_BIT -#define DAIF_ERRCTX (PSR_I_BIT | PSR_A_BIT) +#define DAIF_PROCCTX_NOIRQ (PSR_I_BIT | PSR_F_BIT) +#define DAIF_ERRCTX (PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) #define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) @@ -47,7 +47,7 @@ static inline unsigned long local_daif_save_flags(void) if (system_uses_irq_prio_masking()) { /* If IRQs are masked with PMR, reflect it in the flags */ if (read_sysreg_s(SYS_ICC_PMR_EL1) != GIC_PRIO_IRQON) - flags |= PSR_I_BIT; + flags |= PSR_I_BIT | PSR_F_BIT; } return flags; @@ -69,7 +69,7 @@ static inline void local_daif_restore(unsigned long flags) bool irq_disabled = flags & PSR_I_BIT; WARN_ON(system_has_prio_mask_debugging() && - !(read_sysreg(daif) & PSR_I_BIT)); + (read_sysreg(daif) & (PSR_I_BIT | PSR_F_BIT)) != (PSR_I_BIT | PSR_F_BIT)); if (!irq_disabled) { trace_hardirqs_on(); @@ -86,7 +86,7 @@ static inline void local_daif_restore(unsigned long flags) * If interrupts are disabled but we can take * asynchronous errors, we can take NMIs */ - flags &= ~PSR_I_BIT; + flags &= ~(PSR_I_BIT | PSR_F_BIT); pmr = GIC_PRIO_IRQOFF; } else { pmr = GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET; @@ -131,6 +131,9 @@ static inline void local_daif_inherit(struct pt_regs *regs) if (interrupts_enabled(regs)) trace_hardirqs_on(); + if (system_uses_irq_prio_masking()) + gic_write_pmr(regs->pmr_save); + /* * We can't use local_daif_restore(regs->pstate) here as * system_has_prio_mask_debugging() won't restore the I bit if it can diff --git a/arch/arm64/include/asm/el2_setup.h 
b/arch/arm64/include/asm/el2_setup.h index d77d358f9395..21fa330f498d 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -65,6 +65,19 @@ // use EL1&0 translation. .Lskip_spe_\@: + /* Trace buffer */ + ubfx x0, x1, #ID_AA64DFR0_TRBE_SHIFT, #4 + cbz x0, .Lskip_trace_\@ // Skip if TraceBuffer is not present + + mrs_s x0, SYS_TRBIDR_EL1 + and x0, x0, TRBIDR_PROG + cbnz x0, .Lskip_trace_\@ // If TRBE is available at EL2 + + mov x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT) + orr x2, x2, x0 // allow the EL1&0 translation + // to own it. + +.Lskip_trace_\@: msr mdcr_el2, x2 // Configure debug traps .endm @@ -131,6 +144,26 @@ .Lskip_sve_\@: .endm +/* Disable any fine grained traps */ +.macro __init_el2_fgt + mrs x1, id_aa64mmfr0_el1 + ubfx x1, x1, #ID_AA64MMFR0_FGT_SHIFT, #4 + cbz x1, .Lskip_fgt_\@ + + msr_s SYS_HDFGRTR_EL2, xzr + msr_s SYS_HDFGWTR_EL2, xzr + msr_s SYS_HFGRTR_EL2, xzr + msr_s SYS_HFGWTR_EL2, xzr + msr_s SYS_HFGITR_EL2, xzr + + mrs x1, id_aa64pfr0_el1 // AMU traps UNDEF without AMU + ubfx x1, x1, #ID_AA64PFR0_AMU_SHIFT, #4 + cbz x1, .Lskip_fgt_\@ + + msr_s SYS_HAFGRTR_EL2, xzr +.Lskip_fgt_\@: +.endm + .macro __init_el2_nvhe_prepare_eret mov x0, #INIT_PSTATE_EL1 msr spsr_el2, x0 @@ -155,6 +188,7 @@ __init_el2_nvhe_idregs __init_el2_nvhe_cptr __init_el2_nvhe_sve + __init_el2_fgt __init_el2_nvhe_prepare_eret .endm diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index bec5f14b622a..2599504674b5 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -73,6 +73,7 @@ extern void sve_flush_live(void); extern void sve_load_from_fpsimd_state(struct user_fpsimd_state const *state, unsigned long vq_minus_1); extern unsigned int sve_get_vl(void); +extern void sve_set_vq(unsigned long vq_minus_1); struct arm64_cpu_capabilities; extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused); @@ -130,6 +131,15 @@ static inline void sve_user_enable(void) sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_ZEN_EL0EN); } +#define sve_cond_update_zcr_vq(val, reg) \ + do { \ + u64 __zcr = read_sysreg_s((reg)); \ + u64 __new = __zcr & ~ZCR_ELx_LEN_MASK; \ + __new |= (val) & ZCR_ELx_LEN_MASK; \ + if (__zcr != __new) \ + write_sysreg_s(__new, (reg)); \ + } while (0) + /* * Probing and setup functions. * Calls to these functions must be serialised with one another. 
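The sve_cond_update_zcr_vq() macro added above follows a read-compare-write pattern: it only touches ZCR_ELx when the vector-length field actually changes, so repeated calls with the same VQ skip the system-register write. A minimal, stand-alone C sketch of that pattern, with the sysreg accessors replaced by an ordinary variable and the 0x1ff LEN mask assumed from the kernel's ZCR layout (zcr_el1_model, set_vq() and the write counter are invented for this sketch, not kernel interfaces):

#include <stdint.h>
#include <stdio.h>

#define ZCR_ELx_LEN_MASK 0x1ffULL   /* assumed value of the LEN field mask */

static uint64_t zcr_el1_model;      /* stands in for read/write_sysreg_s() */
static unsigned int writes;         /* counts how often the "register" is written */

/* Mirrors the macro's logic: only write when the LEN field changes. */
static void set_vq(uint64_t vq_minus_1)
{
        uint64_t cur = zcr_el1_model;
        uint64_t new = (cur & ~ZCR_ELx_LEN_MASK) | (vq_minus_1 & ZCR_ELx_LEN_MASK);

        if (cur != new) {
                zcr_el1_model = new;
                writes++;
        }
}

int main(void)
{
        set_vq(3);      /* LEN changes -> one write */
        set_vq(3);      /* same value -> skipped */
        set_vq(7);      /* new value -> second write */
        printf("writes = %u\n", writes);        /* prints 2 */
        return 0;
}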
@@ -159,6 +169,8 @@ static inline int sve_get_current_vl(void) static inline void sve_user_disable(void) { BUILD_BUG(); } static inline void sve_user_enable(void) { BUILD_BUG(); } +#define sve_cond_update_zcr_vq(val, reg) do { } while (0) + static inline void sve_init_vq_map(void) { } static inline void sve_update_vq_map(void) { } static inline int sve_verify_vq_map(void) { return 0; } diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h index af43367534c7..a2563992d2dc 100644 --- a/arch/arm64/include/asm/fpsimdmacros.h +++ b/arch/arm64/include/asm/fpsimdmacros.h @@ -6,6 +6,8 @@ * Author: Catalin Marinas <catalin.marinas@arm.com> */ +#include <asm/assembler.h> + .macro fpsimd_save state, tmpnr stp q0, q1, [\state, #16 * 0] stp q2, q3, [\state, #16 * 2] @@ -230,8 +232,7 @@ str w\nxtmp, [\xpfpsr, #4] .endm -.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2 - sve_load_vq \xvqminus1, x\nxtmp, \xtmp2 +.macro __sve_load nxbase, xpfpsr, nxtmp _for n, 0, 31, _sve_ldr_v \n, \nxbase, \n - 34 _sve_ldr_p 0, \nxbase _sve_wrffr 0 @@ -242,3 +243,8 @@ ldr w\nxtmp, [\xpfpsr, #4] msr fpcr, x\nxtmp .endm + +.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2 + sve_load_vq \xvqminus1, x\nxtmp, \xtmp2 + __sve_load \nxbase, \xpfpsr, \nxtmp +.endm diff --git a/arch/arm64/include/asm/hyp_image.h b/arch/arm64/include/asm/hyp_image.h index 737ded6b6d0d..b4b3076a76fb 100644 --- a/arch/arm64/include/asm/hyp_image.h +++ b/arch/arm64/include/asm/hyp_image.h @@ -10,11 +10,15 @@ #define __HYP_CONCAT(a, b) a ## b #define HYP_CONCAT(a, b) __HYP_CONCAT(a, b) +#ifndef __KVM_NVHE_HYPERVISOR__ /* * KVM nVHE code has its own symbol namespace prefixed with __kvm_nvhe_, * to separate it from the kernel proper. */ #define kvm_nvhe_sym(sym) __kvm_nvhe_##sym +#else +#define kvm_nvhe_sym(sym) sym +#endif #ifdef LINKER_SCRIPT @@ -56,6 +60,9 @@ */ #define KVM_NVHE_ALIAS(sym) kvm_nvhe_sym(sym) = sym; +/* Defines a linker script alias for KVM nVHE hyp symbols */ +#define KVM_NVHE_ALIAS_HYP(first, sec) kvm_nvhe_sym(first) = kvm_nvhe_sym(sec); + #endif /* LINKER_SCRIPT */ #endif /* __ARM64_HYP_IMAGE_H__ */ diff --git a/arch/arm64/include/asm/hypervisor.h b/arch/arm64/include/asm/hypervisor.h index f9cc1d021791..0ae427f352c8 100644 --- a/arch/arm64/include/asm/hypervisor.h +++ b/arch/arm64/include/asm/hypervisor.h @@ -4,4 +4,7 @@ #include <asm/xen/hypervisor.h> +void kvm_init_hyp_services(void); +bool kvm_arm_hyp_service_available(u32 func_id); + #endif diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 5ea8656a2030..7fd836bea7eb 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -169,16 +169,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) - -/* - * PCI configuration space mapping function. - * - * The PCI specification disallows posted write configuration transactions. - * Add an arch specific pci_remap_cfgspace() definition that is implemented - * through nGnRnE device memory attribute as recommended by the ARM v8 - * Architecture reference manual Issue A.k B2.8.2 "Device memory". 
- */ -#define pci_remap_cfgspace(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRnE)) +#define ioremap_np(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRnE)) /* * io{read,write}{16,32,64}be() macros diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h index b2b0c6405eb0..fac08e18bcd5 100644 --- a/arch/arm64/include/asm/irq.h +++ b/arch/arm64/include/asm/irq.h @@ -8,6 +8,10 @@ struct pt_regs; +int set_handle_irq(void (*handle_irq)(struct pt_regs *)); +#define set_handle_irq set_handle_irq +int set_handle_fiq(void (*handle_fiq)(struct pt_regs *)); + static inline int nr_legacy_irqs(void) { return 0; diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h index a1020285ea75..81bbfa3a035b 100644 --- a/arch/arm64/include/asm/irq_work.h +++ b/arch/arm64/include/asm/irq_work.h @@ -2,6 +2,8 @@ #ifndef __ASM_IRQ_WORK_H #define __ASM_IRQ_WORK_H +extern void arch_irq_work_raise(void); + static inline bool arch_irq_work_has_interrupt(void) { return true; diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index ff328e5bbb75..b57b9b1e4344 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h @@ -12,15 +12,13 @@ /* * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and - * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'dai' + * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'daif' * order: * Masking debug exceptions causes all other exceptions to be masked too/ - * Masking SError masks irq, but not debug exceptions. Masking irqs has no - * side effects for other flags. Keeping to this order makes it easier for - * entry.S to know which exceptions should be unmasked. - * - * FIQ is never expected, but we mask it when we disable debug exceptions, and - * unmask it at all other times. + * Masking SError masks IRQ/FIQ, but not debug exceptions. IRQ and FIQ are + * always masked and unmasked together, and have no side effects for other + * flags. Keeping to this order makes it easier for entry.S to know which + * exceptions should be unmasked. */ /* @@ -35,7 +33,7 @@ static inline void arch_local_irq_enable(void) } asm volatile(ALTERNATIVE( - "msr daifclr, #2 // arch_local_irq_enable", + "msr daifclr, #3 // arch_local_irq_enable", __msr_s(SYS_ICC_PMR_EL1, "%0"), ARM64_HAS_IRQ_PRIO_MASKING) : @@ -54,7 +52,7 @@ static inline void arch_local_irq_disable(void) } asm volatile(ALTERNATIVE( - "msr daifset, #2 // arch_local_irq_disable", + "msr daifset, #3 // arch_local_irq_disable", __msr_s(SYS_ICC_PMR_EL1, "%0"), ARM64_HAS_IRQ_PRIO_MASKING) : diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index 587c504a4c8b..d44df9d62fc9 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h @@ -136,7 +136,7 @@ * has a direct correspondence, and needs to appear sufficiently aligned * in the virtual address space. 
*/ -#if defined(CONFIG_SPARSEMEM_VMEMMAP) && ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS +#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS #define ARM64_MEMSTART_ALIGN (1UL << SECTION_SIZE_BITS) #else #define ARM64_MEMSTART_ALIGN (1UL << ARM64_MEMSTART_SHIFT) diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index 9befcd87e9a8..00dbcc71aeb2 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -96,10 +96,6 @@ struct kimage_arch { void *dtb; phys_addr_t dtb_mem; phys_addr_t kern_reloc; - /* Core ELF header buffer */ - void *elf_headers; - unsigned long elf_headers_mem; - unsigned long elf_headers_sz; }; #ifdef CONFIG_KEXEC_FILE diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 4e90c2debf70..692c9049befa 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -278,6 +278,9 @@ #define CPTR_EL2_DEFAULT CPTR_EL2_RES1 /* Hyp Debug Configuration Register bits */ +#define MDCR_EL2_E2TB_MASK (UL(0x3)) +#define MDCR_EL2_E2TB_SHIFT (UL(24)) +#define MDCR_EL2_TTRF (1 << 19) #define MDCR_EL2_TPMS (1 << 14) #define MDCR_EL2_E2PB_MASK (UL(0x3)) #define MDCR_EL2_E2PB_SHIFT (UL(12)) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index a7ab84f781f7..cf8df032b9c3 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -57,6 +57,12 @@ #define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 12 #define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs 13 #define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs 14 +#define __KVM_HOST_SMCCC_FUNC___pkvm_init 15 +#define __KVM_HOST_SMCCC_FUNC___pkvm_create_mappings 16 +#define __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping 17 +#define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector 18 +#define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize 19 +#define __KVM_HOST_SMCCC_FUNC___pkvm_mark_hyp 20 #ifndef __ASSEMBLY__ @@ -154,6 +160,9 @@ struct kvm_nvhe_init_params { unsigned long tpidr_el2; unsigned long stack_hyp_va; phys_addr_t pgd_pa; + unsigned long hcr_el2; + unsigned long vttbr; + unsigned long vtcr; }; /* Translate a kernel address @ptr into its equivalent linear mapping */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 3d10e6527f7d..7cd7d5c8c4bc 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -94,7 +94,7 @@ struct kvm_s2_mmu { /* The last vcpu id that ran on each physical CPU */ int __percpu *last_vcpu_ran; - struct kvm *kvm; + struct kvm_arch *arch; }; struct kvm_arch_memory_slot { @@ -315,6 +315,8 @@ struct kvm_vcpu_arch { struct kvm_guest_debug_arch regs; /* Statistical profiling extension */ u64 pmscr_el1; + /* Self-hosted trace */ + u64 trfcr_el1; } host_debug_state; /* VGIC state */ @@ -372,8 +374,10 @@ struct kvm_vcpu_arch { }; /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */ -#define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \ - sve_ffr_offset((vcpu)->arch.sve_max_vl))) +#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \ + sve_ffr_offset((vcpu)->arch.sve_max_vl)) + +#define vcpu_sve_max_vq(vcpu) sve_vq_from_vl((vcpu)->arch.sve_max_vl) #define vcpu_sve_state_size(vcpu) ({ \ size_t __size_ret; \ @@ -382,7 +386,7 @@ struct kvm_vcpu_arch { if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) { \ __size_ret = 0; \ } else { \ - __vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl); \ + __vcpu_vq = vcpu_sve_max_vq(vcpu); \ __size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq); 
\ } \ \ @@ -400,7 +404,13 @@ struct kvm_vcpu_arch { #define KVM_ARM64_GUEST_HAS_PTRAUTH (1 << 7) /* PTRAUTH exposed to guest */ #define KVM_ARM64_PENDING_EXCEPTION (1 << 8) /* Exception pending */ #define KVM_ARM64_EXCEPT_MASK (7 << 9) /* Target EL/MODE */ +#define KVM_ARM64_DEBUG_STATE_SAVE_SPE (1 << 12) /* Save SPE context if active */ +#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */ +#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \ + KVM_GUESTDBG_USE_SW_BP | \ + KVM_GUESTDBG_USE_HW | \ + KVM_GUESTDBG_SINGLESTEP) /* * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can * take the following values: @@ -582,15 +592,11 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events); #define KVM_ARCH_WANT_MMU_NOTIFIER -int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end, unsigned flags); -int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); -int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); -int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); void kvm_arm_halt_guest(struct kvm *kvm); void kvm_arm_resume_guest(struct kvm *kvm); +#ifndef __KVM_NVHE_HYPERVISOR__ #define kvm_call_hyp_nvhe(f, ...) \ ({ \ struct arm_smccc_res res; \ @@ -630,9 +636,13 @@ void kvm_arm_resume_guest(struct kvm *kvm); \ ret; \ }) +#else /* __KVM_NVHE_HYPERVISOR__ */ +#define kvm_call_hyp(f, ...) f(__VA_ARGS__) +#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__) +#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__) +#endif /* __KVM_NVHE_HYPERVISOR__ */ void force_vm_exit(const cpumask_t *mask); -void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot); int handle_exit(struct kvm_vcpu *vcpu, int exception_index); void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index); @@ -692,19 +702,6 @@ static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt) ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr(); } -static inline bool kvm_arch_requires_vhe(void) -{ - /* - * The Arm architecture specifies that implementation of SVE - * requires VHE also to be implemented. 
The KVM code for arm64 - * relies on this when SVE is present: - */ - if (system_supports_sve()) - return true; - - return false; -} - void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu); static inline void kvm_arch_hardware_unsetup(void) {} @@ -713,6 +710,7 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} void kvm_arm_init_debug(void); +void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu); void kvm_arm_setup_debug(struct kvm_vcpu *vcpu); void kvm_arm_clear_debug(struct kvm_vcpu *vcpu); void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu); @@ -734,6 +732,10 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr) return (!has_vhe() && attr->exclude_host); } +/* Flags for host debug state */ +void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu); + #ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) { @@ -771,5 +773,12 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu); (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features)) int kvm_trng_call(struct kvm_vcpu *vcpu); +#ifdef CONFIG_KVM +extern phys_addr_t hyp_mem_base; +extern phys_addr_t hyp_mem_size; +void __init kvm_hyp_reserve(void); +#else +static inline void kvm_hyp_reserve(void) { } +#endif #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 32ae676236b6..9d60b3006efc 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -90,6 +90,8 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu); void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); +void __sve_save_state(void *sve_pffr, u32 *fpsr); +void __sve_restore_state(void *sve_pffr, u32 *fpsr); #ifndef __KVM_NVHE_HYPERVISOR__ void activate_traps_vhe_load(struct kvm_vcpu *vcpu); @@ -100,10 +102,20 @@ u64 __guest_enter(struct kvm_vcpu *vcpu); bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt); -void __noreturn hyp_panic(void); #ifdef __KVM_NVHE_HYPERVISOR__ void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr, u64 elr, u64 par); #endif +#ifdef __KVM_NVHE_HYPERVISOR__ +void __pkvm_init_switch_pgd(phys_addr_t phys, unsigned long size, + phys_addr_t pgd, void *sp, void *cont_fn); +int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus, + unsigned long *per_cpu_base, u32 hyp_va_bits); +void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt); +#endif + +extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val); +extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val); + #endif /* __ARM64_KVM_HYP_H__ */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 90873851f677..25ed956f9af1 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -121,6 +121,8 @@ void kvm_update_va_mask(struct alt_instr *alt, void kvm_compute_layout(void); void kvm_apply_hyp_relocations(void); +#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset) + static __always_inline unsigned long __kern_hyp_va(unsigned long v) { asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n" @@ -166,7 +168,15 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu); phys_addr_t kvm_mmu_get_httbr(void); phys_addr_t kvm_get_idmap_vector(void); -int 
kvm_mmu_init(void); +int kvm_mmu_init(u32 *hyp_va_bits); + +static inline void *__kvm_vector_slot2addr(void *base, + enum arm64_hyp_spectre_vector slot) +{ + int idx = slot - (slot != HYP_VECTOR_DIRECT); + + return base + (idx * SZ_2K); +} struct kvm; @@ -262,9 +272,9 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu) * Must be called from hyp code running at EL2 with an updated VTTBR * and interrupts disabled. */ -static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu) +static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long vtcr) { - write_sysreg(kern_hyp_va(mmu->kvm)->arch.vtcr, vtcr_el2); + write_sysreg(vtcr, vtcr_el2); write_sysreg(kvm_get_vttbr(mmu), vttbr_el2); /* @@ -275,5 +285,14 @@ static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu) asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT)); } +static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu) +{ + __load_stage2(mmu, kern_hyp_va(mmu->arch)->vtcr); +} + +static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu) +{ + return container_of(mmu->arch, struct kvm, arch); +} #endif /* __ASSEMBLY__ */ #endif /* __ARM64_KVM_MMU_H__ */ diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index 8886d43cfb11..c3674c47d48c 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -11,22 +11,79 @@ #include <linux/kvm_host.h> #include <linux/types.h> +#define KVM_PGTABLE_MAX_LEVELS 4U + +static inline u64 kvm_get_parange(u64 mmfr0) +{ + u64 parange = cpuid_feature_extract_unsigned_field(mmfr0, + ID_AA64MMFR0_PARANGE_SHIFT); + if (parange > ID_AA64MMFR0_PARANGE_MAX) + parange = ID_AA64MMFR0_PARANGE_MAX; + + return parange; +} + typedef u64 kvm_pte_t; /** + * struct kvm_pgtable_mm_ops - Memory management callbacks. + * @zalloc_page: Allocate a single zeroed memory page. The @arg parameter + * can be used by the walker to pass a memcache. The + * initial refcount of the page is 1. + * @zalloc_pages_exact: Allocate an exact number of zeroed memory pages. The + * @size parameter is in bytes, and is rounded-up to the + * next page boundary. The resulting allocation is + * physically contiguous. + * @free_pages_exact: Free an exact number of memory pages previously + * allocated by zalloc_pages_exact. + * @get_page: Increment the refcount on a page. + * @put_page: Decrement the refcount on a page. When the refcount + * reaches 0 the page is automatically freed. + * @page_count: Return the refcount of a page. + * @phys_to_virt: Convert a physical address into a virtual address mapped + * in the current context. + * @virt_to_phys: Convert a virtual address mapped in the current context + * into a physical address. + */ +struct kvm_pgtable_mm_ops { + void* (*zalloc_page)(void *arg); + void* (*zalloc_pages_exact)(size_t size); + void (*free_pages_exact)(void *addr, size_t size); + void (*get_page)(void *addr); + void (*put_page)(void *addr); + int (*page_count)(void *addr); + void* (*phys_to_virt)(phys_addr_t phys); + phys_addr_t (*virt_to_phys)(void *addr); +}; + +/** + * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags. + * @KVM_PGTABLE_S2_NOFWB: Don't enforce Normal-WB even if the CPUs have + * ARM64_HAS_STAGE2_FWB. + * @KVM_PGTABLE_S2_IDMAP: Only use identity mappings. + */ +enum kvm_pgtable_stage2_flags { + KVM_PGTABLE_S2_NOFWB = BIT(0), + KVM_PGTABLE_S2_IDMAP = BIT(1), +}; + +/** * struct kvm_pgtable - KVM page-table. 
* @ia_bits: Maximum input address size, in bits. * @start_level: Level at which the page-table walk starts. * @pgd: Pointer to the first top-level entry of the page-table. + * @mm_ops: Memory management callbacks. * @mmu: Stage-2 KVM MMU struct. Unused for stage-1 page-tables. */ struct kvm_pgtable { u32 ia_bits; u32 start_level; kvm_pte_t *pgd; + struct kvm_pgtable_mm_ops *mm_ops; /* Stage-2 only */ struct kvm_s2_mmu *mmu; + enum kvm_pgtable_stage2_flags flags; }; /** @@ -50,6 +107,16 @@ enum kvm_pgtable_prot { #define PAGE_HYP_DEVICE (PAGE_HYP | KVM_PGTABLE_PROT_DEVICE) /** + * struct kvm_mem_range - Range of Intermediate Physical Addresses + * @start: Start of the range. + * @end: End of the range. + */ +struct kvm_mem_range { + u64 start; + u64 end; +}; + +/** * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk. * @KVM_PGTABLE_WALK_LEAF: Visit leaf entries, including invalid * entries. @@ -86,10 +153,12 @@ struct kvm_pgtable_walker { * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table. * @pgt: Uninitialised page-table structure to initialise. * @va_bits: Maximum virtual address bits. + * @mm_ops: Memory management callbacks. * * Return: 0 on success, negative error code on failure. */ -int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits); +int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits, + struct kvm_pgtable_mm_ops *mm_ops); /** * kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table. @@ -123,17 +192,41 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, enum kvm_pgtable_prot prot); /** - * kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table. + * kvm_get_vtcr() - Helper to construct VTCR_EL2 + * @mmfr0: Sanitized value of SYS_ID_AA64MMFR0_EL1 register. + * @mmfr1: Sanitized value of SYS_ID_AA64MMFR1_EL1 register. + * @phys_shift: Value to set in VTCR_EL2.T0SZ. + * + * The VTCR value is common across all the physical CPUs on the system. + * We use system wide sanitised values to fill in different fields, + * except for Hardware Management of Access Flags. HA Flag is set + * unconditionally on all CPUs, as it is safe to run with or without + * the feature and the bit is RES0 on CPUs that don't support it. + * + * Return: VTCR_EL2 value + */ +u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift); + +/** + * kvm_pgtable_stage2_init_flags() - Initialise a guest stage-2 page-table. * @pgt: Uninitialised page-table structure to initialise. - * @kvm: KVM structure representing the guest virtual machine. + * @arch: Arch-specific KVM structure representing the guest virtual + * machine. + * @mm_ops: Memory management callbacks. + * @flags: Stage-2 configuration flags. * * Return: 0 on success, negative error code on failure. */ -int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm *kvm); +int kvm_pgtable_stage2_init_flags(struct kvm_pgtable *pgt, struct kvm_arch *arch, + struct kvm_pgtable_mm_ops *mm_ops, + enum kvm_pgtable_stage2_flags flags); + +#define kvm_pgtable_stage2_init(pgt, arch, mm_ops) \ + kvm_pgtable_stage2_init_flags(pgt, arch, mm_ops, 0) /** * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table. - * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init(). + * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*(). * * The page-table is assumed to be unreachable by any hardware walkers prior * to freeing and therefore no TLB invalidation is performed.
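The struct kvm_pgtable_mm_ops callbacks documented above spell out the allocation contract the page-table code depends on: zalloc_page() must hand back zeroed memory with an initial refcount of 1, get_page()/put_page() adjust that count, and a page is freed automatically when the count drops to zero. A rough user-space model of just that subset of the contract (the struct layout, calloc() backing and helper names here are invented for illustration; this is not how the kernel or hypervisor actually allocates page-table pages):

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

#define MODEL_PAGE_SIZE 4096

/* Toy page: refcount kept alongside the data, purely for illustration. */
struct model_page {
        int refcount;
        unsigned char data[MODEL_PAGE_SIZE];
};

static void *zalloc_page(void *arg)
{
        (void)arg;                                      /* a memcache could be passed here */
        struct model_page *p = calloc(1, sizeof(*p));   /* zeroed, as the contract requires */
        p->refcount = 1;                                /* initial refcount is 1 */
        return p->data;
}

static struct model_page *to_page(void *addr)
{
        return (struct model_page *)((char *)addr -
                                     offsetof(struct model_page, data));
}

static void get_page(void *addr)   { to_page(addr)->refcount++; }
static int  page_count(void *addr) { return to_page(addr)->refcount; }

static void put_page(void *addr)
{
        struct model_page *p = to_page(addr);
        if (--p->refcount == 0)         /* freed automatically when the count hits 0 */
                free(p);
}

int main(void)
{
        void *pg = zalloc_page(NULL);
        assert(page_count(pg) == 1);
        get_page(pg);                   /* e.g. a second table entry references it */
        put_page(pg);
        assert(page_count(pg) == 1);
        put_page(pg);                   /* last reference: page is freed */
        return 0;
}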
@@ -142,13 +235,13 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt); /** * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table. - * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init(). + * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*(). * @addr: Intermediate physical address at which to place the mapping. * @size: Size of the mapping. * @phys: Physical address of the memory to map. * @prot: Permissions and attributes for the mapping. - * @mc: Cache of pre-allocated GFP_PGTABLE_USER memory from which to - * allocate page-table pages. + * @mc: Cache of pre-allocated and zeroed memory from which to allocate + * page-table pages. * * The offset of @addr within a page is ignored, @size is rounded-up to * the next page boundary and @phys is rounded-down to the previous page @@ -170,11 +263,31 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt); */ int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, enum kvm_pgtable_prot prot, - struct kvm_mmu_memory_cache *mc); + void *mc); + +/** + * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to + * track ownership. + * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*(). + * @addr: Base intermediate physical address to annotate. + * @size: Size of the annotated range. + * @mc: Cache of pre-allocated and zeroed memory from which to allocate + * page-table pages. + * @owner_id: Unique identifier for the owner of the page. + * + * By default, all page-tables are owned by identifier 0. This function can be + * used to mark portions of the IPA space as owned by other entities. When a + * stage 2 is used with identity-mappings, these annotations allow to use the + * page-table data structure as a simple rmap. + * + * Return: 0 on success, negative error code on failure. + */ +int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size, + void *mc, u8 owner_id); /** * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table. - * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init(). + * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*(). * @addr: Intermediate physical address from which to remove the mapping. * @size: Size of the mapping. * @@ -194,7 +307,7 @@ int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size); /** * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range * without TLB invalidation. - * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init(). + * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*(). * @addr: Intermediate physical address from which to write-protect, * @size: Size of the range. * @@ -211,7 +324,7 @@ int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size); /** * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry. - * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init(). + * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*(). * @addr: Intermediate physical address to identify the page-table entry. * * The offset of @addr within a page is ignored. @@ -225,7 +338,7 @@ kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr); /** * kvm_pgtable_stage2_mkold() - Clear the access flag in a page-table entry. - * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init(). + * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*(). 
* @addr: Intermediate physical address to identify the page-table entry. * * The offset of @addr within a page is ignored. @@ -244,7 +357,7 @@ kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr); /** * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a * page-table entry. - * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init(). + * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*(). * @addr: Intermediate physical address to identify the page-table entry. * @prot: Additional permissions to grant for the mapping. * @@ -263,7 +376,7 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, /** * kvm_pgtable_stage2_is_young() - Test whether a page-table entry has the * access flag set. - * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init(). + * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*(). * @addr: Intermediate physical address to identify the page-table entry. * * The offset of @addr within a page is ignored. @@ -276,7 +389,7 @@ bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr); * kvm_pgtable_stage2_flush_range() - Clean and invalidate data cache to Point * of Coherency for guest stage-2 address * range. - * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init(). + * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*(). * @addr: Intermediate physical address from which to flush. * @size: Size of the range. * @@ -311,4 +424,23 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size); int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size, struct kvm_pgtable_walker *walker); +/** + * kvm_pgtable_stage2_find_range() - Find a range of Intermediate Physical + * Addresses with compatible permission + * attributes. + * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*(). + * @addr: Address that must be covered by the range. + * @prot: Protection attributes that the range must be compatible with. + * @range: Range structure used to limit the search space at call time and + * that will hold the result. + * + * The offset of @addr within a page is ignored. An IPA is compatible with @prot + * iff its corresponding stage-2 page-table entry has default ownership and, if + * valid, is mapped with protection attributes identical to @prot. + * + * Return: 0 on success, negative error code on failure. 
+ */ +int kvm_pgtable_stage2_find_range(struct kvm_pgtable *pgt, u64 addr, + enum kvm_pgtable_prot prot, + struct kvm_mem_range *range); #endif /* __ARM64_KVM_PGTABLE_H__ */ diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 0aabc3be9a75..87b90dc27a43 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -243,13 +243,15 @@ static inline const void *__tag_set(const void *addr, u8 tag) } #ifdef CONFIG_KASAN_HW_TAGS -#define arch_enable_tagging() mte_enable_kernel() +#define arch_enable_tagging_sync() mte_enable_kernel_sync() +#define arch_enable_tagging_async() mte_enable_kernel_async() #define arch_set_tagging_report_once(state) mte_set_report_once(state) +#define arch_force_async_tag_fault() mte_check_tfsr_exit() #define arch_init_tags(max_tag) mte_init_tags(max_tag) #define arch_get_random_tag() mte_get_random_tag() #define arch_get_mem_tag(addr) mte_get_mem_tag(addr) -#define arch_set_mem_tag_range(addr, size, tag) \ - mte_set_mem_tag_range((addr), (size), (tag)) +#define arch_set_mem_tag_range(addr, size, tag, init) \ + mte_set_mem_tag_range((addr), (size), (tag), (init)) #endif /* CONFIG_KASAN_HW_TAGS */ /* @@ -321,13 +323,29 @@ static inline void *phys_to_virt(phys_addr_t x) #define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys((unsigned long)(x))) #define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x)) +#ifdef CONFIG_CFI_CLANG +/* + * With CONFIG_CFI_CLANG, the compiler replaces function address + * references with the address of the function's CFI jump table + * entry. The function_nocfi macro always returns the address of the + * actual function instead. + */ +#define function_nocfi(x) ({ \ + void *addr; \ + asm("adrp %0, " __stringify(x) "\n\t" \ + "add %0, %0, :lo12:" __stringify(x) \ + : "=r" (addr)); \ + addr; \ +}) +#endif + /* * virt_to_page(x) convert a _valid_ virtual address to struct page * * virt_addr_valid(x) indicates whether a virtual address is valid */ #define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET) -#if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL) +#if defined(CONFIG_DEBUG_VIRTUAL) #define page_to_virt(x) ({ \ __typeof__(x) __page = x; \ void *__addr = __va(page_to_phys(__page)); \ @@ -347,7 +365,7 @@ static inline void *phys_to_virt(phys_addr_t x) u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page)); \ (struct page *)__addr; \ }) -#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */ +#endif /* CONFIG_DEBUG_VIRTUAL */ #define virt_addr_valid(addr) ({ \ __typeof__(addr) __addr = __tag_reset(addr); \ diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index bd02e99b1a4c..d3cef9133539 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -119,7 +119,7 @@ static inline void cpu_install_idmap(void) * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD, * avoiding the possibility of conflicting TLB entries being allocated. 
*/ -static inline void cpu_replace_ttbr1(pgd_t *pgdp) +static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp) { typedef void (ttbr_replace_func)(phys_addr_t); extern ttbr_replace_func idmap_cpu_replace_ttbr1; @@ -140,7 +140,7 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp) ttbr1 |= TTBR_CNP_BIT; } - replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1); + replace_phys = (void *)__pa_symbol(function_nocfi(idmap_cpu_replace_ttbr1)); cpu_install_idmap(); replace_phys(ttbr1); diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h index 7ab500e2ad17..ddd4d17cf9a0 100644 --- a/arch/arm64/include/asm/mte-kasan.h +++ b/arch/arm64/include/asm/mte-kasan.h @@ -53,7 +53,8 @@ static inline u8 mte_get_random_tag(void) * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and * size must be non-zero and MTE_GRANULE_SIZE aligned. */ -static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag) +static inline void mte_set_mem_tag_range(void *addr, size_t size, + u8 tag, bool init) { u64 curr, end; @@ -63,21 +64,31 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag) curr = (u64)__tag_set(addr, tag); end = curr + size; - do { - /* - * 'asm volatile' is required to prevent the compiler to move - * the statement outside of the loop. - */ - asm volatile(__MTE_PREAMBLE "stg %0, [%0]" - : - : "r" (curr) - : "memory"); - - curr += MTE_GRANULE_SIZE; - } while (curr != end); + /* + * 'asm volatile' is required to prevent the compiler to move + * the statement outside of the loop. + */ + if (init) { + do { + asm volatile(__MTE_PREAMBLE "stzg %0, [%0]" + : + : "r" (curr) + : "memory"); + curr += MTE_GRANULE_SIZE; + } while (curr != end); + } else { + do { + asm volatile(__MTE_PREAMBLE "stg %0, [%0]" + : + : "r" (curr) + : "memory"); + curr += MTE_GRANULE_SIZE; + } while (curr != end); + } } -void mte_enable_kernel(void); +void mte_enable_kernel_sync(void); +void mte_enable_kernel_async(void); void mte_init_tags(u64 max_tag); void mte_set_report_once(bool state); @@ -100,11 +111,16 @@ static inline u8 mte_get_random_tag(void) return 0xFF; } -static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag) +static inline void mte_set_mem_tag_range(void *addr, size_t size, + u8 tag, bool init) { } -static inline void mte_enable_kernel(void) +static inline void mte_enable_kernel_sync(void) +{ +} + +static inline void mte_enable_kernel_async(void) { } diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h index 9b557a457f24..bc88a1ced0d7 100644 --- a/arch/arm64/include/asm/mte.h +++ b/arch/arm64/include/asm/mte.h @@ -39,16 +39,15 @@ void mte_free_tag_storage(char *storage); void mte_sync_tags(pte_t *ptep, pte_t pte); void mte_copy_page_tags(void *kto, const void *kfrom); -void flush_mte_state(void); +void mte_thread_init_user(void); void mte_thread_switch(struct task_struct *next); +void mte_suspend_enter(void); void mte_suspend_exit(void); long set_mte_ctrl(struct task_struct *task, unsigned long arg); long get_mte_ctrl(struct task_struct *task); int mte_ptrace_copy_tags(struct task_struct *child, long request, unsigned long addr, unsigned long data); -void mte_assign_mem_tag_range(void *addr, size_t size); - #else /* CONFIG_ARM64_MTE */ /* unused if !CONFIG_ARM64_MTE, silence the compiler */ @@ -60,12 +59,15 @@ static inline void mte_sync_tags(pte_t *ptep, pte_t pte) static inline void mte_copy_page_tags(void *kto, const void *kfrom) { } -static inline void flush_mte_state(void) +static inline void 
mte_thread_init_user(void) { } static inline void mte_thread_switch(struct task_struct *next) { } +static inline void mte_suspend_enter(void) +{ +} static inline void mte_suspend_exit(void) { } @@ -84,11 +86,51 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child, return -EIO; } -static inline void mte_assign_mem_tag_range(void *addr, size_t size) +#endif /* CONFIG_ARM64_MTE */ + +#ifdef CONFIG_KASAN_HW_TAGS +/* Whether the MTE asynchronous mode is enabled. */ +DECLARE_STATIC_KEY_FALSE(mte_async_mode); + +static inline bool system_uses_mte_async_mode(void) { + return static_branch_unlikely(&mte_async_mode); } -#endif /* CONFIG_ARM64_MTE */ +void mte_check_tfsr_el1(void); + +static inline void mte_check_tfsr_entry(void) +{ + mte_check_tfsr_el1(); +} + +static inline void mte_check_tfsr_exit(void) +{ + /* + * The asynchronous faults are sync'ed automatically with + * TFSR_EL1 on kernel entry but for exit an explicit dsb() + * is required. + */ + dsb(nsh); + isb(); + + mte_check_tfsr_el1(); +} +#else +static inline bool system_uses_mte_async_mode(void) +{ + return false; +} +static inline void mte_check_tfsr_el1(void) +{ +} +static inline void mte_check_tfsr_entry(void) +{ +} +static inline void mte_check_tfsr_exit(void) +{ +} +#endif /* CONFIG_KASAN_HW_TAGS */ #endif /* __ASSEMBLY__ */ #endif /* __ASM_MTE_H */ diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h index cf3a0fd7c1a7..9aa193e0e8f2 100644 --- a/arch/arm64/include/asm/paravirt.h +++ b/arch/arm64/include/asm/paravirt.h @@ -3,23 +3,19 @@ #define _ASM_ARM64_PARAVIRT_H #ifdef CONFIG_PARAVIRT +#include <linux/static_call_types.h> + struct static_key; extern struct static_key paravirt_steal_enabled; extern struct static_key paravirt_steal_rq_enabled; -struct pv_time_ops { - unsigned long long (*steal_clock)(int cpu); -}; - -struct paravirt_patch_template { - struct pv_time_ops time; -}; +u64 dummy_steal_clock(int cpu); -extern struct paravirt_patch_template pv_ops; +DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock); static inline u64 paravirt_steal_clock(int cpu) { - return pv_ops.time.steal_clock(cpu); + return static_call(pv_steal_clock)(cpu); } int __init pv_time_init(void); diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index 3c6a7f5988b1..31fbab3d6f99 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h @@ -27,7 +27,10 @@ static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot) static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmdp) { - __pud_populate(pudp, __pa(pmdp), PMD_TYPE_TABLE); + pudval_t pudval = PUD_TYPE_TABLE; + + pudval |= (mm == &init_mm) ? PUD_TABLE_UXN : PUD_TABLE_PXN; + __pud_populate(pudp, __pa(pmdp), pudval); } #else static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot) @@ -45,7 +48,10 @@ static inline void __p4d_populate(p4d_t *p4dp, phys_addr_t pudp, p4dval_t prot) static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4dp, pud_t *pudp) { - __p4d_populate(p4dp, __pa(pudp), PUD_TYPE_TABLE); + p4dval_t p4dval = P4D_TYPE_TABLE; + + p4dval |= (mm == &init_mm) ? 
P4D_TABLE_UXN : P4D_TABLE_PXN; + __p4d_populate(p4dp, __pa(pudp), p4dval); } #else static inline void __p4d_populate(p4d_t *p4dp, phys_addr_t pudp, p4dval_t prot) @@ -70,16 +76,15 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep, static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) { - /* - * The pmd must be loaded with the physical address of the PTE table - */ - __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE); + VM_BUG_ON(mm != &init_mm); + __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN); } static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) { - __pmd_populate(pmdp, page_to_phys(ptep), PMD_TYPE_TABLE); + VM_BUG_ON(mm == &init_mm); + __pmd_populate(pmdp, page_to_phys(ptep), PMD_TYPE_TABLE | PMD_TABLE_PXN); } #define pmd_pgtable(pmd) pmd_page(pmd) diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 42442a0ae2ab..b82575a33f8b 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -94,6 +94,17 @@ /* * Hardware page table definitions. * + * Level 0 descriptor (P4D). + */ +#define P4D_TYPE_TABLE (_AT(p4dval_t, 3) << 0) +#define P4D_TABLE_BIT (_AT(p4dval_t, 1) << 1) +#define P4D_TYPE_MASK (_AT(p4dval_t, 3) << 0) +#define P4D_TYPE_SECT (_AT(p4dval_t, 1) << 0) +#define P4D_SECT_RDONLY (_AT(p4dval_t, 1) << 7) /* AP[2] */ +#define P4D_TABLE_PXN (_AT(p4dval_t, 1) << 59) +#define P4D_TABLE_UXN (_AT(p4dval_t, 1) << 60) + +/* * Level 1 descriptor (PUD). */ #define PUD_TYPE_TABLE (_AT(pudval_t, 3) << 0) @@ -101,6 +112,8 @@ #define PUD_TYPE_MASK (_AT(pudval_t, 3) << 0) #define PUD_TYPE_SECT (_AT(pudval_t, 1) << 0) #define PUD_SECT_RDONLY (_AT(pudval_t, 1) << 7) /* AP[2] */ +#define PUD_TABLE_PXN (_AT(pudval_t, 1) << 59) +#define PUD_TABLE_UXN (_AT(pudval_t, 1) << 60) /* * Level 2 descriptor (PMD). @@ -122,6 +135,8 @@ #define PMD_SECT_CONT (_AT(pmdval_t, 1) << 52) #define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53) #define PMD_SECT_UXN (_AT(pmdval_t, 1) << 54) +#define PMD_TABLE_PXN (_AT(pmdval_t, 1) << 59) +#define PMD_TABLE_UXN (_AT(pmdval_t, 1) << 60) /* * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers). 
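The new P4D/PUD/PMD_TABLE_PXN and _TABLE_UXN definitions above place the hierarchical execute-never controls in bits 59 and 60 of a table descriptor, and the pgalloc.h hunk uses them so that kernel (init_mm) page tables are marked user-execute-never while user page tables are marked privileged-execute-never. A small stand-alone check of how those constants compose into a PUD table descriptor (the _AT() typing is dropped and plain 64-bit constants are used; purely illustrative):

#include <inttypes.h>
#include <stdio.h>

/* Bit positions copied from the pgtable-hwdef.h hunk above. */
#define PUD_TYPE_TABLE  (UINT64_C(3) << 0)
#define PUD_TABLE_PXN   (UINT64_C(1) << 59)
#define PUD_TABLE_UXN   (UINT64_C(1) << 60)

int main(void)
{
        int kernel_table = 1;           /* i.e. mm == &init_mm in pud_populate() */
        uint64_t pudval = PUD_TYPE_TABLE;

        /* Kernel tables are never user-executable; user tables never priv-executable. */
        pudval |= kernel_table ? PUD_TABLE_UXN : PUD_TABLE_PXN;

        printf("pudval = 0x%016" PRIx64 "\n", pudval);  /* 0x1000000000000003 */
        return 0;
}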
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 9a65fb528110..938092df76cf 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -71,10 +71,10 @@ extern bool arm64_use_ng_mappings; #define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN) #define PAGE_KERNEL_EXEC_CONT __pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT) -#define PAGE_S2_MEMATTR(attr) \ +#define PAGE_S2_MEMATTR(attr, has_fwb) \ ({ \ u64 __val; \ - if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) \ + if (has_fwb) \ __val = PTE_S2_MEMATTR(MT_S2_FWB_ ## attr); \ else \ __val = PTE_S2_MEMATTR(MT_S2_ ## attr); \ @@ -87,12 +87,13 @@ extern bool arm64_use_ng_mappings; #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE) #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) #define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN) +#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN) #define __P000 PAGE_NONE #define __P001 PAGE_READONLY #define __P010 PAGE_READONLY #define __P011 PAGE_READONLY -#define __P100 PAGE_READONLY_EXEC +#define __P100 PAGE_EXECONLY #define __P101 PAGE_READONLY_EXEC #define __P110 PAGE_READONLY_EXEC #define __P111 PAGE_READONLY_EXEC @@ -101,7 +102,7 @@ extern bool arm64_use_ng_mappings; #define __S001 PAGE_READONLY #define __S010 PAGE_SHARED #define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY_EXEC +#define __S100 PAGE_EXECONLY #define __S101 PAGE_READONLY_EXEC #define __S110 PAGE_SHARED_EXEC #define __S111 PAGE_SHARED_EXEC diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 47027796c2f9..0b10204e72fc 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -113,11 +113,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte)) #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID)) +/* + * Execute-only user mappings do not have the PTE_USER bit set. All valid + * kernel mappings have the PTE_UXN bit set. + */ #define pte_valid_not_user(pte) \ - ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID) -#define pte_valid_user(pte) \ - ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) - + ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN)) /* * Could the pte be present in the TLB? We must check mm_tlb_flush_pending * so that we don't erroneously return false for pages that have been @@ -130,12 +131,14 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte)) /* - * p??_access_permitted() is true for valid user mappings (subject to the - * write permission check). PROT_NONE mappings do not have the PTE_VALID bit - * set. + * p??_access_permitted() is true for valid user mappings (PTE_USER + * bit set, subject to the write permission check). For execute-only + * mappings, like PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits + * not set) must return false. PROT_NONE mappings do not have the + * PTE_VALID bit set. 
*/ #define pte_access_permitted(pte, write) \ - (pte_valid_user(pte) && (!(write) || pte_write(pte))) + (((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte))) #define pmd_access_permitted(pmd, write) \ (pte_access_permitted(pmd_pte(pmd), (write))) #define pud_access_permitted(pud, write) \ @@ -995,6 +998,18 @@ static inline bool arch_wants_old_prefaulted_pte(void) } #define arch_wants_old_prefaulted_pte arch_wants_old_prefaulted_pte +static inline pgprot_t arch_filter_pgprot(pgprot_t prot) +{ + if (cpus_have_const_cap(ARM64_HAS_EPAN)) + return prot; + + if (pgprot_val(prot) != pgprot_val(PAGE_EXECONLY)) + return prot; + + return PAGE_READONLY_EXEC; +} + + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_PGTABLE_H */ diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h index b112a11e9302..d50416be99be 100644 --- a/arch/arm64/include/asm/pointer_auth.h +++ b/arch/arm64/include/asm/pointer_auth.h @@ -3,6 +3,7 @@ #define __ASM_POINTER_AUTH_H #include <linux/bitops.h> +#include <linux/prctl.h> #include <linux/random.h> #include <asm/cpufeature.h> @@ -34,6 +35,25 @@ struct ptrauth_keys_kernel { struct ptrauth_key apia; }; +#define __ptrauth_key_install_nosync(k, v) \ +do { \ + struct ptrauth_key __pki_v = (v); \ + write_sysreg_s(__pki_v.lo, SYS_ ## k ## KEYLO_EL1); \ + write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \ +} while (0) + +static inline void ptrauth_keys_install_user(struct ptrauth_keys_user *keys) +{ + if (system_supports_address_auth()) { + __ptrauth_key_install_nosync(APIB, keys->apib); + __ptrauth_key_install_nosync(APDA, keys->apda); + __ptrauth_key_install_nosync(APDB, keys->apdb); + } + + if (system_supports_generic_auth()) + __ptrauth_key_install_nosync(APGA, keys->apga); +} + static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys) { if (system_supports_address_auth()) { @@ -45,14 +65,9 @@ static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys) if (system_supports_generic_auth()) get_random_bytes(&keys->apga, sizeof(keys->apga)); -} -#define __ptrauth_key_install_nosync(k, v) \ -do { \ - struct ptrauth_key __pki_v = (v); \ - write_sysreg_s(__pki_v.lo, SYS_ ## k ## KEYLO_EL1); \ - write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \ -} while (0) + ptrauth_keys_install_user(keys); +} static __always_inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys) { @@ -71,6 +86,10 @@ static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kerne extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg); +extern int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys, + unsigned long enabled); +extern int ptrauth_get_enabled_keys(struct task_struct *tsk); + static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr) { return ptrauth_clear_pac(ptr); @@ -85,8 +104,23 @@ static __always_inline void ptrauth_enable(void) isb(); } -#define ptrauth_thread_init_user(tsk) \ - ptrauth_keys_init_user(&(tsk)->thread.keys_user) +#define ptrauth_suspend_exit() \ + ptrauth_keys_install_user(¤t->thread.keys_user) + +#define ptrauth_thread_init_user() \ + do { \ + ptrauth_keys_init_user(¤t->thread.keys_user); \ + \ + /* enable all keys */ \ + if (system_supports_address_auth()) \ + set_task_sctlr_el1(current->thread.sctlr_user | \ + SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \ + SCTLR_ELx_ENDA | SCTLR_ELx_ENDB); \ + } while (0) + +#define ptrauth_thread_switch_user(tsk) \ + 
ptrauth_keys_install_user(&(tsk)->thread.keys_user) + #define ptrauth_thread_init_kernel(tsk) \ ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel) #define ptrauth_thread_switch_kernel(tsk) \ @@ -95,10 +129,17 @@ static __always_inline void ptrauth_enable(void) #else /* CONFIG_ARM64_PTR_AUTH */ #define ptrauth_enable() #define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL) +#define ptrauth_set_enabled_keys(tsk, keys, enabled) (-EINVAL) +#define ptrauth_get_enabled_keys(tsk) (-EINVAL) #define ptrauth_strip_insn_pac(lr) (lr) -#define ptrauth_thread_init_user(tsk) +#define ptrauth_suspend_exit() +#define ptrauth_thread_init_user() #define ptrauth_thread_init_kernel(tsk) +#define ptrauth_thread_switch_user(tsk) #define ptrauth_thread_switch_kernel(tsk) #endif /* CONFIG_ARM64_PTR_AUTH */ +#define PR_PAC_ENABLED_KEYS_MASK \ + (PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY) + #endif /* __ASM_POINTER_AUTH_H */ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index ca2cd75d3286..9df3feeee890 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -151,11 +151,15 @@ struct thread_struct { struct ptrauth_keys_kernel keys_kernel; #endif #ifdef CONFIG_ARM64_MTE - u64 sctlr_tcf0; u64 gcr_user_excl; #endif + u64 sctlr_user; }; +#define SCTLR_USER_MASK \ + (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | SCTLR_ELx_ENDA | SCTLR_ELx_ENDB | \ + SCTLR_EL1_TCF0_MASK) + static inline void arch_thread_struct_whitelist(unsigned long *offset, unsigned long *size) { @@ -247,10 +251,14 @@ extern void release_thread(struct task_struct *); unsigned long get_wchan(struct task_struct *p); +void set_task_sctlr_el1(u64 sctlr); + /* Thread switching */ extern struct task_struct *cpu_switch_to(struct task_struct *prev, struct task_struct *next); +asmlinkage void arm64_preempt_schedule_irq(void); + #define task_pt_regs(p) \ ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1) @@ -301,6 +309,11 @@ extern void __init minsigstksz_setup(void); /* PR_PAC_RESET_KEYS prctl */ #define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg) +/* PR_PAC_{SET,GET}_ENABLED_KEYS prctl */ +#define PAC_SET_ENABLED_KEYS(tsk, keys, enabled) \ + ptrauth_set_enabled_keys(tsk, keys, enabled) +#define PAC_GET_ENABLED_KEYS(tsk) ptrauth_get_enabled_keys(tsk) + #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI /* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */ long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg); diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h index 38187f74e089..b1dd7ecff7ef 100644 --- a/arch/arm64/include/asm/ptdump.h +++ b/arch/arm64/include/asm/ptdump.h @@ -23,7 +23,7 @@ struct ptdump_info { void ptdump_walk(struct seq_file *s, struct ptdump_info *info); #ifdef CONFIG_PTDUMP_DEBUGFS -void ptdump_debugfs_register(struct ptdump_info *info, const char *name); +void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name); #else static inline void ptdump_debugfs_register(struct ptdump_info *info, const char *name) { } diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h index 2f36b16a5b5d..e4ad9db53af1 100644 --- a/arch/arm64/include/asm/sections.h +++ b/arch/arm64/include/asm/sections.h @@ -13,6 +13,7 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; extern char __hyp_text_start[], __hyp_text_end[]; extern char __hyp_rodata_start[], __hyp_rodata_end[]; extern char __hyp_reloc_begin[], __hyp_reloc_end[]; +extern char __hyp_bss_start[], __hyp_bss_end[]; extern 
char __idmap_text_start[], __idmap_text_end[]; extern char __initdata_begin[], __initdata_end[]; extern char __inittext_begin[], __inittext_end[]; diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index bcb01ca15325..0e357757c0cc 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -145,6 +145,7 @@ bool cpus_are_stuck_in_kernel(void); extern void crash_smp_send_stop(void); extern bool smp_crash_stop_failed(void); +extern void panic_smp_self_stop(void); #endif /* ifndef __ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h index eb4a75d720ed..4b73463423c3 100644 --- a/arch/arm64/include/asm/sparsemem.h +++ b/arch/arm64/include/asm/sparsemem.h @@ -5,7 +5,6 @@ #ifndef __ASM_SPARSEMEM_H #define __ASM_SPARSEMEM_H -#ifdef CONFIG_SPARSEMEM #define MAX_PHYSMEM_BITS CONFIG_ARM64_PA_BITS /* @@ -27,6 +26,4 @@ #define SECTION_SIZE_BITS 27 #endif /* CONFIG_ARM64_64K_PAGES */ -#endif /* CONFIG_SPARSEMEM*/ - #endif diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index eb29b1fe8255..4b33ca620679 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -148,27 +148,7 @@ static inline bool on_accessible_stack(const struct task_struct *tsk, return false; } -static inline void start_backtrace(struct stackframe *frame, - unsigned long fp, unsigned long pc) -{ - frame->fp = fp; - frame->pc = pc; -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - frame->graph = 0; -#endif - - /* - * Prime the first unwind. - * - * In unwind_frame() we'll check that the FP points to a valid stack, - * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be - * treated as a transition to whichever stack that happens to be. The - * prev_fp value won't be used, but we set it to 0 such that it is - * definitely not an accessible stack address. 
- */ - bitmap_zero(frame->stacks_done, __NR_STACK_TYPES); - frame->prev_fp = 0; - frame->prev_type = STACK_TYPE_UNKNOWN; -} +void start_backtrace(struct stackframe *frame, unsigned long fp, + unsigned long pc); #endif /* __ASM_STACKTRACE_H */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index d4a5fca984c3..65d15700a168 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -283,6 +283,8 @@ #define SYS_PMSIRR_EL1_INTERVAL_MASK 0xffffffUL /* Filtering controls */ +#define SYS_PMSNEVFR_EL1 sys_reg(3, 0, 9, 9, 1) + #define SYS_PMSFCR_EL1 sys_reg(3, 0, 9, 9, 4) #define SYS_PMSFCR_EL1_FE_SHIFT 0 #define SYS_PMSFCR_EL1_FT_SHIFT 1 @@ -333,6 +335,55 @@ /*** End of Statistical Profiling Extension ***/ +/* + * TRBE Registers + */ +#define SYS_TRBLIMITR_EL1 sys_reg(3, 0, 9, 11, 0) +#define SYS_TRBPTR_EL1 sys_reg(3, 0, 9, 11, 1) +#define SYS_TRBBASER_EL1 sys_reg(3, 0, 9, 11, 2) +#define SYS_TRBSR_EL1 sys_reg(3, 0, 9, 11, 3) +#define SYS_TRBMAR_EL1 sys_reg(3, 0, 9, 11, 4) +#define SYS_TRBTRG_EL1 sys_reg(3, 0, 9, 11, 6) +#define SYS_TRBIDR_EL1 sys_reg(3, 0, 9, 11, 7) + +#define TRBLIMITR_LIMIT_MASK GENMASK_ULL(51, 0) +#define TRBLIMITR_LIMIT_SHIFT 12 +#define TRBLIMITR_NVM BIT(5) +#define TRBLIMITR_TRIG_MODE_MASK GENMASK(1, 0) +#define TRBLIMITR_TRIG_MODE_SHIFT 3 +#define TRBLIMITR_FILL_MODE_MASK GENMASK(1, 0) +#define TRBLIMITR_FILL_MODE_SHIFT 1 +#define TRBLIMITR_ENABLE BIT(0) +#define TRBPTR_PTR_MASK GENMASK_ULL(63, 0) +#define TRBPTR_PTR_SHIFT 0 +#define TRBBASER_BASE_MASK GENMASK_ULL(51, 0) +#define TRBBASER_BASE_SHIFT 12 +#define TRBSR_EC_MASK GENMASK(5, 0) +#define TRBSR_EC_SHIFT 26 +#define TRBSR_IRQ BIT(22) +#define TRBSR_TRG BIT(21) +#define TRBSR_WRAP BIT(20) +#define TRBSR_ABORT BIT(18) +#define TRBSR_STOP BIT(17) +#define TRBSR_MSS_MASK GENMASK(15, 0) +#define TRBSR_MSS_SHIFT 0 +#define TRBSR_BSC_MASK GENMASK(5, 0) +#define TRBSR_BSC_SHIFT 0 +#define TRBSR_FSC_MASK GENMASK(5, 0) +#define TRBSR_FSC_SHIFT 0 +#define TRBMAR_SHARE_MASK GENMASK(1, 0) +#define TRBMAR_SHARE_SHIFT 8 +#define TRBMAR_OUTER_MASK GENMASK(3, 0) +#define TRBMAR_OUTER_SHIFT 4 +#define TRBMAR_INNER_MASK GENMASK(3, 0) +#define TRBMAR_INNER_SHIFT 0 +#define TRBTRG_TRG_MASK GENMASK(31, 0) +#define TRBTRG_TRG_SHIFT 0 +#define TRBIDR_FLAG BIT(5) +#define TRBIDR_PROG BIT(4) +#define TRBIDR_ALIGN_MASK GENMASK(3, 0) +#define TRBIDR_ALIGN_SHIFT 0 + #define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1) #define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2) @@ -475,9 +526,15 @@ #define SYS_PMCCFILTR_EL0 sys_reg(3, 3, 14, 15, 7) #define SYS_SCTLR_EL2 sys_reg(3, 4, 1, 0, 0) +#define SYS_HFGRTR_EL2 sys_reg(3, 4, 1, 1, 4) +#define SYS_HFGWTR_EL2 sys_reg(3, 4, 1, 1, 5) +#define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6) #define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0) #define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1) #define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0) +#define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4) +#define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5) +#define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6) #define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0) #define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1) #define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1) @@ -565,8 +622,10 @@ #define SCTLR_ELx_TCF_ASYNC (UL(0x2) << SCTLR_ELx_TCF_SHIFT) #define SCTLR_ELx_TCF_MASK (UL(0x3) << SCTLR_ELx_TCF_SHIFT) +#define SCTLR_ELx_ENIA_SHIFT 31 + #define SCTLR_ELx_ITFSB (BIT(37)) -#define SCTLR_ELx_ENIA (BIT(31)) +#define SCTLR_ELx_ENIA (BIT(SCTLR_ELx_ENIA_SHIFT)) #define SCTLR_ELx_ENIB (BIT(30)) #define SCTLR_ELx_ENDA (BIT(27)) #define 
SCTLR_ELx_EE (BIT(25)) @@ -579,9 +638,6 @@ #define SCTLR_ELx_A (BIT(1)) #define SCTLR_ELx_M (BIT(0)) -#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \ - SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_IESB) - /* SCTLR_EL2 specific flags. */ #define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \ (BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \ @@ -593,10 +649,15 @@ #define ENDIAN_SET_EL2 0 #endif +#define INIT_SCTLR_EL2_MMU_ON \ + (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_ELx_I | \ + SCTLR_ELx_IESB | SCTLR_ELx_WXN | ENDIAN_SET_EL2 | SCTLR_EL2_RES1) + #define INIT_SCTLR_EL2_MMU_OFF \ (SCTLR_EL2_RES1 | ENDIAN_SET_EL2) /* SCTLR_EL1 specific flags. */ +#define SCTLR_EL1_EPAN (BIT(57)) #define SCTLR_EL1_ATA0 (BIT(42)) #define SCTLR_EL1_TCF0_SHIFT 38 @@ -637,7 +698,7 @@ SCTLR_EL1_SED | SCTLR_ELx_I | SCTLR_EL1_DZE | SCTLR_EL1_UCT | \ SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \ SCTLR_ELx_ATA | SCTLR_EL1_ATA0 | ENDIAN_SET_EL1 | SCTLR_EL1_UCI | \ - SCTLR_EL1_RES1) + SCTLR_EL1_EPAN | SCTLR_EL1_RES1) /* MAIR_ELx memory attributes (used by Linux) */ #define MAIR_ATTR_DEVICE_nGnRnE UL(0x00) @@ -840,6 +901,7 @@ #define ID_AA64MMFR2_CNP_SHIFT 0 /* id_aa64dfr0 */ +#define ID_AA64DFR0_TRBE_SHIFT 44 #define ID_AA64DFR0_TRACE_FILT_SHIFT 40 #define ID_AA64DFR0_DOUBLELOCK_SHIFT 36 #define ID_AA64DFR0_PMSVER_SHIFT 32 @@ -1032,6 +1094,66 @@ #define TRFCR_ELx_ExTRE BIT(1) #define TRFCR_ELx_E0TRE BIT(0) + +/* GIC Hypervisor interface registers */ +/* ICH_MISR_EL2 bit definitions */ +#define ICH_MISR_EOI (1 << 0) +#define ICH_MISR_U (1 << 1) + +/* ICH_LR*_EL2 bit definitions */ +#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1) + +#define ICH_LR_EOI (1ULL << 41) +#define ICH_LR_GROUP (1ULL << 60) +#define ICH_LR_HW (1ULL << 61) +#define ICH_LR_STATE (3ULL << 62) +#define ICH_LR_PENDING_BIT (1ULL << 62) +#define ICH_LR_ACTIVE_BIT (1ULL << 63) +#define ICH_LR_PHYS_ID_SHIFT 32 +#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT) +#define ICH_LR_PRIORITY_SHIFT 48 +#define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT) + +/* ICH_HCR_EL2 bit definitions */ +#define ICH_HCR_EN (1 << 0) +#define ICH_HCR_UIE (1 << 1) +#define ICH_HCR_NPIE (1 << 3) +#define ICH_HCR_TC (1 << 10) +#define ICH_HCR_TALL0 (1 << 11) +#define ICH_HCR_TALL1 (1 << 12) +#define ICH_HCR_EOIcount_SHIFT 27 +#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT) + +/* ICH_VMCR_EL2 bit definitions */ +#define ICH_VMCR_ACK_CTL_SHIFT 2 +#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT) +#define ICH_VMCR_FIQ_EN_SHIFT 3 +#define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT) +#define ICH_VMCR_CBPR_SHIFT 4 +#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT) +#define ICH_VMCR_EOIM_SHIFT 9 +#define ICH_VMCR_EOIM_MASK (1 << ICH_VMCR_EOIM_SHIFT) +#define ICH_VMCR_BPR1_SHIFT 18 +#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT) +#define ICH_VMCR_BPR0_SHIFT 21 +#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT) +#define ICH_VMCR_PMR_SHIFT 24 +#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) +#define ICH_VMCR_ENG0_SHIFT 0 +#define ICH_VMCR_ENG0_MASK (1 << ICH_VMCR_ENG0_SHIFT) +#define ICH_VMCR_ENG1_SHIFT 1 +#define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT) + +/* ICH_VTR_EL2 bit definitions */ +#define ICH_VTR_PRI_BITS_SHIFT 29 +#define ICH_VTR_PRI_BITS_MASK (7 << ICH_VTR_PRI_BITS_SHIFT) +#define ICH_VTR_ID_BITS_SHIFT 23 +#define ICH_VTR_ID_BITS_MASK (7 << ICH_VTR_ID_BITS_SHIFT) +#define ICH_VTR_SEIS_SHIFT 22 +#define ICH_VTR_SEIS_MASK (1 << 
ICH_VTR_SEIS_SHIFT) +#define ICH_VTR_A3V_SHIFT 21 +#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT) + #ifdef __ASSEMBLY__ .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30 diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 9f4e3b266f21..6623c99f0984 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -55,6 +55,8 @@ void arch_setup_new_exec(void); #define arch_setup_new_exec arch_setup_new_exec void arch_release_task_struct(struct task_struct *tsk); +int arch_dup_task_struct(struct task_struct *dst, + struct task_struct *src); #endif diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h index 3b8dca4eb08d..ec2db3419c41 100644 --- a/arch/arm64/include/asm/topology.h +++ b/arch/arm64/include/asm/topology.h @@ -17,17 +17,9 @@ int pcibus_to_node(struct pci_bus *bus); #include <linux/arch_topology.h> void update_freq_counters_refs(void); -void topology_scale_freq_tick(void); - -#ifdef CONFIG_ARM64_AMU_EXTN -/* - * Replace task scheduler's default counter-based - * frequency-invariance scale factor setting. - */ -#define arch_scale_freq_tick topology_scale_freq_tick -#endif /* CONFIG_ARM64_AMU_EXTN */ /* Replace task scheduler's default frequency-invariant accounting */ +#define arch_scale_freq_tick topology_scale_freq_tick #define arch_set_freq_scale topology_set_freq_scale #define arch_scale_freq_capacity topology_get_freq_scale #define arch_scale_freq_invariant topology_scale_freq_invariant diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 0deb88467111..b5f08621fa29 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -20,6 +20,7 @@ #include <asm/cpufeature.h> #include <asm/mmu.h> +#include <asm/mte.h> #include <asm/ptrace.h> #include <asm/memory.h> #include <asm/extable.h> @@ -188,6 +189,23 @@ static inline void __uaccess_enable_tco(void) ARM64_MTE, CONFIG_KASAN_HW_TAGS)); } +/* + * These functions disable tag checking only if in MTE async mode + * since the sync mode generates exceptions synchronously and the + * nofault or load_unaligned_zeropad can handle them. 
+ */ +static inline void __uaccess_disable_tco_async(void) +{ + if (system_uses_mte_async_mode()) + __uaccess_disable_tco(); +} + +static inline void __uaccess_enable_tco_async(void) +{ + if (system_uses_mte_async_mode()) + __uaccess_enable_tco(); +} + static inline void uaccess_disable_privileged(void) { __uaccess_disable_tco(); @@ -307,8 +325,10 @@ do { \ do { \ int __gkn_err = 0; \ \ + __uaccess_enable_tco_async(); \ __raw_get_mem("ldr", *((type *)(dst)), \ (__force type *)(src), __gkn_err); \ + __uaccess_disable_tco_async(); \ if (unlikely(__gkn_err)) \ goto err_label; \ } while (0) @@ -380,8 +400,10 @@ do { \ do { \ int __pkn_err = 0; \ \ + __uaccess_enable_tco_async(); \ __raw_put_mem("str", *((type *)(src)), \ (__force type *)(dst), __pkn_err); \ + __uaccess_disable_tco_async(); \ if (unlikely(__pkn_err)) \ goto err_label; \ } while(0) diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 949788f5ba40..727bfc3be99b 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -38,7 +38,7 @@ #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5) #define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800) -#define __NR_compat_syscalls 443 +#define __NR_compat_syscalls 447 #endif #define __ARCH_WANT_SYS_CLONE diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 3d874f624056..7859749d6628 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -893,6 +893,14 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise) __SYSCALL(__NR_epoll_pwait2, compat_sys_epoll_pwait2) #define __NR_mount_setattr 442 __SYSCALL(__NR_mount_setattr, sys_mount_setattr) +#define __NR_quotactl_path 443 +__SYSCALL(__NR_quotactl_path, sys_quotactl_path) +#define __NR_landlock_create_ruleset 444 +__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset) +#define __NR_landlock_add_rule 445 +__SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule) +#define __NR_landlock_restrict_self 446 +__SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self) /* * Please add new compat syscalls above this comment and update diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h index 7508b0ac1d21..ecb6fd4c3c64 100644 --- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h +++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h @@ -155,7 +155,8 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void) } #ifdef CONFIG_TIME_NS -static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void) +static __always_inline +const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd) { const struct vdso_data *ret; diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h index 631ab1281633..4f7a629df81f 100644 --- a/arch/arm64/include/asm/vdso/gettimeofday.h +++ b/arch/arm64/include/asm/vdso/gettimeofday.h @@ -83,11 +83,7 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, */ isb(); asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory"); - /* - * This isb() is required to prevent that the seq lock is - * speculated.# - */ - isb(); + arch_counter_enforce_ordering(res); return res; } @@ -100,7 +96,7 @@ const struct vdso_data *__arch_get_vdso_data(void) #ifdef CONFIG_TIME_NS static __always_inline -const struct vdso_data *__arch_get_timens_vdso_data(void) +const struct vdso_data *__arch_get_timens_vdso_data(const struct 
vdso_data *vd) { return _timens_data; } diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h index 2ca708ab9b20..7a22aeea9bb5 100644 --- a/arch/arm64/include/asm/vmalloc.h +++ b/arch/arm64/include/asm/vmalloc.h @@ -1,4 +1,28 @@ #ifndef _ASM_ARM64_VMALLOC_H #define _ASM_ARM64_VMALLOC_H +#include <asm/page.h> + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP + +#define arch_vmap_pud_supported arch_vmap_pud_supported +static inline bool arch_vmap_pud_supported(pgprot_t prot) +{ + /* + * Only 4k granule supports level 1 block mappings. + * SW table walks can't handle removal of intermediate entries. + */ + return IS_ENABLED(CONFIG_ARM64_4K_PAGES) && + !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); +} + +#define arch_vmap_pmd_supported arch_vmap_pmd_supported +static inline bool arch_vmap_pmd_supported(pgprot_t prot) +{ + /* See arch_vmap_pud_supported() */ + return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); +} + +#endif + #endif /* _ASM_ARM64_VMALLOC_H */ diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h index 3333950b5909..2dcb104c645b 100644 --- a/arch/arm64/include/asm/word-at-a-time.h +++ b/arch/arm64/include/asm/word-at-a-time.h @@ -53,7 +53,9 @@ static inline unsigned long find_zero(unsigned long mask) */ static inline unsigned long load_unaligned_zeropad(const void *addr) { - unsigned long ret, offset; + unsigned long ret, tmp; + + __uaccess_enable_tco_async(); /* Load word from unaligned pointer addr */ asm( @@ -61,9 +63,9 @@ static inline unsigned long load_unaligned_zeropad(const void *addr) "2:\n" " .pushsection .fixup,\"ax\"\n" " .align 2\n" - "3: and %1, %2, #0x7\n" - " bic %2, %2, #0x7\n" - " ldr %0, [%2]\n" + "3: bic %1, %2, #0x7\n" + " ldr %0, [%1]\n" + " and %1, %2, #0x7\n" " lsl %1, %1, #0x3\n" #ifndef __AARCH64EB__ " lsr %0, %0, %1\n" @@ -73,9 +75,11 @@ static inline unsigned long load_unaligned_zeropad(const void *addr) " b 2b\n" " .popsection\n" _ASM_EXTABLE(1b, 3b) - : "=&r" (ret), "=&r" (offset) + : "=&r" (ret), "=&r" (tmp) : "r" (addr), "Q" (*(unsigned long *)addr)); + __uaccess_disable_tco_async(); + return ret; } diff --git a/arch/arm64/include/asm/xen/swiotlb-xen.h b/arch/arm64/include/asm/xen/swiotlb-xen.h new file mode 100644 index 000000000000..455ade5d5320 --- /dev/null +++ b/arch/arm64/include/asm/xen/swiotlb-xen.h @@ -0,0 +1 @@ +#include <xen/arm/swiotlb-xen.h> diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index ed65576ce710..6cc97730790e 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -9,6 +9,11 @@ CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_insn.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE) +# Remove stack protector to avoid triggering unneeded stack canary +# checks due to randomize_kstack_offset. +CFLAGS_REMOVE_syscall.o = -fstack-protector -fstack-protector-strong +CFLAGS_syscall.o += -fno-stack-protector + # Object file lists. obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ entry-common.o entry-fpsimd.o process.o ptrace.o \ diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c index e7c941d8340d..bfeeb5319abf 100644 --- a/arch/arm64/kernel/acpi_parking_protocol.c +++ b/arch/arm64/kernel/acpi_parking_protocol.c @@ -99,7 +99,8 @@ static int acpi_parking_protocol_cpu_boot(unsigned int cpu) * that read this address need to convert this address to the * Boot-Loader's endianness before jumping. 
*/ - writeq_relaxed(__pa_symbol(secondary_entry), &mailbox->entry_point); + writeq_relaxed(__pa_symbol(function_nocfi(secondary_entry)), + &mailbox->entry_point); writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id); arch_send_wakeup_ipi_mask(cpumask_of(cpu)); diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index 1184c44ea2c7..c906d20c7b52 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -133,11 +133,10 @@ static void clean_dcache_range_nopatch(u64 start, u64 end) } while (cur += d_size, cur < end); } -static void __apply_alternatives(void *alt_region, bool is_module, +static void __nocfi __apply_alternatives(struct alt_region *region, bool is_module, unsigned long *feature_mask) { struct alt_instr *alt; - struct alt_region *region = alt_region; __le32 *origptr, *updptr; alternative_cb_t alt_cb; diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index a36e2fc330d4..0cb34ccb6e73 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -43,6 +43,7 @@ int main(void) #endif BLANK(); DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context)); + DEFINE(THREAD_SCTLR_USER, offsetof(struct task_struct, thread.sctlr_user)); #ifdef CONFIG_ARM64_PTR_AUTH DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user)); DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel)); @@ -95,6 +96,8 @@ int main(void) DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); BLANK(); DEFINE(PREEMPT_DISABLE_OFFSET, PREEMPT_DISABLE_OFFSET); + DEFINE(SOFTIRQ_SHIFT, SOFTIRQ_SHIFT); + DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending)); BLANK(); DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack)); DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task)); @@ -120,6 +123,9 @@ int main(void) DEFINE(NVHE_INIT_TPIDR_EL2, offsetof(struct kvm_nvhe_init_params, tpidr_el2)); DEFINE(NVHE_INIT_STACK_HYP_VA, offsetof(struct kvm_nvhe_init_params, stack_hyp_va)); DEFINE(NVHE_INIT_PGD_PA, offsetof(struct kvm_nvhe_init_params, pgd_pa)); + DEFINE(NVHE_INIT_HCR_EL2, offsetof(struct kvm_nvhe_init_params, hcr_el2)); + DEFINE(NVHE_INIT_VTTBR, offsetof(struct kvm_nvhe_init_params, vttbr)); + DEFINE(NVHE_INIT_VTCR, offsetof(struct kvm_nvhe_init_params, vtcr)); #endif #ifdef CONFIG_CPU_PM DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)); @@ -147,10 +153,6 @@ int main(void) #endif #ifdef CONFIG_ARM64_PTR_AUTH DEFINE(PTRAUTH_USER_KEY_APIA, offsetof(struct ptrauth_keys_user, apia)); - DEFINE(PTRAUTH_USER_KEY_APIB, offsetof(struct ptrauth_keys_user, apib)); - DEFINE(PTRAUTH_USER_KEY_APDA, offsetof(struct ptrauth_keys_user, apda)); - DEFINE(PTRAUTH_USER_KEY_APDB, offsetof(struct ptrauth_keys_user, apdb)); - DEFINE(PTRAUTH_USER_KEY_APGA, offsetof(struct ptrauth_keys_user, apga)); DEFINE(PTRAUTH_KERNEL_KEY_APIA, offsetof(struct ptrauth_keys_kernel, apia)); BLANK(); #endif diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S index 37721eb6f9a1..d47ff63a5b66 100644 --- a/arch/arm64/kernel/cpu-reset.S +++ b/arch/arm64/kernel/cpu-reset.S @@ -30,10 +30,7 @@ * flat identity mapping. */ SYM_CODE_START(__cpu_soft_restart) - /* Clear sctlr_el1 flags. 
*/ - mrs x12, sctlr_el1 - mov_q x13, SCTLR_ELx_FLAGS - bic x12, x12, x13 + mov_q x12, INIT_SCTLR_EL1_MMU_OFF pre_disable_mmu_workaround /* * either disable EL1&0 translation regime or disable EL2&0 translation diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h index ed50e9587ad8..9a7b1262ef17 100644 --- a/arch/arm64/kernel/cpu-reset.h +++ b/arch/arm64/kernel/cpu-reset.h @@ -13,16 +13,16 @@ void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry, unsigned long arg0, unsigned long arg1, unsigned long arg2); -static inline void __noreturn cpu_soft_restart(unsigned long entry, - unsigned long arg0, - unsigned long arg1, - unsigned long arg2) +static inline void __noreturn __nocfi cpu_soft_restart(unsigned long entry, + unsigned long arg0, + unsigned long arg1, + unsigned long arg2) { typeof(__cpu_soft_restart) *restart; unsigned long el2_switch = !is_kernel_in_hyp_mode() && is_hyp_mode_available(); - restart = (void *)__pa_symbol(__cpu_soft_restart); + restart = (void *)__pa_symbol(function_nocfi(__cpu_soft_restart)); cpu_install_idmap(); restart(el2_switch, entry, arg0, arg1, arg2); diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 506a1cd37973..e2c20c036442 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -526,6 +526,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = { 1, 0), }, #endif +#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM + { + /* NVIDIA Carmel */ + .desc = "NVIDIA Carmel CNP erratum", + .capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP, + ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL), + }, +#endif { } }; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 066030717a4c..efed2830d141 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -68,6 +68,7 @@ #include <linux/sort.h> #include <linux/stop_machine.h> #include <linux/types.h> +#include <linux/minmax.h> #include <linux/mm.h> #include <linux/cpu.h> #include <linux/kasan.h> @@ -383,7 +384,6 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = { * of support. */ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6), ARM64_FTR_END, }; @@ -695,14 +695,14 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, ret = ftrp->safe_val; break; case FTR_LOWER_SAFE: - ret = new < cur ? new : cur; + ret = min(new, cur); break; case FTR_HIGHER_OR_ZERO_SAFE: if (!cur || !new) break; fallthrough; case FTR_HIGHER_SAFE: - ret = new > cur ? new : cur; + ret = max(new, cur); break; default: BUG(); @@ -809,6 +809,12 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new) reg->name, ftrp->shift + ftrp->width - 1, ftrp->shift, str, tmp); + } else if ((ftr_mask & reg->override->val) == ftr_mask) { + reg->override->val &= ~ftr_mask; + pr_warn("%s[%d:%d]: impossible override, ignored\n", + reg->name, + ftrp->shift + ftrp->width - 1, + ftrp->shift); } val = arm64_ftr_set_value(ftrp, val, ftr_new); @@ -1321,7 +1327,10 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope) * may share TLB entries with a CPU stuck in the crashed * kernel. 
*/ - if (is_kdump_kernel()) + if (is_kdump_kernel()) + return false; + + if (cpus_have_const_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP)) return false; return has_cpuid_feature(entry, scope); @@ -1443,7 +1452,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, } #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -static void +static void __nocfi kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) { typedef void (kpti_remap_fn)(int, int, phys_addr_t); @@ -1460,7 +1469,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) if (arm64_use_ng_mappings) return; - remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); + remap_fn = (void *)__pa_symbol(function_nocfi(idmap_kpti_install_ng_mappings)); cpu_install_idmap(); remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir)); @@ -1617,7 +1626,6 @@ int get_cpu_with_amu_feat(void) } #endif -#ifdef CONFIG_ARM64_VHE static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused) { return is_kernel_in_hyp_mode(); @@ -1636,7 +1644,6 @@ static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused) if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN)) write_sysreg(read_sysreg(tpidr_el1), tpidr_el2); } -#endif static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused) { @@ -1821,6 +1828,18 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .cpu_enable = cpu_enable_pan, }, #endif /* CONFIG_ARM64_PAN */ +#ifdef CONFIG_ARM64_EPAN + { + .desc = "Enhanced Privileged Access Never", + .capability = ARM64_HAS_EPAN, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + .sys_reg = SYS_ID_AA64MMFR1_EL1, + .field_pos = ID_AA64MMFR1_PAN_SHIFT, + .sign = FTR_UNSIGNED, + .min_field_value = 3, + }, +#endif /* CONFIG_ARM64_EPAN */ #ifdef CONFIG_ARM64_LSE_ATOMICS { .desc = "LSE atomic instructions", @@ -1839,7 +1858,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, .matches = has_no_hw_prefetch, }, -#ifdef CONFIG_ARM64_VHE { .desc = "Virtualization Host Extensions", .capability = ARM64_HAS_VIRT_HOST_EXTN, @@ -1847,7 +1865,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = runs_at_el2, .cpu_enable = cpu_copy_el2regs, }, -#endif /* CONFIG_ARM64_VHE */ { .desc = "32-bit EL0 Support", .capability = ARM64_HAS_32BIT_EL0, diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c index b512b5503f6e..03991eeff643 100644 --- a/arch/arm64/kernel/cpuidle.c +++ b/arch/arm64/kernel/cpuidle.c @@ -29,7 +29,7 @@ int arm_cpuidle_init(unsigned int cpu) /** * arm_cpuidle_suspend() - function to enter a low-power idle state - * @arg: argument to pass to CPU suspend operations + * @index: argument to pass to CPU suspend operations * * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU * operations back-end error code otherwise. diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 77605aec25fe..51fcf99d5351 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -353,7 +353,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) * with the CLIDR_EL1 fields to avoid triggering false warnings * when there is a mismatch across the CPUs. Keep track of the * effective value of the CTR_EL0 in our internal records for - * acurate sanity check and feature enablement. + * accurate sanity check and feature enablement. 
*/ info->reg_ctr = read_cpuid_effective_cachetype(); info->reg_dczid = read_cpuid(DCZID_EL0); diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c index e6e284265f19..58303a9ec32c 100644 --- a/arch/arm64/kernel/crash_dump.c +++ b/arch/arm64/kernel/crash_dump.c @@ -64,5 +64,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos) { memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count); + *ppos += count; + return count; } diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 9d3588450473..340d04e13617 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -37,6 +37,8 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs) lockdep_hardirqs_off(CALLER_ADDR0); rcu_irq_enter_check_tick(); trace_hardirqs_off_finish(); + + mte_check_tfsr_entry(); } /* @@ -47,6 +49,8 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs) { lockdep_assert_irqs_disabled(); + mte_check_tfsr_exit(); + if (interrupts_enabled(regs)) { if (regs->exit_rcu) { trace_hardirqs_on_prepare(); @@ -226,14 +230,6 @@ static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr) { unsigned long far = read_sysreg(far_el1); - /* - * The CPU masked interrupts, and we are leaving them masked during - * do_debug_exception(). Update PMR as if we had called - * local_daif_mask(). - */ - if (system_uses_irq_prio_masking()) - gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); - arm64_enter_el1_dbg(regs); if (!cortex_a76_erratum_1463225_debug_handler(regs)) do_debug_exception(far, esr, regs); @@ -293,6 +289,8 @@ asmlinkage void noinstr enter_from_user_mode(void) asmlinkage void noinstr exit_to_user_mode(void) { + mte_check_tfsr_exit(); + trace_hardirqs_on_prepare(); lockdep_hardirqs_on_prepare(CALLER_ADDR0); user_enter_irqoff(); @@ -398,9 +396,6 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr) /* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */ unsigned long far = read_sysreg(far_el1); - if (system_uses_irq_prio_masking()) - gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); - enter_from_user_mode(); do_debug_exception(far, esr, regs); local_daif_restore(DAIF_PROCCTX_NOIRQ); @@ -408,9 +403,6 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr) static void noinstr el0_svc(struct pt_regs *regs) { - if (system_uses_irq_prio_masking()) - gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); - enter_from_user_mode(); cortex_a76_erratum_1463225_svc_handler(); do_el0_svc(regs); @@ -486,9 +478,6 @@ static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr) static void noinstr el0_svc_compat(struct pt_regs *regs) { - if (system_uses_irq_prio_masking()) - gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); - enter_from_user_mode(); cortex_a76_erratum_1463225_svc_handler(); do_el0_svc_compat(regs); diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S index 2ca395c25448..3ecec60d3295 100644 --- a/arch/arm64/kernel/entry-fpsimd.S +++ b/arch/arm64/kernel/entry-fpsimd.S @@ -48,6 +48,11 @@ SYM_FUNC_START(sve_get_vl) ret SYM_FUNC_END(sve_get_vl) +SYM_FUNC_START(sve_set_vq) + sve_load_vq x0, x1, x2 + ret +SYM_FUNC_END(sve_set_vq) + /* * Load SVE state from FPSIMD state. 
* diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index a31a0a713c85..3513984a88bd 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -148,16 +148,18 @@ alternative_cb_end .endm /* Check for MTE asynchronous tag check faults */ - .macro check_mte_async_tcf, flgs, tmp + .macro check_mte_async_tcf, tmp, ti_flags #ifdef CONFIG_ARM64_MTE + .arch_extension lse alternative_if_not ARM64_MTE b 1f alternative_else_nop_endif mrs_s \tmp, SYS_TFSRE0_EL1 tbz \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f /* Asynchronous TCF occurred for TTBR0 access, set the TI flag */ - orr \flgs, \flgs, #_TIF_MTE_ASYNC_FAULT - str \flgs, [tsk, #TSK_TI_FLAGS] + mov \tmp, #_TIF_MTE_ASYNC_FAULT + add \ti_flags, tsk, #TSK_TI_FLAGS + stset \tmp, [\ti_flags] msr_s SYS_TFSRE0_EL1, xzr 1: #endif @@ -244,10 +246,32 @@ alternative_else_nop_endif disable_step_tsk x19, x20 /* Check for asynchronous tag check faults in user space */ - check_mte_async_tcf x19, x22 + check_mte_async_tcf x22, x23 apply_ssbd 1, x22, x23 - ptrauth_keys_install_kernel tsk, x20, x22, x23 +#ifdef CONFIG_ARM64_PTR_AUTH +alternative_if ARM64_HAS_ADDRESS_AUTH + /* + * Enable IA for in-kernel PAC if the task had it disabled. Although + * this could be implemented with an unconditional MRS which would avoid + * a load, this was measured to be slower on Cortex-A75 and Cortex-A76. + * + * Install the kernel IA key only if IA was enabled in the task. If IA + * was disabled on kernel exit then we would have left the kernel IA + * installed so there is no need to install it again. + */ + ldr x0, [tsk, THREAD_SCTLR_USER] + tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f + __ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23 + b 2f +1: + mrs x0, sctlr_el1 + orr x0, x0, SCTLR_ELx_ENIA + msr sctlr_el1, x0 +2: + isb +alternative_else_nop_endif +#endif mte_set_kernel_gcr x22, x23 @@ -261,16 +285,16 @@ alternative_else_nop_endif stp lr, x21, [sp, #S_LR] /* - * For exceptions from EL0, terminate the callchain here. + * For exceptions from EL0, create a terminal frame record. * For exceptions from EL1, create a synthetic frame record so the * interrupted code shows up in the backtrace. */ .if \el == 0 - mov x29, xzr + stp xzr, xzr, [sp, #S_STACKFRAME] .else stp x29, x22, [sp, #S_STACKFRAME] - add x29, sp, #S_STACKFRAME .endif + add x29, sp, #S_STACKFRAME #ifdef CONFIG_ARM64_SW_TTBR0_PAN alternative_if_not ARM64_HAS_PAN @@ -290,6 +314,8 @@ alternative_else_nop_endif alternative_if ARM64_HAS_IRQ_PRIO_MASKING mrs_s x20, SYS_ICC_PMR_EL1 str x20, [sp, #S_PMR_SAVE] + mov x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET + msr_s SYS_ICC_PMR_EL1, x20 alternative_else_nop_endif /* Re-enable tag checking (TCO set on exception entry) */ @@ -351,8 +377,26 @@ alternative_else_nop_endif 3: scs_save tsk, x0 - /* No kernel C function calls after this as user keys are set. */ - ptrauth_keys_install_user tsk, x0, x1, x2 +#ifdef CONFIG_ARM64_PTR_AUTH +alternative_if ARM64_HAS_ADDRESS_AUTH + /* + * IA was enabled for in-kernel PAC. Disable it now if needed, or + * alternatively install the user's IA. All other per-task keys and + * SCTLR bits were updated on task switch. + * + * No kernel C function calls after this. + */ + ldr x0, [tsk, THREAD_SCTLR_USER] + tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f + __ptrauth_keys_install_user tsk, x0, x1, x2 + b 2f +1: + mrs x0, sctlr_el1 + bic x0, x0, SCTLR_ELx_ENIA + msr sctlr_el1, x0 +2: +alternative_else_nop_endif +#endif mte_set_user_gcr tsk, x0, x1 @@ -491,28 +535,14 @@ tsk .req x28 // current thread_info /* * Interrupt handling. 
*/ - .macro irq_handler - ldr_l x1, handle_arch_irq + .macro irq_handler, handler:req + ldr_l x1, \handler mov x0, sp irq_stack_entry blr x1 irq_stack_exit .endm -#ifdef CONFIG_ARM64_PSEUDO_NMI - /* - * Set res to 0 if irqs were unmasked in interrupted context. - * Otherwise set res to non-0 value. - */ - .macro test_irqs_unmasked res:req, pmr:req -alternative_if ARM64_HAS_IRQ_PRIO_MASKING - sub \res, \pmr, #GIC_PRIO_IRQON -alternative_else - mov \res, xzr -alternative_endif - .endm -#endif - .macro gic_prio_kentry_setup, tmp:req #ifdef CONFIG_ARM64_PSEUDO_NMI alternative_if ARM64_HAS_IRQ_PRIO_MASKING @@ -522,13 +552,43 @@ alternative_endif #endif .endm - .macro gic_prio_irq_setup, pmr:req, tmp:req -#ifdef CONFIG_ARM64_PSEUDO_NMI - alternative_if ARM64_HAS_IRQ_PRIO_MASKING - orr \tmp, \pmr, #GIC_PRIO_PSR_I_SET - msr_s SYS_ICC_PMR_EL1, \tmp - alternative_else_nop_endif + .macro el1_interrupt_handler, handler:req + enable_da + + mov x0, sp + bl enter_el1_irq_or_nmi + + irq_handler \handler + +#ifdef CONFIG_PREEMPTION + ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count +alternative_if ARM64_HAS_IRQ_PRIO_MASKING + /* + * DA were cleared at start of handling, and IF are cleared by + * the GIC irqchip driver using gic_arch_enable_irqs() for + * normal IRQs. If anything is set, it means we come back from + * an NMI instead of a normal IRQ, so skip preemption + */ + mrs x0, daif + orr x24, x24, x0 +alternative_else_nop_endif + cbnz x24, 1f // preempt count != 0 || NMI return path + bl arm64_preempt_schedule_irq // irq en/disable is done inside +1: #endif + + mov x0, sp + bl exit_el1_irq_or_nmi + .endm + + .macro el0_interrupt_handler, handler:req + user_exit_irqoff + enable_da + + tbz x22, #55, 1f + bl do_el0_irq_bp_hardening +1: + irq_handler \handler .endm .text @@ -547,18 +607,18 @@ SYM_CODE_START(vectors) kernel_ventry 1, sync // Synchronous EL1h kernel_ventry 1, irq // IRQ EL1h - kernel_ventry 1, fiq_invalid // FIQ EL1h + kernel_ventry 1, fiq // FIQ EL1h kernel_ventry 1, error // Error EL1h kernel_ventry 0, sync // Synchronous 64-bit EL0 kernel_ventry 0, irq // IRQ 64-bit EL0 - kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0 + kernel_ventry 0, fiq // FIQ 64-bit EL0 kernel_ventry 0, error // Error 64-bit EL0 #ifdef CONFIG_COMPAT kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0 kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0 - kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0 + kernel_ventry 0, fiq_compat, 32 // FIQ 32-bit EL0 kernel_ventry 0, error_compat, 32 // Error 32-bit EL0 #else kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0 @@ -624,12 +684,6 @@ SYM_CODE_START_LOCAL(el0_error_invalid) inv_entry 0, BAD_ERROR SYM_CODE_END(el0_error_invalid) -#ifdef CONFIG_COMPAT -SYM_CODE_START_LOCAL(el0_fiq_invalid_compat) - inv_entry 0, BAD_FIQ, 32 -SYM_CODE_END(el0_fiq_invalid_compat) -#endif - SYM_CODE_START_LOCAL(el1_sync_invalid) inv_entry 1, BAD_SYNC SYM_CODE_END(el1_sync_invalid) @@ -660,35 +714,16 @@ SYM_CODE_END(el1_sync) .align 6 SYM_CODE_START_LOCAL_NOALIGN(el1_irq) kernel_entry 1 - gic_prio_irq_setup pmr=x20, tmp=x1 - enable_da_f - - mov x0, sp - bl enter_el1_irq_or_nmi - - irq_handler - -#ifdef CONFIG_PREEMPTION - ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count -alternative_if ARM64_HAS_IRQ_PRIO_MASKING - /* - * DA_F were cleared at start of handling. 
If anything is set in DAIF, - * we come back from an NMI, so skip preemption - */ - mrs x0, daif - orr x24, x24, x0 -alternative_else_nop_endif - cbnz x24, 1f // preempt count != 0 || NMI return path - bl arm64_preempt_schedule_irq // irq en/disable is done inside -1: -#endif - - mov x0, sp - bl exit_el1_irq_or_nmi - + el1_interrupt_handler handle_arch_irq kernel_exit 1 SYM_CODE_END(el1_irq) +SYM_CODE_START_LOCAL_NOALIGN(el1_fiq) + kernel_entry 1 + el1_interrupt_handler handle_arch_fiq + kernel_exit 1 +SYM_CODE_END(el1_fiq) + /* * EL0 mode handlers. */ @@ -715,6 +750,11 @@ SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat) b el0_irq_naked SYM_CODE_END(el0_irq_compat) +SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat) + kernel_entry 0, 32 + b el0_fiq_naked +SYM_CODE_END(el0_fiq_compat) + SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat) kernel_entry 0, 32 b el0_error_naked @@ -725,22 +765,20 @@ SYM_CODE_END(el0_error_compat) SYM_CODE_START_LOCAL_NOALIGN(el0_irq) kernel_entry 0 el0_irq_naked: - gic_prio_irq_setup pmr=x20, tmp=x0 - user_exit_irqoff - enable_da_f - - tbz x22, #55, 1f - bl do_el0_irq_bp_hardening -1: - irq_handler - + el0_interrupt_handler handle_arch_irq b ret_to_user SYM_CODE_END(el0_irq) +SYM_CODE_START_LOCAL_NOALIGN(el0_fiq) + kernel_entry 0 +el0_fiq_naked: + el0_interrupt_handler handle_arch_fiq + b ret_to_user +SYM_CODE_END(el0_fiq) + SYM_CODE_START_LOCAL(el1_error) kernel_entry 1 mrs x1, esr_el1 - gic_prio_kentry_setup tmp=x2 enable_dbg mov x0, sp bl do_serror @@ -751,13 +789,12 @@ SYM_CODE_START_LOCAL(el0_error) kernel_entry 0 el0_error_naked: mrs x25, esr_el1 - gic_prio_kentry_setup tmp=x2 user_exit_irqoff enable_dbg mov x0, sp mov x1, x25 bl do_serror - enable_da_f + enable_da b ret_to_user SYM_CODE_END(el0_error) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 062b21f30f94..ad3dd34a83cf 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -180,7 +180,7 @@ static void __get_cpu_fpsimd_context(void) */ static void get_cpu_fpsimd_context(void) { - preempt_disable(); + local_bh_disable(); __get_cpu_fpsimd_context(); } @@ -201,7 +201,7 @@ static void __put_cpu_fpsimd_context(void) static void put_cpu_fpsimd_context(void) { __put_cpu_fpsimd_context(); - preempt_enable(); + local_bh_enable(); } static bool have_cpu_fpsimd_context(void) @@ -285,7 +285,7 @@ static void task_fpsimd_load(void) WARN_ON(!system_supports_fpsimd()); WARN_ON(!have_cpu_fpsimd_context()); - if (system_supports_sve() && test_thread_flag(TIF_SVE)) + if (IS_ENABLED(CONFIG_ARM64_SVE) && test_thread_flag(TIF_SVE)) sve_load_state(sve_pffr(&current->thread), &current->thread.uw.fpsimd_state.fpsr, sve_vq_from_vl(current->thread.sve_vl) - 1); @@ -307,7 +307,8 @@ static void fpsimd_save(void) WARN_ON(!have_cpu_fpsimd_context()); if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { - if (system_supports_sve() && test_thread_flag(TIF_SVE)) { + if (IS_ENABLED(CONFIG_ARM64_SVE) && + test_thread_flag(TIF_SVE)) { if (WARN_ON(sve_get_vl() != last->sve_vl)) { /* * Can't save the user regs, so current would @@ -926,9 +927,8 @@ void fpsimd_release_task(struct task_struct *dead_task) * Trapped SVE access * * Storage is allocated for the full SVE state, the current FPSIMD - * register contents are migrated across, and TIF_SVE is set so that - * the SVE access trap will be disabled the next time this task - * reaches ret_to_user. + * register contents are migrated across, and the access trap is + * disabled. 
* * TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state() * would have disabled the SVE access trap for userspace during @@ -946,15 +946,24 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs) get_cpu_fpsimd_context(); - fpsimd_save(); - - /* Force ret_to_user to reload the registers: */ - fpsimd_flush_task_state(current); - - fpsimd_to_sve(current); if (test_and_set_thread_flag(TIF_SVE)) WARN_ON(1); /* SVE access shouldn't have trapped */ + /* + * Convert the FPSIMD state to SVE, zeroing all the state that + * is not shared with FPSIMD. If (as is likely) the current + * state is live in the registers then do this there and + * update our metadata for the current task including + * disabling the trap, otherwise update our in-memory copy. + */ + if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { + sve_set_vq(sve_vq_from_vl(current->thread.sve_vl) - 1); + sve_flush_live(); + fpsimd_bind_task_to_cpu(); + } else { + fpsimd_to_sve(current); + } + put_cpu_fpsimd_context(); } @@ -1092,7 +1101,7 @@ void fpsimd_preserve_current_state(void) void fpsimd_signal_preserve_current_state(void) { fpsimd_preserve_current_state(); - if (system_supports_sve() && test_thread_flag(TIF_SVE)) + if (test_thread_flag(TIF_SVE)) sve_to_fpsimd(current); } @@ -1181,7 +1190,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state) get_cpu_fpsimd_context(); current->thread.uw.fpsimd_state = *state; - if (system_supports_sve() && test_thread_flag(TIF_SVE)) + if (test_thread_flag(TIF_SVE)) fpsimd_to_sve(current); task_fpsimd_load(); diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index 86a5cf9bc19a..b5d3ddaf69d9 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -55,7 +55,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func) unsigned long pc; u32 new; - pc = (unsigned long)&ftrace_call; + pc = (unsigned long)function_nocfi(ftrace_call); new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, AARCH64_INSN_BRANCH_LINK); diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 840bda1869e9..96873dfa67fd 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -477,14 +477,13 @@ EXPORT_SYMBOL(kimage_vaddr) * booted in EL1 or EL2 respectively. */ SYM_FUNC_START(init_kernel_el) - mov_q x0, INIT_SCTLR_EL1_MMU_OFF - msr sctlr_el1, x0 - mrs x0, CurrentEL cmp x0, #CurrentEL_EL2 b.eq init_el2 SYM_INNER_LABEL(init_el1, SYM_L_LOCAL) + mov_q x0, INIT_SCTLR_EL1_MMU_OFF + msr sctlr_el1, x0 isb mov_q x0, INIT_PSTATE_EL1 msr spsr_el1, x0 @@ -504,9 +503,43 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL) msr vbar_el2, x0 isb + /* + * Fruity CPUs seem to have HCR_EL2.E2H set to RES1, + * making it impossible to start in nVHE mode. Is that + * compliant with the architecture? Absolutely not! + */ + mrs x0, hcr_el2 + and x0, x0, #HCR_E2H + cbz x0, 1f + + /* Switching to VHE requires a sane SCTLR_EL1 as a start */ + mov_q x0, INIT_SCTLR_EL1_MMU_OFF + msr_s SYS_SCTLR_EL12, x0 + + /* + * Force an eret into a helper "function", and let it return + * to our original caller... This makes sure that we have + * initialised the basic PSTATE state. 
+ */ + mov x0, #INIT_PSTATE_EL2 + msr spsr_el1, x0 + adr x0, __cpu_stick_to_vhe + msr elr_el1, x0 + eret + +1: + mov_q x0, INIT_SCTLR_EL1_MMU_OFF + msr sctlr_el1, x0 + msr elr_el2, lr mov w0, #BOOT_CPU_MODE_EL2 eret + +__cpu_stick_to_vhe: + mov x0, #HVC_VHE_RESTART + hvc #0 + mov x0, #BOOT_CPU_MODE_EL2 + ret SYM_FUNC_END(init_kernel_el) /* diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index 5eccbd62fec8..43d212618834 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S @@ -27,12 +27,12 @@ SYM_CODE_START(__hyp_stub_vectors) ventry el2_fiq_invalid // FIQ EL2t ventry el2_error_invalid // Error EL2t - ventry el2_sync_invalid // Synchronous EL2h + ventry elx_sync // Synchronous EL2h ventry el2_irq_invalid // IRQ EL2h ventry el2_fiq_invalid // FIQ EL2h ventry el2_error_invalid // Error EL2h - ventry el1_sync // Synchronous 64-bit EL1 + ventry elx_sync // Synchronous 64-bit EL1 ventry el1_irq_invalid // IRQ 64-bit EL1 ventry el1_fiq_invalid // FIQ 64-bit EL1 ventry el1_error_invalid // Error 64-bit EL1 @@ -45,7 +45,7 @@ SYM_CODE_END(__hyp_stub_vectors) .align 11 -SYM_CODE_START_LOCAL(el1_sync) +SYM_CODE_START_LOCAL(elx_sync) cmp x0, #HVC_SET_VECTORS b.ne 1f msr vbar_el2, x1 @@ -71,7 +71,7 @@ SYM_CODE_START_LOCAL(el1_sync) 9: mov x0, xzr eret -SYM_CODE_END(el1_sync) +SYM_CODE_END(elx_sync) // nVHE? No way! Give me the real thing! SYM_CODE_START_LOCAL(mutate_to_vhe) @@ -115,9 +115,10 @@ SYM_CODE_START_LOCAL(mutate_to_vhe) mrs_s x0, SYS_VBAR_EL12 msr vbar_el1, x0 - // Use EL2 translations for SPE and disable access from EL1 + // Use EL2 translations for SPE & TRBE and disable access from EL1 mrs x0, mdcr_el2 bic x0, x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT) + bic x0, x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT) msr mdcr_el2, x0 // Transfer the MM state from EL1 to EL2 @@ -224,7 +225,6 @@ SYM_FUNC_END(__hyp_reset_vectors) * Entry point to switch to VHE if deemed capable */ SYM_FUNC_START(switch_to_vhe) -#ifdef CONFIG_ARM64_VHE // Need to have booted at EL2 adr_l x1, __boot_cpu_mode ldr w0, [x1] @@ -240,6 +240,5 @@ SYM_FUNC_START(switch_to_vhe) mov x0, #HVC_VHE_RESTART hvc #0 1: -#endif ret SYM_FUNC_END(switch_to_vhe) diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c index 83f1c4b92095..e628c8ce1ffe 100644 --- a/arch/arm64/kernel/idreg-override.c +++ b/arch/arm64/kernel/idreg-override.c @@ -25,14 +25,26 @@ struct ftr_set_desc { struct { char name[FTR_DESC_FIELD_LEN]; u8 shift; + bool (*filter)(u64 val); } fields[]; }; +static bool __init mmfr1_vh_filter(u64 val) +{ + /* + * If we ever reach this point while running VHE, we're + * guaranteed to be on one of these funky, VHE-stuck CPUs. If + * the user was trying to force nVHE on us, proceed with + * attitude adjustment. + */ + return !(is_kernel_in_hyp_mode() && val == 0); +} + static const struct ftr_set_desc mmfr1 __initconst = { .name = "id_aa64mmfr1", .override = &id_aa64mmfr1_override, .fields = { - { "vh", ID_AA64MMFR1_VHE_SHIFT }, + { "vh", ID_AA64MMFR1_VHE_SHIFT, mmfr1_vh_filter }, {} }, }; @@ -124,6 +136,18 @@ static void __init match_options(const char *cmdline) if (find_field(cmdline, regs[i], f, &v)) continue; + /* + * If an override gets filtered out, advertise + * it by setting the value to 0xf, but + * clearing the mask... Yes, this is fragile. 
+ */ + if (regs[i]->fields[f].filter && + !regs[i]->fields[f].filter(v)) { + regs[i]->override->val |= mask; + regs[i]->override->mask &= ~mask; + continue; + } + regs[i]->override->val &= ~mask; regs[i]->override->val |= (v << shift) & mask; regs[i]->override->mask |= mask; diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index 5aa9ed1e9ec6..bcf3c2755370 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -65,13 +65,13 @@ __efistub__ctype = _ctype; KVM_NVHE_ALIAS(kvm_patch_vector_branch); KVM_NVHE_ALIAS(kvm_update_va_mask); KVM_NVHE_ALIAS(kvm_get_kimage_voffset); +KVM_NVHE_ALIAS(kvm_compute_final_ctr_el0); /* Global kernel state accessed by nVHE hyp code. */ KVM_NVHE_ALIAS(kvm_vgic_global_state); /* Kernel symbols used to call panic() from nVHE hyp code (via ERET). */ -KVM_NVHE_ALIAS(__hyp_panic_string); -KVM_NVHE_ALIAS(panic); +KVM_NVHE_ALIAS(nvhe_hyp_panic_handler); /* Vectors installed by hyp-init on reset HVC. */ KVM_NVHE_ALIAS(__hyp_stub_vectors); @@ -104,6 +104,36 @@ KVM_NVHE_ALIAS(kvm_arm_hyp_percpu_base); /* PMU available static key */ KVM_NVHE_ALIAS(kvm_arm_pmu_available); +/* Position-independent library routines */ +KVM_NVHE_ALIAS_HYP(clear_page, __pi_clear_page); +KVM_NVHE_ALIAS_HYP(copy_page, __pi_copy_page); +KVM_NVHE_ALIAS_HYP(memcpy, __pi_memcpy); +KVM_NVHE_ALIAS_HYP(memset, __pi_memset); + +#ifdef CONFIG_KASAN +KVM_NVHE_ALIAS_HYP(__memcpy, __pi_memcpy); +KVM_NVHE_ALIAS_HYP(__memset, __pi_memset); +#endif + +/* Kernel memory sections */ +KVM_NVHE_ALIAS(__start_rodata); +KVM_NVHE_ALIAS(__end_rodata); +KVM_NVHE_ALIAS(__bss_start); +KVM_NVHE_ALIAS(__bss_stop); + +/* Hyp memory sections */ +KVM_NVHE_ALIAS(__hyp_idmap_text_start); +KVM_NVHE_ALIAS(__hyp_idmap_text_end); +KVM_NVHE_ALIAS(__hyp_text_start); +KVM_NVHE_ALIAS(__hyp_text_end); +KVM_NVHE_ALIAS(__hyp_bss_start); +KVM_NVHE_ALIAS(__hyp_bss_end); +KVM_NVHE_ALIAS(__hyp_rodata_start); +KVM_NVHE_ALIAS(__hyp_rodata_end); + +/* pKVM static key */ +KVM_NVHE_ALIAS(kvm_protected_mode_initialized); + #endif /* CONFIG_KVM */ #endif /* __ARM64_KERNEL_IMAGE_VARS_H */ diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index dfb1feab867d..bda49430c9ea 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c @@ -71,13 +71,44 @@ static void init_irq_stacks(void) } #endif +static void default_handle_irq(struct pt_regs *regs) +{ + panic("IRQ taken without a root IRQ handler\n"); +} + +static void default_handle_fiq(struct pt_regs *regs) +{ + panic("FIQ taken without a root FIQ handler\n"); +} + +void (*handle_arch_irq)(struct pt_regs *) __ro_after_init = default_handle_irq; +void (*handle_arch_fiq)(struct pt_regs *) __ro_after_init = default_handle_fiq; + +int __init set_handle_irq(void (*handle_irq)(struct pt_regs *)) +{ + if (handle_arch_irq != default_handle_irq) + return -EBUSY; + + handle_arch_irq = handle_irq; + pr_info("Root IRQ handler: %ps\n", handle_irq); + return 0; +} + +int __init set_handle_fiq(void (*handle_fiq)(struct pt_regs *)) +{ + if (handle_arch_fiq != default_handle_fiq) + return -EBUSY; + + handle_arch_fiq = handle_fiq; + pr_info("Root FIQ handler: %ps\n", handle_fiq); + return 0; +} + void __init init_IRQ(void) { init_irq_stacks(); init_irq_scs(); irqchip_init(); - if (!handle_arch_irq) - panic("No interrupt controller found."); if (system_uses_irq_prio_masking()) { /* diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index 27f8939deb1b..341342b207f6 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c 
@@ -128,15 +128,17 @@ u64 __init kaslr_early_init(void) /* use the top 16 bits to randomize the linear region */ memstart_offset_seed = seed >> 48; - if (IS_ENABLED(CONFIG_KASAN_GENERIC) || - IS_ENABLED(CONFIG_KASAN_SW_TAGS)) + if (!IS_ENABLED(CONFIG_KASAN_VMALLOC) && + (IS_ENABLED(CONFIG_KASAN_GENERIC) || + IS_ENABLED(CONFIG_KASAN_SW_TAGS))) /* - * KASAN does not expect the module region to intersect the - * vmalloc region, since shadow memory is allocated for each - * module at load time, whereas the vmalloc region is shadowed - * by KASAN zero pages. So keep modules out of the vmalloc - * region if KASAN is enabled, and put the kernel well within - * 4 GB of the module region. + * KASAN without KASAN_VMALLOC does not expect the module region + * to intersect the vmalloc region, since shadow memory is + * allocated for each module at load time, whereas the vmalloc + * region is shadowed by KASAN zero pages. So keep modules + * out of the vmalloc region if KASAN is enabled without + * KASAN_VMALLOC, and put the kernel well within 4 GB of the + * module region. */ return offset % SZ_2G; diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index 0cde47a63beb..63634b4d72c1 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -15,23 +15,12 @@ #include <linux/kexec.h> #include <linux/libfdt.h> #include <linux/memblock.h> +#include <linux/of.h> #include <linux/of_fdt.h> -#include <linux/random.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/types.h> #include <linux/vmalloc.h> -#include <asm/byteorder.h> - -/* relevant device tree properties */ -#define FDT_PROP_KEXEC_ELFHDR "linux,elfcorehdr" -#define FDT_PROP_MEM_RANGE "linux,usable-memory-range" -#define FDT_PROP_INITRD_START "linux,initrd-start" -#define FDT_PROP_INITRD_END "linux,initrd-end" -#define FDT_PROP_BOOTARGS "bootargs" -#define FDT_PROP_KASLR_SEED "kaslr-seed" -#define FDT_PROP_RNG_SEED "rng-seed" -#define RNG_SEED_SIZE 128 const struct kexec_file_ops * const kexec_file_loaders[] = { &kexec_image_ops, @@ -40,174 +29,16 @@ const struct kexec_file_ops * const kexec_file_loaders[] = { int arch_kimage_file_post_load_cleanup(struct kimage *image) { - vfree(image->arch.dtb); + kvfree(image->arch.dtb); image->arch.dtb = NULL; - vfree(image->arch.elf_headers); - image->arch.elf_headers = NULL; - image->arch.elf_headers_sz = 0; + vfree(image->elf_headers); + image->elf_headers = NULL; + image->elf_headers_sz = 0; return kexec_image_post_load_cleanup_default(image); } -static int setup_dtb(struct kimage *image, - unsigned long initrd_load_addr, unsigned long initrd_len, - char *cmdline, void *dtb) -{ - int off, ret; - - ret = fdt_path_offset(dtb, "/chosen"); - if (ret < 0) - goto out; - - off = ret; - - ret = fdt_delprop(dtb, off, FDT_PROP_KEXEC_ELFHDR); - if (ret && ret != -FDT_ERR_NOTFOUND) - goto out; - ret = fdt_delprop(dtb, off, FDT_PROP_MEM_RANGE); - if (ret && ret != -FDT_ERR_NOTFOUND) - goto out; - - if (image->type == KEXEC_TYPE_CRASH) { - /* add linux,elfcorehdr */ - ret = fdt_appendprop_addrrange(dtb, 0, off, - FDT_PROP_KEXEC_ELFHDR, - image->arch.elf_headers_mem, - image->arch.elf_headers_sz); - if (ret) - return (ret == -FDT_ERR_NOSPACE ? -ENOMEM : -EINVAL); - - /* add linux,usable-memory-range */ - ret = fdt_appendprop_addrrange(dtb, 0, off, - FDT_PROP_MEM_RANGE, - crashk_res.start, - crashk_res.end - crashk_res.start + 1); - if (ret) - return (ret == -FDT_ERR_NOSPACE ? 
-ENOMEM : -EINVAL); - } - - /* add bootargs */ - if (cmdline) { - ret = fdt_setprop_string(dtb, off, FDT_PROP_BOOTARGS, cmdline); - if (ret) - goto out; - } else { - ret = fdt_delprop(dtb, off, FDT_PROP_BOOTARGS); - if (ret && (ret != -FDT_ERR_NOTFOUND)) - goto out; - } - - /* add initrd-* */ - if (initrd_load_addr) { - ret = fdt_setprop_u64(dtb, off, FDT_PROP_INITRD_START, - initrd_load_addr); - if (ret) - goto out; - - ret = fdt_setprop_u64(dtb, off, FDT_PROP_INITRD_END, - initrd_load_addr + initrd_len); - if (ret) - goto out; - } else { - ret = fdt_delprop(dtb, off, FDT_PROP_INITRD_START); - if (ret && (ret != -FDT_ERR_NOTFOUND)) - goto out; - - ret = fdt_delprop(dtb, off, FDT_PROP_INITRD_END); - if (ret && (ret != -FDT_ERR_NOTFOUND)) - goto out; - } - - /* add kaslr-seed */ - ret = fdt_delprop(dtb, off, FDT_PROP_KASLR_SEED); - if (ret == -FDT_ERR_NOTFOUND) - ret = 0; - else if (ret) - goto out; - - if (rng_is_initialized()) { - u64 seed = get_random_u64(); - ret = fdt_setprop_u64(dtb, off, FDT_PROP_KASLR_SEED, seed); - if (ret) - goto out; - } else { - pr_notice("RNG is not initialised: omitting \"%s\" property\n", - FDT_PROP_KASLR_SEED); - } - - /* add rng-seed */ - if (rng_is_initialized()) { - void *rng_seed; - ret = fdt_setprop_placeholder(dtb, off, FDT_PROP_RNG_SEED, - RNG_SEED_SIZE, &rng_seed); - if (ret) - goto out; - get_random_bytes(rng_seed, RNG_SEED_SIZE); - } else { - pr_notice("RNG is not initialised: omitting \"%s\" property\n", - FDT_PROP_RNG_SEED); - } - -out: - if (ret) - return (ret == -FDT_ERR_NOSPACE) ? -ENOMEM : -EINVAL; - - return 0; -} - -/* - * More space needed so that we can add initrd, bootargs, kaslr-seed, - * rng-seed, userable-memory-range and elfcorehdr. - */ -#define DTB_EXTRA_SPACE 0x1000 - -static int create_dtb(struct kimage *image, - unsigned long initrd_load_addr, unsigned long initrd_len, - char *cmdline, void **dtb) -{ - void *buf; - size_t buf_size; - size_t cmdline_len; - int ret; - - cmdline_len = cmdline ? 
strlen(cmdline) : 0; - buf_size = fdt_totalsize(initial_boot_params) - + cmdline_len + DTB_EXTRA_SPACE; - - for (;;) { - buf = vmalloc(buf_size); - if (!buf) - return -ENOMEM; - - /* duplicate a device tree blob */ - ret = fdt_open_into(initial_boot_params, buf, buf_size); - if (ret) { - vfree(buf); - return -EINVAL; - } - - ret = setup_dtb(image, initrd_load_addr, initrd_len, - cmdline, buf); - if (ret) { - vfree(buf); - if (ret == -ENOMEM) { - /* unlikely, but just in case */ - buf_size += DTB_EXTRA_SPACE; - continue; - } else { - return ret; - } - } - - /* trim it */ - fdt_pack(buf); - *dtb = buf; - - return 0; - } -} - static int prepare_elf_headers(void **addr, unsigned long *sz) { struct crash_mem *cmem; @@ -284,12 +115,12 @@ int load_other_segments(struct kimage *image, vfree(headers); goto out_err; } - image->arch.elf_headers = headers; - image->arch.elf_headers_mem = kbuf.mem; - image->arch.elf_headers_sz = headers_sz; + image->elf_headers = headers; + image->elf_load_addr = kbuf.mem; + image->elf_headers_sz = headers_sz; pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n", - image->arch.elf_headers_mem, kbuf.bufsz, kbuf.memsz); + image->elf_load_addr, kbuf.bufsz, kbuf.memsz); } /* load initrd */ @@ -314,12 +145,15 @@ int load_other_segments(struct kimage *image, } /* load dtb */ - ret = create_dtb(image, initrd_load_addr, initrd_len, cmdline, &dtb); - if (ret) { + dtb = of_kexec_alloc_and_setup_fdt(image, initrd_load_addr, + initrd_len, cmdline, 0); + if (!dtb) { pr_err("Preparing for new dtb failed\n"); goto out_err; } + /* trim it */ + fdt_pack(dtb); dtb_len = fdt_totalsize(dtb); kbuf.buffer = dtb; kbuf.bufsz = dtb_len; @@ -343,6 +177,6 @@ int load_other_segments(struct kimage *image, out_err: image->nr_segments = orig_segments; - vfree(dtb); + kvfree(dtb); return ret; } diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index fe21e0f06492..b5ec010c481f 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -40,14 +40,16 @@ void *module_alloc(unsigned long size) NUMA_NO_NODE, __builtin_return_address(0)); if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && - !IS_ENABLED(CONFIG_KASAN_GENERIC) && - !IS_ENABLED(CONFIG_KASAN_SW_TAGS)) + (IS_ENABLED(CONFIG_KASAN_VMALLOC) || + (!IS_ENABLED(CONFIG_KASAN_GENERIC) && + !IS_ENABLED(CONFIG_KASAN_SW_TAGS)))) /* - * KASAN can only deal with module allocations being served - * from the reserved module region, since the remainder of - * the vmalloc region is already backed by zero shadow pages, - * and punching holes into it is non-trivial. Since the module - * region is not randomized when KASAN is enabled, it is even + * KASAN without KASAN_VMALLOC can only deal with module + * allocations being served from the reserved module region, + * since the remainder of the vmalloc region is already + * backed by zero shadow pages, and punching holes into it + * is non-trivial. Since the module region is not randomized + * when KASAN is enabled without KASAN_VMALLOC, it is even * less likely that the module region gets exhausted, so we * can simply omit this fallback in that case. */ diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index b3c70a612c7a..125a10e413e9 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -26,6 +26,12 @@ u64 gcr_kernel_excl __ro_after_init; static bool report_fault_once = true; +#ifdef CONFIG_KASAN_HW_TAGS +/* Whether the MTE asynchronous mode is enabled. 
*/ +DEFINE_STATIC_KEY_FALSE(mte_async_mode); +EXPORT_SYMBOL_GPL(mte_async_mode); +#endif + static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap) { pte_t old_pte = READ_ONCE(*ptep); @@ -107,13 +113,45 @@ void mte_init_tags(u64 max_tag) write_sysreg_s(SYS_GCR_EL1_RRND | gcr_kernel_excl, SYS_GCR_EL1); } -void mte_enable_kernel(void) +static inline void __mte_enable_kernel(const char *mode, unsigned long tcf) { /* Enable MTE Sync Mode for EL1. */ - sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_SYNC); + sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, tcf); isb(); + + pr_info_once("MTE: enabled in %s mode at EL1\n", mode); +} + +#ifdef CONFIG_KASAN_HW_TAGS +void mte_enable_kernel_sync(void) +{ + /* + * Make sure we enter this function when no PE has set + * async mode previously. + */ + WARN_ONCE(system_uses_mte_async_mode(), + "MTE async mode enabled system wide!"); + + __mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC); } +void mte_enable_kernel_async(void) +{ + __mte_enable_kernel("asynchronous", SCTLR_ELx_TCF_ASYNC); + + /* + * MTE async mode is set system wide by the first PE that + * executes this function. + * + * Note: If in future KASAN acquires a runtime switching + * mode in between sync and async, this strategy needs + * to be reviewed. + */ + if (!system_uses_mte_async_mode()) + static_branch_enable(&mte_async_mode); +} +#endif + void mte_set_report_once(bool state) { WRITE_ONCE(report_fault_once, state); @@ -124,25 +162,28 @@ bool mte_report_once(void) return READ_ONCE(report_fault_once); } -static void update_sctlr_el1_tcf0(u64 tcf0) +#ifdef CONFIG_KASAN_HW_TAGS +void mte_check_tfsr_el1(void) { - /* ISB required for the kernel uaccess routines */ - sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0); - isb(); -} + u64 tfsr_el1; -static void set_sctlr_el1_tcf0(u64 tcf0) -{ - /* - * mte_thread_switch() checks current->thread.sctlr_tcf0 as an - * optimisation. Disable preemption so that it does not see - * the variable update before the SCTLR_EL1.TCF0 one. - */ - preempt_disable(); - current->thread.sctlr_tcf0 = tcf0; - update_sctlr_el1_tcf0(tcf0); - preempt_enable(); + if (!system_supports_mte()) + return; + + tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1); + + if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) { + /* + * Note: isb() is not required after this direct write + * because there is no indirect read subsequent to it + * (per ARM DDI 0487F.c table D13-1). + */ + write_sysreg_s(0, SYS_TFSR_EL1); + + kasan_report_async(); + } } +#endif static void update_gcr_el1_excl(u64 excl) { @@ -166,7 +207,7 @@ static void set_gcr_el1_excl(u64 excl) */ } -void flush_mte_state(void) +void mte_thread_init_user(void) { if (!system_supports_mte()) return; @@ -176,19 +217,39 @@ void flush_mte_state(void) write_sysreg_s(0, SYS_TFSRE0_EL1); clear_thread_flag(TIF_MTE_ASYNC_FAULT); /* disable tag checking */ - set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE); + set_task_sctlr_el1((current->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK) | + SCTLR_EL1_TCF0_NONE); /* reset tag generation mask */ set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK); } void mte_thread_switch(struct task_struct *next) { + /* + * Check if an async tag exception occurred at EL1. + * + * Note: On the context switch path we rely on the dsb() present + * in __switch_to() to guarantee that the indirect writes to TFSR_EL1 + * are synchronized before this point. 
+ */ + isb(); + mte_check_tfsr_el1(); +} + +void mte_suspend_enter(void) +{ if (!system_supports_mte()) return; - /* avoid expensive SCTLR_EL1 accesses if no change */ - if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0) - update_sctlr_el1_tcf0(next->thread.sctlr_tcf0); + /* + * The barriers are required to guarantee that the indirect writes + * to TFSR_EL1 are synchronized before we report the state. + */ + dsb(nsh); + isb(); + + /* Report SYS_TFSR_EL1 before suspend entry */ + mte_check_tfsr_el1(); } void mte_suspend_exit(void) @@ -201,7 +262,7 @@ void mte_suspend_exit(void) long set_mte_ctrl(struct task_struct *task, unsigned long arg) { - u64 tcf0; + u64 sctlr = task->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK; u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) & SYS_GCR_EL1_EXCL_MASK; @@ -210,23 +271,23 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg) switch (arg & PR_MTE_TCF_MASK) { case PR_MTE_TCF_NONE: - tcf0 = SCTLR_EL1_TCF0_NONE; + sctlr |= SCTLR_EL1_TCF0_NONE; break; case PR_MTE_TCF_SYNC: - tcf0 = SCTLR_EL1_TCF0_SYNC; + sctlr |= SCTLR_EL1_TCF0_SYNC; break; case PR_MTE_TCF_ASYNC: - tcf0 = SCTLR_EL1_TCF0_ASYNC; + sctlr |= SCTLR_EL1_TCF0_ASYNC; break; default: return -EINVAL; } if (task != current) { - task->thread.sctlr_tcf0 = tcf0; + task->thread.sctlr_user = sctlr; task->thread.gcr_user_excl = gcr_excl; } else { - set_sctlr_el1_tcf0(tcf0); + set_task_sctlr_el1(sctlr); set_gcr_el1_excl(gcr_excl); } @@ -243,7 +304,7 @@ long get_mte_ctrl(struct task_struct *task) ret = incl << PR_MTE_TAG_SHIFT; - switch (task->thread.sctlr_tcf0) { + switch (task->thread.sctlr_user & SCTLR_EL1_TCF0_MASK) { case SCTLR_EL1_TCF0_NONE: ret |= PR_MTE_TCF_NONE; break; diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c index c07d7a034941..75fed4460407 100644 --- a/arch/arm64/kernel/paravirt.c +++ b/arch/arm64/kernel/paravirt.c @@ -18,6 +18,7 @@ #include <linux/reboot.h> #include <linux/slab.h> #include <linux/types.h> +#include <linux/static_call.h> #include <asm/paravirt.h> #include <asm/pvclock-abi.h> @@ -26,8 +27,12 @@ struct static_key paravirt_steal_enabled; struct static_key paravirt_steal_rq_enabled; -struct paravirt_patch_template pv_ops; -EXPORT_SYMBOL_GPL(pv_ops); +static u64 native_steal_clock(int cpu) +{ + return 0; +} + +DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); struct pv_time_stolen_time_region { struct pvclock_vcpu_stolen_time *kaddr; @@ -45,7 +50,7 @@ static int __init parse_no_stealacc(char *arg) early_param("no-steal-acc", parse_no_stealacc); /* return stolen time in ns by asking the hypervisor */ -static u64 pv_steal_clock(int cpu) +static u64 para_steal_clock(int cpu) { struct pv_time_stolen_time_region *reg; @@ -150,7 +155,7 @@ int __init pv_time_init(void) if (ret) return ret; - pv_ops.time.steal_clock = pv_steal_clock; + static_call_update(pv_steal_clock, para_steal_clock); static_key_slow_inc(¶virt_steal_enabled); if (steal_acc) diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 4658fcf88c2b..f594957e29bd 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -470,9 +470,8 @@ static inline u64 armv8pmu_read_evcntr(int idx) static inline u64 armv8pmu_read_hw_counter(struct perf_event *event) { int idx = event->hw.idx; - u64 val = 0; + u64 val = armv8pmu_read_evcntr(idx); - val = armv8pmu_read_evcntr(idx); if (armv8pmu_event_is_chained(event)) val = (val << 32) | armv8pmu_read_evcntr(idx - 1); return val; @@ -520,7 +519,7 @@ static u64 
armv8pmu_read_counter(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; - u64 value = 0; + u64 value; if (idx == ARMV8_IDX_CYCLE_COUNTER) value = read_sysreg(pmccntr_el0); diff --git a/arch/arm64/kernel/pointer_auth.c b/arch/arm64/kernel/pointer_auth.c index adb955fd9bdd..60901ab0a7fe 100644 --- a/arch/arm64/kernel/pointer_auth.c +++ b/arch/arm64/kernel/pointer_auth.c @@ -43,6 +43,69 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg) get_random_bytes(&keys->apdb, sizeof(keys->apdb)); if (arg & PR_PAC_APGAKEY) get_random_bytes(&keys->apga, sizeof(keys->apga)); + ptrauth_keys_install_user(keys); return 0; } + +static u64 arg_to_enxx_mask(unsigned long arg) +{ + u64 sctlr_enxx_mask = 0; + + WARN_ON(arg & ~PR_PAC_ENABLED_KEYS_MASK); + if (arg & PR_PAC_APIAKEY) + sctlr_enxx_mask |= SCTLR_ELx_ENIA; + if (arg & PR_PAC_APIBKEY) + sctlr_enxx_mask |= SCTLR_ELx_ENIB; + if (arg & PR_PAC_APDAKEY) + sctlr_enxx_mask |= SCTLR_ELx_ENDA; + if (arg & PR_PAC_APDBKEY) + sctlr_enxx_mask |= SCTLR_ELx_ENDB; + return sctlr_enxx_mask; +} + +int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys, + unsigned long enabled) +{ + u64 sctlr = tsk->thread.sctlr_user; + + if (!system_supports_address_auth()) + return -EINVAL; + + if (is_compat_thread(task_thread_info(tsk))) + return -EINVAL; + + if ((keys & ~PR_PAC_ENABLED_KEYS_MASK) || (enabled & ~keys)) + return -EINVAL; + + sctlr &= ~arg_to_enxx_mask(keys); + sctlr |= arg_to_enxx_mask(enabled); + if (tsk == current) + set_task_sctlr_el1(sctlr); + else + tsk->thread.sctlr_user = sctlr; + + return 0; +} + +int ptrauth_get_enabled_keys(struct task_struct *tsk) +{ + int retval = 0; + + if (!system_supports_address_auth()) + return -EINVAL; + + if (is_compat_thread(task_thread_info(tsk))) + return -EINVAL; + + if (tsk->thread.sctlr_user & SCTLR_ELx_ENIA) + retval |= PR_PAC_APIAKEY; + if (tsk->thread.sctlr_user & SCTLR_ELx_ENIB) + retval |= PR_PAC_APIBKEY; + if (tsk->thread.sctlr_user & SCTLR_ELx_ENDA) + retval |= PR_PAC_APDAKEY; + if (tsk->thread.sctlr_user & SCTLR_ELx_ENDB) + retval |= PR_PAC_APDBKEY; + + return retval; +} diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 66aac2881ba8..d607c9912025 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -264,13 +264,14 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr) * normal page fault. 
*/ instruction_pointer_set(regs, (unsigned long) cur->addr); - if (!instruction_pointer(regs)) - BUG(); + BUG_ON(!instruction_pointer(regs)); - if (kcb->kprobe_status == KPROBE_REENTER) + if (kcb->kprobe_status == KPROBE_REENTER) { restore_previous_kprobe(kcb); - else + } else { + kprobes_restore_local_irqflag(kcb, regs); reset_current_kprobe(); + } break; case KPROBE_HIT_ACTIVE: diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 325c83b1a24d..b4bb67f17a2c 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -57,6 +57,8 @@ #include <asm/processor.h> #include <asm/pointer_auth.h> #include <asm/stacktrace.h> +#include <asm/switch_to.h> +#include <asm/system_misc.h> #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) #include <linux/stackprotector.h> @@ -84,7 +86,7 @@ static void noinstr __cpu_do_idle_irqprio(void) unsigned long daif_bits; daif_bits = read_sysreg(daif); - write_sysreg(daif_bits | PSR_I_BIT, daif); + write_sysreg(daif_bits | PSR_I_BIT | PSR_F_BIT, daif); /* * Unmask PMR before going idle to make sure interrupts can @@ -292,13 +294,10 @@ void __show_regs(struct pt_regs *regs) i = top_reg; while (i >= 0) { - printk("x%-2d: %016llx ", i, regs->regs[i]); - i--; + printk("x%-2d: %016llx", i, regs->regs[i]); - if (i % 2 == 0) { - pr_cont("x%-2d: %016llx ", i, regs->regs[i]); - i--; - } + while (i-- % 3) + pr_cont(" x%-2d: %016llx", i, regs->regs[i]); pr_cont("\n"); } @@ -339,7 +338,6 @@ void flush_thread(void) tls_thread_flush(); flush_ptrace_hw_breakpoint(current); flush_tagged_addr_state(); - flush_mte_state(); } void release_thread(struct task_struct *dead_task) @@ -529,6 +527,31 @@ static void erratum_1418040_thread_switch(struct task_struct *prev, write_sysreg(val, cntkctl_el1); } +static void update_sctlr_el1(u64 sctlr) +{ + /* + * EnIA must not be cleared while in the kernel as this is necessary for + * in-kernel PAC. It will be cleared on kernel exit if needed. + */ + sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr); + + /* ISB required for the kernel uaccess routines when setting TCF0. */ + isb(); +} + +void set_task_sctlr_el1(u64 sctlr) +{ + /* + * __switch_to() checks current->thread.sctlr as an + * optimisation. Disable preemption so that it does not see + * the variable update before the SCTLR_EL1 one. + */ + preempt_disable(); + current->thread.sctlr_user = sctlr; + update_sctlr_el1(sctlr); + preempt_enable(); +} + /* * Thread switching. */ @@ -544,6 +567,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, entry_task_switch(next); ssbs_thread_switch(next); erratum_1418040_thread_switch(prev, next); + ptrauth_thread_switch_user(next); /* * Complete any pending TLB or cache maintenance on this CPU in case @@ -559,6 +583,9 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, * registers. */ mte_thread_switch(next); + /* avoid expensive SCTLR_EL1 accesses if no change */ + if (prev->thread.sctlr_user != next->thread.sctlr_user) + update_sctlr_el1(next->thread.sctlr_user); /* the actual thread switch */ last = cpu_switch_to(prev, next); @@ -608,7 +635,8 @@ void arch_setup_new_exec(void) { current->mm->context.flags = is_compat_task() ? 
MMCF_AARCH32 : 0; - ptrauth_thread_init_user(current); + ptrauth_thread_init_user(); + mte_thread_init_user(); if (task_spec_ssb_noexec(current)) { arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS, diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index 62d2bda7adb8..ab7f4c476104 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -38,7 +38,8 @@ static int __init cpu_psci_cpu_prepare(unsigned int cpu) static int cpu_psci_cpu_boot(unsigned int cpu) { - int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa_symbol(secondary_entry)); + phys_addr_t pa_secondary_entry = __pa_symbol(function_nocfi(secondary_entry)); + int err = psci_ops.cpu_on(cpu_logical_map(cpu), pa_secondary_entry); if (err) pr_err("failed to boot CPU%d (%d)\n", cpu, err); diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 170f42fd6101..eb2f73939b7b 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -909,6 +909,38 @@ static int pac_mask_get(struct task_struct *target, return membuf_write(&to, &uregs, sizeof(uregs)); } +static int pac_enabled_keys_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + long enabled_keys = ptrauth_get_enabled_keys(target); + + if (IS_ERR_VALUE(enabled_keys)) + return enabled_keys; + + return membuf_write(&to, &enabled_keys, sizeof(enabled_keys)); +} + +static int pac_enabled_keys_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + long enabled_keys = ptrauth_get_enabled_keys(target); + + if (IS_ERR_VALUE(enabled_keys)) + return enabled_keys; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0, + sizeof(long)); + if (ret) + return ret; + + return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK, + enabled_keys); +} + #ifdef CONFIG_CHECKPOINT_RESTORE static __uint128_t pac_key_to_user(const struct ptrauth_key *key) { @@ -1074,6 +1106,7 @@ enum aarch64_regset { #endif #ifdef CONFIG_ARM64_PTR_AUTH REGSET_PAC_MASK, + REGSET_PAC_ENABLED_KEYS, #ifdef CONFIG_CHECKPOINT_RESTORE REGSET_PACA_KEYS, REGSET_PACG_KEYS, @@ -1160,6 +1193,14 @@ static const struct user_regset aarch64_regsets[] = { .regset_get = pac_mask_get, /* this cannot be set dynamically */ }, + [REGSET_PAC_ENABLED_KEYS] = { + .core_note_type = NT_ARM_PAC_ENABLED_KEYS, + .n = 1, + .size = sizeof(long), + .align = sizeof(long), + .regset_get = pac_enabled_keys_get, + .set = pac_enabled_keys_set, + }, #ifdef CONFIG_CHECKPOINT_RESTORE [REGSET_PACA_KEYS] = { .core_note_type = NT_ARM_PACA_KEYS, diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index 5bfd9b87f85d..4ea9392f86e0 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -134,7 +134,7 @@ SYM_FUNC_START(_cpu_resume) */ bl cpu_do_resume -#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK +#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK) mov x0, sp bl kasan_unpoison_task_stack_below #endif diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 357590beaabb..dcd7041b2b07 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -188,6 +188,7 @@ static void init_gic_priority_masking(void) cpuflags = read_sysreg(daif); WARN_ON(!(cpuflags & PSR_I_BIT)); + WARN_ON(!(cpuflags & PSR_F_BIT)); gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); } diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c index 056772c26098..c45a83512805 
100644 --- a/arch/arm64/kernel/smp_spin_table.c +++ b/arch/arm64/kernel/smp_spin_table.c @@ -66,6 +66,7 @@ static int smp_spin_table_cpu_init(unsigned int cpu) static int smp_spin_table_cpu_prepare(unsigned int cpu) { __le64 __iomem *release_addr; + phys_addr_t pa_holding_pen = __pa_symbol(function_nocfi(secondary_holding_pen)); if (!cpu_release_addr[cpu]) return -ENODEV; @@ -88,7 +89,7 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu) * boot-loader's endianness before jumping. This is mandated by * the boot protocol. */ - writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr); + writeq_relaxed(pa_holding_pen, release_addr); __flush_dcache_area((__force void *)release_addr, sizeof(*release_addr)); diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index ad20981dfda4..de07147a7926 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -32,6 +32,30 @@ * add sp, sp, #0x10 */ + +void start_backtrace(struct stackframe *frame, unsigned long fp, + unsigned long pc) +{ + frame->fp = fp; + frame->pc = pc; +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + frame->graph = 0; +#endif + + /* + * Prime the first unwind. + * + * In unwind_frame() we'll check that the FP points to a valid stack, + * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be + * treated as a transition to whichever stack that happens to be. The + * prev_fp value won't be used, but we set it to 0 such that it is + * definitely not an accessible stack address. + */ + bitmap_zero(frame->stacks_done, __NR_STACK_TYPES); + frame->prev_fp = 0; + frame->prev_type = STACK_TYPE_UNKNOWN; +} + /* * Unwind from one frame record (A) to the next frame record (B). * @@ -44,10 +68,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) unsigned long fp = frame->fp; struct stack_info info; - /* Terminal record; nothing to unwind */ - if (!fp) - return -ENOENT; - if (fp & 0xf) return -EINVAL; @@ -108,6 +128,12 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) frame->pc = ptrauth_strip_insn_pac(frame->pc); + /* + * This is a terminal record, so we have finished unwinding. 
+ */ + if (!frame->fp && !frame->pc) + return -ENOENT; + return 0; } NOKPROBE_SYMBOL(unwind_frame); @@ -194,8 +220,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) #ifdef CONFIG_STACKTRACE -void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, - struct task_struct *task, struct pt_regs *regs) +noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, + void *cookie, struct task_struct *task, + struct pt_regs *regs) { struct stackframe frame; @@ -203,8 +230,8 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, start_backtrace(&frame, regs->regs[29], regs->pc); else if (task == current) start_backtrace(&frame, - (unsigned long)__builtin_frame_address(0), - (unsigned long)arch_stack_walk); + (unsigned long)__builtin_frame_address(1), + (unsigned long)__builtin_return_address(0)); else start_backtrace(&frame, thread_saved_fp(task), thread_saved_pc(task)); diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index d7564891ffe1..e3f72df9509d 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -74,8 +74,9 @@ void notrace __cpu_suspend_exit(void) */ spectre_v4_enable_mitigation(NULL); - /* Restore additional MTE-specific configuration */ + /* Restore additional feature-specific configuration */ mte_suspend_exit(); + ptrauth_suspend_exit(); } /* @@ -91,6 +92,9 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) unsigned long flags; struct sleep_stack_data state; + /* Report any MTE async fault before going to suspend */ + mte_suspend_enter(); + /* * From this point debug exceptions are disabled to prevent * updates to mdscr register (saved and restored along with diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c index b9cf12b271d7..263d6c1a525f 100644 --- a/arch/arm64/kernel/syscall.c +++ b/arch/arm64/kernel/syscall.c @@ -5,6 +5,7 @@ #include <linux/errno.h> #include <linux/nospec.h> #include <linux/ptrace.h> +#include <linux/randomize_kstack.h> #include <linux/syscalls.h> #include <asm/daifflags.h> @@ -43,6 +44,8 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno, { long ret; + add_random_kstack_offset(); + if (scno < sc_nr) { syscall_fn_t syscall_fn; syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)]; @@ -55,6 +58,19 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno, ret = lower_32_bits(ret); regs->regs[0] = ret; + + /* + * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(), + * but not enough for arm64 stack utilization comfort. To keep + * reasonable stack head room, reduce the maximum offset to 9 bits. + * + * The actual entropy will be further reduced by the compiler when + * applying stack alignment constraints: the AAPCS mandates a + * 16-byte (i.e. 4-bit) aligned SP at function boundaries. + * + * The resulting 5 bits of entropy is seen in SP[8:4]. 
+ */ + choose_random_kstack_offset(get_random_int() & 0x1FF); } static inline bool has_syscall_work(unsigned long flags) diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index e08a4126453a..4dd14a6620c1 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -199,12 +199,47 @@ static int freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate) return 0; } -static DEFINE_STATIC_KEY_FALSE(amu_fie_key); -#define amu_freq_invariant() static_branch_unlikely(&amu_fie_key) +static void amu_scale_freq_tick(void) +{ + u64 prev_core_cnt, prev_const_cnt; + u64 core_cnt, const_cnt, scale; + + prev_const_cnt = this_cpu_read(arch_const_cycles_prev); + prev_core_cnt = this_cpu_read(arch_core_cycles_prev); + + update_freq_counters_refs(); + + const_cnt = this_cpu_read(arch_const_cycles_prev); + core_cnt = this_cpu_read(arch_core_cycles_prev); + + if (unlikely(core_cnt <= prev_core_cnt || + const_cnt <= prev_const_cnt)) + return; + + /* + * /\core arch_max_freq_scale + * scale = ------- * -------------------- + * /\const SCHED_CAPACITY_SCALE + * + * See validate_cpu_freq_invariance_counters() for details on + * arch_max_freq_scale and the use of SCHED_CAPACITY_SHIFT. + */ + scale = core_cnt - prev_core_cnt; + scale *= this_cpu_read(arch_max_freq_scale); + scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT, + const_cnt - prev_const_cnt); + + scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE); + this_cpu_write(arch_freq_scale, (unsigned long)scale); +} + +static struct scale_freq_data amu_sfd = { + .source = SCALE_FREQ_SOURCE_ARCH, + .set_freq_scale = amu_scale_freq_tick, +}; static void amu_fie_setup(const struct cpumask *cpus) { - bool invariant; int cpu; /* We are already set since the last insmod of cpufreq driver */ @@ -221,25 +256,10 @@ static void amu_fie_setup(const struct cpumask *cpus) cpumask_or(amu_fie_cpus, amu_fie_cpus, cpus); - invariant = topology_scale_freq_invariant(); - - /* We aren't fully invariant yet */ - if (!invariant && !cpumask_equal(amu_fie_cpus, cpu_present_mask)) - return; - - static_branch_enable(&amu_fie_key); + topology_set_scale_freq_source(&amu_sfd, amu_fie_cpus); pr_debug("CPUs[%*pbl]: counters will be used for FIE.", cpumask_pr_args(cpus)); - - /* - * Task scheduler behavior depends on frequency invariance support, - * either cpufreq or counter driven. If the support status changes as - * a result of counter initialisation and use, retrigger the build of - * scheduling domains to ensure the information is propagated properly. - */ - if (!invariant) - rebuild_sched_domains_energy(); } static int init_amu_fie_callback(struct notifier_block *nb, unsigned long val, @@ -256,8 +276,8 @@ static int init_amu_fie_callback(struct notifier_block *nb, unsigned long val, * initialized AMU support and enabled invariance. The AMU counters will * keep on working just fine in the absence of the cpufreq driver, and * for the CPUs for which there are no counters available, the last set - * value of freq_scale will remain valid as that is the frequency those - * CPUs are running at. + * value of arch_freq_scale will remain valid as that is the frequency + * those CPUs are running at. 
*/ return 0; @@ -283,53 +303,6 @@ static int __init init_amu_fie(void) } core_initcall(init_amu_fie); -bool arch_freq_counters_available(const struct cpumask *cpus) -{ - return amu_freq_invariant() && - cpumask_subset(cpus, amu_fie_cpus); -} - -void topology_scale_freq_tick(void) -{ - u64 prev_core_cnt, prev_const_cnt; - u64 core_cnt, const_cnt, scale; - int cpu = smp_processor_id(); - - if (!amu_freq_invariant()) - return; - - if (!cpumask_test_cpu(cpu, amu_fie_cpus)) - return; - - prev_const_cnt = this_cpu_read(arch_const_cycles_prev); - prev_core_cnt = this_cpu_read(arch_core_cycles_prev); - - update_freq_counters_refs(); - - const_cnt = this_cpu_read(arch_const_cycles_prev); - core_cnt = this_cpu_read(arch_core_cycles_prev); - - if (unlikely(core_cnt <= prev_core_cnt || - const_cnt <= prev_const_cnt)) - return; - - /* - * /\core arch_max_freq_scale - * scale = ------- * -------------------- - * /\const SCHED_CAPACITY_SCALE - * - * See validate_cpu_freq_invariance_counters() for details on - * arch_max_freq_scale and the use of SCHED_CAPACITY_SHIFT. - */ - scale = core_cnt - prev_core_cnt; - scale *= this_cpu_read(arch_max_freq_scale); - scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT, - const_cnt - prev_const_cnt); - - scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE); - this_cpu_write(freq_scale, (unsigned long)scale); -} - #ifdef CONFIG_ACPI_CPPC_LIB #include <acpi/cppc_acpi.h> diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index cee5d04ea9ad..a61fc4f989b3 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -86,7 +86,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm, return 0; } -static int __vdso_init(enum vdso_abi abi) +static int __init __vdso_init(enum vdso_abi abi) { int i; struct page **vdso_pagelist; @@ -271,6 +271,14 @@ enum aarch32_map { static struct page *aarch32_vectors_page __ro_after_init; static struct page *aarch32_sig_page __ro_after_init; +static int aarch32_sigpage_mremap(const struct vm_special_mapping *sm, + struct vm_area_struct *new_vma) +{ + current->mm->context.sigpage = (void *)new_vma->vm_start; + + return 0; +} + static struct vm_special_mapping aarch32_vdso_maps[] = { [AA32_MAP_VECTORS] = { .name = "[vectors]", /* ABI */ @@ -279,6 +287,7 @@ static struct vm_special_mapping aarch32_vdso_maps[] = { [AA32_MAP_SIGPAGE] = { .name = "[sigpage]", /* ABI */ .pages = &aarch32_sig_page, + .mremap = aarch32_sigpage_mremap, }, [AA32_MAP_VVAR] = { .name = "[vvar]", @@ -299,34 +308,35 @@ static int aarch32_alloc_kuser_vdso_page(void) if (!IS_ENABLED(CONFIG_KUSER_HELPERS)) return 0; - vdso_page = get_zeroed_page(GFP_ATOMIC); + vdso_page = get_zeroed_page(GFP_KERNEL); if (!vdso_page) return -ENOMEM; memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start, kuser_sz); aarch32_vectors_page = virt_to_page(vdso_page); - flush_dcache_page(aarch32_vectors_page); return 0; } +#define COMPAT_SIGPAGE_POISON_WORD 0xe7fddef1 static int aarch32_alloc_sigpage(void) { extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[]; int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start; - unsigned long sigpage; + __le32 poison = cpu_to_le32(COMPAT_SIGPAGE_POISON_WORD); + void *sigpage; - sigpage = get_zeroed_page(GFP_ATOMIC); + sigpage = (void *)__get_free_page(GFP_KERNEL); if (!sigpage) return -ENOMEM; - memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz); + memset32(sigpage, (__force u32)poison, PAGE_SIZE / sizeof(poison)); + memcpy(sigpage, __aarch32_sigret_code_start, 
sigret_sz); aarch32_sig_page = virt_to_page(sigpage); - flush_dcache_page(aarch32_sig_page); return 0; } -static int __aarch32_alloc_vdso_pages(void) +static int __init __aarch32_alloc_vdso_pages(void) { if (!IS_ENABLED(CONFIG_COMPAT_VDSO)) diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S index 61dbb4c838ef..a5e61e09ea92 100644 --- a/arch/arm64/kernel/vdso/vdso.lds.S +++ b/arch/arm64/kernel/vdso/vdso.lds.S @@ -31,6 +31,13 @@ SECTIONS .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } + /* + * Discard .note.gnu.property sections which are unused and have + * different alignment requirement from vDSO note sections. + */ + /DISCARD/ : { + *(.note.GNU-stack .note.gnu.property) + } .note : { *(.note.*) } :text :note . = ALIGN(16); @@ -48,7 +55,6 @@ SECTIONS PROVIDE(end = .); /DISCARD/ : { - *(.note.GNU-stack) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) *(.eh_frame .eh_frame_hdr) diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index 789ad420f16b..3dba0c4f8f42 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -10,15 +10,7 @@ include $(srctree)/lib/vdso/Makefile # Same as cc-*option, but using CC_COMPAT instead of CC ifeq ($(CONFIG_CC_IS_CLANG), y) -COMPAT_GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE_COMPAT)elfedit)) -COMPAT_GCC_TOOLCHAIN := $(realpath $(COMPAT_GCC_TOOLCHAIN_DIR)/..) - CC_COMPAT_CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%)) -CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE_COMPAT)) -CC_COMPAT_CLANG_FLAGS += -no-integrated-as -Qunused-arguments -ifneq ($(COMPAT_GCC_TOOLCHAIN),) -CC_COMPAT_CLANG_FLAGS += --gcc-toolchain=$(COMPAT_GCC_TOOLCHAIN) -endif CC_COMPAT ?= $(CC) CC_COMPAT += $(CC_COMPAT_CLANG_FLAGS) diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 7eea7888bb02..709d2c433c5e 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -5,24 +5,7 @@ * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz> */ -#define RO_EXCEPTION_TABLE_ALIGN 8 -#define RUNTIME_DISCARD_EXIT - -#include <asm-generic/vmlinux.lds.h> -#include <asm/cache.h> #include <asm/hyp_image.h> -#include <asm/kernel-pgtable.h> -#include <asm/memory.h> -#include <asm/page.h> - -#include "image.h" - -OUTPUT_ARCH(aarch64) -ENTRY(_text) - -jiffies = jiffies_64; - - #ifdef CONFIG_KVM #define HYPERVISOR_EXTABLE \ . = ALIGN(SZ_8); \ @@ -32,9 +15,11 @@ jiffies = jiffies_64; #define HYPERVISOR_DATA_SECTIONS \ HYP_SECTION_NAME(.rodata) : { \ + . = ALIGN(PAGE_SIZE); \ __hyp_rodata_start = .; \ *(HYP_SECTION_NAME(.data..ro_after_init)) \ *(HYP_SECTION_NAME(.rodata)) \ + . = ALIGN(PAGE_SIZE); \ __hyp_rodata_end = .; \ } @@ -51,29 +36,52 @@ jiffies = jiffies_64; __hyp_reloc_end = .; \ } +#define BSS_FIRST_SECTIONS \ + __hyp_bss_start = .; \ + *(HYP_SECTION_NAME(.bss)) \ + . = ALIGN(PAGE_SIZE); \ + __hyp_bss_end = .; + +/* + * We require that __hyp_bss_start and __bss_start are aligned, and enforce it + * with an assertion. But the BSS_SECTION macro places an empty .sbss section + * between them, which can in some cases cause the linker to misalign them. To + * work around the issue, force a page alignment for __bss_start. 
+ */ +#define SBSS_ALIGN PAGE_SIZE #else /* CONFIG_KVM */ #define HYPERVISOR_EXTABLE #define HYPERVISOR_DATA_SECTIONS #define HYPERVISOR_PERCPU_SECTION #define HYPERVISOR_RELOC_SECTION +#define SBSS_ALIGN 0 #endif +#define RO_EXCEPTION_TABLE_ALIGN 8 +#define RUNTIME_DISCARD_EXIT + +#include <asm-generic/vmlinux.lds.h> +#include <asm/cache.h> +#include <asm/kernel-pgtable.h> +#include <asm/memory.h> +#include <asm/page.h> + +#include "image.h" + +OUTPUT_ARCH(aarch64) +ENTRY(_text) + +jiffies = jiffies_64; + #define HYPERVISOR_TEXT \ - /* \ - * Align to 4 KB so that \ - * a) the HYP vector table is at its minimum \ - * alignment of 2048 bytes \ - * b) the HYP init code will not cross a page \ - * boundary if its size does not exceed \ - * 4 KB (see related ASSERT() below) \ - */ \ - . = ALIGN(SZ_4K); \ + . = ALIGN(PAGE_SIZE); \ __hyp_idmap_text_start = .; \ *(.hyp.idmap.text) \ __hyp_idmap_text_end = .; \ __hyp_text_start = .; \ *(.hyp.text) \ HYPERVISOR_EXTABLE \ + . = ALIGN(PAGE_SIZE); \ __hyp_text_end = .; #define IDMAP_TEXT \ @@ -276,7 +284,7 @@ SECTIONS __pecoff_data_rawsize = ABSOLUTE(. - __initdata_begin); _edata = .; - BSS_SECTION(0, 0, 0) + BSS_SECTION(SBSS_ALIGN, 0, 0) . = ALIGN(PAGE_SIZE); init_pg_dir = .; @@ -309,11 +317,12 @@ SECTIONS #include "image-vars.h" /* - * The HYP init code and ID map text can't be longer than a page each, - * and should not cross a page boundary. + * The HYP init code and ID map text can't be longer than a page each. The + * former is page-aligned, but the latter may not be with 16K or 64K pages, so + * it should also not cross a page boundary. */ -ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, - "HYP init code too big or misaligned") +ASSERT(__hyp_idmap_text_end - __hyp_idmap_text_start <= PAGE_SIZE, + "HYP init code too big") ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, "ID map text too big or misaligned") #ifdef CONFIG_HIBERNATION @@ -324,6 +333,9 @@ ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1)) ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE, "Entry trampoline text too big") #endif +#ifdef CONFIG_KVM +ASSERT(__hyp_bss_start == __bss_start, "HYP and Host BSS are misaligned") +#endif /* * If padding is applied before .head.text, virt<->phys conversions will fail. 
*/ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 7f06ba76698d..1cb39c0803a4 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -206,8 +206,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_ARM_INJECT_EXT_DABT: case KVM_CAP_SET_GUEST_DEBUG: case KVM_CAP_VCPU_ATTRIBUTES: + case KVM_CAP_PTP_KVM: r = 1; break; + case KVM_CAP_SET_GUEST_DEBUG2: + return KVM_GUESTDBG_VALID_MASK; case KVM_CAP_ARM_SET_DEVICE_ADDR: r = 1; break; @@ -416,10 +419,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (vcpu_has_ptrauth(vcpu)) vcpu_ptrauth_disable(vcpu); + kvm_arch_vcpu_load_debug_state_flags(vcpu); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { + kvm_arch_vcpu_put_debug_state_flags(vcpu); kvm_arch_vcpu_put_fp(vcpu); if (has_vhe()) kvm_vcpu_put_sysregs_vhe(vcpu); @@ -580,6 +585,8 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) vcpu->arch.has_run_once = true; + kvm_arm_vcpu_init_debug(vcpu); + if (likely(irqchip_in_kernel(kvm))) { /* * Map the VGIC hardware resources before running a vcpu the @@ -1268,7 +1275,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) } void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, - struct kvm_memory_slot *memslot) + const struct kvm_memory_slot *memslot) { kvm_flush_remote_tlbs(kvm); } @@ -1350,16 +1357,9 @@ static unsigned long nvhe_percpu_order(void) /* A lookup table holding the hypervisor VA for each vector slot */ static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS]; -static int __kvm_vector_slot2idx(enum arm64_hyp_spectre_vector slot) -{ - return slot - (slot != HYP_VECTOR_DIRECT); -} - static void kvm_init_vector_slot(void *base, enum arm64_hyp_spectre_vector slot) { - int idx = __kvm_vector_slot2idx(slot); - - hyp_spectre_vector_selector[slot] = base + (idx * SZ_2K); + hyp_spectre_vector_selector[slot] = __kvm_vector_slot2addr(base, slot); } static int kvm_init_vector_slots(void) @@ -1388,22 +1388,18 @@ static int kvm_init_vector_slots(void) return 0; } -static void cpu_init_hyp_mode(void) +static void cpu_prepare_hyp_mode(int cpu) { - struct kvm_nvhe_init_params *params = this_cpu_ptr_nvhe_sym(kvm_init_params); - struct arm_smccc_res res; + struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu); unsigned long tcr; - /* Switch from the HYP stub to our own HYP init vector */ - __hyp_set_vectors(kvm_get_idmap_vector()); - /* * Calculate the raw per-cpu offset without a translation from the * kernel's mapping to the linear mapping, and store it in tpidr_el2 * so that we can use adr_l to access per-cpu variables in EL2. * Also drop the KASAN tag which gets in the way... 
*/ - params->tpidr_el2 = (unsigned long)kasan_reset_tag(this_cpu_ptr_nvhe_sym(__per_cpu_start)) - + params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) - (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start)); params->mair_el2 = read_sysreg(mair_el1); @@ -1427,14 +1423,28 @@ static void cpu_init_hyp_mode(void) tcr |= (idmap_t0sz & GENMASK(TCR_TxSZ_WIDTH - 1, 0)) << TCR_T0SZ_OFFSET; params->tcr_el2 = tcr; - params->stack_hyp_va = kern_hyp_va(__this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE); + params->stack_hyp_va = kern_hyp_va(per_cpu(kvm_arm_hyp_stack_page, cpu) + PAGE_SIZE); params->pgd_pa = kvm_mmu_get_httbr(); + if (is_protected_kvm_enabled()) + params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS; + else + params->hcr_el2 = HCR_HOST_NVHE_FLAGS; + params->vttbr = params->vtcr = 0; /* * Flush the init params from the data cache because the struct will * be read while the MMU is off. */ kvm_flush_dcache_to_poc(params, sizeof(*params)); +} + +static void hyp_install_host_vector(void) +{ + struct kvm_nvhe_init_params *params; + struct arm_smccc_res res; + + /* Switch from the HYP stub to our own HYP init vector */ + __hyp_set_vectors(kvm_get_idmap_vector()); /* * Call initialization code, and switch to the full blown HYP code. @@ -1443,8 +1453,14 @@ static void cpu_init_hyp_mode(void) * cpus_have_const_cap() wrapper. */ BUG_ON(!system_capabilities_finalized()); + params = this_cpu_ptr_nvhe_sym(kvm_init_params); arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res); WARN_ON(res.a0 != SMCCC_RET_SUCCESS); +} + +static void cpu_init_hyp_mode(void) +{ + hyp_install_host_vector(); /* * Disabling SSBD on a non-VHE system requires us to enable SSBS @@ -1487,7 +1503,10 @@ static void cpu_set_hyp_vector(void) struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data); void *vector = hyp_spectre_vector_selector[data->slot]; - *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector; + if (!is_protected_kvm_enabled()) + *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector; + else + kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot); } static void cpu_hyp_reinit(void) @@ -1495,13 +1514,14 @@ static void cpu_hyp_reinit(void) kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt); cpu_hyp_reset(); - cpu_set_hyp_vector(); if (is_kernel_in_hyp_mode()) kvm_timer_init_vhe(); else cpu_init_hyp_mode(); + cpu_set_hyp_vector(); + kvm_arm_init_debug(); if (vgic_present) @@ -1697,18 +1717,62 @@ static void teardown_hyp_mode(void) } } +static int do_pkvm_init(u32 hyp_va_bits) +{ + void *per_cpu_base = kvm_ksym_ref(kvm_arm_hyp_percpu_base); + int ret; + + preempt_disable(); + hyp_install_host_vector(); + ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size, + num_possible_cpus(), kern_hyp_va(per_cpu_base), + hyp_va_bits); + preempt_enable(); + + return ret; +} + +static int kvm_hyp_init_protection(u32 hyp_va_bits) +{ + void *addr = phys_to_virt(hyp_mem_base); + int ret; + + kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); + kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); + + ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP); + if (ret) + return ret; + + ret = do_pkvm_init(hyp_va_bits); + if (ret) + return ret; + + free_hyp_pgds(); + + return 0; +} + /** * Inits Hyp-mode on all online CPUs */ static int init_hyp_mode(void) { + u32 hyp_va_bits; int cpu; - int err = 0; + int err = -ENOMEM; + + /* + * 
The protected Hyp-mode cannot be initialized if the memory pool + * allocation has failed. + */ + if (is_protected_kvm_enabled() && !hyp_mem_base) + goto out_err; /* * Allocate Hyp PGD and setup Hyp identity mapping */ - err = kvm_mmu_init(); + err = kvm_mmu_init(&hyp_va_bits); if (err) goto out_err; @@ -1769,7 +1833,19 @@ static int init_hyp_mode(void) goto out_err; } - err = create_hyp_mappings(kvm_ksym_ref(__bss_start), + /* + * .hyp.bss is guaranteed to be placed at the beginning of the .bss + * section thanks to an assertion in the linker script. Map it RW and + * the rest of .bss RO. + */ + err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_start), + kvm_ksym_ref(__hyp_bss_end), PAGE_HYP); + if (err) { + kvm_err("Cannot map hyp bss section: %d\n", err); + goto out_err; + } + + err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_end), kvm_ksym_ref(__bss_stop), PAGE_HYP_RO); if (err) { kvm_err("Cannot map bss section\n"); @@ -1790,26 +1866,36 @@ static int init_hyp_mode(void) } } - /* - * Map Hyp percpu pages - */ for_each_possible_cpu(cpu) { char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu]; char *percpu_end = percpu_begin + nvhe_percpu_size(); + /* Map Hyp percpu pages */ err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP); - if (err) { kvm_err("Cannot map hyp percpu region\n"); goto out_err; } + + /* Prepare the CPU initialization parameters */ + cpu_prepare_hyp_mode(cpu); } if (is_protected_kvm_enabled()) { init_cpu_logical_map(); - if (!init_psci_relay()) + if (!init_psci_relay()) { + err = -ENODEV; + goto out_err; + } + } + + if (is_protected_kvm_enabled()) { + err = kvm_hyp_init_protection(hyp_va_bits); + if (err) { + kvm_err("Failed to init hyp memory protection\n"); goto out_err; + } } return 0; @@ -1820,6 +1906,72 @@ out_err: return err; } +static void _kvm_host_prot_finalize(void *discard) +{ + WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize)); +} + +static inline int pkvm_mark_hyp(phys_addr_t start, phys_addr_t end) +{ + return kvm_call_hyp_nvhe(__pkvm_mark_hyp, start, end); +} + +#define pkvm_mark_hyp_section(__section) \ + pkvm_mark_hyp(__pa_symbol(__section##_start), \ + __pa_symbol(__section##_end)) + +static int finalize_hyp_mode(void) +{ + int cpu, ret; + + if (!is_protected_kvm_enabled()) + return 0; + + ret = pkvm_mark_hyp_section(__hyp_idmap_text); + if (ret) + return ret; + + ret = pkvm_mark_hyp_section(__hyp_text); + if (ret) + return ret; + + ret = pkvm_mark_hyp_section(__hyp_rodata); + if (ret) + return ret; + + ret = pkvm_mark_hyp_section(__hyp_bss); + if (ret) + return ret; + + ret = pkvm_mark_hyp(hyp_mem_base, hyp_mem_base + hyp_mem_size); + if (ret) + return ret; + + for_each_possible_cpu(cpu) { + phys_addr_t start = virt_to_phys((void *)kvm_arm_hyp_percpu_base[cpu]); + phys_addr_t end = start + (PAGE_SIZE << nvhe_percpu_order()); + + ret = pkvm_mark_hyp(start, end); + if (ret) + return ret; + + start = virt_to_phys((void *)per_cpu(kvm_arm_hyp_stack_page, cpu)); + end = start + PAGE_SIZE; + ret = pkvm_mark_hyp(start, end); + if (ret) + return ret; + } + + /* + * Flip the static key upfront as that may no longer be possible + * once the host stage 2 is installed. 
+ */ + static_branch_enable(&kvm_protected_mode_initialized); + on_each_cpu(_kvm_host_prot_finalize, NULL, 1); + + return 0; +} + static void check_kvm_target_cpu(void *ret) { *(int *)ret = kvm_target_cpu(); @@ -1894,11 +2046,6 @@ int kvm_arch_init(void *opaque) in_hyp_mode = is_kernel_in_hyp_mode(); - if (!in_hyp_mode && kvm_arch_requires_vhe()) { - kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n"); - return -ENODEV; - } - if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) || cpus_have_final_cap(ARM64_WORKAROUND_1508412)) kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \ @@ -1936,8 +2083,15 @@ int kvm_arch_init(void *opaque) if (err) goto out_hyp; + if (!in_hyp_mode) { + err = finalize_hyp_mode(); + if (err) { + kvm_err("Failed to finalize Hyp protection\n"); + goto out_hyp; + } + } + if (is_protected_kvm_enabled()) { - static_branch_enable(&kvm_protected_mode_initialized); kvm_info("Protected nVHE mode initialized successfully\n"); } else if (in_hyp_mode) { kvm_info("VHE mode initialized successfully\n"); diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index 7a7e425616b5..d5e79d7ee6e9 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -69,6 +69,65 @@ void kvm_arm_init_debug(void) } /** + * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value + * + * @vcpu: the vcpu pointer + * + * This ensures we will trap access to: + * - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR) + * - Debug ROM Address (MDCR_EL2_TDRA) + * - OS related registers (MDCR_EL2_TDOSA) + * - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB) + * - Self-hosted Trace Filter controls (MDCR_EL2_TTRF) + * - Self-hosted Trace (MDCR_EL2_TTRF/MDCR_EL2_E2TB) + */ +static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu) +{ + /* + * This also clears MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK + * to disable guest access to the profiling and trace buffers + */ + vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK; + vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM | + MDCR_EL2_TPMS | + MDCR_EL2_TTRF | + MDCR_EL2_TPMCR | + MDCR_EL2_TDRA | + MDCR_EL2_TDOSA); + + /* Is the VM being debugged by userspace? */ + if (vcpu->guest_debug) + /* Route all software debug exceptions to EL2 */ + vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE; + + /* + * Trap debug register access when one of the following is true: + * - Userspace is using the hardware to debug the guest + * (KVM_GUESTDBG_USE_HW is set). + * - The guest is not using debug (KVM_ARM64_DEBUG_DIRTY is clear). + */ + if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) || + !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)) + vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA; + + trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2); +} + +/** + * kvm_arm_vcpu_init_debug - setup vcpu debug traps + * + * @vcpu: the vcpu pointer + * + * Set vcpu initial mdcr_el2 value. + */ +void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu) +{ + preempt_disable(); + kvm_arm_setup_mdcr_el2(vcpu); + preempt_enable(); +} + +/** * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state */ @@ -83,12 +142,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) * @vcpu: the vcpu pointer * * This is called before each entry into the hypervisor to setup any - * debug related registers. 
Currently this just ensures we will trap - * access to: - * - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR) - * - Debug ROM Address (MDCR_EL2_TDRA) - * - OS related registers (MDCR_EL2_TDOSA) - * - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB) + * debug related registers. * * Additionally, KVM only traps guest accesses to the debug registers if * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY @@ -100,27 +154,14 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) { - bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY); unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2; trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug); - /* - * This also clears MDCR_EL2_E2PB_MASK to disable guest access - * to the profiling buffer. - */ - vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK; - vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM | - MDCR_EL2_TPMS | - MDCR_EL2_TPMCR | - MDCR_EL2_TDRA | - MDCR_EL2_TDOSA); + kvm_arm_setup_mdcr_el2(vcpu); /* Is Guest debugging in effect? */ if (vcpu->guest_debug) { - /* Route all software debug exceptions to EL2 */ - vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE; - /* Save guest debug state */ save_guest_debug_regs(vcpu); @@ -174,7 +215,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state; vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY; - trap_debug = true; trace_kvm_arm_set_regset("BKPTS", get_num_brps(), &vcpu->arch.debug_ptr->dbg_bcr[0], @@ -189,10 +229,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) BUG_ON(!vcpu->guest_debug && vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state); - /* Trap debug register access */ - if (trap_debug) - vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA; - /* If KDE or MDE are set, perform a full save/restore cycle. */ if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE)) vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY; @@ -201,7 +237,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2) write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); - trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2); trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1)); } @@ -229,3 +264,32 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) } } } + +void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu) +{ + u64 dfr0; + + /* For VHE, there is nothing to do */ + if (has_vhe()) + return; + + dfr0 = read_sysreg(id_aa64dfr0_el1); + /* + * If SPE is present on this CPU and is available at current EL, + * we may need to check if the host state needs to be saved. 
+ */ + if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVER_SHIFT) && + !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT))) + vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_SPE; + + /* Check if we have TRBE implemented and available at the host */ + if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRBE_SHIFT) && + !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG)) + vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_TRBE; +} + +void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu) +{ + vcpu->arch.flags &= ~(KVM_ARM64_DEBUG_STATE_SAVE_SPE | + KVM_ARM64_DEBUG_STATE_SAVE_TRBE); +} diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 3e081d556e81..5621020b28de 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -11,6 +11,7 @@ #include <linux/kvm_host.h> #include <asm/fpsimd.h> #include <asm/kvm_asm.h> +#include <asm/kvm_hyp.h> #include <asm/kvm_mmu.h> #include <asm/sysreg.h> @@ -42,6 +43,17 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu) if (ret) goto error; + if (vcpu->arch.sve_state) { + void *sve_end; + + sve_end = vcpu->arch.sve_state + vcpu_sve_state_size(vcpu); + + ret = create_hyp_mappings(vcpu->arch.sve_state, sve_end, + PAGE_HYP); + if (ret) + goto error; + } + vcpu->arch.host_thread_info = kern_hyp_va(ti); vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd); error: @@ -109,11 +121,17 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) local_irq_save(flags); if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { - fpsimd_save_and_flush_cpu_state(); + if (guest_has_sve) { + __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR); + + /* Restore the VL that was saved when bound to the CPU */ + if (!has_vhe()) + sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, + SYS_ZCR_EL1); + } - if (guest_has_sve) - __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_s(SYS_ZCR_EL12); - } else if (host_has_sve) { + fpsimd_save_and_flush_cpu_state(); + } else if (has_vhe() && host_has_sve) { /* * The FPSIMD/SVE state in the CPU has not been touched, and we * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 9bbd30e62799..5cb4a1cd5603 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -299,7 +299,7 @@ static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) memset(vqs, 0, sizeof(vqs)); - max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl); + max_vq = vcpu_sve_max_vq(vcpu); for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq) if (sve_vq_available(vq)) vqs[vq_word(vq)] |= vq_mask(vq); @@ -427,7 +427,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region, if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0) return -ENOENT; - vq = sve_vq_from_vl(vcpu->arch.sve_max_vl); + vq = vcpu_sve_max_vq(vcpu); reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) - SVE_SIG_REGS_OFFSET; @@ -437,7 +437,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region, if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0) return -ENOENT; - vq = sve_vq_from_vl(vcpu->arch.sve_max_vl); + vq = vcpu_sve_max_vq(vcpu); reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) - SVE_SIG_REGS_OFFSET; @@ -888,11 +888,6 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, return -EINVAL; } -#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \ - KVM_GUESTDBG_USE_SW_BP | \ - KVM_GUESTDBG_USE_HW | \ - KVM_GUESTDBG_SINGLESTEP) - /** * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging * @kvm: pointer to the KVM struct diff --git 
a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index cebe39f3b1b6..6f48336b1d86 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -291,3 +291,48 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index) if (exception_index == ARM_EXCEPTION_EL1_SERROR) kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu)); } + +void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr, + u64 par, uintptr_t vcpu, + u64 far, u64 hpfar) { + u64 elr_in_kimg = __phys_to_kimg(__hyp_pa(elr)); + u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr; + u64 mode = spsr & PSR_MODE_MASK; + + /* + * The nVHE hyp symbols are not included by kallsyms to avoid issues + * with aliasing. That means that the symbols cannot be printed with the + * "%pS" format specifier, so fall back to the vmlinux address if + * there's no better option. + */ + if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) { + kvm_err("Invalid host exception to nVHE hyp!\n"); + } else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 && + (esr & ESR_ELx_BRK64_ISS_COMMENT_MASK) == BUG_BRK_IMM) { + struct bug_entry *bug = find_bug(elr_in_kimg); + const char *file = NULL; + unsigned int line = 0; + + /* All hyp bugs, including warnings, are treated as fatal. */ + if (bug) + bug_get_file_line(bug, &file, &line); + + if (file) + kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line); + else + kvm_err("nVHE hyp BUG at: %016llx!\n", elr + hyp_offset); + } else { + kvm_err("nVHE hyp panic at: %016llx!\n", elr + hyp_offset); + } + + /* + * Hyp has panicked and we're going to handle that by panicking the + * kernel. The kernel offset will be revealed in the panic so we're + * also safe to reveal the hyp offset as a debugging aid for translating + * hyp VAs to vmlinux addresses. 
+ */ + kvm_err("Hyp Offset: 0x%llx\n", hyp_offset); + + panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n", + spsr, elr, esr, far, hpfar, par, vcpu); +} diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index 687598e41b21..b726332eec49 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -10,4 +10,4 @@ subdir-ccflags-y := -I$(incdir) \ -DDISABLE_BRANCH_PROFILING \ $(DISABLE_STACKLEAK_PLUGIN) -obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o +obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o reserved_mem.o diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S index 01f114aa47b0..3c635929771a 100644 --- a/arch/arm64/kvm/hyp/fpsimd.S +++ b/arch/arm64/kvm/hyp/fpsimd.S @@ -19,3 +19,13 @@ SYM_FUNC_START(__fpsimd_restore_state) fpsimd_restore x0, 1 ret SYM_FUNC_END(__fpsimd_restore_state) + +SYM_FUNC_START(__sve_restore_state) + __sve_load 0, x1, 2 + ret +SYM_FUNC_END(__sve_restore_state) + +SYM_FUNC_START(__sve_save_state) + sve_save 0, x1, 2 + ret +SYM_FUNC_END(__sve_save_state) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 6c1f51f25eb3..e4a2f295a394 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -30,8 +30,6 @@ #include <asm/processor.h> #include <asm/thread_info.h> -extern const char __hyp_panic_string[]; - extern struct exception_table_entry __start___kvm_ex_table; extern struct exception_table_entry __stop___kvm_ex_table; @@ -160,18 +158,10 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar) return true; } -static inline bool __populate_fault_info(struct kvm_vcpu *vcpu) +static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault) { - u8 ec; - u64 esr; u64 hpfar, far; - esr = vcpu->arch.fault.esr_el2; - ec = ESR_ELx_EC(esr); - - if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW) - return true; - far = read_sysreg_el2(SYS_FAR); /* @@ -194,33 +184,59 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu) hpfar = read_sysreg(hpfar_el2); } - vcpu->arch.fault.far_el2 = far; - vcpu->arch.fault.hpfar_el2 = hpfar; + fault->far_el2 = far; + fault->hpfar_el2 = hpfar; return true; } +static inline bool __populate_fault_info(struct kvm_vcpu *vcpu) +{ + u8 ec; + u64 esr; + + esr = vcpu->arch.fault.esr_el2; + ec = ESR_ELx_EC(esr); + + if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW) + return true; + + return __get_fault_info(esr, &vcpu->arch.fault); +} + +static inline void __hyp_sve_save_host(struct kvm_vcpu *vcpu) +{ + struct thread_struct *thread; + + thread = container_of(vcpu->arch.host_fpsimd_state, struct thread_struct, + uw.fpsimd_state); + + __sve_save_state(sve_pffr(thread), &vcpu->arch.host_fpsimd_state->fpsr); +} + +static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu) +{ + sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2); + __sve_restore_state(vcpu_sve_pffr(vcpu), + &vcpu->arch.ctxt.fp_regs.fpsr); + write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR); +} + /* Check for an FPSIMD/SVE trap and handle as appropriate */ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu) { - bool vhe, sve_guest, sve_host; + bool sve_guest, sve_host; u8 esr_ec; + u64 reg; if (!system_supports_fpsimd()) return false; - /* - * Currently system_supports_sve() currently implies has_vhe(), - * so the check is redundant. 
However, has_vhe() can be determined - * statically and helps the compiler remove dead code. - */ - if (has_vhe() && system_supports_sve()) { + if (system_supports_sve()) { sve_guest = vcpu_has_sve(vcpu); sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE; - vhe = true; } else { sve_guest = false; sve_host = false; - vhe = has_vhe(); } esr_ec = kvm_vcpu_trap_get_class(vcpu); @@ -229,53 +245,38 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu) return false; /* Don't handle SVE traps for non-SVE vcpus here: */ - if (!sve_guest) - if (esr_ec != ESR_ELx_EC_FP_ASIMD) - return false; + if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD) + return false; /* Valid trap. Switch the context: */ - - if (vhe) { - u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN; - + if (has_vhe()) { + reg = CPACR_EL1_FPEN; if (sve_guest) reg |= CPACR_EL1_ZEN; - write_sysreg(reg, cpacr_el1); + sysreg_clear_set(cpacr_el1, 0, reg); } else { - write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP, - cptr_el2); - } + reg = CPTR_EL2_TFP; + if (sve_guest) + reg |= CPTR_EL2_TZ; + sysreg_clear_set(cptr_el2, reg, 0); + } isb(); if (vcpu->arch.flags & KVM_ARM64_FP_HOST) { - /* - * In the SVE case, VHE is assumed: it is enforced by - * Kconfig and kvm_arch_init(). - */ - if (sve_host) { - struct thread_struct *thread = container_of( - vcpu->arch.host_fpsimd_state, - struct thread_struct, uw.fpsimd_state); - - sve_save_state(sve_pffr(thread), - &vcpu->arch.host_fpsimd_state->fpsr); - } else { + if (sve_host) + __hyp_sve_save_host(vcpu); + else __fpsimd_save_state(vcpu->arch.host_fpsimd_state); - } vcpu->arch.flags &= ~KVM_ARM64_FP_HOST; } - if (sve_guest) { - sve_load_state(vcpu_sve_pffr(vcpu), - &vcpu->arch.ctxt.fp_regs.fpsr, - sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1); - write_sysreg_s(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR_EL12); - } else { + if (sve_guest) + __hyp_sve_restore_guest(vcpu); + else __fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs); - } /* Skip restoring fpexc32 for AArch64 guests */ if (!(read_sysreg(hcr_el2) & HCR_RW)) diff --git a/arch/arm64/kvm/hyp/include/nvhe/early_alloc.h b/arch/arm64/kvm/hyp/include/nvhe/early_alloc.h new file mode 100644 index 000000000000..dc61aaa56f31 --- /dev/null +++ b/arch/arm64/kvm/hyp/include/nvhe/early_alloc.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __KVM_HYP_EARLY_ALLOC_H +#define __KVM_HYP_EARLY_ALLOC_H + +#include <asm/kvm_pgtable.h> + +void hyp_early_alloc_init(void *virt, unsigned long size); +unsigned long hyp_early_alloc_nr_used_pages(void); +void *hyp_early_alloc_page(void *arg); +void *hyp_early_alloc_contig(unsigned int nr_pages); + +extern struct kvm_pgtable_mm_ops hyp_early_alloc_mm_ops; + +#endif /* __KVM_HYP_EARLY_ALLOC_H */ diff --git a/arch/arm64/kvm/hyp/include/nvhe/gfp.h b/arch/arm64/kvm/hyp/include/nvhe/gfp.h new file mode 100644 index 000000000000..18a4494337bd --- /dev/null +++ b/arch/arm64/kvm/hyp/include/nvhe/gfp.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __KVM_HYP_GFP_H +#define __KVM_HYP_GFP_H + +#include <linux/list.h> + +#include <nvhe/memory.h> +#include <nvhe/spinlock.h> + +#define HYP_NO_ORDER UINT_MAX + +struct hyp_pool { + /* + * Spinlock protecting concurrent changes to the memory pool as well as + * the struct hyp_page of the pool's pages until we have a proper atomic + * API at EL2. 
+ */ + hyp_spinlock_t lock; + struct list_head free_area[MAX_ORDER]; + phys_addr_t range_start; + phys_addr_t range_end; + unsigned int max_order; +}; + +static inline void hyp_page_ref_inc(struct hyp_page *p) +{ + struct hyp_pool *pool = hyp_page_to_pool(p); + + hyp_spin_lock(&pool->lock); + p->refcount++; + hyp_spin_unlock(&pool->lock); +} + +static inline int hyp_page_ref_dec_and_test(struct hyp_page *p) +{ + struct hyp_pool *pool = hyp_page_to_pool(p); + int ret; + + hyp_spin_lock(&pool->lock); + p->refcount--; + ret = (p->refcount == 0); + hyp_spin_unlock(&pool->lock); + + return ret; +} + +static inline void hyp_set_page_refcounted(struct hyp_page *p) +{ + struct hyp_pool *pool = hyp_page_to_pool(p); + + hyp_spin_lock(&pool->lock); + if (p->refcount) { + hyp_spin_unlock(&pool->lock); + BUG(); + } + p->refcount = 1; + hyp_spin_unlock(&pool->lock); +} + +/* Allocation */ +void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order); +void hyp_get_page(void *addr); +void hyp_put_page(void *addr); + +/* Used pages cannot be freed */ +int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages, + unsigned int reserved_pages); +#endif /* __KVM_HYP_GFP_H */ diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h new file mode 100644 index 000000000000..42d81ec739fa --- /dev/null +++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 Google LLC + * Author: Quentin Perret <qperret@google.com> + */ + +#ifndef __KVM_NVHE_MEM_PROTECT__ +#define __KVM_NVHE_MEM_PROTECT__ +#include <linux/kvm_host.h> +#include <asm/kvm_hyp.h> +#include <asm/kvm_pgtable.h> +#include <asm/virt.h> +#include <nvhe/spinlock.h> + +struct host_kvm { + struct kvm_arch arch; + struct kvm_pgtable pgt; + struct kvm_pgtable_mm_ops mm_ops; + hyp_spinlock_t lock; +}; +extern struct host_kvm host_kvm; + +int __pkvm_prot_finalize(void); +int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end); + +int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool); +void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt); + +static __always_inline void __load_host_stage2(void) +{ + if (static_branch_likely(&kvm_protected_mode_initialized)) + __load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr); + else + write_sysreg(0, vttbr_el2); +} +#endif /* __KVM_NVHE_MEM_PROTECT__ */ diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h new file mode 100644 index 000000000000..fd78bde939ee --- /dev/null +++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __KVM_HYP_MEMORY_H +#define __KVM_HYP_MEMORY_H + +#include <asm/kvm_mmu.h> +#include <asm/page.h> + +#include <linux/types.h> + +struct hyp_pool; +struct hyp_page { + unsigned int refcount; + unsigned int order; + struct hyp_pool *pool; + struct list_head node; +}; + +extern u64 __hyp_vmemmap; +#define hyp_vmemmap ((struct hyp_page *)__hyp_vmemmap) + +#define __hyp_va(phys) ((void *)((phys_addr_t)(phys) - hyp_physvirt_offset)) + +static inline void *hyp_phys_to_virt(phys_addr_t phys) +{ + return __hyp_va(phys); +} + +static inline phys_addr_t hyp_virt_to_phys(void *addr) +{ + return __hyp_pa(addr); +} + +#define hyp_phys_to_pfn(phys) ((phys) >> PAGE_SHIFT) +#define hyp_pfn_to_phys(pfn) ((phys_addr_t)((pfn) << PAGE_SHIFT)) +#define hyp_phys_to_page(phys) (&hyp_vmemmap[hyp_phys_to_pfn(phys)]) +#define hyp_virt_to_page(virt) 
hyp_phys_to_page(__hyp_pa(virt)) +#define hyp_virt_to_pfn(virt) hyp_phys_to_pfn(__hyp_pa(virt)) + +#define hyp_page_to_pfn(page) ((struct hyp_page *)(page) - hyp_vmemmap) +#define hyp_page_to_phys(page) hyp_pfn_to_phys((hyp_page_to_pfn(page))) +#define hyp_page_to_virt(page) __hyp_va(hyp_page_to_phys(page)) +#define hyp_page_to_pool(page) (((struct hyp_page *)page)->pool) + +static inline int hyp_page_count(void *addr) +{ + struct hyp_page *p = hyp_virt_to_page(addr); + + return p->refcount; +} + +#endif /* __KVM_HYP_MEMORY_H */ diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h new file mode 100644 index 000000000000..0095f6289742 --- /dev/null +++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __KVM_HYP_MM_H +#define __KVM_HYP_MM_H + +#include <asm/kvm_pgtable.h> +#include <asm/spectre.h> +#include <linux/memblock.h> +#include <linux/types.h> + +#include <nvhe/memory.h> +#include <nvhe/spinlock.h> + +#define HYP_MEMBLOCK_REGIONS 128 +extern struct memblock_region kvm_nvhe_sym(hyp_memory)[]; +extern unsigned int kvm_nvhe_sym(hyp_memblock_nr); +extern struct kvm_pgtable pkvm_pgtable; +extern hyp_spinlock_t pkvm_pgd_lock; +extern struct hyp_pool hpool; +extern u64 __io_map_base; + +int hyp_create_idmap(u32 hyp_va_bits); +int hyp_map_vectors(void); +int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back); +int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot); +int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot); +int __pkvm_create_mappings(unsigned long start, unsigned long size, + unsigned long phys, enum kvm_pgtable_prot prot); +unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size, + enum kvm_pgtable_prot prot); + +static inline void hyp_vmemmap_range(phys_addr_t phys, unsigned long size, + unsigned long *start, unsigned long *end) +{ + unsigned long nr_pages = size >> PAGE_SHIFT; + struct hyp_page *p = hyp_phys_to_page(phys); + + *start = (unsigned long)p; + *end = *start + nr_pages * sizeof(struct hyp_page); + *start = ALIGN_DOWN(*start, PAGE_SIZE); + *end = ALIGN(*end, PAGE_SIZE); +} + +static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages) +{ + unsigned long total = 0, i; + + /* Provision the worst case scenario */ + for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) { + nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE); + total += nr_pages; + } + + return total; +} + +static inline unsigned long __hyp_pgtable_total_pages(void) +{ + unsigned long res = 0, i; + + /* Cover all of memory with page-granularity */ + for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) { + struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i]; + res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT); + } + + return res; +} + +static inline unsigned long hyp_s1_pgtable_pages(void) +{ + unsigned long res; + + res = __hyp_pgtable_total_pages(); + + /* Allow 1 GiB for private mappings */ + res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT); + + return res; +} + +static inline unsigned long host_s2_mem_pgtable_pages(void) +{ + /* + * Include an extra 16 pages to safely upper-bound the worst case of + * concatenated pgds. 
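As an illustrative aside, not part of the patch: the worst-case provisioning done by __hyp_pgtable_max_pages() above can be checked with a small stand-alone sketch. It assumes the 4 KiB page case (512 PTEs per table) and the KVM_PGTABLE_MAX_LEVELS of 4 defined in pgtable.c; both values are restated locally so the program compiles on its own.

#include <stdio.h>

#define PTRS_PER_PTE        512UL   /* 4 KiB granule: 512 entries per table */
#define PGTABLE_MAX_LEVELS  4       /* mirrors KVM_PGTABLE_MAX_LEVELS */

/* Same loop as __hyp_pgtable_max_pages(): one table level at a time. */
static unsigned long max_pgtable_pages(unsigned long nr_pages)
{
	unsigned long total = 0;
	int i;

	for (i = 0; i < PGTABLE_MAX_LEVELS; i++) {
		/* DIV_ROUND_UP(nr_pages, PTRS_PER_PTE) */
		nr_pages = (nr_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE;
		total += nr_pages;
	}

	return total;
}

int main(void)
{
	/* Mapping 1 GiB (262144 pages) needs 512 + 1 + 1 + 1 = 515 table pages. */
	printf("%lu\n", max_pgtable_pages(262144));
	return 0;
}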
+ */ + return __hyp_pgtable_total_pages() + 16; +} + +static inline unsigned long host_s2_dev_pgtable_pages(void) +{ + /* Allow 1 GiB for MMIO mappings */ + return __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT); +} + +#endif /* __KVM_HYP_MM_H */ diff --git a/arch/arm64/kvm/hyp/include/nvhe/spinlock.h b/arch/arm64/kvm/hyp/include/nvhe/spinlock.h new file mode 100644 index 000000000000..76b537f8d1c6 --- /dev/null +++ b/arch/arm64/kvm/hyp/include/nvhe/spinlock.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * A stand-alone ticket spinlock implementation for use by the non-VHE + * KVM hypervisor code running at EL2. + * + * Copyright (C) 2020 Google LLC + * Author: Will Deacon <will@kernel.org> + * + * Heavily based on the implementation removed by c11090474d70 which was: + * Copyright (C) 2012 ARM Ltd. + */ + +#ifndef __ARM64_KVM_NVHE_SPINLOCK_H__ +#define __ARM64_KVM_NVHE_SPINLOCK_H__ + +#include <asm/alternative.h> +#include <asm/lse.h> + +typedef union hyp_spinlock { + u32 __val; + struct { +#ifdef __AARCH64EB__ + u16 next, owner; +#else + u16 owner, next; +#endif + }; +} hyp_spinlock_t; + +#define hyp_spin_lock_init(l) \ +do { \ + *(l) = (hyp_spinlock_t){ .__val = 0 }; \ +} while (0) + +static inline void hyp_spin_lock(hyp_spinlock_t *lock) +{ + u32 tmp; + hyp_spinlock_t lockval, newval; + + asm volatile( + /* Atomically increment the next ticket. */ + ARM64_LSE_ATOMIC_INSN( + /* LL/SC */ +" prfm pstl1strm, %3\n" +"1: ldaxr %w0, %3\n" +" add %w1, %w0, #(1 << 16)\n" +" stxr %w2, %w1, %3\n" +" cbnz %w2, 1b\n", + /* LSE atomics */ +" mov %w2, #(1 << 16)\n" +" ldadda %w2, %w0, %3\n" + __nops(3)) + + /* Did we get the lock? */ +" eor %w1, %w0, %w0, ror #16\n" +" cbz %w1, 3f\n" + /* + * No: spin on the owner. Send a local event to avoid missing an + * unlock before the exclusive load. + */ +" sevl\n" +"2: wfe\n" +" ldaxrh %w2, %4\n" +" eor %w1, %w2, %w0, lsr #16\n" +" cbnz %w1, 2b\n" + /* We got the lock. Critical section starts here. 
*/ +"3:" + : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock) + : "Q" (lock->owner) + : "memory"); +} + +static inline void hyp_spin_unlock(hyp_spinlock_t *lock) +{ + u64 tmp; + + asm volatile( + ARM64_LSE_ATOMIC_INSN( + /* LL/SC */ + " ldrh %w1, %0\n" + " add %w1, %w1, #1\n" + " stlrh %w1, %0", + /* LSE atomics */ + " mov %w1, #1\n" + " staddlh %w1, %0\n" + __nops(1)) + : "=Q" (lock->owner), "=&r" (tmp) + : + : "memory"); +} + +#endif /* __ARM64_KVM_NVHE_SPINLOCK_H__ */ diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile index a6707df4f6c0..5df6193fc430 100644 --- a/arch/arm64/kvm/hyp/nvhe/Makefile +++ b/arch/arm64/kvm/hyp/nvhe/Makefile @@ -9,10 +9,15 @@ ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS hostprogs := gen-hyprel HOST_EXTRACFLAGS += -I$(objtree)/include +lib-objs := clear_page.o copy_page.o memcpy.o memset.o +lib-objs := $(addprefix ../../../lib/, $(lib-objs)) + obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \ - hyp-main.o hyp-smp.o psci-relay.o + hyp-main.o hyp-smp.o psci-relay.o early_alloc.o stub.o page_alloc.o \ + cache.o setup.o mm.o mem_protect.o obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \ - ../fpsimd.o ../hyp-entry.o ../exception.o + ../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o +obj-y += $(lib-objs) ## ## Build rules for compiling nVHE hyp code @@ -75,9 +80,9 @@ quiet_cmd_hyprel = HYPREL $@ quiet_cmd_hypcopy = HYPCOPY $@ cmd_hypcopy = $(OBJCOPY) --prefix-symbols=__kvm_nvhe_ $< $@ -# Remove ftrace and Shadow Call Stack CFLAGS. -# This is equivalent to the 'notrace' and '__noscs' annotations. -KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS), $(KBUILD_CFLAGS)) +# Remove ftrace, Shadow Call Stack, and CFI CFLAGS. +# This is equivalent to the 'notrace', '__noscs', and '__nocfi' annotations. +KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) $(CC_FLAGS_CFI), $(KBUILD_CFLAGS)) # KVM nVHE code is run at a different exception code with a different map, so # compiler instrumentation that inserts callbacks or checks into the code may diff --git a/arch/arm64/kvm/hyp/nvhe/cache.S b/arch/arm64/kvm/hyp/nvhe/cache.S new file mode 100644 index 000000000000..36cef6915428 --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/cache.S @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Code copied from arch/arm64/mm/cache.S. + */ + +#include <linux/linkage.h> +#include <asm/assembler.h> +#include <asm/alternative.h> + +SYM_FUNC_START_PI(__flush_dcache_area) + dcache_by_line_op civac, sy, x0, x1, x2, x3 + ret +SYM_FUNC_END_PI(__flush_dcache_area) diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c index f401724f12ef..7d3f25868cae 100644 --- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c +++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c @@ -21,17 +21,11 @@ static void __debug_save_spe(u64 *pmscr_el1) /* Clear pmscr in case of early return */ *pmscr_el1 = 0; - /* SPE present on this CPU? */ - if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1), - ID_AA64DFR0_PMSVER_SHIFT)) - return; - - /* Yes; is it owned by EL3? */ - reg = read_sysreg_s(SYS_PMBIDR_EL1); - if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT)) - return; - - /* No; is the host actually using the thing? */ + /* + * At this point, we know that this CPU implements + * SPE and is available to the host. + * Check if the host is actually using it ? 
+ */ reg = read_sysreg_s(SYS_PMBLIMITR_EL1); if (!(reg & BIT(SYS_PMBLIMITR_EL1_E_SHIFT))) return; @@ -58,10 +52,43 @@ static void __debug_restore_spe(u64 pmscr_el1) write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1); } +static void __debug_save_trace(u64 *trfcr_el1) +{ + *trfcr_el1 = 0; + + /* Check if the TRBE is enabled */ + if (!(read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_ENABLE)) + return; + /* + * Prohibit trace generation while we are in guest. + * Since access to TRFCR_EL1 is trapped, the guest can't + * modify the filtering set by the host. + */ + *trfcr_el1 = read_sysreg_s(SYS_TRFCR_EL1); + write_sysreg_s(0, SYS_TRFCR_EL1); + isb(); + /* Drain the trace buffer to memory */ + tsb_csync(); + dsb(nsh); +} + +static void __debug_restore_trace(u64 trfcr_el1) +{ + if (!trfcr_el1) + return; + + /* Restore trace filter controls */ + write_sysreg_s(trfcr_el1, SYS_TRFCR_EL1); +} + void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu) { /* Disable and flush SPE data generation */ - __debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1); + if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_SPE) + __debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1); + /* Disable and flush Self-Hosted Trace generation */ + if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_TRBE) + __debug_save_trace(&vcpu->arch.host_debug_state.trfcr_el1); } void __debug_switch_to_guest(struct kvm_vcpu *vcpu) @@ -71,7 +98,10 @@ void __debug_switch_to_guest(struct kvm_vcpu *vcpu) void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu) { - __debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1); + if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_SPE) + __debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1); + if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_TRBE) + __debug_restore_trace(vcpu->arch.host_debug_state.trfcr_el1); } void __debug_switch_to_host(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/hyp/nvhe/early_alloc.c b/arch/arm64/kvm/hyp/nvhe/early_alloc.c new file mode 100644 index 000000000000..1306c430ab87 --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/early_alloc.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Google LLC + * Author: Quentin Perret <qperret@google.com> + */ + +#include <asm/kvm_pgtable.h> + +#include <nvhe/early_alloc.h> +#include <nvhe/memory.h> + +struct kvm_pgtable_mm_ops hyp_early_alloc_mm_ops; +s64 __ro_after_init hyp_physvirt_offset; + +static unsigned long base; +static unsigned long end; +static unsigned long cur; + +unsigned long hyp_early_alloc_nr_used_pages(void) +{ + return (cur - base) >> PAGE_SHIFT; +} + +void *hyp_early_alloc_contig(unsigned int nr_pages) +{ + unsigned long size = (nr_pages << PAGE_SHIFT); + void *ret = (void *)cur; + + if (!nr_pages) + return NULL; + + if (end - cur < size) + return NULL; + + cur += size; + memset(ret, 0, size); + + return ret; +} + +void *hyp_early_alloc_page(void *arg) +{ + return hyp_early_alloc_contig(1); +} + +void hyp_early_alloc_init(void *virt, unsigned long size) +{ + base = cur = (unsigned long)virt; + end = base + size; + + hyp_early_alloc_mm_ops.zalloc_page = hyp_early_alloc_page; + hyp_early_alloc_mm_ops.phys_to_virt = hyp_phys_to_virt; + hyp_early_alloc_mm_ops.virt_to_phys = hyp_virt_to_phys; +} diff --git a/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c index ead02c6a7628..6bc88a756cb7 100644 --- a/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c +++ b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c @@ -50,6 +50,18 @@ #ifndef R_AARCH64_ABS64 #define R_AARCH64_ABS64 257 #endif 
+#ifndef R_AARCH64_PREL64 +#define R_AARCH64_PREL64 260 +#endif +#ifndef R_AARCH64_PREL32 +#define R_AARCH64_PREL32 261 +#endif +#ifndef R_AARCH64_PREL16 +#define R_AARCH64_PREL16 262 +#endif +#ifndef R_AARCH64_PLT32 +#define R_AARCH64_PLT32 314 +#endif #ifndef R_AARCH64_LD_PREL_LO19 #define R_AARCH64_LD_PREL_LO19 273 #endif @@ -371,6 +383,12 @@ static void emit_rela_section(Elf64_Shdr *sh_rela) case R_AARCH64_ABS64: emit_rela_abs64(rela, sh_orig_name); break; + /* Allow position-relative data relocations. */ + case R_AARCH64_PREL64: + case R_AARCH64_PREL32: + case R_AARCH64_PREL16: + case R_AARCH64_PLT32: + break; /* Allow relocations to generate PC-relative addressing. */ case R_AARCH64_LD_PREL_LO19: case R_AARCH64_ADR_PREL_LO21: diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S index 5d94584840cc..2b23400e0fb3 100644 --- a/arch/arm64/kvm/hyp/nvhe/host.S +++ b/arch/arm64/kvm/hyp/nvhe/host.S @@ -79,22 +79,18 @@ SYM_FUNC_START(__hyp_do_panic) mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ PSR_MODE_EL1h) msr spsr_el2, lr - ldr lr, =panic + ldr lr, =nvhe_hyp_panic_handler hyp_kimg_va lr, x6 msr elr_el2, lr mov x29, x0 - /* Load the format string into x0 and arguments into x1-7 */ - ldr x0, =__hyp_panic_string - hyp_kimg_va x0, x6 - - /* Load the format arguments into x1-7. */ - mov x6, x3 - get_vcpu_ptr x7, x3 - mrs x3, esr_el2 - mrs x4, far_el2 - mrs x5, hpfar_el2 + /* Load the panic arguments into x0-7 */ + mrs x0, esr_el2 + get_vcpu_ptr x4, x5 + mrs x5, far_el2 + mrs x6, hpfar_el2 + mov x7, xzr // Unused argument /* Enter the host, conditionally restoring the host context. */ cbz x29, __host_enter_without_restoring diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S index c631e29fb001..c953fb4b9a13 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S @@ -83,11 +83,6 @@ SYM_CODE_END(__kvm_hyp_init) * x0: struct kvm_nvhe_init_params PA */ SYM_CODE_START_LOCAL(___kvm_hyp_init) -alternative_if ARM64_KVM_PROTECTED_MODE - mov_q x1, HCR_HOST_NVHE_PROTECTED_FLAGS - msr hcr_el2, x1 -alternative_else_nop_endif - ldr x1, [x0, #NVHE_INIT_TPIDR_EL2] msr tpidr_el2, x1 @@ -97,6 +92,15 @@ alternative_else_nop_endif ldr x1, [x0, #NVHE_INIT_MAIR_EL2] msr mair_el2, x1 + ldr x1, [x0, #NVHE_INIT_HCR_EL2] + msr hcr_el2, x1 + + ldr x1, [x0, #NVHE_INIT_VTTBR] + msr vttbr_el2, x1 + + ldr x1, [x0, #NVHE_INIT_VTCR] + msr vtcr_el2, x1 + ldr x1, [x0, #NVHE_INIT_PGD_PA] phys_to_ttbr x2, x1 alternative_if ARM64_HAS_CNP @@ -115,15 +119,10 @@ alternative_else_nop_endif /* Invalidate the stale TLBs from Bootloader */ tlbi alle2 + tlbi vmalls12e1 dsb sy - /* - * Preserve all the RES1 bits while setting the default flags, - * as well as the EE bit on BE. Drop the A flag since the compiler - * is allowed to generate unaligned accesses. - */ - mov_q x0, (SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A)) -CPU_BE( orr x0, x0, #SCTLR_ELx_EE) + mov_q x0, INIT_SCTLR_EL2_MMU_ON alternative_if ARM64_HAS_ADDRESS_AUTH mov_q x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \ SCTLR_ELx_ENDA | SCTLR_ELx_ENDB) @@ -221,9 +220,7 @@ SYM_CODE_START(__kvm_handle_stub_hvc) mov x0, xzr reset: /* Reset kvm back to the hyp stub. 
*/ - mrs x5, sctlr_el2 - mov_q x6, SCTLR_ELx_FLAGS - bic x5, x5, x6 // Clear SCTL_M and etc + mov_q x5, INIT_SCTLR_EL2_MMU_OFF pre_disable_mmu_workaround msr sctlr_el2, x5 isb @@ -244,4 +241,31 @@ alternative_else_nop_endif SYM_CODE_END(__kvm_handle_stub_hvc) +SYM_FUNC_START(__pkvm_init_switch_pgd) + /* Turn the MMU off */ + pre_disable_mmu_workaround + mrs x2, sctlr_el2 + bic x3, x2, #SCTLR_ELx_M + msr sctlr_el2, x3 + isb + + tlbi alle2 + + /* Install the new pgtables */ + ldr x3, [x0, #NVHE_INIT_PGD_PA] + phys_to_ttbr x4, x3 +alternative_if ARM64_HAS_CNP + orr x4, x4, #TTBR_CNP_BIT +alternative_else_nop_endif + msr ttbr0_el2, x4 + + /* Set the new stack pointer */ + ldr x0, [x0, #NVHE_INIT_STACK_HYP_VA] + mov sp, x0 + + /* And turn the MMU back on! */ + set_sctlr_el2 x2 + ret x1 +SYM_FUNC_END(__pkvm_init_switch_pgd) + .popsection diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index 936328207bde..f36420a80474 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -6,12 +6,15 @@ #include <hyp/switch.h> +#include <asm/pgtable-types.h> #include <asm/kvm_asm.h> #include <asm/kvm_emulate.h> #include <asm/kvm_host.h> #include <asm/kvm_hyp.h> #include <asm/kvm_mmu.h> +#include <nvhe/mem_protect.h> +#include <nvhe/mm.h> #include <nvhe/trap_handler.h> DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params); @@ -106,6 +109,61 @@ static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt) __vgic_v3_restore_aprs(kern_hyp_va(cpu_if)); } +static void handle___pkvm_init(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(phys_addr_t, phys, host_ctxt, 1); + DECLARE_REG(unsigned long, size, host_ctxt, 2); + DECLARE_REG(unsigned long, nr_cpus, host_ctxt, 3); + DECLARE_REG(unsigned long *, per_cpu_base, host_ctxt, 4); + DECLARE_REG(u32, hyp_va_bits, host_ctxt, 5); + + /* + * __pkvm_init() will return only if an error occurred, otherwise it + * will tail-call in __pkvm_init_finalise() which will have to deal + * with the host context directly. 
+ */ + cpu_reg(host_ctxt, 1) = __pkvm_init(phys, size, nr_cpus, per_cpu_base, + hyp_va_bits); +} + +static void handle___pkvm_cpu_set_vector(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(enum arm64_hyp_spectre_vector, slot, host_ctxt, 1); + + cpu_reg(host_ctxt, 1) = pkvm_cpu_set_vector(slot); +} + +static void handle___pkvm_create_mappings(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(unsigned long, start, host_ctxt, 1); + DECLARE_REG(unsigned long, size, host_ctxt, 2); + DECLARE_REG(unsigned long, phys, host_ctxt, 3); + DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 4); + + cpu_reg(host_ctxt, 1) = __pkvm_create_mappings(start, size, phys, prot); +} + +static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(phys_addr_t, phys, host_ctxt, 1); + DECLARE_REG(size_t, size, host_ctxt, 2); + DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3); + + cpu_reg(host_ctxt, 1) = __pkvm_create_private_mapping(phys, size, prot); +} + +static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt) +{ + cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize(); +} + +static void handle___pkvm_mark_hyp(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(phys_addr_t, start, host_ctxt, 1); + DECLARE_REG(phys_addr_t, end, host_ctxt, 2); + + cpu_reg(host_ctxt, 1) = __pkvm_mark_hyp(start, end); +} typedef void (*hcall_t)(struct kvm_cpu_context *); #define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x @@ -125,6 +183,12 @@ static const hcall_t host_hcall[] = { HANDLE_FUNC(__kvm_get_mdcr_el2), HANDLE_FUNC(__vgic_v3_save_aprs), HANDLE_FUNC(__vgic_v3_restore_aprs), + HANDLE_FUNC(__pkvm_init), + HANDLE_FUNC(__pkvm_cpu_set_vector), + HANDLE_FUNC(__pkvm_create_mappings), + HANDLE_FUNC(__pkvm_create_private_mapping), + HANDLE_FUNC(__pkvm_prot_finalize), + HANDLE_FUNC(__pkvm_mark_hyp), }; static void handle_host_hcall(struct kvm_cpu_context *host_ctxt) @@ -177,7 +241,16 @@ void handle_trap(struct kvm_cpu_context *host_ctxt) case ESR_ELx_EC_SMC64: handle_host_smc(host_ctxt); break; + case ESR_ELx_EC_SVE: + sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0); + isb(); + sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2); + break; + case ESR_ELx_EC_IABT_LOW: + case ESR_ELx_EC_DABT_LOW: + handle_host_mem_abort(host_ctxt); + break; default: - hyp_panic(); + BUG(); } } diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c index 879559057dee..9f54833af400 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c @@ -18,8 +18,7 @@ u64 __ro_after_init hyp_cpu_logical_map[NR_CPUS] = { [0 ... 
NR_CPUS-1] = INVALID u64 cpu_logical_map(unsigned int cpu) { - if (cpu >= ARRAY_SIZE(hyp_cpu_logical_map)) - hyp_panic(); + BUG_ON(cpu >= ARRAY_SIZE(hyp_cpu_logical_map)); return hyp_cpu_logical_map[cpu]; } @@ -30,8 +29,7 @@ unsigned long __hyp_per_cpu_offset(unsigned int cpu) unsigned long this_cpu_base; unsigned long elf_base; - if (cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base)) - hyp_panic(); + BUG_ON(cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base)); cpu_base_array = (unsigned long *)&kvm_arm_hyp_percpu_base; this_cpu_base = kern_hyp_va(cpu_base_array[cpu]); diff --git a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S index cd119d82d8e3..f4562f417d3f 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S @@ -25,4 +25,5 @@ SECTIONS { BEGIN_HYP_SECTION(.data..percpu) PERCPU_INPUT(L1_CACHE_BYTES) END_HYP_SECTION + HYP_SECTION(.bss) } diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c new file mode 100644 index 000000000000..e342f7f4f4fb --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Google LLC + * Author: Quentin Perret <qperret@google.com> + */ + +#include <linux/kvm_host.h> +#include <asm/kvm_emulate.h> +#include <asm/kvm_hyp.h> +#include <asm/kvm_mmu.h> +#include <asm/kvm_pgtable.h> +#include <asm/stage2_pgtable.h> + +#include <hyp/switch.h> + +#include <nvhe/gfp.h> +#include <nvhe/memory.h> +#include <nvhe/mem_protect.h> +#include <nvhe/mm.h> + +#define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP) + +extern unsigned long hyp_nr_cpus; +struct host_kvm host_kvm; + +struct hyp_pool host_s2_mem; +struct hyp_pool host_s2_dev; + +/* + * Copies of the host's CPU features registers holding sanitized values. 
+ */ +u64 id_aa64mmfr0_el1_sys_val; +u64 id_aa64mmfr1_el1_sys_val; + +static const u8 pkvm_hyp_id = 1; + +static void *host_s2_zalloc_pages_exact(size_t size) +{ + return hyp_alloc_pages(&host_s2_mem, get_order(size)); +} + +static void *host_s2_zalloc_page(void *pool) +{ + return hyp_alloc_pages(pool, 0); +} + +static int prepare_s2_pools(void *mem_pgt_pool, void *dev_pgt_pool) +{ + unsigned long nr_pages, pfn; + int ret; + + pfn = hyp_virt_to_pfn(mem_pgt_pool); + nr_pages = host_s2_mem_pgtable_pages(); + ret = hyp_pool_init(&host_s2_mem, pfn, nr_pages, 0); + if (ret) + return ret; + + pfn = hyp_virt_to_pfn(dev_pgt_pool); + nr_pages = host_s2_dev_pgtable_pages(); + ret = hyp_pool_init(&host_s2_dev, pfn, nr_pages, 0); + if (ret) + return ret; + + host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) { + .zalloc_pages_exact = host_s2_zalloc_pages_exact, + .zalloc_page = host_s2_zalloc_page, + .phys_to_virt = hyp_phys_to_virt, + .virt_to_phys = hyp_virt_to_phys, + .page_count = hyp_page_count, + .get_page = hyp_get_page, + .put_page = hyp_put_page, + }; + + return 0; +} + +static void prepare_host_vtcr(void) +{ + u32 parange, phys_shift; + + /* The host stage 2 is id-mapped, so use parange for T0SZ */ + parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val); + phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange); + + host_kvm.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val, + id_aa64mmfr1_el1_sys_val, phys_shift); +} + +int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool) +{ + struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu; + int ret; + + prepare_host_vtcr(); + hyp_spin_lock_init(&host_kvm.lock); + + ret = prepare_s2_pools(mem_pgt_pool, dev_pgt_pool); + if (ret) + return ret; + + ret = kvm_pgtable_stage2_init_flags(&host_kvm.pgt, &host_kvm.arch, + &host_kvm.mm_ops, KVM_HOST_S2_FLAGS); + if (ret) + return ret; + + mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd); + mmu->arch = &host_kvm.arch; + mmu->pgt = &host_kvm.pgt; + mmu->vmid.vmid_gen = 0; + mmu->vmid.vmid = 0; + + return 0; +} + +int __pkvm_prot_finalize(void) +{ + struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu; + struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params); + + params->vttbr = kvm_get_vttbr(mmu); + params->vtcr = host_kvm.arch.vtcr; + params->hcr_el2 |= HCR_VM; + kvm_flush_dcache_to_poc(params, sizeof(*params)); + + write_sysreg(params->hcr_el2, hcr_el2); + __load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr); + + /* + * Make sure to have an ISB before the TLB maintenance below but only + * when __load_stage2() doesn't include one already. 
+ */ + asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT)); + + /* Invalidate stale HCR bits that may be cached in TLBs */ + __tlbi(vmalls12e1); + dsb(nsh); + isb(); + + return 0; +} + +static int host_stage2_unmap_dev_all(void) +{ + struct kvm_pgtable *pgt = &host_kvm.pgt; + struct memblock_region *reg; + u64 addr = 0; + int i, ret; + + /* Unmap all non-memory regions to recycle the pages */ + for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) { + reg = &hyp_memory[i]; + ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr); + if (ret) + return ret; + } + return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr); +} + +static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range) +{ + int cur, left = 0, right = hyp_memblock_nr; + struct memblock_region *reg; + phys_addr_t end; + + range->start = 0; + range->end = ULONG_MAX; + + /* The list of memblock regions is sorted, binary search it */ + while (left < right) { + cur = (left + right) >> 1; + reg = &hyp_memory[cur]; + end = reg->base + reg->size; + if (addr < reg->base) { + right = cur; + range->end = reg->base; + } else if (addr >= end) { + left = cur + 1; + range->start = end; + } else { + range->start = reg->base; + range->end = end; + return true; + } + } + + return false; +} + +static bool range_is_memory(u64 start, u64 end) +{ + struct kvm_mem_range r1, r2; + + if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2)) + return false; + if (r1.start != r2.start) + return false; + + return true; +} + +static inline int __host_stage2_idmap(u64 start, u64 end, + enum kvm_pgtable_prot prot, + struct hyp_pool *pool) +{ + return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start, + prot, pool); +} + +static int host_stage2_idmap(u64 addr) +{ + enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W; + struct kvm_mem_range range; + bool is_memory = find_mem_range(addr, &range); + struct hyp_pool *pool = is_memory ? &host_s2_mem : &host_s2_dev; + int ret; + + if (is_memory) + prot |= KVM_PGTABLE_PROT_X; + + hyp_spin_lock(&host_kvm.lock); + ret = kvm_pgtable_stage2_find_range(&host_kvm.pgt, addr, prot, &range); + if (ret) + goto unlock; + + ret = __host_stage2_idmap(range.start, range.end, prot, pool); + if (is_memory || ret != -ENOMEM) + goto unlock; + + /* + * host_s2_mem has been provided with enough pages to cover all of + * memory with page granularity, so we should never hit the ENOMEM case. + * However, it is difficult to know how much of the MMIO range we will + * need to cover upfront, so we may need to 'recycle' the pages if we + * run out. + */ + ret = host_stage2_unmap_dev_all(); + if (ret) + goto unlock; + + ret = __host_stage2_idmap(range.start, range.end, prot, pool); + +unlock: + hyp_spin_unlock(&host_kvm.lock); + + return ret; +} + +int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end) +{ + int ret; + + /* + * host_stage2_unmap_dev_all() currently relies on MMIO mappings being + * non-persistent, so don't allow changing page ownership in MMIO range. + */ + if (!range_is_memory(start, end)) + return -EINVAL; + + hyp_spin_lock(&host_kvm.lock); + ret = kvm_pgtable_stage2_set_owner(&host_kvm.pgt, start, end - start, + &host_s2_mem, pkvm_hyp_id); + hyp_spin_unlock(&host_kvm.lock); + + return ret != -EAGAIN ? 
ret : 0; +} + +void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt) +{ + struct kvm_vcpu_fault_info fault; + u64 esr, addr; + int ret = 0; + + esr = read_sysreg_el2(SYS_ESR); + BUG_ON(!__get_fault_info(esr, &fault)); + + addr = (fault.hpfar_el2 & HPFAR_MASK) << 8; + ret = host_stage2_idmap(addr); + BUG_ON(ret && ret != -EAGAIN); +} diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c new file mode 100644 index 000000000000..a8efdf0f9003 --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/mm.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Google LLC + * Author: Quentin Perret <qperret@google.com> + */ + +#include <linux/kvm_host.h> +#include <asm/kvm_hyp.h> +#include <asm/kvm_mmu.h> +#include <asm/kvm_pgtable.h> +#include <asm/spectre.h> + +#include <nvhe/early_alloc.h> +#include <nvhe/gfp.h> +#include <nvhe/memory.h> +#include <nvhe/mm.h> +#include <nvhe/spinlock.h> + +struct kvm_pgtable pkvm_pgtable; +hyp_spinlock_t pkvm_pgd_lock; +u64 __io_map_base; + +struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS]; +unsigned int hyp_memblock_nr; + +int __pkvm_create_mappings(unsigned long start, unsigned long size, + unsigned long phys, enum kvm_pgtable_prot prot) +{ + int err; + + hyp_spin_lock(&pkvm_pgd_lock); + err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot); + hyp_spin_unlock(&pkvm_pgd_lock); + + return err; +} + +unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size, + enum kvm_pgtable_prot prot) +{ + unsigned long addr; + int err; + + hyp_spin_lock(&pkvm_pgd_lock); + + size = PAGE_ALIGN(size + offset_in_page(phys)); + addr = __io_map_base; + __io_map_base += size; + + /* Are we overflowing on the vmemmap ? */ + if (__io_map_base > __hyp_vmemmap) { + __io_map_base -= size; + addr = (unsigned long)ERR_PTR(-ENOMEM); + goto out; + } + + err = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, size, phys, prot); + if (err) { + addr = (unsigned long)ERR_PTR(err); + goto out; + } + + addr = addr + offset_in_page(phys); +out: + hyp_spin_unlock(&pkvm_pgd_lock); + + return addr; +} + +int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot) +{ + unsigned long start = (unsigned long)from; + unsigned long end = (unsigned long)to; + unsigned long virt_addr; + phys_addr_t phys; + + start = start & PAGE_MASK; + end = PAGE_ALIGN(end); + + for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) { + int err; + + phys = hyp_virt_to_phys((void *)virt_addr); + err = __pkvm_create_mappings(virt_addr, PAGE_SIZE, phys, prot); + if (err) + return err; + } + + return 0; +} + +int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back) +{ + unsigned long start, end; + + hyp_vmemmap_range(phys, size, &start, &end); + + return __pkvm_create_mappings(start, end - start, back, PAGE_HYP); +} + +static void *__hyp_bp_vect_base; +int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot) +{ + void *vector; + + switch (slot) { + case HYP_VECTOR_DIRECT: { + vector = __kvm_hyp_vector; + break; + } + case HYP_VECTOR_SPECTRE_DIRECT: { + vector = __bp_harden_hyp_vecs; + break; + } + case HYP_VECTOR_INDIRECT: + case HYP_VECTOR_SPECTRE_INDIRECT: { + vector = (void *)__hyp_bp_vect_base; + break; + } + default: + return -EINVAL; + } + + vector = __kvm_vector_slot2addr(vector, slot); + *this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector; + + return 0; +} + +int hyp_map_vectors(void) +{ + phys_addr_t phys; + void *bp_base; + + if (!cpus_have_const_cap(ARM64_SPECTRE_V3A)) + return 0; + + phys = 
__hyp_pa(__bp_harden_hyp_vecs); + bp_base = (void *)__pkvm_create_private_mapping(phys, + __BP_HARDEN_HYP_VECS_SZ, + PAGE_HYP_EXEC); + if (IS_ERR_OR_NULL(bp_base)) + return PTR_ERR(bp_base); + + __hyp_bp_vect_base = bp_base; + + return 0; +} + +int hyp_create_idmap(u32 hyp_va_bits) +{ + unsigned long start, end; + + start = hyp_virt_to_phys((void *)__hyp_idmap_text_start); + start = ALIGN_DOWN(start, PAGE_SIZE); + + end = hyp_virt_to_phys((void *)__hyp_idmap_text_end); + end = ALIGN(end, PAGE_SIZE); + + /* + * One half of the VA space is reserved to linearly map portions of + * memory -- see va_layout.c for more details. The other half of the VA + * space contains the trampoline page, and needs some care. Split that + * second half in two and find the quarter of VA space not conflicting + * with the idmap to place the IOs and the vmemmap. IOs use the lower + * half of the quarter and the vmemmap the upper half. + */ + __io_map_base = start & BIT(hyp_va_bits - 2); + __io_map_base ^= BIT(hyp_va_bits - 2); + __hyp_vmemmap = __io_map_base | BIT(hyp_va_bits - 3); + + return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC); +} diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c new file mode 100644 index 000000000000..237e03bf0cb1 --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Google LLC + * Author: Quentin Perret <qperret@google.com> + */ + +#include <asm/kvm_hyp.h> +#include <nvhe/gfp.h> + +u64 __hyp_vmemmap; + +/* + * Index the hyp_vmemmap to find a potential buddy page, but make no assumption + * about its current state. + * + * Example buddy-tree for a 4-pages physically contiguous pool: + * + * o : Page 3 + * / + * o-o : Page 2 + * / + * / o : Page 1 + * / / + * o---o-o : Page 0 + * Order 2 1 0 + * + * Example of requests on this pool: + * __find_buddy_nocheck(pool, page 0, order 0) => page 1 + * __find_buddy_nocheck(pool, page 0, order 1) => page 2 + * __find_buddy_nocheck(pool, page 1, order 0) => page 0 + * __find_buddy_nocheck(pool, page 2, order 0) => page 3 + */ +static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool, + struct hyp_page *p, + unsigned int order) +{ + phys_addr_t addr = hyp_page_to_phys(p); + + addr ^= (PAGE_SIZE << order); + + /* + * Don't return a page outside the pool range -- it belongs to + * something else and may not be mapped in hyp_vmemmap. + */ + if (addr < pool->range_start || addr >= pool->range_end) + return NULL; + + return hyp_phys_to_page(addr); +} + +/* Find a buddy page currently available for allocation */ +static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool, + struct hyp_page *p, + unsigned int order) +{ + struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order); + + if (!buddy || buddy->order != order || list_empty(&buddy->node)) + return NULL; + + return buddy; + +} + +static void __hyp_attach_page(struct hyp_pool *pool, + struct hyp_page *p) +{ + unsigned int order = p->order; + struct hyp_page *buddy; + + memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order); + + /* + * Only the first struct hyp_page of a high-order page (otherwise known + * as the 'head') should have p->order set. The non-head pages should + * have p->order = HYP_NO_ORDER. Here @p may no longer be the head + * after coallescing, so make sure to mark it HYP_NO_ORDER proactively. 
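As an illustrative aside, not part of the patch: the buddy lookup in __find_buddy_nocheck() above is a single XOR of the page's physical address with the block size, followed by the range check against the pool. The sketch below reproduces the example requests from the comment at the top of this file; the 4 KiB page size and the pool base of 0x80000000 are assumptions made only for the demonstration.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL		/* assumed 4 KiB granule */

/* Same address computation as __find_buddy_nocheck(), minus the pool check. */
static uint64_t buddy_phys(uint64_t addr, unsigned int order)
{
	return addr ^ (PAGE_SIZE << order);
}

int main(void)
{
	uint64_t page0 = 0x80000000ULL;		/* assumed pool base (page 0) */
	uint64_t page1 = page0 + PAGE_SIZE;

	printf("%#llx\n", (unsigned long long)buddy_phys(page0, 0));	/* page 1: 0x80001000 */
	printf("%#llx\n", (unsigned long long)buddy_phys(page0, 1));	/* page 2: 0x80002000 */
	printf("%#llx\n", (unsigned long long)buddy_phys(page1, 0));	/* page 0: 0x80000000 */
	return 0;
}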
+ */ + p->order = HYP_NO_ORDER; + for (; (order + 1) < pool->max_order; order++) { + buddy = __find_buddy_avail(pool, p, order); + if (!buddy) + break; + + /* Take the buddy out of its list, and coallesce with @p */ + list_del_init(&buddy->node); + buddy->order = HYP_NO_ORDER; + p = min(p, buddy); + } + + /* Mark the new head, and insert it */ + p->order = order; + list_add_tail(&p->node, &pool->free_area[order]); +} + +static void hyp_attach_page(struct hyp_page *p) +{ + struct hyp_pool *pool = hyp_page_to_pool(p); + + hyp_spin_lock(&pool->lock); + __hyp_attach_page(pool, p); + hyp_spin_unlock(&pool->lock); +} + +static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool, + struct hyp_page *p, + unsigned int order) +{ + struct hyp_page *buddy; + + list_del_init(&p->node); + while (p->order > order) { + /* + * The buddy of order n - 1 currently has HYP_NO_ORDER as it + * is covered by a higher-level page (whose head is @p). Use + * __find_buddy_nocheck() to find it and inject it in the + * free_list[n - 1], effectively splitting @p in half. + */ + p->order--; + buddy = __find_buddy_nocheck(pool, p, p->order); + buddy->order = p->order; + list_add_tail(&buddy->node, &pool->free_area[buddy->order]); + } + + return p; +} + +void hyp_put_page(void *addr) +{ + struct hyp_page *p = hyp_virt_to_page(addr); + + if (hyp_page_ref_dec_and_test(p)) + hyp_attach_page(p); +} + +void hyp_get_page(void *addr) +{ + struct hyp_page *p = hyp_virt_to_page(addr); + + hyp_page_ref_inc(p); +} + +void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order) +{ + unsigned int i = order; + struct hyp_page *p; + + hyp_spin_lock(&pool->lock); + + /* Look for a high-enough-order page */ + while (i < pool->max_order && list_empty(&pool->free_area[i])) + i++; + if (i >= pool->max_order) { + hyp_spin_unlock(&pool->lock); + return NULL; + } + + /* Extract it from the tree at the right order */ + p = list_first_entry(&pool->free_area[i], struct hyp_page, node); + p = __hyp_extract_page(pool, p, order); + + hyp_spin_unlock(&pool->lock); + hyp_set_page_refcounted(p); + + return hyp_page_to_virt(p); +} + +int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages, + unsigned int reserved_pages) +{ + phys_addr_t phys = hyp_pfn_to_phys(pfn); + struct hyp_page *p; + int i; + + hyp_spin_lock_init(&pool->lock); + pool->max_order = min(MAX_ORDER, get_order(nr_pages << PAGE_SHIFT)); + for (i = 0; i < pool->max_order; i++) + INIT_LIST_HEAD(&pool->free_area[i]); + pool->range_start = phys; + pool->range_end = phys + (nr_pages << PAGE_SHIFT); + + /* Init the vmemmap portion */ + p = hyp_phys_to_page(phys); + memset(p, 0, sizeof(*p) * nr_pages); + for (i = 0; i < nr_pages; i++) { + p[i].pool = pool; + INIT_LIST_HEAD(&p[i].node); + } + + /* Attach the unused pages to the buddy tree */ + for (i = reserved_pages; i < nr_pages; i++) + __hyp_attach_page(pool, &p[i]); + + return 0; +} diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c index 63de71c0481e..08508783ec3d 100644 --- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c +++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c @@ -11,6 +11,7 @@ #include <linux/kvm_host.h> #include <uapi/linux/psci.h> +#include <nvhe/memory.h> #include <nvhe/trap_handler.h> void kvm_hyp_cpu_entry(unsigned long r0); @@ -20,9 +21,6 @@ void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt); /* Config options set by the host. 
*/ struct kvm_host_psci_config __ro_after_init kvm_host_psci_config; -s64 __ro_after_init hyp_physvirt_offset; - -#define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset) #define INVALID_CPU_ID UINT_MAX diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c new file mode 100644 index 000000000000..7488f53b0aa2 --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Google LLC + * Author: Quentin Perret <qperret@google.com> + */ + +#include <linux/kvm_host.h> +#include <asm/kvm_hyp.h> +#include <asm/kvm_mmu.h> +#include <asm/kvm_pgtable.h> + +#include <nvhe/early_alloc.h> +#include <nvhe/gfp.h> +#include <nvhe/memory.h> +#include <nvhe/mem_protect.h> +#include <nvhe/mm.h> +#include <nvhe/trap_handler.h> + +struct hyp_pool hpool; +struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops; +unsigned long hyp_nr_cpus; + +#define hyp_percpu_size ((unsigned long)__per_cpu_end - \ + (unsigned long)__per_cpu_start) + +static void *vmemmap_base; +static void *hyp_pgt_base; +static void *host_s2_mem_pgt_base; +static void *host_s2_dev_pgt_base; + +static int divide_memory_pool(void *virt, unsigned long size) +{ + unsigned long vstart, vend, nr_pages; + + hyp_early_alloc_init(virt, size); + + hyp_vmemmap_range(__hyp_pa(virt), size, &vstart, &vend); + nr_pages = (vend - vstart) >> PAGE_SHIFT; + vmemmap_base = hyp_early_alloc_contig(nr_pages); + if (!vmemmap_base) + return -ENOMEM; + + nr_pages = hyp_s1_pgtable_pages(); + hyp_pgt_base = hyp_early_alloc_contig(nr_pages); + if (!hyp_pgt_base) + return -ENOMEM; + + nr_pages = host_s2_mem_pgtable_pages(); + host_s2_mem_pgt_base = hyp_early_alloc_contig(nr_pages); + if (!host_s2_mem_pgt_base) + return -ENOMEM; + + nr_pages = host_s2_dev_pgtable_pages(); + host_s2_dev_pgt_base = hyp_early_alloc_contig(nr_pages); + if (!host_s2_dev_pgt_base) + return -ENOMEM; + + return 0; +} + +static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size, + unsigned long *per_cpu_base, + u32 hyp_va_bits) +{ + void *start, *end, *virt = hyp_phys_to_virt(phys); + unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT; + int ret, i; + + /* Recreate the hyp page-table using the early page allocator */ + hyp_early_alloc_init(hyp_pgt_base, pgt_size); + ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits, + &hyp_early_alloc_mm_ops); + if (ret) + return ret; + + ret = hyp_create_idmap(hyp_va_bits); + if (ret) + return ret; + + ret = hyp_map_vectors(); + if (ret) + return ret; + + ret = hyp_back_vmemmap(phys, size, hyp_virt_to_phys(vmemmap_base)); + if (ret) + return ret; + + ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC); + if (ret) + return ret; + + ret = pkvm_create_mappings(__start_rodata, __end_rodata, PAGE_HYP_RO); + if (ret) + return ret; + + ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO); + if (ret) + return ret; + + ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP); + if (ret) + return ret; + + ret = pkvm_create_mappings(__hyp_bss_end, __bss_stop, PAGE_HYP_RO); + if (ret) + return ret; + + ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP); + if (ret) + return ret; + + for (i = 0; i < hyp_nr_cpus; i++) { + start = (void *)kern_hyp_va(per_cpu_base[i]); + end = start + PAGE_ALIGN(hyp_percpu_size); + ret = pkvm_create_mappings(start, end, PAGE_HYP); + if (ret) + return ret; + + end = (void *)per_cpu_ptr(&kvm_init_params, i)->stack_hyp_va; + start = end - PAGE_SIZE; + ret = 
pkvm_create_mappings(start, end, PAGE_HYP); + if (ret) + return ret; + } + + return 0; +} + +static void update_nvhe_init_params(void) +{ + struct kvm_nvhe_init_params *params; + unsigned long i; + + for (i = 0; i < hyp_nr_cpus; i++) { + params = per_cpu_ptr(&kvm_init_params, i); + params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd); + __flush_dcache_area(params, sizeof(*params)); + } +} + +static void *hyp_zalloc_hyp_page(void *arg) +{ + return hyp_alloc_pages(&hpool, 0); +} + +void __noreturn __pkvm_init_finalise(void) +{ + struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data); + struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt; + unsigned long nr_pages, reserved_pages, pfn; + int ret; + + /* Now that the vmemmap is backed, install the full-fledged allocator */ + pfn = hyp_virt_to_pfn(hyp_pgt_base); + nr_pages = hyp_s1_pgtable_pages(); + reserved_pages = hyp_early_alloc_nr_used_pages(); + ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages); + if (ret) + goto out; + + ret = kvm_host_prepare_stage2(host_s2_mem_pgt_base, host_s2_dev_pgt_base); + if (ret) + goto out; + + pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) { + .zalloc_page = hyp_zalloc_hyp_page, + .phys_to_virt = hyp_phys_to_virt, + .virt_to_phys = hyp_virt_to_phys, + .get_page = hyp_get_page, + .put_page = hyp_put_page, + }; + pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops; + +out: + /* + * We tail-called to here from handle___pkvm_init() and will not return, + * so make sure to propagate the return value to the host. + */ + cpu_reg(host_ctxt, 1) = ret; + + __host_enter(host_ctxt); +} + +int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus, + unsigned long *per_cpu_base, u32 hyp_va_bits) +{ + struct kvm_nvhe_init_params *params; + void *virt = hyp_phys_to_virt(phys); + void (*fn)(phys_addr_t params_pa, void *finalize_fn_va); + int ret; + + if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size)) + return -EINVAL; + + hyp_spin_lock_init(&pkvm_pgd_lock); + hyp_nr_cpus = nr_cpus; + + ret = divide_memory_pool(virt, size); + if (ret) + return ret; + + ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits); + if (ret) + return ret; + + update_nvhe_init_params(); + + /* Jump in the idmap page to switch to the new page-tables */ + params = this_cpu_ptr(&kvm_init_params); + fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd); + fn(__hyp_pa(params), __pkvm_init_finalise); + + unreachable(); +} diff --git a/arch/arm64/kvm/hyp/nvhe/stub.c b/arch/arm64/kvm/hyp/nvhe/stub.c new file mode 100644 index 000000000000..c0aa6bbfd79d --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/stub.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Stubs for out-of-line function calls caused by re-using kernel + * infrastructure at EL2. 
+ * + * Copyright (C) 2020 - Google LLC + */ + +#include <linux/list.h> + +#ifdef CONFIG_DEBUG_LIST +bool __list_add_valid(struct list_head *new, struct list_head *prev, + struct list_head *next) +{ + return true; +} + +bool __list_del_entry_valid(struct list_head *entry) +{ + return true; +} +#endif diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 68ab6b4d5141..e9f6ea704d07 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -28,6 +28,8 @@ #include <asm/processor.h> #include <asm/thread_info.h> +#include <nvhe/mem_protect.h> + /* Non-VHE specific context */ DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data); DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt); @@ -41,9 +43,9 @@ static void __activate_traps(struct kvm_vcpu *vcpu) __activate_traps_common(vcpu); val = CPTR_EL2_DEFAULT; - val |= CPTR_EL2_TTA | CPTR_EL2_TZ | CPTR_EL2_TAM; + val |= CPTR_EL2_TTA | CPTR_EL2_TAM; if (!update_fp_enabled(vcpu)) { - val |= CPTR_EL2_TFP; + val |= CPTR_EL2_TFP | CPTR_EL2_TZ; __activate_traps_fpsimd32(vcpu); } @@ -68,7 +70,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu) static void __deactivate_traps(struct kvm_vcpu *vcpu) { extern char __kvm_hyp_host_vector[]; - u64 mdcr_el2; + u64 mdcr_el2, cptr; ___deactivate_traps(vcpu); @@ -95,19 +97,17 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) mdcr_el2 &= MDCR_EL2_HPMN_MASK; mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT; + mdcr_el2 |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT; write_sysreg(mdcr_el2, mdcr_el2); - if (is_protected_kvm_enabled()) - write_sysreg(HCR_HOST_NVHE_PROTECTED_FLAGS, hcr_el2); - else - write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2); - write_sysreg(CPTR_EL2_DEFAULT, cptr_el2); - write_sysreg(__kvm_hyp_host_vector, vbar_el2); -} + write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2); -static void __load_host_stage2(void) -{ - write_sysreg(0, vttbr_el2); + cptr = CPTR_EL2_DEFAULT; + if (vcpu_has_sve(vcpu) && (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)) + cptr |= CPTR_EL2_TZ; + + write_sysreg(cptr, cptr_el2); + write_sysreg(__kvm_hyp_host_vector, vbar_el2); } /* Save VGICv3 state on non-VHE systems */ diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c index 229b06748c20..83dc3b271bc5 100644 --- a/arch/arm64/kvm/hyp/nvhe/tlb.c +++ b/arch/arm64/kvm/hyp/nvhe/tlb.c @@ -8,6 +8,8 @@ #include <asm/kvm_mmu.h> #include <asm/tlbflush.h> +#include <nvhe/mem_protect.h> + struct tlb_inv_context { u64 tcr; }; @@ -43,7 +45,7 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu, static void __tlb_switch_to_host(struct tlb_inv_context *cxt) { - write_sysreg(0, vttbr_el2); + __load_host_stage2(); if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) { /* Ensure write of the host VMID */ diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index 926fc07074f5..c37c1dc4feaf 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -9,8 +9,7 @@ #include <linux/bitfield.h> #include <asm/kvm_pgtable.h> - -#define KVM_PGTABLE_MAX_LEVELS 4U +#include <asm/stage2_pgtable.h> #define KVM_PTE_VALID BIT(0) @@ -49,6 +48,11 @@ KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \ KVM_PTE_LEAF_ATTR_HI_S2_XN) +#define KVM_PTE_LEAF_ATTR_S2_IGNORED GENMASK(58, 55) + +#define KVM_INVALID_PTE_OWNER_MASK GENMASK(63, 56) +#define KVM_MAX_OWNER_ID 1 + struct kvm_pgtable_walk_data { struct kvm_pgtable *pgt; struct kvm_pgtable_walker *walker; @@ -68,21 +72,36 @@ static u64 kvm_granule_size(u32 level) return 
BIT(kvm_granule_shift(level)); } -static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level) +#define KVM_PHYS_INVALID (-1ULL) + +static bool kvm_phys_is_valid(u64 phys) { - u64 granule = kvm_granule_size(level); + return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_PARANGE_MAX)); +} +static bool kvm_level_supports_block_mapping(u32 level) +{ /* * Reject invalid block mappings and don't bother with 4TB mappings for * 52-bit PAs. */ - if (level == 0 || (PAGE_SIZE != SZ_4K && level == 1)) + return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1)); +} + +static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level) +{ + u64 granule = kvm_granule_size(level); + + if (!kvm_level_supports_block_mapping(level)) return false; if (granule > (end - addr)) return false; - return IS_ALIGNED(addr, granule) && IS_ALIGNED(phys, granule); + if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule)) + return false; + + return IS_ALIGNED(addr, granule); } static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level) @@ -152,20 +171,20 @@ static kvm_pte_t kvm_phys_to_pte(u64 pa) return pte; } -static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte) +static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops) { - return __va(kvm_pte_to_phys(pte)); + return mm_ops->phys_to_virt(kvm_pte_to_phys(pte)); } -static void kvm_set_invalid_pte(kvm_pte_t *ptep) +static void kvm_clear_pte(kvm_pte_t *ptep) { - kvm_pte_t pte = *ptep; - WRITE_ONCE(*ptep, pte & ~KVM_PTE_VALID); + WRITE_ONCE(*ptep, 0); } -static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp) +static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp, + struct kvm_pgtable_mm_ops *mm_ops) { - kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(__pa(childp)); + kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp)); pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE); pte |= KVM_PTE_VALID; @@ -187,6 +206,11 @@ static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level) return pte; } +static kvm_pte_t kvm_init_invalid_leaf_owner(u8 owner_id) +{ + return FIELD_PREP(KVM_INVALID_PTE_OWNER_MASK, owner_id); +} + static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data, u64 addr, u32 level, kvm_pte_t *ptep, enum kvm_pgtable_walk_flags flag) @@ -228,7 +252,7 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data, goto out; } - childp = kvm_pte_follow(pte); + childp = kvm_pte_follow(pte, data->pgt->mm_ops); ret = __kvm_pgtable_walk(data, childp, level + 1); if (ret) goto out; @@ -303,12 +327,12 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size, } struct hyp_map_data { - u64 phys; - kvm_pte_t attr; + u64 phys; + kvm_pte_t attr; + struct kvm_pgtable_mm_ops *mm_ops; }; -static int hyp_map_set_prot_attr(enum kvm_pgtable_prot prot, - struct hyp_map_data *data) +static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep) { bool device = prot & KVM_PGTABLE_PROT_DEVICE; u32 mtype = device ? 
MT_DEVICE_nGnRE : MT_NORMAL; @@ -333,7 +357,8 @@ static int hyp_map_set_prot_attr(enum kvm_pgtable_prot prot, attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, ap); attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh); attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF; - data->attr = attr; + *ptep = attr; + return 0; } @@ -359,6 +384,8 @@ static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, enum kvm_pgtable_walk_flags flag, void * const arg) { kvm_pte_t *childp; + struct hyp_map_data *data = arg; + struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops; if (hyp_map_walker_try_leaf(addr, end, level, ptep, arg)) return 0; @@ -366,11 +393,11 @@ static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1)) return -EINVAL; - childp = (kvm_pte_t *)get_zeroed_page(GFP_KERNEL); + childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL); if (!childp) return -ENOMEM; - kvm_set_table_pte(ptep, childp); + kvm_set_table_pte(ptep, childp, mm_ops); return 0; } @@ -380,6 +407,7 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, int ret; struct hyp_map_data map_data = { .phys = ALIGN_DOWN(phys, PAGE_SIZE), + .mm_ops = pgt->mm_ops, }; struct kvm_pgtable_walker walker = { .cb = hyp_map_walker, @@ -387,7 +415,7 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, .arg = &map_data, }; - ret = hyp_map_set_prot_attr(prot, &map_data); + ret = hyp_set_prot_attr(prot, &map_data.attr); if (ret) return ret; @@ -397,16 +425,18 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, return ret; } -int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits) +int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits, + struct kvm_pgtable_mm_ops *mm_ops) { u64 levels = ARM64_HW_PGTABLE_LEVELS(va_bits); - pgt->pgd = (kvm_pte_t *)get_zeroed_page(GFP_KERNEL); + pgt->pgd = (kvm_pte_t *)mm_ops->zalloc_page(NULL); if (!pgt->pgd) return -ENOMEM; pgt->ia_bits = va_bits; pgt->start_level = KVM_PGTABLE_MAX_LEVELS - levels; + pgt->mm_ops = mm_ops; pgt->mmu = NULL; return 0; } @@ -414,7 +444,9 @@ int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits) static int hyp_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, enum kvm_pgtable_walk_flags flag, void * const arg) { - free_page((unsigned long)kvm_pte_follow(*ptep)); + struct kvm_pgtable_mm_ops *mm_ops = arg; + + mm_ops->put_page((void *)kvm_pte_follow(*ptep, mm_ops)); return 0; } @@ -423,29 +455,75 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt) struct kvm_pgtable_walker walker = { .cb = hyp_free_walker, .flags = KVM_PGTABLE_WALK_TABLE_POST, + .arg = pgt->mm_ops, }; WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker)); - free_page((unsigned long)pgt->pgd); + pgt->mm_ops->put_page(pgt->pgd); pgt->pgd = NULL; } struct stage2_map_data { u64 phys; kvm_pte_t attr; + u8 owner_id; kvm_pte_t *anchor; + kvm_pte_t *childp; struct kvm_s2_mmu *mmu; - struct kvm_mmu_memory_cache *memcache; + void *memcache; + + struct kvm_pgtable_mm_ops *mm_ops; }; -static int stage2_map_set_prot_attr(enum kvm_pgtable_prot prot, - struct stage2_map_data *data) +u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift) +{ + u64 vtcr = VTCR_EL2_FLAGS; + u8 lvls; + + vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT; + vtcr |= VTCR_EL2_T0SZ(phys_shift); + /* + * Use a minimum 2 level page table to prevent splitting + * host PMD huge pages at stage2. 
+ */ + lvls = stage2_pgtable_levels(phys_shift); + if (lvls < 2) + lvls = 2; + vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls); + + /* + * Enable the Hardware Access Flag management, unconditionally + * on all CPUs. The features is RES0 on CPUs without the support + * and must be ignored by the CPUs. + */ + vtcr |= VTCR_EL2_HA; + + /* Set the vmid bits */ + vtcr |= (get_vmid_bits(mmfr1) == 16) ? + VTCR_EL2_VS_16BIT : + VTCR_EL2_VS_8BIT; + + return vtcr; +} + +static bool stage2_has_fwb(struct kvm_pgtable *pgt) +{ + if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) + return false; + + return !(pgt->flags & KVM_PGTABLE_S2_NOFWB); +} + +#define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt)) + +static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot, + kvm_pte_t *ptep) { bool device = prot & KVM_PGTABLE_PROT_DEVICE; - kvm_pte_t attr = device ? PAGE_S2_MEMATTR(DEVICE_nGnRE) : - PAGE_S2_MEMATTR(NORMAL); + kvm_pte_t attr = device ? KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) : + KVM_S2_MEMATTR(pgt, NORMAL); u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS; if (!(prot & KVM_PGTABLE_PROT_X)) @@ -461,44 +539,78 @@ static int stage2_map_set_prot_attr(enum kvm_pgtable_prot prot, attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh); attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF; - data->attr = attr; + *ptep = attr; + return 0; } +static bool stage2_pte_needs_update(kvm_pte_t old, kvm_pte_t new) +{ + if (!kvm_pte_valid(old) || !kvm_pte_valid(new)) + return true; + + return ((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS)); +} + +static bool stage2_pte_is_counted(kvm_pte_t pte) +{ + /* + * The refcount tracks valid entries as well as invalid entries if they + * encode ownership of a page to another entity than the page-table + * owner, whose id is 0. + */ + return !!pte; +} + +static void stage2_put_pte(kvm_pte_t *ptep, struct kvm_s2_mmu *mmu, u64 addr, + u32 level, struct kvm_pgtable_mm_ops *mm_ops) +{ + /* + * Clear the existing PTE, and perform break-before-make with + * TLB maintenance if it was valid. + */ + if (kvm_pte_valid(*ptep)) { + kvm_clear_pte(ptep); + kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level); + } + + mm_ops->put_page(ptep); +} + static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, struct stage2_map_data *data) { kvm_pte_t new, old = *ptep; u64 granule = kvm_granule_size(level), phys = data->phys; - struct page *page = virt_to_page(ptep); + struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops; if (!kvm_block_mapping_supported(addr, end, phys, level)) return -E2BIG; - new = kvm_init_valid_leaf_pte(phys, data->attr, level); - if (kvm_pte_valid(old)) { + if (kvm_phys_is_valid(phys)) + new = kvm_init_valid_leaf_pte(phys, data->attr, level); + else + new = kvm_init_invalid_leaf_owner(data->owner_id); + + if (stage2_pte_is_counted(old)) { /* * Skip updating the PTE if we are trying to recreate the exact * same mapping or only change the access permissions. Instead, * the vCPU will exit one more time from guest if still needed * and then go through the path of relaxing permissions. */ - if (!((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS))) + if (!stage2_pte_needs_update(old, new)) return -EAGAIN; - /* - * There's an existing different valid leaf entry, so perform - * break-before-make. 
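/*
 * Illustrative sketch, not part of the patch: the break-before-make
 * sequence that stage2_put_pte() plus the smp_store_release() in
 * stage2_map_walker_try_leaf() implement when a counted entry is replaced.
 * The helper name is hypothetical; the calls it makes are the ones the
 * patch itself uses.
 */
static void example_stage2_break_before_make(kvm_pte_t *ptep, kvm_pte_t new,
					     struct kvm_s2_mmu *mmu,
					     u64 addr, u32 level)
{
	/* break: zap the old entry so the hardware walker cannot follow it */
	WRITE_ONCE(*ptep, 0);
	/* flush any stale stage-2 TLB entries for this IPA range */
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level);
	/* make: publish the new entry once the old one is unreachable */
	smp_store_release(ptep, new);
}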
- */ - kvm_set_invalid_pte(ptep); - kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level); - put_page(page); + stage2_put_pte(ptep, data->mmu, addr, level, mm_ops); } smp_store_release(ptep, new); - get_page(page); - data->phys += granule; + if (stage2_pte_is_counted(new)) + mm_ops->get_page(ptep); + if (kvm_phys_is_valid(phys)) + data->phys += granule; return 0; } @@ -512,7 +624,8 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level, if (!kvm_block_mapping_supported(addr, end, data->phys, level)) return 0; - kvm_set_invalid_pte(ptep); + data->childp = kvm_pte_follow(*ptep, data->mm_ops); + kvm_clear_pte(ptep); /* * Invalidate the whole stage-2, as we may have numerous leaf @@ -527,13 +640,13 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level, static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, struct stage2_map_data *data) { - int ret; + struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops; kvm_pte_t *childp, pte = *ptep; - struct page *page = virt_to_page(ptep); + int ret; if (data->anchor) { - if (kvm_pte_valid(pte)) - put_page(page); + if (stage2_pte_is_counted(pte)) + mm_ops->put_page(ptep); return 0; } @@ -548,7 +661,7 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, if (!data->memcache) return -ENOMEM; - childp = kvm_mmu_memory_cache_alloc(data->memcache); + childp = mm_ops->zalloc_page(data->memcache); if (!childp) return -ENOMEM; @@ -557,14 +670,11 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, * a table. Accesses beyond 'end' that fall within the new table * will be mapped lazily. */ - if (kvm_pte_valid(pte)) { - kvm_set_invalid_pte(ptep); - kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level); - put_page(page); - } + if (stage2_pte_is_counted(pte)) + stage2_put_pte(ptep, data->mmu, addr, level, mm_ops); - kvm_set_table_pte(ptep, childp); - get_page(page); + kvm_set_table_pte(ptep, childp, mm_ops); + mm_ops->get_page(ptep); return 0; } @@ -573,19 +683,25 @@ static int stage2_map_walk_table_post(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, struct stage2_map_data *data) { + struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops; + kvm_pte_t *childp; int ret = 0; if (!data->anchor) return 0; - free_page((unsigned long)kvm_pte_follow(*ptep)); - put_page(virt_to_page(ptep)); - if (data->anchor == ptep) { + childp = data->childp; data->anchor = NULL; + data->childp = NULL; ret = stage2_map_walk_leaf(addr, end, level, ptep, data); + } else { + childp = kvm_pte_follow(*ptep, mm_ops); } + mm_ops->put_page(childp); + mm_ops->put_page(ptep); + return ret; } @@ -627,13 +743,14 @@ static int stage2_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, enum kvm_pgtable_prot prot, - struct kvm_mmu_memory_cache *mc) + void *mc) { int ret; struct stage2_map_data map_data = { .phys = ALIGN_DOWN(phys, PAGE_SIZE), .mmu = pgt->mmu, .memcache = mc, + .mm_ops = pgt->mm_ops, }; struct kvm_pgtable_walker walker = { .cb = stage2_map_walker, @@ -643,7 +760,10 @@ int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, .arg = &map_data, }; - ret = stage2_map_set_prot_attr(prot, &map_data); + if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys))) + return -EINVAL; + + ret = stage2_set_prot_attr(pgt, prot, &map_data.attr); if (ret) return ret; @@ -652,38 +772,63 @@ int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, return ret; } -static 
void stage2_flush_dcache(void *addr, u64 size) +int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size, + void *mc, u8 owner_id) { - if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) - return; + int ret; + struct stage2_map_data map_data = { + .phys = KVM_PHYS_INVALID, + .mmu = pgt->mmu, + .memcache = mc, + .mm_ops = pgt->mm_ops, + .owner_id = owner_id, + }; + struct kvm_pgtable_walker walker = { + .cb = stage2_map_walker, + .flags = KVM_PGTABLE_WALK_TABLE_PRE | + KVM_PGTABLE_WALK_LEAF | + KVM_PGTABLE_WALK_TABLE_POST, + .arg = &map_data, + }; + + if (owner_id > KVM_MAX_OWNER_ID) + return -EINVAL; - __flush_dcache_area(addr, size); + ret = kvm_pgtable_walk(pgt, addr, size, &walker); + return ret; } -static bool stage2_pte_cacheable(kvm_pte_t pte) +static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte) { u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR; - return memattr == PAGE_S2_MEMATTR(NORMAL); + return memattr == KVM_S2_MEMATTR(pgt, NORMAL); } static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, enum kvm_pgtable_walk_flags flag, void * const arg) { - struct kvm_s2_mmu *mmu = arg; + struct kvm_pgtable *pgt = arg; + struct kvm_s2_mmu *mmu = pgt->mmu; + struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops; kvm_pte_t pte = *ptep, *childp = NULL; bool need_flush = false; - if (!kvm_pte_valid(pte)) + if (!kvm_pte_valid(pte)) { + if (stage2_pte_is_counted(pte)) { + kvm_clear_pte(ptep); + mm_ops->put_page(ptep); + } return 0; + } if (kvm_pte_table(pte, level)) { - childp = kvm_pte_follow(pte); + childp = kvm_pte_follow(pte, mm_ops); - if (page_count(virt_to_page(childp)) != 1) + if (mm_ops->page_count(childp) != 1) return 0; - } else if (stage2_pte_cacheable(pte)) { - need_flush = true; + } else if (stage2_pte_cacheable(pgt, pte)) { + need_flush = !stage2_has_fwb(pgt); } /* @@ -691,17 +836,15 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, * block entry and rely on the remaining portions being faulted * back lazily. 
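/*
 * Illustrative sketch, not part of the patch: with
 * kvm_pgtable_stage2_set_owner() a PTE may be invalid (KVM_PTE_VALID
 * clear) yet non-zero, carrying only an owner id in
 * KVM_INVALID_PTE_OWNER_MASK, which is why stage2_pte_is_counted()
 * refcounts any non-zero entry. Reading the annotation back could look
 * like this hypothetical helper:
 */
static u8 example_pte_owner_id(kvm_pte_t pte)
{
	if (kvm_pte_valid(pte))
		return 0;	/* valid mappings belong to the default owner, id 0 */

	return FIELD_GET(KVM_INVALID_PTE_OWNER_MASK, pte);
}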
*/ - kvm_set_invalid_pte(ptep); - kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level); - put_page(virt_to_page(ptep)); + stage2_put_pte(ptep, mmu, addr, level, mm_ops); if (need_flush) { - stage2_flush_dcache(kvm_pte_follow(pte), + __flush_dcache_area(kvm_pte_follow(pte, mm_ops), kvm_granule_size(level)); } if (childp) - free_page((unsigned long)childp); + mm_ops->put_page(childp); return 0; } @@ -710,7 +853,7 @@ int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size) { struct kvm_pgtable_walker walker = { .cb = stage2_unmap_walker, - .arg = pgt->mmu, + .arg = pgt, .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST, }; @@ -842,12 +985,14 @@ static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, enum kvm_pgtable_walk_flags flag, void * const arg) { + struct kvm_pgtable *pgt = arg; + struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops; kvm_pte_t pte = *ptep; - if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pte)) + if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte)) return 0; - stage2_flush_dcache(kvm_pte_follow(pte), kvm_granule_size(level)); + __flush_dcache_area(kvm_pte_follow(pte, mm_ops), kvm_granule_size(level)); return 0; } @@ -856,30 +1001,35 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size) struct kvm_pgtable_walker walker = { .cb = stage2_flush_walker, .flags = KVM_PGTABLE_WALK_LEAF, + .arg = pgt, }; - if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) + if (stage2_has_fwb(pgt)) return 0; return kvm_pgtable_walk(pgt, addr, size, &walker); } -int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm *kvm) +int kvm_pgtable_stage2_init_flags(struct kvm_pgtable *pgt, struct kvm_arch *arch, + struct kvm_pgtable_mm_ops *mm_ops, + enum kvm_pgtable_stage2_flags flags) { size_t pgd_sz; - u64 vtcr = kvm->arch.vtcr; + u64 vtcr = arch->vtcr; u32 ia_bits = VTCR_EL2_IPA(vtcr); u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr); u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0; pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE; - pgt->pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL_ACCOUNT | __GFP_ZERO); + pgt->pgd = mm_ops->zalloc_pages_exact(pgd_sz); if (!pgt->pgd) return -ENOMEM; pgt->ia_bits = ia_bits; pgt->start_level = start_level; - pgt->mmu = &kvm->arch.mmu; + pgt->mm_ops = mm_ops; + pgt->mmu = &arch->mmu; + pgt->flags = flags; /* Ensure zeroed PGD pages are visible to the hardware walker */ dsb(ishst); @@ -890,15 +1040,16 @@ static int stage2_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, enum kvm_pgtable_walk_flags flag, void * const arg) { + struct kvm_pgtable_mm_ops *mm_ops = arg; kvm_pte_t pte = *ptep; - if (!kvm_pte_valid(pte)) + if (!stage2_pte_is_counted(pte)) return 0; - put_page(virt_to_page(ptep)); + mm_ops->put_page(ptep); if (kvm_pte_table(pte, level)) - free_page((unsigned long)kvm_pte_follow(pte)); + mm_ops->put_page(kvm_pte_follow(pte, mm_ops)); return 0; } @@ -910,10 +1061,85 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt) .cb = stage2_free_walker, .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST, + .arg = pgt->mm_ops, }; WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker)); pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE; - free_pages_exact(pgt->pgd, pgd_sz); + pgt->mm_ops->free_pages_exact(pgt->pgd, pgd_sz); pgt->pgd = NULL; } + +#define KVM_PTE_LEAF_S2_COMPAT_MASK (KVM_PTE_LEAF_ATTR_S2_PERMS | \ + KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR | \ + KVM_PTE_LEAF_ATTR_S2_IGNORED) + +static int stage2_check_permission_walker(u64 addr, u64 
end, u32 level, + kvm_pte_t *ptep, + enum kvm_pgtable_walk_flags flag, + void * const arg) +{ + kvm_pte_t old_attr, pte = *ptep, *new_attr = arg; + + /* + * Compatible mappings are either invalid and owned by the page-table + * owner (whose id is 0), or valid with matching permission attributes. + */ + if (kvm_pte_valid(pte)) { + old_attr = pte & KVM_PTE_LEAF_S2_COMPAT_MASK; + if (old_attr != *new_attr) + return -EEXIST; + } else if (pte) { + return -EEXIST; + } + + return 0; +} + +int kvm_pgtable_stage2_find_range(struct kvm_pgtable *pgt, u64 addr, + enum kvm_pgtable_prot prot, + struct kvm_mem_range *range) +{ + kvm_pte_t attr; + struct kvm_pgtable_walker check_perm_walker = { + .cb = stage2_check_permission_walker, + .flags = KVM_PGTABLE_WALK_LEAF, + .arg = &attr, + }; + u64 granule, start, end; + u32 level; + int ret; + + ret = stage2_set_prot_attr(pgt, prot, &attr); + if (ret) + return ret; + attr &= KVM_PTE_LEAF_S2_COMPAT_MASK; + + for (level = pgt->start_level; level < KVM_PGTABLE_MAX_LEVELS; level++) { + granule = kvm_granule_size(level); + start = ALIGN_DOWN(addr, granule); + end = start + granule; + + if (!kvm_level_supports_block_mapping(level)) + continue; + + if (start < range->start || range->end < end) + continue; + + /* + * Check the presence of existing mappings with incompatible + * permissions within the current block range, and try one level + * deeper if one is found. + */ + ret = kvm_pgtable_walk(pgt, start, granule, &check_perm_walker); + if (ret != -EEXIST) + break; + } + + if (!ret) { + range->start = start; + range->end = end; + } + + return ret; +} diff --git a/arch/arm64/kvm/hyp/reserved_mem.c b/arch/arm64/kvm/hyp/reserved_mem.c new file mode 100644 index 000000000000..83ca23ac259b --- /dev/null +++ b/arch/arm64/kvm/hyp/reserved_mem.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 - Google LLC + * Author: Quentin Perret <qperret@google.com> + */ + +#include <linux/kvm_host.h> +#include <linux/memblock.h> +#include <linux/sort.h> + +#include <asm/kvm_host.h> + +#include <nvhe/memory.h> +#include <nvhe/mm.h> + +static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory); +static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr); + +phys_addr_t hyp_mem_base; +phys_addr_t hyp_mem_size; + +static int cmp_hyp_memblock(const void *p1, const void *p2) +{ + const struct memblock_region *r1 = p1; + const struct memblock_region *r2 = p2; + + return r1->base < r2->base ? 
-1 : (r1->base > r2->base); +} + +static void __init sort_memblock_regions(void) +{ + sort(hyp_memory, + *hyp_memblock_nr_ptr, + sizeof(struct memblock_region), + cmp_hyp_memblock, + NULL); +} + +static int __init register_memblock_regions(void) +{ + struct memblock_region *reg; + + for_each_mem_region(reg) { + if (*hyp_memblock_nr_ptr >= HYP_MEMBLOCK_REGIONS) + return -ENOMEM; + + hyp_memory[*hyp_memblock_nr_ptr] = *reg; + (*hyp_memblock_nr_ptr)++; + } + sort_memblock_regions(); + + return 0; +} + +void __init kvm_hyp_reserve(void) +{ + u64 nr_pages, prev, hyp_mem_pages = 0; + int ret; + + if (!is_hyp_mode_available() || is_kernel_in_hyp_mode()) + return; + + if (kvm_get_mode() != KVM_MODE_PROTECTED) + return; + + ret = register_memblock_regions(); + if (ret) { + *hyp_memblock_nr_ptr = 0; + kvm_err("Failed to register hyp memblocks: %d\n", ret); + return; + } + + hyp_mem_pages += hyp_s1_pgtable_pages(); + hyp_mem_pages += host_s2_mem_pgtable_pages(); + hyp_mem_pages += host_s2_dev_pgtable_pages(); + + /* + * The hyp_vmemmap needs to be backed by pages, but these pages + * themselves need to be present in the vmemmap, so compute the number + * of pages needed by looking for a fixed point. + */ + nr_pages = 0; + do { + prev = nr_pages; + nr_pages = hyp_mem_pages + prev; + nr_pages = DIV_ROUND_UP(nr_pages * sizeof(struct hyp_page), PAGE_SIZE); + nr_pages += __hyp_pgtable_max_pages(nr_pages); + } while (nr_pages != prev); + hyp_mem_pages += nr_pages; + + /* + * Try to allocate a PMD-aligned region to reduce TLB pressure once + * this is unmapped from the host stage-2, and fallback to PAGE_SIZE. + */ + hyp_mem_size = hyp_mem_pages << PAGE_SHIFT; + hyp_mem_base = memblock_find_in_range(0, memblock_end_of_DRAM(), + ALIGN(hyp_mem_size, PMD_SIZE), + PMD_SIZE); + if (!hyp_mem_base) + hyp_mem_base = memblock_find_in_range(0, memblock_end_of_DRAM(), + hyp_mem_size, PAGE_SIZE); + else + hyp_mem_size = ALIGN(hyp_mem_size, PMD_SIZE); + + if (!hyp_mem_base) { + kvm_err("Failed to reserve hyp memory\n"); + return; + } + memblock_reserve(hyp_mem_base, hyp_mem_size); + + kvm_info("Reserved %lld MiB at 0x%llx\n", hyp_mem_size >> 20, + hyp_mem_base); +} diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index ee3682b9873c..39f8f7f9227c 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -429,6 +429,13 @@ u64 __vgic_v3_get_gic_config(void) if (has_vhe()) flags = local_daif_save(); + /* + * Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates + * that to be able to set ICC_SRE_EL1.SRE to 0, all the + * interrupt overrides must be set. You've got to love this. 
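/*
 * Illustrative sketch, not part of the patch: the hyp_vmemmap fixed point
 * computed in kvm_hyp_reserve() converges because each pass only adds the
 * struct hyp_page entries needed to describe the pages added by the
 * previous pass. A simplified standalone version, ignoring the extra
 * __hyp_pgtable_max_pages() term (hypothetical helper):
 */
static unsigned long example_vmemmap_fixed_point(unsigned long payload_pages,
						 size_t desc_size)
{
	unsigned long prev, pages = 0;

	do {
		prev = pages;
		pages = DIV_ROUND_UP((payload_pages + prev) * desc_size,
				     PAGE_SIZE);
	} while (pages != prev);

	return pages;	/* vmemmap pages needed on top of the payload */
}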
+ */ + sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO); + isb(); write_gicreg(0, ICC_SRE_EL1); isb(); @@ -436,6 +443,8 @@ u64 __vgic_v3_get_gic_config(void) write_gicreg(sre, ICC_SRE_EL1); isb(); + sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0); + isb(); if (has_vhe()) local_daif_restore(flags); diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index af8e940d0f03..7b8f7db5c1ed 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -27,8 +27,6 @@ #include <asm/processor.h> #include <asm/thread_info.h> -const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n"; - /* VHE specific context */ DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data); DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt); @@ -207,7 +205,7 @@ static void __hyp_call_panic(u64 spsr, u64 elr, u64 par) __deactivate_traps(vcpu); sysreg_restore_host_state_vhe(host_ctxt); - panic(__hyp_panic_string, + panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n", spsr, elr, read_sysreg_el2(SYS_ESR), read_sysreg_el2(SYS_FAR), read_sysreg(hpfar_el2), par, vcpu); diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index ead21b98b620..30da78f72b3b 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -9,16 +9,65 @@ #include <kvm/arm_hypercalls.h> #include <kvm/arm_psci.h> +static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val) +{ + struct system_time_snapshot systime_snapshot; + u64 cycles = ~0UL; + u32 feature; + + /* + * system time and counter value must captured at the same + * time to keep consistency and precision. + */ + ktime_get_snapshot(&systime_snapshot); + + /* + * This is only valid if the current clocksource is the + * architected counter, as this is the only one the guest + * can see. + */ + if (systime_snapshot.cs_id != CSID_ARM_ARCH_COUNTER) + return; + + /* + * The guest selects one of the two reference counters + * (virtual or physical) with the first argument of the SMCCC + * call. In case the identifier is not supported, error out. + */ + feature = smccc_get_arg1(vcpu); + switch (feature) { + case KVM_PTP_VIRT_COUNTER: + cycles = systime_snapshot.cycles - vcpu_read_sys_reg(vcpu, CNTVOFF_EL2); + break; + case KVM_PTP_PHYS_COUNTER: + cycles = systime_snapshot.cycles; + break; + default: + return; + } + + /* + * This relies on the top bit of val[0] never being set for + * valid values of system time, because that is *really* far + * in the future (about 292 years from 1970, and at that stage + * nobody will give a damn about it). 
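/*
 * Illustrative sketch, not part of the patch: a guest issuing the
 * ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID hypercall gets val[0]..val[3] back
 * in the first four SMCCC result registers and reassembles the two 64-bit
 * quantities as below. Hypothetical helper; a0..a3 hold the 32-bit halves
 * set by kvm_ptp_get_time().
 */
static void example_decode_ptp_result(u64 a0, u64 a1, u64 a2, u64 a3,
				      u64 *real_ns, u64 *cycles)
{
	*real_ns = (a0 << 32) | lower_32_bits(a1);	/* wall-clock time, ns */
	*cycles  = (a2 << 32) | lower_32_bits(a3);	/* arch counter value */
}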
+ */ + val[0] = upper_32_bits(systime_snapshot.real); + val[1] = lower_32_bits(systime_snapshot.real); + val[2] = upper_32_bits(cycles); + val[3] = lower_32_bits(cycles); +} + int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) { u32 func_id = smccc_get_function(vcpu); - long val = SMCCC_RET_NOT_SUPPORTED; + u64 val[4] = {SMCCC_RET_NOT_SUPPORTED}; u32 feature; gpa_t gpa; switch (func_id) { case ARM_SMCCC_VERSION_FUNC_ID: - val = ARM_SMCCC_VERSION_1_1; + val[0] = ARM_SMCCC_VERSION_1_1; break; case ARM_SMCCC_ARCH_FEATURES_FUNC_ID: feature = smccc_get_arg1(vcpu); @@ -28,10 +77,10 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) case SPECTRE_VULNERABLE: break; case SPECTRE_MITIGATED: - val = SMCCC_RET_SUCCESS; + val[0] = SMCCC_RET_SUCCESS; break; case SPECTRE_UNAFFECTED: - val = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED; + val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED; break; } break; @@ -54,22 +103,35 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) break; fallthrough; case SPECTRE_UNAFFECTED: - val = SMCCC_RET_NOT_REQUIRED; + val[0] = SMCCC_RET_NOT_REQUIRED; break; } break; case ARM_SMCCC_HV_PV_TIME_FEATURES: - val = SMCCC_RET_SUCCESS; + val[0] = SMCCC_RET_SUCCESS; break; } break; case ARM_SMCCC_HV_PV_TIME_FEATURES: - val = kvm_hypercall_pv_features(vcpu); + val[0] = kvm_hypercall_pv_features(vcpu); break; case ARM_SMCCC_HV_PV_TIME_ST: gpa = kvm_init_stolen_time(vcpu); if (gpa != GPA_INVALID) - val = gpa; + val[0] = gpa; + break; + case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID: + val[0] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0; + val[1] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1; + val[2] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2; + val[3] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3; + break; + case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID: + val[0] = BIT(ARM_SMCCC_KVM_FUNC_FEATURES); + val[0] |= BIT(ARM_SMCCC_KVM_FUNC_PTP); + break; + case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID: + kvm_ptp_get_time(vcpu, val); break; case ARM_SMCCC_TRNG_VERSION: case ARM_SMCCC_TRNG_FEATURES: @@ -81,6 +143,6 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) return kvm_psci_call(vcpu); } - smccc_set_retval(vcpu, val, 0, 0, 0); + smccc_set_retval(vcpu, val[0], val[1], val[2], val[3]); return 1; } diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 8711894db8c2..c5d1f3c87dbd 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -88,6 +88,44 @@ static bool kvm_is_device_pfn(unsigned long pfn) return !pfn_valid(pfn); } +static void *stage2_memcache_zalloc_page(void *arg) +{ + struct kvm_mmu_memory_cache *mc = arg; + + /* Allocated with __GFP_ZERO, so no need to zero */ + return kvm_mmu_memory_cache_alloc(mc); +} + +static void *kvm_host_zalloc_pages_exact(size_t size) +{ + return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO); +} + +static void kvm_host_get_page(void *addr) +{ + get_page(virt_to_page(addr)); +} + +static void kvm_host_put_page(void *addr) +{ + put_page(virt_to_page(addr)); +} + +static int kvm_host_page_count(void *addr) +{ + return page_count(virt_to_page(addr)); +} + +static phys_addr_t kvm_host_pa(void *addr) +{ + return __pa(addr); +} + +static void *kvm_host_va(phys_addr_t phys) +{ + return __va(phys); +} + /* * Unmapping vs dcache management: * @@ -127,7 +165,7 @@ static bool kvm_is_device_pfn(unsigned long pfn) static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size, bool may_block) { - struct kvm *kvm = mmu->kvm; + struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); phys_addr_t end = start + size; assert_spin_locked(&kvm->mmu_lock); @@ -183,15 +221,39 @@ void 
free_hyp_pgds(void) if (hyp_pgtable) { kvm_pgtable_hyp_destroy(hyp_pgtable); kfree(hyp_pgtable); + hyp_pgtable = NULL; } mutex_unlock(&kvm_hyp_pgd_mutex); } +static bool kvm_host_owns_hyp_mappings(void) +{ + if (static_branch_likely(&kvm_protected_mode_initialized)) + return false; + + /* + * This can happen at boot time when __create_hyp_mappings() is called + * after the hyp protection has been enabled, but the static key has + * not been flipped yet. + */ + if (!hyp_pgtable && is_protected_kvm_enabled()) + return false; + + WARN_ON(!hyp_pgtable); + + return true; +} + static int __create_hyp_mappings(unsigned long start, unsigned long size, unsigned long phys, enum kvm_pgtable_prot prot) { int err; + if (!kvm_host_owns_hyp_mappings()) { + return kvm_call_hyp_nvhe(__pkvm_create_mappings, + start, size, phys, prot); + } + mutex_lock(&kvm_hyp_pgd_mutex); err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot); mutex_unlock(&kvm_hyp_pgd_mutex); @@ -253,6 +315,16 @@ static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size, unsigned long base; int ret = 0; + if (!kvm_host_owns_hyp_mappings()) { + base = kvm_call_hyp_nvhe(__pkvm_create_private_mapping, + phys_addr, size, prot); + if (IS_ERR_OR_NULL((void *)base)) + return PTR_ERR((void *)base); + *haddr = base; + + return 0; + } + mutex_lock(&kvm_hyp_pgd_mutex); /* @@ -351,6 +423,17 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size, return 0; } +static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = { + .zalloc_page = stage2_memcache_zalloc_page, + .zalloc_pages_exact = kvm_host_zalloc_pages_exact, + .free_pages_exact = free_pages_exact, + .get_page = kvm_host_get_page, + .put_page = kvm_host_put_page, + .page_count = kvm_host_page_count, + .phys_to_virt = kvm_host_va, + .virt_to_phys = kvm_host_pa, +}; + /** * kvm_init_stage2_mmu - Initialise a S2 MMU strucrure * @kvm: The pointer to the KVM structure @@ -374,7 +457,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) if (!pgt) return -ENOMEM; - err = kvm_pgtable_stage2_init(pgt, kvm); + err = kvm_pgtable_stage2_init(pgt, &kvm->arch, &kvm_s2_mm_ops); if (err) goto out_free_pgtable; @@ -387,7 +470,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) for_each_possible_cpu(cpu) *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1; - mmu->kvm = kvm; + mmu->arch = &kvm->arch; mmu->pgt = pgt; mmu->pgd_phys = __pa(pgt->pgd); mmu->vmid.vmid_gen = 0; @@ -421,10 +504,11 @@ static void stage2_unmap_memslot(struct kvm *kvm, * +--------------------------------------------+ */ do { - struct vm_area_struct *vma = find_vma(current->mm, hva); + struct vm_area_struct *vma; hva_t vm_start, vm_end; - if (!vma || vma->vm_start >= reg_end) + vma = find_vma_intersection(current->mm, hva, reg_end); + if (!vma) break; /* @@ -469,7 +553,7 @@ void stage2_unmap_vm(struct kvm *kvm) void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu) { - struct kvm *kvm = mmu->kvm; + struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); struct kvm_pgtable *pgt = NULL; spin_lock(&kvm->mmu_lock); @@ -538,7 +622,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, */ static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end) { - struct kvm *kvm = mmu->kvm; + struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect); } @@ -555,7 +639,7 @@ static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_ * Acquires kvm_mmu_lock. 
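/*
 * Illustrative sketch, not part of the patch: the switch from find_vma()
 * to find_vma_intersection(mm, start, end) in stage2_unmap_memslot() folds
 * the old "find_vma() then check vma->vm_start against reg_end" pattern
 * into one call that returns the first VMA overlapping [start, end), or
 * NULL. Roughly equivalent hypothetical helper:
 */
static struct vm_area_struct *example_vma_intersection(struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct vm_area_struct *vma = find_vma(mm, start);

	if (vma && end <= vma->vm_start)
		vma = NULL;	/* next VMA starts at or after 'end': no overlap */

	return vma;
}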
Called with kvm->slots_lock mutex acquired, * serializing operations for VM memory regions. */ -void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot) +static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot) { struct kvm_memslots *slots = kvm_memslots(kvm); struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); @@ -839,13 +923,18 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk * the page we just got a reference to gets unmapped before we have a * chance to grab the mmu_lock, which ensure that if the page gets - * unmapped afterwards, the call to kvm_unmap_hva will take it away + * unmapped afterwards, the call to kvm_unmap_gfn will take it away * from us again properly. This smp_rmb() interacts with the smp_wmb() * in kvm_mmu_notifier_invalidate_<page|range_end>. + * + * Besides, __gfn_to_pfn_memslot() instead of gfn_to_pfn_prot() is + * used to avoid unnecessary overhead introduced to locate the memory + * slot because it's always fixed even @gfn is adjusted for huge pages. */ smp_rmb(); - pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); + pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL, + write_fault, &writable, NULL); if (pfn == KVM_PFN_ERR_HWPOISON) { kvm_send_hwpoison_signal(hva, vma_shift); return 0; @@ -911,7 +1000,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, /* Mark the page dirty only if the fault is handled successfully */ if (writable && !ret) { kvm_set_pfn_dirty(pfn); - mark_page_dirty(kvm, gfn); + mark_page_dirty_in_slot(kvm, memslot, gfn); } out_unlock: @@ -1064,126 +1153,70 @@ out_unlock: return ret; } -static int handle_hva_to_gpa(struct kvm *kvm, - unsigned long start, - unsigned long end, - int (*handler)(struct kvm *kvm, - gpa_t gpa, u64 size, - void *data), - void *data) -{ - struct kvm_memslots *slots; - struct kvm_memory_slot *memslot; - int ret = 0; - - slots = kvm_memslots(kvm); - - /* we only care about the pages that the guest sees */ - kvm_for_each_memslot(memslot, slots) { - unsigned long hva_start, hva_end; - gfn_t gpa; - - hva_start = max(start, memslot->userspace_addr); - hva_end = min(end, memslot->userspace_addr + - (memslot->npages << PAGE_SHIFT)); - if (hva_start >= hva_end) - continue; - - gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT; - ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data); - } - - return ret; -} - -static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) -{ - unsigned flags = *(unsigned *)data; - bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE; - - __unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block); - return 0; -} - -int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end, unsigned flags) +bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) { if (!kvm->arch.mmu.pgt) return 0; - trace_kvm_unmap_hva_range(start, end); - handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags); - return 0; -} - -static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) -{ - kvm_pfn_t *pfn = (kvm_pfn_t *)data; - - WARN_ON(size != PAGE_SIZE); + __unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT, + (range->end - range->start) << PAGE_SHIFT, + range->may_block); - /* - * The MMU notifiers will have unmapped a huge PMD before calling - * ->change_pte() (which in turn calls kvm_set_spte_hva()) and - * therefore we never need to clear out a huge PMD through 
this - * calling path and a memcache is not required. - */ - kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, gpa, PAGE_SIZE, - __pfn_to_phys(*pfn), KVM_PGTABLE_PROT_R, NULL); return 0; } -int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - unsigned long end = hva + PAGE_SIZE; - kvm_pfn_t pfn = pte_pfn(pte); + kvm_pfn_t pfn = pte_pfn(range->pte); if (!kvm->arch.mmu.pgt) return 0; - trace_kvm_set_spte_hva(hva); + WARN_ON(range->end - range->start != 1); /* * We've moved a page around, probably through CoW, so let's treat it * just like a translation fault and clean the cache to the PoC. */ clean_dcache_guest_page(pfn, PAGE_SIZE); - handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pfn); + + /* + * The MMU notifiers will have unmapped a huge PMD before calling + * ->change_pte() (which in turn calls kvm_set_spte_gfn()) and + * therefore we never need to clear out a huge PMD through this + * calling path and a memcache is not required. + */ + kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT, + PAGE_SIZE, __pfn_to_phys(pfn), + KVM_PGTABLE_PROT_R, NULL); + return 0; } -static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) +bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - pte_t pte; + u64 size = (range->end - range->start) << PAGE_SHIFT; kvm_pte_t kpte; + pte_t pte; + + if (!kvm->arch.mmu.pgt) + return 0; WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); - kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt, gpa); + + kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt, + range->start << PAGE_SHIFT); pte = __pte(kpte); return pte_valid(pte) && pte_young(pte); } -static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) -{ - WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); - return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt, gpa); -} - -int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) +bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { if (!kvm->arch.mmu.pgt) return 0; - trace_kvm_age_hva(start, end); - return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL); -} -int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) -{ - if (!kvm->arch.mmu.pgt) - return 0; - trace_kvm_test_age_hva(hva); - return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE, - kvm_test_age_hva_handler, NULL); + return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt, + range->start << PAGE_SHIFT); } phys_addr_t kvm_mmu_get_httbr(void) @@ -1208,10 +1241,22 @@ static int kvm_map_idmap_text(void) return err; } -int kvm_mmu_init(void) +static void *kvm_hyp_zalloc_page(void *arg) +{ + return (void *)get_zeroed_page(GFP_KERNEL); +} + +static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = { + .zalloc_page = kvm_hyp_zalloc_page, + .get_page = kvm_host_get_page, + .put_page = kvm_host_put_page, + .phys_to_virt = kvm_host_va, + .virt_to_phys = kvm_host_pa, +}; + +int kvm_mmu_init(u32 *hyp_va_bits) { int err; - u32 hyp_va_bits; hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start); hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE); @@ -1225,8 +1270,8 @@ int kvm_mmu_init(void) */ BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK); - hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET); - kvm_debug("Using %u-bit virtual addresses at EL2\n", hyp_va_bits); + *hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET); + kvm_debug("Using 
%u-bit virtual addresses at EL2\n", *hyp_va_bits); kvm_debug("IDMAP page: %lx\n", hyp_idmap_start); kvm_debug("HYP VA range: %lx:%lx\n", kern_hyp_va(PAGE_OFFSET), @@ -1251,7 +1296,7 @@ int kvm_mmu_init(void) goto out; } - err = kvm_pgtable_hyp_init(hyp_pgtable, hyp_va_bits); + err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops); if (err) goto out_free_pgtable; @@ -1329,10 +1374,11 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, * +--------------------------------------------+ */ do { - struct vm_area_struct *vma = find_vma(current->mm, hva); + struct vm_area_struct *vma; hva_t vm_start, vm_end; - if (!vma || vma->vm_start >= reg_end) + vma = find_vma_intersection(current->mm, hva, reg_end); + if (!vma) break; /* diff --git a/arch/arm64/kvm/perf.c b/arch/arm64/kvm/perf.c index 739164324afe..151c31fb9860 100644 --- a/arch/arm64/kvm/perf.c +++ b/arch/arm64/kvm/perf.c @@ -50,12 +50,7 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = { int kvm_perf_init(void) { - /* - * Check if HW_PERF_EVENTS are supported by checking the number of - * hardware performance counters. This could ensure the presence of - * a physical PMU and CONFIG_PERF_EVENT is selected. - */ - if (IS_ENABLED(CONFIG_ARM_PMU) && perf_num_counters() > 0) + if (kvm_pmu_probe_pmuver() != 0xf && !is_protected_kvm_enabled()) static_branch_enable(&kvm_arm_pmu_available); return perf_register_guest_info_callbacks(&kvm_guest_cbs); diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index e32c6e139a09..fd167d4f4215 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -739,7 +739,7 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, kvm_pmu_create_perf_event(vcpu, select_idx); } -static int kvm_pmu_probe_pmuver(void) +int kvm_pmu_probe_pmuver(void) { struct perf_event_attr attr = { }; struct perf_event *event; diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c index faf32a44ba04..03a6c1f4a09a 100644 --- a/arch/arm64/kvm/pmu.c +++ b/arch/arm64/kvm/pmu.c @@ -33,7 +33,7 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) { struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data); - if (!ctx || !kvm_pmu_switch_needed(attr)) + if (!kvm_arm_support_pmu_v3() || !ctx || !kvm_pmu_switch_needed(attr)) return; if (!attr->exclude_host) @@ -49,7 +49,7 @@ void kvm_clr_pmu_events(u32 clr) { struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data); - if (!ctx) + if (!kvm_arm_support_pmu_v3() || !ctx) return; ctx->pmu_events.events_host &= ~clr; @@ -172,7 +172,7 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) struct kvm_host_data *host; u32 events_guest, events_host; - if (!has_vhe()) + if (!kvm_arm_support_pmu_v3() || !has_vhe()) return; preempt_disable(); @@ -193,7 +193,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) struct kvm_host_data *host; u32 events_guest, events_host; - if (!has_vhe()) + if (!kvm_arm_support_pmu_v3() || !has_vhe()) return; host = this_cpu_ptr_hyp_sym(kvm_host_data); diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index bd354cd45d28..956cdc240148 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -74,10 +74,6 @@ static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu) if (!system_supports_sve()) return -EINVAL; - /* Verify that KVM startup enforced this when SVE was detected: */ - if (WARN_ON(!has_vhe())) - return -EINVAL; - vcpu->arch.sve_max_vl = kvm_sve_max_vl; /* @@ -242,6 +238,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) /* Reset core registers */ 
memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu))); + memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs)); + vcpu->arch.ctxt.spsr_abt = 0; + vcpu->arch.ctxt.spsr_und = 0; + vcpu->arch.ctxt.spsr_irq = 0; + vcpu->arch.ctxt.spsr_fiq = 0; vcpu_gp_regs(vcpu)->pstate = pstate; /* Reset system registers */ @@ -333,19 +334,10 @@ int kvm_set_ipa_limit(void) return 0; } -/* - * Configure the VTCR_EL2 for this VM. The VTCR value is common - * across all the physical CPUs on the system. We use system wide - * sanitised values to fill in different fields, except for Hardware - * Management of Access Flags. HA Flag is set unconditionally on - * all CPUs, as it is safe to run with or without the feature and - * the bit is RES0 on CPUs that don't support it. - */ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type) { - u64 vtcr = VTCR_EL2_FLAGS, mmfr0; - u32 parange, phys_shift; - u8 lvls; + u64 mmfr0, mmfr1; + u32 phys_shift; if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK) return -EINVAL; @@ -365,33 +357,8 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type) } mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); - parange = cpuid_feature_extract_unsigned_field(mmfr0, - ID_AA64MMFR0_PARANGE_SHIFT); - if (parange > ID_AA64MMFR0_PARANGE_MAX) - parange = ID_AA64MMFR0_PARANGE_MAX; - vtcr |= parange << VTCR_EL2_PS_SHIFT; - - vtcr |= VTCR_EL2_T0SZ(phys_shift); - /* - * Use a minimum 2 level page table to prevent splitting - * host PMD huge pages at stage2. - */ - lvls = stage2_pgtable_levels(phys_shift); - if (lvls < 2) - lvls = 2; - vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls); - - /* - * Enable the Hardware Access Flag management, unconditionally - * on all CPUs. The features is RES0 on CPUs without the support - * and must be ignored by the CPUs. - */ - vtcr |= VTCR_EL2_HA; + mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); + kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift); - /* Set the vmid bits */ - vtcr |= (kvm_get_vmid_bits() == 16) ? - VTCR_EL2_VS_16BIT : - VTCR_EL2_VS_8BIT; - kvm->arch.vtcr = vtcr; return 0; } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 4f2f1e3145de..76ea2800c33e 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1063,6 +1063,8 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, val = cpuid_feature_cap_perfmon_field(val, ID_AA64DFR0_PMUVER_SHIFT, kvm_vcpu_has_pmu(vcpu) ? 
ID_AA64DFR0_PMUVER_8_4 : 0); + /* Hide SPE from guests */ + val &= ~FEATURE(ID_AA64DFR0_PMSVER); break; case SYS_ID_DFR0_EL1: /* Limit guests to PMUv3 for ARMv8.4 */ @@ -1472,6 +1474,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_GCR_EL1), undef_access }, { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility }, + { SYS_DESC(SYS_TRFCR_EL1), undef_access }, { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 }, { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 }, { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 }, @@ -1501,6 +1504,19 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 }, { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 }, + { SYS_DESC(SYS_PMSCR_EL1), undef_access }, + { SYS_DESC(SYS_PMSNEVFR_EL1), undef_access }, + { SYS_DESC(SYS_PMSICR_EL1), undef_access }, + { SYS_DESC(SYS_PMSIRR_EL1), undef_access }, + { SYS_DESC(SYS_PMSFCR_EL1), undef_access }, + { SYS_DESC(SYS_PMSEVFR_EL1), undef_access }, + { SYS_DESC(SYS_PMSLATFR_EL1), undef_access }, + { SYS_DESC(SYS_PMSIDR_EL1), undef_access }, + { SYS_DESC(SYS_PMBLIMITR_EL1), undef_access }, + { SYS_DESC(SYS_PMBPTR_EL1), undef_access }, + { SYS_DESC(SYS_PMBSR_EL1), undef_access }, + /* PMBIDR_EL1 is not trapped */ + { PMU_SYS_REG(SYS_PMINTENSET_EL1), .access = access_pminten, .reg = PMINTENSET_EL1 }, { PMU_SYS_REG(SYS_PMINTENCLR_EL1), diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h index ff0444352bba..33e4e7dd2719 100644 --- a/arch/arm64/kvm/trace_arm.h +++ b/arch/arm64/kvm/trace_arm.h @@ -135,72 +135,6 @@ TRACE_EVENT(kvm_mmio_emulate, __entry->vcpu_pc, __entry->instr, __entry->cpsr) ); -TRACE_EVENT(kvm_unmap_hva_range, - TP_PROTO(unsigned long start, unsigned long end), - TP_ARGS(start, end), - - TP_STRUCT__entry( - __field( unsigned long, start ) - __field( unsigned long, end ) - ), - - TP_fast_assign( - __entry->start = start; - __entry->end = end; - ), - - TP_printk("mmu notifier unmap range: %#016lx -- %#016lx", - __entry->start, __entry->end) -); - -TRACE_EVENT(kvm_set_spte_hva, - TP_PROTO(unsigned long hva), - TP_ARGS(hva), - - TP_STRUCT__entry( - __field( unsigned long, hva ) - ), - - TP_fast_assign( - __entry->hva = hva; - ), - - TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva) -); - -TRACE_EVENT(kvm_age_hva, - TP_PROTO(unsigned long start, unsigned long end), - TP_ARGS(start, end), - - TP_STRUCT__entry( - __field( unsigned long, start ) - __field( unsigned long, end ) - ), - - TP_fast_assign( - __entry->start = start; - __entry->end = end; - ), - - TP_printk("mmu notifier age hva: %#016lx -- %#016lx", - __entry->start, __entry->end) -); - -TRACE_EVENT(kvm_test_age_hva, - TP_PROTO(unsigned long hva), - TP_ARGS(hva), - - TP_STRUCT__entry( - __field( unsigned long, hva ) - ), - - TP_fast_assign( - __entry->hva = hva; - ), - - TP_printk("mmu notifier test age hva: %#016lx", __entry->hva) -); - TRACE_EVENT(kvm_set_way_flush, TP_PROTO(unsigned long vcpu_pc, bool cache), TP_ARGS(vcpu_pc, cache), diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c index 978301392d67..acdb7b3cc97d 100644 --- a/arch/arm64/kvm/va_layout.c +++ b/arch/arm64/kvm/va_layout.c @@ -288,3 +288,10 @@ void kvm_get_kimage_voffset(struct alt_instr *alt, { generate_mov_q(kimage_voffset, origptr, updptr, nr_inst); } + +void kvm_compute_final_ctr_el0(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst) +{ + 
generate_mov_q(read_sanitised_ftr_reg(SYS_CTR_EL0), + origptr, updptr, nr_inst); +} diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index 052917deb149..58cbda00e56d 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -335,13 +335,14 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm) kfree(dist->spis); dist->spis = NULL; dist->nr_spis = 0; + dist->vgic_dist_base = VGIC_ADDR_UNDEF; - if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { - list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list) { - list_del(&rdreg->list); - kfree(rdreg); - } + if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { + list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list) + vgic_v3_free_redist_region(rdreg); INIT_LIST_HEAD(&dist->rd_regions); + } else { + dist->vgic_cpu_base = VGIC_ADDR_UNDEF; } if (vgic_has_its(kvm)) @@ -362,6 +363,7 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) vgic_flush_pending_lpis(vcpu); INIT_LIST_HEAD(&vgic_cpu->ap_list_head); + vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF; } /* To be called with kvm->lock held */ diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index 40cbaca81333..61728c543eb9 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -2190,8 +2190,8 @@ static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id, return offset; } -static int vgic_its_ite_cmp(void *priv, struct list_head *a, - struct list_head *b) +static int vgic_its_ite_cmp(void *priv, const struct list_head *a, + const struct list_head *b) { struct its_ite *itea = container_of(a, struct its_ite, ite_list); struct its_ite *iteb = container_of(b, struct its_ite, ite_list); @@ -2218,10 +2218,10 @@ static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device) /* * If an LPI carries the HW bit, this means that this * interrupt is controlled by GICv4, and we do not - * have direct access to that state. Let's simply fail - * the save operation... + * have direct access to that state without GICv4.1. + * Let's simply fail the save operation... 
*/ - if (ite->irq->hw) + if (ite->irq->hw && !kvm_vgic_global_state.has_gicv4_1) return -EACCES; ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz); @@ -2329,8 +2329,8 @@ static int vgic_its_restore_dte(struct vgic_its *its, u32 id, return offset; } -static int vgic_its_device_cmp(void *priv, struct list_head *a, - struct list_head *b) +static int vgic_its_device_cmp(void *priv, const struct list_head *a, + const struct list_head *b) { struct its_device *deva = container_of(a, struct its_device, dev_list); struct its_device *devb = container_of(b, struct its_device, dev_list); diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c index 44419679f91a..7740995de982 100644 --- a/arch/arm64/kvm/vgic/vgic-kvm-device.c +++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c @@ -87,8 +87,8 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write) r = vgic_v3_set_redist_base(kvm, 0, *addr, 0); goto out; } - rdreg = list_first_entry(&vgic->rd_regions, - struct vgic_redist_region, list); + rdreg = list_first_entry_or_null(&vgic->rd_regions, + struct vgic_redist_region, list); if (!rdreg) addr_ptr = &undef_value; else @@ -226,6 +226,9 @@ static int vgic_get_common_attr(struct kvm_device *dev, u64 addr; unsigned long type = (unsigned long)attr->attr; + if (copy_from_user(&addr, uaddr, sizeof(addr))) + return -EFAULT; + r = kvm_vgic_addr(dev->kvm, type, &addr, false); if (r) return (r == -ENODEV) ? -ENXIO : r; diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index 15a6c98ee92f..a09cdc0b953c 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -86,7 +86,7 @@ static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu, } break; case GICD_TYPER2: - if (kvm_vgic_global_state.has_gicv4_1) + if (kvm_vgic_global_state.has_gicv4_1 && gic_cpuif_has_vsgi()) value = GICD_TYPER2_nASSGIcap; break; case GICD_IIDR: @@ -119,7 +119,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu, dist->enabled = val & GICD_CTLR_ENABLE_SS_G1; /* Not a GICv4.1? No HW SGIs */ - if (!kvm_vgic_global_state.has_gicv4_1) + if (!kvm_vgic_global_state.has_gicv4_1 || !gic_cpuif_has_vsgi()) val &= ~GICD_CTLR_nASSGIreq; /* Dist stays enabled? 
nASSGIreq is RO */ @@ -251,30 +251,35 @@ static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu, vgic_enable_lpis(vcpu); } -static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu, - gpa_t addr, unsigned int len) +static bool vgic_mmio_vcpu_rdist_is_last(struct kvm_vcpu *vcpu) { - unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu); + struct vgic_dist *vgic = &vcpu->kvm->arch.vgic; struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; - struct vgic_redist_region *rdreg = vgic_cpu->rdreg; - int target_vcpu_id = vcpu->vcpu_id; - gpa_t last_rdist_typer = rdreg->base + GICR_TYPER + - (rdreg->free_index - 1) * KVM_VGIC_V3_REDIST_SIZE; - u64 value; + struct vgic_redist_region *iter, *rdreg = vgic_cpu->rdreg; - value = (u64)(mpidr & GENMASK(23, 0)) << 32; - value |= ((target_vcpu_id & 0xffff) << 8); + if (!rdreg) + return false; - if (addr == last_rdist_typer) - value |= GICR_TYPER_LAST; - if (vgic_has_its(vcpu->kvm)) - value |= GICR_TYPER_PLPIS; + if (vgic_cpu->rdreg_index < rdreg->free_index - 1) { + return false; + } else if (rdreg->count && vgic_cpu->rdreg_index == (rdreg->count - 1)) { + struct list_head *rd_regions = &vgic->rd_regions; + gpa_t end = rdreg->base + rdreg->count * KVM_VGIC_V3_REDIST_SIZE; - return extract_bytes(value, addr & 7, len); + /* + * the rdist is the last one of the redist region, + * check whether there is no other contiguous rdist region + */ + list_for_each_entry(iter, rd_regions, list) { + if (iter->base == end && iter->free_index > 0) + return false; + } + } + return true; } -static unsigned long vgic_uaccess_read_v3r_typer(struct kvm_vcpu *vcpu, - gpa_t addr, unsigned int len) +static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) { unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu); int target_vcpu_id = vcpu->vcpu_id; @@ -286,7 +291,9 @@ static unsigned long vgic_uaccess_read_v3r_typer(struct kvm_vcpu *vcpu, if (vgic_has_its(vcpu->kvm)) value |= GICR_TYPER_PLPIS; - /* reporting of the Last bit is not supported for userspace */ + if (vgic_mmio_vcpu_rdist_is_last(vcpu)) + value |= GICR_TYPER_LAST; + return extract_bytes(value, addr & 7, len); } @@ -612,7 +619,7 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = { VGIC_ACCESS_32bit), REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_TYPER, vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, - vgic_uaccess_read_v3r_typer, vgic_mmio_uaccess_write_wi, 8, + NULL, vgic_mmio_uaccess_write_wi, 8, VGIC_ACCESS_64bit | VGIC_ACCESS_32bit), REGISTER_DESC_WITH_LENGTH(GICR_WAKER, vgic_mmio_read_raz, vgic_mmio_write_wi, 4, @@ -714,6 +721,7 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu) return -EINVAL; vgic_cpu->rdreg = rdreg; + vgic_cpu->rdreg_index = rdreg->free_index; rd_base = rdreg->base + rdreg->free_index * KVM_VGIC_V3_REDIST_SIZE; @@ -768,7 +776,7 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm) } /** - * vgic_v3_insert_redist_region - Insert a new redistributor region + * vgic_v3_alloc_redist_region - Allocate a new redistributor region * * Performs various checks before inserting the rdist region in the list. 
* Those tests depend on whether the size of the rdist region is known @@ -782,8 +790,8 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm) * * Return 0 on success, < 0 otherwise */ -static int vgic_v3_insert_redist_region(struct kvm *kvm, uint32_t index, - gpa_t base, uint32_t count) +static int vgic_v3_alloc_redist_region(struct kvm *kvm, uint32_t index, + gpa_t base, uint32_t count) { struct vgic_dist *d = &kvm->arch.vgic; struct vgic_redist_region *rdreg; @@ -791,10 +799,6 @@ static int vgic_v3_insert_redist_region(struct kvm *kvm, uint32_t index, size_t size = count * KVM_VGIC_V3_REDIST_SIZE; int ret; - /* single rdist region already set ?*/ - if (!count && !list_empty(rd_regions)) - return -EINVAL; - /* cross the end of memory ? */ if (base + size < base) return -EINVAL; @@ -805,11 +809,15 @@ static int vgic_v3_insert_redist_region(struct kvm *kvm, uint32_t index, } else { rdreg = list_last_entry(rd_regions, struct vgic_redist_region, list); - if (index != rdreg->index + 1) + + /* Don't mix single region and discrete redist regions */ + if (!count && rdreg->count) return -EINVAL; - /* Cannot add an explicitly sized regions after legacy region */ - if (!rdreg->count) + if (!count) + return -EEXIST; + + if (index != rdreg->index + 1) return -EINVAL; } @@ -848,11 +856,17 @@ free: return ret; } +void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg) +{ + list_del(&rdreg->list); + kfree(rdreg); +} + int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count) { int ret; - ret = vgic_v3_insert_redist_region(kvm, index, addr, count); + ret = vgic_v3_alloc_redist_region(kvm, index, addr, count); if (ret) return ret; @@ -861,8 +875,13 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count) * afterwards will register the iodevs when needed. 
*/ ret = vgic_register_all_redist_iodevs(kvm); - if (ret) + if (ret) { + struct vgic_redist_region *rdreg; + + rdreg = vgic_v3_rdist_region_from_index(kvm, index); + vgic_v3_free_redist_region(rdreg); return ret; + } return 0; } diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c index b2d73fc0d1ef..48c6067fc5ec 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio.c +++ b/arch/arm64/kvm/vgic/vgic-mmio.c @@ -938,10 +938,9 @@ vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev, return region; } -static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, +static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev, gpa_t addr, u32 *val) { - struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev); const struct vgic_register_region *region; struct kvm_vcpu *r_vcpu; @@ -960,10 +959,9 @@ static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, return 0; } -static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, +static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev, gpa_t addr, const u32 *val) { - struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev); const struct vgic_register_region *region; struct kvm_vcpu *r_vcpu; @@ -986,9 +984,9 @@ int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev, bool is_write, int offset, u32 *val) { if (is_write) - return vgic_uaccess_write(vcpu, &dev->dev, offset, val); + return vgic_uaccess_write(vcpu, dev, offset, val); else - return vgic_uaccess_read(vcpu, &dev->dev, offset, val); + return vgic_uaccess_read(vcpu, dev, offset, val); } static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index 6f530925a231..41ecf219c333 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only #include <linux/irqchip/arm-gic-v3.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/kvm.h> #include <linux/kvm_host.h> #include <kvm/arm_vgic.h> @@ -356,6 +358,32 @@ retry: return 0; } +/* + * The deactivation of the doorbell interrupt will trigger the + * unmapping of the associated vPE. + */ +static void unmap_all_vpes(struct vgic_dist *dist) +{ + struct irq_desc *desc; + int i; + + for (i = 0; i < dist->its_vm.nr_vpes; i++) { + desc = irq_to_desc(dist->its_vm.vpes[i]->irq); + irq_domain_deactivate_irq(irq_desc_get_irq_data(desc)); + } +} + +static void map_all_vpes(struct vgic_dist *dist) +{ + struct irq_desc *desc; + int i; + + for (i = 0; i < dist->its_vm.nr_vpes; i++) { + desc = irq_to_desc(dist->its_vm.vpes[i]->irq); + irq_domain_activate_irq(irq_desc_get_irq_data(desc), false); + } +} + /** * vgic_v3_save_pending_tables - Save the pending tables into guest RAM * kvm lock and all vcpu lock must be held @@ -365,13 +393,28 @@ int vgic_v3_save_pending_tables(struct kvm *kvm) struct vgic_dist *dist = &kvm->arch.vgic; struct vgic_irq *irq; gpa_t last_ptr = ~(gpa_t)0; - int ret; + bool vlpi_avail = false; + int ret = 0; u8 val; + if (unlikely(!vgic_initialized(kvm))) + return -ENXIO; + + /* + * A preparation for getting any VLPI states. + * The above vgic initialized check also ensures that the allocation + * and enabling of the doorbells have already been done. 
+ */ + if (kvm_vgic_global_state.has_gicv4_1) { + unmap_all_vpes(dist); + vlpi_avail = true; + } + list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { int byte_offset, bit_nr; struct kvm_vcpu *vcpu; gpa_t pendbase, ptr; + bool is_pending; bool stored; vcpu = irq->target_vcpu; @@ -387,24 +430,35 @@ int vgic_v3_save_pending_tables(struct kvm *kvm) if (ptr != last_ptr) { ret = kvm_read_guest_lock(kvm, ptr, &val, 1); if (ret) - return ret; + goto out; last_ptr = ptr; } stored = val & (1U << bit_nr); - if (stored == irq->pending_latch) + + is_pending = irq->pending_latch; + + if (irq->hw && vlpi_avail) + vgic_v4_get_vlpi_state(irq, &is_pending); + + if (stored == is_pending) continue; - if (irq->pending_latch) + if (is_pending) val |= 1 << bit_nr; else val &= ~(1 << bit_nr); ret = kvm_write_guest_lock(kvm, ptr, &val, 1); if (ret) - return ret; + goto out; } - return 0; + +out: + if (vlpi_avail) + map_all_vpes(dist); + + return ret; } /** diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c index 66508b03094f..c1845d8f5f7e 100644 --- a/arch/arm64/kvm/vgic/vgic-v4.c +++ b/arch/arm64/kvm/vgic/vgic-v4.c @@ -203,6 +203,25 @@ void vgic_v4_configure_vsgis(struct kvm *kvm) kvm_arm_resume_guest(kvm); } +/* + * Must be called with GICv4.1 and the vPE unmapped, which + * indicates the invalidation of any VPT caches associated + * with the vPE, thus we can get the VLPI state by peeking + * at the VPT. + */ +void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val) +{ + struct its_vpe *vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe; + int mask = BIT(irq->intid % BITS_PER_BYTE); + void *va; + u8 *ptr; + + va = page_address(vpe->vpt_page); + ptr = va + irq->intid / BITS_PER_BYTE; + + *val = !!(*ptr & mask); +} + /** * vgic_v4_init - Initialize the GICv4 data structures * @kvm: Pointer to the VM being initialized @@ -385,6 +404,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq, struct vgic_its *its; struct vgic_irq *irq; struct its_vlpi_map map; + unsigned long flags; int ret; if (!vgic_supports_direct_msis(kvm)) @@ -430,6 +450,24 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq, irq->host_irq = virq; atomic_inc(&map.vpe->vlpi_count); + /* Transfer pending state */ + raw_spin_lock_irqsave(&irq->irq_lock, flags); + if (irq->pending_latch) { + ret = irq_set_irqchip_state(irq->host_irq, + IRQCHIP_STATE_PENDING, + irq->pending_latch); + WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq); + + /* + * Clear pending_latch and communicate this state + * change via vgic_queue_irq_unlock. + */ + irq->pending_latch = false; + vgic_queue_irq_unlock(kvm, irq, flags); + } else { + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); + } + out: mutex_unlock(&its->its_lock); return ret; diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c index 1c597c9885fa..15b666200f0b 100644 --- a/arch/arm64/kvm/vgic/vgic.c +++ b/arch/arm64/kvm/vgic/vgic.c @@ -255,7 +255,8 @@ static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq) * Return negative if "a" sorts before "b", 0 to preserve order, and positive * to sort "b" before "a". 
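/*
 * Illustrative sketch, not part of the patch: list_sort() comparators such
 * as vgic_irq_cmp() now take const list_head pointers. A minimal ascending
 * comparator following the same convention, using a hypothetical example
 * node type:
 */
struct example_node {
	struct list_head list;
	int key;
};

static int example_node_cmp(void *priv, const struct list_head *a,
			    const struct list_head *b)
{
	struct example_node *na = container_of(a, struct example_node, list);
	struct example_node *nb = container_of(b, struct example_node, list);

	/* negative: a first, 0: keep order, positive: b first */
	return na->key - nb->key;
}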
*/ -static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b) +static int vgic_irq_cmp(void *priv, const struct list_head *a, + const struct list_head *b) { struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list); struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list); diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h index 64fcd7511110..dc1f3d1657ee 100644 --- a/arch/arm64/kvm/vgic/vgic.h +++ b/arch/arm64/kvm/vgic/vgic.h @@ -293,6 +293,7 @@ vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg) struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm, u32 index); +void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg); bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size); @@ -317,5 +318,6 @@ bool vgic_supports_direct_msis(struct kvm *kvm); int vgic_v4_init(struct kvm *kvm); void vgic_v4_teardown(struct kvm *kvm); void vgic_v4_configure_vsgis(struct kvm *kvm); +void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val); #endif diff --git a/arch/arm64/lib/clear_page.S b/arch/arm64/lib/clear_page.S index 073acbf02a7c..b84b179edba3 100644 --- a/arch/arm64/lib/clear_page.S +++ b/arch/arm64/lib/clear_page.S @@ -14,7 +14,7 @@ * Parameters: * x0 - dest */ -SYM_FUNC_START(clear_page) +SYM_FUNC_START_PI(clear_page) mrs x1, dczid_el0 and w1, w1, #0xf mov x2, #4 @@ -25,5 +25,5 @@ SYM_FUNC_START(clear_page) tst x0, #(PAGE_SIZE - 1) b.ne 1b ret -SYM_FUNC_END(clear_page) +SYM_FUNC_END_PI(clear_page) EXPORT_SYMBOL(clear_page) diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S index e7a793961408..29144f4cd449 100644 --- a/arch/arm64/lib/copy_page.S +++ b/arch/arm64/lib/copy_page.S @@ -17,7 +17,7 @@ * x0 - dest * x1 - src */ -SYM_FUNC_START(copy_page) +SYM_FUNC_START_PI(copy_page) alternative_if ARM64_HAS_NO_HW_PREFETCH // Prefetch three cache lines ahead. 
prfm pldl1strm, [x1, #128] @@ -75,5 +75,5 @@ alternative_else_nop_endif stnp x16, x17, [x0, #112 - 256] ret -SYM_FUNC_END(copy_page) +SYM_FUNC_END_PI(copy_page) EXPORT_SYMBOL(copy_page) diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 93e87b287556..4bf1dd3eb041 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -53,7 +53,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, iommu_setup_dma_ops(dev, dma_base, size); #ifdef CONFIG_XEN - if (xen_initial_domain()) + if (xen_swiotlb_detect()) dev->dma_ops = &xen_swiotlb_dma_ops; #endif } diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index f37d4e3830b7..871c82ab0a30 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -527,7 +527,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned int esr, const struct fault_info *inf; struct mm_struct *mm = current->mm; vm_fault_t fault; - unsigned long vm_flags = VM_ACCESS_FLAGS; + unsigned long vm_flags; unsigned int mm_flags = FAULT_FLAG_DEFAULT; unsigned long addr = untagged_addr(far); @@ -544,12 +544,28 @@ static int __kprobes do_page_fault(unsigned long far, unsigned int esr, if (user_mode(regs)) mm_flags |= FAULT_FLAG_USER; + /* + * vm_flags tells us what bits we must have in vma->vm_flags + * for the fault to be benign, __do_page_fault() would check + * vma->vm_flags & vm_flags and returns an error if the + * intersection is empty + */ if (is_el0_instruction_abort(esr)) { + /* It was exec fault */ vm_flags = VM_EXEC; mm_flags |= FAULT_FLAG_INSTRUCTION; } else if (is_write_abort(esr)) { + /* It was write fault */ vm_flags = VM_WRITE; mm_flags |= FAULT_FLAG_WRITE; + } else { + /* It was read fault */ + vm_flags = VM_READ; + /* Write implies read */ + vm_flags |= VM_WRITE; + /* If EPAN is absent then exec implies read */ + if (!cpus_have_const_cap(ARM64_HAS_EPAN)) + vm_flags |= VM_EXEC; } if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) { diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 55ecf6de9ff7..58987a98e179 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -252,7 +252,7 @@ void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, set_pte(ptep, pte); } -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgdp; @@ -284,9 +284,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, */ ptep = pte_alloc_map(mm, pmdp, addr); } else if (sz == PMD_SIZE) { - if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && - pud_none(READ_ONCE(*pudp))) - ptep = huge_pmd_share(mm, addr, pudp); + if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp))) + ptep = huge_pmd_share(mm, vma, addr, pudp); else ptep = (pte_t *)pmd_alloc(mm, pudp, addr); } else if (sz == (CONT_PMD_SIZE)) { diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 3685e12aba9b..16a2b2b1c54d 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -35,6 +35,7 @@ #include <asm/fixmap.h> #include <asm/kasan.h> #include <asm/kernel-pgtable.h> +#include <asm/kvm_host.h> #include <asm/memory.h> #include <asm/numa.h> #include <asm/sections.h> @@ -220,6 +221,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) int pfn_valid(unsigned long pfn) { phys_addr_t addr = PFN_PHYS(pfn); + struct mem_section *ms; /* * Ensure the upper PAGE_SHIFT bits are clear in the @@ -230,10 +232,6 @@ int pfn_valid(unsigned long pfn) if 
(PHYS_PFN(addr) != pfn) return 0; -#ifdef CONFIG_SPARSEMEM -{ - struct mem_section *ms; - if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) return 0; @@ -252,8 +250,7 @@ int pfn_valid(unsigned long pfn) */ if (!early_section(ms)) return pfn_section_valid(ms, pfn); -} -#endif + return memblock_is_map_memory(addr); } EXPORT_SYMBOL(pfn_valid); @@ -452,6 +449,8 @@ void __init bootmem_init(void) dma_pernuma_cma_reserve(); + kvm_hyp_reserve(); + /* * sparse_init() tries to allocate memory from memblock, so must be * done after the fixed reservations @@ -491,8 +490,6 @@ void __init mem_init(void) /* this will put all unused low memory onto the freelists */ memblock_free_all(); - mem_init_print_info(NULL); - /* * Check boundaries twice: Some fundamental inconsistencies can be * detected at build time already. @@ -521,7 +518,7 @@ void free_initmem(void) * prevents the region from being reused for kernel modules, which * is not supported by kallsyms. */ - unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin)); + vunmap_range((u64)__init_begin, (u64)__init_end); } void dump_mem_limit(void) diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index d8e66c78440e..61b52a92b8b6 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -79,7 +79,7 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, phys_addr_t pmd_phys = early ? __pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node); - __pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE); + __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE); } return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr); @@ -92,7 +92,7 @@ static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, phys_addr_t pud_phys = early ? __pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node); - __p4d_populate(p4dp, pud_phys, PMD_TYPE_TABLE); + __p4d_populate(p4dp, pud_phys, P4D_TYPE_TABLE); } return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr); @@ -214,15 +214,18 @@ static void __init kasan_init_shadow(void) { u64 kimg_shadow_start, kimg_shadow_end; u64 mod_shadow_start, mod_shadow_end; + u64 vmalloc_shadow_end; phys_addr_t pa_start, pa_end; u64 i; - kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK; - kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end)); + kimg_shadow_start = (u64)kasan_mem_to_shadow(KERNEL_START) & PAGE_MASK; + kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END)); mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR); mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END); + vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END); + /* * We are going to perform proper setup of shadow memory. * At first we should unmap early shadow (clear_pgds() call below). 
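The shadow bounds set up in this hunk (kimg_shadow_start/end, mod_shadow_start/end and the new vmalloc_shadow_end) are all images of kernel virtual ranges under the generic KASAN address translation. As a rough reference sketch only -- assuming generic KASAN with the usual one-shadow-byte-per-eight-bytes scaling, not the literal arm64 code -- that translation is:

static inline void *kasan_mem_to_shadow(const void *addr)
{
	/* each shadow byte covers (1 << KASAN_SHADOW_SCALE_SHIFT) bytes */
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

so vmalloc_shadow_end is simply the shadow address of VMALLOC_END, which is what lets the CONFIG_KASAN_VMALLOC branch in the next hunk populate early shadow from that point up to KASAN_SHADOW_END.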
@@ -237,16 +240,22 @@ static void __init kasan_init_shadow(void) clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); kasan_map_populate(kimg_shadow_start, kimg_shadow_end, - early_pfn_to_nid(virt_to_pfn(lm_alias(_text)))); + early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START)))); kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END), (void *)mod_shadow_start); - kasan_populate_early_shadow((void *)kimg_shadow_end, - (void *)KASAN_SHADOW_END); - if (kimg_shadow_start > mod_shadow_end) - kasan_populate_early_shadow((void *)mod_shadow_end, - (void *)kimg_shadow_start); + if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) { + BUILD_BUG_ON(VMALLOC_START != MODULES_END); + kasan_populate_early_shadow((void *)vmalloc_shadow_end, + (void *)KASAN_SHADOW_END); + } else { + kasan_populate_early_shadow((void *)kimg_shadow_end, + (void *)KASAN_SHADOW_END); + if (kimg_shadow_start > mod_shadow_end) + kasan_populate_early_shadow((void *)mod_shadow_end, + (void *)kimg_shadow_start); + } for_each_mem_range(i, &pa_start, &pa_end) { void *start = (void *)__phys_to_virt(pa_start); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 7484ea4f6ba0..6dd9369e3ea0 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -39,6 +39,7 @@ #define NO_BLOCK_MAPPINGS BIT(0) #define NO_CONT_MAPPINGS BIT(1) +#define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */ u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN); u64 idmap_ptrs_per_pgd = PTRS_PER_PGD; @@ -185,10 +186,14 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr, BUG_ON(pmd_sect(pmd)); if (pmd_none(pmd)) { + pmdval_t pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN; phys_addr_t pte_phys; + + if (flags & NO_EXEC_MAPPINGS) + pmdval |= PMD_TABLE_PXN; BUG_ON(!pgtable_alloc); pte_phys = pgtable_alloc(PAGE_SHIFT); - __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE); + __pmd_populate(pmdp, pte_phys, pmdval); pmd = READ_ONCE(*pmdp); } BUG_ON(pmd_bad(pmd)); @@ -259,10 +264,14 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr, */ BUG_ON(pud_sect(pud)); if (pud_none(pud)) { + pudval_t pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN; phys_addr_t pmd_phys; + + if (flags & NO_EXEC_MAPPINGS) + pudval |= PUD_TABLE_PXN; BUG_ON(!pgtable_alloc); pmd_phys = pgtable_alloc(PMD_SHIFT); - __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE); + __pud_populate(pudp, pmd_phys, pudval); pud = READ_ONCE(*pudp); } BUG_ON(pud_bad(pud)); @@ -306,10 +315,14 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, p4d_t p4d = READ_ONCE(*p4dp); if (p4d_none(p4d)) { + p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN; phys_addr_t pud_phys; + + if (flags & NO_EXEC_MAPPINGS) + p4dval |= P4D_TABLE_PXN; BUG_ON(!pgtable_alloc); pud_phys = pgtable_alloc(PUD_SHIFT); - __p4d_populate(p4dp, pud_phys, PUD_TYPE_TABLE); + __p4d_populate(p4dp, pud_phys, p4dval); p4d = READ_ONCE(*p4dp); } BUG_ON(p4d_bad(p4d)); @@ -486,14 +499,24 @@ early_param("crashkernel", enable_crash_mem_map); static void __init map_mem(pgd_t *pgdp) { + static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN); phys_addr_t kernel_start = __pa_symbol(_stext); phys_addr_t kernel_end = __pa_symbol(__init_begin); phys_addr_t start, end; - int flags = 0; + int flags = NO_EXEC_MAPPINGS; u64 i; + /* + * Setting hierarchical PXNTable attributes on table entries covering + * the linear region is only possible if it is guaranteed that no table + * entries at any level are being shared between the linear region and + * the vmalloc region. 
Check whether this is true for the PGD level, in + * which case it is guaranteed to be true for all other levels as well. + */ + BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end)); + if (rodata_full || crash_mem_map || debug_pagealloc_enabled()) - flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; + flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; /* * Take care not to create a writable alias for the @@ -1090,7 +1113,6 @@ static void free_empty_tables(unsigned long addr, unsigned long end, } #endif -#ifdef CONFIG_SPARSEMEM_VMEMMAP #if !ARM64_SWAPPER_USES_SECTION_MAPS int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) @@ -1154,7 +1176,6 @@ void vmemmap_free(unsigned long start, unsigned long end, free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END); #endif } -#endif /* CONFIG_SPARSEMEM_VMEMMAP */ static inline pud_t *fixmap_pud(unsigned long addr) { @@ -1210,11 +1231,11 @@ void __init early_fixmap_init(void) pudp = pud_offset_kimg(p4dp, addr); } else { if (p4d_none(p4d)) - __p4d_populate(p4dp, __pa_symbol(bm_pud), PUD_TYPE_TABLE); + __p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE); pudp = fixmap_pud(addr); } if (pud_none(READ_ONCE(*pudp))) - __pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE); + __pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE); pmdp = fixmap_pmd(addr); __pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE); @@ -1316,27 +1337,6 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) return dt_virt; } -int __init arch_ioremap_p4d_supported(void) -{ - return 0; -} - -int __init arch_ioremap_pud_supported(void) -{ - /* - * Only 4k granule supports level 1 block mappings. - * SW table walks can't handle removal of intermediate entries. - */ - return IS_ENABLED(CONFIG_ARM64_4K_PAGES) && - !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); -} - -int __init arch_ioremap_pmd_supported(void) -{ - /* See arch_ioremap_pud_supported() */ - return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); -} - int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot) { pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot)); @@ -1428,11 +1428,6 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr) return 1; } -int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) -{ - return 0; /* Don't attempt a block mapping */ -} - #ifdef CONFIG_MEMORY_HOTPLUG static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size) { @@ -1448,6 +1443,22 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size) struct range arch_get_mappable_range(void) { struct range mhp_range; + u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual)); + u64 end_linear_pa = __pa(PAGE_END - 1); + + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { + /* + * Check for a wrap, it is possible because of randomized linear + * mapping the start physical address is actually bigger than + * the end physical address. In this case set start to zero + * because [0, end_linear_pa] range must still be able to cover + * all addressable physical addresses. + */ + if (start_linear_pa > end_linear_pa) + start_linear_pa = 0; + } + + WARN_ON(start_linear_pa > end_linear_pa); /* * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)] @@ -1455,15 +1466,16 @@ struct range arch_get_mappable_range(void) * range which can be mapped inside this linear mapping range, must * also be derived from its end points. 
*/ - mhp_range.start = __pa(_PAGE_OFFSET(vabits_actual)); - mhp_range.end = __pa(PAGE_END - 1); + mhp_range.start = start_linear_pa; + mhp_range.end = end_linear_pa; + return mhp_range; } int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params) { - int ret, flags = 0; + int ret, flags = NO_EXEC_MAPPINGS; VM_BUG_ON(!mhp_range_allowed(start, size, true)); @@ -1473,7 +1485,7 @@ int arch_add_memory(int nid, u64 start, u64 size, */ if (rodata_full || debug_pagealloc_enabled() || IS_ENABLED(CONFIG_KFENCE)) - flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; + flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), size, params->pgprot, __pgd_pgtable_alloc, diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index c967bfd30d2b..0a48191534ff 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -419,14 +419,17 @@ SYM_FUNC_START(__cpu_setup) reset_amuserenr_el0 x1 // Disable AMU access from EL0 /* - * Memory region attributes + * Default values for VMSA control registers. These will be adjusted + * below depending on detected CPU features. */ - mov_q x5, MAIR_EL1_SET -#ifdef CONFIG_ARM64_MTE - mte_tcr .req x20 - - mov mte_tcr, #0 + mair .req x17 + tcr .req x16 + mov_q mair, MAIR_EL1_SET + mov_q tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ + TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ + TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS +#ifdef CONFIG_ARM64_MTE /* * Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported * (ID_AA64PFR1_EL1[11:8] > 1). @@ -438,7 +441,7 @@ SYM_FUNC_START(__cpu_setup) /* Normal Tagged memory type at the corresponding MAIR index */ mov x10, #MAIR_ATTR_NORMAL_TAGGED - bfi x5, x10, #(8 * MT_NORMAL_TAGGED), #8 + bfi mair, x10, #(8 * MT_NORMAL_TAGGED), #8 /* initialize GCR_EL1: all non-zero tags excluded by default */ mov x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK) @@ -449,37 +452,26 @@ SYM_FUNC_START(__cpu_setup) msr_s SYS_TFSRE0_EL1, xzr /* set the TCR_EL1 bits */ - mov_q mte_tcr, TCR_KASAN_HW_FLAGS + mov_q x10, TCR_KASAN_HW_FLAGS + orr tcr, tcr, x10 1: #endif - msr mair_el1, x5 - /* - * Set/prepare TCR and TTBR. TCR_EL1.T1SZ gets further - * adjusted if the kernel is compiled with 52bit VA support. - */ - mov_q x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ - TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ - TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS -#ifdef CONFIG_ARM64_MTE - orr x10, x10, mte_tcr - .unreq mte_tcr -#endif - tcr_clear_errata_bits x10, x9, x5 + tcr_clear_errata_bits tcr, x9, x5 #ifdef CONFIG_ARM64_VA_BITS_52 ldr_l x9, vabits_actual sub x9, xzr, x9 add x9, x9, #64 - tcr_set_t1sz x10, x9 + tcr_set_t1sz tcr, x9 #else ldr_l x9, idmap_t0sz #endif - tcr_set_t0sz x10, x9 + tcr_set_t0sz tcr, x9 /* * Set the IPS bits in TCR_EL1. */ - tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6 + tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6 #ifdef CONFIG_ARM64_HW_AFDBM /* * Enable hardware update of the Access Flags bit. 
@@ -489,13 +481,17 @@ SYM_FUNC_START(__cpu_setup) mrs x9, ID_AA64MMFR1_EL1 and x9, x9, #0xf cbz x9, 1f - orr x10, x10, #TCR_HA // hardware Access flag update + orr tcr, tcr, #TCR_HA // hardware Access flag update 1: #endif /* CONFIG_ARM64_HW_AFDBM */ - msr tcr_el1, x10 + msr mair_el1, mair + msr tcr_el1, tcr /* * Prepare SCTLR */ mov_q x0, INIT_SCTLR_EL1_MMU_ON ret // return to head.S + + .unreq mair + .unreq tcr SYM_FUNC_END(__cpu_setup) diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c index 0e050d76b83a..a1937dfff31c 100644 --- a/arch/arm64/mm/ptdump.c +++ b/arch/arm64/mm/ptdump.c @@ -51,10 +51,8 @@ static struct addr_marker address_markers[] = { { FIXADDR_TOP, "Fixmap end" }, { PCI_IO_START, "PCI I/O start" }, { PCI_IO_END, "PCI I/O end" }, -#ifdef CONFIG_SPARSEMEM_VMEMMAP { VMEMMAP_START, "vmemmap start" }, { VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" }, -#endif { -1, NULL }, }; @@ -337,7 +335,7 @@ void ptdump_walk(struct seq_file *s, struct ptdump_info *info) ptdump_walk_pgd(&st.ptdump, info->mm, NULL); } -static void ptdump_initialize(void) +static void __init ptdump_initialize(void) { unsigned i, j; @@ -381,7 +379,7 @@ void ptdump_check_wx(void) pr_info("Checked W+X mappings: passed, no W+X pages found\n"); } -static int ptdump_init(void) +static int __init ptdump_init(void) { address_markers[PAGE_END_NR].start_address = PAGE_END; #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c index d29d722ec3ec..68bf1a125502 100644 --- a/arch/arm64/mm/ptdump_debugfs.c +++ b/arch/arm64/mm/ptdump_debugfs.c @@ -16,7 +16,7 @@ static int ptdump_show(struct seq_file *m, void *v) } DEFINE_SHOW_ATTRIBUTE(ptdump); -void ptdump_debugfs_register(struct ptdump_info *info, const char *name) +void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name) { debugfs_create_file(name, 0400, NULL, info, &ptdump_fops); } diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index 34e91224adc3..8de5b987edb9 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -314,7 +314,7 @@ config FORCE_MAX_ZONEORDER int "Maximum zone order" default "11" -config RAM_BASE +config DRAM_BASE hex "DRAM start addr (the same with memory-section in dts)" default 0x0 diff --git a/arch/csky/abiv1/cacheflush.c b/arch/csky/abiv1/cacheflush.c index 9f1fe80cc847..07ff17ea33de 100644 --- a/arch/csky/abiv1/cacheflush.c +++ b/arch/csky/abiv1/cacheflush.c @@ -4,6 +4,7 @@ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/fs.h> +#include <linux/pagemap.h> #include <linux/syscalls.h> #include <linux/spinlock.h> #include <asm/page.h> diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild index cc24bb8e539f..904a18a818be 100644 --- a/arch/csky/include/asm/Kbuild +++ b/arch/csky/include/asm/Kbuild @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 generic-y += asm-offsets.h +generic-y += extable.h generic-y += gpio.h generic-y += kvm_para.h generic-y += qrwlock.h diff --git a/arch/csky/include/asm/asid.h b/arch/csky/include/asm/asid.h index ac08b0ffbe1f..6ff205a97a14 100644 --- a/arch/csky/include/asm/asid.h +++ b/arch/csky/include/asm/asid.h @@ -37,7 +37,7 @@ void asid_new_context(struct asid_info *info, atomic64_t *pasid, * Check the ASID is still valid for the context. If not generate a new ASID. * * @pasid: Pointer to the current ASID batch - * @cpu: current CPU ID. Must have been acquired throught get_cpu() + * @cpu: current CPU ID. 
Must have been acquired through get_cpu() */ static inline void asid_check_context(struct asid_info *info, atomic64_t *pasid, unsigned int cpu, diff --git a/arch/csky/include/asm/barrier.h b/arch/csky/include/asm/barrier.h index 84fc600c8b45..f4045dd53e17 100644 --- a/arch/csky/include/asm/barrier.h +++ b/arch/csky/include/asm/barrier.h @@ -64,7 +64,7 @@ /* * sync: completion barrier, all sync.xx instructions - * guarantee the last response recieved by bus transaction + * guarantee the last response received by bus transaction * made by ld/st instructions before sync.s * sync.s: inherit from sync, but also shareable to other cores * sync.i: inherit from sync, but also flush cpu pipeline diff --git a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h index 3b91fc3cf36f..ed7451478b1b 100644 --- a/arch/csky/include/asm/page.h +++ b/arch/csky/include/asm/page.h @@ -28,7 +28,7 @@ #define SSEG_SIZE 0x20000000 #define LOWMEM_LIMIT (SSEG_SIZE * 2) -#define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (SSEG_SIZE - 1)) +#define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1)) #ifndef __ASSEMBLY__ diff --git a/arch/csky/include/asm/segment.h b/arch/csky/include/asm/segment.h index 589e8321dc14..5bc1cc62b87f 100644 --- a/arch/csky/include/asm/segment.h +++ b/arch/csky/include/asm/segment.h @@ -7,11 +7,4 @@ typedef struct { unsigned long seg; } mm_segment_t; -#define KERNEL_DS ((mm_segment_t) { 0xFFFFFFFF }) - -#define USER_DS ((mm_segment_t) { PAGE_OFFSET }) -#define get_fs() (current_thread_info()->addr_limit) -#define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) - #endif /* __ASM_CSKY_SEGMENT_H */ diff --git a/arch/csky/include/asm/uaccess.h b/arch/csky/include/asm/uaccess.h index 3dec272e1fa3..ac83823fc437 100644 --- a/arch/csky/include/asm/uaccess.h +++ b/arch/csky/include/asm/uaccess.h @@ -3,122 +3,26 @@ #ifndef __ASM_CSKY_UACCESS_H #define __ASM_CSKY_UACCESS_H -/* - * User space memory access functions - */ -#include <linux/compiler.h> -#include <linux/errno.h> -#include <linux/types.h> -#include <linux/sched.h> -#include <linux/string.h> -#include <linux/version.h> -#include <asm/segment.h> +#define user_addr_max() \ + (uaccess_kernel() ? KERNEL_DS.seg : get_fs().seg) -static inline int access_ok(const void *addr, unsigned long size) +static inline int __access_ok(unsigned long addr, unsigned long size) { unsigned long limit = current_thread_info()->addr_limit.seg; - return (((unsigned long)addr < limit) && - ((unsigned long)(addr + size) < limit)); + return ((addr < limit) && ((addr + size) < limit)); } - -#define __addr_ok(addr) (access_ok(addr, 0)) - -extern int __put_user_bad(void); +#define __access_ok __access_ok /* - * Tell gcc we read from memory instead of writing: this is because - * we do not write to any memory gcc knows about, so there are no - * aliasing issues. + * __put_user_fn */ +extern int __put_user_bad(void); -/* - * These are the main single-value transfer routines. They automatically - * use the right size if we just have the right pointer type. - * - * This gets kind of ugly. We want to return _two_ values in "get_user()" - * and yet we don't want to do any pointers, because that is too much - * of a performance impact. Thus we have a few rather ugly macros here, - * and hide all the ugliness from the user. 
- * - * The "__xxx" versions of the user access functions are versions that - * do not verify the address space, that must have been done previously - * with a separate "access_ok()" call (this is used when we do multiple - * accesses to the same area of user memory). - * - * As we use the same address space for kernel and user data on - * Ckcore, we can just do these as direct assignments. (Of course, the - * exception handling means that it's no longer "just"...) - */ - -#define put_user(x, ptr) \ - __put_user_check((x), (ptr), sizeof(*(ptr))) - -#define __put_user(x, ptr) \ - __put_user_nocheck((x), (ptr), sizeof(*(ptr))) - -#define __ptr(x) ((unsigned long *)(x)) - -#define get_user(x, ptr) \ - __get_user_check((x), (ptr), sizeof(*(ptr))) - -#define __get_user(x, ptr) \ - __get_user_nocheck((x), (ptr), sizeof(*(ptr))) - -#define __put_user_nocheck(x, ptr, size) \ -({ \ - long __pu_err = 0; \ - typeof(*(ptr)) *__pu_addr = (ptr); \ - typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x); \ - if (__pu_addr) \ - __put_user_size(__pu_val, (__pu_addr), (size), \ - __pu_err); \ - __pu_err; \ -}) - -#define __put_user_check(x, ptr, size) \ -({ \ - long __pu_err = -EFAULT; \ - typeof(*(ptr)) *__pu_addr = (ptr); \ - typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x); \ - if (access_ok(__pu_addr, size) && __pu_addr) \ - __put_user_size(__pu_val, __pu_addr, (size), __pu_err); \ - __pu_err; \ -}) - -#define __put_user_size(x, ptr, size, retval) \ -do { \ - retval = 0; \ - switch (size) { \ - case 1: \ - __put_user_asm_b(x, ptr, retval); \ - break; \ - case 2: \ - __put_user_asm_h(x, ptr, retval); \ - break; \ - case 4: \ - __put_user_asm_w(x, ptr, retval); \ - break; \ - case 8: \ - __put_user_asm_64(x, ptr, retval); \ - break; \ - default: \ - __put_user_bad(); \ - } \ -} while (0) - -/* - * We don't tell gcc that we are accessing memory, but this is OK - * because we do not write to any memory gcc knows about, so there - * are no aliasing issues. - * - * Note that PC at a fault is the address *after* the faulting - * instruction. 
- */ #define __put_user_asm_b(x, ptr, err) \ do { \ int errcode; \ - asm volatile( \ + __asm__ __volatile__( \ "1: stb %1, (%2,0) \n" \ " br 3f \n" \ "2: mov %0, %3 \n" \ @@ -136,7 +40,7 @@ do { \ #define __put_user_asm_h(x, ptr, err) \ do { \ int errcode; \ - asm volatile( \ + __asm__ __volatile__( \ "1: sth %1, (%2,0) \n" \ " br 3f \n" \ "2: mov %0, %3 \n" \ @@ -154,7 +58,7 @@ do { \ #define __put_user_asm_w(x, ptr, err) \ do { \ int errcode; \ - asm volatile( \ + __asm__ __volatile__( \ "1: stw %1, (%2,0) \n" \ " br 3f \n" \ "2: mov %0, %3 \n" \ @@ -169,241 +73,149 @@ do { \ : "memory"); \ } while (0) -#define __put_user_asm_64(x, ptr, err) \ -do { \ - int tmp; \ - int errcode; \ - typeof(*(ptr))src = (typeof(*(ptr)))x; \ - typeof(*(ptr))*psrc = &src; \ - \ - asm volatile( \ - " ldw %3, (%1, 0) \n" \ - "1: stw %3, (%2, 0) \n" \ - " ldw %3, (%1, 4) \n" \ - "2: stw %3, (%2, 4) \n" \ - " br 4f \n" \ - "3: mov %0, %4 \n" \ - " br 4f \n" \ - ".section __ex_table, \"a\" \n" \ - ".align 2 \n" \ - ".long 1b, 3b \n" \ - ".long 2b, 3b \n" \ - ".previous \n" \ - "4: \n" \ - : "=r"(err), "=r"(psrc), "=r"(ptr), \ - "=r"(tmp), "=r"(errcode) \ - : "0"(err), "1"(psrc), "2"(ptr), "3"(0), "4"(-EFAULT) \ - : "memory"); \ -} while (0) - -#define __get_user_nocheck(x, ptr, size) \ -({ \ - long __gu_err; \ - __get_user_size(x, (ptr), (size), __gu_err); \ - __gu_err; \ -}) - -#define __get_user_check(x, ptr, size) \ -({ \ - int __gu_err = -EFAULT; \ - const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ - if (access_ok(__gu_ptr, size) && __gu_ptr) \ - __get_user_size(x, __gu_ptr, size, __gu_err); \ - __gu_err; \ -}) - -#define __get_user_size(x, ptr, size, retval) \ -do { \ - switch (size) { \ - case 1: \ - __get_user_asm_common((x), ptr, "ldb", retval); \ - break; \ - case 2: \ - __get_user_asm_common((x), ptr, "ldh", retval); \ - break; \ - case 4: \ - __get_user_asm_common((x), ptr, "ldw", retval); \ - break; \ - default: \ - x = 0; \ - (retval) = __get_user_bad(); \ - } \ +#define __put_user_asm_64(x, ptr, err) \ +do { \ + int tmp; \ + int errcode; \ + \ + __asm__ __volatile__( \ + " ldw %3, (%1, 0) \n" \ + "1: stw %3, (%2, 0) \n" \ + " ldw %3, (%1, 4) \n" \ + "2: stw %3, (%2, 4) \n" \ + " br 4f \n" \ + "3: mov %0, %4 \n" \ + " br 4f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ + ".long 1b, 3b \n" \ + ".long 2b, 3b \n" \ + ".previous \n" \ + "4: \n" \ + : "=r"(err), "=r"(x), "=r"(ptr), \ + "=r"(tmp), "=r"(errcode) \ + : "0"(err), "1"(x), "2"(ptr), "3"(0), \ + "4"(-EFAULT) \ + : "memory"); \ } while (0) -#define __get_user_asm_common(x, ptr, ins, err) \ -do { \ - int errcode; \ - asm volatile( \ - "1: " ins " %1, (%4,0) \n" \ - " br 3f \n" \ - /* Fix up codes */ \ - "2: mov %0, %2 \n" \ - " movi %1, 0 \n" \ - " br 3f \n" \ - ".section __ex_table,\"a\" \n" \ - ".align 2 \n" \ - ".long 1b, 2b \n" \ - ".previous \n" \ - "3: \n" \ - : "=r"(err), "=r"(x), "=r"(errcode) \ - : "0"(0), "r"(ptr), "2"(-EFAULT) \ - : "memory"); \ -} while (0) +static inline int __put_user_fn(size_t size, void __user *ptr, void *x) +{ + int retval = 0; + u32 tmp; + + switch (size) { + case 1: + tmp = *(u8 *)x; + __put_user_asm_b(tmp, ptr, retval); + break; + case 2: + tmp = *(u16 *)x; + __put_user_asm_h(tmp, ptr, retval); + break; + case 4: + tmp = *(u32 *)x; + __put_user_asm_w(tmp, ptr, retval); + break; + case 8: + __put_user_asm_64(x, (u64 *)ptr, retval); + break; + } + + return retval; +} +#define __put_user_fn __put_user_fn +/* + * __get_user_fn + */ extern int __get_user_bad(void); -#define ___copy_to_user(to, 
from, n) \ +#define __get_user_asm_common(x, ptr, ins, err) \ do { \ - int w0, w1, w2, w3; \ - asm volatile( \ - "0: cmpnei %1, 0 \n" \ - " bf 8f \n" \ - " mov %3, %1 \n" \ - " or %3, %2 \n" \ - " andi %3, 3 \n" \ - " cmpnei %3, 0 \n" \ - " bf 1f \n" \ - " br 5f \n" \ - "1: cmplti %0, 16 \n" /* 4W */ \ - " bt 3f \n" \ - " ldw %3, (%2, 0) \n" \ - " ldw %4, (%2, 4) \n" \ - " ldw %5, (%2, 8) \n" \ - " ldw %6, (%2, 12) \n" \ - "2: stw %3, (%1, 0) \n" \ - "9: stw %4, (%1, 4) \n" \ - "10: stw %5, (%1, 8) \n" \ - "11: stw %6, (%1, 12) \n" \ - " addi %2, 16 \n" \ - " addi %1, 16 \n" \ - " subi %0, 16 \n" \ - " br 1b \n" \ - "3: cmplti %0, 4 \n" /* 1W */ \ - " bt 5f \n" \ - " ldw %3, (%2, 0) \n" \ - "4: stw %3, (%1, 0) \n" \ - " addi %2, 4 \n" \ - " addi %1, 4 \n" \ - " subi %0, 4 \n" \ - " br 3b \n" \ - "5: cmpnei %0, 0 \n" /* 1B */ \ - " bf 13f \n" \ - " ldb %3, (%2, 0) \n" \ - "6: stb %3, (%1, 0) \n" \ - " addi %2, 1 \n" \ - " addi %1, 1 \n" \ - " subi %0, 1 \n" \ - " br 5b \n" \ - "7: subi %0, 4 \n" \ - "8: subi %0, 4 \n" \ - "12: subi %0, 4 \n" \ - " br 13f \n" \ - ".section __ex_table, \"a\" \n" \ - ".align 2 \n" \ - ".long 2b, 13f \n" \ - ".long 4b, 13f \n" \ - ".long 6b, 13f \n" \ - ".long 9b, 12b \n" \ - ".long 10b, 8b \n" \ - ".long 11b, 7b \n" \ - ".previous \n" \ - "13: \n" \ - : "=r"(n), "=r"(to), "=r"(from), "=r"(w0), \ - "=r"(w1), "=r"(w2), "=r"(w3) \ - : "0"(n), "1"(to), "2"(from) \ + int errcode; \ + __asm__ __volatile__( \ + "1: " ins " %1, (%4, 0) \n" \ + " br 3f \n" \ + "2: mov %0, %2 \n" \ + " movi %1, 0 \n" \ + " br 3f \n" \ + ".section __ex_table,\"a\" \n" \ + ".align 2 \n" \ + ".long 1b, 2b \n" \ + ".previous \n" \ + "3: \n" \ + : "=r"(err), "=r"(x), "=r"(errcode) \ + : "0"(0), "r"(ptr), "2"(-EFAULT) \ : "memory"); \ } while (0) -#define ___copy_from_user(to, from, n) \ +#define __get_user_asm_64(x, ptr, err) \ do { \ int tmp; \ - int nsave; \ - asm volatile( \ - "0: cmpnei %1, 0 \n" \ - " bf 7f \n" \ - " mov %3, %1 \n" \ - " or %3, %2 \n" \ - " andi %3, 3 \n" \ - " cmpnei %3, 0 \n" \ - " bf 1f \n" \ - " br 5f \n" \ - "1: cmplti %0, 16 \n" \ - " bt 3f \n" \ - "2: ldw %3, (%2, 0) \n" \ - "10: ldw %4, (%2, 4) \n" \ - " stw %3, (%1, 0) \n" \ - " stw %4, (%1, 4) \n" \ - "11: ldw %3, (%2, 8) \n" \ - "12: ldw %4, (%2, 12) \n" \ - " stw %3, (%1, 8) \n" \ - " stw %4, (%1, 12) \n" \ - " addi %2, 16 \n" \ - " addi %1, 16 \n" \ - " subi %0, 16 \n" \ - " br 1b \n" \ - "3: cmplti %0, 4 \n" \ - " bt 5f \n" \ - "4: ldw %3, (%2, 0) \n" \ - " stw %3, (%1, 0) \n" \ - " addi %2, 4 \n" \ - " addi %1, 4 \n" \ - " subi %0, 4 \n" \ - " br 3b \n" \ - "5: cmpnei %0, 0 \n" \ - " bf 7f \n" \ - "6: ldb %3, (%2, 0) \n" \ - " stb %3, (%1, 0) \n" \ - " addi %2, 1 \n" \ - " addi %1, 1 \n" \ - " subi %0, 1 \n" \ - " br 5b \n" \ - "8: stw %3, (%1, 0) \n" \ - " subi %0, 4 \n" \ - " bf 7f \n" \ - "9: subi %0, 8 \n" \ - " bf 7f \n" \ - "13: stw %3, (%1, 8) \n" \ - " subi %0, 12 \n" \ - " bf 7f \n" \ - ".section __ex_table, \"a\" \n" \ - ".align 2 \n" \ - ".long 2b, 7f \n" \ - ".long 4b, 7f \n" \ - ".long 6b, 7f \n" \ - ".long 10b, 8b \n" \ - ".long 11b, 9b \n" \ - ".long 12b,13b \n" \ - ".previous \n" \ - "7: \n" \ - : "=r"(n), "=r"(to), "=r"(from), "=r"(nsave), \ - "=r"(tmp) \ - : "0"(n), "1"(to), "2"(from) \ + int errcode; \ + \ + __asm__ __volatile__( \ + "1: ldw %3, (%2, 0) \n" \ + " stw %3, (%1, 0) \n" \ + "2: ldw %3, (%2, 4) \n" \ + " stw %3, (%1, 4) \n" \ + " br 4f \n" \ + "3: mov %0, %4 \n" \ + " br 4f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ + ".long 1b, 3b \n" \ + ".long 2b, 3b \n" 
\ + ".previous \n" \ + "4: \n" \ + : "=r"(err), "=r"(x), "=r"(ptr), \ + "=r"(tmp), "=r"(errcode) \ + : "0"(err), "1"(x), "2"(ptr), "3"(0), \ + "4"(-EFAULT) \ : "memory"); \ } while (0) +static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) +{ + int retval; + u32 tmp; + + switch (size) { + case 1: + __get_user_asm_common(tmp, ptr, "ldb", retval); + *(u8 *)x = (u8)tmp; + break; + case 2: + __get_user_asm_common(tmp, ptr, "ldh", retval); + *(u16 *)x = (u16)tmp; + break; + case 4: + __get_user_asm_common(tmp, ptr, "ldw", retval); + *(u32 *)x = (u32)tmp; + break; + case 8: + __get_user_asm_64(x, ptr, retval); + break; + } + + return retval; +} +#define __get_user_fn __get_user_fn + unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n); unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n); -unsigned long clear_user(void *to, unsigned long n); unsigned long __clear_user(void __user *to, unsigned long n); +#define __clear_user __clear_user -long strncpy_from_user(char *dst, const char *src, long count); long __strncpy_from_user(char *dst, const char *src, long count); +#define __strncpy_from_user __strncpy_from_user -/* - * Return the size of a string (including the ending 0) - * - * Return 0 on exception, a value greater than N if too long - */ -long strnlen_user(const char *src, long n); - -#define strlen_user(str) strnlen_user(str, 32767) - -struct exception_table_entry { - unsigned long insn; - unsigned long nextinsn; -}; +long __strnlen_user(const char *s, long n); +#define __strnlen_user __strnlen_user -extern int fixup_exception(struct pt_regs *regs); +#include <asm/segment.h> +#include <asm-generic/uaccess.h> #endif /* __ASM_CSKY_UACCESS_H */ diff --git a/arch/csky/include/asm/vdso.h b/arch/csky/include/asm/vdso.h index eb5142f9c564..bdce581b5fcb 100644 --- a/arch/csky/include/asm/vdso.h +++ b/arch/csky/include/asm/vdso.h @@ -16,7 +16,7 @@ struct vdso_data { * offset of 0, but since the linker must support setting weak undefined * symbols to the absolute address 0 it also happens to support other low * addresses even when the code model suggests those low addresses would not - * otherwise be availiable. + * otherwise be available. 
*/ #define VDSO_SYMBOL(base, name) \ ({ \ diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S index c1bd7a6b4ab6..00e3c8ebf9b8 100644 --- a/arch/csky/kernel/entry.S +++ b/arch/csky/kernel/entry.S @@ -9,7 +9,6 @@ #include <asm/unistd.h> #include <asm/asm-offsets.h> #include <linux/threads.h> -#include <asm/setup.h> #include <asm/page.h> #include <asm/thread_info.h> diff --git a/arch/csky/kernel/probes/ftrace.c b/arch/csky/kernel/probes/ftrace.c index ae2b1c7b3b5c..ef2bb9bd9605 100644 --- a/arch/csky/kernel/probes/ftrace.c +++ b/arch/csky/kernel/probes/ftrace.c @@ -9,7 +9,7 @@ int arch_check_ftrace_location(struct kprobe *p) return 0; } -/* Ftrace callback handler for kprobes -- called under preepmt disabed */ +/* Ftrace callback handler for kprobes -- called under preepmt disabled */ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *ops, struct ftrace_regs *fregs) { diff --git a/arch/csky/lib/usercopy.c b/arch/csky/lib/usercopy.c index 3c9bd645e643..c5d394a0ae78 100644 --- a/arch/csky/lib/usercopy.c +++ b/arch/csky/lib/usercopy.c @@ -7,7 +7,70 @@ unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n) { - ___copy_from_user(to, from, n); + int tmp, nsave; + + __asm__ __volatile__( + "0: cmpnei %1, 0 \n" + " bf 7f \n" + " mov %3, %1 \n" + " or %3, %2 \n" + " andi %3, 3 \n" + " cmpnei %3, 0 \n" + " bf 1f \n" + " br 5f \n" + "1: cmplti %0, 16 \n" + " bt 3f \n" + "2: ldw %3, (%2, 0) \n" + "10: ldw %4, (%2, 4) \n" + " stw %3, (%1, 0) \n" + " stw %4, (%1, 4) \n" + "11: ldw %3, (%2, 8) \n" + "12: ldw %4, (%2, 12) \n" + " stw %3, (%1, 8) \n" + " stw %4, (%1, 12) \n" + " addi %2, 16 \n" + " addi %1, 16 \n" + " subi %0, 16 \n" + " br 1b \n" + "3: cmplti %0, 4 \n" + " bt 5f \n" + "4: ldw %3, (%2, 0) \n" + " stw %3, (%1, 0) \n" + " addi %2, 4 \n" + " addi %1, 4 \n" + " subi %0, 4 \n" + " br 3b \n" + "5: cmpnei %0, 0 \n" + " bf 7f \n" + "6: ldb %3, (%2, 0) \n" + " stb %3, (%1, 0) \n" + " addi %2, 1 \n" + " addi %1, 1 \n" + " subi %0, 1 \n" + " br 5b \n" + "8: stw %3, (%1, 0) \n" + " subi %0, 4 \n" + " bf 7f \n" + "9: subi %0, 8 \n" + " bf 7f \n" + "13: stw %3, (%1, 8) \n" + " subi %0, 12 \n" + " bf 7f \n" + ".section __ex_table, \"a\" \n" + ".align 2 \n" + ".long 2b, 7f \n" + ".long 4b, 7f \n" + ".long 6b, 7f \n" + ".long 10b, 8b \n" + ".long 11b, 9b \n" + ".long 12b,13b \n" + ".previous \n" + "7: \n" + : "=r"(n), "=r"(to), "=r"(from), "=r"(nsave), + "=r"(tmp) + : "0"(n), "1"(to), "2"(from) + : "memory"); + return n; } EXPORT_SYMBOL(raw_copy_from_user); @@ -15,48 +78,70 @@ EXPORT_SYMBOL(raw_copy_from_user); unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n) { - ___copy_to_user(to, from, n); + int w0, w1, w2, w3; + + __asm__ __volatile__( + "0: cmpnei %1, 0 \n" + " bf 8f \n" + " mov %3, %1 \n" + " or %3, %2 \n" + " andi %3, 3 \n" + " cmpnei %3, 0 \n" + " bf 1f \n" + " br 5f \n" + "1: cmplti %0, 16 \n" /* 4W */ + " bt 3f \n" + " ldw %3, (%2, 0) \n" + " ldw %4, (%2, 4) \n" + " ldw %5, (%2, 8) \n" + " ldw %6, (%2, 12) \n" + "2: stw %3, (%1, 0) \n" + "9: stw %4, (%1, 4) \n" + "10: stw %5, (%1, 8) \n" + "11: stw %6, (%1, 12) \n" + " addi %2, 16 \n" + " addi %1, 16 \n" + " subi %0, 16 \n" + " br 1b \n" + "3: cmplti %0, 4 \n" /* 1W */ + " bt 5f \n" + " ldw %3, (%2, 0) \n" + "4: stw %3, (%1, 0) \n" + " addi %2, 4 \n" + " addi %1, 4 \n" + " subi %0, 4 \n" + " br 3b \n" + "5: cmpnei %0, 0 \n" /* 1B */ + " bf 13f \n" + " ldb %3, (%2, 0) \n" + "6: stb %3, (%1, 0) \n" + " addi %2, 1 \n" + " addi %1, 1 \n" + " subi %0, 1 
\n" + " br 5b \n" + "7: subi %0, 4 \n" + "8: subi %0, 4 \n" + "12: subi %0, 4 \n" + " br 13f \n" + ".section __ex_table, \"a\" \n" + ".align 2 \n" + ".long 2b, 13f \n" + ".long 4b, 13f \n" + ".long 6b, 13f \n" + ".long 9b, 12b \n" + ".long 10b, 8b \n" + ".long 11b, 7b \n" + ".previous \n" + "13: \n" + : "=r"(n), "=r"(to), "=r"(from), "=r"(w0), + "=r"(w1), "=r"(w2), "=r"(w3) + : "0"(n), "1"(to), "2"(from) + : "memory"); + return n; } EXPORT_SYMBOL(raw_copy_to_user); - -/* - * copy a null terminated string from userspace. - */ -#define __do_strncpy_from_user(dst, src, count, res) \ -do { \ - int tmp; \ - long faultres; \ - asm volatile( \ - " cmpnei %3, 0 \n" \ - " bf 4f \n" \ - "1: cmpnei %1, 0 \n" \ - " bf 5f \n" \ - "2: ldb %4, (%3, 0) \n" \ - " stb %4, (%2, 0) \n" \ - " cmpnei %4, 0 \n" \ - " bf 3f \n" \ - " addi %3, 1 \n" \ - " addi %2, 1 \n" \ - " subi %1, 1 \n" \ - " br 1b \n" \ - "3: subu %0, %1 \n" \ - " br 5f \n" \ - "4: mov %0, %5 \n" \ - " br 5f \n" \ - ".section __ex_table, \"a\" \n" \ - ".align 2 \n" \ - ".long 2b, 4b \n" \ - ".previous \n" \ - "5: \n" \ - : "=r"(res), "=r"(count), "=r"(dst), \ - "=r"(src), "=r"(tmp), "=r"(faultres) \ - : "5"(-EFAULT), "0"(count), "1"(count), \ - "2"(dst), "3"(src) \ - : "memory", "cc"); \ -} while (0) - /* * __strncpy_from_user: - Copy a NUL terminated string from userspace, * with less checking. @@ -80,43 +165,43 @@ do { \ */ long __strncpy_from_user(char *dst, const char *src, long count) { - long res; + long res, faultres; + int tmp; - __do_strncpy_from_user(dst, src, count, res); - return res; -} -EXPORT_SYMBOL(__strncpy_from_user); - -/* - * strncpy_from_user: - Copy a NUL terminated string from userspace. - * @dst: Destination address, in kernel space. This buffer must be at - * least @count bytes long. - * @src: Source address, in user space. - * @count: Maximum number of bytes to copy, including the trailing NUL. - * - * Copies a NUL-terminated string from userspace to kernel space. - * - * On success, returns the length of the string (not including the trailing - * NUL). - * - * If access to userspace fails, returns -EFAULT (some data may have been - * copied). - * - * If @count is smaller than the length of the string, copies @count bytes - * and returns @count. - */ -long strncpy_from_user(char *dst, const char *src, long count) -{ - long res = -EFAULT; + __asm__ __volatile__( + " cmpnei %3, 0 \n" + " bf 4f \n" + "1: cmpnei %1, 0 \n" + " bf 5f \n" + "2: ldb %4, (%3, 0) \n" + " stb %4, (%2, 0) \n" + " cmpnei %4, 0 \n" + " bf 3f \n" + " addi %3, 1 \n" + " addi %2, 1 \n" + " subi %1, 1 \n" + " br 1b \n" + "3: subu %0, %1 \n" + " br 5f \n" + "4: mov %0, %5 \n" + " br 5f \n" + ".section __ex_table, \"a\" \n" + ".align 2 \n" + ".long 2b, 4b \n" + ".previous \n" + "5: \n" + : "=r"(res), "=r"(count), "=r"(dst), + "=r"(src), "=r"(tmp), "=r"(faultres) + : "5"(-EFAULT), "0"(count), "1"(count), + "2"(dst), "3"(src) + : "memory"); - if (access_ok(src, 1)) - __do_strncpy_from_user(dst, src, count, res); return res; } -EXPORT_SYMBOL(strncpy_from_user); +EXPORT_SYMBOL(__strncpy_from_user); /* - * strlen_user: - Get the size of a string in user space. + * strnlen_user: - Get the size of a string in user space. * @str: The string to measure. * @n: The maximum valid length * @@ -126,14 +211,11 @@ EXPORT_SYMBOL(strncpy_from_user); * On exception, returns 0. * If the string is too long, returns a value greater than @n. 
*/ -long strnlen_user(const char *s, long n) +long __strnlen_user(const char *s, long n) { unsigned long res, tmp; - if (s == NULL) - return 0; - - asm volatile( + __asm__ __volatile__( " cmpnei %1, 0 \n" " bf 3f \n" "1: cmpnei %0, 0 \n" @@ -156,87 +238,11 @@ long strnlen_user(const char *s, long n) "5: \n" : "=r"(n), "=r"(s), "=r"(res), "=r"(tmp) : "0"(n), "1"(s), "2"(n) - : "memory", "cc"); + : "memory"); return res; } -EXPORT_SYMBOL(strnlen_user); - -#define __do_clear_user(addr, size) \ -do { \ - int __d0, zvalue, tmp; \ - \ - asm volatile( \ - "0: cmpnei %1, 0 \n" \ - " bf 7f \n" \ - " mov %3, %1 \n" \ - " andi %3, 3 \n" \ - " cmpnei %3, 0 \n" \ - " bf 1f \n" \ - " br 5f \n" \ - "1: cmplti %0, 32 \n" /* 4W */ \ - " bt 3f \n" \ - "8: stw %2, (%1, 0) \n" \ - "10: stw %2, (%1, 4) \n" \ - "11: stw %2, (%1, 8) \n" \ - "12: stw %2, (%1, 12) \n" \ - "13: stw %2, (%1, 16) \n" \ - "14: stw %2, (%1, 20) \n" \ - "15: stw %2, (%1, 24) \n" \ - "16: stw %2, (%1, 28) \n" \ - " addi %1, 32 \n" \ - " subi %0, 32 \n" \ - " br 1b \n" \ - "3: cmplti %0, 4 \n" /* 1W */ \ - " bt 5f \n" \ - "4: stw %2, (%1, 0) \n" \ - " addi %1, 4 \n" \ - " subi %0, 4 \n" \ - " br 3b \n" \ - "5: cmpnei %0, 0 \n" /* 1B */ \ - "9: bf 7f \n" \ - "6: stb %2, (%1, 0) \n" \ - " addi %1, 1 \n" \ - " subi %0, 1 \n" \ - " br 5b \n" \ - ".section __ex_table,\"a\" \n" \ - ".align 2 \n" \ - ".long 8b, 9b \n" \ - ".long 10b, 9b \n" \ - ".long 11b, 9b \n" \ - ".long 12b, 9b \n" \ - ".long 13b, 9b \n" \ - ".long 14b, 9b \n" \ - ".long 15b, 9b \n" \ - ".long 16b, 9b \n" \ - ".long 4b, 9b \n" \ - ".long 6b, 9b \n" \ - ".previous \n" \ - "7: \n" \ - : "=r"(size), "=r" (__d0), \ - "=r"(zvalue), "=r"(tmp) \ - : "0"(size), "1"(addr), "2"(0) \ - : "memory", "cc"); \ -} while (0) - -/* - * clear_user: - Zero a block of memory in user space. - * @to: Destination address, in user space. - * @n: Number of bytes to zero. - * - * Zero a block of memory in user space. - * - * Returns number of bytes that could not be cleared. - * On success, this will be zero. - */ -unsigned long -clear_user(void __user *to, unsigned long n) -{ - if (access_ok(to, n)) - __do_clear_user(to, n); - return n; -} -EXPORT_SYMBOL(clear_user); +EXPORT_SYMBOL(__strnlen_user); /* * __clear_user: - Zero a block of memory in user space, with less checking. 
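The csky conversion above drops the private clear_user()/strnlen_user()/strncpy_from_user() wrappers and keeps only the double-underscore primitives; with __clear_user, __strncpy_from_user and __strnlen_user defined and <asm-generic/uaccess.h> now pulled in from uaccess.h, the checked wrappers come from the generic header. As a rough sketch of that generic pattern (not the literal asm-generic code), the wrapper is little more than an access_ok() guard around the per-arch primitive:

static inline unsigned long clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (!access_ok(to, n))
		return n;	/* nothing was cleared */

	return __clear_user(to, n);
}

The same shape applies to strnlen_user() and strncpy_from_user(), which is why only the __-prefixed variants need the hand-written assembly seen in this file.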
@@ -252,7 +258,59 @@ EXPORT_SYMBOL(clear_user); unsigned long __clear_user(void __user *to, unsigned long n) { - __do_clear_user(to, n); + int data, value, tmp; + + __asm__ __volatile__( + "0: cmpnei %1, 0 \n" + " bf 7f \n" + " mov %3, %1 \n" + " andi %3, 3 \n" + " cmpnei %3, 0 \n" + " bf 1f \n" + " br 5f \n" + "1: cmplti %0, 32 \n" /* 4W */ + " bt 3f \n" + "8: stw %2, (%1, 0) \n" + "10: stw %2, (%1, 4) \n" + "11: stw %2, (%1, 8) \n" + "12: stw %2, (%1, 12) \n" + "13: stw %2, (%1, 16) \n" + "14: stw %2, (%1, 20) \n" + "15: stw %2, (%1, 24) \n" + "16: stw %2, (%1, 28) \n" + " addi %1, 32 \n" + " subi %0, 32 \n" + " br 1b \n" + "3: cmplti %0, 4 \n" /* 1W */ + " bt 5f \n" + "4: stw %2, (%1, 0) \n" + " addi %1, 4 \n" + " subi %0, 4 \n" + " br 3b \n" + "5: cmpnei %0, 0 \n" /* 1B */ + "9: bf 7f \n" + "6: stb %2, (%1, 0) \n" + " addi %1, 1 \n" + " subi %0, 1 \n" + " br 5b \n" + ".section __ex_table,\"a\" \n" + ".align 2 \n" + ".long 8b, 9b \n" + ".long 10b, 9b \n" + ".long 11b, 9b \n" + ".long 12b, 9b \n" + ".long 13b, 9b \n" + ".long 14b, 9b \n" + ".long 15b, 9b \n" + ".long 16b, 9b \n" + ".long 4b, 9b \n" + ".long 6b, 9b \n" + ".previous \n" + "7: \n" + : "=r"(n), "=r" (data), "=r"(value), "=r"(tmp) + : "0"(n), "1"(to), "2"(0) + : "memory"); + return n; } EXPORT_SYMBOL(__clear_user); diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c index 1482de56f4f7..466ad949818a 100644 --- a/arch/csky/mm/fault.c +++ b/arch/csky/mm/fault.c @@ -12,7 +12,7 @@ int fixup_exception(struct pt_regs *regs) fixup = search_exception_tables(instruction_pointer(regs)); if (fixup) { - regs->pc = fixup->nextinsn; + regs->pc = fixup->fixup; return 1; } diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c index 894050a8ce09..bf2004aa811a 100644 --- a/arch/csky/mm/init.c +++ b/arch/csky/mm/init.c @@ -107,7 +107,6 @@ void __init mem_init(void) free_highmem_page(page); } #endif - mem_init_print_info(NULL); } void free_initmem(void) diff --git a/arch/csky/mm/syscache.c b/arch/csky/mm/syscache.c index ffade2f9a4c8..4e51d63850c4 100644 --- a/arch/csky/mm/syscache.c +++ b/arch/csky/mm/syscache.c @@ -17,6 +17,7 @@ SYSCALL_DEFINE3(cacheflush, flush_icache_mm_range(current->mm, (unsigned long)addr, (unsigned long)addr + bytes); + fallthrough; case DCACHE: dcache_wb_range((unsigned long)addr, (unsigned long)addr + bytes); diff --git a/arch/h8300/include/asm/bitops.h b/arch/h8300/include/asm/bitops.h index 7aa16c732aa9..c867a80cab5b 100644 --- a/arch/h8300/include/asm/bitops.h +++ b/arch/h8300/include/asm/bitops.h @@ -9,6 +9,10 @@ #include <linux/compiler.h> +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/__fls.h> +#include <asm-generic/bitops/fls64.h> + #ifdef __KERNEL__ #ifndef _LINUX_BITOPS_H @@ -173,8 +177,4 @@ static inline unsigned long __ffs(unsigned long word) #endif /* __KERNEL__ */ -#include <asm-generic/bitops/fls.h> -#include <asm-generic/bitops/__fls.h> -#include <asm-generic/bitops/fls64.h> - #endif /* _H8300_BITOPS_H */ diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c index 1f3b345d68b9..f7bf4693e3b2 100644 --- a/arch/h8300/mm/init.c +++ b/arch/h8300/mm/init.c @@ -98,6 +98,4 @@ void __init mem_init(void) /* this will put all low memory onto the freelists */ memblock_free_all(); - - mem_init_print_info(NULL); } diff --git a/arch/hexagon/Makefile b/arch/hexagon/Makefile index c168c6980d05..74b644ea8a00 100644 --- a/arch/hexagon/Makefile +++ b/arch/hexagon/Makefile @@ -10,6 +10,9 @@ LDFLAGS_vmlinux += -G0 # Do not use single-byte enums; these will overflow. 
KBUILD_CFLAGS += -fno-short-enums +# We must use long-calls: +KBUILD_CFLAGS += -mlong-calls + # Modules must use either long-calls, or use pic/plt. # Use long-calls for now, it's easier. And faster. # KBUILD_CFLAGS_MODULE += -fPIC @@ -30,9 +33,6 @@ TIR_NAME := r19 KBUILD_CFLAGS += -ffixed-$(TIR_NAME) -DTHREADINFO_REG=$(TIR_NAME) -D__linux__ KBUILD_AFLAGS += -DTHREADINFO_REG=$(TIR_NAME) -LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name 2>/dev/null) -libs-y += $(LIBGCC) - head-y := arch/hexagon/kernel/head.o core-y += arch/hexagon/kernel/ \ diff --git a/arch/hexagon/configs/comet_defconfig b/arch/hexagon/configs/comet_defconfig index f19ae2ab0aaa..9b2b1cc0794a 100644 --- a/arch/hexagon/configs/comet_defconfig +++ b/arch/hexagon/configs/comet_defconfig @@ -34,7 +34,6 @@ CONFIG_NET_ETHERNET=y # CONFIG_SERIO is not set # CONFIG_CONSOLE_TRANSLATIONS is not set CONFIG_LEGACY_PTY_COUNT=64 -# CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set CONFIG_SPI=y CONFIG_SPI_DEBUG=y @@ -81,4 +80,3 @@ CONFIG_FRAME_WARN=0 CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y # CONFIG_SCHED_DEBUG is not set -CONFIG_DEBUG_INFO=y diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h index 6b9c554aee78..9fb00a0ae89f 100644 --- a/arch/hexagon/include/asm/futex.h +++ b/arch/hexagon/include/asm/futex.h @@ -21,7 +21,7 @@ "3:\n" \ ".section .fixup,\"ax\"\n" \ "4: %1 = #%5;\n" \ - " jump 3b\n" \ + " jump ##3b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ ".long 1b,4b,2b,4b\n" \ @@ -90,7 +90,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, "3:\n" ".section .fixup,\"ax\"\n" "4: %0 = #%6\n" - " jump 3b\n" + " jump ##3b\n" ".previous\n" ".section __ex_table,\"a\"\n" ".long 1b,4b,2b,4b\n" diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h index bda2a9c2df78..c33241425a5c 100644 --- a/arch/hexagon/include/asm/io.h +++ b/arch/hexagon/include/asm/io.h @@ -64,7 +64,6 @@ static inline void *phys_to_virt(unsigned long address) * convert a physical pointer to a virtual kernel pointer for * /dev/mem access. */ -#define xlate_dev_kmem_ptr(p) __va(p) #define xlate_dev_mem_ptr(p) __va(p) /* diff --git a/arch/hexagon/include/asm/timex.h b/arch/hexagon/include/asm/timex.h index 78338d8ada83..8d4ec76fceb4 100644 --- a/arch/hexagon/include/asm/timex.h +++ b/arch/hexagon/include/asm/timex.h @@ -8,6 +8,7 @@ #include <asm-generic/timex.h> #include <asm/timer-regs.h> +#include <asm/hexagon_vm.h> /* Using TCX0 as our clock. CLOCK_TICK_RATE scheduled to be removed. 
*/ #define CLOCK_TICK_RATE TCX0_CLK_RATE @@ -16,7 +17,7 @@ static inline int read_current_timer(unsigned long *timer_val) { - *timer_val = (unsigned long) __vmgettime(); + *timer_val = __vmgettime(); return 0; } diff --git a/arch/hexagon/kernel/hexagon_ksyms.c b/arch/hexagon/kernel/hexagon_ksyms.c index 6fb1aaab1c29..35545a7386a0 100644 --- a/arch/hexagon/kernel/hexagon_ksyms.c +++ b/arch/hexagon/kernel/hexagon_ksyms.c @@ -35,8 +35,8 @@ EXPORT_SYMBOL(_dflt_cache_att); DECLARE_EXPORT(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes); /* Additional functions */ -DECLARE_EXPORT(__divsi3); -DECLARE_EXPORT(__modsi3); -DECLARE_EXPORT(__udivsi3); -DECLARE_EXPORT(__umodsi3); +DECLARE_EXPORT(__hexagon_divsi3); +DECLARE_EXPORT(__hexagon_modsi3); +DECLARE_EXPORT(__hexagon_udivsi3); +DECLARE_EXPORT(__hexagon_umodsi3); DECLARE_EXPORT(csum_tcpudp_magic); diff --git a/arch/hexagon/kernel/ptrace.c b/arch/hexagon/kernel/ptrace.c index a5a89e944257..8975f9b4cedf 100644 --- a/arch/hexagon/kernel/ptrace.c +++ b/arch/hexagon/kernel/ptrace.c @@ -35,7 +35,7 @@ void user_disable_single_step(struct task_struct *child) static int genregs_get(struct task_struct *target, const struct user_regset *regset, - srtuct membuf to) + struct membuf to) { struct pt_regs *regs = task_pt_regs(target); @@ -54,7 +54,7 @@ static int genregs_get(struct task_struct *target, membuf_store(&to, regs->m0); membuf_store(&to, regs->m1); membuf_store(&to, regs->usr); - membuf_store(&to, regs->p3_0); + membuf_store(&to, regs->preds); membuf_store(&to, regs->gp); membuf_store(&to, regs->ugp); membuf_store(&to, pt_elr(regs)); // pc diff --git a/arch/hexagon/lib/Makefile b/arch/hexagon/lib/Makefile index 54be529d17a2..a64641e89d5f 100644 --- a/arch/hexagon/lib/Makefile +++ b/arch/hexagon/lib/Makefile @@ -2,4 +2,5 @@ # # Makefile for hexagon-specific library files. # -obj-y = checksum.o io.o memcpy.o memset.o +obj-y = checksum.o io.o memcpy.o memset.o memcpy_likely_aligned.o \ + divsi3.o modsi3.o udivsi3.o umodsi3.o diff --git a/arch/hexagon/lib/divsi3.S b/arch/hexagon/lib/divsi3.S new file mode 100644 index 000000000000..783e09424c2c --- /dev/null +++ b/arch/hexagon/lib/divsi3.S @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/linkage.h> + +SYM_FUNC_START(__hexagon_divsi3) + { + p0 = cmp.gt(r0,#-1) + p1 = cmp.gt(r1,#-1) + r3:2 = vabsw(r1:0) + } + { + p3 = xor(p0,p1) + r4 = sub(r2,r3) + r6 = cl0(r2) + p0 = cmp.gtu(r3,r2) + } + { + r0 = mux(p3,#-1,#1) + r7 = cl0(r3) + p1 = cmp.gtu(r3,r4) + } + { + r0 = mux(p0,#0,r0) + p0 = or(p0,p1) + if (p0.new) jumpr:nt r31 + r6 = sub(r7,r6) + } + { + r7 = r6 + r5:4 = combine(#1,r3) + r6 = add(#1,lsr(r6,#1)) + p0 = cmp.gtu(r6,#4) + } + { + r5:4 = vaslw(r5:4,r7) + if (!p0) r6 = #3 + } + { + loop0(1f,r6) + r7:6 = vlsrw(r5:4,#1) + r1:0 = #0 + } + .falign +1: + { + r5:4 = vlsrw(r5:4,#2) + if (!p0.new) r0 = add(r0,r5) + if (!p0.new) r2 = sub(r2,r4) + p0 = cmp.gtu(r4,r2) + } + { + r7:6 = vlsrw(r7:6,#2) + if (!p0.new) r0 = add(r0,r7) + if (!p0.new) r2 = sub(r2,r6) + p0 = cmp.gtu(r6,r2) + }:endloop0 + { + if (!p0) r0 = add(r0,r7) + } + { + if (p3) r0 = sub(r1,r0) + jumpr r31 + } +SYM_FUNC_END(__hexagon_divsi3) diff --git a/arch/hexagon/lib/memcpy_likely_aligned.S b/arch/hexagon/lib/memcpy_likely_aligned.S new file mode 100644 index 000000000000..6a541fb90a54 --- /dev/null +++ b/arch/hexagon/lib/memcpy_likely_aligned.S @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#include <linux/linkage.h> + +SYM_FUNC_START(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes) + { + p0 = bitsclr(r1,#7) + p0 = bitsclr(r0,#7) + if (p0.new) r5:4 = memd(r1) + if (p0.new) r7:6 = memd(r1+#8) + } + { + if (!p0) jump:nt .Lmemcpy_call + if (p0) r9:8 = memd(r1+#16) + if (p0) r11:10 = memd(r1+#24) + p0 = cmp.gtu(r2,#64) + } + { + if (p0) jump:nt .Lmemcpy_call + if (!p0) memd(r0) = r5:4 + if (!p0) memd(r0+#8) = r7:6 + p0 = cmp.gtu(r2,#32) + } + { + p1 = cmp.gtu(r2,#40) + p2 = cmp.gtu(r2,#48) + if (p0) r13:12 = memd(r1+#32) + if (p1.new) r15:14 = memd(r1+#40) + } + { + memd(r0+#16) = r9:8 + memd(r0+#24) = r11:10 + } + { + if (p0) memd(r0+#32) = r13:12 + if (p1) memd(r0+#40) = r15:14 + if (!p2) jumpr:t r31 + } + { + p0 = cmp.gtu(r2,#56) + r5:4 = memd(r1+#48) + if (p0.new) r7:6 = memd(r1+#56) + } + { + memd(r0+#48) = r5:4 + if (p0) memd(r0+#56) = r7:6 + jumpr r31 + } + +.Lmemcpy_call: + jump memcpy + +SYM_FUNC_END(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes) diff --git a/arch/hexagon/lib/modsi3.S b/arch/hexagon/lib/modsi3.S new file mode 100644 index 000000000000..9ea1c86efac2 --- /dev/null +++ b/arch/hexagon/lib/modsi3.S @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#include <linux/linkage.h> + +SYM_FUNC_START(__hexagon_modsi3) + { + p2 = cmp.ge(r0,#0) + r2 = abs(r0) + r1 = abs(r1) + } + { + r3 = cl0(r2) + r4 = cl0(r1) + p0 = cmp.gtu(r1,r2) + } + { + r3 = sub(r4,r3) + if (p0) jumpr r31 + } + { + p1 = cmp.eq(r3,#0) + loop0(1f,r3) + r0 = r2 + r2 = lsl(r1,r3) + } + .falign +1: + { + p0 = cmp.gtu(r2,r0) + if (!p0.new) r0 = sub(r0,r2) + r2 = lsr(r2,#1) + if (p1) r1 = #0 + }:endloop0 + { + p0 = cmp.gtu(r2,r0) + if (!p0.new) r0 = sub(r0,r1) + if (p2) jumpr r31 + } + { + r0 = neg(r0) + jumpr r31 + } +SYM_FUNC_END(__hexagon_modsi3) diff --git a/arch/hexagon/lib/udivsi3.S b/arch/hexagon/lib/udivsi3.S new file mode 100644 index 000000000000..477f27b9311c --- /dev/null +++ b/arch/hexagon/lib/udivsi3.S @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/linkage.h> + +SYM_FUNC_START(__hexagon_udivsi3) + { + r2 = cl0(r0) + r3 = cl0(r1) + r5:4 = combine(#1,#0) + p0 = cmp.gtu(r1,r0) + } + { + r6 = sub(r3,r2) + r4 = r1 + r1:0 = combine(r0,r4) + if (p0) jumpr r31 + } + { + r3:2 = vlslw(r5:4,r6) + loop0(1f,r6) + } + .falign +1: + { + p0 = cmp.gtu(r2,r1) + if (!p0.new) r1 = sub(r1,r2) + if (!p0.new) r0 = add(r0,r3) + r3:2 = vlsrw(r3:2,#1) + }:endloop0 + { + p0 = cmp.gtu(r2,r1) + if (!p0.new) r0 = add(r0,r3) + jumpr r31 + } +SYM_FUNC_END(__hexagon_udivsi3) diff --git a/arch/hexagon/lib/umodsi3.S b/arch/hexagon/lib/umodsi3.S new file mode 100644 index 000000000000..280bf06a55e7 --- /dev/null +++ b/arch/hexagon/lib/umodsi3.S @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#include <linux/linkage.h> + +SYM_FUNC_START(__hexagon_umodsi3) + { + r2 = cl0(r0) + r3 = cl0(r1) + p0 = cmp.gtu(r1,r0) + } + { + r2 = sub(r3,r2) + if (p0) jumpr r31 + } + { + loop0(1f,r2) + p1 = cmp.eq(r2,#0) + r2 = lsl(r1,r2) + } + .falign +1: + { + p0 = cmp.gtu(r2,r0) + if (!p0.new) r0 = sub(r0,r2) + r2 = lsr(r2,#1) + if (p1) r1 = #0 + }:endloop0 + { + p0 = cmp.gtu(r2,r0) + if (!p0.new) r0 = sub(r0,r1) + jumpr r31 + } +SYM_FUNC_END(__hexagon_umodsi3) diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c index f2e6c868e477..f01e91e10d95 100644 --- a/arch/hexagon/mm/init.c +++ b/arch/hexagon/mm/init.c @@ -55,7 +55,6 @@ void __init mem_init(void) { /* No idea where this is actually declared. Seems to evade LXR. */ memblock_free_all(); - mem_init_print_info(NULL); /* * To-Do: someone somewhere should wipe out the bootmem map diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 2ad7a8d29fcc..279252e3e0f7 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -13,6 +13,8 @@ config IA64 select ARCH_MIGHT_HAVE_PC_SERIO select ACPI select ACPI_NUMA if NUMA + select ARCH_ENABLE_MEMORY_HOTPLUG + select ARCH_ENABLE_MEMORY_HOTREMOVE select ARCH_SUPPORTS_ACPI select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI @@ -32,6 +34,7 @@ config IA64 select TTY select HAVE_ARCH_TRACEHOOK select HAVE_VIRT_CPU_ACCOUNTING + select HUGETLB_PAGE_SIZE_VARIABLE if HUGETLB_PAGE select VIRT_TO_BUS select GENERIC_IRQ_PROBE select GENERIC_PENDING_IRQ if SMP @@ -82,11 +85,6 @@ config STACKTRACE_SUPPORT config GENERIC_LOCKBREAK def_bool n -config HUGETLB_PAGE_SIZE_VARIABLE - bool - depends on HUGETLB_PAGE - default y - config GENERIC_CALIBRATE_DELAY bool default y @@ -250,12 +248,6 @@ config HOTPLUG_CPU can be controlled through /sys/devices/system/cpu/cpu#. Say N if you want to disable CPU hotplug. -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y - -config ARCH_ENABLE_MEMORY_HOTREMOVE - def_bool y - config SCHED_SMT bool "SMT scheduler support" depends on SMP @@ -286,15 +278,6 @@ config FORCE_CPEI_RETARGET config ARCH_SELECT_MEMORY_MODEL def_bool y -config ARCH_DISCONTIGMEM_ENABLE - def_bool y - depends on BROKEN - help - Say Y to support efficient handling of discontiguous physical memory, - for architectures which are either NUMA (Non-Uniform Memory Access) - or have huge holes in the physical address space for other reasons. - See <file:Documentation/vm/numa.rst> for more. - config ARCH_FLATMEM_ENABLE def_bool y @@ -325,22 +308,8 @@ config NODES_SHIFT MAX_NUMNODES will be 2^(This value). If in doubt, use the default. -# VIRTUAL_MEM_MAP and FLAT_NODE_MEM_MAP are functionally equivalent. -# VIRTUAL_MEM_MAP has been retained for historical reasons. 
-config VIRTUAL_MEM_MAP - bool "Virtual mem map" - depends on !SPARSEMEM && !FLATMEM - default y - help - Say Y to compile the kernel with support for a virtual mem map. - This code also only takes effect if a memory hole of greater than - 1 Gb is found during boot. You must turn this option on if you - require the DISCONTIGMEM option for your machine. If you are - unsure, say Y. - config HOLES_IN_ZONE bool - default y if VIRTUAL_MEM_MAP config HAVE_ARCH_NODEDATA_EXTENSION def_bool y diff --git a/arch/ia64/configs/bigsur_defconfig b/arch/ia64/configs/bigsur_defconfig index c409756b5396..0341a67cc1bf 100644 --- a/arch/ia64/configs/bigsur_defconfig +++ b/arch/ia64/configs/bigsur_defconfig @@ -9,7 +9,6 @@ CONFIG_SGI_PARTITION=y CONFIG_SMP=y CONFIG_NR_CPUS=2 CONFIG_PREEMPT=y -# CONFIG_VIRTUAL_MEM_MAP is not set CONFIG_IA64_PALINFO=y CONFIG_EFI_VARS=y CONFIG_BINFMT_MISC=m diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig index ca0d596c800d..8916a2850c48 100644 --- a/arch/ia64/configs/generic_defconfig +++ b/arch/ia64/configs/generic_defconfig @@ -55,8 +55,6 @@ CONFIG_CHR_DEV_SG=m CONFIG_SCSI_FC_ATTRS=y CONFIG_SCSI_SYM53C8XX_2=y CONFIG_SCSI_QLOGIC_1280=y -CONFIG_ATA=y -CONFIG_ATA_PIIX=y CONFIG_SATA_VITESSE=y CONFIG_MD=y CONFIG_BLK_DEV_MD=m diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h index 3d666a11a2de..6d93b923b379 100644 --- a/arch/ia64/include/asm/io.h +++ b/arch/ia64/include/asm/io.h @@ -277,7 +277,6 @@ extern void memset_io(volatile void __iomem *s, int c, long n); #define memcpy_fromio memcpy_fromio #define memcpy_toio memcpy_toio #define memset_io memset_io -#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr #define xlate_dev_mem_ptr xlate_dev_mem_ptr #include <asm-generic/io.h> #undef PCI_IOBASE diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h index e789c0818edb..6c47a239fc26 100644 --- a/arch/ia64/include/asm/meminit.h +++ b/arch/ia64/include/asm/meminit.h @@ -58,15 +58,4 @@ extern int reserve_elfcorehdr(u64 *start, u64 *end); extern int register_active_ranges(u64 start, u64 len, int nid); -#ifdef CONFIG_VIRTUAL_MEM_MAP - extern unsigned long VMALLOC_END; - extern struct page *vmem_map; - extern int create_mem_map_page_table(u64 start, u64 end, void *arg); - extern int vmemmap_find_next_valid_pfn(int, int); -#else -static inline int vmemmap_find_next_valid_pfn(int node, int i) -{ - return i + 1; -} -#endif #endif /* meminit_h */ diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h index 5a29652e6def..7271b9c5fc76 100644 --- a/arch/ia64/include/asm/module.h +++ b/arch/ia64/include/asm/module.h @@ -14,16 +14,20 @@ struct elf64_shdr; /* forward declration */ struct mod_arch_specific { + /* Used only at module load time. */ struct elf64_shdr *core_plt; /* core PLT section */ struct elf64_shdr *init_plt; /* init PLT section */ struct elf64_shdr *got; /* global offset table */ struct elf64_shdr *opd; /* official procedure descriptors */ struct elf64_shdr *unwind; /* unwind-table section */ unsigned long gp; /* global-pointer for module */ + unsigned int next_got_entry; /* index of next available got entry */ + /* Used at module run and cleanup time. 
*/ void *core_unw_table; /* core unwind-table cookie returned by unwinder */ void *init_unw_table; /* init unwind-table cookie returned by unwinder */ - unsigned int next_got_entry; /* index of next available got entry */ + void *opd_addr; /* symbolize uses .opd to get to actual function */ + unsigned long opd_size; }; #define ARCH_SHF_SMALL SHF_IA_64_SHORT diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h index b69a5499d75b..f4dc81fa7146 100644 --- a/arch/ia64/include/asm/page.h +++ b/arch/ia64/include/asm/page.h @@ -95,31 +95,10 @@ do { \ #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) -#ifdef CONFIG_VIRTUAL_MEM_MAP -extern int ia64_pfn_valid (unsigned long pfn); -#else -# define ia64_pfn_valid(pfn) 1 -#endif - -#ifdef CONFIG_VIRTUAL_MEM_MAP -extern struct page *vmem_map; -#ifdef CONFIG_DISCONTIGMEM -# define page_to_pfn(page) ((unsigned long) (page - vmem_map)) -# define pfn_to_page(pfn) (vmem_map + (pfn)) -# define __pfn_to_phys(pfn) PFN_PHYS(pfn) -#else -# include <asm-generic/memory_model.h> -#endif -#else -# include <asm-generic/memory_model.h> -#endif +#include <asm-generic/memory_model.h> #ifdef CONFIG_FLATMEM -# define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn)) -#elif defined(CONFIG_DISCONTIGMEM) -extern unsigned long min_low_pfn; -extern unsigned long max_low_pfn; -# define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn)) +# define pfn_valid(pfn) ((pfn) < max_mapnr) #endif #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 9b4efe89e62d..d765fd948fae 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -223,10 +223,6 @@ ia64_phys_addr_valid (unsigned long addr) #define VMALLOC_START (RGN_BASE(RGN_GATE) + 0x200000000UL) -#ifdef CONFIG_VIRTUAL_MEM_MAP -# define VMALLOC_END_INIT (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9))) -extern unsigned long VMALLOC_END; -#else #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP) /* SPARSEMEM_VMEMMAP uses half of vmalloc... */ # define VMALLOC_END (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 10))) @@ -234,7 +230,6 @@ extern unsigned long VMALLOC_END; #else # define VMALLOC_END (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9))) #endif -#endif /* fs/proc/kcore.c */ #define kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE)) @@ -328,7 +323,7 @@ extern void __ia64_sync_icache_dcache(pte_t pteval); static inline void set_pte(pte_t *ptep, pte_t pteval) { /* page is present && page is user && page is executable - * && (page swapin or new page or page migraton + * && (page swapin or new page or page migration * || copy_on_write with page copying.) */ if (pte_present_exec_user(pteval) && diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h index b3aa46090101..08179135905c 100644 --- a/arch/ia64/include/asm/ptrace.h +++ b/arch/ia64/include/asm/ptrace.h @@ -54,8 +54,7 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs) { - /* FIXME: should this be bspstore + nr_dirty regs? */ - return regs->ar_bspstore; + return regs->r12; } static inline int is_syscall_success(struct pt_regs *regs) @@ -79,11 +78,6 @@ static inline long regs_return_value(struct pt_regs *regs) unsigned long __ip = instruction_pointer(regs); \ (__ip & ~3UL) + ((__ip & 3UL) << 2); \ }) -/* - * Why not default? Because user_stack_pointer() on ia64 gives register - * stack backing store instead... 
- */ -#define current_user_stack_pointer() (current_pt_regs()->r12) /* given a pointer to a task_struct, return the user's pt_regs */ # define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1) diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 179243c3dfc7..e19d2dcc0ced 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -272,22 +272,4 @@ xlate_dev_mem_ptr(phys_addr_t p) return ptr; } -/* - * Convert a virtual cached kernel memory pointer to an uncached pointer - */ -static __inline__ void * -xlate_dev_kmem_ptr(void *p) -{ - struct page *page; - void *ptr; - - page = virt_to_page((unsigned long)p); - if (PageUncached(page)) - ptr = (void *)__pa(p) + __IA64_UNCACHED_OFFSET; - else - ptr = p; - - return ptr; -} - #endif /* _ASM_IA64_UACCESS_H */ diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 78717819131c..08d4a2ba0652 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -9,7 +9,7 @@ endif extra-y := head.o vmlinux.lds -obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ +obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o irq.o irq_ia64.o \ irq_lsapic.o ivt.o pal.o patch.o process.o ptrace.o sal.o \ salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ unwind.o mca.o mca_asm.o topology.o dma-mapping.o iosapic.o acpi.o \ diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index a5636524af76..e2af6b172200 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -446,7 +446,8 @@ void __init acpi_numa_fixup(void) if (srat_num_cpus == 0) { node_set_online(0); node_cpuid[0].phys_id = hard_smp_processor_id(); - return; + slit_distance(0, 0) = LOCAL_DISTANCE; + goto out; } /* @@ -489,7 +490,7 @@ void __init acpi_numa_fixup(void) for (j = 0; j < MAX_NUMNODES; j++) slit_distance(i, j) = i == j ? 
LOCAL_DISTANCE : REMOTE_DISTANCE; - return; + goto out; } memset(numa_slit, -1, sizeof(numa_slit)); @@ -514,6 +515,8 @@ void __init acpi_numa_fixup(void) printk("\n"); } #endif +out: + node_possible_map = node_online_map; } #endif /* CONFIG_ACPI_NUMA */ diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index c5fe21de46a8..31149e41f9be 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -415,10 +415,10 @@ efi_get_pal_addr (void) mask = ~((1 << IA64_GRANULE_SHIFT) - 1); printk(KERN_INFO "CPU %d: mapping PAL code " - "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n", - smp_processor_id(), md->phys_addr, - md->phys_addr + efi_md_size(md), - vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE); + "[0x%llx-0x%llx) into [0x%llx-0x%llx)\n", + smp_processor_id(), md->phys_addr, + md->phys_addr + efi_md_size(md), + vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE); #endif return __va(md->phys_addr); } @@ -560,6 +560,7 @@ efi_init (void) { efi_memory_desc_t *md; void *p; + unsigned int i; for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) @@ -586,7 +587,7 @@ efi_init (void) } printk("mem%02d: %s " - "range=[0x%016lx-0x%016lx) (%4lu%s)\n", + "range=[0x%016llx-0x%016llx) (%4lu%s)\n", i, efi_md_typeattr_format(buf, sizeof(buf), md), md->phys_addr, md->phys_addr + efi_md_size(md), size, unit); diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index e98e3dafffd8..5eba3fb2e311 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -1420,10 +1420,9 @@ END(ftrace_stub) #endif /* CONFIG_FUNCTION_TRACER */ -#define __SYSCALL(nr, entry, nargs) data8 entry +#define __SYSCALL(nr, entry) data8 entry .rodata .align 8 .globl sys_call_table sys_call_table: #include <asm/syscall_table.h> -#undef __SYSCALL diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c index 8b5b8e6bc9d9..dd5bfed52031 100644 --- a/arch/ia64/kernel/err_inject.c +++ b/arch/ia64/kernel/err_inject.c @@ -59,7 +59,7 @@ show_##name(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ u32 cpu=dev->id; \ - return sprintf(buf, "%lx\n", name[cpu]); \ + return sprintf(buf, "%llx\n", name[cpu]); \ } #define store(name) \ @@ -86,9 +86,9 @@ store_call_start(struct device *dev, struct device_attribute *attr, #ifdef ERR_INJ_DEBUG printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu); - printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]); - printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]); - printk(KERN_DEBUG "err_data_buffer=%lx, %lx, %lx.\n", + printk(KERN_DEBUG "err_type_info=%llx,\n", err_type_info[cpu]); + printk(KERN_DEBUG "err_struct_info=%llx,\n", err_struct_info[cpu]); + printk(KERN_DEBUG "err_data_buffer=%llx, %llx, %llx.\n", err_data_buffer[cpu].data1, err_data_buffer[cpu].data2, err_data_buffer[cpu].data3); @@ -117,8 +117,8 @@ store_call_start(struct device *dev, struct device_attribute *attr, #ifdef ERR_INJ_DEBUG printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]); - printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]); - printk(KERN_DEBUG "resources=%lx\n", resources[cpu]); + printk(KERN_DEBUG "capabilities=%llx,\n", capabilities[cpu]); + printk(KERN_DEBUG "resources=%llx\n", resources[cpu]); #endif return size; } @@ -131,7 +131,7 @@ show_virtual_to_phys(struct device *dev, struct device_attribute *attr, char *buf) { unsigned int cpu=dev->id; - return sprintf(buf, "%lx\n", phys_addr[cpu]); + return sprintf(buf, "%llx\n", phys_addr[cpu]); } static ssize_t @@ -145,7 +145,7 @@ store_virtual_to_phys(struct 
device *dev, struct device_attribute *attr, ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL); if (ret<=0) { #ifdef ERR_INJ_DEBUG - printk("Virtual address %lx is not existing.\n",virt_addr); + printk("Virtual address %llx is not existing.\n", virt_addr); #endif return -EINVAL; } @@ -163,7 +163,7 @@ show_err_data_buffer(struct device *dev, { unsigned int cpu=dev->id; - return sprintf(buf, "%lx, %lx, %lx\n", + return sprintf(buf, "%llx, %llx, %llx\n", err_data_buffer[cpu].data1, err_data_buffer[cpu].data2, err_data_buffer[cpu].data3); @@ -178,13 +178,13 @@ store_err_data_buffer(struct device *dev, int ret; #ifdef ERR_INJ_DEBUG - printk("write err_data_buffer=[%lx,%lx,%lx] on cpu%d\n", + printk("write err_data_buffer=[%llx,%llx,%llx] on cpu%d\n", err_data_buffer[cpu].data1, err_data_buffer[cpu].data2, err_data_buffer[cpu].data3, cpu); #endif - ret=sscanf(buf, "%lx, %lx, %lx", + ret = sscanf(buf, "%llx, %llx, %llx", &err_data_buffer[cpu].data1, &err_data_buffer[cpu].data2, &err_data_buffer[cpu].data3); diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index 0750a716adc7..2094f3249019 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S @@ -172,7 +172,7 @@ ENTRY(fsys_gettimeofday) // r25 = itc_lastcycle value // r26 = address clocksource cycle_last // r27 = (not used) - // r28 = sequence number at the beginning of critcal section + // r28 = sequence number at the beginning of critical section // r29 = address of itc_jitter // r30 = time processing flags / memory address // r31 = pointer to result @@ -432,7 +432,7 @@ GLOBAL_ENTRY(fsys_bubble_down) * - r29: psr * * We used to clear some PSR bits here but that requires slow - * serialization. Fortuntely, that isn't really necessary. + * serialization. Fortunately, that isn't really necessary. * The rationale is as follows: we used to clear bits * ~PSR_PRESERVED_BITS in PSR.L. 
Since * PSR_PRESERVED_BITS==PSR.{UP,MFL,MFH,PK,DT,PP,SP,RT,IC}, we diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index 30f1ef760136..f22469f1c1fc 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S @@ -33,7 +33,6 @@ #include <asm/mca_asm.h> #include <linux/init.h> #include <linux/linkage.h> -#include <linux/pgtable.h> #include <asm/export.h> #ifdef CONFIG_HOTPLUG_CPU @@ -405,11 +404,6 @@ start_ap: // This is executed by the bootstrap processor (bsp) only: -#ifdef CONFIG_IA64_FW_EMU - // initialize PAL & SAL emulator: - br.call.sptk.many rp=sys_fw_init -.ret1: -#endif br.call.sptk.many rp=start_kernel .ret2: addl r3=@ltoff(halt_msg),gp ;; diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c deleted file mode 100644 index f8150ee74f29..000000000000 --- a/arch/ia64/kernel/ia64_ksyms.c +++ /dev/null @@ -1,12 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Architecture-specific kernel symbols - */ - -#if defined(CONFIG_VIRTUAL_MEM_MAP) || defined(CONFIG_DISCONTIGMEM) -#include <linux/compiler.h> -#include <linux/export.h> -#include <linux/memblock.h> -EXPORT_SYMBOL(min_low_pfn); /* defined by bootmem.c, but not exported by generic code */ -EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic code */ -#endif diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c index af310dc8a356..4db9ca144fa5 100644 --- a/arch/ia64/kernel/machine_kexec.c +++ b/arch/ia64/kernel/machine_kexec.c @@ -143,7 +143,7 @@ void machine_kexec(struct kimage *image) void arch_crash_save_vmcoreinfo(void) { -#if defined(CONFIG_DISCONTIGMEM) || defined(CONFIG_SPARSEMEM) +#if defined(CONFIG_SPARSEMEM) VMCOREINFO_SYMBOL(pgdat_list); VMCOREINFO_LENGTH(pgdat_list, MAX_NUMNODES); #endif diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index d4cae2fc69ca..cdbac4b52f30 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -109,9 +109,9 @@ #include "irq.h" #if defined(IA64_MCA_DEBUG_INFO) -# define IA64_MCA_DEBUG(fmt...) printk(fmt) +# define IA64_MCA_DEBUG(fmt...) printk(fmt) #else -# define IA64_MCA_DEBUG(fmt...) +# define IA64_MCA_DEBUG(fmt...) do {} while (0) #endif #define NOTIFY_INIT(event, regs, arg, spin) \ @@ -1824,7 +1824,7 @@ ia64_mca_cpu_init(void *cpu_data) data = mca_bootmem(); first_time = 0; } else - data = (void *)__get_free_pages(GFP_KERNEL, + data = (void *)__get_free_pages(GFP_ATOMIC, get_order(sz)); if (!data) panic("Could not allocate MCA memory for cpu %d\n", diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c index 00a496cb346f..2cba53c1da82 100644 --- a/arch/ia64/kernel/module.c +++ b/arch/ia64/kernel/module.c @@ -905,9 +905,31 @@ register_unwind_table (struct module *mod) int module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod) { + struct mod_arch_specific *mas = &mod->arch; + DEBUGP("%s: init: entry=%p\n", __func__, mod->init); - if (mod->arch.unwind) + if (mas->unwind) register_unwind_table(mod); + + /* + * ".opd" was already relocated to the final destination. Store + * it's address for use in symbolizer. + */ + mas->opd_addr = (void *)mas->opd->sh_addr; + mas->opd_size = mas->opd->sh_size; + + /* + * Module relocation was already done at this point. Section + * headers are about to be deleted. Wipe out load-time context. 
+ */ + mas->core_plt = NULL; + mas->init_plt = NULL; + mas->got = NULL; + mas->opd = NULL; + mas->unwind = NULL; + mas->gp = 0; + mas->next_got_entry = 0; + return 0; } @@ -926,10 +948,9 @@ module_arch_cleanup (struct module *mod) void *dereference_module_function_descriptor(struct module *mod, void *ptr) { - Elf64_Shdr *opd = mod->arch.opd; + struct mod_arch_specific *mas = &mod->arch; - if (ptr < (void *)opd->sh_addr || - ptr >= (void *)(opd->sh_addr + opd->sh_size)) + if (ptr < mas->opd_addr || ptr >= mas->opd_addr + mas->opd_size) return ptr; return dereference_function_descriptor(ptr); diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S index d3e22c018b68..06d01a070aae 100644 --- a/arch/ia64/kernel/pal.S +++ b/arch/ia64/kernel/pal.S @@ -86,7 +86,7 @@ GLOBAL_ENTRY(ia64_pal_call_static) mov ar.pfs = loc1 mov rp = loc0 ;; - srlz.d // seralize restoration of psr.l + srlz.d // serialize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_static) EXPORT_SYMBOL(ia64_pal_call_static) @@ -194,7 +194,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static) mov rp = loc0 ;; mov ar.rsc=loc4 // restore RSE configuration - srlz.d // seralize restoration of psr.l + srlz.d // serialize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_phys_static) EXPORT_SYMBOL(ia64_pal_call_phys_static) @@ -252,7 +252,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked) mov rp = loc0 ;; mov ar.rsc=loc4 // restore RSE configuration - srlz.d // seralize restoration of psr.l + srlz.d // serialize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_phys_stacked) EXPORT_SYMBOL(ia64_pal_call_phys_stacked) diff --git a/arch/ia64/kernel/syscalls/Makefile b/arch/ia64/kernel/syscalls/Makefile index bf4bda0f63eb..14f40ecf8b65 100644 --- a/arch/ia64/kernel/syscalls/Makefile +++ b/arch/ia64/kernel/syscalls/Makefile @@ -6,26 +6,18 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') syscall := $(src)/syscall.tbl -syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +syshdr := $(srctree)/scripts/syscallhdr.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ - cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ - '$(syshdr_abis_$(basetarget))' \ - '$(syshdr_pfx_$(basetarget))' \ - '$(syshdr_offset_$(basetarget))' + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr --offset __NR_Linux $< $@ quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))' \ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ -syshdr_offset_unistd_64 := __NR_Linux $(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -systbl_offset_syscall_table := 1024 $(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl index d89231166e19..1ee8e736a48e 100644 --- a/arch/ia64/kernel/syscalls/syscall.tbl +++ b/arch/ia64/kernel/syscalls/syscall.tbl @@ -363,3 +363,7 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr +443 common quotactl_path sys_quotactl_path +444 common landlock_create_ruleset sys_landlock_create_ruleset +445 common landlock_add_rule sys_landlock_add_rule +446 common landlock_restrict_self sys_landlock_restrict_self diff --git a/arch/ia64/kernel/syscalls/syscallhdr.sh 
b/arch/ia64/kernel/syscalls/syscallhdr.sh deleted file mode 100644 index f407b6e53283..000000000000 --- a/arch/ia64/kernel/syscalls/syscallhdr.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -prefix="$4" -offset="$5" - -fileguard=_UAPI_ASM_IA64_`basename "$out" | sed \ - -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ - -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - printf "#ifndef %s\n" "${fileguard}" - printf "#define %s\n" "${fileguard}" - printf "\n" - - nxt=0 - while read nr abi name entry ; do - if [ -z "$offset" ]; then - printf "#define __NR_%s%s\t%s\n" \ - "${prefix}" "${name}" "${nr}" - else - printf "#define __NR_%s%s\t(%s + %s)\n" \ - "${prefix}" "${name}" "${offset}" "${nr}" - fi - nxt=$((nr+1)) - done - - printf "\n" - printf "#ifdef __KERNEL__\n" - printf "#define __NR_syscalls\t%s\n" "${nxt}" - printf "#endif\n" - printf "\n" - printf "#endif /* %s */\n" "${fileguard}" -) > "$out" diff --git a/arch/ia64/kernel/syscalls/syscalltbl.sh b/arch/ia64/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index 85d78d9309ad..000000000000 --- a/arch/ia64/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry ; do - emit $((nxt+offset)) $((nr+offset)) $entry - nxt=$((nr+1)) - done -) > "$out" diff --git a/arch/ia64/mm/Makefile b/arch/ia64/mm/Makefile index 99a35039b548..c03f63c62ac4 100644 --- a/arch/ia64/mm/Makefile +++ b/arch/ia64/mm/Makefile @@ -7,6 +7,5 @@ obj-y := init.o fault.o tlb.o extable.o ioremap.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_NUMA) += numa.o -obj-$(CONFIG_DISCONTIGMEM) += discontig.o obj-$(CONFIG_SPARSEMEM) += discontig.o obj-$(CONFIG_FLATMEM) += contig.o diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index 62fe80a16f42..42e025cfbd08 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c @@ -153,11 +153,7 @@ find_memory (void) efi_memmap_walk(find_max_min_low_pfn, NULL); max_pfn = max_low_pfn; -#ifdef CONFIG_VIRTUAL_MEM_MAP - efi_memmap_walk(filter_memory, register_active_ranges); -#else memblock_add_node(0, PFN_PHYS(max_low_pfn), 0); -#endif find_initrd(); diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 03b3a02375ff..791d4176e4a6 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -95,7 +95,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len, * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been * called yet. Note that node 0 will also count all non-existent cpus. */ -static int __meminit early_nr_cpus_node(int node) +static int early_nr_cpus_node(int node) { int cpu, n = 0; @@ -110,7 +110,7 @@ static int __meminit early_nr_cpus_node(int node) * compute_pernodesize - compute size of pernode data * @node: the node id. 
*/ -static unsigned long __meminit compute_pernodesize(int node) +static unsigned long compute_pernodesize(int node) { unsigned long pernodesize = 0, cpus; @@ -367,7 +367,7 @@ static void __init reserve_pernode_space(void) } } -static void __meminit scatter_node_data(void) +static void scatter_node_data(void) { pg_data_t **dst; int node; @@ -585,25 +585,6 @@ void call_pernode_memory(unsigned long start, unsigned long len, void *arg) } } -static void __init virtual_map_init(void) -{ -#ifdef CONFIG_VIRTUAL_MEM_MAP - int node; - - VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * - sizeof(struct page)); - vmem_map = (struct page *) VMALLOC_END; - efi_memmap_walk(create_mem_map_page_table, NULL); - printk("Virtual mem_map starts at 0x%p\n", vmem_map); - - for_each_online_node(node) { - unsigned long pfn_offset = mem_data[node].min_pfn; - - NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset; - } -#endif -} - /** * paging_init - setup page tables * @@ -619,8 +600,6 @@ void __init paging_init(void) sparse_init(); - virtual_map_init(); - memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); max_zone_pfns[ZONE_DMA32] = max_dma; max_zone_pfns[ZONE_NORMAL] = max_low_pfn; diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index cd9766d2b6e0..02de2e70c587 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -84,18 +84,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re if (faulthandler_disabled() || !mm) goto no_context; -#ifdef CONFIG_VIRTUAL_MEM_MAP - /* - * If fault is in region 5 and we are in the kernel, we may already - * have the mmap_lock (pfn_valid macro is called during mmap). There - * is no vma for region 5 addr's anyway, so skip getting the semaphore - * and go directly to the exception handling code. 
- */ - - if ((REGION_NUMBER(address) == 5) && !user_mode(regs)) - goto bad_area_no_up; -#endif - /* * This is to handle the kprobes on user space access instructions */ @@ -213,9 +201,6 @@ retry: bad_area: mmap_read_unlock(mm); -#ifdef CONFIG_VIRTUAL_MEM_MAP - bad_area_no_up: -#endif if ((isr & IA64_ISR_SP) || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) { diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index b331f94d20ac..f993cb36c062 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -25,7 +25,8 @@ unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT; EXPORT_SYMBOL(hpage_shift); pte_t * -huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) +huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) { unsigned long taddr = htlbpage_to_page(addr); pgd_t *pgd; diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 16d0d7d22657..064a967a7b6e 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -43,13 +43,6 @@ extern void ia64_tlb_init (void); unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL; -#ifdef CONFIG_VIRTUAL_MEM_MAP -unsigned long VMALLOC_END = VMALLOC_END_INIT; -EXPORT_SYMBOL(VMALLOC_END); -struct page *vmem_map; -EXPORT_SYMBOL(vmem_map); -#endif - struct page *zero_page_memmap_ptr; /* map entry for zero page */ EXPORT_SYMBOL(zero_page_memmap_ptr); @@ -373,212 +366,6 @@ void ia64_mmu_init(void *my_cpu_data) #endif } -#ifdef CONFIG_VIRTUAL_MEM_MAP -int vmemmap_find_next_valid_pfn(int node, int i) -{ - unsigned long end_address, hole_next_pfn; - unsigned long stop_address; - pg_data_t *pgdat = NODE_DATA(node); - - end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i]; - end_address = PAGE_ALIGN(end_address); - stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)]; - - do { - pgd_t *pgd; - p4d_t *p4d; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - - pgd = pgd_offset_k(end_address); - if (pgd_none(*pgd)) { - end_address += PGDIR_SIZE; - continue; - } - - p4d = p4d_offset(pgd, end_address); - if (p4d_none(*p4d)) { - end_address += P4D_SIZE; - continue; - } - - pud = pud_offset(p4d, end_address); - if (pud_none(*pud)) { - end_address += PUD_SIZE; - continue; - } - - pmd = pmd_offset(pud, end_address); - if (pmd_none(*pmd)) { - end_address += PMD_SIZE; - continue; - } - - pte = pte_offset_kernel(pmd, end_address); -retry_pte: - if (pte_none(*pte)) { - end_address += PAGE_SIZE; - pte++; - if ((end_address < stop_address) && - (end_address != ALIGN(end_address, 1UL << PMD_SHIFT))) - goto retry_pte; - continue; - } - /* Found next valid vmem_map page */ - break; - } while (end_address < stop_address); - - end_address = min(end_address, stop_address); - end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1; - hole_next_pfn = end_address / sizeof(struct page); - return hole_next_pfn - pgdat->node_start_pfn; -} - -int __init create_mem_map_page_table(u64 start, u64 end, void *arg) -{ - unsigned long address, start_page, end_page; - struct page *map_start, *map_end; - int node; - pgd_t *pgd; - p4d_t *p4d; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - - map_start = vmem_map + (__pa(start) >> PAGE_SHIFT); - map_end = vmem_map + (__pa(end) >> PAGE_SHIFT); - - start_page = (unsigned long) map_start & PAGE_MASK; - end_page = PAGE_ALIGN((unsigned long) map_end); - node = paddr_to_nid(__pa(start)); - - for (address = start_page; address < end_page; address += PAGE_SIZE) { - pgd = 
pgd_offset_k(address); - if (pgd_none(*pgd)) { - p4d = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node); - if (!p4d) - goto err_alloc; - pgd_populate(&init_mm, pgd, p4d); - } - p4d = p4d_offset(pgd, address); - - if (p4d_none(*p4d)) { - pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node); - if (!pud) - goto err_alloc; - p4d_populate(&init_mm, p4d, pud); - } - pud = pud_offset(p4d, address); - - if (pud_none(*pud)) { - pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node); - if (!pmd) - goto err_alloc; - pud_populate(&init_mm, pud, pmd); - } - pmd = pmd_offset(pud, address); - - if (pmd_none(*pmd)) { - pte = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node); - if (!pte) - goto err_alloc; - pmd_populate_kernel(&init_mm, pmd, pte); - } - pte = pte_offset_kernel(pmd, address); - - if (pte_none(*pte)) { - void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, - node); - if (!page) - goto err_alloc; - set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT, - PAGE_KERNEL)); - } - } - return 0; - -err_alloc: - panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d\n", - __func__, PAGE_SIZE, PAGE_SIZE, node); - return -ENOMEM; -} - -struct memmap_init_callback_data { - struct page *start; - struct page *end; - int nid; - unsigned long zone; -}; - -static int __meminit -virtual_memmap_init(u64 start, u64 end, void *arg) -{ - struct memmap_init_callback_data *args; - struct page *map_start, *map_end; - - args = (struct memmap_init_callback_data *) arg; - map_start = vmem_map + (__pa(start) >> PAGE_SHIFT); - map_end = vmem_map + (__pa(end) >> PAGE_SHIFT); - - if (map_start < args->start) - map_start = args->start; - if (map_end > args->end) - map_end = args->end; - - /* - * We have to initialize "out of bounds" struct page elements that fit completely - * on the same pages that were allocated for the "in bounds" elements because they - * may be referenced later (and found to be "reserved"). - */ - map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page); - map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end) - / sizeof(struct page)); - - if (map_start < map_end) - memmap_init_range((unsigned long)(map_end - map_start), - args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end), - MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); - return 0; -} - -void __meminit memmap_init_zone(struct zone *zone) -{ - int nid = zone_to_nid(zone), zone_id = zone_idx(zone); - unsigned long start_pfn = zone->zone_start_pfn; - unsigned long size = zone->spanned_pages; - - if (!vmem_map) { - memmap_init_range(size, nid, zone_id, start_pfn, start_pfn + size, - MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); - } else { - struct page *start; - struct memmap_init_callback_data args; - - start = pfn_to_page(start_pfn); - args.start = start; - args.end = start + size; - args.nid = nid; - args.zone = zone_id; - - efi_memmap_walk(virtual_memmap_init, &args); - } -} - -int -ia64_pfn_valid (unsigned long pfn) -{ - char byte; - struct page *pg = pfn_to_page(pfn); - - return (__get_user(byte, (char __user *) pg) == 0) - && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK)) - || (__get_user(byte, (char __user *) (pg + 1) - 1) == 0)); -} -EXPORT_SYMBOL(ia64_pfn_valid); - -#endif /* CONFIG_VIRTUAL_MEM_MAP */ - int __init register_active_ranges(u64 start, u64 len, int nid) { u64 end = start + len; @@ -644,13 +431,16 @@ mem_init (void) * _before_ any drivers that may need the PCI DMA interface are * initialized or bootmem has been freed. 
*/ + do { #ifdef CONFIG_INTEL_IOMMU - detect_intel_iommu(); - if (!iommu_detected) + detect_intel_iommu(); + if (iommu_detected) + break; #endif #ifdef CONFIG_SWIOTLB swiotlb_init(1); #endif + } while (0); #ifdef CONFIG_FLATMEM BUG_ON(!mem_map); @@ -659,7 +449,6 @@ mem_init (void) set_max_mapnr(max_low_pfn); high_memory = __va(max_low_pfn * PAGE_SIZE); memblock_free_all(); - mem_init_print_info(NULL); /* * For fsyscall entrpoints with no light-weight handler, use the ordinary diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile index ea14f2046fb4..82620f14124d 100644 --- a/arch/m68k/Makefile +++ b/arch/m68k/Makefile @@ -16,7 +16,7 @@ KBUILD_DEFCONFIG := multi_defconfig -ifneq ($(SUBARCH),$(ARCH)) +ifdef cross_compiling ifeq ($(CROSS_COMPILE),) CROSS_COMPILE := $(call cc-cross-prefix, \ m68k-linux-gnu- m68k-linux- m68k-unknown-linux-gnu-) diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c index 1068670cb741..7e44d0e9d0f8 100644 --- a/arch/m68k/atari/time.c +++ b/arch/m68k/atari/time.c @@ -317,10 +317,3 @@ int atari_tt_hwclk( int op, struct rtc_time *t ) return( 0 ); } - -/* - * Local variables: - * c-indent-level: 4 - * tab-width: 8 - * End: - */ diff --git a/arch/m68k/coldfire/intc-simr.c b/arch/m68k/coldfire/intc-simr.c index 15c4b7a6e38f..f7c2c41b3156 100644 --- a/arch/m68k/coldfire/intc-simr.c +++ b/arch/m68k/coldfire/intc-simr.c @@ -68,9 +68,9 @@ static void intc_irq_mask(struct irq_data *d) { unsigned int irq = d->irq - MCFINT_VECBASE; - if (MCFINTC2_SIMR && (irq > 128)) + if (MCFINTC2_SIMR && (irq > 127)) __raw_writeb(irq - 128, MCFINTC2_SIMR); - else if (MCFINTC1_SIMR && (irq > 64)) + else if (MCFINTC1_SIMR && (irq > 63)) __raw_writeb(irq - 64, MCFINTC1_SIMR); else __raw_writeb(irq, MCFINTC0_SIMR); @@ -80,9 +80,9 @@ static void intc_irq_unmask(struct irq_data *d) { unsigned int irq = d->irq - MCFINT_VECBASE; - if (MCFINTC2_CIMR && (irq > 128)) + if (MCFINTC2_CIMR && (irq > 127)) __raw_writeb(irq - 128, MCFINTC2_CIMR); - else if (MCFINTC1_CIMR && (irq > 64)) + else if (MCFINTC1_CIMR && (irq > 63)) __raw_writeb(irq - 64, MCFINTC1_CIMR); else __raw_writeb(irq, MCFINTC0_CIMR); @@ -115,9 +115,9 @@ static unsigned int intc_irq_startup(struct irq_data *d) } irq -= MCFINT_VECBASE; - if (MCFINTC2_ICR0 && (irq > 128)) + if (MCFINTC2_ICR0 && (irq > 127)) __raw_writeb(5, MCFINTC2_ICR0 + irq - 128); - else if (MCFINTC1_ICR0 && (irq > 64)) + else if (MCFINTC1_ICR0 && (irq > 63)) __raw_writeb(5, MCFINTC1_ICR0 + irq - 64); else __raw_writeb(5, MCFINTC0_ICR0 + irq); diff --git a/arch/m68k/configs/amcore_defconfig b/arch/m68k/configs/amcore_defconfig index 3a84f24d41c8..6d9ed2198170 100644 --- a/arch/m68k/configs/amcore_defconfig +++ b/arch/m68k/configs/amcore_defconfig @@ -60,7 +60,6 @@ CONFIG_DM9000=y # CONFIG_VT is not set # CONFIG_UNIX98_PTYS is not set # CONFIG_DEVMEM is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_MCF=y CONFIG_SERIAL_MCF_BAUDRATE=115200 CONFIG_SERIAL_MCF_CONSOLE=y diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 786656090c50..59b727b69357 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -580,12 +580,8 @@ CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_BLAKE2S=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m -CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -598,7 +594,6 @@ CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_DES=m CONFIG_CRYPTO_FCRYPT=m 
CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 9bb12be4a38e..8d4ddcebe7b8 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -536,12 +536,8 @@ CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_BLAKE2S=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m -CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -554,7 +550,6 @@ CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_DES=m CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index 413232626d9d..9cc9f1a06516 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -558,12 +558,8 @@ CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_BLAKE2S=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m -CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -576,7 +572,6 @@ CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_DES=m CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index 819cc70b06d8..c3f3f462e6ce 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -529,12 +529,8 @@ CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_BLAKE2S=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m -CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -547,7 +543,6 @@ CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_DES=m CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index 8f8d5968713b..8c908fc5c191 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -538,12 +538,8 @@ CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_BLAKE2S=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m -CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -556,7 +552,6 @@ CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_DES=m CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index bf15e6c1c939..4e68b72d9c50 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -561,12 +561,8 @@ CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_BLAKE2S=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m -CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -579,7 +575,6 @@ 
CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_DES=m CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 5466d48fcd9d..d31896293c39 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -647,12 +647,8 @@ CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_BLAKE2S=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m -CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -665,7 +661,6 @@ CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_DES=m CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index 93c305918838..c7442f9dd469 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -528,12 +528,8 @@ CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_BLAKE2S=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m -CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -546,7 +542,6 @@ CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_DES=m CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index cacd6c617f69..233b82ea103a 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -529,12 +529,8 @@ CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_BLAKE2S=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m -CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -547,7 +543,6 @@ CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_DES=m CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index 3ae421cb24a4..664025a0f6a4 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -547,12 +547,8 @@ CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_BLAKE2S=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m -CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -565,7 +561,6 @@ CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_DES=m CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 6da97e28c48e..73293a0b3dc8 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -531,12 +531,8 @@ CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_BLAKE2S=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m -CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m 
CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -549,7 +545,6 @@ CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_DES=m CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index f54481bb789a..bca8a6f3e92f 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -530,12 +530,8 @@ CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_BLAKE2S=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m -CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -548,7 +544,6 @@ CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_DES=m CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m diff --git a/arch/m68k/fpsp040/Makefile b/arch/m68k/fpsp040/Makefile index aab04d372ae7..834ae9471b88 100644 --- a/arch/m68k/fpsp040/Makefile +++ b/arch/m68k/fpsp040/Makefile @@ -10,7 +10,3 @@ obj-y := bindec.o binstr.o decbin.o do_func.o gen_except.o get_op.o \ ssin.o ssinh.o stan.o stanh.o sto_res.o stwotox.o tbldo.o util.o \ x_bsun.o x_fline.o x_operr.o x_ovfl.o x_snan.o x_store.o \ x_unfl.o x_unimp.o x_unsupp.o bugfix.o skeleton.o - -EXTRA_LDFLAGS := -x - -$(OS_OBJS): fpsp.h diff --git a/arch/m68k/ifpsp060/Makefile b/arch/m68k/ifpsp060/Makefile index 43b435049452..56b530a96c2f 100644 --- a/arch/m68k/ifpsp060/Makefile +++ b/arch/m68k/ifpsp060/Makefile @@ -5,5 +5,3 @@ # for more details. obj-y := fskeleton.o iskeleton.o os.o - -EXTRA_LDFLAGS := -x diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h index 10133a968c8e..7b414099e5fc 100644 --- a/arch/m68k/include/asm/bitops.h +++ b/arch/m68k/include/asm/bitops.h @@ -440,8 +440,6 @@ static inline unsigned long ffz(unsigned long word) #endif -#include <asm-generic/bitops/find.h> - #ifdef __KERNEL__ #if defined(CONFIG_CPU_HAS_NO_BITFIELDS) @@ -525,10 +523,12 @@ static inline int __fls(int x) #define __clear_bit_unlock clear_bit_unlock #include <asm-generic/bitops/ext2-atomic.h> -#include <asm-generic/bitops/le.h> #include <asm-generic/bitops/fls64.h> #include <asm-generic/bitops/sched.h> #include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/le.h> #endif /* __KERNEL__ */ +#include <asm-generic/bitops/find.h> + #endif /* _M68K_BITOPS_H */ diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h index 819f611dccf2..d41fa488453b 100644 --- a/arch/m68k/include/asm/io_mm.h +++ b/arch/m68k/include/asm/io_mm.h @@ -397,11 +397,6 @@ static inline void isa_delay(void) */ #define xlate_dev_mem_ptr(p) __va(p) -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - #define readb_relaxed(addr) readb(addr) #define readw_relaxed(addr) readw(addr) #define readl_relaxed(addr) readl(addr) diff --git a/arch/m68k/include/asm/mvme147hw.h b/arch/m68k/include/asm/mvme147hw.h index 257b29184af9..e28eb1c0e0bf 100644 --- a/arch/m68k/include/asm/mvme147hw.h +++ b/arch/m68k/include/asm/mvme147hw.h @@ -66,6 +66,9 @@ struct pcc_regs { #define PCC_INT_ENAB 0x08 #define PCC_TIMER_INT_CLR 0x80 + +#define PCC_TIMER_TIC_EN 0x01 +#define PCC_TIMER_COC_EN 0x02 #define PCC_TIMER_CLR_OVF 0x04 #define PCC_LEVEL_ABORT 0x07 diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h index 9e8f0cc30a2c..2411ea9ef578 
100644 --- a/arch/m68k/include/asm/page_mm.h +++ b/arch/m68k/include/asm/page_mm.h @@ -167,7 +167,7 @@ static inline __attribute_const__ int __virt_to_node_shift(void) ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \ }) #else -#define ARCH_PFN_OFFSET (m68k_memory[0].addr) +#define ARCH_PFN_OFFSET (m68k_memory[0].addr >> PAGE_SHIFT) #include <asm-generic/memory_model.h> #endif diff --git a/arch/m68k/include/asm/sun3xflop.h b/arch/m68k/include/asm/sun3xflop.h index 93f2a8431c0e..bce8aabb5380 100644 --- a/arch/m68k/include/asm/sun3xflop.h +++ b/arch/m68k/include/asm/sun3xflop.h @@ -106,7 +106,7 @@ static void sun3x_82072_fd_outb(unsigned char value, int port) case 4: /* FD_STATUS */ *(sun3x_fdc.status_r) = value; break; - }; + } return; } diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c index 349570f16a78..a4b7ee1df211 100644 --- a/arch/m68k/kernel/signal.c +++ b/arch/m68k/kernel/signal.c @@ -622,6 +622,9 @@ static inline void siginfo_build_tests(void) /* _sigfault._addr_pkey */ BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x12); + /* _sigfault._perf */ + BUILD_BUG_ON(offsetof(siginfo_t, si_perf) != 0x10); + /* _sigpoll */ BUILD_BUG_ON(offsetof(siginfo_t, si_band) != 0x0c); BUILD_BUG_ON(offsetof(siginfo_t, si_fd) != 0x10); diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c index 1c235d8f53f3..f55bdcb8e4f1 100644 --- a/arch/m68k/kernel/sys_m68k.c +++ b/arch/m68k/kernel/sys_m68k.c @@ -388,6 +388,8 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len) ret = -EPERM; if (!capable(CAP_SYS_ADMIN)) goto out; + + mmap_read_lock(current->mm); } else { struct vm_area_struct *vma; diff --git a/arch/m68k/kernel/syscalls/Makefile b/arch/m68k/kernel/syscalls/Makefile index 285aaba832d9..6713c65a25e1 100644 --- a/arch/m68k/kernel/syscalls/Makefile +++ b/arch/m68k/kernel/syscalls/Makefile @@ -6,20 +6,14 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') syscall := $(src)/syscall.tbl -syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +syshdr := $(srctree)/scripts/syscallhdr.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ - cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ - '$(syshdr_abis_$(basetarget))' \ - '$(syshdr_pfx_$(basetarget))' \ - '$(syshdr_offset_$(basetarget))' + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr $< $@ quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))' \ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ $(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl index 72bde6707dd3..0dd019dc2136 100644 --- a/arch/m68k/kernel/syscalls/syscall.tbl +++ b/arch/m68k/kernel/syscalls/syscall.tbl @@ -442,3 +442,7 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr +443 common quotactl_path sys_quotactl_path +444 common landlock_create_ruleset sys_landlock_create_ruleset +445 common landlock_add_rule sys_landlock_add_rule +446 common landlock_restrict_self sys_landlock_restrict_self diff --git a/arch/m68k/kernel/syscalls/syscallhdr.sh b/arch/m68k/kernel/syscalls/syscallhdr.sh deleted file mode 100644 index 6f357d68ef44..000000000000 --- a/arch/m68k/kernel/syscalls/syscallhdr.sh +++ 
/dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -prefix="$4" -offset="$5" - -fileguard=_UAPI_ASM_M68K_`basename "$out" | sed \ - -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ - -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - printf "#ifndef %s\n" "${fileguard}" - printf "#define %s\n" "${fileguard}" - printf "\n" - - nxt=0 - while read nr abi name entry ; do - if [ -z "$offset" ]; then - printf "#define __NR_%s%s\t%s\n" \ - "${prefix}" "${name}" "${nr}" - else - printf "#define __NR_%s%s\t(%s + %s)\n" \ - "${prefix}" "${name}" "${offset}" "${nr}" - fi - nxt=$((nr+1)) - done - - printf "\n" - printf "#ifdef __KERNEL__\n" - printf "#define __NR_syscalls\t%s\n" "${nxt}" - printf "#endif\n" - printf "\n" - printf "#endif /* %s */\n" "${fileguard}" -) > "$out" diff --git a/arch/m68k/kernel/syscalls/syscalltbl.sh b/arch/m68k/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index 85d78d9309ad..000000000000 --- a/arch/m68k/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry ; do - emit $((nxt+offset)) $((nr+offset)) $entry - nxt=$((nr+1)) - done -) > "$out" diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index d329cc7b481c..e25ef4a9df30 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S @@ -18,9 +18,8 @@ #define sys_mmap2 sys_mmap_pgoff #endif -#define __SYSCALL(nr, entry, nargs) .long entry +#define __SYSCALL(nr, entry) .long entry .section .rodata ALIGN ENTRY(sys_call_table) #include <asm/syscall_table.h> -#undef __SYSCALL diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index 14c1e541451c..1759ab875d47 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -153,5 +153,4 @@ void __init mem_init(void) /* this will put all memory onto the freelists */ memblock_free_all(); init_pointer_tables(); - mem_init_print_info(NULL); } diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c index cfdc7f912e14..e1e90c49a496 100644 --- a/arch/m68k/mvme147/config.c +++ b/arch/m68k/mvme147/config.c @@ -114,8 +114,10 @@ static irqreturn_t mvme147_timer_int (int irq, void *dev_id) unsigned long flags; local_irq_save(flags); - m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR; - m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF; + m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN | + PCC_TIMER_TIC_EN; + m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR | + PCC_LEVEL_TIMER1; clk_total += PCC_TIMER_CYCLES; legacy_timer_tick(1); local_irq_restore(flags); @@ -133,10 +135,10 @@ void mvme147_sched_init (void) /* Init the clock with a value */ /* The clock counter increments until 0xFFFF then reloads */ m147_pcc->t1_preload = PCC_TIMER_PRELOAD; - m147_pcc->t1_cntrl = 0x0; /* clear timer */ - m147_pcc->t1_cntrl = 0x3; /* start timer */ - m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR; /* clear pending ints */ - m147_pcc->t1_int_cntrl = 
PCC_INT_ENAB|PCC_LEVEL_TIMER1; + m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN | + PCC_TIMER_TIC_EN; + m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR | + PCC_LEVEL_TIMER1; clocksource_register_hz(&mvme147_clk, PCC_TIMER_CLOCK_FREQ); } diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c index 30357fe4ba6c..b59593c7cfb9 100644 --- a/arch/m68k/mvme16x/config.c +++ b/arch/m68k/mvme16x/config.c @@ -366,6 +366,7 @@ static u32 clk_total; #define PCCTOVR1_COC_EN 0x02 #define PCCTOVR1_OVR_CLR 0x04 +#define PCCTIC1_INT_LEVEL 6 #define PCCTIC1_INT_CLR 0x08 #define PCCTIC1_INT_EN 0x10 @@ -374,8 +375,8 @@ static irqreturn_t mvme16x_timer_int (int irq, void *dev_id) unsigned long flags; local_irq_save(flags); - out_8(PCCTIC1, in_8(PCCTIC1) | PCCTIC1_INT_CLR); - out_8(PCCTOVR1, PCCTOVR1_OVR_CLR); + out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN); + out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL); clk_total += PCC_TIMER_CYCLES; legacy_timer_tick(1); local_irq_restore(flags); @@ -389,14 +390,15 @@ void mvme16x_sched_init(void) int irq; /* Using PCCchip2 or MC2 chip tick timer 1 */ - out_be32(PCCTCNT1, 0); - out_be32(PCCTCMP1, PCC_TIMER_CYCLES); - out_8(PCCTOVR1, in_8(PCCTOVR1) | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN); - out_8(PCCTIC1, PCCTIC1_INT_EN | 6); if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, IRQF_TIMER, "timer", NULL)) panic ("Couldn't register timer int"); + out_be32(PCCTCNT1, 0); + out_be32(PCCTCMP1, PCC_TIMER_CYCLES); + out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN); + out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL); + clocksource_register_hz(&mvme16x_clk, PCC_TIMER_CLOCK_FREQ); if (brdno == 0x0162 || brdno == 0x172) diff --git a/arch/microblaze/boot/dts/system.dts b/arch/microblaze/boot/dts/system.dts index 5b236527176e..b7ee1056779e 100644 --- a/arch/microblaze/boot/dts/system.dts +++ b/arch/microblaze/boot/dts/system.dts @@ -310,14 +310,6 @@ xlnx,odd-parity = <0x0>; xlnx,use-parity = <0x0>; } ; - SysACE_CompactFlash: sysace@83600000 { - compatible = "xlnx,xps-sysace-1.00.a"; - interrupt-parent = <&xps_intc_0>; - interrupts = < 4 2 >; - reg = < 0x83600000 0x10000 >; - xlnx,family = "virtex5"; - xlnx,mem-width = <0x10>; - } ; debug_module: debug@84400000 { compatible = "xlnx,mdm-1.00.d"; reg = < 0x84400000 0x10000 >; diff --git a/arch/microblaze/include/asm/ftrace.h b/arch/microblaze/include/asm/ftrace.h index 5db7f4489f05..6a92bed37794 100644 --- a/arch/microblaze/include/asm/ftrace.h +++ b/arch/microblaze/include/asm/ftrace.h @@ -13,7 +13,7 @@ extern void ftrace_call_graph(void); #endif #ifdef CONFIG_DYNAMIC_FTRACE -/* reloction of mcount call site is the same as the address */ +/* relocation of mcount call site is the same as the address */ static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S index ce006646f741..3bc60a2b159e 100644 --- a/arch/microblaze/kernel/syscall_table.S +++ b/arch/microblaze/kernel/syscall_table.S @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#define __SYSCALL(nr, entry, nargs) .long entry +#define __SYSCALL(nr, entry) .long entry ENTRY(sys_call_table) #include <asm/syscall_table.h> -#undef __SYSCALL diff --git a/arch/microblaze/kernel/syscalls/Makefile b/arch/microblaze/kernel/syscalls/Makefile index 285aaba832d9..6713c65a25e1 100644 --- a/arch/microblaze/kernel/syscalls/Makefile +++ 
b/arch/microblaze/kernel/syscalls/Makefile @@ -6,20 +6,14 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') syscall := $(src)/syscall.tbl -syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +syshdr := $(srctree)/scripts/syscallhdr.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ - cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ - '$(syshdr_abis_$(basetarget))' \ - '$(syshdr_pfx_$(basetarget))' \ - '$(syshdr_offset_$(basetarget))' + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr $< $@ quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))' \ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ $(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl index d603a5ec9338..2ac716984ca2 100644 --- a/arch/microblaze/kernel/syscalls/syscall.tbl +++ b/arch/microblaze/kernel/syscalls/syscall.tbl @@ -448,3 +448,7 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr +443 common quotactl_path sys_quotactl_path +444 common landlock_create_ruleset sys_landlock_create_ruleset +445 common landlock_add_rule sys_landlock_add_rule +446 common landlock_restrict_self sys_landlock_restrict_self diff --git a/arch/microblaze/kernel/syscalls/syscallhdr.sh b/arch/microblaze/kernel/syscalls/syscallhdr.sh deleted file mode 100644 index a914854f8d9f..000000000000 --- a/arch/microblaze/kernel/syscalls/syscallhdr.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -prefix="$4" -offset="$5" - -fileguard=_UAPI_ASM_MICROBLAZE_`basename "$out" | sed \ - -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ - -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - printf "#ifndef %s\n" "${fileguard}" - printf "#define %s\n" "${fileguard}" - printf "\n" - - nxt=0 - while read nr abi name entry ; do - if [ -z "$offset" ]; then - printf "#define __NR_%s%s\t%s\n" \ - "${prefix}" "${name}" "${nr}" - else - printf "#define __NR_%s%s\t(%s + %s)\n" \ - "${prefix}" "${name}" "${offset}" "${nr}" - fi - nxt=$((nr+1)) - done - - printf "\n" - printf "#ifdef __KERNEL__\n" - printf "#define __NR_syscalls\t%s\n" "${nxt}" - printf "#endif\n" - printf "\n" - printf "#endif /* %s */\n" "${fileguard}" -) > "$out" diff --git a/arch/microblaze/kernel/syscalls/syscalltbl.sh b/arch/microblaze/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index 85d78d9309ad..000000000000 --- a/arch/microblaze/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry ; do - emit $((nxt+offset)) $((nr+offset)) $entry - nxt=$((nr+1)) - done -) > "$out" diff 
--git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c index f536e81b8168..63041fdf916d 100644 --- a/arch/microblaze/lib/memcpy.c +++ b/arch/microblaze/lib/memcpy.c @@ -68,9 +68,11 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c) case 1: *dst++ = *src++; --c; + fallthrough; case 2: *dst++ = *src++; --c; + fallthrough; case 3: *dst++ = *src++; --c; @@ -176,8 +178,10 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c) switch (c) { case 3: *dst++ = *src++; + fallthrough; case 2: *dst++ = *src++; + fallthrough; case 1: *dst++ = *src++; } diff --git a/arch/microblaze/lib/memmove.c b/arch/microblaze/lib/memmove.c index 3611ce70415b..9862f6b1e59d 100644 --- a/arch/microblaze/lib/memmove.c +++ b/arch/microblaze/lib/memmove.c @@ -90,9 +90,11 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) case 3: *--dst = *--src; --c; + fallthrough; case 2: *--dst = *--src; --c; + fallthrough; case 1: *--dst = *--src; --c; @@ -201,10 +203,13 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) switch (c) { case 4: *--dst = *--src; + fallthrough; case 3: *--dst = *--src; + fallthrough; case 2: *--dst = *--src; + fallthrough; case 1: *--dst = *--src; } diff --git a/arch/microblaze/lib/memset.c b/arch/microblaze/lib/memset.c index 04ea72c8a81d..eb6c8988af02 100644 --- a/arch/microblaze/lib/memset.c +++ b/arch/microblaze/lib/memset.c @@ -69,9 +69,11 @@ void *memset(void *v_src, int c, __kernel_size_t n) case 1: *src++ = c; --n; + fallthrough; case 2: *src++ = c; --n; + fallthrough; case 3: *src++ = c; --n; diff --git a/arch/microblaze/lib/uaccess_old.S b/arch/microblaze/lib/uaccess_old.S index 0e8cc2710c27..eca290090038 100644 --- a/arch/microblaze/lib/uaccess_old.S +++ b/arch/microblaze/lib/uaccess_old.S @@ -188,7 +188,7 @@ w2: sw r4, r5, r3 .text .align 4 /* Alignment is important to keep icache happy */ -page: /* Create room on stack and save registers for storign values */ +page: /* Create room on stack and save registers for storing values */ addik r1, r1, -40 swi r5, r1, 0 swi r6, r1, 4 diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 181e48782e6c..ab55c70380a5 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -52,7 +52,7 @@ static void __init highmem_init(void) pkmap_page_table = virt_to_kpte(PKMAP_BASE); } -static void highmem_setup(void) +static void __meminit highmem_setup(void) { unsigned long pfn; @@ -131,7 +131,6 @@ void __init mem_init(void) highmem_setup(); #endif - mem_init_print_info(NULL); mem_init_done = 1; } diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index d89efba3d8a4..ed51970c08e7 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -4,18 +4,22 @@ config MIPS default y select ARCH_32BIT_OFF_T if !64BIT select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT + select ARCH_HAS_DEBUG_VIRTUAL if !64BIT select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_KCOV + select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if !EVA select ARCH_HAS_PTE_SPECIAL if !(32BIT && CPU_HAS_RIXI) select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_HAS_GCOV_PROFILE_ALL - select ARCH_KEEP_MEMBLOCK if DEBUG_KERNEL + select ARCH_KEEP_MEMBLOCK select ARCH_SUPPORTS_UPROBES select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF if 64BIT + select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS + select ARCH_SUPPORTS_HUGETLBFS if CPU_SUPPORTS_HUGEPAGES select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select 
ARCH_WANT_IPC_PARSE_VERSION select ARCH_WANT_LD_ORPHAN_WARN @@ -26,6 +30,7 @@ config MIPS select GENERIC_ATOMIC64 if !64BIT select GENERIC_CMOS_UPDATE select GENERIC_CPU_AUTOPROBE + select GENERIC_FIND_FIRST_BIT select GENERIC_GETTIMEOFDAY select GENERIC_IOMAP select GENERIC_IRQ_PROBE @@ -91,7 +96,6 @@ config MIPS select PERF_USE_VMALLOC select PCI_MSI_ARCH_FALLBACKS if PCI_MSI select RTC_LIB - select SET_FS select SYSCTL_EXCEPTION_TRACE select VIRT_TO_BUS select ARCH_HAS_ELFCORE_COMPAT @@ -712,6 +716,7 @@ config SGI_IP27 select ARC_CMDLINE_ONLY select BOOT_ELF64 select DEFAULT_SGI_PARTITION + select FORCE_PCI select SYS_HAS_EARLY_PRINTK select HAVE_PCI select IRQ_MIPS_CPU @@ -774,6 +779,7 @@ config SGI_IP30 select BOOT_ELF64 select CEVT_R4K select CSRC_R4K + select FORCE_PCI select SYNC_R4K if SMP select ZONE_DMA32 select HAVE_PCI @@ -998,6 +1004,7 @@ config CAVIUM_OCTEON_SOC select NR_CPUS_DEFAULT_64 select MIPS_NR_CPU_NR_MAP_1024 select BUILTIN_DTB + select MTD select MTD_COMPLEX_MAPPINGS select SWIOTLB select SYS_SUPPORTS_RELOCATABLE @@ -1281,11 +1288,6 @@ config SYS_SUPPORTS_BIG_ENDIAN config SYS_SUPPORTS_LITTLE_ENDIAN bool -config SYS_SUPPORTS_HUGETLBFS - bool - depends on CPU_SUPPORTS_HUGEPAGES - default y - config MIPS_HUGE_TLB_SUPPORT def_bool HUGETLB_PAGE || TRANSPARENT_HUGEPAGE @@ -2118,7 +2120,7 @@ config CPU_MIPS32 config CPU_MIPS64 bool default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R5 || \ - CPU_MIPS64_R6 + CPU_MIPS64_R6 || CPU_LOONGSON64 || CPU_CAVIUM_OCTEON # # These indicate the revision of the architecture @@ -2185,7 +2187,8 @@ config CPU_SUPPORTS_HUGEPAGES depends on !(32BIT && (ARCH_PHYS_ADDR_T_64BIT || EVA)) config MIPS_PGD_C0_CONTEXT bool - default y if 64BIT && (CPU_MIPSR2 || CPU_MIPSR6) && !CPU_XLP + depends on 64BIT + default y if (CPU_MIPSR2 || CPU_MIPSR6) && !CPU_XLP # # Set to y for ptrace access to watch registers. @@ -2219,23 +2222,6 @@ config 64BIT endchoice -config KVM_GUEST - bool "KVM Guest Kernel" - depends on CPU_MIPS32_R2 - depends on !64BIT && BROKEN_ON_SMP - help - Select this option if building a guest kernel for KVM (Trap & Emulate) - mode. - -config KVM_GUEST_TIMER_FREQ - int "Count/Compare Timer Frequency (MHz)" - depends on KVM_GUEST - default 100 - help - Set this to non-zero if building a guest kernel for KVM to skip RTC - emulation when determining guest CPU Frequency. Instead, the guest's - timer frequency is specified directly. 
- config MIPS_VA_BITS_48 bool "48 bits virtual memory" depends on 64BIT diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug index 7a8d94cdd493..f5832a49a881 100644 --- a/arch/mips/Kconfig.debug +++ b/arch/mips/Kconfig.debug @@ -77,6 +77,7 @@ config CMDLINE_OVERRIDE config SB1XXX_CORELIS bool "Corelis Debugger" depends on SIBYTE_SB1xxx_SOC + select DEBUG_KERNEL if !COMPILE_TEST select DEBUG_INFO if !COMPILE_TEST help Select compile flags that produce code that can be processed by the diff --git a/arch/mips/Makefile b/arch/mips/Makefile index e71d587af49c..258234c35a09 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -50,7 +50,7 @@ tool-archpref = $(64bit-tool-archpref) UTS_MACHINE := mips64 endif -ifneq ($(SUBARCH),$(ARCH)) +ifdef cross_compiling ifeq ($(CROSS_COMPILE),) CROSS_COMPILE := $(call cc-cross-prefix, $(tool-archpref)-linux- $(tool-archpref)-linux-gnu- $(tool-archpref)-unknown-linux-gnu-) endif diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c index f0c830337104..c01be8c45271 100644 --- a/arch/mips/alchemy/common/clock.c +++ b/arch/mips/alchemy/common/clock.c @@ -111,7 +111,7 @@ static struct clk_aliastable { /* access locks to SYS_FREQCTRL0/1 and SYS_CLKSRC registers */ static spinlock_t alchemy_clk_fg0_lock; static spinlock_t alchemy_clk_fg1_lock; -static spinlock_t alchemy_clk_csrc_lock; +static DEFINE_SPINLOCK(alchemy_clk_csrc_lock); /* CPU Core clock *****************************************************/ @@ -996,7 +996,6 @@ static int __init alchemy_clk_setup_imux(int ctype) if (!a) return -ENOMEM; - spin_lock_init(&alchemy_clk_csrc_lock); ret = 0; for (i = 0; i < 6; i++) { diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c index 164115944a7f..5a3e325275d0 100644 --- a/arch/mips/bcm63xx/clk.c +++ b/arch/mips/bcm63xx/clk.c @@ -76,7 +76,7 @@ static struct clk clk_enet_misc = { }; /* - * Ethernet MAC clocks: only revelant on 6358, silently enable misc + * Ethernet MAC clocks: only relevant on 6358, silently enable misc * clocks */ static void enetx_set(struct clk *clk, int enable) diff --git a/arch/mips/bcm63xx/gpio.c b/arch/mips/bcm63xx/gpio.c index 16f353ac3441..5c4a233db55f 100644 --- a/arch/mips/bcm63xx/gpio.c +++ b/arch/mips/bcm63xx/gpio.c @@ -43,8 +43,7 @@ static void bcm63xx_gpio_set(struct gpio_chip *chip, u32 *v; unsigned long flags; - if (gpio >= chip->ngpio) - BUG(); + BUG_ON(gpio >= chip->ngpio); if (gpio < 32) { reg = gpio_out_low_reg; @@ -70,8 +69,7 @@ static int bcm63xx_gpio_get(struct gpio_chip *chip, unsigned gpio) u32 reg; u32 mask; - if (gpio >= chip->ngpio) - BUG(); + BUG_ON(gpio >= chip->ngpio); if (gpio < 32) { reg = gpio_out_low_reg; @@ -92,8 +90,7 @@ static int bcm63xx_gpio_set_direction(struct gpio_chip *chip, u32 tmp; unsigned long flags; - if (gpio >= chip->ngpio) - BUG(); + BUG_ON(gpio >= chip->ngpio); if (gpio < 32) { reg = GPIO_CTL_LO_REG; diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c index 49061b870680..915ce4b189c1 100644 --- a/arch/mips/bmips/dma.c +++ b/arch/mips/bmips/dma.c @@ -10,7 +10,7 @@ #include <linux/device.h> #include <linux/dma-direction.h> -#include <linux/dma-mapping.h> +#include <linux/dma-direct.h> #include <linux/init.h> #include <linux/io.h> #include <linux/of.h> diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index f93f72bcba97..e4b7839293e1 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -18,7 +18,7 @@ include $(srctree)/arch/mips/Kbuild.platforms BOOT_HEAP_SIZE := 
0x400000 # Disable Function Tracer -KBUILD_CFLAGS := $(filter-out -pg, $(KBUILD_CFLAGS)) +KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE), $(KBUILD_CFLAGS)) KBUILD_CFLAGS := $(filter-out -fstack-protector, $(KBUILD_CFLAGS)) diff --git a/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts b/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts index ed6023a91763..d702a843c74a 100644 --- a/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts +++ b/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm3368.dtsi" +#include "bcm3368.dtsi" / { compatible = "netgear,cvg834g", "brcm,bcm3368"; diff --git a/arch/mips/boot/dts/brcm/bcm3368.dtsi b/arch/mips/boot/dts/brcm/bcm3368.dtsi index 69cbef472377..883ca8bed8e7 100644 --- a/arch/mips/boot/dts/brcm/bcm3368.dtsi +++ b/arch/mips/boot/dts/brcm/bcm3368.dtsi @@ -1,4 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 + +#include "dt-bindings/clock/bcm3368-clock.h" + / { #address-cells = <1>; #size-cells = <1>; @@ -59,7 +62,7 @@ periph_cntl: syscon@fff8c008 { compatible = "syscon"; - reg = <0xfff8c000 0x4>; + reg = <0xfff8c008 0x4>; native-endian; }; diff --git a/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts b/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts index 8d010b919de2..b511bc7125d5 100644 --- a/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts +++ b/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm63268.dtsi" +#include "bcm63268.dtsi" / { compatible = "comtrend,vr-3032u", "brcm,bcm63268"; diff --git a/arch/mips/boot/dts/brcm/bcm63268.dtsi b/arch/mips/boot/dts/brcm/bcm63268.dtsi index e0021ff9f144..c3ce49ec675f 100644 --- a/arch/mips/boot/dts/brcm/bcm63268.dtsi +++ b/arch/mips/boot/dts/brcm/bcm63268.dtsi @@ -1,4 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 + +#include "dt-bindings/clock/bcm63268-clock.h" +#include "dt-bindings/reset/bcm63268-reset.h" +#include "dt-bindings/soc/bcm63268-pm.h" + / { #address-cells = <1>; #size-cells = <1>; @@ -24,16 +29,29 @@ }; clocks { - periph_clk: periph-clk { + periph_osc: periph-osc { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <50000000>; + clock-output-names = "periph"; + }; + + hsspi_osc: hsspi-osc { + compatible = "fixed-clock"; + + #clock-cells = <0>; + + clock-frequency = <400000000>; + clock-output-names = "hsspi_osc"; }; }; aliases { + nflash = &nflash; serial0 = &uart0; serial1 = &uart1; + spi0 = &lsspi; + spi1 = &hsspi; }; cpu_intc: interrupt-controller { @@ -51,23 +69,22 @@ compatible = "simple-bus"; ranges; - clkctl: clock-controller@10000004 { + periph_clk: clock-controller@10000004 { compatible = "brcm,bcm63268-clocks"; reg = <0x10000004 0x4>; #clock-cells = <1>; }; - periph_cntl: syscon@10000008 { + pll_cntl: syscon@10000008 { compatible = "syscon"; - reg = <0x10000000 0xc>; + reg = <0x10000008 0x4>; native-endian; - }; - reboot: syscon-reboot@10000008 { - compatible = "syscon-reboot"; - regmap = <&periph_cntl>; - offset = <0x0>; - mask = <0x1>; + reboot { + compatible = "syscon-reboot"; + offset = <0x0>; + mask = <0x1>; + }; }; periph_rst: reset-controller@10000010 { @@ -88,6 +105,16 @@ interrupts = <2>, <3>; }; + wdt: watchdog@1000009c { + compatible = "brcm,bcm7038-wdt"; + reg = <0x1000009c 0xc>; + + clocks = <&periph_osc>; + clock-names = "refclk"; + + timeout-sec = <30>; + }; + uart0: serial@10000180 { compatible = "brcm,bcm6345-uart"; reg = <0x10000180 0x18>; @@ -95,12 +122,34 @@ interrupt-parent = 
<&periph_intc>; interrupts = <5>; - clocks = <&periph_clk>; + clocks = <&periph_osc>; clock-names = "refclk"; status = "disabled"; }; + nflash: nand@10000200 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,nand-bcm6368", + "brcm,brcmnand-v4.0", + "brcm,brcmnand"; + reg = <0x10000200 0x180>, + <0x10000600 0x200>, + <0x100000b0 0x10>; + reg-names = "nand", + "nand-cache", + "nand-int-base"; + + interrupt-parent = <&periph_intc>; + interrupts = <50>; + + clocks = <&periph_clk BCM63268_CLK_NAND>; + clock-names = "nand"; + + status = "disabled"; + }; + uart1: serial@100001a0 { compatible = "brcm,bcm6345-uart"; reg = <0x100001a0 0x18>; @@ -108,17 +157,44 @@ interrupt-parent = <&periph_intc>; interrupts = <34>; - clocks = <&periph_clk>; + clocks = <&periph_osc>; clock-names = "refclk"; status = "disabled"; }; - leds0: led-controller@10001900 { + lsspi: spi@10000800 { #address-cells = <1>; #size-cells = <0>; - compatible = "brcm,bcm6328-leds"; - reg = <0x10001900 0x24>; + compatible = "brcm,bcm6358-spi"; + reg = <0x10000800 0x70c>; + + interrupt-parent = <&periph_intc>; + interrupts = <80>; + + clocks = <&periph_clk BCM63268_CLK_SPI>; + clock-names = "spi"; + + resets = <&periph_rst BCM63268_RST_SPI>; + + status = "disabled"; + }; + + hsspi: spi@10001000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,bcm6328-hsspi"; + reg = <0x10001000 0x600>; + + interrupt-parent = <&periph_intc>; + interrupts = <6>; + + clocks = <&periph_clk BCM63268_CLK_HSSPI>, + <&hsspi_osc>; + clock-names = "hsspi", + "pll"; + + resets = <&periph_rst BCM63268_RST_SPI>; status = "disabled"; }; @@ -129,6 +205,15 @@ #power-domain-cells = <1>; }; + leds0: led-controller@10001900 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,bcm6328-leds"; + reg = <0x10001900 0x24>; + + status = "disabled"; + }; + ehci: usb@10002500 { compatible = "brcm,bcm63268-ehci", "generic-ehci"; reg = <0x10002500 0x100>; @@ -137,6 +222,9 @@ interrupt-parent = <&periph_intc>; interrupts = <10>; + phys = <&usbh 0>; + phy-names = "usb"; + status = "disabled"; }; @@ -149,6 +237,25 @@ interrupt-parent = <&periph_intc>; interrupts = <9>; + phys = <&usbh 0>; + phy-names = "usb"; + + status = "disabled"; + }; + + usbh: usb-phy@10002700 { + compatible = "brcm,bcm63268-usbh-phy"; + reg = <0x10002700 0x38>; + #phy-cells = <1>; + + clocks = <&periph_clk BCM63268_CLK_USBH>; + clock-names = "usbh"; + + power-domains = <&periph_pwr BCM63268_POWER_DOMAIN_USBH>; + + resets = <&periph_rst BCM63268_RST_USBH>; + reset-names = "usbh"; + status = "disabled"; }; }; diff --git a/arch/mips/boot/dts/brcm/bcm6328.dtsi b/arch/mips/boot/dts/brcm/bcm6328.dtsi index 9dc558763c46..634618d4377e 100644 --- a/arch/mips/boot/dts/brcm/bcm6328.dtsi +++ b/arch/mips/boot/dts/brcm/bcm6328.dtsi @@ -1,4 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 + +#include "dt-bindings/clock/bcm6328-clock.h" +#include "dt-bindings/reset/bcm6328-reset.h" +#include "dt-bindings/soc/bcm6328-pm.h" + / { #address-cells = <1>; #size-cells = <1>; @@ -24,16 +29,26 @@ }; clocks { - periph_clk: periph-clk { + periph_osc: periph-osc { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <50000000>; + clock-output-names = "periph"; + }; + + hsspi_osc: hsspi-osc { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <133333333>; + clock-output-names = "hsspi_osc"; }; }; aliases { + nflash = &nflash; serial0 = &uart0; serial1 = &uart1; + spi1 = &hsspi; }; cpu_intc: interrupt-controller { @@ -51,7 +66,7 @@ compatible = "simple-bus"; 
ranges; - clkctl: clock-controller@10000004 { + periph_clk: clock-controller@10000004 { compatible = "brcm,bcm6328-clocks"; reg = <0x10000004 0x4>; #clock-cells = <1>; @@ -75,37 +90,71 @@ interrupts = <2>, <3>; }; + wdt: watchdog@1000005c { + compatible = "brcm,bcm7038-wdt"; + reg = <0x1000005c 0xc>; + + clocks = <&periph_osc>; + clock-names = "refclk"; + + timeout-sec = <30>; + }; + + soft_reset: syscon@10000068 { + compatible = "syscon"; + reg = <0x10000068 0x4>; + native-endian; + + reboot { + compatible = "syscon-reboot"; + offset = <0x0>; + mask = <0x1>; + }; + }; + uart0: serial@10000100 { compatible = "brcm,bcm6345-uart"; reg = <0x10000100 0x18>; + interrupt-parent = <&periph_intc>; interrupts = <28>; - clocks = <&periph_clk>; + + clocks = <&periph_osc>; clock-names = "refclk"; + status = "disabled"; }; uart1: serial@10000120 { compatible = "brcm,bcm6345-uart"; reg = <0x10000120 0x18>; + interrupt-parent = <&periph_intc>; interrupts = <39>; - clocks = <&periph_clk>; + + clocks = <&periph_osc>; clock-names = "refclk"; + status = "disabled"; }; - timer: syscon@10000040 { - compatible = "syscon"; - reg = <0x10000040 0x2c>; - native-endian; - }; + nflash: nand@10000200 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,nand-bcm6368", + "brcm,brcmnand-v2.2", + "brcm,brcmnand"; + reg = <0x10000200 0x180>, + <0x10000400 0x200>, + <0x10000070 0x10>; + reg-names = "nand", + "nand-cache", + "nand-int-base"; - reboot: syscon-reboot@10000068 { - compatible = "syscon-reboot"; - regmap = <&timer>; - offset = <0x28>; - mask = <0x1>; + interrupt-parent = <&periph_intc>; + interrupts = <0>; + + status = "disabled"; }; leds0: led-controller@10000800 { @@ -113,6 +162,27 @@ #size-cells = <0>; compatible = "brcm,bcm6328-leds"; reg = <0x10000800 0x24>; + + status = "disabled"; + }; + + hsspi: spi@10001000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,bcm6328-hsspi"; + reg = <0x10001000 0x600>; + + interrupt-parent = <&periph_intc>; + interrupts = <29>; + + clocks = <&periph_clk BCM6328_CLK_HSSPI>, + <&hsspi_osc>; + clock-names = "hsspi", + "pll"; + + resets = <&periph_rst BCM6328_RST_SPI>; + reset-names = "hsspi"; + status = "disabled"; }; @@ -126,8 +196,13 @@ compatible = "brcm,bcm6328-ehci", "generic-ehci"; reg = <0x10002500 0x100>; big-endian; + interrupt-parent = <&periph_intc>; interrupts = <42>; + + phys = <&usbh 0>; + phy-names = "usb"; + status = "disabled"; }; @@ -136,8 +211,29 @@ reg = <0x10002600 0x100>; big-endian; no-big-frame-no; + interrupt-parent = <&periph_intc>; interrupts = <41>; + + phys = <&usbh 0>; + phy-names = "usb"; + + status = "disabled"; + }; + + usbh: usb-phy@10002700 { + compatible = "brcm,bcm6328-usbh-phy"; + reg = <0x10002700 0x38>; + #phy-cells = <1>; + + clocks = <&periph_clk BCM6328_CLK_USBH>; + clock-names = "usbh"; + + power-domains = <&periph_pwr BCM6328_POWER_DOMAIN_USBH>; + + resets = <&periph_rst BCM6328_RST_USBH>; + reset-names = "usbh"; + status = "disabled"; }; }; diff --git a/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts b/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts index 53e57cc29291..c646690ee3df 100644 --- a/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts +++ b/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm6358.dtsi" +#include "bcm6358.dtsi" / { compatible = "sfr,nb4-ser", "brcm,bcm6358"; diff --git a/arch/mips/boot/dts/brcm/bcm6358.dtsi b/arch/mips/boot/dts/brcm/bcm6358.dtsi index 9d93e7f5e6fc..777c4379ed03 100644 
--- a/arch/mips/boot/dts/brcm/bcm6358.dtsi +++ b/arch/mips/boot/dts/brcm/bcm6358.dtsi @@ -1,4 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 + +#include "dt-bindings/clock/bcm6358-clock.h" +#include "dt-bindings/reset/bcm6358-reset.h" + / { #address-cells = <1>; #size-cells = <1>; @@ -24,16 +28,19 @@ }; clocks { - periph_clk: periph-clk { + periph_osc: periph-osc { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <50000000>; + clock-output-names = "periph"; }; }; aliases { + pflash = &pflash; serial0 = &uart0; serial1 = &uart1; + spi0 = &lsspi; }; cpu_intc: interrupt-controller { @@ -51,23 +58,22 @@ compatible = "simple-bus"; ranges; - clkctl: clock-controller@fffe0004 { + periph_clk: clock-controller@fffe0004 { compatible = "brcm,bcm6358-clocks"; reg = <0xfffe0004 0x4>; #clock-cells = <1>; }; - periph_cntl: syscon@fffe0008 { + pll_cntl: syscon@fffe0008 { compatible = "syscon"; - reg = <0xfffe0000 0x4>; + reg = <0xfffe0008 0x4>; native-endian; - }; - reboot: syscon-reboot@fffe0008 { - compatible = "syscon-reboot"; - regmap = <&periph_cntl>; - offset = <0x0>; - mask = <0x1>; + reboot { + compatible = "syscon-reboot"; + offset = <0x0>; + mask = <0x1>; + }; }; periph_intc: interrupt-controller@fffe000c { @@ -88,6 +94,16 @@ #reset-cells = <1>; }; + wdt: watchdog@fffe005c { + compatible = "brcm,bcm7038-wdt"; + reg = <0xfffe005c 0xc>; + + clocks = <&periph_osc>; + clock-names = "refclk"; + + timeout-sec = <30>; + }; + leds0: led-controller@fffe00d0 { #address-cells = <1>; #size-cells = <0>; @@ -104,7 +120,7 @@ interrupt-parent = <&periph_intc>; interrupts = <2>; - clocks = <&periph_clk>; + clocks = <&periph_osc>; clock-names = "refclk"; status = "disabled"; @@ -117,18 +133,41 @@ interrupt-parent = <&periph_intc>; interrupts = <3>; - clocks = <&periph_clk>; + clocks = <&periph_osc>; clock-names = "refclk"; status = "disabled"; }; + lsspi: spi@fffe0800 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,bcm6358-spi"; + reg = <0xfffe0800 0x70c>; + + interrupt-parent = <&periph_intc>; + interrupts = <1>; + + clocks = <&periph_clk BCM6358_CLK_SPI>; + clock-names = "spi"; + + resets = <&periph_rst BCM6358_RST_SPI>; + reset-names = "spi"; + + status = "disabled"; + }; + ehci: usb@fffe1300 { compatible = "brcm,bcm6358-ehci", "generic-ehci"; reg = <0xfffe1300 0x100>; big-endian; + interrupt-parent = <&periph_intc>; interrupts = <10>; + + phys = <&usbh 0>; + phy-names = "usb"; + status = "disabled"; }; @@ -137,9 +176,35 @@ reg = <0xfffe1400 0x100>; big-endian; no-big-frame-no; + interrupt-parent = <&periph_intc>; interrupts = <5>; + + phys = <&usbh 0>; + phy-names = "usb"; + status = "disabled"; }; + + usbh: usb-phy@fffe1500 { + compatible = "brcm,bcm6358-usbh-phy"; + reg = <0xfffe1500 0x38>; + #phy-cells = <1>; + + resets = <&periph_rst BCM6358_RST_USBH>; + reset-names = "usbh"; + + status = "disabled"; + }; + }; + + pflash: nor@1e000000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "cfi-flash"; + reg = <0x1e000000 0x2000000>; + bank-width = <2>; + + status = "disabled"; }; }; diff --git a/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts b/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts index 3e83bee5b91e..f83d95ca0514 100644 --- a/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts +++ b/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm6362.dtsi" +#include "bcm6362.dtsi" / { compatible = "sfr,nb6-ser", "brcm,bcm6362"; diff --git a/arch/mips/boot/dts/brcm/bcm6362.dtsi 
b/arch/mips/boot/dts/brcm/bcm6362.dtsi index eb10341b75ba..d74021925c53 100644 --- a/arch/mips/boot/dts/brcm/bcm6362.dtsi +++ b/arch/mips/boot/dts/brcm/bcm6362.dtsi @@ -1,4 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 + +#include "dt-bindings/clock/bcm6362-clock.h" +#include "dt-bindings/reset/bcm6362-reset.h" +#include "dt-bindings/soc/bcm6362-pm.h" + / { #address-cells = <1>; #size-cells = <1>; @@ -24,16 +29,29 @@ }; clocks { - periph_clk: periph-clk { + periph_osc: periph-osc { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <50000000>; + clock-output-names = "periph"; + }; + + hsspi_osc: hsspi-osc { + compatible = "fixed-clock"; + + #clock-cells = <0>; + + clock-frequency = <400000000>; + clock-output-names = "hsspi_osc"; }; }; aliases { + nflash = &nflash; serial0 = &uart0; serial1 = &uart1; + spi0 = &lsspi; + spi1 = &hsspi; }; cpu_intc: interrupt-controller { @@ -51,23 +69,22 @@ compatible = "simple-bus"; ranges; - clkctl: clock-controller@10000004 { + periph_clk: clock-controller@10000004 { compatible = "brcm,bcm6362-clocks"; reg = <0x10000004 0x4>; #clock-cells = <1>; }; - periph_cntl: syscon@10000008 { + pll_cntl: syscon@10000008 { compatible = "syscon"; - reg = <0x10000000 0xc>; + reg = <0x10000008 0x4>; native-endian; - }; - reboot: syscon-reboot@10000008 { - compatible = "syscon-reboot"; - regmap = <&periph_cntl>; - offset = <0x0>; - mask = <0x1>; + reboot { + compatible = "syscon-reboot"; + offset = <0x0>; + mask = <0x1>; + }; }; periph_rst: reset-controller@10000010 { @@ -88,6 +105,16 @@ interrupts = <2>, <3>; }; + wdt: watchdog@1000005c { + compatible = "brcm,bcm7038-wdt"; + reg = <0x1000005c 0xc>; + + clocks = <&periph_osc>; + clock-names = "refclk"; + + timeout-sec = <30>; + }; + uart0: serial@10000100 { compatible = "brcm,bcm6345-uart"; reg = <0x10000100 0x18>; @@ -95,7 +122,7 @@ interrupt-parent = <&periph_intc>; interrupts = <3>; - clocks = <&periph_clk>; + clocks = <&periph_osc>; clock-names = "refclk"; status = "disabled"; @@ -108,12 +135,72 @@ interrupt-parent = <&periph_intc>; interrupts = <4>; - clocks = <&periph_clk>; + clocks = <&periph_osc>; clock-names = "refclk"; status = "disabled"; }; + nflash: nand@10000200 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,nand-bcm6368", + "brcm,brcmnand-v2.2", + "brcm,brcmnand"; + reg = <0x10000200 0x180>, + <0x10000600 0x200>, + <0x10000070 0x10>; + reg-names = "nand", + "nand-cache", + "nand-int-base"; + + interrupt-parent = <&periph_intc>; + interrupts = <12>; + + clocks = <&periph_clk BCM6362_CLK_NAND>; + clock-names = "nand"; + + status = "disabled"; + }; + + lsspi: spi@10000800 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,bcm6358-spi"; + reg = <0x10000800 0x70c>; + + interrupt-parent = <&periph_intc>; + interrupts = <2>; + + clocks = <&periph_clk BCM6362_CLK_SPI>; + clock-names = "spi"; + + resets = <&periph_rst BCM6362_RST_SPI>; + reset-names = "spi"; + + status = "disabled"; + }; + + hsspi: spi@10001000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,bcm6328-hsspi"; + reg = <0x10001000 0x600>; + + interrupt-parent = <&periph_intc>; + interrupts = <5>; + + clocks = <&periph_clk BCM6362_CLK_HSSPI>, + <&hsspi_osc>; + clock-names = "hsspi", + "pll"; + + resets = <&periph_rst BCM6362_RST_SPI>; + reset-names = "hsspi"; + + status = "disabled"; + }; + periph_pwr: power-controller@10001848 { compatible = "brcm,bcm6362-power-controller"; reg = <0x10001848 0x4>; @@ -137,6 +224,9 @@ interrupt-parent = <&periph_intc>; interrupts = <10>; + phys = 
<&usbh 0>; + phy-names = "usb"; + status = "disabled"; }; @@ -149,6 +239,26 @@ interrupt-parent = <&periph_intc>; interrupts = <9>; + phys = <&usbh 0>; + phy-names = "usb"; + + status = "disabled"; + }; + + usbh: usb-phy@10002700 { + compatible = "brcm,bcm6362-usbh-phy"; + reg = <0x10002700 0x38>; + + #phy-cells = <1>; + + clocks = <&periph_clk BCM6362_CLK_USBH>; + clock-names = "usbh"; + + power-domains = <&periph_pwr BCM6362_POWER_DOMAIN_USBH>; + + resets = <&periph_rst BCM6362_RST_USBH>; + reset-names = "usbh"; + status = "disabled"; }; }; diff --git a/arch/mips/boot/dts/brcm/bcm6368.dtsi b/arch/mips/boot/dts/brcm/bcm6368.dtsi index 52c19f40b9cc..fc15e200877d 100644 --- a/arch/mips/boot/dts/brcm/bcm6368.dtsi +++ b/arch/mips/boot/dts/brcm/bcm6368.dtsi @@ -1,4 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 + +#include "dt-bindings/clock/bcm6368-clock.h" +#include "dt-bindings/reset/bcm6368-reset.h" + / { #address-cells = <1>; #size-cells = <1>; @@ -24,16 +28,20 @@ }; clocks { - periph_clk: periph-clk { + periph_osc: periph-osc { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <50000000>; + clock-output-names = "periph"; }; }; aliases { + nflash = &nflash; + pflash = &pflash; serial0 = &uart0; serial1 = &uart1; + spi0 = &lsspi; }; cpu_intc: interrupt-controller { @@ -51,23 +59,22 @@ compatible = "simple-bus"; ranges; - clkctl: clock-controller@10000004 { + periph_clk: clock-controller@10000004 { compatible = "brcm,bcm6368-clocks"; reg = <0x10000004 0x4>; #clock-cells = <1>; }; - periph_cntl: syscon@100000008 { + pll_cntl: syscon@100000008 { compatible = "syscon"; - reg = <0x10000000 0xc>; + reg = <0x10000008 0x4>; native-endian; - }; - reboot: syscon-reboot@10000008 { - compatible = "syscon-reboot"; - regmap = <&periph_cntl>; - offset = <0x0>; - mask = <0x1>; + reboot { + compatible = "syscon-reboot"; + offset = <0x0>; + mask = <0x1>; + }; }; periph_rst: reset-controller@10000010 { @@ -88,31 +95,88 @@ interrupts = <2>, <3>; }; + wdt: watchdog@1000005c { + compatible = "brcm,bcm7038-wdt"; + reg = <0x1000005c 0xc>; + + clocks = <&periph_osc>; + clock-names = "refclk"; + + timeout-sec = <30>; + }; + leds0: led-controller@100000d0 { #address-cells = <1>; #size-cells = <0>; compatible = "brcm,bcm6358-leds"; reg = <0x100000d0 0x8>; + status = "disabled"; }; uart0: serial@10000100 { compatible = "brcm,bcm6345-uart"; reg = <0x10000100 0x18>; + interrupt-parent = <&periph_intc>; interrupts = <2>; - clocks = <&periph_clk>; + + clocks = <&periph_osc>; clock-names = "refclk"; + status = "disabled"; }; uart1: serial@10000120 { compatible = "brcm,bcm6345-uart"; reg = <0x10000120 0x18>; + interrupt-parent = <&periph_intc>; interrupts = <3>; - clocks = <&periph_clk>; + + clocks = <&periph_osc>; clock-names = "refclk"; + + status = "disabled"; + }; + + nflash: nand@10000200 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,nand-bcm6368", + "brcm,brcmnand-v2.1", + "brcm,brcmnand"; + reg = <0x10000200 0x180>, + <0x10000600 0x200>, + <0x10000070 0x10>; + reg-names = "nand", + "nand-cache", + "nand-int-base"; + + interrupt-parent = <&periph_intc>; + interrupts = <10>; + + clocks = <&periph_clk BCM6368_CLK_NAND>; + clock-names = "nand"; + + status = "disabled"; + }; + + lsspi: spi@10000800 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,bcm6358-spi"; + reg = <0x10000800 0x70c>; + + interrupt-parent = <&periph_intc>; + interrupts = <1>; + + clocks = <&periph_clk BCM6368_CLK_SPI>; + clock-names = "spi"; + + resets = <&periph_rst BCM6368_RST_SPI>; + reset-names 
= "spi"; + status = "disabled"; }; @@ -120,8 +184,13 @@ compatible = "brcm,bcm6368-ehci", "generic-ehci"; reg = <0x10001500 0x100>; big-endian; + interrupt-parent = <&periph_intc>; interrupts = <7>; + + phys = <&usbh 0>; + phy-names = "usb"; + status = "disabled"; }; @@ -130,9 +199,49 @@ reg = <0x10001600 0x100>; big-endian; no-big-frame-no; + interrupt-parent = <&periph_intc>; interrupts = <5>; + + phys = <&usbh 0>; + phy-names = "usb"; + status = "disabled"; }; + + usbh: usb-phy@10001700 { + compatible = "brcm,bcm6368-usbh-phy"; + reg = <0x10001700 0x38>; + #phy-cells = <1>; + + clocks = <&periph_clk BCM6368_CLK_USBH>; + clock-names = "usbh"; + + resets = <&periph_rst BCM6368_RST_USBH>; + reset-names = "usbh"; + + status = "disabled"; + }; + + random: rng@10004180 { + compatible = "brcm,bcm6368-rng"; + reg = <0x10004180 0x14>; + + clocks = <&periph_clk BCM6368_CLK_IPSEC>; + clock-names = "ipsec"; + + resets = <&periph_rst BCM6368_RST_IPSEC>; + reset-names = "ipsec"; + }; + }; + + pflash: nor@18000000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "cfi-flash"; + reg = <0x18000000 0x2000000>; + bank-width = <2>; + + status = "disabled"; }; }; diff --git a/arch/mips/boot/dts/brcm/bcm93384wvg.dts b/arch/mips/boot/dts/brcm/bcm93384wvg.dts index 601e4d9293ab..7d3f181b8980 100644 --- a/arch/mips/boot/dts/brcm/bcm93384wvg.dts +++ b/arch/mips/boot/dts/brcm/bcm93384wvg.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm3384_zephyr.dtsi" +#include "bcm3384_zephyr.dtsi" / { compatible = "brcm,bcm93384wvg", "brcm,bcm3384"; diff --git a/arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts b/arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts index 938a8e66128c..f845faa0d682 100644 --- a/arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts +++ b/arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm3384_viper.dtsi" +#include "bcm3384_viper.dtsi" / { compatible = "brcm,bcm93384wvg-viper", "brcm,bcm3384-viper"; diff --git a/arch/mips/boot/dts/brcm/bcm96368mvwg.dts b/arch/mips/boot/dts/brcm/bcm96368mvwg.dts index 6d772c394e41..f5e955085308 100644 --- a/arch/mips/boot/dts/brcm/bcm96368mvwg.dts +++ b/arch/mips/boot/dts/brcm/bcm96368mvwg.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm6368.dtsi" +#include "bcm6368.dtsi" / { compatible = "brcm,bcm96368mvwg", "brcm,bcm6368"; diff --git a/arch/mips/boot/dts/brcm/bcm97125cbmb.dts b/arch/mips/boot/dts/brcm/bcm97125cbmb.dts index 79e9769f7e00..bda5f796251a 100644 --- a/arch/mips/boot/dts/brcm/bcm97125cbmb.dts +++ b/arch/mips/boot/dts/brcm/bcm97125cbmb.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm7125.dtsi" +#include "bcm7125.dtsi" / { compatible = "brcm,bcm97125cbmb", "brcm,bcm7125"; diff --git a/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts b/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts index 28370ff77eeb..9f73735e815c 100644 --- a/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts +++ b/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm7346.dtsi" -/include/ "bcm97xxx-nand-cs1-bch24.dtsi" +#include "bcm7346.dtsi" +#include "bcm97xxx-nand-cs1-bch24.dtsi" / { compatible = "brcm,bcm97346dbsmb", "brcm,bcm7346"; diff --git a/arch/mips/boot/dts/brcm/bcm97358svmb.dts b/arch/mips/boot/dts/brcm/bcm97358svmb.dts index 41c1b510c230..522f2c40d6e6 100644 --- a/arch/mips/boot/dts/brcm/bcm97358svmb.dts +++ b/arch/mips/boot/dts/brcm/bcm97358svmb.dts @@ -1,8 +1,8 @@ 
// SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm7358.dtsi" -/include/ "bcm97xxx-nand-cs1-bch4.dtsi" +#include "bcm7358.dtsi" +#include "bcm97xxx-nand-cs1-bch4.dtsi" / { compatible = "brcm,bcm97358svmb", "brcm,bcm7358"; diff --git a/arch/mips/boot/dts/brcm/bcm97360svmb.dts b/arch/mips/boot/dts/brcm/bcm97360svmb.dts index 9f6c6c9b7ea7..01f215b08dba 100644 --- a/arch/mips/boot/dts/brcm/bcm97360svmb.dts +++ b/arch/mips/boot/dts/brcm/bcm97360svmb.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm7360.dtsi" +#include "bcm7360.dtsi" / { compatible = "brcm,bcm97360svmb", "brcm,bcm7360"; diff --git a/arch/mips/boot/dts/brcm/bcm97362svmb.dts b/arch/mips/boot/dts/brcm/bcm97362svmb.dts index df8b755c390f..97aeb51b6831 100644 --- a/arch/mips/boot/dts/brcm/bcm97362svmb.dts +++ b/arch/mips/boot/dts/brcm/bcm97362svmb.dts @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm7362.dtsi" -/include/ "bcm97xxx-nand-cs1-bch4.dtsi" +#include "bcm7362.dtsi" +#include "bcm97xxx-nand-cs1-bch4.dtsi" / { compatible = "brcm,bcm97362svmb", "brcm,bcm7362"; diff --git a/arch/mips/boot/dts/brcm/bcm97420c.dts b/arch/mips/boot/dts/brcm/bcm97420c.dts index 086faeaa384a..cc70c2dd4d85 100644 --- a/arch/mips/boot/dts/brcm/bcm97420c.dts +++ b/arch/mips/boot/dts/brcm/bcm97420c.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm7420.dtsi" +#include "bcm7420.dtsi" / { compatible = "brcm,bcm97420c", "brcm,bcm7420"; diff --git a/arch/mips/boot/dts/brcm/bcm97425svmb.dts b/arch/mips/boot/dts/brcm/bcm97425svmb.dts index 0ed22217bf3a..9efecfe1e05c 100644 --- a/arch/mips/boot/dts/brcm/bcm97425svmb.dts +++ b/arch/mips/boot/dts/brcm/bcm97425svmb.dts @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm7425.dtsi" -/include/ "bcm97xxx-nand-cs1-bch24.dtsi" +#include "bcm7425.dtsi" +#include "bcm97xxx-nand-cs1-bch24.dtsi" / { compatible = "brcm,bcm97425svmb", "brcm,bcm7425"; diff --git a/arch/mips/boot/dts/brcm/bcm97435svmb.dts b/arch/mips/boot/dts/brcm/bcm97435svmb.dts index 2c145a883aef..b653c6ff74b5 100644 --- a/arch/mips/boot/dts/brcm/bcm97435svmb.dts +++ b/arch/mips/boot/dts/brcm/bcm97435svmb.dts @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm7435.dtsi" -/include/ "bcm97xxx-nand-cs1-bch24.dtsi" +#include "bcm7435.dtsi" +#include "bcm97xxx-nand-cs1-bch24.dtsi" / { compatible = "brcm,bcm97435svmb", "brcm,bcm7435"; diff --git a/arch/mips/boot/dts/brcm/bcm9ejtagprb.dts b/arch/mips/boot/dts/brcm/bcm9ejtagprb.dts index 8d58c1971b30..615d2b97770e 100644 --- a/arch/mips/boot/dts/brcm/bcm9ejtagprb.dts +++ b/arch/mips/boot/dts/brcm/bcm9ejtagprb.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm6328.dtsi" +#include "bcm6328.dtsi" / { compatible = "brcm,bcm9ejtagprb", "brcm,bcm6328"; diff --git a/arch/mips/boot/dts/ingenic/gcw0.dts b/arch/mips/boot/dts/ingenic/gcw0.dts index bc72304a2440..f4c04f2263ea 100644 --- a/arch/mips/boot/dts/ingenic/gcw0.dts +++ b/arch/mips/boot/dts/ingenic/gcw0.dts @@ -345,7 +345,6 @@ spi-max-frequency = <3125000>; spi-3wire; - spi-cs-high; reset-gpios = <&gpe 2 GPIO_ACTIVE_LOW>; diff --git a/arch/mips/boot/dts/loongson/Makefile b/arch/mips/boot/dts/loongson/Makefile index 8fd0efb37423..72267bfda9b4 100644 --- a/arch/mips/boot/dts/loongson/Makefile +++ b/arch/mips/boot/dts/loongson/Makefile @@ -1,4 +1,5 @@ # SPDX_License_Identifier: GPL_2.0 +dtb-$(CONFIG_MACH_LOONGSON64) += loongson64_2core_2k1000.dtb dtb-$(CONFIG_MACH_LOONGSON64) += 
loongson64c_4core_ls7a.dtb dtb-$(CONFIG_MACH_LOONGSON64) += loongson64c_4core_rs780e.dtb dtb-$(CONFIG_MACH_LOONGSON64) += loongson64c_8core_rs780e.dtb diff --git a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi new file mode 100644 index 000000000000..569e814def83 --- /dev/null +++ b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 + +/dts-v1/; + +#include <dt-bindings/interrupt-controller/irq.h> + +/ { + compatible = "loongson,loongson2k1000"; + + #address-cells = <2>; + #size-cells = <2>; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "loongson,gs264"; + reg = <0x0>; + #clock-cells = <1>; + clocks = <&cpu_clk>; + }; + }; + + memory { + compatible = "memory"; + device_type = "memory"; + reg = <0x00000000 0x00200000 0x00000000 0x0ee00000>, /* 238 MB at 2 MB */ + <0x00000000 0x20000000 0x00000000 0x1f000000>, /* 496 MB at 512 MB */ + <0x00000001 0x10000000 0x00000001 0xb0000000>; /* 6912 MB at 4352MB */ + }; + + cpu_clk: cpu_clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <800000000>; + }; + + cpuintc: interrupt-controller { + #address-cells = <0>; + #interrupt-cells = <1>; + interrupt-controller; + compatible = "mti,cpu-interrupt-controller"; + }; + + package0: bus@10000000 { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + ranges = <0 0x10000000 0 0x10000000 0 0x10000000 /* ioports */ + 0 0x40000000 0 0x40000000 0 0x40000000 + 0xfe 0x00000000 0xfe 0x00000000 0 0x40000000>; + + liointc0: interrupt-controller@1fe11400 { + compatible = "loongson,liointc-2.0"; + reg = <0 0x1fe11400 0 0x40>, + <0 0x1fe11040 0 0x8>, + <0 0x1fe11140 0 0x8>; + reg-names = "main", "isr0", "isr1"; + + interrupt-controller; + #interrupt-cells = <2>; + + interrupt-parent = <&cpuintc>; + interrupts = <2>; + interrupt-names = "int0"; + + loongson,parent_int_map = <0xffffffff>, /* int0 */ + <0x00000000>, /* int1 */ + <0x00000000>, /* int2 */ + <0x00000000>; /* int3 */ + }; + + liointc1: interrupt-controller@1fe11440 { + compatible = "loongson,liointc-2.0"; + reg = <0 0x1fe11440 0 0x40>, + <0 0x1fe11048 0 0x8>, + <0 0x1fe11148 0 0x8>; + reg-names = "main", "isr0", "isr1"; + + interrupt-controller; + #interrupt-cells = <2>; + + interrupt-parent = <&cpuintc>; + interrupts = <3>; + interrupt-names = "int1"; + + loongson,parent_int_map = <0x00000000>, /* int0 */ + <0xffffffff>, /* int1 */ + <0x00000000>, /* int2 */ + <0x00000000>; /* int3 */ + }; + + uart0: serial@1fe00000 { + compatible = "ns16550a"; + reg = <0 0x1fe00000 0 0x8>; + clock-frequency = <125000000>; + interrupt-parent = <&liointc0>; + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + no-loopback-test; + }; + + pci@1a000000 { + compatible = "loongson,ls2k-pci"; + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <2>; + + reg = <0 0x1a000000 0 0x02000000>, + <0xfe 0x00000000 0 0x20000000>; + + ranges = <0x01000000 0x0 0x00000000 0x0 0x18000000 0x0 0x00010000>, + <0x02000000 0x0 0x40000000 0x0 0x40000000 0x0 0x40000000>; + + ehci@4,1 { + compatible = "pci0014,7a14.0", + "pci0014,7a14", + "pciclass0c0320", + "pciclass0c03"; + + reg = <0x2100 0x0 0x0 0x0 0x0>; + interrupts = <18 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&liointc1>; + }; + + ohci@4,2 { + compatible = "pci0014,7a24.0", + "pci0014,7a24", + "pciclass0c0310", + "pciclass0c03"; + + reg = <0x2200 0x0 0x0 0x0 0x0>; + interrupts = <19 
IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&liointc1>; + }; + + sata@8,0 { + compatible = "pci0014,7a08.0", + "pci0014,7a08", + "pciclass010601", + "pciclass0106"; + + reg = <0x4000 0x0 0x0 0x0 0x0>; + interrupts = <19 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&liointc0>; + }; + + pci_bridge@9,0 { + compatible = "pci0014,7a19.0", + "pci0014,7a19", + "pciclass060400", + "pciclass0604"; + + reg = <0x4800 0x0 0x0 0x0 0x0>; + #interrupt-cells = <1>; + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&liointc1>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &liointc1 0 IRQ_TYPE_LEVEL_LOW>; + external-facing; + }; + + pci_bridge@a,0 { + compatible = "pci0014,7a19.0", + "pci0014,7a19", + "pciclass060400", + "pciclass0604"; + + reg = <0x5000 0x0 0x0 0x0 0x0>; + #interrupt-cells = <1>; + interrupts = <1 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&liointc1>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &liointc1 1 IRQ_TYPE_LEVEL_LOW>; + external-facing; + }; + + pci_bridge@b,0 { + compatible = "pci0014,7a19.0", + "pci0014,7a19", + "pciclass060400", + "pciclass0604"; + + reg = <0x5800 0x0 0x0 0x0 0x0>; + #interrupt-cells = <1>; + interrupts = <2 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&liointc1>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &liointc1 2 IRQ_TYPE_LEVEL_LOW>; + external-facing; + }; + + pci_bridge@c,0 { + compatible = "pci0014,7a19.0", + "pci0014,7a19", + "pciclass060400", + "pciclass0604"; + + reg = <0x6000 0x0 0x0 0x0 0x0>; + #interrupt-cells = <1>; + interrupts = <3 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&liointc1>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &liointc1 3 IRQ_TYPE_LEVEL_LOW>; + external-facing; + }; + + pci_bridge@d,0 { + compatible = "pci0014,7a19.0", + "pci0014,7a19", + "pciclass060400", + "pciclass0604"; + + reg = <0x6800 0x0 0x0 0x0 0x0>; + #interrupt-cells = <1>; + interrupts = <4 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&liointc1>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &liointc1 4 IRQ_TYPE_LEVEL_LOW>; + external-facing; + }; + + pci_bridge@e,0 { + compatible = "pci0014,7a19.0", + "pci0014,7a19", + "pciclass060400", + "pciclass0604"; + + reg = <0x7000 0x0 0x0 0x0 0x0>; + #interrupt-cells = <1>; + interrupts = <5 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&liointc1>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &liointc1 5 IRQ_TYPE_LEVEL_LOW>; + external-facing; + }; + + }; + }; +}; + diff --git a/arch/mips/boot/dts/loongson/loongson64_2core_2k1000.dts b/arch/mips/boot/dts/loongson/loongson64_2core_2k1000.dts new file mode 100644 index 000000000000..e31d2ee65cd5 --- /dev/null +++ b/arch/mips/boot/dts/loongson/loongson64_2core_2k1000.dts @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 + +/dts-v1/; + +#include "loongson64-2k1000.dtsi" + +/ { + compatible = "loongson,loongson64-2core-2k1000"; +}; + diff --git a/arch/mips/cavium-octeon/oct_ilm.c b/arch/mips/cavium-octeon/oct_ilm.c index 99e27155b399..6a4694538bb6 100644 --- a/arch/mips/cavium-octeon/oct_ilm.c +++ b/arch/mips/cavium-octeon/oct_ilm.c @@ -62,7 +62,7 @@ static int reset_statistics(void *data, u64 value) return 0; } -DEFINE_SIMPLE_ATTRIBUTE(reset_statistics_ops, NULL, reset_statistics, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(reset_statistics_ops, NULL, reset_statistics, "%llu\n"); static void init_debugfs(void) { diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S index 0a7c9834b81c..600d018cf354 100644 --- 
a/arch/mips/cavium-octeon/octeon-memcpy.S +++ b/arch/mips/cavium-octeon/octeon-memcpy.S @@ -150,8 +150,12 @@ LEAF(memcpy) /* a0=dst a1=src a2=len */ EXPORT_SYMBOL(memcpy) move v0, dst /* return value */ __memcpy: -FEXPORT(__copy_user) -EXPORT_SYMBOL(__copy_user) +FEXPORT(__raw_copy_from_user) +EXPORT_SYMBOL(__raw_copy_from_user) +FEXPORT(__raw_copy_to_user) +EXPORT_SYMBOL(__raw_copy_to_user) +FEXPORT(__raw_copy_in_user) +EXPORT_SYMBOL(__raw_copy_in_user) /* * Note: dst & src may be unaligned, len may be 0 * Temps diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig index eea9b613bb74..d83e7d600b0a 100644 --- a/arch/mips/configs/bigsur_defconfig +++ b/arch/mips/configs/bigsur_defconfig @@ -105,10 +105,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_EEPROM_LEGACY=y CONFIG_EEPROM_MAX6875=y -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_IDETAPE=y -CONFIG_BLK_DEV_TC86C001=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y diff --git a/arch/mips/configs/loongson2k_defconfig b/arch/mips/configs/loongson2k_defconfig new file mode 100644 index 000000000000..e948ca487e2d --- /dev/null +++ b/arch/mips/configs/loongson2k_defconfig @@ -0,0 +1,353 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_KERNEL_LZMA=y +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SYSFS_DEPRECATED=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EMBEDDED=y +CONFIG_MACH_LOONGSON64=y +# CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION is not set +CONFIG_HZ_256=y +CONFIG_MIPS32_O32=y +CONFIG_MIPS32_N32=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_MQ_IOSCHED_DEADLINE=m +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_BINFMT_MISC=m +CONFIG_KSM=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_IP_VS=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_IP_SCTP=m +CONFIG_L2TP=m +CONFIG_BRIDGE=m +CONFIG_CFG80211=m +CONFIG_CFG80211_WEXT=y +CONFIG_MAC80211=m +CONFIG_RFKILL=m +CONFIG_RFKILL_INPUT=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y 
+CONFIG_PCIEASPM_PERFORMANCE=y +CONFIG_HOTPLUG_PCI=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_MTD=m +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_RAID_ATTRS=m +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_ISCSI_TCP=m +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=y +CONFIG_MEGARAID_MAILBOX=y +CONFIG_MEGARAID_LEGACY=y +CONFIG_MEGARAID_SAS=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_PATA_ATIIXP=y +CONFIG_MD=y +CONFIG_BLK_DEV_MD=m +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_MD_MULTIPATH=m +CONFIG_BLK_DEV_DM=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_MIRROR=m +CONFIG_DM_ZERO=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_NETDEVICES=y +CONFIG_TUN=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CIRRUS is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_IGB=y +CONFIG_IXGB=y +CONFIG_IXGBE=y +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +# CONFIG_NET_VENDOR_PENSANDO is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_8139CP=y +CONFIG_8139TOO=y +# CONFIG_8139TOO_PIO is not set +CONFIG_R8169=y +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +CONFIG_STMMAC_ETH=y +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_TOSHIBA is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_ATH9K=m +CONFIG_HOSTAP=m +CONFIG_INPUT_LEDS=m +CONFIG_INPUT_SPARSEKMAP=y +CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set +CONFIG_KEYBOARD_XTKBD=m +# CONFIG_MOUSE_PS2 is not set +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=m +# CONFIG_SERIO_I8042 is not set +CONFIG_SERIO_SERPORT=m +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=m 
+CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_16550A_VARIANTS is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=16 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIAL_NONSTANDARD=y +CONFIG_HW_RANDOM=y +CONFIG_RAW_DRIVER=m +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_PIIX4=y +CONFIG_GPIO_LOONGSON=y +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_W83627HF=m +# CONFIG_MEDIA_CEC_SUPPORT is not set +CONFIG_MEDIA_SUPPORT=m +# CONFIG_MEDIA_CONTROLLER is not set +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_VIDEO_CLASS=m +CONFIG_DRM=y +CONFIG_DRM_RADEON=y +CONFIG_FB_RADEON=y +CONFIG_LCD_CLASS_DEVICE=y +CONFIG_LCD_PLATFORM=m +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_LOGO=y +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_VERBOSE_PRINTK=y +CONFIG_SND_SEQUENCER=y +CONFIG_SND_SEQ_DUMMY=m +# CONFIG_SND_ISA is not set +CONFIG_SND_HDA_INTEL=y +CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_PATCH_LOADER=y +CONFIG_SND_HDA_CODEC_REALTEK=y +CONFIG_SND_HDA_CODEC_ANALOG=y +CONFIG_SND_HDA_CODEC_SIGMATEL=y +CONFIG_SND_HDA_CODEC_VIA=y +CONFIG_SND_HDA_CODEC_CONEXANT=y +# CONFIG_SND_USB is not set +CONFIG_SND_SOC=y +CONFIG_HID_A4TECH=m +CONFIG_HID_SUNPLUS=m +CONFIG_USB=y +CONFIG_USB_MON=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_UHCI_HCD=m +CONFIG_USB_STORAGE=y +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_CMOS=y +CONFIG_DMADEVICES=y +# CONFIG_CPU_HWMON is not set +CONFIG_PM_DEVFREQ=y +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +CONFIG_DEVFREQ_GOV_PERFORMANCE=y +CONFIG_DEVFREQ_GOV_POWERSAVE=y +CONFIG_DEVFREQ_GOV_USERSPACE=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_QUOTA=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=m +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=936 +CONFIG_FAT_DEFAULT_IOCHARSET="gb2312" +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_CONFIGFS_FS=y +CONFIG_CRAMFS=m +CONFIG_SQUASHFS=y +CONFIG_SQUASHFS_XATTR=y +CONFIG_NFS_FS=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_CIFS=m +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_UTF8=y +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_PATH=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_DEFLATE=m +CONFIG_PRINTK_TIME=y +CONFIG_FRAME_WARN=1024 +CONFIG_STRIP_ASM_SYMS=y +CONFIG_MAGIC_SYSRQ=y +# CONFIG_SCHED_DEBUG is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_FTRACE is not set diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig index 0e79f81217bc..a18609cf0e5e 100644 --- a/arch/mips/configs/loongson3_defconfig +++ 
b/arch/mips/configs/loongson3_defconfig @@ -26,6 +26,7 @@ CONFIG_SCHED_AUTOGROUP=y CONFIG_SYSFS_DEPRECATED=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +CONFIG_BPF_SYSCALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y CONFIG_MACH_LOONGSON64=y @@ -39,7 +40,7 @@ CONFIG_MIPS32_O32=y CONFIG_MIPS32_N32=y CONFIG_VIRTUALIZATION=y CONFIG_KVM=m -CONFIG_KVM_MIPS_VZ=y +CONFIG_KPROBES=y CONFIG_MODULES=y CONFIG_MODULE_FORCE_LOAD=y CONFIG_MODULE_UNLOAD=y @@ -129,6 +130,7 @@ CONFIG_L2TP=m CONFIG_BRIDGE=m CONFIG_VSOCKETS=m CONFIG_VIRTIO_VSOCKETS=m +CONFIG_BPF_JIT=y CONFIG_CFG80211=m CONFIG_CFG80211_WEXT=y CONFIG_MAC80211=m @@ -318,6 +320,7 @@ CONFIG_USB_OHCI_HCD=y CONFIG_USB_UHCI_HCD=m CONFIG_USB_STORAGE=m CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_PL2303=m CONFIG_USB_SERIAL_OPTION=m CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_CMOS=y @@ -405,8 +408,10 @@ CONFIG_CRYPTO_DEFLATE=m CONFIG_PRINTK_TIME=y CONFIG_STRIP_ASM_SYMS=y CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y # CONFIG_SCHED_DEBUG is not set # CONFIG_DEBUG_PREEMPT is not set -# CONFIG_FTRACE is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FTRACE_SYSCALLS=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="ieee754=relaxed" diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig index 211bd3d6e6cb..9cb2cf2595e0 100644 --- a/arch/mips/configs/malta_defconfig +++ b/arch/mips/configs/malta_defconfig @@ -227,7 +227,6 @@ CONFIG_MTD_PHYSMAP_OF=y CONFIG_MTD_UBI=m CONFIG_MTD_UBI_GLUEBI=m CONFIG_BLK_DEV_FD=m -CONFIG_BLK_DEV_UMEM=m CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig index 62b1969b4f55..5924e48fd3ec 100644 --- a/arch/mips/configs/malta_kvm_defconfig +++ b/arch/mips/configs/malta_kvm_defconfig @@ -232,16 +232,12 @@ CONFIG_MTD_PHYSMAP_OF=y CONFIG_MTD_UBI=m CONFIG_MTD_UBI_GLUEBI=m CONFIG_BLK_DEV_FD=m -CONFIG_BLK_DEV_UMEM=m CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_TC86C001=m CONFIG_RAID_ATTRS=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig deleted file mode 100644 index 9185e0a0aa45..000000000000 --- a/arch/mips/configs/malta_kvm_guest_defconfig +++ /dev/null @@ -1,436 +0,0 @@ -CONFIG_SYSVIPC=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_LOG_BUF_SHIFT=15 -CONFIG_NAMESPACES=y -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_EXPERT=y -# CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y -CONFIG_MIPS_MALTA=y -CONFIG_CPU_LITTLE_ENDIAN=y -CONFIG_CPU_MIPS32_R2=y -CONFIG_KVM_GUEST=y -CONFIG_PAGE_SIZE_16KB=y -# CONFIG_MIPS_MT_SMP is not set -CONFIG_HZ_100=y -CONFIG_PCI=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=m -CONFIG_NET_KEY=y -CONFIG_NET_KEY_MIGRATE=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_NET_IPIP=m -CONFIG_IP_MROUTE=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_IPCOMP=m -CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_TCP_MD5SIG=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y 
-CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_TUNNEL=m -CONFIG_IPV6_MROUTE=y -CONFIG_IPV6_PIMSM_V2=y -CONFIG_NETWORK_SECMARK=y -CONFIG_NETFILTER=y -CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_AMANDA=m -CONFIG_NF_CONNTRACK_FTP=m -CONFIG_NF_CONNTRACK_H323=m -CONFIG_NF_CONNTRACK_IRC=m -CONFIG_NF_CONNTRACK_PPTP=m -CONFIG_NF_CONNTRACK_SANE=m -CONFIG_NF_CONNTRACK_SIP=m -CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NF_CT_NETLINK=m -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m -CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_TARGET_NFLOG=m -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_TPROXY=m -CONFIG_NETFILTER_XT_TARGET_TRACE=m -CONFIG_NETFILTER_XT_TARGET_SECMARK=m -CONFIG_NETFILTER_XT_TARGET_TCPMSS=m -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m -CONFIG_NETFILTER_XT_MATCH_COMMENT=m -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m -CONFIG_NETFILTER_XT_MATCH_CONNMARK=m -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m -CONFIG_NETFILTER_XT_MATCH_ESP=m -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m -CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_IPRANGE=m -CONFIG_NETFILTER_XT_MATCH_LENGTH=m -CONFIG_NETFILTER_XT_MATCH_LIMIT=m -CONFIG_NETFILTER_XT_MATCH_MAC=m -CONFIG_NETFILTER_XT_MATCH_MARK=m -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m -CONFIG_NETFILTER_XT_MATCH_QUOTA=m -CONFIG_NETFILTER_XT_MATCH_RATEEST=m -CONFIG_NETFILTER_XT_MATCH_REALM=m -CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SOCKET=m -CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NETFILTER_XT_MATCH_STATISTIC=m -CONFIG_NETFILTER_XT_MATCH_STRING=m -CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_TIME=m -CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_VS=m -CONFIG_IP_VS_IPV6=y -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_RR=m -CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_LC=m -CONFIG_IP_VS_WLC=m -CONFIG_IP_VS_LBLC=m -CONFIG_IP_VS_LBLCR=m -CONFIG_IP_VS_DH=m -CONFIG_IP_VS_SH=m -CONFIG_IP_VS_SED=m -CONFIG_IP_VS_NQ=m -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_AH=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_CLUSTERIP=m -CONFIG_IP_NF_TARGET_ECN=m -CONFIG_IP_NF_TARGET_TTL=m -CONFIG_IP_NF_RAW=m -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_IP6_NF_MATCH_AH=m -CONFIG_IP6_NF_MATCH_EUI64=m -CONFIG_IP6_NF_MATCH_FRAG=m -CONFIG_IP6_NF_MATCH_OPTS=m -CONFIG_IP6_NF_MATCH_HL=m -CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RT=m -CONFIG_IP6_NF_TARGET_HL=m -CONFIG_IP6_NF_FILTER=m -CONFIG_IP6_NF_TARGET_REJECT=m -CONFIG_IP6_NF_MANGLE=m -CONFIG_IP6_NF_RAW=m -CONFIG_BRIDGE_NF_EBTABLES=m -CONFIG_BRIDGE_EBT_BROUTE=m -CONFIG_BRIDGE_EBT_T_FILTER=m -CONFIG_BRIDGE_EBT_T_NAT=m -CONFIG_BRIDGE_EBT_802_3=m -CONFIG_BRIDGE_EBT_AMONG=m -CONFIG_BRIDGE_EBT_ARP=m -CONFIG_BRIDGE_EBT_IP=m -CONFIG_BRIDGE_EBT_IP6=m -CONFIG_BRIDGE_EBT_LIMIT=m -CONFIG_BRIDGE_EBT_MARK=m -CONFIG_BRIDGE_EBT_PKTTYPE=m -CONFIG_BRIDGE_EBT_STP=m -CONFIG_BRIDGE_EBT_VLAN=m -CONFIG_BRIDGE_EBT_ARPREPLY=m -CONFIG_BRIDGE_EBT_DNAT=m -CONFIG_BRIDGE_EBT_MARK_T=m -CONFIG_BRIDGE_EBT_REDIRECT=m -CONFIG_BRIDGE_EBT_SNAT=m -CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_NFLOG=m 
-CONFIG_IP_SCTP=m -CONFIG_BRIDGE=m -CONFIG_VLAN_8021Q=m -CONFIG_VLAN_8021Q_GVRP=y -CONFIG_ATALK=m -CONFIG_DEV_APPLETALK=m -CONFIG_IPDDP=m -CONFIG_IPDDP_ENCAP=y -CONFIG_PHONET=m -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_CBQ=m -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_INGRESS=m -CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_TCINDEX=m -CONFIG_NET_CLS_ROUTE4=m -CONFIG_NET_CLS_FW=m -CONFIG_NET_CLS_U32=m -CONFIG_NET_CLS_RSVP=m -CONFIG_NET_CLS_RSVP6=m -CONFIG_NET_CLS_FLOW=m -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=y -CONFIG_NET_ACT_GACT=m -CONFIG_GACT_PROB=y -CONFIG_NET_ACT_MIRRED=m -CONFIG_NET_ACT_IPT=m -CONFIG_NET_ACT_NAT=m -CONFIG_NET_ACT_PEDIT=m -CONFIG_NET_ACT_SIMP=m -CONFIG_NET_ACT_SKBEDIT=m -CONFIG_CFG80211=m -CONFIG_MAC80211=m -CONFIG_MAC80211_MESH=y -CONFIG_RFKILL=m -CONFIG_DEVTMPFS=y -CONFIG_CONNECTOR=m -CONFIG_MTD=y -CONFIG_MTD_BLOCK=y -CONFIG_MTD_OOPS=m -CONFIG_MTD_CFI=y -CONFIG_MTD_CFI_INTELEXT=y -CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_CFI_STAA=y -CONFIG_MTD_PHYSMAP_OF=y -CONFIG_MTD_UBI=m -CONFIG_MTD_UBI_GLUEBI=m -CONFIG_BLK_DEV_FD=m -CONFIG_BLK_DEV_UMEM=m -CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_RAM=y -CONFIG_CDROM_PKTCDVD=m -CONFIG_ATA_OVER_ETH=m -CONFIG_VIRTIO_BLK=y -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_TC86C001=m -CONFIG_RAID_ATTRS=m -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=m -CONFIG_CHR_DEV_OSST=m -CONFIG_BLK_DEV_SR=y -CONFIG_CHR_DEV_SG=m -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y -CONFIG_SCSI_FC_ATTRS=m -CONFIG_ISCSI_TCP=m -CONFIG_BLK_DEV_3W_XXXX_RAID=m -CONFIG_SCSI_3W_9XXX=m -CONFIG_SCSI_ACARD=m -CONFIG_SCSI_AACRAID=m -CONFIG_SCSI_AIC7XXX=m -CONFIG_AIC7XXX_RESET_DELAY_MS=15000 -# CONFIG_AIC7XXX_DEBUG_ENABLE is not set -CONFIG_ATA=y -CONFIG_ATA_PIIX=y -CONFIG_PATA_IT8213=m -CONFIG_PATA_OLDPIIX=y -CONFIG_PATA_MPIIX=y -CONFIG_ATA_GENERIC=y -CONFIG_PATA_LEGACY=y -CONFIG_MD=y -CONFIG_BLK_DEV_MD=m -CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_RAID10=m -CONFIG_MD_RAID456=m -CONFIG_MD_MULTIPATH=m -CONFIG_MD_FAULTY=m -CONFIG_BLK_DEV_DM=m -CONFIG_DM_CRYPT=m -CONFIG_DM_SNAPSHOT=m -CONFIG_DM_MIRROR=m -CONFIG_DM_ZERO=m -CONFIG_DM_MULTIPATH=m -CONFIG_NETDEVICES=y -CONFIG_BONDING=m -CONFIG_DUMMY=m -CONFIG_EQUALIZER=m -CONFIG_IFB=m -CONFIG_MACVLAN=m -CONFIG_TUN=m -CONFIG_VETH=m -CONFIG_VIRTIO_NET=y -CONFIG_PCNET32=y -CONFIG_CHELSIO_T3=m -CONFIG_AX88796=m -CONFIG_NETXEN_NIC=m -CONFIG_TC35815=m -CONFIG_BROADCOM_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_ICPLUS_PHY=m -CONFIG_LXT_PHY=m -CONFIG_MARVELL_PHY=m -CONFIG_QSEMI_PHY=m -CONFIG_REALTEK_PHY=m -CONFIG_SMSC_PHY=m -CONFIG_VITESSE_PHY=m -CONFIG_ATMEL=m -CONFIG_PCI_ATMEL=m -CONFIG_IPW2100=m -CONFIG_IPW2100_MONITOR=y -CONFIG_HOSTAP=m -CONFIG_HOSTAP_FIRMWARE=y -CONFIG_HOSTAP_FIRMWARE_NVRAM=y -CONFIG_HOSTAP_PLX=m -CONFIG_HOSTAP_PCI=m -CONFIG_PRISM54=m -CONFIG_LIBERTAS=m -CONFIG_INPUT_MOUSEDEV=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_POWER_RESET=y -CONFIG_POWER_RESET_PIIX4_POWEROFF=y -CONFIG_POWER_RESET_SYSCON=y -# CONFIG_HWMON is not set -CONFIG_FB=y -CONFIG_FB_CIRRUS=y -# CONFIG_VGA_CONSOLE is not set -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_HID=m -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_CMOS=y -CONFIG_UIO=m -CONFIG_UIO_CIF=m -CONFIG_VIRTIO_PCI=y -CONFIG_VIRTIO_BALLOON=y -CONFIG_VIRTIO_MMIO=y -CONFIG_EXT2_FS=y 
-CONFIG_EXT3_FS=y -CONFIG_REISERFS_FS=m -CONFIG_REISERFS_PROC_INFO=y -CONFIG_REISERFS_FS_XATTR=y -CONFIG_REISERFS_FS_POSIX_ACL=y -CONFIG_REISERFS_FS_SECURITY=y -CONFIG_JFS_FS=m -CONFIG_JFS_POSIX_ACL=y -CONFIG_JFS_SECURITY=y -CONFIG_XFS_FS=m -CONFIG_XFS_QUOTA=y -CONFIG_XFS_POSIX_ACL=y -CONFIG_QUOTA=y -CONFIG_QFMT_V2=y -CONFIG_FUSE_FS=m -CONFIG_ISO9660_FS=m -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_AFFS_FS=m -CONFIG_HFS_FS=m -CONFIG_HFSPLUS_FS=m -CONFIG_BEFS_FS=m -CONFIG_BFS_FS=m -CONFIG_EFS_FS=m -CONFIG_JFFS2_FS=m -CONFIG_JFFS2_FS_XATTR=y -CONFIG_JFFS2_COMPRESSION_OPTIONS=y -CONFIG_JFFS2_RUBIN=y -CONFIG_CRAMFS=m -CONFIG_VXFS_FS=m -CONFIG_MINIX_FS=m -CONFIG_ROMFS_FS=m -CONFIG_SYSV_FS=m -CONFIG_UFS_FS=m -CONFIG_NFS_FS=y -CONFIG_ROOT_NFS=y -CONFIG_NFSD=y -CONFIG_NFSD_V3=y -CONFIG_NLS_CODEPAGE_437=m -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ASCII=m -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_LRW=m -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_MD4=m -CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_CAMELLIA=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig index 636311d67a53..c0d3156ef640 100644 --- a/arch/mips/configs/maltaup_xpa_defconfig +++ b/arch/mips/configs/maltaup_xpa_defconfig @@ -230,16 +230,12 @@ CONFIG_MTD_PHYSMAP_OF=y CONFIG_MTD_UBI=m CONFIG_MTD_UBI_GLUEBI=m CONFIG_BLK_DEV_FD=m -CONFIG_BLK_DEV_UMEM=m CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_TC86C001=m CONFIG_RAID_ATTRS=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig index 5e389db35fa7..69f2300107f9 100644 --- a/arch/mips/configs/rbtx49xx_defconfig +++ b/arch/mips/configs/rbtx49xx_defconfig @@ -44,9 +44,6 @@ CONFIG_MTD_NAND_TXX9NDFMC=m CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_IDE=y -CONFIG_BLK_DEV_IDE_TX4938=y -CONFIG_BLK_DEV_IDE_TX4939=y CONFIG_NETDEVICES=y CONFIG_NE2000=y CONFIG_SMC91X=y diff --git a/arch/mips/configs/sb1250_swarm_defconfig b/arch/mips/configs/sb1250_swarm_defconfig index bb0b1b22ebe1..de94bf756a93 100644 --- a/arch/mips/configs/sb1250_swarm_defconfig +++ 
b/arch/mips/configs/sb1250_swarm_defconfig @@ -17,7 +17,6 @@ CONFIG_64BIT=y CONFIG_SMP=y CONFIG_NR_CPUS=2 CONFIG_HZ_1000=y -CONFIG_PCI=y CONFIG_MIPS32_O32=y CONFIG_PM=y CONFIG_MODULES=y @@ -34,25 +33,28 @@ CONFIG_INET=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y -CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_INET_XFRM_MODE_BEET=m CONFIG_TCP_MD5SIG=y # CONFIG_IPV6 is not set CONFIG_NETWORK_SECMARK=y CONFIG_CFG80211=m CONFIG_MAC80211=m CONFIG_RFKILL=m +CONFIG_PCI=y CONFIG_FW_LOADER=m CONFIG_CONNECTOR=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=9220 CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_IDETAPE=y CONFIG_RAID_ATTRS=m +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_SCSI_CONSTANTS=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_ATA=y +# CONFIG_ATA_BMDMA is not set +CONFIG_PATA_PLATFORM=y CONFIG_NETDEVICES=y CONFIG_MACVLAN=m CONFIG_SB1250_MAC=y @@ -88,18 +90,14 @@ CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_DES=m CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m -CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_DEFLATE=m CONFIG_CRYPTO_LZO=m diff --git a/arch/mips/configs/workpad_defconfig b/arch/mips/configs/workpad_defconfig index 891a5f77305d..4798dc86c9ce 100644 --- a/arch/mips/configs/workpad_defconfig +++ b/arch/mips/configs/workpad_defconfig @@ -26,9 +26,12 @@ CONFIG_IP_MULTICAST=y # CONFIG_IPV6 is not set CONFIG_NETWORK_SECMARK=y CONFIG_BLK_DEV_RAM=m -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECS=m -CONFIG_IDE_GENERIC=y +# CONFIG_SCSI_PROC_FS is not set +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_ATA=y +# CONFIG_ATA_VERBOSE_ERROR is not set +# CONFIG_ATA_FORCE is not set +# CONFIG_ATA_BMDMA is not set CONFIG_NETDEVICES=y CONFIG_PCMCIA_3C574=m CONFIG_PCMCIA_3C589=m diff --git a/arch/mips/crypto/.gitignore b/arch/mips/crypto/.gitignore new file mode 100644 index 000000000000..0d47d4f21c6d --- /dev/null +++ b/arch/mips/crypto/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +poly1305-core.S diff --git a/arch/mips/crypto/poly1305-glue.c b/arch/mips/crypto/poly1305-glue.c index fc881b46d911..bc6110fb98e0 100644 --- a/arch/mips/crypto/poly1305-glue.c +++ b/arch/mips/crypto/poly1305-glue.c @@ -17,7 +17,7 @@ asmlinkage void poly1305_init_mips(void *state, const u8 *key); asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit); asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce); -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key) +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) { poly1305_init_mips(&dctx->h, key); dctx->s[0] = get_unaligned_le32(key + 16); diff --git a/arch/mips/generic/board-boston.its.S b/arch/mips/generic/board-boston.its.S index a7f51f97b910..c45ad2759421 100644 --- a/arch/mips/generic/board-boston.its.S +++ b/arch/mips/generic/board-boston.its.S @@ -1,22 +1,22 @@ / { images { - fdt@boston { + fdt-boston { description = "img,boston Device Tree"; data = /incbin/("boot/dts/img/boston.dtb"); type = "flat_dt"; arch = "mips"; compression = "none"; - hash@0 { + hash { algo = "sha1"; }; }; }; configurations { - conf@boston { + conf-boston { description = "Boston Linux kernel"; - 
kernel = "kernel@0"; - fdt = "fdt@boston"; + kernel = "kernel"; + fdt = "fdt-boston"; }; }; }; diff --git a/arch/mips/generic/board-jaguar2.its.S b/arch/mips/generic/board-jaguar2.its.S index fb0e589eeff7..c2b8d479b26c 100644 --- a/arch/mips/generic/board-jaguar2.its.S +++ b/arch/mips/generic/board-jaguar2.its.S @@ -1,23 +1,23 @@ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ / { images { - fdt@jaguar2_pcb110 { + fdt-jaguar2_pcb110 { description = "MSCC Jaguar2 PCB110 Device Tree"; data = /incbin/("boot/dts/mscc/jaguar2_pcb110.dtb"); type = "flat_dt"; arch = "mips"; compression = "none"; - hash@0 { + hash { algo = "sha1"; }; }; - fdt@jaguar2_pcb111 { + fdt-jaguar2_pcb111 { description = "MSCC Jaguar2 PCB111 Device Tree"; data = /incbin/("boot/dts/mscc/jaguar2_pcb111.dtb"); type = "flat_dt"; arch = "mips"; compression = "none"; - hash@0 { + hash { algo = "sha1"; }; }; @@ -26,14 +26,14 @@ configurations { pcb110 { description = "Jaguar2 Linux kernel"; - kernel = "kernel@0"; - fdt = "fdt@jaguar2_pcb110"; + kernel = "kernel"; + fdt = "fdt-jaguar2_pcb110"; ramdisk = "ramdisk"; }; pcb111 { description = "Jaguar2 Linux kernel"; - kernel = "kernel@0"; - fdt = "fdt@jaguar2_pcb111"; + kernel = "kernel"; + fdt = "fdt-jaguar2_pcb111"; ramdisk = "ramdisk"; }; }; diff --git a/arch/mips/generic/board-luton.its.S b/arch/mips/generic/board-luton.its.S index 39a543f62f25..bd9837c9af97 100644 --- a/arch/mips/generic/board-luton.its.S +++ b/arch/mips/generic/board-luton.its.S @@ -1,13 +1,13 @@ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ / { images { - fdt@luton_pcb091 { + fdt-luton_pcb091 { description = "MSCC Luton PCB091 Device Tree"; data = /incbin/("boot/dts/mscc/luton_pcb091.dtb"); type = "flat_dt"; arch = "mips"; compression = "none"; - hash@0 { + hash { algo = "sha1"; }; }; @@ -16,8 +16,8 @@ configurations { pcb091 { description = "Luton Linux kernel"; - kernel = "kernel@0"; - fdt = "fdt@luton_pcb091"; + kernel = "kernel"; + fdt = "fdt-luton_pcb091"; }; }; }; diff --git a/arch/mips/generic/board-ni169445.its.S b/arch/mips/generic/board-ni169445.its.S index e4cb4f95a8cc..0a2e8f7a8526 100644 --- a/arch/mips/generic/board-ni169445.its.S +++ b/arch/mips/generic/board-ni169445.its.S @@ -1,22 +1,22 @@ / { images { - fdt@ni169445 { + fdt-ni169445 { description = "NI 169445 device tree"; data = /incbin/("boot/dts/ni/169445.dtb"); type = "flat_dt"; arch = "mips"; compression = "none"; - hash@0 { + hash { algo = "sha1"; }; }; }; configurations { - conf@ni169445 { + conf-ni169445 { description = "NI 169445 Linux Kernel"; - kernel = "kernel@0"; - fdt = "fdt@ni169445"; + kernel = "kernel"; + fdt = "fdt-ni169445"; }; }; }; diff --git a/arch/mips/generic/board-ocelot.its.S b/arch/mips/generic/board-ocelot.its.S index 3da23988149a..8c7e3a1b68d3 100644 --- a/arch/mips/generic/board-ocelot.its.S +++ b/arch/mips/generic/board-ocelot.its.S @@ -1,40 +1,40 @@ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ / { images { - fdt@ocelot_pcb123 { + fdt-ocelot_pcb123 { description = "MSCC Ocelot PCB123 Device Tree"; data = /incbin/("boot/dts/mscc/ocelot_pcb123.dtb"); type = "flat_dt"; arch = "mips"; compression = "none"; - hash@0 { + hash { algo = "sha1"; }; }; - fdt@ocelot_pcb120 { + fdt-ocelot_pcb120 { description = "MSCC Ocelot PCB120 Device Tree"; data = /incbin/("boot/dts/mscc/ocelot_pcb120.dtb"); type = "flat_dt"; arch = "mips"; compression = "none"; - hash@0 { + hash { algo = "sha1"; }; }; }; configurations { - conf@ocelot_pcb123 { + conf-ocelot_pcb123 { description = "Ocelot Linux kernel"; - kernel = "kernel@0"; - fdt 
= "fdt@ocelot_pcb123"; + kernel = "kernel"; + fdt = "fdt-ocelot_pcb123"; }; - conf@ocelot_pcb120 { + conf-ocelot_pcb120 { description = "Ocelot Linux kernel"; - kernel = "kernel@0"; - fdt = "fdt@ocelot_pcb120"; + kernel = "kernel"; + fdt = "fdt-ocelot_pcb120"; }; }; }; diff --git a/arch/mips/generic/board-serval.its.S b/arch/mips/generic/board-serval.its.S index 4ea4fc9d757f..dde833efe980 100644 --- a/arch/mips/generic/board-serval.its.S +++ b/arch/mips/generic/board-serval.its.S @@ -1,13 +1,13 @@ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ / { images { - fdt@serval_pcb105 { + fdt-serval_pcb105 { description = "MSCC Serval PCB105 Device Tree"; data = /incbin/("boot/dts/mscc/serval_pcb105.dtb"); type = "flat_dt"; arch = "mips"; compression = "none"; - hash@0 { + hash { algo = "sha1"; }; }; @@ -16,8 +16,8 @@ configurations { pcb105 { description = "Serval Linux kernel"; - kernel = "kernel@0"; - fdt = "fdt@serval_pcb105"; + kernel = "kernel"; + fdt = "fdt-serval_pcb105"; ramdisk = "ramdisk"; }; }; diff --git a/arch/mips/generic/board-xilfpga.its.S b/arch/mips/generic/board-xilfpga.its.S index a2e773d3f14f..08c1e900eb4e 100644 --- a/arch/mips/generic/board-xilfpga.its.S +++ b/arch/mips/generic/board-xilfpga.its.S @@ -1,22 +1,22 @@ / { images { - fdt@xilfpga { + fdt-xilfpga { description = "MIPSfpga (xilfpga) Device Tree"; data = /incbin/("boot/dts/xilfpga/nexys4ddr.dtb"); type = "flat_dt"; arch = "mips"; compression = "none"; - hash@0 { + hash { algo = "sha1"; }; }; }; configurations { - conf@xilfpga { + conf-xilfpga { description = "MIPSfpga Linux kernel"; - kernel = "kernel@0"; - fdt = "fdt@xilfpga"; + kernel = "kernel"; + fdt = "fdt-xilfpga"; }; }; }; diff --git a/arch/mips/generic/vmlinux.its.S b/arch/mips/generic/vmlinux.its.S index 1a08438fd893..3e254676540f 100644 --- a/arch/mips/generic/vmlinux.its.S +++ b/arch/mips/generic/vmlinux.its.S @@ -6,7 +6,7 @@ #address-cells = <ADDR_CELLS>; images { - kernel@0 { + kernel { description = KERNEL_NAME; data = /incbin/(VMLINUX_BINARY); type = "kernel"; @@ -15,18 +15,18 @@ compression = VMLINUX_COMPRESSION; load = /bits/ ADDR_BITS <VMLINUX_LOAD_ADDRESS>; entry = /bits/ ADDR_BITS <VMLINUX_ENTRY_ADDRESS>; - hash@0 { + hash { algo = "sha1"; }; }; }; configurations { - default = "conf@default"; + default = "conf-default"; - conf@default { + conf-default { description = "Generic Linux kernel"; - kernel = "kernel@0"; + kernel = "kernel"; }; }; }; diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 8f6fe69674b7..dee172716581 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -1,9 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 # MIPS headers -generated-y += syscall_table_32_o32.h -generated-y += syscall_table_64_n32.h -generated-y += syscall_table_64_n64.h -generated-y += syscall_table_64_o32.h +generated-y += syscall_table_n32.h +generated-y += syscall_table_n64.h +generated-y += syscall_table_o32.h generated-y += unistd_nr_n32.h generated-y += unistd_nr_n64.h generated-y += unistd_nr_o32.h diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index 86f2323ebe6b..ca83ada7015f 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h @@ -44,8 +44,7 @@ .endm #endif -#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \ - defined(CONFIG_CPU_MIPSR6) +#ifdef CONFIG_CPU_HAS_DIEI .macro local_irq_enable reg=t0 ei irq_enable_hazard diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h index 
5be10ece3ef0..4c2e8173e6ec 100644 --- a/arch/mips/include/asm/bootinfo.h +++ b/arch/mips/include/asm/bootinfo.h @@ -107,7 +107,7 @@ extern void (*free_init_pages_eva)(void *begin, void *end); extern char arcs_cmdline[COMMAND_LINE_SIZE]; /* - * Registers a0, a1, a3 and a4 as passed to the kernel entry by firmware + * Registers a0, a1, a2 and a3 as passed to the kernel entry by firmware */ extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3; diff --git a/arch/mips/include/asm/div64.h b/arch/mips/include/asm/div64.h index dc5ea5736440..ceece76fc971 100644 --- a/arch/mips/include/asm/div64.h +++ b/arch/mips/include/asm/div64.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2000, 2004 Maciej W. Rozycki + * Copyright (C) 2000, 2004, 2021 Maciej W. Rozycki * Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org) * * This file is subject to the terms and conditions of the GNU General Public @@ -9,25 +9,18 @@ #ifndef __ASM_DIV64_H #define __ASM_DIV64_H -#include <asm-generic/div64.h> - -#if BITS_PER_LONG == 64 +#include <asm/bitsperlong.h> -#include <linux/types.h> +#if BITS_PER_LONG == 32 /* * No traps on overflows for any of these... */ -#define __div64_32(n, base) \ -({ \ +#define do_div64_32(res, high, low, base) ({ \ unsigned long __cf, __tmp, __tmp2, __i; \ unsigned long __quot32, __mod32; \ - unsigned long __high, __low; \ - unsigned long long __n; \ \ - __high = *__n >> 32; \ - __low = __n; \ __asm__( \ " .set push \n" \ " .set noat \n" \ @@ -51,18 +44,48 @@ " subu %0, %0, %z6 \n" \ " addiu %2, %2, 1 \n" \ "3: \n" \ - " bnez %4, 0b\n\t" \ - " srl %5, %1, 0x1f\n\t" \ + " bnez %4, 0b \n" \ + " srl %5, %1, 0x1f \n" \ " .set pop" \ : "=&r" (__mod32), "=&r" (__tmp), \ "=&r" (__quot32), "=&r" (__cf), \ "=&r" (__i), "=&r" (__tmp2) \ - : "Jr" (base), "0" (__high), "1" (__low)); \ + : "Jr" (base), "0" (high), "1" (low)); \ \ - (__n) = __quot32; \ + (res) = __quot32; \ __mod32; \ }) -#endif /* BITS_PER_LONG == 64 */ +#define __div64_32(n, base) ({ \ + unsigned long __upper, __low, __high, __radix; \ + unsigned long long __quot; \ + unsigned long long __div; \ + unsigned long __mod; \ + \ + __div = (*n); \ + __radix = (base); \ + \ + __high = __div >> 32; \ + __low = __div; \ + \ + if (__high < __radix) { \ + __upper = __high; \ + __high = 0; \ + } else { \ + __upper = __high % __radix; \ + __high /= __radix; \ + } \ + \ + __mod = do_div64_32(__low, __upper, __low, __radix); \ + \ + __quot = __high; \ + __quot = __quot << 32 | __low; \ + (*n) = __quot; \ + __mod; \ +}) + +#endif /* BITS_PER_LONG == 32 */ + +#include <asm-generic/div64.h> #endif /* __ASM_DIV64_H */ diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 78537aa23500..6f5c86d2bab4 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -100,11 +100,23 @@ static inline void set_io_port_base(unsigned long base) * almost all conceivable cases a device driver should not be using * this function */ -static inline unsigned long virt_to_phys(volatile const void *address) +static inline unsigned long __virt_to_phys_nodebug(volatile const void *address) { return __pa(address); } +#ifdef CONFIG_DEBUG_VIRTUAL +extern phys_addr_t __virt_to_phys(volatile const void *x); +#else +#define __virt_to_phys(x) __virt_to_phys_nodebug(x) +#endif + +#define virt_to_phys virt_to_phys +static inline phys_addr_t virt_to_phys(const volatile void *x) +{ + return __virt_to_phys(x); +} + /* * phys_to_virt - map physical address to virtual * @address: address to remap @@ -552,11 +564,6 @@ extern void (*_dma_cache_inv)(unsigned long 
start, unsigned long size); */ #define xlate_dev_mem_ptr(p) __va(p) -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - void __ioread64_copy(void *to, const void __iomem *from, size_t count); #endif /* _ASM_IO_H */ diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 3a5612e7304c..fca4547d580f 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -88,44 +88,10 @@ #define KVM_HALT_POLL_NS_DEFAULT 500000 -#ifdef CONFIG_KVM_MIPS_VZ extern unsigned long GUESTID_MASK; extern unsigned long GUESTID_FIRST_VERSION; extern unsigned long GUESTID_VERSION_MASK; -#endif - - -/* - * Special address that contains the comm page, used for reducing # of traps - * This needs to be within 32Kb of 0x0 (so the zero register can be used), but - * preferably not at 0x0 so that most kernel NULL pointer dereferences can be - * caught. - */ -#define KVM_GUEST_COMMPAGE_ADDR ((PAGE_SIZE > 0x8000) ? 0 : \ - (0x8000 - PAGE_SIZE)) - -#define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \ - ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0)) - -#define KVM_GUEST_KUSEG 0x00000000UL -#define KVM_GUEST_KSEG0 0x40000000UL -#define KVM_GUEST_KSEG1 0x40000000UL -#define KVM_GUEST_KSEG23 0x60000000UL -#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0xe0000000) -#define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff) - -#define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0) -#define KVM_GUEST_CKSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1) -#define KVM_GUEST_CKSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23) - -/* - * Map an address to a certain kernel segment - */ -#define KVM_GUEST_KSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0) -#define KVM_GUEST_KSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1) -#define KVM_GUEST_KSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23) -#define KVM_INVALID_PAGE 0xdeadbeef #define KVM_INVALID_ADDR 0xdeadbeef /* @@ -165,7 +131,6 @@ struct kvm_vcpu_stat { u64 fpe_exits; u64 msa_disabled_exits; u64 flush_dcache_exits; -#ifdef CONFIG_KVM_MIPS_VZ u64 vz_gpsi_exits; u64 vz_gsfc_exits; u64 vz_hc_exits; @@ -177,7 +142,6 @@ struct kvm_vcpu_stat { #ifdef CONFIG_CPU_LOONGSON64 u64 vz_cpucfg_exits; #endif -#endif u64 halt_successful_poll; u64 halt_attempted_poll; u64 halt_poll_success_ns; @@ -303,14 +267,6 @@ enum emulation_result { EMULATE_HYPERCALL, /* HYPCALL instruction */ }; -#define mips3_paddr_to_tlbpfn(x) \ - (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME) -#define mips3_tlbpfn_to_paddr(x) \ - ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT) - -#define MIPS3_PG_SHIFT 6 -#define MIPS3_PG_FRAME 0x3fffffc0 - #if defined(CONFIG_64BIT) #define VPN2_MASK GENMASK(cpu_vmbits - 1, 13) #else @@ -337,7 +293,6 @@ struct kvm_mips_tlb { #define KVM_MIPS_AUX_FPU 0x1 #define KVM_MIPS_AUX_MSA 0x2 -#define KVM_MIPS_GUEST_TLB_SIZE 64 struct kvm_vcpu_arch { void *guest_ebase; int (*vcpu_run)(struct kvm_vcpu *vcpu); @@ -370,9 +325,6 @@ struct kvm_vcpu_arch { /* COP0 State */ struct mips_coproc *cop0; - /* Host KSEG0 address of the EI/DI offset */ - void *kseg0_commpage; - /* Resume PC after MMIO completion */ unsigned long io_pc; /* GPR used as IO source/target */ @@ -398,19 +350,9 @@ struct kvm_vcpu_arch { /* Bitmask of pending exceptions to be cleared */ unsigned long pending_exceptions_clr; - /* S/W Based TLB for guest */ - struct kvm_mips_tlb 
guest_tlb[KVM_MIPS_GUEST_TLB_SIZE]; - - /* Guest kernel/user [partial] mm */ - struct mm_struct guest_kernel_mm, guest_user_mm; - - /* Guest ASID of last user mode execution */ - unsigned int last_user_gasid; - /* Cache some mmu pages needed inside spinlock regions */ struct kvm_mmu_memory_cache mmu_page_cache; -#ifdef CONFIG_KVM_MIPS_VZ /* vcpu's vzguestid is different on each host cpu in an smp system */ u32 vzguestid[NR_CPUS]; @@ -421,7 +363,6 @@ struct kvm_vcpu_arch { /* emulated guest MAAR registers */ unsigned long maar[6]; -#endif /* Last CPU the VCPU state was loaded on */ int last_sched_cpu; @@ -651,20 +592,6 @@ static inline void kvm_change_##name1(struct mips_coproc *cop0, \ __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \ __BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type) -#ifndef CONFIG_KVM_MIPS_VZ - -/* - * T&E (trap & emulate software based virtualisation) - * We generate the common accessors operating exclusively on the saved context - * in RAM. - */ - -#define __BUILD_KVM_RW_HW __BUILD_KVM_RW_SW -#define __BUILD_KVM_SET_HW __BUILD_KVM_SET_SW -#define __BUILD_KVM_ATOMIC_HW __BUILD_KVM_ATOMIC_SW - -#else - /* * VZ (hardware assisted virtualisation) * These macros use the active guest state in VZ mode (hardware registers), @@ -697,8 +624,6 @@ static inline void kvm_change_##name1(struct mips_coproc *cop0, \ */ #define __BUILD_KVM_ATOMIC_HW __BUILD_KVM_SET_HW -#endif - /* * Define accessors for CP0 registers that are accessible to the guest. These * are primarily used by common emulation code, which may need to access the @@ -815,14 +740,7 @@ struct kvm_mips_callbacks { int (*vcpu_init)(struct kvm_vcpu *vcpu); void (*vcpu_uninit)(struct kvm_vcpu *vcpu); int (*vcpu_setup)(struct kvm_vcpu *vcpu); - void (*flush_shadow_all)(struct kvm *kvm); - /* - * Must take care of flushing any cached GPA PTEs (e.g. guest entries in - * VZ root TLB, or T&E GVA page tables and corresponding root TLB - * mappings). 
- */ - void (*flush_shadow_memslot)(struct kvm *kvm, - const struct kvm_memory_slot *slot); + void (*prepare_flush_shadow)(struct kvm *kvm); gpa_t (*gva_to_gpa)(gva_t gva); void (*queue_timer_int)(struct kvm_vcpu *vcpu); void (*dequeue_timer_int)(struct kvm_vcpu *vcpu); @@ -874,42 +792,9 @@ void kvm_drop_fpu(struct kvm_vcpu *vcpu); void kvm_lose_fpu(struct kvm_vcpu *vcpu); /* TLB handling */ -u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu); - -u32 kvm_get_user_asid(struct kvm_vcpu *vcpu); - -u32 kvm_get_commpage_asid (struct kvm_vcpu *vcpu); - -#ifdef CONFIG_KVM_MIPS_VZ int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr, struct kvm_vcpu *vcpu, bool write_fault); -#endif -extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr, - struct kvm_vcpu *vcpu, - bool write_fault); - -extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, - struct kvm_vcpu *vcpu); -extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, - struct kvm_mips_tlb *tlb, - unsigned long gva, - bool write_fault); - -extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu, - bool write_fault); - -extern void kvm_mips_dump_host_tlbs(void); -extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu); -extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi, - bool user, bool kernel); - -extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, - unsigned long entryhi); - -#ifdef CONFIG_KVM_MIPS_VZ int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi); int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva, unsigned long *gpa); @@ -923,58 +808,17 @@ void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index, void kvm_loongson_clear_guest_vtlb(void); void kvm_loongson_clear_guest_ftlb(void); #endif -#endif - -void kvm_mips_suspend_mm(int cpu); -void kvm_mips_resume_mm(int cpu); /* MMU handling */ -/** - * enum kvm_mips_flush - Types of MMU flushes. - * @KMF_USER: Flush guest user virtual memory mappings. - * Guest USeg only. - * @KMF_KERN: Flush guest kernel virtual memory mappings. - * Guest USeg and KSeg2/3. - * @KMF_GPA: Flush guest physical memory mappings. - * Also includes KSeg0 if KMF_KERN is set. 
- */ -enum kvm_mips_flush { - KMF_USER = 0x0, - KMF_KERN = 0x1, - KMF_GPA = 0x2, -}; -void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags); bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn); int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn); pgd_t *kvm_pgd_alloc(void); void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); -void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr, - bool user); -void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu); -void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu); - -enum kvm_mips_fault_result { - KVM_MIPS_MAPPED = 0, - KVM_MIPS_GVA, - KVM_MIPS_GPA, - KVM_MIPS_TLB, - KVM_MIPS_TLBINV, - KVM_MIPS_TLBMOD, -}; -enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu, - unsigned long gva, - bool write); #define KVM_ARCH_WANT_MMU_NOTIFIER -int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end, unsigned flags); -int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); -int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); -int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); /* Emulation */ -int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out); enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause); int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out); int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out); @@ -1006,68 +850,6 @@ static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu) return false; } -extern enum emulation_result kvm_mips_emulate_inst(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_syscall(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_handle_ri(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu); u32 kvm_mips_read_count(struct kvm_vcpu *vcpu); @@ -1087,26 +869,9 @@ ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count); int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before, u32 count, int 
min_drift); -#ifdef CONFIG_KVM_MIPS_VZ void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu); void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu); -#else -static inline void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu) {} -static inline void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu) {} -#endif -enum emulation_result kvm_mips_check_privilege(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst, - u32 *opc, - u32 cause, - struct kvm_vcpu *vcpu); -enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst, - u32 *opc, - u32 cause, - struct kvm_vcpu *vcpu); enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, u32 cause, struct kvm_vcpu *vcpu); @@ -1117,27 +882,12 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, /* COP0 */ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu); -unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu); -unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu); -unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu); -unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu); - /* Hypercalls (hypcall.c) */ enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu, union mips_instruction inst); int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu); -/* Dynamic binary translation */ -extern int kvm_mips_trans_cache_index(union mips_instruction inst, - u32 *opc, struct kvm_vcpu *vcpu); -extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc, - struct kvm_vcpu *vcpu); -extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc, - struct kvm_vcpu *vcpu); -extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc, - struct kvm_vcpu *vcpu); - /* Misc */ extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu); extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm); @@ -1154,4 +904,7 @@ static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} +#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB +int kvm_arch_flush_remote_tlb(struct kvm *kvm); + #endif /* __MIPS_KVM_HOST_H__ */ diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h index c38b38ce5a3d..b071a7353ee1 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h @@ -157,4 +157,12 @@ octeon_main_processor: .macro smp_slave_setup .endm +#define USE_KEXEC_SMP_WAIT_FINAL + .macro kexec_smp_wait_final + .set push + .set noreorder + synci 0($0) + .set pop + .endm + #endif /* __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H */ diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h index c3ac06a6acd2..b247575c5e69 100644 --- a/arch/mips/include/asm/mach-generic/spaces.h +++ b/arch/mips/include/asm/mach-generic/spaces.h @@ -30,11 +30,7 @@ #endif /* __ASSEMBLY__ */ #ifdef CONFIG_32BIT -#ifdef CONFIG_KVM_GUEST -#define CAC_BASE _AC(0x40000000, UL) -#else #define CAC_BASE _AC(0x80000000, UL) -#endif #ifndef IO_BASE #define IO_BASE _AC(0xa0000000, UL) #endif @@ -43,12 +39,8 @@ #endif #ifndef MAP_BASE -#ifdef CONFIG_KVM_GUEST -#define MAP_BASE _AC(0x60000000, UL) -#else #define MAP_BASE _AC(0xc0000000, UL) #endif -#endif /* * Memory above this physical address will be considered highmem. 
@@ -100,11 +92,7 @@ #endif #ifndef FIXADDR_TOP -#ifdef CONFIG_KVM_GUEST -#define FIXADDR_TOP ((unsigned long)(long)(int)0x7ffe0000) -#else #define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000) #endif -#endif #endif /* __ASM_MACH_GENERIC_SPACES_H */ diff --git a/arch/mips/include/asm/mach-loongson64/boot_param.h b/arch/mips/include/asm/mach-loongson64/boot_param.h index 4592841b6b0c..035b1a69e2d0 100644 --- a/arch/mips/include/asm/mach-loongson64/boot_param.h +++ b/arch/mips/include/asm/mach-loongson64/boot_param.h @@ -198,33 +198,6 @@ enum loongson_bridge_type { VIRTUAL = 3 }; -struct loongson_system_configuration { - u32 nr_cpus; - u32 nr_nodes; - int cores_per_node; - int cores_per_package; - u16 boot_cpu_id; - u16 reserved_cpus_mask; - enum loongson_cpu_type cputype; - enum loongson_bridge_type bridgetype; - u64 ht_control_base; - u64 pci_mem_start_addr; - u64 pci_mem_end_addr; - u64 pci_io_base; - u64 restart_addr; - u64 poweroff_addr; - u64 suspend_addr; - u64 vgabios_addr; - u32 dma_mask_bits; - char ecname[32]; - u32 nr_uarts; - struct uart_device uarts[MAX_UARTS]; - u32 nr_sensors; - struct sensor_device sensors[MAX_SENSORS]; - u64 workarounds; - void (*early_config)(void); -}; - extern struct efi_memory_map_loongson *loongson_memmap; extern struct loongson_system_configuration loongson_sysconf; diff --git a/arch/mips/include/asm/mach-loongson64/builtin_dtbs.h b/arch/mips/include/asm/mach-loongson64/builtin_dtbs.h index 839410cda621..8be710557bdb 100644 --- a/arch/mips/include/asm/mach-loongson64/builtin_dtbs.h +++ b/arch/mips/include/asm/mach-loongson64/builtin_dtbs.h @@ -8,6 +8,7 @@ #ifndef __ASM_MACH_LOONGSON64_BUILTIN_DTBS_H_ #define __ASM_MACH_LOONGSON64_BUILTIN_DTBS_H_ +extern u32 __dtb_loongson64_2core_2k1000_begin[]; extern u32 __dtb_loongson64c_4core_ls7a_begin[]; extern u32 __dtb_loongson64c_4core_rs780e_begin[]; extern u32 __dtb_loongson64c_8core_rs780e_begin[]; diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h index e4d77f4f0fe3..13373c5144f8 100644 --- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h @@ -75,4 +75,31 @@ .set pop .endm +#define USE_KEXEC_SMP_WAIT_FINAL + .macro kexec_smp_wait_final + /* s0:prid s1:initfn */ + /* a0:base t1:cpuid t2:node t9:count */ + mfc0 t1, CP0_EBASE + andi t1, MIPS_EBASE_CPUNUM + dins a0, t1, 8, 2 /* insert core id*/ + dext t2, t1, 2, 2 + dins a0, t2, 44, 2 /* insert node id */ + mfc0 s0, CP0_PRID + andi s0, s0, (PRID_IMP_MASK | PRID_REV_MASK) + beq s0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3B_R1), 1f + beq s0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3B_R2), 1f + b 2f /* Loongson-3A1000/3A2000/3A3000/3A4000 */ +1: dins a0, t2, 14, 2 /* Loongson-3B1000/3B1500 need bit 15~14 */ +2: li t9, 0x100 /* wait for init loop */ +3: addiu t9, -1 /* limit mailbox access */ + bnez t9, 3b + lw s1, 0x20(a0) /* check PC as an indicator */ + beqz s1, 2b + ld s1, 0x20(a0) /* get PC via mailbox reg0 */ + ld sp, 0x28(a0) /* get SP via mailbox reg1 */ + ld gp, 0x30(a0) /* get GP via mailbox reg2 */ + ld a1, 0x38(a0) + jr s1 /* jump to initial PC */ + .endm + #endif /* __ASM_MACH_LOONGSON64_KERNEL_ENTRY_H */ diff --git a/arch/mips/include/asm/mach-loongson64/loongson.h b/arch/mips/include/asm/mach-loongson64/loongson.h index ac1c20e172a2..f7c3ab6d724e 100644 --- a/arch/mips/include/asm/mach-loongson64/loongson.h +++ b/arch/mips/include/asm/mach-loongson64/loongson.h @@ -12,6 +12,30 @@ #include 
<linux/irq.h> #include <boot_param.h> +enum loongson_fw_interface { + LOONGSON_LEFI, + LOONGSON_DTB, +}; + +/* machine-specific boot configuration */ +struct loongson_system_configuration { + enum loongson_fw_interface fw_interface; + u32 nr_cpus; + u32 nr_nodes; + int cores_per_node; + int cores_per_package; + u16 boot_cpu_id; + u16 reserved_cpus_mask; + enum loongson_cpu_type cputype; + enum loongson_bridge_type bridgetype; + u64 restart_addr; + u64 poweroff_addr; + u64 suspend_addr; + u64 vgabios_addr; + u32 dma_mask_bits; + u64 workarounds; + void (*early_config)(void); +}; /* machine-specific reboot/halt operation */ extern void mach_prepare_reboot(void); @@ -23,7 +47,8 @@ extern u32 memsize, highmemsize; extern const struct plat_smp_ops loongson3_smp_ops; /* loongson-specific command line, env and memory initialization */ -extern void __init prom_init_env(void); +extern void __init prom_dtb_init_env(void); +extern void __init prom_lefi_init_env(void); extern void __init szmem(unsigned int node); extern void *loongson_fdt_blob; diff --git a/arch/mips/include/asm/mach-ralink/mt7621.h b/arch/mips/include/asm/mach-ralink/mt7621.h index e1af1ba50bd8..6bbf082dd149 100644 --- a/arch/mips/include/asm/mach-ralink/mt7621.h +++ b/arch/mips/include/asm/mach-ralink/mt7621.h @@ -24,9 +24,10 @@ #define CHIP_REV_VER_SHIFT 8 #define CHIP_REV_ECO_MASK 0xf -#define MT7621_DRAM_BASE 0x0 -#define MT7621_DDR2_SIZE_MIN 32 -#define MT7621_DDR2_SIZE_MAX 256 +#define MT7621_LOWMEM_BASE 0x0 +#define MT7621_LOWMEM_MAX_SIZE 0x1C000000 +#define MT7621_HIGHMEM_BASE 0x20000000 +#define MT7621_HIGHMEM_SIZE 0x4000000 #define MT7621_CHIP_NAME0 0x3637544D #define MT7621_CHIP_NAME1 0x20203132 diff --git a/arch/mips/include/asm/mips-cps.h b/arch/mips/include/asm/mips-cps.h index fd43d876892e..35fb8ee6dd33 100644 --- a/arch/mips/include/asm/mips-cps.h +++ b/arch/mips/include/asm/mips-cps.h @@ -10,6 +10,8 @@ #include <linux/io.h> #include <linux/types.h> +#include <asm/mips-boards/launch.h> + extern unsigned long __cps_access_bad_size(void) __compiletime_error("Bad size for CPS accessor"); @@ -165,11 +167,30 @@ static inline uint64_t mips_cps_cluster_config(unsigned int cluster) */ static inline unsigned int mips_cps_numcores(unsigned int cluster) { + unsigned int ncores; + if (!mips_cm_present()) return 0; /* Add one before masking to handle 0xff indicating no cores */ - return (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES; + ncores = (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES; + + if (IS_ENABLED(CONFIG_SOC_MT7621)) { + struct cpulaunch *launch; + + /* + * Ralink MT7621S SoC is single core, but the GCR_CONFIG method + * always reports 2 cores. Check the second core's LAUNCH_FREADY + * flag to detect if the second core is missing. This method + * only works before the core has been started. 
+ */ + launch = (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH); + launch += 2; /* MT7621 has 2 VPEs per core */ + if (!(launch->flags & LAUNCH_FREADY)) + ncores = 1; + } + + return ncores; } /** diff --git a/arch/mips/include/asm/octeon/cvmx-address.h b/arch/mips/include/asm/octeon/cvmx-address.h index e4444f8c4a61..5df5c90f6a5d 100644 --- a/arch/mips/include/asm/octeon/cvmx-address.h +++ b/arch/mips/include/asm/octeon/cvmx-address.h @@ -152,7 +152,7 @@ typedef union { /* physical mem address */ struct { - /* techically, <47:40> are dont-cares */ + /* technically, <47:40> are dont-cares */ uint64_t zeroes:24; /* the hardware ignores <39:36> in Octeon I */ uint64_t unaddr:4; diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h index c114a7ba0bad..0e6bf220db61 100644 --- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h +++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h @@ -298,6 +298,7 @@ enum cvmx_board_types_enum { CVMX_BOARD_TYPE_UBNT_E200 = 20003, CVMX_BOARD_TYPE_UBNT_E220 = 20005, CVMX_BOARD_TYPE_CUST_DSR1000N = 20006, + CVMX_BOARD_TYPE_UBNT_E300 = 20300, CVMX_BOARD_TYPE_KONTRON_S1901 = 21901, CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000, @@ -401,6 +402,7 @@ static inline const char *cvmx_board_type_to_string(enum ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E200) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E220) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DSR1000N) + ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E300) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KONTRON_S1901) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX) } diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index 65acab9c41f9..195ff4e9771f 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -210,9 +210,16 @@ static inline unsigned long ___pa(unsigned long x) * also affect MIPS so we keep this one until GCC 3.x has been retired * before we can apply https://patchwork.linux-mips.org/patch/1541/ */ +#define __pa_symbol_nodebug(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) + +#ifdef CONFIG_DEBUG_VIRTUAL +extern phys_addr_t __phys_addr_symbol(unsigned long x); +#else +#define __phys_addr_symbol(x) __pa_symbol_nodebug(x) +#endif #ifndef __pa_symbol -#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) +#define __pa_symbol(x) __phys_addr_symbol((unsigned long)(x)) #endif #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h index 6f48649201c5..9ffc8192adae 100644 --- a/arch/mips/include/asm/pci.h +++ b/arch/mips/include/asm/pci.h @@ -38,7 +38,6 @@ struct pci_controller { struct resource *io_resource; unsigned long io_offset; unsigned long io_map_base; - struct resource *busn_resource; #ifndef CONFIG_PCI_DOMAINS_GENERIC unsigned int index; diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 7834e7c0c78a..0c3550c82b72 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -32,16 +32,11 @@ extern unsigned int vced_count, vcei_count; extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #ifdef CONFIG_32BIT -#ifdef CONFIG_KVM_GUEST -/* User space process size is limited to 1GB in KVM Guest Mode */ -#define TASK_SIZE 0x3fff8000UL -#else /* * User space process size: 2GB. This is hardcoded into a few places, * so don't change it unless you know what you are doing. 
*/ #define TASK_SIZE 0x80000000UL -#endif #define STACK_TOP_MAX TASK_SIZE @@ -226,10 +221,6 @@ struct nlm_cop2_state { #define COP2_INIT #endif -typedef struct { - unsigned long seg; -} mm_segment_t; - #ifdef CONFIG_CPU_HAS_MSA # define ARCH_MIN_TASKALIGN 16 # define FPU_ALIGN __aligned(16) diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index e2c352da3877..0b17aaa9e012 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -28,11 +28,6 @@ struct thread_info { unsigned long tp_value; /* thread pointer */ __u32 cpu; /* current CPU */ int preempt_count; /* 0 => preemptable, <0 => BUG */ - mm_segment_t addr_limit; /* - * thread address space limit: - * 0x7fffffff for user-thead - * 0xffffffff for kernel-thread - */ struct pt_regs *regs; long syscall; /* syscall number */ }; @@ -46,7 +41,6 @@ struct thread_info { .flags = _TIF_FIXADE, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .addr_limit = KERNEL_DS, \ } /* diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 61fc01f177a6..783fecce65c8 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -16,20 +16,9 @@ #include <asm/asm-eva.h> #include <asm/extable.h> -/* - * The fs value determines whether argument validity checking should be - * performed or not. If get_fs() == USER_DS, checking is performed, with - * get_fs() == KERNEL_DS, checking is bypassed. - * - * For historical reasons, these macros are grossly misnamed. - */ #ifdef CONFIG_32BIT -#ifdef CONFIG_KVM_GUEST -#define __UA_LIMIT 0x40000000UL -#else #define __UA_LIMIT 0x80000000UL -#endif #define __UA_ADDR ".word" #define __UA_LA "la" @@ -54,43 +43,6 @@ extern u64 __ua_limit; #endif /* CONFIG_64BIT */ /* - * USER_DS is a bitmask that has the bits set that may not be set in a valid - * userspace address. Note that we limit 32-bit userspace to 0x7fff8000 but - * the arithmetic we're doing only works if the limit is a power of two, so - * we use 0x80000000 here on 32-bit kernels. If a process passes an invalid - * address in this range it's the process's problem, not ours :-) - */ - -#ifdef CONFIG_KVM_GUEST -#define KERNEL_DS ((mm_segment_t) { 0x80000000UL }) -#define USER_DS ((mm_segment_t) { 0xC0000000UL }) -#else -#define KERNEL_DS ((mm_segment_t) { 0UL }) -#define USER_DS ((mm_segment_t) { __UA_LIMIT }) -#endif - -#define get_fs() (current_thread_info()->addr_limit) -#define set_fs(x) (current_thread_info()->addr_limit = (x)) - -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) - -/* - * eva_kernel_access() - determine whether kernel memory access on an EVA system - * - * Determines whether memory accesses should be performed to kernel memory - * on a system using Extended Virtual Addressing (EVA). - * - * Return: true if a kernel memory access on an EVA system, else false. - */ -static inline bool eva_kernel_access(void) -{ - if (!IS_ENABLED(CONFIG_EVA)) - return false; - - return uaccess_kernel(); -} - -/* * Is a address valid? This does a straightforward calculation rather * than tests. 
* @@ -127,7 +79,9 @@ static inline bool eva_kernel_access(void) static inline int __access_ok(const void __user *p, unsigned long size) { unsigned long addr = (unsigned long)p; - return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0; + unsigned long end = addr + size - !!size; + + return (__UA_LIMIT & (addr | end | __ua_size(size))) == 0; } #define access_ok(addr, size) \ @@ -150,8 +104,13 @@ static inline int __access_ok(const void __user *p, unsigned long size) * * Returns zero on success, or -EFAULT on error. */ -#define put_user(x,ptr) \ - __put_user_check((x), (ptr), sizeof(*(ptr))) +#define put_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + \ + might_fault(); \ + access_ok(__p, sizeof(*__p)) ? __put_user((x), __p) : -EFAULT; \ +}) /* * get_user: - Get a simple variable from user space. @@ -171,8 +130,14 @@ static inline int __access_ok(const void __user *p, unsigned long size) * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. */ -#define get_user(x,ptr) \ - __get_user_check((x), (ptr), sizeof(*(ptr))) +#define get_user(x, ptr) \ +({ \ + const __typeof__(*(ptr)) __user *__p = (ptr); \ + \ + might_fault(); \ + access_ok(__p, sizeof(*__p)) ? __get_user((x), __p) : \ + ((x) = 0, -EFAULT); \ +}) /* * __put_user: - Write a simple value into user space, with less checking. @@ -194,8 +159,32 @@ static inline int __access_ok(const void __user *p, unsigned long size) * * Returns zero on success, or -EFAULT on error. */ -#define __put_user(x,ptr) \ - __put_user_nocheck((x), (ptr), sizeof(*(ptr))) +#define __put_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \ + __typeof__(*(ptr)) __pu_val = (x); \ + int __pu_err = 0; \ + \ + __chk_user_ptr(__pu_ptr); \ + switch (sizeof(*__pu_ptr)) { \ + case 1: \ + __put_data_asm(user_sb, __pu_ptr); \ + break; \ + case 2: \ + __put_data_asm(user_sh, __pu_ptr); \ + break; \ + case 4: \ + __put_data_asm(user_sw, __pu_ptr); \ + break; \ + case 8: \ + __PUT_DW(user_sd, __pu_ptr); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ + \ + __pu_err; \ +}) /* * __get_user: - Get a simple variable from user space, with less checking. @@ -218,49 +207,35 @@ static inline int __access_ok(const void __user *p, unsigned long size) * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. */ -#define __get_user(x,ptr) \ - __get_user_nocheck((x), (ptr), sizeof(*(ptr))) +#define __get_user(x, ptr) \ +({ \ + const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ + int __gu_err = 0; \ + \ + __chk_user_ptr(__gu_ptr); \ + switch (sizeof(*__gu_ptr)) { \ + case 1: \ + __get_data_asm((x), user_lb, __gu_ptr); \ + break; \ + case 2: \ + __get_data_asm((x), user_lh, __gu_ptr); \ + break; \ + case 4: \ + __get_data_asm((x), user_lw, __gu_ptr); \ + break; \ + case 8: \ + __GET_DW((x), user_ld, __gu_ptr); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ + \ + __gu_err; \ +}) struct __large_struct { unsigned long buf[100]; }; #define __m(x) (*(struct __large_struct __user *)(x)) -/* - * Yuck. We need two variants, one for 64bit operation and one - * for 32 bit mode and old iron. - */ -#ifndef CONFIG_EVA -#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr) -#else -/* - * Kernel specific functions for EVA. We need to use normal load instructions - * to read data from kernel when operating in EVA mode. We use these macros to - * avoid redefining __get_user_asm for EVA. 
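/*
 * Editor's sketch -- not part of the patch.  With the per-thread address
 * limit gone, the rewritten __access_ok() above reduces to a constant mask
 * against __UA_LIMIT (0x80000000 on 32-bit kernels).  A stand-alone,
 * simplified illustration with hypothetical names (the real macro also ORs
 * in __ua_size(size)):
 */
#include <stdbool.h>
#include <stdint.h>

#define UA_LIMIT_SKETCH	0x80000000UL	/* start of the kernel segment */

static bool access_ok_sketch(uintptr_t addr, unsigned long size)
{
	uintptr_t end = addr + size - !!size;	/* last byte touched, or addr when size == 0 */

	/* valid only if neither endpoint has the kernel-segment bit set */
	return (UA_LIMIT_SKETCH & (addr | end)) == 0;
}

/* e.g. addr = 0x7fff0000, size = 0x100 gives end = 0x7fff00ff and passes;
 * any address at or above 0x80000000 sets the top bit and is rejected. */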
- */ -#undef _loadd -#undef _loadw -#undef _loadh -#undef _loadb -#ifdef CONFIG_32BIT -#define _loadd _loadw -#else -#define _loadd(reg, addr) "ld " reg ", " addr -#endif -#define _loadw(reg, addr) "lw " reg ", " addr -#define _loadh(reg, addr) "lh " reg ", " addr -#define _loadb(reg, addr) "lb " reg ", " addr - -#define __get_kernel_common(val, size, ptr) \ -do { \ - switch (size) { \ - case 1: __get_data_asm(val, _loadb, ptr); break; \ - case 2: __get_data_asm(val, _loadh, ptr); break; \ - case 4: __get_data_asm(val, _loadw, ptr); break; \ - case 8: __GET_DW(val, _loadd, ptr); break; \ - default: __get_user_unknown(); break; \ - } \ -} while (0) -#endif - #ifdef CONFIG_32BIT #define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr) #endif @@ -268,49 +243,6 @@ do { \ #define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr) #endif -extern void __get_user_unknown(void); - -#define __get_user_common(val, size, ptr) \ -do { \ - switch (size) { \ - case 1: __get_data_asm(val, user_lb, ptr); break; \ - case 2: __get_data_asm(val, user_lh, ptr); break; \ - case 4: __get_data_asm(val, user_lw, ptr); break; \ - case 8: __GET_DW(val, user_ld, ptr); break; \ - default: __get_user_unknown(); break; \ - } \ -} while (0) - -#define __get_user_nocheck(x, ptr, size) \ -({ \ - int __gu_err; \ - \ - if (eva_kernel_access()) { \ - __get_kernel_common((x), size, ptr); \ - } else { \ - __chk_user_ptr(ptr); \ - __get_user_common((x), size, ptr); \ - } \ - __gu_err; \ -}) - -#define __get_user_check(x, ptr, size) \ -({ \ - int __gu_err = -EFAULT; \ - const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \ - \ - might_fault(); \ - if (likely(access_ok( __gu_ptr, size))) { \ - if (eva_kernel_access()) \ - __get_kernel_common((x), size, __gu_ptr); \ - else \ - __get_user_common((x), size, __gu_ptr); \ - } else \ - (x) = 0; \ - \ - __gu_err; \ -}) - #define __get_data_asm(val, insn, addr) \ { \ long __gu_tmp; \ @@ -364,39 +296,36 @@ do { \ (val) = __gu_tmp.t; \ } -#ifndef CONFIG_EVA -#define __put_kernel_common(ptr, size) __put_user_common(ptr, size) -#else -/* - * Kernel specific functions for EVA. We need to use normal load instructions - * to read data from kernel when operating in EVA mode. We use these macros to - * avoid redefining __get_data_asm for EVA. 
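/*
 * Editor's note -- not part of the patch.  Minimal usage sketch for the
 * accessors rewritten above: get_user()/put_user() now inline the
 * access_ok() test themselves, while __get_user()/__put_user() assume the
 * caller has already validated the pointer.  The function below and its
 * argument are hypothetical.
 */
#include <linux/uaccess.h>

static long frob_user_word_sketch(unsigned int __user *uptr)
{
	unsigned int val;

	if (get_user(val, uptr))	/* checks access_ok(), may fault */
		return -EFAULT;

	val |= 0x1;

	/* the same pointer was just validated, so the unchecked variant is fine */
	return __put_user(val, uptr);
}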
- */ -#undef _stored -#undef _storew -#undef _storeh -#undef _storeb -#ifdef CONFIG_32BIT -#define _stored _storew -#else -#define _stored(reg, addr) "ld " reg ", " addr -#endif - -#define _storew(reg, addr) "sw " reg ", " addr -#define _storeh(reg, addr) "sh " reg ", " addr -#define _storeb(reg, addr) "sb " reg ", " addr +#define HAVE_GET_KERNEL_NOFAULT -#define __put_kernel_common(ptr, size) \ +#define __get_kernel_nofault(dst, src, type, err_label) \ do { \ - switch (size) { \ - case 1: __put_data_asm(_storeb, ptr); break; \ - case 2: __put_data_asm(_storeh, ptr); break; \ - case 4: __put_data_asm(_storew, ptr); break; \ - case 8: __PUT_DW(_stored, ptr); break; \ - default: __put_user_unknown(); break; \ + int __gu_err; \ + \ + switch (sizeof(type)) { \ + case 1: \ + __get_data_asm(*(type *)(dst), kernel_lb, \ + (__force type *)(src)); \ + break; \ + case 2: \ + __get_data_asm(*(type *)(dst), kernel_lh, \ + (__force type *)(src)); \ + break; \ + case 4: \ + __get_data_asm(*(type *)(dst), kernel_lw, \ + (__force type *)(src)); \ + break; \ + case 8: \ + __GET_DW(*(type *)(dst), kernel_ld, \ + (__force type *)(src)); \ + break; \ + default: \ + BUILD_BUG(); \ + break; \ } \ -} while(0) -#endif + if (unlikely(__gu_err)) \ + goto err_label; \ +} while (0) /* * Yuck. We need two variants, one for 64bit operation and one @@ -409,49 +338,6 @@ do { \ #define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr) #endif -#define __put_user_common(ptr, size) \ -do { \ - switch (size) { \ - case 1: __put_data_asm(user_sb, ptr); break; \ - case 2: __put_data_asm(user_sh, ptr); break; \ - case 4: __put_data_asm(user_sw, ptr); break; \ - case 8: __PUT_DW(user_sd, ptr); break; \ - default: __put_user_unknown(); break; \ - } \ -} while (0) - -#define __put_user_nocheck(x, ptr, size) \ -({ \ - __typeof__(*(ptr)) __pu_val; \ - int __pu_err = 0; \ - \ - __pu_val = (x); \ - if (eva_kernel_access()) { \ - __put_kernel_common(ptr, size); \ - } else { \ - __chk_user_ptr(ptr); \ - __put_user_common(ptr, size); \ - } \ - __pu_err; \ -}) - -#define __put_user_check(x, ptr, size) \ -({ \ - __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ - __typeof__(*(ptr)) __pu_val = (x); \ - int __pu_err = -EFAULT; \ - \ - might_fault(); \ - if (likely(access_ok( __pu_addr, size))) { \ - if (eva_kernel_access()) \ - __put_kernel_common(__pu_addr, size); \ - else \ - __put_user_common(__pu_addr, size); \ - } \ - \ - __pu_err; \ -}) - #define __put_data_asm(insn, ptr) \ { \ __asm__ __volatile__( \ @@ -490,7 +376,33 @@ do { \ "i" (-EFAULT)); \ } -extern void __put_user_unknown(void); +#define __put_kernel_nofault(dst, src, type, err_label) \ +do { \ + type __pu_val; \ + int __pu_err = 0; \ + \ + __pu_val = *(__force type *)(src); \ + switch (sizeof(type)) { \ + case 1: \ + __put_data_asm(kernel_sb, (type *)(dst)); \ + break; \ + case 2: \ + __put_data_asm(kernel_sh, (type *)(dst)); \ + break; \ + case 4: \ + __put_data_asm(kernel_sw, (type *)(dst)) \ + break; \ + case 8: \ + __PUT_DW(kernel_sd, (type *)(dst)); \ + break; \ + default: \ + BUILD_BUG(); \ + break; \ + } \ + if (unlikely(__pu_err)) \ + goto err_label; \ +} while (0) + /* * We're generating jump to subroutines which will be outside the range of @@ -514,124 +426,85 @@ extern void __put_user_unknown(void); #define DADDI_SCRATCH "$0" #endif -extern size_t __copy_user(void *__to, const void *__from, size_t __n); - -#define __invoke_copy_from(func, to, from, n) \ -({ \ - register void *__cu_to_r __asm__("$4"); \ - register const void __user *__cu_from_r __asm__("$5"); \ - register 
long __cu_len_r __asm__("$6"); \ - \ - __cu_to_r = (to); \ - __cu_from_r = (from); \ - __cu_len_r = (n); \ - __asm__ __volatile__( \ - ".set\tnoreorder\n\t" \ - __MODULE_JAL(func) \ - ".set\tnoat\n\t" \ - __UA_ADDU "\t$1, %1, %2\n\t" \ - ".set\tat\n\t" \ - ".set\treorder" \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ - : \ - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ - DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ -}) - -#define __invoke_copy_to(func, to, from, n) \ -({ \ - register void __user *__cu_to_r __asm__("$4"); \ - register const void *__cu_from_r __asm__("$5"); \ - register long __cu_len_r __asm__("$6"); \ - \ - __cu_to_r = (to); \ - __cu_from_r = (from); \ - __cu_len_r = (n); \ - __asm__ __volatile__( \ - __MODULE_JAL(func) \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ - : \ - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ - DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ -}) - -#define __invoke_copy_from_kernel(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#define __invoke_copy_to_kernel(to, from, n) \ - __invoke_copy_to(__copy_user, to, from, n) - -#define ___invoke_copy_in_kernel(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#ifndef CONFIG_EVA -#define __invoke_copy_from_user(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#define __invoke_copy_to_user(to, from, n) \ - __invoke_copy_to(__copy_user, to, from, n) - -#define ___invoke_copy_in_user(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#else - -/* EVA specific functions */ - -extern size_t __copy_from_user_eva(void *__to, const void *__from, - size_t __n); -extern size_t __copy_to_user_eva(void *__to, const void *__from, - size_t __n); -extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); - -/* - * Source or destination address is in userland. 
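/*
 * Editor's sketch -- not part of the patch.  With the EVA-specific copy
 * helpers gone, the single __raw_copy_from_user()/__raw_copy_to_user()
 * entry points are reached through the generic wrapper in
 * include/linux/uaccess.h, roughly as below (simplified: the real wrapper
 * also does object-size checking and KASAN instrumentation).
 */
static inline unsigned long
copy_from_user_sketch(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	might_fault();
	if (likely(access_ok(from, n)))
		res = raw_copy_from_user(to, from, n);	/* the arch hook rewritten here */
	if (unlikely(res))
		memset(to + (n - res), 0, res);	/* zero the uncopied tail */

	return res;	/* number of bytes NOT copied */
}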
We need to go through - * the TLB - */ -#define __invoke_copy_from_user(to, from, n) \ - __invoke_copy_from(__copy_from_user_eva, to, from, n) - -#define __invoke_copy_to_user(to, from, n) \ - __invoke_copy_to(__copy_to_user_eva, to, from, n) - -#define ___invoke_copy_in_user(to, from, n) \ - __invoke_copy_from(__copy_in_user_eva, to, from, n) - -#endif /* CONFIG_EVA */ +extern size_t __raw_copy_from_user(void *__to, const void *__from, size_t __n); +extern size_t __raw_copy_to_user(void *__to, const void *__from, size_t __n); +extern size_t __raw_copy_in_user(void *__to, const void *__from, size_t __n); static inline unsigned long -raw_copy_to_user(void __user *to, const void *from, unsigned long n) +raw_copy_from_user(void *to, const void __user *from, unsigned long n) { - if (eva_kernel_access()) - return __invoke_copy_to_kernel(to, from, n); - else - return __invoke_copy_to_user(to, from, n); + register void *__cu_to_r __asm__("$4"); + register const void __user *__cu_from_r __asm__("$5"); + register long __cu_len_r __asm__("$6"); + + __cu_to_r = to; + __cu_from_r = from; + __cu_len_r = n; + + __asm__ __volatile__( + ".set\tnoreorder\n\t" + __MODULE_JAL(__raw_copy_from_user) + ".set\tnoat\n\t" + __UA_ADDU "\t$1, %1, %2\n\t" + ".set\tat\n\t" + ".set\treorder" + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) + : + : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", + DADDI_SCRATCH, "memory"); + + return __cu_len_r; } static inline unsigned long -raw_copy_from_user(void *to, const void __user *from, unsigned long n) +raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - if (eva_kernel_access()) - return __invoke_copy_from_kernel(to, from, n); - else - return __invoke_copy_from_user(to, from, n); + register void __user *__cu_to_r __asm__("$4"); + register const void *__cu_from_r __asm__("$5"); + register long __cu_len_r __asm__("$6"); + + __cu_to_r = (to); + __cu_from_r = (from); + __cu_len_r = (n); + + __asm__ __volatile__( + __MODULE_JAL(__raw_copy_to_user) + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) + : + : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", + DADDI_SCRATCH, "memory"); + + return __cu_len_r; } #define INLINE_COPY_FROM_USER #define INLINE_COPY_TO_USER static inline unsigned long -raw_copy_in_user(void __user*to, const void __user *from, unsigned long n) +raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) { - if (eva_kernel_access()) - return ___invoke_copy_in_kernel(to, from, n); - else - return ___invoke_copy_in_user(to, from, n); + register void __user *__cu_to_r __asm__("$4"); + register const void __user *__cu_from_r __asm__("$5"); + register long __cu_len_r __asm__("$6"); + + __cu_to_r = to; + __cu_from_r = from; + __cu_len_r = n; + + __asm__ __volatile__( + ".set\tnoreorder\n\t" + __MODULE_JAL(__raw_copy_in_user) + ".set\tnoat\n\t" + __UA_ADDU "\t$1, %1, %2\n\t" + ".set\tat\n\t" + ".set\treorder" + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) + : + : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", + DADDI_SCRATCH, "memory"); + return __cu_len_r; } -extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size); extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size); /* @@ -657,28 +530,16 @@ __clear_user(void __user *addr, __kernel_size_t size) #define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31" #endif /* CONFIG_CPU_MICROMIPS */ - if (eva_kernel_access()) { - __asm__ __volatile__( - "move\t$4, %1\n\t" - 
"move\t$5, $0\n\t" - "move\t$6, %2\n\t" - __MODULE_JAL(__bzero_kernel) - "move\t%0, $6" - : "=r" (res) - : "r" (addr), "r" (size) - : bzero_clobbers); - } else { - might_fault(); - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, $0\n\t" - "move\t$6, %2\n\t" - __MODULE_JAL(__bzero) - "move\t%0, $6" - : "=r" (res) - : "r" (addr), "r" (size) - : bzero_clobbers); - } + might_fault(); + __asm__ __volatile__( + "move\t$4, %1\n\t" + "move\t$5, $0\n\t" + "move\t$6, %2\n\t" + __MODULE_JAL(__bzero) + "move\t%0, $6" + : "=r" (res) + : "r" (addr), "r" (size) + : bzero_clobbers); return res; } @@ -692,7 +553,6 @@ __clear_user(void __user *addr, __kernel_size_t size) __cl_size; \ }) -extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from, long __len); extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len); /* @@ -718,33 +578,23 @@ strncpy_from_user(char *__to, const char __user *__from, long __len) { long res; - if (eva_kernel_access()) { - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, %2\n\t" - "move\t$6, %3\n\t" - __MODULE_JAL(__strncpy_from_kernel_asm) - "move\t%0, $2" - : "=r" (res) - : "r" (__to), "r" (__from), "r" (__len) - : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); - } else { - might_fault(); - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, %2\n\t" - "move\t$6, %3\n\t" - __MODULE_JAL(__strncpy_from_user_asm) - "move\t%0, $2" - : "=r" (res) - : "r" (__to), "r" (__from), "r" (__len) - : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); - } + if (!access_ok(__from, __len)) + return -EFAULT; + + might_fault(); + __asm__ __volatile__( + "move\t$4, %1\n\t" + "move\t$5, %2\n\t" + "move\t$6, %3\n\t" + __MODULE_JAL(__strncpy_from_user_asm) + "move\t%0, $2" + : "=r" (res) + : "r" (__to), "r" (__from), "r" (__len) + : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); return res; } -extern long __strnlen_kernel_asm(const char __user *s, long n); extern long __strnlen_user_asm(const char __user *s, long n); /* @@ -764,26 +614,18 @@ static inline long strnlen_user(const char __user *s, long n) { long res; + if (!access_ok(s, 1)) + return 0; + might_fault(); - if (eva_kernel_access()) { - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, %2\n\t" - __MODULE_JAL(__strnlen_kernel_asm) - "move\t%0, $2" - : "=r" (res) - : "r" (s), "r" (n) - : "$2", "$4", "$5", __UA_t0, "$31"); - } else { - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, %2\n\t" - __MODULE_JAL(__strnlen_user_asm) - "move\t%0, $2" - : "=r" (res) - : "r" (s), "r" (n) - : "$2", "$4", "$5", __UA_t0, "$31"); - } + __asm__ __volatile__( + "move\t$4, %1\n\t" + "move\t$5, %2\n\t" + __MODULE_JAL(__strnlen_user_asm) + "move\t%0, $2" + : "=r" (res) + : "r" (s), "r" (n) + : "$2", "$4", "$5", __UA_t0, "$31"); return res; } diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h index 2203e2d0ae2a..44a45f3fa4b0 100644 --- a/arch/mips/include/asm/vdso/gettimeofday.h +++ b/arch/mips/include/asm/vdso/gettimeofday.h @@ -20,6 +20,12 @@ #define VDSO_HAS_CLOCK_GETRES 1 +#if MIPS_ISA_REV < 6 +#define VDSO_SYSCALL_CLOBBERS "hi", "lo", +#else +#define VDSO_SYSCALL_CLOBBERS +#endif + static __always_inline long gettimeofday_fallback( struct __kernel_old_timeval *_tv, struct timezone *_tz) @@ -35,7 +41,9 @@ static __always_inline long gettimeofday_fallback( : "=r" (ret), "=r" (error) : "r" (tv), "r" (tz), "r" (nr) : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", - "$14", "$15", "$24", "$25", "hi", "lo", 
"memory"); + "$14", "$15", "$24", "$25", + VDSO_SYSCALL_CLOBBERS + "memory"); return error ? -ret : ret; } @@ -59,7 +67,9 @@ static __always_inline long clock_gettime_fallback( : "=r" (ret), "=r" (error) : "r" (clkid), "r" (ts), "r" (nr) : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", - "$14", "$15", "$24", "$25", "hi", "lo", "memory"); + "$14", "$15", "$24", "$25", + VDSO_SYSCALL_CLOBBERS + "memory"); return error ? -ret : ret; } @@ -83,7 +93,9 @@ static __always_inline int clock_getres_fallback( : "=r" (ret), "=r" (error) : "r" (clkid), "r" (ts), "r" (nr) : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", - "$14", "$15", "$24", "$25", "hi", "lo", "memory"); + "$14", "$15", "$24", "$25", + VDSO_SYSCALL_CLOBBERS + "memory"); return error ? -ret : ret; } @@ -105,7 +117,9 @@ static __always_inline long clock_gettime32_fallback( : "=r" (ret), "=r" (error) : "r" (clkid), "r" (ts), "r" (nr) : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", - "$14", "$15", "$24", "$25", "hi", "lo", "memory"); + "$14", "$15", "$24", "$25", + VDSO_SYSCALL_CLOBBERS + "memory"); return error ? -ret : ret; } @@ -125,7 +139,9 @@ static __always_inline int clock_getres32_fallback( : "=r" (ret), "=r" (error) : "r" (clkid), "r" (ts), "r" (nr) : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", - "$14", "$15", "$24", "$25", "hi", "lo", "memory"); + "$14", "$15", "$24", "$25", + VDSO_SYSCALL_CLOBBERS + "memory"); return error ? -ret : ret; } diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index b4a57f1de772..814b3da30501 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -17,10 +17,10 @@ obj-y += cpu-probe.o endif ifdef CONFIG_FUNCTION_TRACER -CFLAGS_REMOVE_ftrace.o = -pg -CFLAGS_REMOVE_early_printk.o = -pg -CFLAGS_REMOVE_perf_event.o = -pg -CFLAGS_REMOVE_perf_event_mipsxx.o = -pg +CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_early_printk.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_perf_event.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_perf_event_mipsxx.o = $(CC_FLAGS_FTRACE) endif obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o diff --git a/arch/mips/kernel/access-helper.h b/arch/mips/kernel/access-helper.h new file mode 100644 index 000000000000..590388031503 --- /dev/null +++ b/arch/mips/kernel/access-helper.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include <linux/uaccess.h> + +static inline int __get_addr(unsigned long *a, unsigned long *p, bool user) +{ + return user ? get_user(*a, (unsigned long __user *)p) : + get_kernel_nofault(*a, p); +} + +static inline int __get_inst16(u16 *i, u16 *p, bool user) +{ + return user ? get_user(*i, (u16 __user *)p) : get_kernel_nofault(*i, p); +} + +static inline int __get_inst32(u32 *i, u32 *p, bool user) +{ + return user ? 
get_user(*i, (u32 __user *)p) : get_kernel_nofault(*i, p); +} diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index aebfda81120a..5735b2cd6f2a 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -98,7 +98,6 @@ void output_thread_info_defines(void) OFFSET(TI_TP_VALUE, thread_info, tp_value); OFFSET(TI_CPU, thread_info, cpu); OFFSET(TI_PRE_COUNT, thread_info, preempt_count); - OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); OFFSET(TI_REGS, thread_info, regs); DEFINE(_THREAD_SIZE, THREAD_SIZE); DEFINE(_THREAD_MASK, THREAD_MASK); diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 995ad9e69ded..32ec67c9ab67 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -195,10 +195,6 @@ int c0_compare_int_usable(void) unsigned int delta; unsigned int cnt; -#ifdef CONFIG_KVM_GUEST - return 1; -#endif - /* * IP7 already pending? Try to clear it by acking the timer. */ diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index b71892064f27..0ef240adefb5 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -1752,7 +1752,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) set_isa(c, MIPS_CPU_ISA_M64R2); break; } - c->writecombine = _CACHE_UNCACHED_ACCELERATED; c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2); break; @@ -1782,7 +1781,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) * register, we correct it here. */ c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE; - c->writecombine = _CACHE_UNCACHED_ACCELERATED; c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2); c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */ @@ -1793,7 +1791,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) set_elf_platform(cpu, "loongson3a"); set_isa(c, MIPS_CPU_ISA_M64R2); decode_cpucfg(c); - c->writecombine = _CACHE_UNCACHED_ACCELERATED; break; default: panic("Unknown Loongson Processor ID!"); diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 666b9969c1bd..8c401e42301c 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -90,7 +90,6 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, unsigned int new_code2) { int faulted; - mm_segment_t old_fs; safe_store_code(new_code1, ip, faulted); if (unlikely(faulted)) @@ -102,10 +101,7 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, return -EFAULT; ip -= 4; - old_fs = get_fs(); - set_fs(KERNEL_DS); flush_icache_range(ip, ip + 8); - set_fs(old_fs); return 0; } @@ -114,7 +110,6 @@ static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1, unsigned int new_code2) { int faulted; - mm_segment_t old_fs; ip += 4; safe_store_code(new_code2, ip, faulted); @@ -126,10 +121,7 @@ static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1, if (unlikely(faulted)) return -EFAULT; - old_fs = get_fs(); - set_fs(KERNEL_DS); flush_icache_range(ip, ip + 8); - set_fs(old_fs); return 0; } diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 7efa0d1a4c2b..bff080db0294 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -124,7 +124,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, /* kernel thread */ unsigned long status = p->thread.cp0_status; 
memset(childregs, 0, sizeof(struct pt_regs)); - ti->addr_limit = KERNEL_DS; p->thread.reg16 = usp; /* fn */ p->thread.reg17 = kthread_arg; p->thread.reg29 = childksp; @@ -145,7 +144,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, childregs->regs[2] = 0; /* Child gets zero as return value */ if (usp) childregs->regs[29] = usp; - ti->addr_limit = USER_DS; p->thread.reg29 = (unsigned long) childregs; p->thread.reg31 = (unsigned long) ret_from_fork; diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S index ac870893ba2d..f3c908abdbb8 100644 --- a/arch/mips/kernel/relocate_kernel.S +++ b/arch/mips/kernel/relocate_kernel.S @@ -11,6 +11,8 @@ #include <asm/stackframe.h> #include <asm/addrspace.h> +#include <kernel-entry-init.h> + LEAF(relocate_new_kernel) PTR_L a0, arg0 PTR_L a1, arg1 @@ -125,11 +127,8 @@ LEAF(kexec_smp_wait) 1: LONG_L s0, (t0) bne s0, zero,1b -#ifdef CONFIG_CPU_CAVIUM_OCTEON - .set push - .set noreorder - synci 0($0) - .set pop +#ifdef USE_KEXEC_SMP_WAIT_FINAL + kexec_smp_wait_final #else sync #endif diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index b449b68662a9..b1b2e106f711 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -48,10 +48,8 @@ NESTED(handle_sys, PT_SIZE, sp) * We intentionally keep the kernel stack a little below the top of * userspace so we don't have to do a slower byte accurate check here. */ - lw t5, TI_ADDR_LIMIT($28) addu t4, t0, 32 - and t5, t4 - bltz t5, bad_stack # -> sp is bad + bltz t4, bad_stack # -> sp is bad /* * Ok, copy the args from the luser stack to the kernel stack. @@ -217,9 +215,9 @@ einval: li v0, -ENOSYS #define sys_sched_getaffinity mipsmt_sys_sched_getaffinity #endif /* CONFIG_MIPS_MT_FPAFF */ +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) #define __SYSCALL(nr, entry) PTR entry .align 2 .type sys_call_table, @object EXPORT(sys_call_table) -#include <asm/syscall_table_32_o32.h> -#undef __SYSCALL +#include <asm/syscall_table_o32.h> diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 35d8c86b160e..f650c55a17dc 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -104,5 +104,4 @@ not_n32_scall: #define __SYSCALL(nr, entry) PTR entry .type sysn32_call_table, @object EXPORT(sysn32_call_table) -#include <asm/syscall_table_64_n32.h> -#undef __SYSCALL +#include <asm/syscall_table_n32.h> diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S index 5e9c497ce099..5d7bfc65e4d0 100644 --- a/arch/mips/kernel/scall64-n64.S +++ b/arch/mips/kernel/scall64-n64.S @@ -113,5 +113,4 @@ illegal_syscall: .align 3 .type sys_call_table, @object EXPORT(sys_call_table) -#include <asm/syscall_table_64_n64.h> -#undef __SYSCALL +#include <asm/syscall_table_n64.h> diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 50c9a57e0d3a..cedc8bd88804 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -213,9 +213,9 @@ einval: li v0, -ENOSYS jr ra END(sys32_syscall) +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) #define __SYSCALL(nr, entry) PTR entry .align 3 .type sys32_call_table,@object EXPORT(sys32_call_table) -#include <asm/syscall_table_64_o32.h> -#undef __SYSCALL +#include <asm/syscall_table_o32.h> diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 279be0153f8b..23a140327a0b 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ 
-43,7 +43,7 @@ #include <asm/prom.h> #ifdef CONFIG_MIPS_ELF_APPENDED_DTB -const char __section(".appended_dtb") __appended_dtb[0x100000]; +char __section(".appended_dtb") __appended_dtb[0x100000]; #endif /* CONFIG_MIPS_ELF_APPENDED_DTB */ struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly; diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 359b176b665f..b6ef5f7312cf 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -134,17 +134,24 @@ static void __init bmips_smp_setup(void) if (!board_ebase_setup) board_ebase_setup = &bmips_ebase_setup; - __cpu_number_map[boot_cpu] = 0; - __cpu_logical_map[0] = boot_cpu; - - for (i = 0; i < max_cpus; i++) { - if (i != boot_cpu) { - __cpu_number_map[i] = cpu; - __cpu_logical_map[cpu] = i; - cpu++; + if (max_cpus > 1) { + __cpu_number_map[boot_cpu] = 0; + __cpu_logical_map[0] = boot_cpu; + + for (i = 0; i < max_cpus; i++) { + if (i != boot_cpu) { + __cpu_number_map[i] = cpu; + __cpu_logical_map[cpu] = i; + cpu++; + } + set_cpu_possible(i, 1); + set_cpu_present(i, 1); } - set_cpu_possible(i, 1); - set_cpu_present(i, 1); + } else { + __cpu_number_map[0] = boot_cpu; + __cpu_logical_map[0] = 0; + set_cpu_possible(0, 1); + set_cpu_present(0, 1); } } diff --git a/arch/mips/kernel/spinlock_test.c b/arch/mips/kernel/spinlock_test.c index ab4e3e1b138d..90f53e041a38 100644 --- a/arch/mips/kernel/spinlock_test.c +++ b/arch/mips/kernel/spinlock_test.c @@ -35,7 +35,7 @@ static int ss_get(void *data, u64 *val) return 0; } -DEFINE_SIMPLE_ATTRIBUTE(fops_ss, ss_get, NULL, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_ss, ss_get, NULL, "%llu\n"); @@ -114,13 +114,13 @@ static int multi_get(void *data, u64 *val) return 0; } -DEFINE_SIMPLE_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n"); static int __init spinlock_test(void) { - debugfs_create_file("spin_single", S_IRUGO, mips_debugfs_dir, NULL, + debugfs_create_file_unsafe("spin_single", S_IRUGO, mips_debugfs_dir, NULL, &fops_ss); - debugfs_create_file("spin_multi", S_IRUGO, mips_debugfs_dir, NULL, + debugfs_create_file_unsafe("spin_multi", S_IRUGO, mips_debugfs_dir, NULL, &fops_multi); return 0; } diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile index 51f8b805f2ed..904452992992 100644 --- a/arch/mips/kernel/syscalls/Makefile +++ b/arch/mips/kernel/syscalls/Makefile @@ -8,15 +8,12 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ syscalln32 := $(src)/syscall_n32.tbl syscalln64 := $(src)/syscall_n64.tbl syscallo32 := $(src)/syscall_o32.tbl -syshdr := $(srctree)/$(src)/syscallhdr.sh +syshdr := $(srctree)/scripts/syscallhdr.sh sysnr := $(srctree)/$(src)/syscallnr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ - cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ - '$(syshdr_abis_$(basetarget))' \ - '$(syshdr_pfx_$(basetarget))' \ - '$(syshdr_offset_$(basetarget))' + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --offset __NR_Linux $< $@ quiet_cmd_sysnr = SYSNR $@ cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \ @@ -25,20 +22,14 @@ quiet_cmd_sysnr = SYSNR $@ '$(sysnr_offset_$(basetarget))' quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))' \ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ -syshdr_offset_unistd_n32 := __NR_Linux $(uapi)/unistd_n32.h: $(syscalln32) 
$(syshdr) FORCE $(call if_changed,syshdr) -syshdr_offset_unistd_n64 := __NR_Linux $(uapi)/unistd_n64.h: $(syscalln64) $(syshdr) FORCE $(call if_changed,syshdr) -syshdr_offset_unistd_o32 := __NR_Linux $(uapi)/unistd_o32.h: $(syscallo32) $(syshdr) FORCE $(call if_changed,syshdr) @@ -57,33 +48,21 @@ sysnr_offset_unistd_nr_o32 := 4000 $(kapi)/unistd_nr_o32.h: $(syscallo32) $(sysnr) FORCE $(call if_changed,sysnr) -systbl_abi_syscall_table_32_o32 := 32_o32 -systbl_offset_syscall_table_32_o32 := 4000 -$(kapi)/syscall_table_32_o32.h: $(syscallo32) $(systbl) FORCE +$(kapi)/syscall_table_n32.h: $(syscalln32) $(systbl) FORCE $(call if_changed,systbl) -systbl_abi_syscall_table_64_n32 := 64_n32 -systbl_offset_syscall_table_64_n32 := 6000 -$(kapi)/syscall_table_64_n32.h: $(syscalln32) $(systbl) FORCE +$(kapi)/syscall_table_n64.h: $(syscalln64) $(systbl) FORCE $(call if_changed,systbl) -systbl_abi_syscall_table_64_n64 := 64_n64 -systbl_offset_syscall_table_64_n64 := 5000 -$(kapi)/syscall_table_64_n64.h: $(syscalln64) $(systbl) FORCE - $(call if_changed,systbl) - -systbl_abi_syscall_table_64_o32 := 64_o32 -systbl_offset_syscall_table_64_o32 := 4000 -$(kapi)/syscall_table_64_o32.h: $(syscallo32) $(systbl) FORCE +$(kapi)/syscall_table_o32.h: $(syscallo32) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_n32.h \ unistd_n64.h \ unistd_o32.h -kapisyshdr-y += syscall_table_32_o32.h \ - syscall_table_64_n32.h \ - syscall_table_64_n64.h \ - syscall_table_64_o32.h \ +kapisyshdr-y += syscall_table_n32.h \ + syscall_table_n64.h \ + syscall_table_o32.h \ unistd_nr_n32.h \ unistd_nr_n64.h \ unistd_nr_o32.h diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl index 8fd8c1790941..5e0096657251 100644 --- a/arch/mips/kernel/syscalls/syscall_n32.tbl +++ b/arch/mips/kernel/syscalls/syscall_n32.tbl @@ -381,3 +381,7 @@ 440 n32 process_madvise sys_process_madvise 441 n32 epoll_pwait2 compat_sys_epoll_pwait2 442 n32 mount_setattr sys_mount_setattr +443 n32 quotactl_path sys_quotactl_path +444 n32 landlock_create_ruleset sys_landlock_create_ruleset +445 n32 landlock_add_rule sys_landlock_add_rule +446 n32 landlock_restrict_self sys_landlock_restrict_self diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl index 169f21438065..9974f5f8e49b 100644 --- a/arch/mips/kernel/syscalls/syscall_n64.tbl +++ b/arch/mips/kernel/syscalls/syscall_n64.tbl @@ -357,3 +357,7 @@ 440 n64 process_madvise sys_process_madvise 441 n64 epoll_pwait2 sys_epoll_pwait2 442 n64 mount_setattr sys_mount_setattr +443 n64 quotactl_path sys_quotactl_path +444 n64 landlock_create_ruleset sys_landlock_create_ruleset +445 n64 landlock_add_rule sys_landlock_add_rule +446 n64 landlock_restrict_self sys_landlock_restrict_self diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl index 090d29ca80ff..39d6e71e57b6 100644 --- a/arch/mips/kernel/syscalls/syscall_o32.tbl +++ b/arch/mips/kernel/syscalls/syscall_o32.tbl @@ -430,3 +430,7 @@ 440 o32 process_madvise sys_process_madvise 441 o32 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 442 o32 mount_setattr sys_mount_setattr +443 o32 quotactl_path sys_quotactl_path +444 o32 landlock_create_ruleset sys_landlock_create_ruleset +445 o32 landlock_add_rule sys_landlock_add_rule +446 o32 landlock_restrict_self sys_landlock_restrict_self diff --git a/arch/mips/kernel/syscalls/syscallhdr.sh b/arch/mips/kernel/syscalls/syscallhdr.sh deleted file mode 100644 index 
2e241e713a7d..000000000000 --- a/arch/mips/kernel/syscalls/syscallhdr.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -prefix="$4" -offset="$5" - -fileguard=_UAPI_ASM_MIPS_`basename "$out" | sed \ - -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ - -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - printf "#ifndef %s\n" "${fileguard}" - printf "#define %s\n" "${fileguard}" - printf "\n" - - nxt=0 - while read nr abi name entry compat ; do - if [ -z "$offset" ]; then - printf "#define __NR_%s%s\t%s\n" \ - "${prefix}" "${name}" "${nr}" - else - printf "#define __NR_%s%s\t(%s + %s)\n" \ - "${prefix}" "${name}" "${offset}" "${nr}" - fi - nxt=$((nr+1)) - done - - printf "\n" - printf "#ifdef __KERNEL__\n" - printf "#define __NR_syscalls\t%s\n" "${nxt}" - printf "#endif\n" - printf "\n" - printf "#endif /* %s */\n" "${fileguard}" -) > "$out" diff --git a/arch/mips/kernel/syscalls/syscalltbl.sh b/arch/mips/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index 1e2570740c20..000000000000 --- a/arch/mips/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry compat ; do - if [ "$my_abi" = "64_o32" ] && [ ! 
-z "$compat" ]; then - emit $((nxt+offset)) $((nr+offset)) $compat - else - emit $((nxt+offset)) $((nr+offset)) $entry - fi - nxt=$((nr+1)) - done -) > "$out" diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 808b8b61ded1..0b4e06303c55 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -72,6 +72,8 @@ #include <asm/mach-loongson64/cpucfg-emul.h> +#include "access-helper.h" + extern void check_wait(void); extern asmlinkage void rollback_handle_int(void); extern asmlinkage void handle_int(void); @@ -108,7 +110,8 @@ void (*board_bind_eic_interrupt)(int irq, int regset); void (*board_ebase_setup)(void); void(*board_cache_error_setup)(void); -static void show_raw_backtrace(unsigned long reg29, const char *loglvl) +static void show_raw_backtrace(unsigned long reg29, const char *loglvl, + bool user) { unsigned long *sp = (unsigned long *)(reg29 & ~3); unsigned long addr; @@ -118,9 +121,7 @@ static void show_raw_backtrace(unsigned long reg29, const char *loglvl) printk("%s\n", loglvl); #endif while (!kstack_end(sp)) { - unsigned long __user *p = - (unsigned long __user *)(unsigned long)sp++; - if (__get_user(addr, p)) { + if (__get_addr(&addr, sp++, user)) { printk("%s (Bad stack address)", loglvl); break; } @@ -141,7 +142,7 @@ __setup("raw_show_trace", set_raw_show_trace); #endif static void show_backtrace(struct task_struct *task, const struct pt_regs *regs, - const char *loglvl) + const char *loglvl, bool user) { unsigned long sp = regs->regs[29]; unsigned long ra = regs->regs[31]; @@ -151,7 +152,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs, task = current; if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) { - show_raw_backtrace(sp, loglvl); + show_raw_backtrace(sp, loglvl, user); return; } printk("%sCall Trace:\n", loglvl); @@ -167,12 +168,12 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs, * with at least a bit of error checking ... */ static void show_stacktrace(struct task_struct *task, - const struct pt_regs *regs, const char *loglvl) + const struct pt_regs *regs, const char *loglvl, bool user) { const int field = 2 * sizeof(unsigned long); - long stackdata; + unsigned long stackdata; int i; - unsigned long __user *sp = (unsigned long __user *)regs->regs[29]; + unsigned long *sp = (unsigned long *)regs->regs[29]; printk("%sStack :", loglvl); i = 0; @@ -186,7 +187,7 @@ static void show_stacktrace(struct task_struct *task, break; } - if (__get_user(stackdata, sp++)) { + if (__get_addr(&stackdata, sp++, user)) { pr_cont(" (Bad stack address)"); break; } @@ -195,13 +196,12 @@ static void show_stacktrace(struct task_struct *task, i++; } pr_cont("\n"); - show_backtrace(task, regs, loglvl); + show_backtrace(task, regs, loglvl, user); } void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) { struct pt_regs regs; - mm_segment_t old_fs = get_fs(); regs.cp0_status = KSU_KERNEL; if (sp) { @@ -217,33 +217,41 @@ void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) prepare_frametrace(®s); } } - /* - * show_stack() deals exclusively with kernel mode, so be sure to access - * the stack in the kernel (not user) address space. 
- */ - set_fs(KERNEL_DS); - show_stacktrace(task, ®s, loglvl); - set_fs(old_fs); + show_stacktrace(task, ®s, loglvl, false); } -static void show_code(unsigned int __user *pc) +static void show_code(void *pc, bool user) { long i; - unsigned short __user *pc16 = NULL; + unsigned short *pc16 = NULL; printk("Code:"); if ((unsigned long)pc & 1) - pc16 = (unsigned short __user *)((unsigned long)pc & ~1); + pc16 = (u16 *)((unsigned long)pc & ~1); + for(i = -3 ; i < 6 ; i++) { - unsigned int insn; - if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) { - pr_cont(" (Bad address in epc)\n"); - break; + if (pc16) { + u16 insn16; + + if (__get_inst16(&insn16, pc16 + i, user)) + goto bad_address; + + pr_cont("%c%04x%c", (i?' ':'<'), insn16, (i?' ':'>')); + } else { + u32 insn32; + + if (__get_inst32(&insn32, (u32 *)pc + i, user)) + goto bad_address; + + pr_cont("%c%08x%c", (i?' ':'<'), insn32, (i?' ':'>')); } - pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>')); } pr_cont("\n"); + return; + +bad_address: + pr_cont(" (Bad address in epc)\n\n"); } static void __show_regs(const struct pt_regs *regs) @@ -356,7 +364,6 @@ void show_regs(struct pt_regs *regs) void show_registers(struct pt_regs *regs) { const int field = 2 * sizeof(unsigned long); - mm_segment_t old_fs = get_fs(); __show_regs(regs); print_modules(); @@ -371,13 +378,9 @@ void show_registers(struct pt_regs *regs) printk("*HwTLS: %0*lx\n", field, tls); } - if (!user_mode(regs)) - /* Necessary for getting the correct stack content */ - set_fs(KERNEL_DS); - show_stacktrace(current, regs, KERN_DEFAULT); - show_code((unsigned int __user *) regs->cp0_epc); + show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs)); + show_code((void *)regs->cp0_epc, user_mode(regs)); printk("\n"); - set_fs(old_fs); } static DEFINE_RAW_SPINLOCK(die_lock); @@ -1022,18 +1025,14 @@ asmlinkage void do_bp(struct pt_regs *regs) unsigned long epc = msk_isa16_mode(exception_epc(regs)); unsigned int opcode, bcode; enum ctx_state prev_state; - mm_segment_t seg; - - seg = get_fs(); - if (!user_mode(regs)) - set_fs(KERNEL_DS); + bool user = user_mode(regs); prev_state = exception_enter(); current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; if (get_isa16_mode(regs->cp0_epc)) { u16 instr[2]; - if (__get_user(instr[0], (u16 __user *)epc)) + if (__get_inst16(&instr[0], (u16 *)epc, user)) goto out_sigsegv; if (!cpu_has_mmips) { @@ -1044,13 +1043,13 @@ asmlinkage void do_bp(struct pt_regs *regs) bcode = instr[0] & 0xf; } else { /* 32-bit microMIPS BREAK */ - if (__get_user(instr[1], (u16 __user *)(epc + 2))) + if (__get_inst16(&instr[1], (u16 *)(epc + 2), user)) goto out_sigsegv; opcode = (instr[0] << 16) | instr[1]; bcode = (opcode >> 6) & ((1 << 20) - 1); } } else { - if (__get_user(opcode, (unsigned int __user *)epc)) + if (__get_inst32(&opcode, (u32 *)epc, user)) goto out_sigsegv; bcode = (opcode >> 6) & ((1 << 20) - 1); } @@ -1100,7 +1099,6 @@ asmlinkage void do_bp(struct pt_regs *regs) do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break"); out: - set_fs(seg); exception_exit(prev_state); return; @@ -1114,25 +1112,21 @@ asmlinkage void do_tr(struct pt_regs *regs) u32 opcode, tcode = 0; enum ctx_state prev_state; u16 instr[2]; - mm_segment_t seg; + bool user = user_mode(regs); unsigned long epc = msk_isa16_mode(exception_epc(regs)); - seg = get_fs(); - if (!user_mode(regs)) - set_fs(KERNEL_DS); - prev_state = exception_enter(); current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; if (get_isa16_mode(regs->cp0_epc)) { - if (__get_user(instr[0], (u16 
__user *)(epc + 0)) || - __get_user(instr[1], (u16 __user *)(epc + 2))) + if (__get_inst16(&instr[0], (u16 *)(epc + 0), user) || + __get_inst16(&instr[1], (u16 *)(epc + 2), user)) goto out_sigsegv; opcode = (instr[0] << 16) | instr[1]; /* Immediate versions don't provide a code. */ if (!(opcode & OPCODE)) tcode = (opcode >> 12) & ((1 << 4) - 1); } else { - if (__get_user(opcode, (u32 __user *)epc)) + if (__get_inst32(&opcode, (u32 *)epc, user)) goto out_sigsegv; /* Immediate versions don't provide a code. */ if (!(opcode & OPCODE)) @@ -1142,7 +1136,6 @@ asmlinkage void do_tr(struct pt_regs *regs) do_trap_or_bp(regs, tcode, 0, "Trap"); out: - set_fs(seg); exception_exit(prev_state); return; @@ -1591,7 +1584,6 @@ asmlinkage void do_mcheck(struct pt_regs *regs) { int multi_match = regs->cp0_status & ST0_TS; enum ctx_state prev_state; - mm_segment_t old_fs = get_fs(); prev_state = exception_enter(); show_regs(regs); @@ -1602,12 +1594,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs) dump_tlb_all(); } - if (!user_mode(regs)) - set_fs(KERNEL_DS); - - show_code((unsigned int __user *) regs->cp0_epc); - - set_fs(old_fs); + show_code((void *)regs->cp0_epc, user_mode(regs)); /* * Some chips may have other causes of machine check (e.g. SB1 diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 126a5f3f4e4c..df4b708c04a9 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -93,6 +93,8 @@ #include <asm/mmu_context.h> #include <linux/uaccess.h> +#include "access-helper.h" + enum { UNALIGNED_ACTION_QUIET, UNALIGNED_ACTION_SIGNAL, @@ -107,14 +109,13 @@ static u32 unaligned_action; extern void show_registers(struct pt_regs *regs); static void emulate_load_store_insn(struct pt_regs *regs, - void __user *addr, unsigned int __user *pc) + void __user *addr, unsigned int *pc) { unsigned long origpc, orig31, value; union mips_instruction insn; unsigned int res; -#ifdef CONFIG_EVA - mm_segment_t seg; -#endif + bool user = user_mode(regs); + origpc = (unsigned long)pc; orig31 = regs->regs[31]; @@ -123,7 +124,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, /* * This load never faults. */ - __get_user(insn.word, pc); + __get_inst32(&insn.word, pc, user); switch (insn.i_format.opcode) { /* @@ -163,7 +164,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (insn.dsp_format.func == lx_op) { switch (insn.dsp_format.op) { case lwx_op: - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; LoadW(addr, value, res); if (res) @@ -172,7 +173,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, regs->regs[insn.dsp_format.rd] = value; break; case lhx_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; LoadHW(addr, value, res); if (res) @@ -191,93 +192,66 @@ static void emulate_load_store_insn(struct pt_regs *regs, * memory, so we need to "switch" the address limit to * user space, so that address check can work properly. 
*/ - seg = force_uaccess_begin(); switch (insn.spec3_format.func) { case lhe_op: - if (!access_ok(addr, 2)) { - force_uaccess_end(seg); + if (!access_ok(addr, 2)) goto sigbus; - } LoadHWE(addr, value, res); - if (res) { - force_uaccess_end(seg); + if (res) goto fault; - } compute_return_epc(regs); regs->regs[insn.spec3_format.rt] = value; break; case lwe_op: - if (!access_ok(addr, 4)) { - force_uaccess_end(seg); + if (!access_ok(addr, 4)) goto sigbus; - } LoadWE(addr, value, res); - if (res) { - force_uaccess_end(seg); + if (res) goto fault; - } compute_return_epc(regs); regs->regs[insn.spec3_format.rt] = value; break; case lhue_op: - if (!access_ok(addr, 2)) { - force_uaccess_end(seg); + if (!access_ok(addr, 2)) goto sigbus; - } LoadHWUE(addr, value, res); - if (res) { - force_uaccess_end(seg); + if (res) goto fault; - } compute_return_epc(regs); regs->regs[insn.spec3_format.rt] = value; break; case she_op: - if (!access_ok(addr, 2)) { - force_uaccess_end(seg); + if (!access_ok(addr, 2)) goto sigbus; - } compute_return_epc(regs); value = regs->regs[insn.spec3_format.rt]; StoreHWE(addr, value, res); - if (res) { - force_uaccess_end(seg); + if (res) goto fault; - } break; case swe_op: - if (!access_ok(addr, 4)) { - force_uaccess_end(seg); + if (!access_ok(addr, 4)) goto sigbus; - } compute_return_epc(regs); value = regs->regs[insn.spec3_format.rt]; StoreWE(addr, value, res); - if (res) { - force_uaccess_end(seg); + if (res) goto fault; - } break; default: - force_uaccess_end(seg); goto sigill; } - force_uaccess_end(seg); } #endif break; case lh_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; - if (IS_ENABLED(CONFIG_EVA)) { - if (uaccess_kernel()) - LoadHW(addr, value, res); - else - LoadHWE(addr, value, res); - } else { + if (IS_ENABLED(CONFIG_EVA) && user) + LoadHWE(addr, value, res); + else LoadHW(addr, value, res); - } if (res) goto fault; @@ -286,17 +260,13 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; case lw_op: - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; - if (IS_ENABLED(CONFIG_EVA)) { - if (uaccess_kernel()) - LoadW(addr, value, res); - else - LoadWE(addr, value, res); - } else { + if (IS_ENABLED(CONFIG_EVA) && user) + LoadWE(addr, value, res); + else LoadW(addr, value, res); - } if (res) goto fault; @@ -305,17 +275,13 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; case lhu_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; - if (IS_ENABLED(CONFIG_EVA)) { - if (uaccess_kernel()) - LoadHWU(addr, value, res); - else - LoadHWUE(addr, value, res); - } else { + if (IS_ENABLED(CONFIG_EVA) && user) + LoadHWUE(addr, value, res); + else LoadHWU(addr, value, res); - } if (res) goto fault; @@ -332,7 +298,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; LoadWU(addr, value, res); @@ -355,7 +321,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. 
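/*
 * Editor's note -- not part of the patch.  Condensed form of the guard the
 * unaligned-access emulator now applies throughout this file: only
 * addresses that came from user context get an access_ok() range check,
 * since without set_fs() there is no address limit to widen for
 * kernel-mode faults.  The helper name is hypothetical.
 */
static int check_emul_addr_sketch(const void __user *addr,
				  unsigned long size, bool user)
{
	if (user && !access_ok(addr, size))
		return -EFAULT;	/* caller raises SIGBUS */

	return 0;	/* kernel addresses are trusted here */
}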
*/ - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; LoadDW(addr, value, res); @@ -370,40 +336,32 @@ static void emulate_load_store_insn(struct pt_regs *regs, goto sigill; case sh_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; compute_return_epc(regs); value = regs->regs[insn.i_format.rt]; - if (IS_ENABLED(CONFIG_EVA)) { - if (uaccess_kernel()) - StoreHW(addr, value, res); - else - StoreHWE(addr, value, res); - } else { + if (IS_ENABLED(CONFIG_EVA) && user) + StoreHWE(addr, value, res); + else StoreHW(addr, value, res); - } if (res) goto fault; break; case sw_op: - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; compute_return_epc(regs); value = regs->regs[insn.i_format.rt]; - if (IS_ENABLED(CONFIG_EVA)) { - if (uaccess_kernel()) - StoreW(addr, value, res); - else - StoreWE(addr, value, res); - } else { + if (IS_ENABLED(CONFIG_EVA) && user) + StoreWE(addr, value, res); + else StoreW(addr, value, res); - } if (res) goto fault; @@ -418,7 +376,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; compute_return_epc(regs); @@ -626,6 +584,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, unsigned long origpc, contpc; union mips_instruction insn; struct mm_decoded_insn mminsn; + bool user = user_mode(regs); origpc = regs->cp0_epc; orig31 = regs->regs[31]; @@ -689,7 +648,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if (reg == 31) goto sigbus; - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; LoadW(addr, value, res); @@ -708,7 +667,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if (reg == 31) goto sigbus; - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; value = regs->regs[reg]; @@ -728,7 +687,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if (reg == 31) goto sigbus; - if (!access_ok(addr, 16)) + if (user && !access_ok(addr, 16)) goto sigbus; LoadDW(addr, value, res); @@ -751,7 +710,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if (reg == 31) goto sigbus; - if (!access_ok(addr, 16)) + if (user && !access_ok(addr, 16)) goto sigbus; value = regs->regs[reg]; @@ -774,10 +733,10 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if ((rvar > 9) || !reg) goto sigill; if (reg & 0x10) { - if (!access_ok(addr, 4 * (rvar + 1))) + if (user && !access_ok(addr, 4 * (rvar + 1))) goto sigbus; } else { - if (!access_ok(addr, 4 * rvar)) + if (user && !access_ok(addr, 4 * rvar)) goto sigbus; } if (rvar == 9) @@ -810,10 +769,10 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if ((rvar > 9) || !reg) goto sigill; if (reg & 0x10) { - if (!access_ok(addr, 4 * (rvar + 1))) + if (user && !access_ok(addr, 4 * (rvar + 1))) goto sigbus; } else { - if (!access_ok(addr, 4 * rvar)) + if (user && !access_ok(addr, 4 * rvar)) goto sigbus; } if (rvar == 9) @@ -847,10 +806,10 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if ((rvar > 9) || !reg) goto sigill; if (reg & 0x10) { - if (!access_ok(addr, 8 * (rvar + 1))) + if (user && !access_ok(addr, 8 * (rvar + 1))) goto sigbus; } else { - if (!access_ok(addr, 8 * rvar)) + if (user && !access_ok(addr, 8 * rvar)) goto sigbus; } if (rvar == 9) @@ -888,10 +847,10 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, 
if ((rvar > 9) || !reg) goto sigill; if (reg & 0x10) { - if (!access_ok(addr, 8 * (rvar + 1))) + if (user && !access_ok(addr, 8 * (rvar + 1))) goto sigbus; } else { - if (!access_ok(addr, 8 * rvar)) + if (user && !access_ok(addr, 8 * rvar)) goto sigbus; } if (rvar == 9) @@ -1010,7 +969,7 @@ fpu_emul: case mm_lwm16_op: reg = insn.mm16_m_format.rlist; rvar = reg + 1; - if (!access_ok(addr, 4 * rvar)) + if (user && !access_ok(addr, 4 * rvar)) goto sigbus; for (i = 16; rvar; rvar--, i++) { @@ -1030,7 +989,7 @@ fpu_emul: case mm_swm16_op: reg = insn.mm16_m_format.rlist; rvar = reg + 1; - if (!access_ok(addr, 4 * rvar)) + if (user && !access_ok(addr, 4 * rvar)) goto sigbus; for (i = 16; rvar; rvar--, i++) { @@ -1084,7 +1043,7 @@ fpu_emul: } loadHW: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; LoadHW(addr, value, res); @@ -1094,7 +1053,7 @@ loadHW: goto success; loadHWU: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; LoadHWU(addr, value, res); @@ -1104,7 +1063,7 @@ loadHWU: goto success; loadW: - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; LoadW(addr, value, res); @@ -1122,7 +1081,7 @@ loadWU: * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; LoadWU(addr, value, res); @@ -1144,7 +1103,7 @@ loadDW: * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; LoadDW(addr, value, res); @@ -1158,7 +1117,7 @@ loadDW: goto sigill; storeHW: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; value = regs->regs[reg]; @@ -1168,7 +1127,7 @@ storeHW: goto success; storeW: - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; value = regs->regs[reg]; @@ -1186,7 +1145,7 @@ storeDW: * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; value = regs->regs[reg]; @@ -1243,6 +1202,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) union mips16e_instruction mips16inst, oldinst; unsigned int opcode; int extended = 0; + bool user = user_mode(regs); origpc = regs->cp0_epc; orig31 = regs->regs[31]; @@ -1344,7 +1304,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) goto sigbus; case MIPS16e_lh_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; LoadHW(addr, value, res); @@ -1355,7 +1315,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) break; case MIPS16e_lhu_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; LoadHWU(addr, value, res); @@ -1368,7 +1328,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) case MIPS16e_lw_op: case MIPS16e_lwpc_op: case MIPS16e_lwsp_op: - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; LoadW(addr, value, res); @@ -1387,7 +1347,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. 
*/ - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; LoadWU(addr, value, res); @@ -1411,7 +1371,7 @@ loadDW: * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; LoadDW(addr, value, res); @@ -1426,7 +1386,7 @@ loadDW: goto sigill; case MIPS16e_sh_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; MIPS16e_compute_return_epc(regs, &oldinst); @@ -1439,7 +1399,7 @@ loadDW: case MIPS16e_sw_op: case MIPS16e_swsp_op: case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */ - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; MIPS16e_compute_return_epc(regs, &oldinst); @@ -1459,7 +1419,7 @@ writeDW: * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; MIPS16e_compute_return_epc(regs, &oldinst); @@ -1515,8 +1475,7 @@ sigill: asmlinkage void do_ade(struct pt_regs *regs) { enum ctx_state prev_state; - unsigned int __user *pc; - mm_segment_t seg; + unsigned int *pc; prev_state = exception_enter(); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, @@ -1551,24 +1510,14 @@ asmlinkage void do_ade(struct pt_regs *regs) show_registers(regs); if (cpu_has_mmips) { - seg = get_fs(); - if (!user_mode(regs)) - set_fs(KERNEL_DS); emulate_load_store_microMIPS(regs, (void __user *)regs->cp0_badvaddr); - set_fs(seg); - return; } if (cpu_has_mips16) { - seg = get_fs(); - if (!user_mode(regs)) - set_fs(KERNEL_DS); emulate_load_store_MIPS16e(regs, (void __user *)regs->cp0_badvaddr); - set_fs(seg); - return; } @@ -1577,13 +1526,9 @@ asmlinkage void do_ade(struct pt_regs *regs) if (unaligned_action == UNALIGNED_ACTION_SHOW) show_registers(regs); - pc = (unsigned int __user *)exception_epc(regs); + pc = (unsigned int *)exception_epc(regs); - seg = get_fs(); - if (!user_mode(regs)) - set_fs(KERNEL_DS); emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc); - set_fs(seg); return; diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index 7d0b91ad2581..3d0cf471f2fe 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -90,7 +90,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mips_vdso_image *image = current->thread.abi->vdso; struct mm_struct *mm = current->mm; - unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn; + unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn, gic_base; struct vm_area_struct *vma; int ret; @@ -158,7 +158,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) /* Map GIC user page. */ if (gic_size) { - gic_pfn = virt_to_phys(mips_gic_base + MIPS_GIC_USER_OFS) >> PAGE_SHIFT; + gic_base = (unsigned long)mips_gic_base + MIPS_GIC_USER_OFS; + gic_pfn = virt_to_phys((void *)gic_base) >> PAGE_SHIFT; ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size, pgprot_noncached(vma->vm_page_prot)); diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 1234834cc4c4..1f98947fe715 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -176,7 +176,7 @@ SECTIONS .fill : { FILL(0); BYTE(0); - . 
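Two smaller cleanups sit in this stretch. In do_ade(), the get_fs()/set_fs(KERNEL_DS) save-and-restore around the emulators is gone because the emulators above now key off user_mode(regs) themselves. In the VDSO setup, the GIC user-page offset is computed as an unsigned long before calling virt_to_phys(), which avoids doing arithmetic on the __iomem-annotated mips_gic_base pointer. A sketch of that second change, with a made-up function name:

#include <asm/io.h>
#include <asm/mips-gic.h>
#include <asm/page.h>

/* Illustrative only: compute the PFN of the GIC user page from the
 * ioremapped GIC base, doing the offset math on an unsigned long so
 * virt_to_phys() is handed a plain (void *). */
static unsigned long gic_user_pfn(void __iomem *gic_io_base)
{
        unsigned long gic_base = (unsigned long)gic_io_base + MIPS_GIC_USER_OFS;

        return virt_to_phys((void *)gic_base) >> PAGE_SHIFT;
}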
= ALIGN(8); + STRUCT_ALIGN(); } __appended_dtb = .; /* leave space for appended DTB */ diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig index 032b3fca6cbb..a77297480f56 100644 --- a/arch/mips/kvm/Kconfig +++ b/arch/mips/kvm/Kconfig @@ -30,40 +30,6 @@ config KVM help Support for hosting Guest kernels. -choice - prompt "Virtualization mode" - depends on KVM - default KVM_MIPS_TE - -config KVM_MIPS_TE - bool "Trap & Emulate" - depends on CPU_MIPS32_R2 - help - Use trap and emulate to virtualize 32-bit guests in user mode. This - does not require any special hardware Virtualization support beyond - standard MIPS32 r2 or later, but it does require the guest kernel - to be configured with CONFIG_KVM_GUEST=y so that it resides in the - user address segment. - -config KVM_MIPS_VZ - bool "MIPS Virtualization (VZ) ASE" - help - Use the MIPS Virtualization (VZ) ASE to virtualize guests. This - supports running unmodified guest kernels (with CONFIG_KVM_GUEST=n), - but requires hardware support. - -endchoice - -config KVM_MIPS_DYN_TRANS - bool "KVM/MIPS: Dynamic binary translation to reduce traps" - depends on KVM_MIPS_TE - default y - help - When running in Trap & Emulate mode patch privileged - instructions to reduce the number of traps. - - If unsure, say Y. - config KVM_MIPS_DEBUG_COP0_COUNTERS bool "Maintain counters for COP0 accesses" depends on KVM diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile index 506c4ac0ba1c..30cc060857c7 100644 --- a/arch/mips/kvm/Makefile +++ b/arch/mips/kvm/Makefile @@ -9,7 +9,7 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \ - interrupt.o stats.o commpage.o \ + interrupt.o stats.o \ fpu.o kvm-objs += hypcall.o kvm-objs += mmu.o @@ -17,11 +17,6 @@ ifdef CONFIG_CPU_LOONGSON64 kvm-objs += loongson_ipi.o endif -ifdef CONFIG_KVM_MIPS_VZ kvm-objs += vz.o -else -kvm-objs += dyntrans.o -kvm-objs += trap_emul.o -endif obj-$(CONFIG_KVM) += kvm.o obj-y += callback.o tlb.o diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c deleted file mode 100644 index 5812e6145801..000000000000 --- a/arch/mips/kvm/commpage.c +++ /dev/null @@ -1,32 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * commpage, currently used for Virtual COP0 registers. - * Mapped into the guest kernel @ KVM_GUEST_COMMPAGE_ADDR. - * - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. - * Authors: Sanjay Lal <sanjayl@kymasys.com> - */ - -#include <linux/errno.h> -#include <linux/err.h> -#include <linux/vmalloc.h> -#include <linux/fs.h> -#include <linux/memblock.h> -#include <asm/page.h> -#include <asm/cacheflush.h> -#include <asm/mmu_context.h> - -#include <linux/kvm_host.h> - -#include "commpage.h" - -void kvm_mips_commpage_init(struct kvm_vcpu *vcpu) -{ - struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage; - - /* Specific init values for fields */ - vcpu->arch.cop0 = &page->cop0; -} diff --git a/arch/mips/kvm/commpage.h b/arch/mips/kvm/commpage.h deleted file mode 100644 index 08c5fa2bbc0f..000000000000 --- a/arch/mips/kvm/commpage.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
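From here on, the KVM changes all follow from one decision: the Trap & Emulate backend, and with it dynamic binary translation, the commpage and the KVM_MIPS_TE/KVM_MIPS_VZ Kconfig choice, is removed, leaving the hardware-assisted VZ backend as the only way to run guests, so vz.o is now built unconditionally. A hedged sketch of what an init-time capability check can look like once VZ is the sole backend (the helper name is invented; cpu_has_vz is the real feature test):

#include <asm/cpu-features.h>
#include <linux/errno.h>

/* Illustrative: with Trap & Emulate gone there is nothing to fall back
 * on, so initialisation can simply refuse to run without the VZ ASE. */
static int kvm_mips_require_vz(void)
{
        if (!cpu_has_vz)
                return -ENODEV;
        return 0;
}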
- * - * KVM/MIPS: commpage: mapped into get kernel space - * - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. - * Authors: Sanjay Lal <sanjayl@kymasys.com> - */ - -#ifndef __KVM_MIPS_COMMPAGE_H__ -#define __KVM_MIPS_COMMPAGE_H__ - -struct kvm_mips_commpage { - /* COP0 state is mapped into Guest kernel via commpage */ - struct mips_coproc cop0; -}; - -#define KVM_MIPS_COMM_EIDI_OFFSET 0x0 - -extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu); - -#endif /* __KVM_MIPS_COMMPAGE_H__ */ diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c deleted file mode 100644 index d77b61b3d6ee..000000000000 --- a/arch/mips/kvm/dyntrans.c +++ /dev/null @@ -1,143 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * KVM/MIPS: Binary Patching for privileged instructions, reduces traps. - * - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. - * Authors: Sanjay Lal <sanjayl@kymasys.com> - */ - -#include <linux/errno.h> -#include <linux/err.h> -#include <linux/highmem.h> -#include <linux/kvm_host.h> -#include <linux/uaccess.h> -#include <linux/vmalloc.h> -#include <linux/fs.h> -#include <linux/memblock.h> -#include <asm/cacheflush.h> - -#include "commpage.h" - -/** - * kvm_mips_trans_replace() - Replace trapping instruction in guest memory. - * @vcpu: Virtual CPU. - * @opc: PC of instruction to replace. - * @replace: Instruction to write - */ -static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc, - union mips_instruction replace) -{ - unsigned long vaddr = (unsigned long)opc; - int err; - -retry: - /* The GVA page table is still active so use the Linux TLB handlers */ - kvm_trap_emul_gva_lockless_begin(vcpu); - err = put_user(replace.word, opc); - kvm_trap_emul_gva_lockless_end(vcpu); - - if (unlikely(err)) { - /* - * We write protect clean pages in GVA page table so normal - * Linux TLB mod handler doesn't silently dirty the page. - * Its also possible we raced with a GVA invalidation. - * Try to force the page to become dirty. - */ - err = kvm_trap_emul_gva_fault(vcpu, vaddr, true); - if (unlikely(err)) { - kvm_info("%s: Address unwriteable: %p\n", - __func__, opc); - return -EFAULT; - } - - /* - * Try again. This will likely trigger a TLB refill, which will - * fetch the new dirty entry from the GVA page table, which - * should then succeed. - */ - goto retry; - } - __local_flush_icache_user_range(vaddr, vaddr + 4); - - return 0; -} - -int kvm_mips_trans_cache_index(union mips_instruction inst, u32 *opc, - struct kvm_vcpu *vcpu) -{ - union mips_instruction nop_inst = { 0 }; - - /* Replace the CACHE instruction, with a NOP */ - return kvm_mips_trans_replace(vcpu, opc, nop_inst); -} - -/* - * Address based CACHE instructions are transformed into synci(s). 
A little - * heavy for just D-cache invalidates, but avoids an expensive trap - */ -int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc, - struct kvm_vcpu *vcpu) -{ - union mips_instruction synci_inst = { 0 }; - - synci_inst.i_format.opcode = bcond_op; - synci_inst.i_format.rs = inst.i_format.rs; - synci_inst.i_format.rt = synci_op; - if (cpu_has_mips_r6) - synci_inst.i_format.simmediate = inst.spec3_format.simmediate; - else - synci_inst.i_format.simmediate = inst.i_format.simmediate; - - return kvm_mips_trans_replace(vcpu, opc, synci_inst); -} - -int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc, - struct kvm_vcpu *vcpu) -{ - union mips_instruction mfc0_inst = { 0 }; - u32 rd, sel; - - rd = inst.c0r_format.rd; - sel = inst.c0r_format.sel; - - if (rd == MIPS_CP0_ERRCTL && sel == 0) { - mfc0_inst.r_format.opcode = spec_op; - mfc0_inst.r_format.rd = inst.c0r_format.rt; - mfc0_inst.r_format.func = add_op; - } else { - mfc0_inst.i_format.opcode = lw_op; - mfc0_inst.i_format.rt = inst.c0r_format.rt; - mfc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR | - offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]); -#ifdef CONFIG_CPU_BIG_ENDIAN - if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8) - mfc0_inst.i_format.simmediate |= 4; -#endif - } - - return kvm_mips_trans_replace(vcpu, opc, mfc0_inst); -} - -int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc, - struct kvm_vcpu *vcpu) -{ - union mips_instruction mtc0_inst = { 0 }; - u32 rd, sel; - - rd = inst.c0r_format.rd; - sel = inst.c0r_format.sel; - - mtc0_inst.i_format.opcode = sw_op; - mtc0_inst.i_format.rt = inst.c0r_format.rt; - mtc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR | - offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]); -#ifdef CONFIG_CPU_BIG_ENDIAN - if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8) - mtc0_inst.i_format.simmediate |= 4; -#endif - - return kvm_mips_trans_replace(vcpu, opc, mtc0_inst); -} diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index d70c4f8e14e2..22e745e49b0a 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -30,7 +30,6 @@ #define CONFIG_MIPS_MT #include "interrupt.h" -#include "commpage.h" #include "trace.h" @@ -276,7 +275,8 @@ int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out) *out = vcpu->arch.host_cp0_badinstr; return 0; } else { - return kvm_get_inst(opc, vcpu, out); + WARN_ONCE(1, "CPU doesn't have BadInstr register\n"); + return -EINVAL; } } @@ -297,7 +297,8 @@ int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out) *out = vcpu->arch.host_cp0_badinstrp; return 0; } else { - return kvm_get_inst(opc, vcpu, out); + WARN_ONCE(1, "CPU doesn't have BadInstrp register\n"); + return -EINVAL; } } @@ -721,7 +722,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack) * preemption until the new value is written to prevent restore of a * GTOffset corresponding to the old CP0_Compare value. */ - if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta > 0) { + if (delta > 0) { preempt_disable(); write_c0_gtoffset(compare - read_c0_count()); back_to_back_c0_hazard(); @@ -734,7 +735,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack) if (ack) kvm_mips_callbacks->dequeue_timer_int(vcpu); - else if (IS_ENABLED(CONFIG_KVM_MIPS_VZ)) + else /* * With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so * preserve guest CP0_Cause.TI if we don't want to ack it. 
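kvm_get_badinstr() and kvm_get_badinstrp() used to fall back to kvm_get_inst(), which re-read the faulting instruction through the guest virtual address space, a trick that only worked for the Trap & Emulate backend. They now warn once and fail, so callers have to treat a CPU without the BadInstr/BadInstrP registers as a hard emulation error, roughly as sketched below (the wrapper is hypothetical; the call and the EMULATE_FAIL handling mirror the existing callers):

/* Illustrative caller pattern after the change. */
static enum emulation_result fetch_bad_inst(u32 *opc, struct kvm_vcpu *vcpu,
                                            union mips_instruction *inst)
{
        if (kvm_get_badinstr(opc, vcpu, &inst->word))
                return EMULATE_FAIL;    /* no BadInstr register: cannot decode */
        return EMULATE_DONE;
}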
@@ -743,15 +744,13 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack) kvm_write_c0_guest_compare(cop0, compare); - if (IS_ENABLED(CONFIG_KVM_MIPS_VZ)) { - if (delta > 0) - preempt_enable(); + if (delta > 0) + preempt_enable(); - back_to_back_c0_hazard(); + back_to_back_c0_hazard(); - if (!ack && cause & CAUSEF_TI) - kvm_write_c0_guest_cause(cop0, cause); - } + if (!ack && cause & CAUSEF_TI) + kvm_write_c0_guest_cause(cop0, cause); /* resume_hrtimer() takes care of timer interrupts > count */ if (!dc) @@ -762,7 +761,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack) * until after the new CP0_Compare is written, otherwise new guest * CP0_Count could hit new guest CP0_Compare. */ - if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta <= 0) + if (delta <= 0) write_c0_gtoffset(compare - read_c0_count()); } @@ -943,29 +942,6 @@ enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu) return HRTIMER_RESTART; } -enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - enum emulation_result er = EMULATE_DONE; - - if (kvm_read_c0_guest_status(cop0) & ST0_ERL) { - kvm_clear_c0_guest_status(cop0, ST0_ERL); - vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); - } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { - kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, - kvm_read_c0_guest_epc(cop0)); - kvm_clear_c0_guest_status(cop0, ST0_EXL); - vcpu->arch.pc = kvm_read_c0_guest_epc(cop0); - - } else { - kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", - vcpu->arch.pc); - er = EMULATE_FAIL; - } - - return er; -} - enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) { kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc, @@ -991,609 +967,6 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) return EMULATE_DONE; } -static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu, - unsigned long entryhi) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; - int cpu, i; - u32 nasid = entryhi & KVM_ENTRYHI_ASID; - - if (((kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID) != nasid)) { - trace_kvm_asid_change(vcpu, kvm_read_c0_guest_entryhi(cop0) & - KVM_ENTRYHI_ASID, nasid); - - /* - * Flush entries from the GVA page tables. - * Guest user page table will get flushed lazily on re-entry to - * guest user if the guest ASID actually changes. - */ - kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_KERN); - - /* - * Regenerate/invalidate kernel MMU context. - * The user MMU context will be regenerated lazily on re-entry - * to guest user if the guest ASID actually changes. 
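The kvm_mips_write_compare() hunks drop the IS_ENABLED(CONFIG_KVM_MIPS_VZ) guards rather than any logic: the ordering the old VZ-only branch enforced is now unconditional. Condensed to its core (preemption control and the back_to_back_c0_hazard() barriers omitted), the idea is:

/* Sketch, not the full function: keep the guest timer offset consistent
 * with the new Compare value on either side of the write. */
if (delta > 0)                          /* Compare moves into the future */
        write_c0_gtoffset(compare - read_c0_count());

kvm_write_c0_guest_compare(cop0, compare);

if (delta <= 0)                         /* Compare moves backwards */
        write_c0_gtoffset(compare - read_c0_count());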
- */ - preempt_disable(); - cpu = smp_processor_id(); - get_new_mmu_context(kern_mm); - for_each_possible_cpu(i) - if (i != cpu) - set_cpu_context(i, kern_mm, 0); - preempt_enable(); - } - kvm_write_c0_guest_entryhi(cop0, entryhi); -} - -enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_mips_tlb *tlb; - unsigned long pc = vcpu->arch.pc; - int index; - - index = kvm_read_c0_guest_index(cop0); - if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { - /* UNDEFINED */ - kvm_debug("[%#lx] TLBR Index %#x out of range\n", pc, index); - index &= KVM_MIPS_GUEST_TLB_SIZE - 1; - } - - tlb = &vcpu->arch.guest_tlb[index]; - kvm_write_c0_guest_pagemask(cop0, tlb->tlb_mask); - kvm_write_c0_guest_entrylo0(cop0, tlb->tlb_lo[0]); - kvm_write_c0_guest_entrylo1(cop0, tlb->tlb_lo[1]); - kvm_mips_change_entryhi(vcpu, tlb->tlb_hi); - - return EMULATE_DONE; -} - -/** - * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map. - * @vcpu: VCPU with changed mappings. - * @tlb: TLB entry being removed. - * - * This is called to indicate a single change in guest MMU mappings, so that we - * can arrange TLB flushes on this and other CPUs. - */ -static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu, - struct kvm_mips_tlb *tlb) -{ - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; - struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; - int cpu, i; - bool user; - - /* No need to flush for entries which are already invalid */ - if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V)) - return; - /* Don't touch host kernel page tables or TLB mappings */ - if ((unsigned long)tlb->tlb_hi > 0x7fffffff) - return; - /* User address space doesn't need flushing for KSeg2/3 changes */ - user = tlb->tlb_hi < KVM_GUEST_KSEG0; - - preempt_disable(); - - /* Invalidate page table entries */ - kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user); - - /* - * Probe the shadow host TLB for the entry being overwritten, if one - * matches, invalidate it - */ - kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi, user, true); - - /* Invalidate the whole ASID on other CPUs */ - cpu = smp_processor_id(); - for_each_possible_cpu(i) { - if (i == cpu) - continue; - if (user) - set_cpu_context(i, user_mm, 0); - set_cpu_context(i, kern_mm, 0); - } - - preempt_enable(); -} - -/* Write Guest TLB Entry @ Index */ -enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - int index = kvm_read_c0_guest_index(cop0); - struct kvm_mips_tlb *tlb = NULL; - unsigned long pc = vcpu->arch.pc; - - if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { - kvm_debug("%s: illegal index: %d\n", __func__, index); - kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", - pc, index, kvm_read_c0_guest_entryhi(cop0), - kvm_read_c0_guest_entrylo0(cop0), - kvm_read_c0_guest_entrylo1(cop0), - kvm_read_c0_guest_pagemask(cop0)); - index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE; - } - - tlb = &vcpu->arch.guest_tlb[index]; - - kvm_mips_invalidate_guest_tlb(vcpu, tlb); - - tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); - tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); - tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0); - tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0); - - kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", - pc, index, kvm_read_c0_guest_entryhi(cop0), - kvm_read_c0_guest_entrylo0(cop0), - 
kvm_read_c0_guest_entrylo1(cop0), - kvm_read_c0_guest_pagemask(cop0)); - - return EMULATE_DONE; -} - -/* Write Guest TLB Entry @ Random Index */ -enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_mips_tlb *tlb = NULL; - unsigned long pc = vcpu->arch.pc; - int index; - - index = prandom_u32_max(KVM_MIPS_GUEST_TLB_SIZE); - tlb = &vcpu->arch.guest_tlb[index]; - - kvm_mips_invalidate_guest_tlb(vcpu, tlb); - - tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); - tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); - tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0); - tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0); - - kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", - pc, index, kvm_read_c0_guest_entryhi(cop0), - kvm_read_c0_guest_entrylo0(cop0), - kvm_read_c0_guest_entrylo1(cop0)); - - return EMULATE_DONE; -} - -enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - long entryhi = kvm_read_c0_guest_entryhi(cop0); - unsigned long pc = vcpu->arch.pc; - int index = -1; - - index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); - - kvm_write_c0_guest_index(cop0, index); - - kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi, - index); - - return EMULATE_DONE; -} - -/** - * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1 - * @vcpu: Virtual CPU. - * - * Finds the mask of bits which are writable in the guest's Config1 CP0 - * register, by userland (currently read-only to the guest). - */ -unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu) -{ - unsigned int mask = 0; - - /* Permit FPU to be present if FPU is supported */ - if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) - mask |= MIPS_CONF1_FP; - - return mask; -} - -/** - * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3 - * @vcpu: Virtual CPU. - * - * Finds the mask of bits which are writable in the guest's Config3 CP0 - * register, by userland (currently read-only to the guest). - */ -unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu) -{ - /* Config4 and ULRI are optional */ - unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI; - - /* Permit MSA to be present if MSA is supported */ - if (kvm_mips_guest_can_have_msa(&vcpu->arch)) - mask |= MIPS_CONF3_MSA; - - return mask; -} - -/** - * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4 - * @vcpu: Virtual CPU. - * - * Finds the mask of bits which are writable in the guest's Config4 CP0 - * register, by userland (currently read-only to the guest). - */ -unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu) -{ - /* Config5 is optional */ - unsigned int mask = MIPS_CONF_M; - - /* KScrExist */ - mask |= 0xfc << MIPS_CONF4_KSCREXIST_SHIFT; - - return mask; -} - -/** - * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5 - * @vcpu: Virtual CPU. - * - * Finds the mask of bits which are writable in the guest's Config5 CP0 - * register, by the guest itself. - */ -unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu) -{ - unsigned int mask = 0; - - /* Permit MSAEn changes if MSA supported and enabled */ - if (kvm_mips_guest_has_msa(&vcpu->arch)) - mask |= MIPS_CONF5_MSAEN; - - /* - * Permit guest FPU mode changes if FPU is enabled and the relevant - * feature exists according to FIR register. 
- */ - if (kvm_mips_guest_has_fpu(&vcpu->arch)) { - if (cpu_has_fre) - mask |= MIPS_CONF5_FRE; - /* We don't support UFR or UFE */ - } - - return mask; -} - -enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst, - u32 *opc, u32 cause, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - enum emulation_result er = EMULATE_DONE; - u32 rt, rd, sel; - unsigned long curr_pc; - - /* - * Update PC and hold onto current PC in case there is - * an error and we want to rollback the PC - */ - curr_pc = vcpu->arch.pc; - er = update_pc(vcpu, cause); - if (er == EMULATE_FAIL) - return er; - - if (inst.co_format.co) { - switch (inst.co_format.func) { - case tlbr_op: /* Read indexed TLB entry */ - er = kvm_mips_emul_tlbr(vcpu); - break; - case tlbwi_op: /* Write indexed */ - er = kvm_mips_emul_tlbwi(vcpu); - break; - case tlbwr_op: /* Write random */ - er = kvm_mips_emul_tlbwr(vcpu); - break; - case tlbp_op: /* TLB Probe */ - er = kvm_mips_emul_tlbp(vcpu); - break; - case rfe_op: - kvm_err("!!!COP0_RFE!!!\n"); - break; - case eret_op: - er = kvm_mips_emul_eret(vcpu); - goto dont_update_pc; - case wait_op: - er = kvm_mips_emul_wait(vcpu); - break; - case hypcall_op: - er = kvm_mips_emul_hypcall(vcpu, inst); - break; - } - } else { - rt = inst.c0r_format.rt; - rd = inst.c0r_format.rd; - sel = inst.c0r_format.sel; - - switch (inst.c0r_format.rs) { - case mfc_op: -#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS - cop0->stat[rd][sel]++; -#endif - /* Get reg */ - if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { - vcpu->arch.gprs[rt] = - (s32)kvm_mips_read_count(vcpu); - } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) { - vcpu->arch.gprs[rt] = 0x0; -#ifdef CONFIG_KVM_MIPS_DYN_TRANS - kvm_mips_trans_mfc0(inst, opc, vcpu); -#endif - } else { - vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel]; - -#ifdef CONFIG_KVM_MIPS_DYN_TRANS - kvm_mips_trans_mfc0(inst, opc, vcpu); -#endif - } - - trace_kvm_hwr(vcpu, KVM_TRACE_MFC0, - KVM_TRACE_COP0(rd, sel), - vcpu->arch.gprs[rt]); - break; - - case dmfc_op: - vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; - - trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0, - KVM_TRACE_COP0(rd, sel), - vcpu->arch.gprs[rt]); - break; - - case mtc_op: -#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS - cop0->stat[rd][sel]++; -#endif - trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, - KVM_TRACE_COP0(rd, sel), - vcpu->arch.gprs[rt]); - - if ((rd == MIPS_CP0_TLB_INDEX) - && (vcpu->arch.gprs[rt] >= - KVM_MIPS_GUEST_TLB_SIZE)) { - kvm_err("Invalid TLB Index: %ld", - vcpu->arch.gprs[rt]); - er = EMULATE_FAIL; - break; - } - if ((rd == MIPS_CP0_PRID) && (sel == 1)) { - /* - * Preserve core number, and keep the exception - * base in guest KSeg0. 
- */ - kvm_change_c0_guest_ebase(cop0, 0x1ffff000, - vcpu->arch.gprs[rt]); - } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { - kvm_mips_change_entryhi(vcpu, - vcpu->arch.gprs[rt]); - } - /* Are we writing to COUNT */ - else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { - kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); - goto done; - } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) { - /* If we are writing to COMPARE */ - /* Clear pending timer interrupt, if any */ - kvm_mips_write_compare(vcpu, - vcpu->arch.gprs[rt], - true); - } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { - unsigned int old_val, val, change; - - old_val = kvm_read_c0_guest_status(cop0); - val = vcpu->arch.gprs[rt]; - change = val ^ old_val; - - /* Make sure that the NMI bit is never set */ - val &= ~ST0_NMI; - - /* - * Don't allow CU1 or FR to be set unless FPU - * capability enabled and exists in guest - * configuration. - */ - if (!kvm_mips_guest_has_fpu(&vcpu->arch)) - val &= ~(ST0_CU1 | ST0_FR); - - /* - * Also don't allow FR to be set if host doesn't - * support it. - */ - if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64)) - val &= ~ST0_FR; - - - /* Handle changes in FPU mode */ - preempt_disable(); - - /* - * FPU and Vector register state is made - * UNPREDICTABLE by a change of FR, so don't - * even bother saving it. - */ - if (change & ST0_FR) - kvm_drop_fpu(vcpu); - - /* - * If MSA state is already live, it is undefined - * how it interacts with FR=0 FPU state, and we - * don't want to hit reserved instruction - * exceptions trying to save the MSA state later - * when CU=1 && FR=1, so play it safe and save - * it first. - */ - if (change & ST0_CU1 && !(val & ST0_FR) && - vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) - kvm_lose_fpu(vcpu); - - /* - * Propagate CU1 (FPU enable) changes - * immediately if the FPU context is already - * loaded. When disabling we leave the context - * loaded so it can be quickly enabled again in - * the near future. - */ - if (change & ST0_CU1 && - vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) - change_c0_status(ST0_CU1, val); - - preempt_enable(); - - kvm_write_c0_guest_status(cop0, val); - -#ifdef CONFIG_KVM_MIPS_DYN_TRANS - /* - * If FPU present, we need CU1/FR bits to take - * effect fairly soon. - */ - if (!kvm_mips_guest_has_fpu(&vcpu->arch)) - kvm_mips_trans_mtc0(inst, opc, vcpu); -#endif - } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) { - unsigned int old_val, val, change, wrmask; - - old_val = kvm_read_c0_guest_config5(cop0); - val = vcpu->arch.gprs[rt]; - - /* Only a few bits are writable in Config5 */ - wrmask = kvm_mips_config5_wrmask(vcpu); - change = (val ^ old_val) & wrmask; - val = old_val ^ change; - - - /* Handle changes in FPU/MSA modes */ - preempt_disable(); - - /* - * Propagate FRE changes immediately if the FPU - * context is already loaded. - */ - if (change & MIPS_CONF5_FRE && - vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) - change_c0_config5(MIPS_CONF5_FRE, val); - - /* - * Propagate MSAEn changes immediately if the - * MSA context is already loaded. When disabling - * we leave the context loaded so it can be - * quickly enabled again in the near future. 
- */ - if (change & MIPS_CONF5_MSAEN && - vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) - change_c0_config5(MIPS_CONF5_MSAEN, - val); - - preempt_enable(); - - kvm_write_c0_guest_config5(cop0, val); - } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { - u32 old_cause, new_cause; - - old_cause = kvm_read_c0_guest_cause(cop0); - new_cause = vcpu->arch.gprs[rt]; - /* Update R/W bits */ - kvm_change_c0_guest_cause(cop0, 0x08800300, - new_cause); - /* DC bit enabling/disabling timer? */ - if ((old_cause ^ new_cause) & CAUSEF_DC) { - if (new_cause & CAUSEF_DC) - kvm_mips_count_disable_cause(vcpu); - else - kvm_mips_count_enable_cause(vcpu); - } - } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) { - u32 mask = MIPS_HWRENA_CPUNUM | - MIPS_HWRENA_SYNCISTEP | - MIPS_HWRENA_CC | - MIPS_HWRENA_CCRES; - - if (kvm_read_c0_guest_config3(cop0) & - MIPS_CONF3_ULRI) - mask |= MIPS_HWRENA_ULR; - cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask; - } else { - cop0->reg[rd][sel] = vcpu->arch.gprs[rt]; -#ifdef CONFIG_KVM_MIPS_DYN_TRANS - kvm_mips_trans_mtc0(inst, opc, vcpu); -#endif - } - break; - - case dmtc_op: - kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", - vcpu->arch.pc, rt, rd, sel); - trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0, - KVM_TRACE_COP0(rd, sel), - vcpu->arch.gprs[rt]); - er = EMULATE_FAIL; - break; - - case mfmc0_op: -#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS - cop0->stat[MIPS_CP0_STATUS][0]++; -#endif - if (rt != 0) - vcpu->arch.gprs[rt] = - kvm_read_c0_guest_status(cop0); - /* EI */ - if (inst.mfmc0_format.sc) { - kvm_debug("[%#lx] mfmc0_op: EI\n", - vcpu->arch.pc); - kvm_set_c0_guest_status(cop0, ST0_IE); - } else { - kvm_debug("[%#lx] mfmc0_op: DI\n", - vcpu->arch.pc); - kvm_clear_c0_guest_status(cop0, ST0_IE); - } - - break; - - case wrpgpr_op: - { - u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf; - u32 pss = - (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; - /* - * We don't support any shadow register sets, so - * SRSCtl[PSS] == SRSCtl[CSS] = 0 - */ - if (css || pss) { - er = EMULATE_FAIL; - break; - } - kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd, - vcpu->arch.gprs[rt]); - vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt]; - } - break; - default: - kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n", - vcpu->arch.pc, inst.c0r_format.rs); - er = EMULATE_FAIL; - break; - } - } - -done: - /* Rollback PC only if emulation was unsuccessful */ - if (er == EMULATE_FAIL) - vcpu->arch.pc = curr_pc; - -dont_update_pc: - /* - * This is for special instructions whose emulation - * updates the PC, so do not overwrite the PC under - * any circumstances - */ - - return er; -} - enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, u32 cause, struct kvm_vcpu *vcpu) @@ -1623,7 +996,7 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, goto out_fail; switch (inst.i_format.opcode) { -#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) +#if defined(CONFIG_64BIT) case sd_op: run->mmio.len = 8; *(u64 *)data = vcpu->arch.gprs[rt]; @@ -1721,7 +1094,7 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, vcpu->arch.gprs[rt], *(u32 *)data); break; -#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) +#if defined(CONFIG_64BIT) case sdl_op: run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( vcpu->arch.host_cp0_badvaddr) & (~0x7); @@ -1928,7 +1301,7 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, vcpu->mmio_needed = 2; /* signed */ switch (op) { -#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) +#if 
defined(CONFIG_64BIT) case ld_op: run->mmio.len = 8; break; @@ -2003,7 +1376,7 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, } break; -#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) +#if defined(CONFIG_64BIT) case ldl_op: run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( vcpu->arch.host_cp0_badvaddr) & (~0x7); @@ -2135,815 +1508,6 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, return EMULATE_DO_MMIO; } -#ifndef CONFIG_KVM_MIPS_VZ -static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long), - unsigned long curr_pc, - unsigned long addr, - struct kvm_vcpu *vcpu, - u32 cause) -{ - int err; - - for (;;) { - /* Carefully attempt the cache operation */ - kvm_trap_emul_gva_lockless_begin(vcpu); - err = fn(addr); - kvm_trap_emul_gva_lockless_end(vcpu); - - if (likely(!err)) - return EMULATE_DONE; - - /* - * Try to handle the fault and retry, maybe we just raced with a - * GVA invalidation. - */ - switch (kvm_trap_emul_gva_fault(vcpu, addr, false)) { - case KVM_MIPS_GVA: - case KVM_MIPS_GPA: - /* bad virtual or physical address */ - return EMULATE_FAIL; - case KVM_MIPS_TLB: - /* no matching guest TLB */ - vcpu->arch.host_cp0_badvaddr = addr; - vcpu->arch.pc = curr_pc; - kvm_mips_emulate_tlbmiss_ld(cause, NULL, vcpu); - return EMULATE_EXCEPT; - case KVM_MIPS_TLBINV: - /* invalid matching guest TLB */ - vcpu->arch.host_cp0_badvaddr = addr; - vcpu->arch.pc = curr_pc; - kvm_mips_emulate_tlbinv_ld(cause, NULL, vcpu); - return EMULATE_EXCEPT; - default: - break; - } - } -} - -enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst, - u32 *opc, u32 cause, - struct kvm_vcpu *vcpu) -{ - enum emulation_result er = EMULATE_DONE; - u32 cache, op_inst, op, base; - s16 offset; - struct kvm_vcpu_arch *arch = &vcpu->arch; - unsigned long va; - unsigned long curr_pc; - - /* - * Update PC and hold onto current PC in case there is - * an error and we want to rollback the PC - */ - curr_pc = vcpu->arch.pc; - er = update_pc(vcpu, cause); - if (er == EMULATE_FAIL) - return er; - - base = inst.i_format.rs; - op_inst = inst.i_format.rt; - if (cpu_has_mips_r6) - offset = inst.spec3_format.simmediate; - else - offset = inst.i_format.simmediate; - cache = op_inst & CacheOp_Cache; - op = op_inst & CacheOp_Op; - - va = arch->gprs[base] + offset; - - kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", - cache, op, base, arch->gprs[base], offset); - - /* - * Treat INDEX_INV as a nop, basically issued by Linux on startup to - * invalidate the caches entirely by stepping through all the - * ways/indexes - */ - if (op == Index_Writeback_Inv) { - kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", - vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, - arch->gprs[base], offset); - - if (cache == Cache_D) { -#ifdef CONFIG_CPU_R4K_CACHE_TLB - r4k_blast_dcache(); -#else - switch (boot_cpu_type()) { - case CPU_CAVIUM_OCTEON3: - /* locally flush icache */ - local_flush_icache_range(0, 0); - break; - default: - __flush_cache_all(); - break; - } -#endif - } else if (cache == Cache_I) { -#ifdef CONFIG_CPU_R4K_CACHE_TLB - r4k_blast_icache(); -#else - switch (boot_cpu_type()) { - case CPU_CAVIUM_OCTEON3: - /* locally flush icache */ - local_flush_icache_range(0, 0); - break; - default: - flush_icache_all(); - break; - } -#endif - } else { - kvm_err("%s: unsupported CACHE INDEX operation\n", - __func__); - return EMULATE_FAIL; - } - -#ifdef CONFIG_KVM_MIPS_DYN_TRANS - 
kvm_mips_trans_cache_index(inst, opc, vcpu); -#endif - goto done; - } - - /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */ - if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) { - /* - * Perform the dcache part of icache synchronisation on the - * guest's behalf. - */ - er = kvm_mips_guest_cache_op(protected_writeback_dcache_line, - curr_pc, va, vcpu, cause); - if (er != EMULATE_DONE) - goto done; -#ifdef CONFIG_KVM_MIPS_DYN_TRANS - /* - * Replace the CACHE instruction, with a SYNCI, not the same, - * but avoids a trap - */ - kvm_mips_trans_cache_va(inst, opc, vcpu); -#endif - } else if (op_inst == Hit_Invalidate_I) { - /* Perform the icache synchronisation on the guest's behalf */ - er = kvm_mips_guest_cache_op(protected_writeback_dcache_line, - curr_pc, va, vcpu, cause); - if (er != EMULATE_DONE) - goto done; - er = kvm_mips_guest_cache_op(protected_flush_icache_line, - curr_pc, va, vcpu, cause); - if (er != EMULATE_DONE) - goto done; - -#ifdef CONFIG_KVM_MIPS_DYN_TRANS - /* Replace the CACHE instruction, with a SYNCI */ - kvm_mips_trans_cache_va(inst, opc, vcpu); -#endif - } else { - kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", - cache, op, base, arch->gprs[base], offset); - er = EMULATE_FAIL; - } - -done: - /* Rollback PC only if emulation was unsuccessful */ - if (er == EMULATE_FAIL) - vcpu->arch.pc = curr_pc; - /* Guest exception needs guest to resume */ - if (er == EMULATE_EXCEPT) - er = EMULATE_DONE; - - return er; -} - -enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc, - struct kvm_vcpu *vcpu) -{ - union mips_instruction inst; - enum emulation_result er = EMULATE_DONE; - int err; - - /* Fetch the instruction. */ - if (cause & CAUSEF_BD) - opc += 1; - err = kvm_get_badinstr(opc, vcpu, &inst.word); - if (err) - return EMULATE_FAIL; - - switch (inst.r_format.opcode) { - case cop0_op: - er = kvm_mips_emulate_CP0(inst, opc, cause, vcpu); - break; - -#ifndef CONFIG_CPU_MIPSR6 - case cache_op: - ++vcpu->stat.cache_exits; - trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE); - er = kvm_mips_emulate_cache(inst, opc, cause, vcpu); - break; -#else - case spec3_op: - switch (inst.spec3_format.func) { - case cache6_op: - ++vcpu->stat.cache_exits; - trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE); - er = kvm_mips_emulate_cache(inst, opc, cause, - vcpu); - break; - default: - goto unknown; - } - break; -unknown: -#endif - - default: - kvm_err("Instruction emulation not supported (%p/%#x)\n", opc, - inst.word); - kvm_arch_vcpu_dump_regs(vcpu); - er = EMULATE_FAIL; - break; - } - - return er; -} -#endif /* CONFIG_KVM_MIPS_VZ */ - -/** - * kvm_mips_guest_exception_base() - Find guest exception vector base address. - * - * Returns: The base address of the current guest exception vector, taking - * both Guest.CP0_Status.BEV and Guest.CP0_EBase into account. 
- */ -long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - - if (kvm_read_c0_guest_status(cop0) & ST0_BEV) - return KVM_GUEST_CKSEG1ADDR(0x1fc00200); - else - return kvm_read_c0_guest_ebase(cop0) & MIPS_EBASE_BASE; -} - -enum emulation_result kvm_mips_emulate_syscall(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc); - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_SYS << CAUSEB_EXCCODE)); - - /* Set PC to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - } else { - kvm_err("Trying to deliver SYSCALL when EXL is already set\n"); - er = EMULATE_FAIL; - } - - return er; -} - -enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n", - arch->pc); - - /* set pc to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0; - - } else { - kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", - arch->pc); - - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - } - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_TLBL << CAUSEB_EXCCODE)); - - /* setup badvaddr, context and entryhi registers for the guest */ - kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); - /* XXXKYMA: is the context register used by linux??? 
*/ - kvm_write_c0_guest_entryhi(cop0, entryhi); - - return EMULATE_DONE; -} - -enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - unsigned long entryhi = - (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n", - arch->pc); - } else { - kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", - arch->pc); - } - - /* set pc to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_TLBL << CAUSEB_EXCCODE)); - - /* setup badvaddr, context and entryhi registers for the guest */ - kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); - /* XXXKYMA: is the context register used by linux??? */ - kvm_write_c0_guest_entryhi(cop0, entryhi); - - return EMULATE_DONE; -} - -enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", - arch->pc); - - /* Set PC to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0; - } else { - kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", - arch->pc); - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - } - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_TLBS << CAUSEB_EXCCODE)); - - /* setup badvaddr, context and entryhi registers for the guest */ - kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); - /* XXXKYMA: is the context register used by linux??? 
*/ - kvm_write_c0_guest_entryhi(cop0, entryhi); - - return EMULATE_DONE; -} - -enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", - arch->pc); - } else { - kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", - arch->pc); - } - - /* Set PC to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_TLBS << CAUSEB_EXCCODE)); - - /* setup badvaddr, context and entryhi registers for the guest */ - kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); - /* XXXKYMA: is the context register used by linux??? */ - kvm_write_c0_guest_entryhi(cop0, entryhi); - - return EMULATE_DONE; -} - -enum emulation_result kvm_mips_emulate_tlbmod(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); - struct kvm_vcpu_arch *arch = &vcpu->arch; - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n", - arch->pc); - } else { - kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n", - arch->pc); - } - - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_MOD << CAUSEB_EXCCODE)); - - /* setup badvaddr, context and entryhi registers for the guest */ - kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); - /* XXXKYMA: is the context register used by linux??? 
*/ - kvm_write_c0_guest_entryhi(cop0, entryhi); - - return EMULATE_DONE; -} - -enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - } - - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_CPU << CAUSEB_EXCCODE)); - kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE)); - - return EMULATE_DONE; -} - -enum emulation_result kvm_mips_emulate_ri_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("Delivering RI @ pc %#lx\n", arch->pc); - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_RI << CAUSEB_EXCCODE)); - - /* Set PC to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - } else { - kvm_err("Trying to deliver RI when EXL is already set\n"); - er = EMULATE_FAIL; - } - - return er; -} - -enum emulation_result kvm_mips_emulate_bp_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("Delivering BP @ pc %#lx\n", arch->pc); - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_BP << CAUSEB_EXCCODE)); - - /* Set PC to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - } else { - kvm_err("Trying to deliver BP when EXL is already set\n"); - er = EMULATE_FAIL; - } - - return er; -} - -enum emulation_result kvm_mips_emulate_trap_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc); - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_TR << CAUSEB_EXCCODE)); - - /* Set PC to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - } else { - kvm_err("Trying to deliver TRAP when EXL is already set\n"); - er = EMULATE_FAIL; - } - - return er; -} - -enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = 
vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc); - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_MSAFPE << CAUSEB_EXCCODE)); - - /* Set PC to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - } else { - kvm_err("Trying to deliver MSAFPE when EXL is already set\n"); - er = EMULATE_FAIL; - } - - return er; -} - -enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc); - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_FPE << CAUSEB_EXCCODE)); - - /* Set PC to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - } else { - kvm_err("Trying to deliver FPE when EXL is already set\n"); - er = EMULATE_FAIL; - } - - return er; -} - -enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc); - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_MSADIS << CAUSEB_EXCCODE)); - - /* Set PC to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - } else { - kvm_err("Trying to deliver MSADIS when EXL is already set\n"); - er = EMULATE_FAIL; - } - - return er; -} - -enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; - unsigned long curr_pc; - union mips_instruction inst; - int err; - - /* - * Update PC and hold onto current PC in case there is - * an error and we want to rollback the PC - */ - curr_pc = vcpu->arch.pc; - er = update_pc(vcpu, cause); - if (er == EMULATE_FAIL) - return er; - - /* Fetch the instruction. 
*/ - if (cause & CAUSEF_BD) - opc += 1; - err = kvm_get_badinstr(opc, vcpu, &inst.word); - if (err) { - kvm_err("%s: Cannot get inst @ %p (%d)\n", __func__, opc, err); - return EMULATE_FAIL; - } - - if (inst.r_format.opcode == spec3_op && - inst.r_format.func == rdhwr_op && - inst.r_format.rs == 0 && - (inst.r_format.re >> 3) == 0) { - int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); - int rd = inst.r_format.rd; - int rt = inst.r_format.rt; - int sel = inst.r_format.re & 0x7; - - /* If usermode, check RDHWR rd is allowed by guest HWREna */ - if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) { - kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n", - rd, opc); - goto emulate_ri; - } - switch (rd) { - case MIPS_HWR_CPUNUM: /* CPU number */ - arch->gprs[rt] = vcpu->vcpu_id; - break; - case MIPS_HWR_SYNCISTEP: /* SYNCI length */ - arch->gprs[rt] = min(current_cpu_data.dcache.linesz, - current_cpu_data.icache.linesz); - break; - case MIPS_HWR_CC: /* Read count register */ - arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu); - break; - case MIPS_HWR_CCRES: /* Count register resolution */ - switch (current_cpu_data.cputype) { - case CPU_20KC: - case CPU_25KF: - arch->gprs[rt] = 1; - break; - default: - arch->gprs[rt] = 2; - } - break; - case MIPS_HWR_ULR: /* Read UserLocal register */ - arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); - break; - - default: - kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc); - goto emulate_ri; - } - - trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel), - vcpu->arch.gprs[rt]); - } else { - kvm_debug("Emulate RI not supported @ %p: %#x\n", - opc, inst.word); - goto emulate_ri; - } - - return EMULATE_DONE; - -emulate_ri: - /* - * Rollback PC (if in branch delay slot then the PC already points to - * branch target), and pass the RI exception to the guest OS. 
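The removed kvm_mips_handle_ri() existed because, under Trap & Emulate, a guest-user RDHWR trapped into the hypervisor as a reserved-instruction exception and the hardware registers (CPU number, cycle counter, UserLocal and so on) had to be synthesised from the shadow CP0 state; under VZ the guest's own HWREna controls them without root intervention. For reference, RDHWR itself is an ordinary user-visible instruction, for example (illustrative inline-asm helper, MIPS32r2 and HWREna bit 2 assumed):

/* Read the hardware cycle counter (HWR $2) from user space. */
static inline unsigned long read_hwr_cc(void)
{
        unsigned long cc;

        asm volatile(".set push\n\t"
                     ".set mips32r2\n\t"
                     "rdhwr %0, $2\n\t"
                     ".set pop" : "=r" (cc));
        return cc;
}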
- */ - vcpu->arch.pc = curr_pc; - return kvm_mips_emulate_ri_exc(cause, opc, vcpu); -} - enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; @@ -3086,207 +1650,3 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu) done: return er; } - -static enum emulation_result kvm_mips_emulate_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_change_c0_guest_cause(cop0, (0xff), - (exccode << CAUSEB_EXCCODE)); - - /* Set PC to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); - - kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n", - exccode, kvm_read_c0_guest_epc(cop0), - kvm_read_c0_guest_badvaddr(cop0)); - } else { - kvm_err("Trying to deliver EXC when EXL is already set\n"); - er = EMULATE_FAIL; - } - - return er; -} - -enum emulation_result kvm_mips_check_privilege(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - enum emulation_result er = EMULATE_DONE; - u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; - unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; - - int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); - - if (usermode) { - switch (exccode) { - case EXCCODE_INT: - case EXCCODE_SYS: - case EXCCODE_BP: - case EXCCODE_RI: - case EXCCODE_TR: - case EXCCODE_MSAFPE: - case EXCCODE_FPE: - case EXCCODE_MSADIS: - break; - - case EXCCODE_CPU: - if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0) - er = EMULATE_PRIV_FAIL; - break; - - case EXCCODE_MOD: - break; - - case EXCCODE_TLBL: - /* - * We we are accessing Guest kernel space, then send an - * address error exception to the guest - */ - if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { - kvm_debug("%s: LD MISS @ %#lx\n", __func__, - badvaddr); - cause &= ~0xff; - cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE); - er = EMULATE_PRIV_FAIL; - } - break; - - case EXCCODE_TLBS: - /* - * We we are accessing Guest kernel space, then send an - * address error exception to the guest - */ - if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { - kvm_debug("%s: ST MISS @ %#lx\n", __func__, - badvaddr); - cause &= ~0xff; - cause |= (EXCCODE_ADES << CAUSEB_EXCCODE); - er = EMULATE_PRIV_FAIL; - } - break; - - case EXCCODE_ADES: - kvm_debug("%s: address error ST @ %#lx\n", __func__, - badvaddr); - if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { - cause &= ~0xff; - cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE); - } - er = EMULATE_PRIV_FAIL; - break; - case EXCCODE_ADEL: - kvm_debug("%s: address error LD @ %#lx\n", __func__, - badvaddr); - if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { - cause &= ~0xff; - cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE); - } - er = EMULATE_PRIV_FAIL; - break; - default: - er = EMULATE_PRIV_FAIL; - break; - } - } - - if (er == EMULATE_PRIV_FAIL) - kvm_mips_emulate_exc(cause, opc, vcpu); - - return er; -} - -/* - * User Address (UA) fault, this could happen if - * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this - * case we pass on the fault to the guest 
kernel and let it handle it. - * (2) TLB entry is present in the Guest TLB but not in the shadow, in this - * case we inject the TLB from the Guest TLB into the shadow host TLB - */ -enum emulation_result kvm_mips_handle_tlbmiss(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu, - bool write_fault) -{ - enum emulation_result er = EMULATE_DONE; - u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; - unsigned long va = vcpu->arch.host_cp0_badvaddr; - int index; - - kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n", - vcpu->arch.host_cp0_badvaddr); - - /* - * KVM would not have got the exception if this entry was valid in the - * shadow host TLB. Check the Guest TLB, if the entry is not there then - * send the guest an exception. The guest exc handler should then inject - * an entry into the guest TLB. - */ - index = kvm_mips_guest_tlb_lookup(vcpu, - (va & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & - KVM_ENTRYHI_ASID)); - if (index < 0) { - if (exccode == EXCCODE_TLBL) { - er = kvm_mips_emulate_tlbmiss_ld(cause, opc, vcpu); - } else if (exccode == EXCCODE_TLBS) { - er = kvm_mips_emulate_tlbmiss_st(cause, opc, vcpu); - } else { - kvm_err("%s: invalid exc code: %d\n", __func__, - exccode); - er = EMULATE_FAIL; - } - } else { - struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; - - /* - * Check if the entry is valid, if not then setup a TLB invalid - * exception to the guest - */ - if (!TLB_IS_VALID(*tlb, va)) { - if (exccode == EXCCODE_TLBL) { - er = kvm_mips_emulate_tlbinv_ld(cause, opc, - vcpu); - } else if (exccode == EXCCODE_TLBS) { - er = kvm_mips_emulate_tlbinv_st(cause, opc, - vcpu); - } else { - kvm_err("%s: invalid exc code: %d\n", __func__, - exccode); - er = EMULATE_FAIL; - } - } else { - kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", - tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]); - /* - * OK we have a Guest TLB entry, now inject it into the - * shadow host TLB - */ - if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, va, - write_fault)) { - kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", - __func__, va, index, vcpu, - read_c0_entryhi()); - er = EMULATE_FAIL; - } - } - } - - return er; -} diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c index 832475bf2055..8131fb2bdf97 100644 --- a/arch/mips/kvm/entry.c +++ b/arch/mips/kvm/entry.c @@ -305,7 +305,6 @@ static void *kvm_mips_build_enter_guest(void *addr) UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1); UASM_i_MTC0(&p, T0, C0_EPC); -#ifdef CONFIG_KVM_MIPS_VZ /* Save normal linux process pgd (VZ guarantees pgd_reg is set) */ if (cpu_has_ldpte) UASM_i_MFC0(&p, K0, C0_PWBASE); @@ -367,21 +366,6 @@ static void *kvm_mips_build_enter_guest(void *addr) /* Set the root ASID for the Guest */ UASM_i_ADDIU(&p, T1, S0, offsetof(struct kvm, arch.gpa_mm.context.asid)); -#else - /* Set the ASID for the Guest Kernel or User */ - UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1); - UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]), - T0); - uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL); - uasm_i_xori(&p, T0, T0, KSU_USER); - uasm_il_bnez(&p, &r, T0, label_kernel_asid); - UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch, - guest_kernel_mm.context.asid)); - /* else user */ - UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch, - guest_user_mm.context.asid)); - uasm_l_kernel_asid(&l, p); -#endif /* t1: contains the base of the ASID array, need to get the cpu id */ /* smp_processor_id */ @@ -406,24 
+390,9 @@ static void *kvm_mips_build_enter_guest(void *addr) uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID); #endif -#ifndef CONFIG_KVM_MIPS_VZ - /* - * Set up KVM T&E GVA pgd. - * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD(): - * - call tlbmiss_handler_setup_pgd(mm->pgd) - * - but skips write into CP0_PWBase for now - */ - UASM_i_LW(&p, A0, (int)offsetof(struct mm_struct, pgd) - - (int)offsetof(struct mm_struct, context.asid), T1); - - UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd); - uasm_i_jalr(&p, RA, T9); - uasm_i_mtc0(&p, K0, C0_ENTRYHI); -#else /* Set up KVM VZ root ASID (!guestid) */ uasm_i_mtc0(&p, K0, C0_ENTRYHI); skip_asid_restore: -#endif uasm_i_ehb(&p); /* Disable RDHWR access */ @@ -720,7 +689,6 @@ void *kvm_mips_build_exit(void *addr) uasm_l_msa_1(&l, p); } -#ifdef CONFIG_KVM_MIPS_VZ /* Restore host ASID */ if (!cpu_has_guestid) { UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi), @@ -764,7 +732,6 @@ void *kvm_mips_build_exit(void *addr) MIPS_GCTL1_RID_WIDTH); uasm_i_mtc0(&p, T0, C0_GUESTCTL1); } -#endif /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE)); diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c index d28c2c9c343e..0277942279ea 100644 --- a/arch/mips/kvm/interrupt.c +++ b/arch/mips/kvm/interrupt.c @@ -21,119 +21,6 @@ #include "interrupt.h" -void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority) -{ - set_bit(priority, &vcpu->arch.pending_exceptions); -} - -void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority) -{ - clear_bit(priority, &vcpu->arch.pending_exceptions); -} - -void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu) -{ - /* - * Cause bits to reflect the pending timer interrupt, - * the EXC code will be set when we are actually - * delivering the interrupt: - */ - kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI)); - - /* Queue up an INT exception for the core */ - kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER); - -} - -void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu) -{ - kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI)); - kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER); -} - -void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, - struct kvm_mips_interrupt *irq) -{ - int intr = (int)irq->irq; - - /* - * Cause bits to reflect the pending IO interrupt, - * the EXC code will be set when we are actually - * delivering the interrupt: - */ - kvm_set_c0_guest_cause(vcpu->arch.cop0, 1 << (intr + 8)); - kvm_mips_queue_irq(vcpu, kvm_irq_to_priority(intr)); -} - -void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, - struct kvm_mips_interrupt *irq) -{ - int intr = (int)irq->irq; - - kvm_clear_c0_guest_cause(vcpu->arch.cop0, 1 << (-intr + 8)); - kvm_mips_dequeue_irq(vcpu, kvm_irq_to_priority(-intr)); -} - -/* Deliver the interrupt of the corresponding priority, if possible. */ -int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, - u32 cause) -{ - int allowed = 0; - u32 exccode, ie; - - struct kvm_vcpu_arch *arch = &vcpu->arch; - struct mips_coproc *cop0 = vcpu->arch.cop0; - - if (priority == MIPS_EXC_MAX) - return 0; - - ie = 1 << (kvm_priority_to_irq[priority] + 8); - if ((kvm_read_c0_guest_status(cop0) & ST0_IE) - && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL))) - && (kvm_read_c0_guest_status(cop0) & ie)) { - allowed = 1; - exccode = EXCCODE_INT; - } - - /* Are we allowed to deliver the interrupt ??? 
*/ - if (allowed) { - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("Delivering INT @ pc %#lx\n", arch->pc); - - } else - kvm_err("Trying to deliver interrupt when EXL is already set\n"); - - kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE, - (exccode << CAUSEB_EXCCODE)); - - /* XXXSL Set PC to the interrupt exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu); - if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV) - arch->pc += 0x200; - else - arch->pc += 0x180; - - clear_bit(priority, &vcpu->arch.pending_exceptions); - } - - return allowed; -} - -int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, - u32 cause) -{ - return 1; -} - void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause) { unsigned long *pending = &vcpu->arch.pending_exceptions; @@ -145,10 +32,7 @@ void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause) priority = __ffs(*pending_clr); while (priority <= MIPS_EXC_MAX) { - if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) { - if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE) - break; - } + kvm_mips_callbacks->irq_clear(vcpu, priority, cause); priority = find_next_bit(pending_clr, BITS_PER_BYTE * sizeof(*pending_clr), @@ -157,10 +41,7 @@ void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause) priority = __ffs(*pending); while (priority <= MIPS_EXC_MAX) { - if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) { - if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE) - break; - } + kvm_mips_callbacks->irq_deliver(vcpu, priority, cause); priority = find_next_bit(pending, BITS_PER_BYTE * sizeof(*pending), diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h index c3e878ca3e07..e529ea2bb34b 100644 --- a/arch/mips/kvm/interrupt.h +++ b/arch/mips/kvm/interrupt.h @@ -31,29 +31,9 @@ #define C_TI (_ULCAST_(1) << 30) -#ifdef CONFIG_KVM_MIPS_VZ -#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (1) -#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (1) -#else -#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0) -#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0) -#endif - extern u32 *kvm_priority_to_irq; u32 kvm_irq_to_priority(u32 irq); -void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority); -void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority); int kvm_mips_pending_timer(struct kvm_vcpu *vcpu); -void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu); -void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu); -void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, - struct kvm_mips_interrupt *irq); -void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, - struct kvm_mips_interrupt *irq); -int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, - u32 cause); -int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, - u32 cause); void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause); diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 58a8812e2fa5..4d4af97dcc88 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -30,7 +30,6 @@ #include <linux/kvm_host.h> #include "interrupt.h" -#include "commpage.h" #define CREATE_TRACE_POINTS #include "trace.h" @@ -58,7 +57,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { VCPU_STAT("fpe", fpe_exits), VCPU_STAT("msa_disabled", msa_disabled_exits), 
VCPU_STAT("flush_dcache", flush_dcache_exits), -#ifdef CONFIG_KVM_MIPS_VZ VCPU_STAT("vz_gpsi", vz_gpsi_exits), VCPU_STAT("vz_gsfc", vz_gsfc_exits), VCPU_STAT("vz_hc", vz_hc_exits), @@ -70,7 +68,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { #ifdef CONFIG_CPU_LOONGSON64 VCPU_STAT("vz_cpucfg", vz_cpucfg_exits), #endif -#endif VCPU_STAT("halt_successful_poll", halt_successful_poll), VCPU_STAT("halt_attempted_poll", halt_attempted_poll), VCPU_STAT("halt_poll_invalid", halt_poll_invalid), @@ -139,11 +136,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) switch (type) { case KVM_VM_MIPS_AUTO: break; -#ifdef CONFIG_KVM_MIPS_VZ case KVM_VM_MIPS_VZ: -#else - case KVM_VM_MIPS_TE: -#endif break; default: /* Unsupported KVM type */ @@ -204,9 +197,7 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm) { /* Flush whole GPA */ kvm_mips_flush_gpa_pt(kvm, 0, ~0); - - /* Let implementation do the rest */ - kvm_mips_callbacks->flush_shadow_all(kvm); + kvm_flush_remote_tlbs(kvm); } void kvm_arch_flush_shadow_memslot(struct kvm *kvm, @@ -221,8 +212,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, /* Flush slot from GPA */ kvm_mips_flush_gpa_pt(kvm, slot->base_gfn, slot->base_gfn + slot->npages - 1); - /* Let implementation do the rest */ - kvm_mips_callbacks->flush_shadow_memslot(kvm, slot); + kvm_arch_flush_remote_tlbs_memslot(kvm, slot); spin_unlock(&kvm->mmu_lock); } @@ -262,9 +252,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, /* Write protect GPA page table entries */ needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn, new->base_gfn + new->npages - 1); - /* Let implementation do the rest */ if (needs_flush) - kvm_mips_callbacks->flush_shadow_memslot(kvm, new); + kvm_arch_flush_remote_tlbs_memslot(kvm, new); spin_unlock(&kvm->mmu_lock); } } @@ -361,7 +350,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) /* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */ refill_start = gebase; - if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT)) + if (IS_ENABLED(CONFIG_64BIT)) refill_start += 0x080; refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler); @@ -397,20 +386,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) flush_icache_range((unsigned long)gebase, (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); - /* - * Allocate comm page for guest kernel, a TLB will be reserved for - * mapping GVA @ 0xFFFF8000 to this page - */ - vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); - - if (!vcpu->arch.kseg0_commpage) { - err = -ENOMEM; - goto out_free_gebase; - } - - kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage); - kvm_mips_commpage_init(vcpu); - /* Init */ vcpu->arch.last_sched_cpu = -1; vcpu->arch.last_exec_cpu = -1; @@ -418,12 +393,10 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) /* Initial guest state */ err = kvm_mips_callbacks->vcpu_setup(vcpu); if (err) - goto out_free_commpage; + goto out_free_gebase; return 0; -out_free_commpage: - kfree(vcpu->arch.kseg0_commpage); out_free_gebase: kfree(gebase); out_uninit_vcpu: @@ -439,7 +412,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) kvm_mmu_free_memory_caches(vcpu); kfree(vcpu->arch.guest_ebase); - kfree(vcpu->arch.kseg0_commpage); kvm_mips_callbacks->vcpu_uninit(vcpu); } @@ -996,11 +968,16 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) } +int kvm_arch_flush_remote_tlb(struct kvm *kvm) +{ + kvm_mips_callbacks->prepare_flush_shadow(kvm); + return 1; +} + void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, 
- struct kvm_memory_slot *memslot) + const struct kvm_memory_slot *memslot) { - /* Let implementation handle TLB/GVA invalidation */ - kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot); + kvm_flush_remote_tlbs(kvm); } long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) @@ -1212,10 +1189,6 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu) vcpu->mode = OUTSIDE_GUEST_MODE; - /* re-enable HTW before enabling interrupts */ - if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) - htw_start(); - /* Set a default exit reason */ run->exit_reason = KVM_EXIT_UNKNOWN; run->ready_for_interrupt_injection = 1; @@ -1232,22 +1205,6 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu) cause, opc, run, vcpu); trace_kvm_exit(vcpu, exccode); - if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) { - /* - * Do a privilege check, if in UM most of these exit conditions - * end up causing an exception to be delivered to the Guest - * Kernel - */ - er = kvm_mips_check_privilege(cause, opc, vcpu); - if (er == EMULATE_PRIV_FAIL) { - goto skip_emul; - } else if (er == EMULATE_FAIL) { - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - goto skip_emul; - } - } - switch (exccode) { case EXCCODE_INT: kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc); @@ -1357,7 +1314,6 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu) } -skip_emul: local_irq_disable(); if (ret == RESUME_GUEST) @@ -1406,11 +1362,6 @@ skip_emul: read_c0_config5() & MIPS_CONF5_MSAEN) __kvm_restore_msacsr(&vcpu->arch); } - - /* Disable HTW before returning to guest or host */ - if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) - htw_stop(); - return ret; } @@ -1429,10 +1380,6 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu) * FR=0 FPU state, and we don't want to hit reserved instruction * exceptions trying to save the MSA state later when CU=1 && FR=1, so * play it safe and save it first. - * - * In theory we shouldn't ever hit this case since kvm_lose_fpu() should - * get called when guest CU1 is set, however we can't trust the guest - * not to clobber the status register directly via the commpage. 
*/ if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) @@ -1553,11 +1500,6 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) preempt_disable(); if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { - if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) { - set_c0_config5(MIPS_CONF5_MSAEN); - enable_fpu_hazard(); - } - __kvm_save_msa(&vcpu->arch); trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA); @@ -1569,11 +1511,6 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) } vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA); } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { - if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) { - set_c0_status(ST0_CU1); - enable_fpu_hazard(); - } - __kvm_save_fpu(&vcpu->arch); vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU); diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index 3dabeda82458..6d1f68cf4edf 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -439,85 +439,34 @@ static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn, end_gfn << PAGE_SHIFT); } -static int handle_hva_to_gpa(struct kvm *kvm, - unsigned long start, - unsigned long end, - int (*handler)(struct kvm *kvm, gfn_t gfn, - gpa_t gfn_end, - struct kvm_memory_slot *memslot, - void *data), - void *data) +bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) { - struct kvm_memslots *slots; - struct kvm_memory_slot *memslot; - int ret = 0; - - slots = kvm_memslots(kvm); - - /* we only care about the pages that the guest sees */ - kvm_for_each_memslot(memslot, slots) { - unsigned long hva_start, hva_end; - gfn_t gfn, gfn_end; - - hva_start = max(start, memslot->userspace_addr); - hva_end = min(end, memslot->userspace_addr + - (memslot->npages << PAGE_SHIFT)); - if (hva_start >= hva_end) - continue; - - /* - * {gfn(page) | page intersects with [hva_start, hva_end)} = - * {gfn_start, gfn_start+1, ..., gfn_end-1}. 
- */ - gfn = hva_to_gfn_memslot(hva_start, memslot); - gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); - - ret |= handler(kvm, gfn, gfn_end, memslot, data); - } - - return ret; -} - - -static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, - struct kvm_memory_slot *memslot, void *data) -{ - kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end); + kvm_mips_flush_gpa_pt(kvm, range->start, range->end); return 1; } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, - unsigned flags) +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); - - kvm_mips_callbacks->flush_shadow_all(kvm); - return 0; -} - -static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, - struct kvm_memory_slot *memslot, void *data) -{ - gpa_t gpa = gfn << PAGE_SHIFT; - pte_t hva_pte = *(pte_t *)data; + gpa_t gpa = range->start << PAGE_SHIFT; + pte_t hva_pte = range->pte; pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa); pte_t old_pte; if (!gpa_pte) - return 0; + return false; /* Mapping may need adjusting depending on memslot flags */ old_pte = *gpa_pte; - if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte)) + if (range->slot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte)) hva_pte = pte_mkclean(hva_pte); - else if (memslot->flags & KVM_MEM_READONLY) + else if (range->slot->flags & KVM_MEM_READONLY) hva_pte = pte_wrprotect(hva_pte); set_pte(gpa_pte, hva_pte); /* Replacing an absent or old page doesn't need flushes */ if (!pte_present(old_pte) || !pte_young(old_pte)) - return 0; + return false; /* Pages swapped, aged, moved, or cleaned require flushes */ return !pte_present(hva_pte) || @@ -526,27 +475,14 @@ static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, (pte_dirty(old_pte) && !pte_dirty(hva_pte)); } -int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) +bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - unsigned long end = hva + PAGE_SIZE; - int ret; - - ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte); - if (ret) - kvm_mips_callbacks->flush_shadow_all(kvm); - return 0; + return kvm_mips_mkold_gpa_pt(kvm, range->start, range->end); } -static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, - struct kvm_memory_slot *memslot, void *data) +bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end); -} - -static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, - struct kvm_memory_slot *memslot, void *data) -{ - gpa_t gpa = gfn << PAGE_SHIFT; + gpa_t gpa = range->start << PAGE_SHIFT; pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa); if (!gpa_pte) @@ -554,16 +490,6 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, return pte_young(*gpa_pte); } -int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) -{ - return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL); -} - -int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) -{ - return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL); -} - /** * _kvm_mips_map_page_fast() - Fast path GPA fault handler. * @vcpu: VCPU pointer. 
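[Editorial aside, not part of the patch: the hunks above convert the MIPS MMU-notifier handlers from the old hva-range callbacks to the new gfn_range-based hooks. A rough sketch of how such a hook gets driven is shown below — the names kvm_unmap_gfn_range(), kvm_flush_remote_tlbs() and the range fields slot/start/end are taken from the hunks above, while the caller shape and the exact struct kvm_gfn_range layout are assumptions for illustration only.]

/*
 * Illustrative sketch (assumed caller, simplified from the generic KVM
 * MMU-notifier glue): build a kvm_gfn_range covering a memslot span and
 * hand it to the arch hook added above.
 */
static void sketch_unmap_gfn_range(struct kvm *kvm,
				   struct kvm_memory_slot *slot,
				   gfn_t start, gfn_t end)
{
	struct kvm_gfn_range range = {
		.slot  = slot,
		.start = start,
		.end   = end,
	};

	/*
	 * kvm_unmap_gfn_range() drops the GPA page-table entries for the
	 * range and returns true when the caller still needs to flush
	 * remote TLBs, which is done here via kvm_flush_remote_tlbs().
	 */
	if (kvm_unmap_gfn_range(kvm, &range))
		kvm_flush_remote_tlbs(kvm);
}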
@@ -756,209 +682,6 @@ out: return err; } -static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu, - unsigned long addr) -{ - struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; - pgd_t *pgdp; - int ret; - - /* We need a minimum of cached pages ready for page table creation */ - ret = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES); - if (ret) - return NULL; - - if (KVM_GUEST_KERNEL_MODE(vcpu)) - pgdp = vcpu->arch.guest_kernel_mm.pgd; - else - pgdp = vcpu->arch.guest_user_mm.pgd; - - return kvm_mips_walk_pgd(pgdp, memcache, addr); -} - -void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr, - bool user) -{ - pgd_t *pgdp; - pte_t *ptep; - - addr &= PAGE_MASK << 1; - - pgdp = vcpu->arch.guest_kernel_mm.pgd; - ptep = kvm_mips_walk_pgd(pgdp, NULL, addr); - if (ptep) { - ptep[0] = pfn_pte(0, __pgprot(0)); - ptep[1] = pfn_pte(0, __pgprot(0)); - } - - if (user) { - pgdp = vcpu->arch.guest_user_mm.pgd; - ptep = kvm_mips_walk_pgd(pgdp, NULL, addr); - if (ptep) { - ptep[0] = pfn_pte(0, __pgprot(0)); - ptep[1] = pfn_pte(0, __pgprot(0)); - } - } -} - -/* - * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}. - * Flush a range of guest physical address space from the VM's GPA page tables. - */ - -static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva, - unsigned long end_gva) -{ - int i_min = pte_index(start_gva); - int i_max = pte_index(end_gva); - bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1); - int i; - - /* - * There's no freeing to do, so there's no point clearing individual - * entries unless only part of the last level page table needs flushing. - */ - if (safe_to_remove) - return true; - - for (i = i_min; i <= i_max; ++i) { - if (!pte_present(pte[i])) - continue; - - set_pte(pte + i, __pte(0)); - } - return false; -} - -static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva, - unsigned long end_gva) -{ - pte_t *pte; - unsigned long end = ~0ul; - int i_min = pmd_index(start_gva); - int i_max = pmd_index(end_gva); - bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1); - int i; - - for (i = i_min; i <= i_max; ++i, start_gva = 0) { - if (!pmd_present(pmd[i])) - continue; - - pte = pte_offset_kernel(pmd + i, 0); - if (i == i_max) - end = end_gva; - - if (kvm_mips_flush_gva_pte(pte, start_gva, end)) { - pmd_clear(pmd + i); - pte_free_kernel(NULL, pte); - } else { - safe_to_remove = false; - } - } - return safe_to_remove; -} - -static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva, - unsigned long end_gva) -{ - pmd_t *pmd; - unsigned long end = ~0ul; - int i_min = pud_index(start_gva); - int i_max = pud_index(end_gva); - bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1); - int i; - - for (i = i_min; i <= i_max; ++i, start_gva = 0) { - if (!pud_present(pud[i])) - continue; - - pmd = pmd_offset(pud + i, 0); - if (i == i_max) - end = end_gva; - - if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) { - pud_clear(pud + i); - pmd_free(NULL, pmd); - } else { - safe_to_remove = false; - } - } - return safe_to_remove; -} - -static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva, - unsigned long end_gva) -{ - p4d_t *p4d; - pud_t *pud; - unsigned long end = ~0ul; - int i_min = pgd_index(start_gva); - int i_max = pgd_index(end_gva); - bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1); - int i; - - for (i = i_min; i <= i_max; ++i, start_gva = 0) { - if (!pgd_present(pgd[i])) - continue; - - p4d = p4d_offset(pgd, 0); - pud = pud_offset(p4d + i, 0); - 
if (i == i_max) - end = end_gva; - - if (kvm_mips_flush_gva_pud(pud, start_gva, end)) { - pgd_clear(pgd + i); - pud_free(NULL, pud); - } else { - safe_to_remove = false; - } - } - return safe_to_remove; -} - -void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags) -{ - if (flags & KMF_GPA) { - /* all of guest virtual address space could be affected */ - if (flags & KMF_KERN) - /* useg, kseg0, seg2/3 */ - kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff); - else - /* useg */ - kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff); - } else { - /* useg */ - kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff); - - /* kseg2/3 */ - if (flags & KMF_KERN) - kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff); - } -} - -static pte_t kvm_mips_gpa_pte_to_gva_unmapped(pte_t pte) -{ - /* - * Don't leak writeable but clean entries from GPA page tables. We don't - * want the normal Linux tlbmod handler to handle dirtying when KVM - * accesses guest memory. - */ - if (!pte_dirty(pte)) - pte = pte_wrprotect(pte); - - return pte; -} - -static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo) -{ - /* Guest EntryLo overrides host EntryLo */ - if (!(entrylo & ENTRYLO_D)) - pte = pte_mkclean(pte); - - return kvm_mips_gpa_pte_to_gva_unmapped(pte); -} - -#ifdef CONFIG_KVM_MIPS_VZ int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr, struct kvm_vcpu *vcpu, bool write_fault) @@ -972,125 +695,6 @@ int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr, /* Invalidate this entry in the TLB */ return kvm_vz_host_tlb_inv(vcpu, badvaddr); } -#endif - -/* XXXKYMA: Must be called with interrupts disabled */ -int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, - struct kvm_vcpu *vcpu, - bool write_fault) -{ - unsigned long gpa; - pte_t pte_gpa[2], *ptep_gva; - int idx; - - if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) { - kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr); - kvm_mips_dump_host_tlbs(); - return -1; - } - - /* Get the GPA page table entry */ - gpa = KVM_GUEST_CPHYSADDR(badvaddr); - idx = (badvaddr >> PAGE_SHIFT) & 1; - if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx], - &pte_gpa[!idx]) < 0) - return -1; - - /* Get the GVA page table entry */ - ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, badvaddr & ~PAGE_SIZE); - if (!ptep_gva) { - kvm_err("No ptep for gva %lx\n", badvaddr); - return -1; - } - - /* Copy a pair of entries from GPA page table to GVA page table */ - ptep_gva[0] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[0]); - ptep_gva[1] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[1]); - - /* Invalidate this entry in the TLB, guest kernel ASID only */ - kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true); - return 0; -} - -int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, - struct kvm_mips_tlb *tlb, - unsigned long gva, - bool write_fault) -{ - struct kvm *kvm = vcpu->kvm; - long tlb_lo[2]; - pte_t pte_gpa[2], *ptep_buddy, *ptep_gva; - unsigned int idx = TLB_LO_IDX(*tlb, gva); - bool kernel = KVM_GUEST_KERNEL_MODE(vcpu); - - tlb_lo[0] = tlb->tlb_lo[0]; - tlb_lo[1] = tlb->tlb_lo[1]; - - /* - * The commpage address must not be mapped to anything else if the guest - * TLB contains entries nearby, or commpage accesses will break. 
- */ - if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1))) - tlb_lo[TLB_LO_IDX(*tlb, KVM_GUEST_COMMPAGE_ADDR)] = 0; - - /* Get the GPA page table entry */ - if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo[idx]), - write_fault, &pte_gpa[idx], NULL) < 0) - return -1; - - /* And its GVA buddy's GPA page table entry if it also exists */ - pte_gpa[!idx] = pfn_pte(0, __pgprot(0)); - if (tlb_lo[!idx] & ENTRYLO_V) { - spin_lock(&kvm->mmu_lock); - ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL, - mips3_tlbpfn_to_paddr(tlb_lo[!idx])); - if (ptep_buddy) - pte_gpa[!idx] = *ptep_buddy; - spin_unlock(&kvm->mmu_lock); - } - - /* Get the GVA page table entry pair */ - ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE); - if (!ptep_gva) { - kvm_err("No ptep for gva %lx\n", gva); - return -1; - } - - /* Copy a pair of entries from GPA page table to GVA page table */ - ptep_gva[0] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[0], tlb_lo[0]); - ptep_gva[1] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[1], tlb_lo[1]); - - /* Invalidate this entry in the TLB, current guest mode ASID only */ - kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel); - - kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, - tlb->tlb_lo[0], tlb->tlb_lo[1]); - - return 0; -} - -int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, - struct kvm_vcpu *vcpu) -{ - kvm_pfn_t pfn; - pte_t *ptep; - pgprot_t prot; - - ptep = kvm_trap_emul_pte_for_gva(vcpu, badvaddr); - if (!ptep) { - kvm_err("No ptep for commpage %lx\n", badvaddr); - return -1; - } - - pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage)); - /* Also set valid and dirty, so refill handler doesn't have to */ - prot = vm_get_page_prot(VM_READ|VM_WRITE|VM_SHARED); - *ptep = pte_mkyoung(pte_mkdirty(pfn_pte(pfn, prot))); - - /* Invalidate this entry in the TLB, guest kernel ASID only */ - kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true); - return 0; -} /** * kvm_mips_migrate_count() - Migrate timer. @@ -1153,86 +757,3 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) local_irq_restore(flags); } - -/** - * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault. - * @vcpu: Virtual CPU. - * @gva: Guest virtual address to be accessed. - * @write: True if write attempted (must be dirtied and made writable). - * - * Safely attempt to handle a GVA fault, mapping GVA pages if necessary, and - * dirtying the page if @write so that guest instructions can be modified. - * - * Returns: KVM_MIPS_MAPPED on success. - * KVM_MIPS_GVA if bad guest virtual address. - * KVM_MIPS_GPA if bad guest physical address. - * KVM_MIPS_TLB if guest TLB not present. - * KVM_MIPS_TLBINV if guest TLB present but not valid. - * KVM_MIPS_TLBMOD if guest TLB read only. 
- */ -enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu, - unsigned long gva, - bool write) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_mips_tlb *tlb; - int index; - - if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) { - if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0) - return KVM_MIPS_GPA; - } else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) || - KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) { - /* Address should be in the guest TLB */ - index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID)); - if (index < 0) - return KVM_MIPS_TLB; - tlb = &vcpu->arch.guest_tlb[index]; - - /* Entry should be valid, and dirty for writes */ - if (!TLB_IS_VALID(*tlb, gva)) - return KVM_MIPS_TLBINV; - if (write && !TLB_IS_DIRTY(*tlb, gva)) - return KVM_MIPS_TLBMOD; - - if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write)) - return KVM_MIPS_GPA; - } else { - return KVM_MIPS_GVA; - } - - return KVM_MIPS_MAPPED; -} - -int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out) -{ - int err; - - if (WARN(IS_ENABLED(CONFIG_KVM_MIPS_VZ), - "Expect BadInstr/BadInstrP registers to be used with VZ\n")) - return -EINVAL; - -retry: - kvm_trap_emul_gva_lockless_begin(vcpu); - err = get_user(*out, opc); - kvm_trap_emul_gva_lockless_end(vcpu); - - if (unlikely(err)) { - /* - * Try to handle the fault, maybe we just raced with a GVA - * invalidation. - */ - err = kvm_trap_emul_gva_fault(vcpu, (unsigned long)opc, - false); - if (unlikely(err)) { - kvm_err("%s: illegal address: %p\n", - __func__, opc); - return -EFAULT; - } - - /* Hopefully it'll work now */ - goto retry; - } - return 0; -} diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c index 1c1fbce3f566..1088114e5482 100644 --- a/arch/mips/kvm/tlb.c +++ b/arch/mips/kvm/tlb.c @@ -30,10 +30,6 @@ #include <asm/r4kcache.h> #define CONFIG_MIPS_MT -#define KVM_GUEST_PC_TLB 0 -#define KVM_GUEST_SP_TLB 1 - -#ifdef CONFIG_KVM_MIPS_VZ unsigned long GUESTID_MASK; EXPORT_SYMBOL_GPL(GUESTID_MASK); unsigned long GUESTID_FIRST_VERSION; @@ -50,91 +46,6 @@ static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu) else return cpu_asid(smp_processor_id(), gpa_mm); } -#endif - -static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) -{ - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; - int cpu = smp_processor_id(); - - return cpu_asid(cpu, kern_mm); -} - -static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) -{ - struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; - int cpu = smp_processor_id(); - - return cpu_asid(cpu, user_mm); -} - -/* Structure defining an tlb entry data set. */ - -void kvm_mips_dump_host_tlbs(void) -{ - unsigned long flags; - - local_irq_save(flags); - - kvm_info("HOST TLBs:\n"); - dump_tlb_regs(); - pr_info("\n"); - dump_tlb_all(); - - local_irq_restore(flags); -} -EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs); - -void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_mips_tlb tlb; - int i; - - kvm_info("Guest TLBs:\n"); - kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0)); - - for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { - tlb = vcpu->arch.guest_tlb[i]; - kvm_info("TLB%c%3d Hi 0x%08lx ", - (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V - ? ' ' : '*', - i, tlb.tlb_hi); - kvm_info("Lo0=0x%09llx %c%c attr %lx ", - (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]), - (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ', - (tlb.tlb_lo[0] & ENTRYLO_G) ? 
'G' : ' ', - (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT); - kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n", - (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]), - (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ', - (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ', - (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT, - tlb.tlb_mask); - } -} -EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs); - -int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) -{ - int i; - int index = -1; - struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb; - - for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { - if (TLB_HI_VPN2_HIT(tlb[i], entryhi) && - TLB_HI_ASID_HIT(tlb[i], entryhi)) { - index = i; - break; - } - } - - kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n", - __func__, entryhi, index, tlb[i].tlb_lo[0], tlb[i].tlb_lo[1]); - - return index; -} -EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup); static int _kvm_mips_host_tlb_inv(unsigned long entryhi) { @@ -163,54 +74,6 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi) return idx; } -int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va, - bool user, bool kernel) -{ - /* - * Initialize idx_user and idx_kernel to workaround bogus - * maybe-initialized warning when using GCC 6. - */ - int idx_user = 0, idx_kernel = 0; - unsigned long flags, old_entryhi; - - local_irq_save(flags); - - old_entryhi = read_c0_entryhi(); - - if (user) - idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) | - kvm_mips_get_user_asid(vcpu)); - if (kernel) - idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) | - kvm_mips_get_kernel_asid(vcpu)); - - write_c0_entryhi(old_entryhi); - mtc0_tlbw_hazard(); - - local_irq_restore(flags); - - /* - * We don't want to get reserved instruction exceptions for missing tlb - * entries. - */ - if (cpu_has_vtag_icache) - flush_icache_all(); - - if (user && idx_user >= 0) - kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n", - __func__, (va & VPN2_MASK) | - kvm_mips_get_user_asid(vcpu), idx_user); - if (kernel && idx_kernel >= 0) - kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n", - __func__, (va & VPN2_MASK) | - kvm_mips_get_kernel_asid(vcpu), idx_kernel); - - return 0; -} -EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv); - -#ifdef CONFIG_KVM_MIPS_VZ - /* GuestID management */ /** @@ -661,40 +524,3 @@ void kvm_loongson_clear_guest_ftlb(void) } EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_ftlb); #endif - -#endif - -/** - * kvm_mips_suspend_mm() - Suspend the active mm. - * @cpu The CPU we're running on. - * - * Suspend the active_mm, ready for a switch to a KVM guest virtual address - * space. This is left active for the duration of guest context, including time - * with interrupts enabled, so we need to be careful not to confuse e.g. cache - * management IPIs. - * - * kvm_mips_resume_mm() should be called before context switching to a different - * process so we don't need to worry about reference counting. - * - * This needs to be in static kernel code to avoid exporting init_mm. - */ -void kvm_mips_suspend_mm(int cpu) -{ - cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm)); - current->active_mm = &init_mm; -} -EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm); - -/** - * kvm_mips_resume_mm() - Resume the current process mm. - * @cpu The CPU we're running on. - * - * Resume the mm of the current process, after a switch back from a KVM guest - * virtual address space (see kvm_mips_suspend_mm()). 
- */ -void kvm_mips_resume_mm(int cpu) -{ - cpumask_set_cpu(cpu, mm_cpumask(current->mm)); - current->active_mm = current->mm; -} -EXPORT_SYMBOL_GPL(kvm_mips_resume_mm); diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c deleted file mode 100644 index 0788c00d7e94..000000000000 --- a/arch/mips/kvm/trap_emul.c +++ /dev/null @@ -1,1306 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel - * - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. - * Authors: Sanjay Lal <sanjayl@kymasys.com> - */ - -#include <linux/errno.h> -#include <linux/err.h> -#include <linux/kvm_host.h> -#include <linux/log2.h> -#include <linux/uaccess.h> -#include <linux/vmalloc.h> -#include <asm/mmu_context.h> -#include <asm/pgalloc.h> - -#include "interrupt.h" - -static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) -{ - gpa_t gpa; - gva_t kseg = KSEGX(gva); - gva_t gkseg = KVM_GUEST_KSEGX(gva); - - if ((kseg == CKSEG0) || (kseg == CKSEG1)) - gpa = CPHYSADDR(gva); - else if (gkseg == KVM_GUEST_KSEG0) - gpa = KVM_GUEST_CPHYSADDR(gva); - else { - kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva); - kvm_mips_dump_host_tlbs(); - gpa = KVM_INVALID_ADDR; - } - - kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa); - - return gpa; -} - -static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu) -{ - u32 __user *opc = (u32 __user *) vcpu->arch.pc; - u32 cause = vcpu->arch.host_cp0_cause; - u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE; - unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; - u32 inst = 0; - - /* - * Fetch the instruction. 
- */ - if (cause & CAUSEF_BD) - opc += 1; - kvm_get_badinstr(opc, vcpu, &inst); - - kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n", - exccode, opc, inst, badvaddr, - kvm_read_c0_guest_status(vcpu->arch.cop0)); - kvm_arch_vcpu_dump_regs(vcpu); - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - return RESUME_HOST; -} - -static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - u32 __user *opc = (u32 __user *) vcpu->arch.pc; - u32 cause = vcpu->arch.host_cp0_cause; - enum emulation_result er = EMULATE_DONE; - int ret = RESUME_GUEST; - - if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) { - /* FPU Unusable */ - if (!kvm_mips_guest_has_fpu(&vcpu->arch) || - (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) { - /* - * Unusable/no FPU in guest: - * deliver guest COP1 Unusable Exception - */ - er = kvm_mips_emulate_fpu_exc(cause, opc, vcpu); - } else { - /* Restore FPU state */ - kvm_own_fpu(vcpu); - er = EMULATE_DONE; - } - } else { - er = kvm_mips_emulate_inst(cause, opc, vcpu); - } - - switch (er) { - case EMULATE_DONE: - ret = RESUME_GUEST; - break; - - case EMULATE_FAIL: - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - break; - - case EMULATE_WAIT: - vcpu->run->exit_reason = KVM_EXIT_INTR; - ret = RESUME_HOST; - break; - - case EMULATE_HYPERCALL: - ret = kvm_mips_handle_hypcall(vcpu); - break; - - default: - BUG(); - } - return ret; -} - -static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_vcpu *vcpu) -{ - enum emulation_result er; - union mips_instruction inst; - int err; - - /* A code fetch fault doesn't count as an MMIO */ - if (kvm_is_ifetch_fault(&vcpu->arch)) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - return RESUME_HOST; - } - - /* Fetch the instruction. */ - if (cause & CAUSEF_BD) - opc += 1; - err = kvm_get_badinstr(opc, vcpu, &inst.word); - if (err) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - return RESUME_HOST; - } - - /* Emulate the load */ - er = kvm_mips_emulate_load(inst, cause, vcpu); - if (er == EMULATE_FAIL) { - kvm_err("Emulate load from MMIO space failed\n"); - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - } else { - vcpu->run->exit_reason = KVM_EXIT_MMIO; - } - return RESUME_HOST; -} - -static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_vcpu *vcpu) -{ - enum emulation_result er; - union mips_instruction inst; - int err; - - /* Fetch the instruction. 
*/ - if (cause & CAUSEF_BD) - opc += 1; - err = kvm_get_badinstr(opc, vcpu, &inst.word); - if (err) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - return RESUME_HOST; - } - - /* Emulate the store */ - er = kvm_mips_emulate_store(inst, cause, vcpu); - if (er == EMULATE_FAIL) { - kvm_err("Emulate store to MMIO space failed\n"); - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - } else { - vcpu->run->exit_reason = KVM_EXIT_MMIO; - } - return RESUME_HOST; -} - -static int kvm_mips_bad_access(u32 cause, u32 *opc, - struct kvm_vcpu *vcpu, bool store) -{ - if (store) - return kvm_mips_bad_store(cause, opc, vcpu); - else - return kvm_mips_bad_load(cause, opc, vcpu); -} - -static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - u32 __user *opc = (u32 __user *) vcpu->arch.pc; - unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; - u32 cause = vcpu->arch.host_cp0_cause; - struct kvm_mips_tlb *tlb; - unsigned long entryhi; - int index; - - if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 - || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { - /* - * First find the mapping in the guest TLB. If the failure to - * write was due to the guest TLB, it should be up to the guest - * to handle it. - */ - entryhi = (badvaddr & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); - index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); - - /* - * These should never happen. - * They would indicate stale host TLB entries. - */ - if (unlikely(index < 0)) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - return RESUME_HOST; - } - tlb = vcpu->arch.guest_tlb + index; - if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - return RESUME_HOST; - } - - /* - * Guest entry not dirty? That would explain the TLB modified - * exception. Relay that on to the guest so it can handle it. - */ - if (!TLB_IS_DIRTY(*tlb, badvaddr)) { - kvm_mips_emulate_tlbmod(cause, opc, vcpu); - return RESUME_GUEST; - } - - if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr, - true)) - /* Not writable, needs handling as MMIO */ - return kvm_mips_bad_store(cause, opc, vcpu); - return RESUME_GUEST; - } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { - if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0) - /* Not writable, needs handling as MMIO */ - return kvm_mips_bad_store(cause, opc, vcpu); - return RESUME_GUEST; - } else { - /* host kernel addresses are all handled as MMIO */ - return kvm_mips_bad_store(cause, opc, vcpu); - } -} - -static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store) -{ - struct kvm_run *run = vcpu->run; - u32 __user *opc = (u32 __user *) vcpu->arch.pc; - unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; - u32 cause = vcpu->arch.host_cp0_cause; - enum emulation_result er = EMULATE_DONE; - int ret = RESUME_GUEST; - - if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) - && KVM_GUEST_KERNEL_MODE(vcpu)) { - if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) { - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 - || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { - kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n", - store ? 
"ST" : "LD", cause, opc, badvaddr); - - /* - * User Address (UA) fault, this could happen if - * (1) TLB entry not present/valid in both Guest and shadow host - * TLBs, in this case we pass on the fault to the guest - * kernel and let it handle it. - * (2) TLB entry is present in the Guest TLB but not in the - * shadow, in this case we inject the TLB from the Guest TLB - * into the shadow host TLB - */ - - er = kvm_mips_handle_tlbmiss(cause, opc, vcpu, store); - if (er == EMULATE_DONE) - ret = RESUME_GUEST; - else { - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { - /* - * All KSEG0 faults are handled by KVM, as the guest kernel does - * not expect to ever get them - */ - if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0) - ret = kvm_mips_bad_access(cause, opc, vcpu, store); - } else if (KVM_GUEST_KERNEL_MODE(vcpu) - && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { - /* - * With EVA we may get a TLB exception instead of an address - * error when the guest performs MMIO to KSeg1 addresses. - */ - ret = kvm_mips_bad_access(cause, opc, vcpu, store); - } else { - kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n", - store ? "ST" : "LD", cause, opc, badvaddr); - kvm_mips_dump_host_tlbs(); - kvm_arch_vcpu_dump_regs(vcpu); - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - return ret; -} - -static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) -{ - return kvm_trap_emul_handle_tlb_miss(vcpu, true); -} - -static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) -{ - return kvm_trap_emul_handle_tlb_miss(vcpu, false); -} - -static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) -{ - u32 __user *opc = (u32 __user *) vcpu->arch.pc; - unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; - u32 cause = vcpu->arch.host_cp0_cause; - int ret = RESUME_GUEST; - - if (KVM_GUEST_KERNEL_MODE(vcpu) - && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { - ret = kvm_mips_bad_store(cause, opc, vcpu); - } else { - kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n", - cause, opc, badvaddr); - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - return ret; -} - -static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) -{ - u32 __user *opc = (u32 __user *) vcpu->arch.pc; - unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; - u32 cause = vcpu->arch.host_cp0_cause; - int ret = RESUME_GUEST; - - if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) { - ret = kvm_mips_bad_load(cause, opc, vcpu); - } else { - kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n", - cause, opc, badvaddr); - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - return ret; -} - -static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu) -{ - u32 __user *opc = (u32 __user *) vcpu->arch.pc; - u32 cause = vcpu->arch.host_cp0_cause; - enum emulation_result er = EMULATE_DONE; - int ret = RESUME_GUEST; - - er = kvm_mips_emulate_syscall(cause, opc, vcpu); - if (er == EMULATE_DONE) - ret = RESUME_GUEST; - else { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - return ret; -} - -static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu) -{ - u32 __user *opc = (u32 __user *) vcpu->arch.pc; - u32 cause = vcpu->arch.host_cp0_cause; - enum emulation_result er = EMULATE_DONE; - int ret = RESUME_GUEST; - - er = 
kvm_mips_handle_ri(cause, opc, vcpu); - if (er == EMULATE_DONE) - ret = RESUME_GUEST; - else { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - return ret; -} - -static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) -{ - u32 __user *opc = (u32 __user *) vcpu->arch.pc; - u32 cause = vcpu->arch.host_cp0_cause; - enum emulation_result er = EMULATE_DONE; - int ret = RESUME_GUEST; - - er = kvm_mips_emulate_bp_exc(cause, opc, vcpu); - if (er == EMULATE_DONE) - ret = RESUME_GUEST; - else { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - return ret; -} - -static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu) -{ - u32 __user *opc = (u32 __user *)vcpu->arch.pc; - u32 cause = vcpu->arch.host_cp0_cause; - enum emulation_result er = EMULATE_DONE; - int ret = RESUME_GUEST; - - er = kvm_mips_emulate_trap_exc(cause, opc, vcpu); - if (er == EMULATE_DONE) { - ret = RESUME_GUEST; - } else { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - return ret; -} - -static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu) -{ - u32 __user *opc = (u32 __user *)vcpu->arch.pc; - u32 cause = vcpu->arch.host_cp0_cause; - enum emulation_result er = EMULATE_DONE; - int ret = RESUME_GUEST; - - er = kvm_mips_emulate_msafpe_exc(cause, opc, vcpu); - if (er == EMULATE_DONE) { - ret = RESUME_GUEST; - } else { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - return ret; -} - -static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu) -{ - u32 __user *opc = (u32 __user *)vcpu->arch.pc; - u32 cause = vcpu->arch.host_cp0_cause; - enum emulation_result er = EMULATE_DONE; - int ret = RESUME_GUEST; - - er = kvm_mips_emulate_fpe_exc(cause, opc, vcpu); - if (er == EMULATE_DONE) { - ret = RESUME_GUEST; - } else { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - return ret; -} - -/** - * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root. - * @vcpu: Virtual CPU context. - * - * Handle when the guest attempts to use MSA when it is disabled. 
- */ -static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - u32 __user *opc = (u32 __user *) vcpu->arch.pc; - u32 cause = vcpu->arch.host_cp0_cause; - enum emulation_result er = EMULATE_DONE; - int ret = RESUME_GUEST; - - if (!kvm_mips_guest_has_msa(&vcpu->arch) || - (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) { - /* - * No MSA in guest, or FPU enabled and not in FR=1 mode, - * guest reserved instruction exception - */ - er = kvm_mips_emulate_ri_exc(cause, opc, vcpu); - } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) { - /* MSA disabled by guest, guest MSA disabled exception */ - er = kvm_mips_emulate_msadis_exc(cause, opc, vcpu); - } else { - /* Restore MSA/FPU state */ - kvm_own_msa(vcpu); - er = EMULATE_DONE; - } - - switch (er) { - case EMULATE_DONE: - ret = RESUME_GUEST; - break; - - case EMULATE_FAIL: - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - break; - - default: - BUG(); - } - return ret; -} - -static int kvm_trap_emul_hardware_enable(void) -{ - return 0; -} - -static void kvm_trap_emul_hardware_disable(void) -{ -} - -static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext) -{ - int r; - - switch (ext) { - case KVM_CAP_MIPS_TE: - r = 1; - break; - case KVM_CAP_IOEVENTFD: - r = 1; - break; - default: - r = 0; - break; - } - - return r; -} - -static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu) -{ - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; - struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; - - /* - * Allocate GVA -> HPA page tables. - * MIPS doesn't use the mm_struct pointer argument. - */ - kern_mm->pgd = pgd_alloc(kern_mm); - if (!kern_mm->pgd) - return -ENOMEM; - - user_mm->pgd = pgd_alloc(user_mm); - if (!user_mm->pgd) { - pgd_free(kern_mm, kern_mm->pgd); - return -ENOMEM; - } - - return 0; -} - -static void kvm_mips_emul_free_gva_pt(pgd_t *pgd) -{ - /* Don't free host kernel page tables copied from init_mm.pgd */ - const unsigned long end = 0x80000000; - unsigned long pgd_va, pud_va, pmd_va; - p4d_t *p4d; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - int i, j, k; - - for (i = 0; i < USER_PTRS_PER_PGD; i++) { - if (pgd_none(pgd[i])) - continue; - - pgd_va = (unsigned long)i << PGDIR_SHIFT; - if (pgd_va >= end) - break; - p4d = p4d_offset(pgd, 0); - pud = pud_offset(p4d + i, 0); - for (j = 0; j < PTRS_PER_PUD; j++) { - if (pud_none(pud[j])) - continue; - - pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT); - if (pud_va >= end) - break; - pmd = pmd_offset(pud + j, 0); - for (k = 0; k < PTRS_PER_PMD; k++) { - if (pmd_none(pmd[k])) - continue; - - pmd_va = pud_va | (k << PMD_SHIFT); - if (pmd_va >= end) - break; - pte = pte_offset_kernel(pmd + k, 0); - pte_free_kernel(NULL, pte); - } - pmd_free(NULL, pmd); - } - pud_free(NULL, pud); - } - pgd_free(NULL, pgd); -} - -static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu) -{ - kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd); - kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd); -} - -static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - u32 config, config1; - int vcpu_id = vcpu->vcpu_id; - - /* Start off the timer at 100 MHz */ - kvm_mips_init_count(vcpu, 100*1000*1000); - - /* - * Arch specific stuff, set up config registers properly so that the - * guest will come up as expected - */ -#ifndef CONFIG_CPU_MIPSR6 - /* r2-r5, simulate a MIPS 24kc */ - kvm_write_c0_guest_prid(cop0, 0x00019300); 
-#else - /* r6+, simulate a generic QEMU machine */ - kvm_write_c0_guest_prid(cop0, 0x00010000); -#endif - /* - * Have config1, Cacheable, noncoherent, write-back, write allocate. - * Endianness, arch revision & virtually tagged icache should match - * host. - */ - config = read_c0_config() & MIPS_CONF_AR; - config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB; -#ifdef CONFIG_CPU_BIG_ENDIAN - config |= CONF_BE; -#endif - if (cpu_has_vtag_icache) - config |= MIPS_CONF_VI; - kvm_write_c0_guest_config(cop0, config); - - /* Read the cache characteristics from the host Config1 Register */ - config1 = (read_c0_config1() & ~0x7f); - - /* DCache line size not correctly reported in Config1 on Octeon CPUs */ - if (cpu_dcache_line_size()) { - config1 &= ~MIPS_CONF1_DL; - config1 |= ((ilog2(cpu_dcache_line_size()) - 1) << - MIPS_CONF1_DL_SHF) & MIPS_CONF1_DL; - } - - /* Set up MMU size */ - config1 &= ~(0x3f << 25); - config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25); - - /* We unset some bits that we aren't emulating */ - config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC | - MIPS_CONF1_WR | MIPS_CONF1_CA); - kvm_write_c0_guest_config1(cop0, config1); - - /* Have config3, no tertiary/secondary caches implemented */ - kvm_write_c0_guest_config2(cop0, MIPS_CONF_M); - /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */ - - /* Have config4, UserLocal */ - kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI); - - /* Have config5 */ - kvm_write_c0_guest_config4(cop0, MIPS_CONF_M); - - /* No config6 */ - kvm_write_c0_guest_config5(cop0, 0); - - /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ - kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); - - /* Status */ - kvm_write_c0_guest_status(cop0, ST0_BEV | ST0_ERL); - - /* - * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5) - */ - kvm_write_c0_guest_intctl(cop0, 0xFC000000); - - /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */ - kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | - (vcpu_id & MIPS_EBASE_CPUNUM)); - - /* Put PC at guest reset vector */ - vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000); - - return 0; -} - -static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm) -{ - /* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */ - kvm_flush_remote_tlbs(kvm); -} - -static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm, - const struct kvm_memory_slot *slot) -{ - kvm_trap_emul_flush_shadow_all(kvm); -} - -static u64 kvm_trap_emul_get_one_regs[] = { - KVM_REG_MIPS_CP0_INDEX, - KVM_REG_MIPS_CP0_ENTRYLO0, - KVM_REG_MIPS_CP0_ENTRYLO1, - KVM_REG_MIPS_CP0_CONTEXT, - KVM_REG_MIPS_CP0_USERLOCAL, - KVM_REG_MIPS_CP0_PAGEMASK, - KVM_REG_MIPS_CP0_WIRED, - KVM_REG_MIPS_CP0_HWRENA, - KVM_REG_MIPS_CP0_BADVADDR, - KVM_REG_MIPS_CP0_COUNT, - KVM_REG_MIPS_CP0_ENTRYHI, - KVM_REG_MIPS_CP0_COMPARE, - KVM_REG_MIPS_CP0_STATUS, - KVM_REG_MIPS_CP0_INTCTL, - KVM_REG_MIPS_CP0_CAUSE, - KVM_REG_MIPS_CP0_EPC, - KVM_REG_MIPS_CP0_PRID, - KVM_REG_MIPS_CP0_EBASE, - KVM_REG_MIPS_CP0_CONFIG, - KVM_REG_MIPS_CP0_CONFIG1, - KVM_REG_MIPS_CP0_CONFIG2, - KVM_REG_MIPS_CP0_CONFIG3, - KVM_REG_MIPS_CP0_CONFIG4, - KVM_REG_MIPS_CP0_CONFIG5, - KVM_REG_MIPS_CP0_CONFIG7, - KVM_REG_MIPS_CP0_ERROREPC, - KVM_REG_MIPS_CP0_KSCRATCH1, - KVM_REG_MIPS_CP0_KSCRATCH2, - KVM_REG_MIPS_CP0_KSCRATCH3, - KVM_REG_MIPS_CP0_KSCRATCH4, - KVM_REG_MIPS_CP0_KSCRATCH5, - KVM_REG_MIPS_CP0_KSCRATCH6, - - KVM_REG_MIPS_COUNT_CTL, - KVM_REG_MIPS_COUNT_RESUME, - KVM_REG_MIPS_COUNT_HZ, -}; - -static unsigned long 
kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu) -{ - return ARRAY_SIZE(kvm_trap_emul_get_one_regs); -} - -static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu, - u64 __user *indices) -{ - if (copy_to_user(indices, kvm_trap_emul_get_one_regs, - sizeof(kvm_trap_emul_get_one_regs))) - return -EFAULT; - indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs); - - return 0; -} - -static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg, - s64 *v) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - - switch (reg->id) { - case KVM_REG_MIPS_CP0_INDEX: - *v = (long)kvm_read_c0_guest_index(cop0); - break; - case KVM_REG_MIPS_CP0_ENTRYLO0: - *v = kvm_read_c0_guest_entrylo0(cop0); - break; - case KVM_REG_MIPS_CP0_ENTRYLO1: - *v = kvm_read_c0_guest_entrylo1(cop0); - break; - case KVM_REG_MIPS_CP0_CONTEXT: - *v = (long)kvm_read_c0_guest_context(cop0); - break; - case KVM_REG_MIPS_CP0_USERLOCAL: - *v = (long)kvm_read_c0_guest_userlocal(cop0); - break; - case KVM_REG_MIPS_CP0_PAGEMASK: - *v = (long)kvm_read_c0_guest_pagemask(cop0); - break; - case KVM_REG_MIPS_CP0_WIRED: - *v = (long)kvm_read_c0_guest_wired(cop0); - break; - case KVM_REG_MIPS_CP0_HWRENA: - *v = (long)kvm_read_c0_guest_hwrena(cop0); - break; - case KVM_REG_MIPS_CP0_BADVADDR: - *v = (long)kvm_read_c0_guest_badvaddr(cop0); - break; - case KVM_REG_MIPS_CP0_ENTRYHI: - *v = (long)kvm_read_c0_guest_entryhi(cop0); - break; - case KVM_REG_MIPS_CP0_COMPARE: - *v = (long)kvm_read_c0_guest_compare(cop0); - break; - case KVM_REG_MIPS_CP0_STATUS: - *v = (long)kvm_read_c0_guest_status(cop0); - break; - case KVM_REG_MIPS_CP0_INTCTL: - *v = (long)kvm_read_c0_guest_intctl(cop0); - break; - case KVM_REG_MIPS_CP0_CAUSE: - *v = (long)kvm_read_c0_guest_cause(cop0); - break; - case KVM_REG_MIPS_CP0_EPC: - *v = (long)kvm_read_c0_guest_epc(cop0); - break; - case KVM_REG_MIPS_CP0_PRID: - *v = (long)kvm_read_c0_guest_prid(cop0); - break; - case KVM_REG_MIPS_CP0_EBASE: - *v = (long)kvm_read_c0_guest_ebase(cop0); - break; - case KVM_REG_MIPS_CP0_CONFIG: - *v = (long)kvm_read_c0_guest_config(cop0); - break; - case KVM_REG_MIPS_CP0_CONFIG1: - *v = (long)kvm_read_c0_guest_config1(cop0); - break; - case KVM_REG_MIPS_CP0_CONFIG2: - *v = (long)kvm_read_c0_guest_config2(cop0); - break; - case KVM_REG_MIPS_CP0_CONFIG3: - *v = (long)kvm_read_c0_guest_config3(cop0); - break; - case KVM_REG_MIPS_CP0_CONFIG4: - *v = (long)kvm_read_c0_guest_config4(cop0); - break; - case KVM_REG_MIPS_CP0_CONFIG5: - *v = (long)kvm_read_c0_guest_config5(cop0); - break; - case KVM_REG_MIPS_CP0_CONFIG7: - *v = (long)kvm_read_c0_guest_config7(cop0); - break; - case KVM_REG_MIPS_CP0_COUNT: - *v = kvm_mips_read_count(vcpu); - break; - case KVM_REG_MIPS_COUNT_CTL: - *v = vcpu->arch.count_ctl; - break; - case KVM_REG_MIPS_COUNT_RESUME: - *v = ktime_to_ns(vcpu->arch.count_resume); - break; - case KVM_REG_MIPS_COUNT_HZ: - *v = vcpu->arch.count_hz; - break; - case KVM_REG_MIPS_CP0_ERROREPC: - *v = (long)kvm_read_c0_guest_errorepc(cop0); - break; - case KVM_REG_MIPS_CP0_KSCRATCH1: - *v = (long)kvm_read_c0_guest_kscratch1(cop0); - break; - case KVM_REG_MIPS_CP0_KSCRATCH2: - *v = (long)kvm_read_c0_guest_kscratch2(cop0); - break; - case KVM_REG_MIPS_CP0_KSCRATCH3: - *v = (long)kvm_read_c0_guest_kscratch3(cop0); - break; - case KVM_REG_MIPS_CP0_KSCRATCH4: - *v = (long)kvm_read_c0_guest_kscratch4(cop0); - break; - case KVM_REG_MIPS_CP0_KSCRATCH5: - *v = (long)kvm_read_c0_guest_kscratch5(cop0); - break; - case KVM_REG_MIPS_CP0_KSCRATCH6: - *v = 
(long)kvm_read_c0_guest_kscratch6(cop0); - break; - default: - return -EINVAL; - } - return 0; -} - -static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg, - s64 v) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - int ret = 0; - unsigned int cur, change; - - switch (reg->id) { - case KVM_REG_MIPS_CP0_INDEX: - kvm_write_c0_guest_index(cop0, v); - break; - case KVM_REG_MIPS_CP0_ENTRYLO0: - kvm_write_c0_guest_entrylo0(cop0, v); - break; - case KVM_REG_MIPS_CP0_ENTRYLO1: - kvm_write_c0_guest_entrylo1(cop0, v); - break; - case KVM_REG_MIPS_CP0_CONTEXT: - kvm_write_c0_guest_context(cop0, v); - break; - case KVM_REG_MIPS_CP0_USERLOCAL: - kvm_write_c0_guest_userlocal(cop0, v); - break; - case KVM_REG_MIPS_CP0_PAGEMASK: - kvm_write_c0_guest_pagemask(cop0, v); - break; - case KVM_REG_MIPS_CP0_WIRED: - kvm_write_c0_guest_wired(cop0, v); - break; - case KVM_REG_MIPS_CP0_HWRENA: - kvm_write_c0_guest_hwrena(cop0, v); - break; - case KVM_REG_MIPS_CP0_BADVADDR: - kvm_write_c0_guest_badvaddr(cop0, v); - break; - case KVM_REG_MIPS_CP0_ENTRYHI: - kvm_write_c0_guest_entryhi(cop0, v); - break; - case KVM_REG_MIPS_CP0_STATUS: - kvm_write_c0_guest_status(cop0, v); - break; - case KVM_REG_MIPS_CP0_INTCTL: - /* No VInt, so no VS, read-only for now */ - break; - case KVM_REG_MIPS_CP0_EPC: - kvm_write_c0_guest_epc(cop0, v); - break; - case KVM_REG_MIPS_CP0_PRID: - kvm_write_c0_guest_prid(cop0, v); - break; - case KVM_REG_MIPS_CP0_EBASE: - /* - * Allow core number to be written, but the exception base must - * remain in guest KSeg0. - */ - kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM, - v); - break; - case KVM_REG_MIPS_CP0_COUNT: - kvm_mips_write_count(vcpu, v); - break; - case KVM_REG_MIPS_CP0_COMPARE: - kvm_mips_write_compare(vcpu, v, false); - break; - case KVM_REG_MIPS_CP0_CAUSE: - /* - * If the timer is stopped or started (DC bit) it must look - * atomic with changes to the interrupt pending bits (TI, IRQ5). - * A timer interrupt should not happen in between. 
- */ - if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) { - if (v & CAUSEF_DC) { - /* disable timer first */ - kvm_mips_count_disable_cause(vcpu); - kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC, - v); - } else { - /* enable timer last */ - kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC, - v); - kvm_mips_count_enable_cause(vcpu); - } - } else { - kvm_write_c0_guest_cause(cop0, v); - } - break; - case KVM_REG_MIPS_CP0_CONFIG: - /* read-only for now */ - break; - case KVM_REG_MIPS_CP0_CONFIG1: - cur = kvm_read_c0_guest_config1(cop0); - change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu); - if (change) { - v = cur ^ change; - kvm_write_c0_guest_config1(cop0, v); - } - break; - case KVM_REG_MIPS_CP0_CONFIG2: - /* read-only for now */ - break; - case KVM_REG_MIPS_CP0_CONFIG3: - cur = kvm_read_c0_guest_config3(cop0); - change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu); - if (change) { - v = cur ^ change; - kvm_write_c0_guest_config3(cop0, v); - } - break; - case KVM_REG_MIPS_CP0_CONFIG4: - cur = kvm_read_c0_guest_config4(cop0); - change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu); - if (change) { - v = cur ^ change; - kvm_write_c0_guest_config4(cop0, v); - } - break; - case KVM_REG_MIPS_CP0_CONFIG5: - cur = kvm_read_c0_guest_config5(cop0); - change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu); - if (change) { - v = cur ^ change; - kvm_write_c0_guest_config5(cop0, v); - } - break; - case KVM_REG_MIPS_CP0_CONFIG7: - /* writes ignored */ - break; - case KVM_REG_MIPS_COUNT_CTL: - ret = kvm_mips_set_count_ctl(vcpu, v); - break; - case KVM_REG_MIPS_COUNT_RESUME: - ret = kvm_mips_set_count_resume(vcpu, v); - break; - case KVM_REG_MIPS_COUNT_HZ: - ret = kvm_mips_set_count_hz(vcpu, v); - break; - case KVM_REG_MIPS_CP0_ERROREPC: - kvm_write_c0_guest_errorepc(cop0, v); - break; - case KVM_REG_MIPS_CP0_KSCRATCH1: - kvm_write_c0_guest_kscratch1(cop0, v); - break; - case KVM_REG_MIPS_CP0_KSCRATCH2: - kvm_write_c0_guest_kscratch2(cop0, v); - break; - case KVM_REG_MIPS_CP0_KSCRATCH3: - kvm_write_c0_guest_kscratch3(cop0, v); - break; - case KVM_REG_MIPS_CP0_KSCRATCH4: - kvm_write_c0_guest_kscratch4(cop0, v); - break; - case KVM_REG_MIPS_CP0_KSCRATCH5: - kvm_write_c0_guest_kscratch5(cop0, v); - break; - case KVM_REG_MIPS_CP0_KSCRATCH6: - kvm_write_c0_guest_kscratch6(cop0, v); - break; - default: - return -EINVAL; - } - return ret; -} - -static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu) -{ - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; - struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; - struct mm_struct *mm; - - /* - * Were we in guest context? If so, restore the appropriate ASID based - * on the mode of the Guest (Kernel/User). - */ - if (current->flags & PF_VCPU) { - mm = KVM_GUEST_KERNEL_MODE(vcpu) ? 
kern_mm : user_mm; - check_switch_mmu_context(mm); - kvm_mips_suspend_mm(cpu); - ehb(); - } - - return 0; -} - -static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu) -{ - kvm_lose_fpu(vcpu); - - if (current->flags & PF_VCPU) { - /* Restore normal Linux process memory map */ - check_switch_mmu_context(current->mm); - kvm_mips_resume_mm(cpu); - ehb(); - } - - return 0; -} - -static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu, - bool reload_asid) -{ - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; - struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; - struct mm_struct *mm; - int i; - - if (likely(!kvm_request_pending(vcpu))) - return; - - if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { - /* - * Both kernel & user GVA mappings must be invalidated. The - * caller is just about to check whether the ASID is stale - * anyway so no need to reload it here. - */ - kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN); - kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER); - for_each_possible_cpu(i) { - set_cpu_context(i, kern_mm, 0); - set_cpu_context(i, user_mm, 0); - } - - /* Generate new ASID for current mode */ - if (reload_asid) { - mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm; - get_new_mmu_context(mm); - htw_stop(); - write_c0_entryhi(cpu_asid(cpu, mm)); - TLBMISS_HANDLER_SETUP_PGD(mm->pgd); - htw_start(); - } - } -} - -/** - * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space. - * @vcpu: VCPU pointer. - * - * Call before a GVA space access outside of guest mode, to ensure that - * asynchronous TLB flush requests are handled or delayed until completion of - * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()). - * - * Should be called with IRQs already enabled. - */ -void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu) -{ - /* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */ - WARN_ON_ONCE(irqs_disabled()); - - /* - * The caller is about to access the GVA space, so we set the mode to - * force TLB flush requests to send an IPI, and also disable IRQs to - * delay IPI handling until kvm_trap_emul_gva_lockless_end(). - */ - local_irq_disable(); - - /* - * Make sure the read of VCPU requests is not reordered ahead of the - * write to vcpu->mode, or we could miss a TLB flush request while - * the requester sees the VCPU as outside of guest mode and not needing - * an IPI. - */ - smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES); - - /* - * If a TLB flush has been requested (potentially while - * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it - * before accessing the GVA space, and be sure to reload the ASID if - * necessary as it'll be immediately used. - * - * TLB flush requests after this check will trigger an IPI due to the - * mode change above, which will be delayed due to IRQs disabled. - */ - kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true); -} - -/** - * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space. - * @vcpu: VCPU pointer. - * - * Called after a GVA space access outside of guest mode. Should have a matching - * call to kvm_trap_emul_gva_lockless_begin(). - */ -void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu) -{ - /* - * Make sure the write to vcpu->mode is not reordered in front of GVA - * accesses, or a TLB flush requester may not think it necessary to send - * an IPI. 
- */ - smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE); - - /* - * Now that the access to GVA space is complete, its safe for pending - * TLB flush request IPIs to be handled (which indicates completion). - */ - local_irq_enable(); -} - -static void kvm_trap_emul_vcpu_reenter(struct kvm_vcpu *vcpu) -{ - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; - struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; - struct mm_struct *mm; - struct mips_coproc *cop0 = vcpu->arch.cop0; - int i, cpu = smp_processor_id(); - unsigned int gasid; - - /* - * No need to reload ASID, IRQs are disabled already so there's no rush, - * and we'll check if we need to regenerate below anyway before - * re-entering the guest. - */ - kvm_trap_emul_check_requests(vcpu, cpu, false); - - if (KVM_GUEST_KERNEL_MODE(vcpu)) { - mm = kern_mm; - } else { - mm = user_mm; - - /* - * Lazy host ASID regeneration / PT flush for guest user mode. - * If the guest ASID has changed since the last guest usermode - * execution, invalidate the stale TLB entries and flush GVA PT - * entries too. - */ - gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID; - if (gasid != vcpu->arch.last_user_gasid) { - kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER); - for_each_possible_cpu(i) - set_cpu_context(i, user_mm, 0); - vcpu->arch.last_user_gasid = gasid; - } - } - - /* - * Check if ASID is stale. This may happen due to a TLB flush request or - * a lazy user MM invalidation. - */ - check_mmu_context(mm); -} - -static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu) -{ - int cpu = smp_processor_id(); - int r; - - /* Check if we have any exceptions/interrupts pending */ - kvm_mips_deliver_interrupts(vcpu, - kvm_read_c0_guest_cause(vcpu->arch.cop0)); - - kvm_trap_emul_vcpu_reenter(vcpu); - - /* - * We use user accessors to access guest memory, but we don't want to - * invoke Linux page faulting. - */ - pagefault_disable(); - - /* Disable hardware page table walking while in guest */ - htw_stop(); - - /* - * While in guest context we're in the guest's address space, not the - * host process address space, so we need to be careful not to confuse - * e.g. cache management IPIs. 
- */ - kvm_mips_suspend_mm(cpu); - - r = vcpu->arch.vcpu_run(vcpu); - - /* We may have migrated while handling guest exits */ - cpu = smp_processor_id(); - - /* Restore normal Linux process memory map */ - check_switch_mmu_context(current->mm); - kvm_mips_resume_mm(cpu); - - htw_start(); - - pagefault_enable(); - - return r; -} - -static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { - /* exit handlers */ - .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable, - .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod, - .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss, - .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss, - .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st, - .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld, - .handle_syscall = kvm_trap_emul_handle_syscall, - .handle_res_inst = kvm_trap_emul_handle_res_inst, - .handle_break = kvm_trap_emul_handle_break, - .handle_trap = kvm_trap_emul_handle_trap, - .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe, - .handle_fpe = kvm_trap_emul_handle_fpe, - .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled, - .handle_guest_exit = kvm_trap_emul_no_handler, - - .hardware_enable = kvm_trap_emul_hardware_enable, - .hardware_disable = kvm_trap_emul_hardware_disable, - .check_extension = kvm_trap_emul_check_extension, - .vcpu_init = kvm_trap_emul_vcpu_init, - .vcpu_uninit = kvm_trap_emul_vcpu_uninit, - .vcpu_setup = kvm_trap_emul_vcpu_setup, - .flush_shadow_all = kvm_trap_emul_flush_shadow_all, - .flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot, - .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb, - .queue_timer_int = kvm_mips_queue_timer_int_cb, - .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb, - .queue_io_int = kvm_mips_queue_io_int_cb, - .dequeue_io_int = kvm_mips_dequeue_io_int_cb, - .irq_deliver = kvm_mips_irq_deliver_cb, - .irq_clear = kvm_mips_irq_clear_cb, - .num_regs = kvm_trap_emul_num_regs, - .copy_reg_indices = kvm_trap_emul_copy_reg_indices, - .get_one_reg = kvm_trap_emul_get_one_reg, - .set_one_reg = kvm_trap_emul_set_one_reg, - .vcpu_load = kvm_trap_emul_vcpu_load, - .vcpu_put = kvm_trap_emul_vcpu_put, - .vcpu_run = kvm_trap_emul_vcpu_run, - .vcpu_reenter = kvm_trap_emul_vcpu_reenter, -}; - -int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) -{ - *install_callbacks = &kvm_trap_emul_callbacks; - return 0; -} diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c index 2ffbe9264a31..43cad10b877d 100644 --- a/arch/mips/kvm/vz.c +++ b/arch/mips/kvm/vz.c @@ -292,9 +292,8 @@ static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, switch (priority) { case MIPS_EXC_INT_TIMER: /* - * Call to kvm_write_c0_guest_compare() clears Cause.TI in - * kvm_mips_emulate_CP0(). Explicitly clear irq associated with - * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not + * Explicitly clear irq associated with Cause.IP[IPTI] + * if GuestCtl2 virtual interrupt register not * supported or if not using GuestCtl2 Hardware Clear. */ if (cpu_has_guestctl2) { @@ -3211,32 +3210,22 @@ static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu) return 0; } -static void kvm_vz_flush_shadow_all(struct kvm *kvm) +static void kvm_vz_prepare_flush_shadow(struct kvm *kvm) { - if (cpu_has_guestid) { - /* Flush GuestID for each VCPU individually */ - kvm_flush_remote_tlbs(kvm); - } else { + if (!cpu_has_guestid) { /* * For each CPU there is a single GPA ASID used by all VCPUs in * the VM, so it doesn't make sense for the VCPUs to handle * invalidation of these ASIDs individually. 
* * Instead mark all CPUs as needing ASID invalidation in - * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to + * asid_flush_mask, and kvm_flush_remote_tlbs(kvm) will * kick any running VCPUs so they check asid_flush_mask. */ cpumask_setall(&kvm->arch.asid_flush_mask); - kvm_flush_remote_tlbs(kvm); } } -static void kvm_vz_flush_shadow_memslot(struct kvm *kvm, - const struct kvm_memory_slot *slot) -{ - kvm_vz_flush_shadow_all(kvm); -} - static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu) { int cpu = smp_processor_id(); @@ -3292,8 +3281,7 @@ static struct kvm_mips_callbacks kvm_vz_callbacks = { .vcpu_init = kvm_vz_vcpu_init, .vcpu_uninit = kvm_vz_vcpu_uninit, .vcpu_setup = kvm_vz_vcpu_setup, - .flush_shadow_all = kvm_vz_flush_shadow_all, - .flush_shadow_memslot = kvm_vz_flush_shadow_memslot, + .prepare_flush_shadow = kvm_vz_prepare_flush_shadow, .gva_to_gpa = kvm_vz_gva_to_gpa_cb, .queue_timer_int = kvm_vz_queue_timer_int_cb, .dequeue_timer_int = kvm_vz_dequeue_timer_int_cb, diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index 88065ee433cd..e19fb98b5d38 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S @@ -661,8 +661,14 @@ LEAF(memcpy) /* a0=dst a1=src a2=len */ EXPORT_SYMBOL(memcpy) move v0, dst /* return value */ .L__memcpy: -FEXPORT(__copy_user) -EXPORT_SYMBOL(__copy_user) +#ifndef CONFIG_EVA +FEXPORT(__raw_copy_from_user) +EXPORT_SYMBOL(__raw_copy_from_user) +FEXPORT(__raw_copy_to_user) +EXPORT_SYMBOL(__raw_copy_to_user) +FEXPORT(__raw_copy_in_user) +EXPORT_SYMBOL(__raw_copy_in_user) +#endif /* Legacy Mode, user <-> user */ __BUILD_COPY_USER LEGACY_MODE USEROP USEROP @@ -681,10 +687,10 @@ EXPORT_SYMBOL(__copy_user) * __copy_from_user (EVA) */ -LEAF(__copy_from_user_eva) -EXPORT_SYMBOL(__copy_from_user_eva) +LEAF(__raw_copy_from_user) +EXPORT_SYMBOL(__raw_copy_from_user) __BUILD_COPY_USER EVA_MODE USEROP KERNELOP -END(__copy_from_user_eva) +END(__raw_copy_from_user) @@ -692,18 +698,18 @@ END(__copy_from_user_eva) * __copy_to_user (EVA) */ -LEAF(__copy_to_user_eva) -EXPORT_SYMBOL(__copy_to_user_eva) +LEAF(__raw_copy_to_user) +EXPORT_SYMBOL(__raw_copy_to_user) __BUILD_COPY_USER EVA_MODE KERNELOP USEROP -END(__copy_to_user_eva) +END(__raw_copy_to_user) /* * __copy_in_user (EVA) */ -LEAF(__copy_in_user_eva) -EXPORT_SYMBOL(__copy_in_user_eva) +LEAF(__raw_copy_in_user) +EXPORT_SYMBOL(__raw_copy_in_user) __BUILD_COPY_USER EVA_MODE USEROP USEROP -END(__copy_in_user_eva) +END(__raw_copy_in_user) #endif diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S index d5449e8a3dfc..b0baa3c79fad 100644 --- a/arch/mips/lib/memset.S +++ b/arch/mips/lib/memset.S @@ -314,9 +314,6 @@ EXPORT_SYMBOL(memset) #ifndef CONFIG_EVA FEXPORT(__bzero) EXPORT_SYMBOL(__bzero) -#else -FEXPORT(__bzero_kernel) -EXPORT_SYMBOL(__bzero_kernel) #endif __BUILD_BZERO LEGACY_MODE diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S index acdff66bd5d2..556acf684d7b 100644 --- a/arch/mips/lib/strncpy_user.S +++ b/arch/mips/lib/strncpy_user.S @@ -29,19 +29,17 @@ * it happens at most some bytes of the exceptions handlers will be copied. */ - .macro __BUILD_STRNCPY_ASM func -LEAF(__strncpy_from_\func\()_asm) - LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? 
- and v0, a1 - bnez v0, .Lfault\@ - +LEAF(__strncpy_from_user_asm) move t0, zero move v1, a1 -.ifeqs "\func","kernel" -1: EX(lbu, v0, (v1), .Lfault\@) -.else -1: EX(lbue, v0, (v1), .Lfault\@) -.endif +#ifdef CONFIG_EVA + .set push + .set eva +1: EX(lbue, v0, (v1), .Lfault) + .set pop +#else +1: EX(lbu, v0, (v1), .Lfault) +#endif PTR_ADDIU v1, 1 R10KCBARRIER(0(ra)) sb v0, (a0) @@ -51,35 +49,17 @@ LEAF(__strncpy_from_\func\()_asm) bne t0, a2, 1b 2: PTR_ADDU v0, a1, t0 xor v0, a1 - bltz v0, .Lfault\@ + bltz v0, .Lfault move v0, t0 jr ra # return n - END(__strncpy_from_\func\()_asm) + END(__strncpy_from_user_asm) -.Lfault\@: +.Lfault: li v0, -EFAULT jr ra .section __ex_table,"a" - PTR 1b, .Lfault\@ + PTR 1b, .Lfault .previous - .endm - -#ifndef CONFIG_EVA - /* Set aliases */ - .global __strncpy_from_user_asm - .set __strncpy_from_user_asm, __strncpy_from_kernel_asm -EXPORT_SYMBOL(__strncpy_from_user_asm) -#endif - -__BUILD_STRNCPY_ASM kernel -EXPORT_SYMBOL(__strncpy_from_kernel_asm) - -#ifdef CONFIG_EVA - .set push - .set eva -__BUILD_STRNCPY_ASM user - .set pop -EXPORT_SYMBOL(__strncpy_from_user_asm) -#endif + EXPORT_SYMBOL(__strncpy_from_user_asm) diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S index e1bacf5a3abe..92b63f20ec05 100644 --- a/arch/mips/lib/strnlen_user.S +++ b/arch/mips/lib/strnlen_user.S @@ -26,12 +26,7 @@ * bytes. There's nothing secret there. On 64-bit accessing beyond * the maximum is a tad hairier ... */ - .macro __BUILD_STRNLEN_ASM func -LEAF(__strnlen_\func\()_asm) - LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? - and v0, a0 - bnez v0, .Lfault\@ - +LEAF(__strnlen_user_asm) move v0, a0 PTR_ADDU a1, a0 # stop pointer 1: @@ -40,11 +35,14 @@ LEAF(__strnlen_\func\()_asm) li AT, 1 #endif beq v0, a1, 1f # limit reached? 
-.ifeqs "\func", "kernel" - EX(lb, t0, (v0), .Lfault\@) -.else - EX(lbe, t0, (v0), .Lfault\@) -.endif +#ifdef CONFIG_EVA + .set push + .set eva + EX(lbe, t0, (v0), .Lfault) + .set pop +#else + EX(lb, t0, (v0), .Lfault) +#endif .set noreorder bnez t0, 1b 1: @@ -57,28 +55,10 @@ LEAF(__strnlen_\func\()_asm) .set reorder PTR_SUBU v0, a0 jr ra - END(__strnlen_\func\()_asm) + END(__strnlen_user_asm) -.Lfault\@: +.Lfault: move v0, zero jr ra - .endm - -#ifndef CONFIG_EVA - /* Set aliases */ - .global __strnlen_user_asm - .set __strnlen_user_asm, __strnlen_kernel_asm -EXPORT_SYMBOL(__strnlen_user_asm) -#endif - -__BUILD_STRNLEN_ASM kernel -EXPORT_SYMBOL(__strnlen_kernel_asm) - -#ifdef CONFIG_EVA - .set push - .set eva -__BUILD_STRNLEN_ASM user - .set pop -EXPORT_SYMBOL(__strnlen_user_asm) -#endif + EXPORT_SYMBOL(__strnlen_user_asm) diff --git a/arch/mips/loongson64/Makefile b/arch/mips/loongson64/Makefile index cc76944b1a9d..e806280bbb85 100644 --- a/arch/mips/loongson64/Makefile +++ b/arch/mips/loongson64/Makefile @@ -2,7 +2,7 @@ # # Makefile for Loongson-3 family machines # -obj-$(CONFIG_MACH_LOONGSON64) += cop2-ex.o platform.o dma.o \ +obj-$(CONFIG_MACH_LOONGSON64) += cop2-ex.o dma.o \ setup.o init.o env.o time.o reset.o \ obj-$(CONFIG_SMP) += smp.o diff --git a/arch/mips/loongson64/env.c b/arch/mips/loongson64/env.c index 51a5d050a94c..c8bb75d58f17 100644 --- a/arch/mips/loongson64/env.c +++ b/arch/mips/loongson64/env.c @@ -43,7 +43,18 @@ const char *get_system_type(void) return "Generic Loongson64 System"; } -void __init prom_init_env(void) + +void __init prom_dtb_init_env(void) +{ + if ((fw_arg2 < CKSEG0 || fw_arg2 > CKSEG1) + && (fw_arg2 < XKPHYS || fw_arg2 > XKSEG)) + + loongson_fdt_blob = __dtb_loongson64_2core_2k1000_begin; + else + loongson_fdt_blob = (void *)fw_arg2; +} + +void __init prom_lefi_init_env(void) { struct boot_params *boot_p; struct loongson_params *loongson_p; @@ -95,7 +106,6 @@ void __init prom_init_env(void) loongson_freqctrl[1] = 0x900010001fe001d0; loongson_freqctrl[2] = 0x900020001fe001d0; loongson_freqctrl[3] = 0x900030001fe001d0; - loongson_sysconf.ht_control_base = 0x90000EFDFB000000; loongson_sysconf.workarounds = WORKAROUND_CPUFREQ; break; case Legacy_3B: @@ -118,7 +128,6 @@ void __init prom_init_env(void) loongson_freqctrl[1] = 0x900020001fe001d0; loongson_freqctrl[2] = 0x900040001fe001d0; loongson_freqctrl[3] = 0x900060001fe001d0; - loongson_sysconf.ht_control_base = 0x90001EFDFB000000; loongson_sysconf.workarounds = WORKAROUND_CPUHOTPLUG; break; default: @@ -136,9 +145,6 @@ void __init prom_init_env(void) loongson_sysconf.cores_per_node - 1) / loongson_sysconf.cores_per_node; - loongson_sysconf.pci_mem_start_addr = eirq_source->pci_mem_start_addr; - loongson_sysconf.pci_mem_end_addr = eirq_source->pci_mem_end_addr; - loongson_sysconf.pci_io_base = eirq_source->pci_io_start_addr; loongson_sysconf.dma_mask_bits = eirq_source->dma_mask_bits; if (loongson_sysconf.dma_mask_bits < 32 || loongson_sysconf.dma_mask_bits > 64) @@ -153,23 +159,8 @@ void __init prom_init_env(void) loongson_sysconf.poweroff_addr, loongson_sysconf.restart_addr, loongson_sysconf.vgabios_addr); - memset(loongson_sysconf.ecname, 0, 32); - if (esys->has_ec) - memcpy(loongson_sysconf.ecname, esys->ec_name, 32); loongson_sysconf.workarounds |= esys->workarounds; - loongson_sysconf.nr_uarts = esys->nr_uarts; - if (esys->nr_uarts < 1 || esys->nr_uarts > MAX_UARTS) - loongson_sysconf.nr_uarts = 1; - memcpy(loongson_sysconf.uarts, esys->uarts, - sizeof(struct uart_device) * 
loongson_sysconf.nr_uarts); - - loongson_sysconf.nr_sensors = esys->nr_sensors; - if (loongson_sysconf.nr_sensors > MAX_SENSORS) - loongson_sysconf.nr_sensors = 0; - if (loongson_sysconf.nr_sensors) - memcpy(loongson_sysconf.sensors, esys->sensors, - sizeof(struct sensor_device) * loongson_sysconf.nr_sensors); pr_info("CpuClock = %u\n", cpu_clock_freq); /* Read the ID of PCI host bridge to detect bridge type */ diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c index cfa788bca871..76e0a9636a0e 100644 --- a/arch/mips/loongson64/init.c +++ b/arch/mips/loongson64/init.c @@ -52,6 +52,10 @@ void __init szmem(unsigned int node) static unsigned long num_physpages; u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size; + /* Otherwise come from DTB */ + if (loongson_sysconf.fw_interface != LOONGSON_LEFI) + return; + /* Parse memory information and activate */ for (i = 0; i < loongson_memmap->nr_map; i++) { node_id = loongson_memmap->map[i].node_id; @@ -94,12 +98,20 @@ static void __init prom_init_memory(void) void __init prom_init(void) { fw_init_cmdline(); - prom_init_env(); + + if (fw_arg2 == 0 || (fdt_magic(fw_arg2) == FDT_MAGIC)) { + loongson_sysconf.fw_interface = LOONGSON_DTB; + prom_dtb_init_env(); + } else { + loongson_sysconf.fw_interface = LOONGSON_LEFI; + prom_lefi_init_env(); + } /* init base address of io space */ set_io_port_base(PCI_IOBASE); - loongson_sysconf.early_config(); + if (loongson_sysconf.early_config) + loongson_sysconf.early_config(); #ifdef CONFIG_NUMA prom_init_numa_memory(); @@ -108,7 +120,10 @@ void __init prom_init(void) #endif /* Hardcode to CPU UART 0 */ - setup_8250_early_printk_port(TO_UNCAC(LOONGSON_REG_BASE + 0x1e0), 0, 1024); + if ((read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R) + setup_8250_early_printk_port(TO_UNCAC(LOONGSON_REG_BASE), 0, 1024); + else + setup_8250_early_printk_port(TO_UNCAC(LOONGSON_REG_BASE + 0x1e0), 0, 1024); register_smp_ops(&loongson3_smp_ops); board_nmi_handler_setup = mips_nmi_setup; @@ -126,7 +141,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_ return -ENOMEM; range->fwnode = fwnode; - range->size = size; + range->size = size = round_up(size, PAGE_SIZE); range->hw_start = hw_start; range->flags = LOGIC_PIO_CPU_MMIO; diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c index a8f57bf01285..fa9b4a487a47 100644 --- a/arch/mips/loongson64/numa.c +++ b/arch/mips/loongson64/numa.c @@ -27,7 +27,6 @@ #include <boot_param.h> #include <loongson.h> -static struct pglist_data prealloc__node_data[MAX_NUMNODES]; unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES]; EXPORT_SYMBOL(__node_distances); struct pglist_data *__node_data[MAX_NUMNODES]; @@ -84,8 +83,12 @@ static void __init init_topology_matrix(void) static void __init node_mem_init(unsigned int node) { + struct pglist_data *nd; unsigned long node_addrspace_offset; unsigned long start_pfn, end_pfn; + unsigned long nd_pa; + int tnid; + const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); node_addrspace_offset = nid_to_addrbase(node); pr_info("Node%d's addrspace_offset is 0x%lx\n", @@ -95,8 +98,16 @@ static void __init node_mem_init(unsigned int node) pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n", node, start_pfn, end_pfn); - __node_data[node] = prealloc__node_data + node; - + nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, node); + if (!nd_pa) + panic("Cannot allocate %zu bytes for node %d data\n", + nd_size, node); + nd = __va(nd_pa); + memset(nd, 0, 
sizeof(struct pglist_data)); + tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); + if (tnid != node) + pr_info("NODE_DATA(%d) on node %d\n", node, tnid); + __node_data[node] = nd; NODE_DATA(node)->node_start_pfn = start_pfn; NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn; @@ -167,7 +178,6 @@ void __init mem_init(void) high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); memblock_free_all(); setup_zero_pages(); /* This comes from node 0 */ - mem_init_print_info(NULL); } /* All PCI device belongs to logical Node-0 */ diff --git a/arch/mips/loongson64/platform.c b/arch/mips/loongson64/platform.c deleted file mode 100644 index 9674ae1361a8..000000000000 --- a/arch/mips/loongson64/platform.c +++ /dev/null @@ -1,42 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2009 Lemote Inc. - * Author: Wu Zhangjin, wuzhangjin@gmail.com - * Xiang Yu, xiangy@lemote.com - * Chen Huacai, chenhc@lemote.com - */ - -#include <linux/err.h> -#include <linux/slab.h> -#include <linux/platform_device.h> -#include <asm/bootinfo.h> -#include <boot_param.h> -#include <loongson_hwmon.h> -#include <workarounds.h> - -static int __init loongson3_platform_init(void) -{ - int i; - struct platform_device *pdev; - - if (loongson_sysconf.ecname[0] != '\0') - platform_device_register_simple(loongson_sysconf.ecname, -1, NULL, 0); - - for (i = 0; i < loongson_sysconf.nr_sensors; i++) { - if (loongson_sysconf.sensors[i].type > SENSOR_FAN) - continue; - - pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL); - if (!pdev) - return -ENOMEM; - - pdev->name = loongson_sysconf.sensors[i].name; - pdev->id = loongson_sysconf.sensors[i].id; - pdev->dev.platform_data = &loongson_sysconf.sensors[i]; - platform_device_register(pdev); - } - - return 0; -} - -arch_initcall(loongson3_platform_init); diff --git a/arch/mips/loongson64/reset.c b/arch/mips/loongson64/reset.c index 3bb8a1ed9348..c97bfdc8c922 100644 --- a/arch/mips/loongson64/reset.c +++ b/arch/mips/loongson64/reset.c @@ -6,9 +6,14 @@ * Copyright (C) 2009 Lemote, Inc. * Author: Zhangjin Wu, wuzhangjin@gmail.com */ +#include <linux/cpu.h> +#include <linux/delay.h> #include <linux/init.h> +#include <linux/kexec.h> #include <linux/pm.h> +#include <linux/slab.h> +#include <asm/bootinfo.h> #include <asm/idle.h> #include <asm/reboot.h> @@ -47,12 +52,120 @@ static void loongson_halt(void) } } +#ifdef CONFIG_KEXEC + +/* 0X80000000~0X80200000 is safe */ +#define MAX_ARGS 64 +#define KEXEC_CTRL_CODE 0xFFFFFFFF80100000UL +#define KEXEC_ARGV_ADDR 0xFFFFFFFF80108000UL +#define KEXEC_ARGV_SIZE COMMAND_LINE_SIZE +#define KEXEC_ENVP_SIZE 4800 + +static int kexec_argc; +static int kdump_argc; +static void *kexec_argv; +static void *kdump_argv; +static void *kexec_envp; + +static int loongson_kexec_prepare(struct kimage *image) +{ + int i, argc = 0; + unsigned int *argv; + char *str, *ptr, *bootloader = "kexec"; + + /* argv at offset 0, argv[] at offset KEXEC_ARGV_SIZE/2 */ + if (image->type == KEXEC_TYPE_DEFAULT) + argv = (unsigned int *)kexec_argv; + else + argv = (unsigned int *)kdump_argv; + + argv[argc++] = (unsigned int)(KEXEC_ARGV_ADDR + KEXEC_ARGV_SIZE/2); + + for (i = 0; i < image->nr_segments; i++) { + if (!strncmp(bootloader, (char *)image->segment[i].buf, + strlen(bootloader))) { + /* + * convert command line string to array + * of parameters (as bootloader does). 
+ */ + int offt; + str = (char *)argv + KEXEC_ARGV_SIZE/2; + memcpy(str, image->segment[i].buf, KEXEC_ARGV_SIZE/2); + ptr = strchr(str, ' '); + + while (ptr && (argc < MAX_ARGS)) { + *ptr = '\0'; + if (ptr[1] != ' ') { + offt = (int)(ptr - str + 1); + argv[argc] = KEXEC_ARGV_ADDR + KEXEC_ARGV_SIZE/2 + offt; + argc++; + } + ptr = strchr(ptr + 1, ' '); + } + break; + } + } + + if (image->type == KEXEC_TYPE_DEFAULT) + kexec_argc = argc; + else + kdump_argc = argc; + + /* kexec/kdump need a safe page to save reboot_code_buffer */ + image->control_code_page = virt_to_page((void *)KEXEC_CTRL_CODE); + + return 0; +} + +static void loongson_kexec_shutdown(void) +{ +#ifdef CONFIG_SMP + int cpu; + + /* All CPUs go to reboot_code_buffer */ + for_each_possible_cpu(cpu) + if (!cpu_online(cpu)) + cpu_device_up(get_cpu_device(cpu)); +#endif + kexec_args[0] = kexec_argc; + kexec_args[1] = fw_arg1; + kexec_args[2] = fw_arg2; + secondary_kexec_args[0] = TO_UNCAC(0x3ff01000); + memcpy((void *)fw_arg1, kexec_argv, KEXEC_ARGV_SIZE); + memcpy((void *)fw_arg2, kexec_envp, KEXEC_ENVP_SIZE); +} + +static void loongson_crash_shutdown(struct pt_regs *regs) +{ + default_machine_crash_shutdown(regs); + kexec_args[0] = kdump_argc; + kexec_args[1] = fw_arg1; + kexec_args[2] = fw_arg2; + secondary_kexec_args[0] = TO_UNCAC(0x3ff01000); + memcpy((void *)fw_arg1, kdump_argv, KEXEC_ARGV_SIZE); + memcpy((void *)fw_arg2, kexec_envp, KEXEC_ENVP_SIZE); +} + +#endif + static int __init mips_reboot_setup(void) { _machine_restart = loongson_restart; _machine_halt = loongson_halt; pm_power_off = loongson_poweroff; +#ifdef CONFIG_KEXEC + kexec_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL); + kdump_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL); + kexec_envp = kmalloc(KEXEC_ENVP_SIZE, GFP_KERNEL); + fw_arg1 = KEXEC_ARGV_ADDR; + memcpy(kexec_envp, (void *)fw_arg2, KEXEC_ENVP_SIZE); + + _machine_kexec_prepare = loongson_kexec_prepare; + _machine_kexec_shutdown = loongson_kexec_shutdown; + _machine_crash_shutdown = loongson_crash_shutdown; +#endif + return 0; } diff --git a/arch/mips/loongson64/time.c b/arch/mips/loongson64/time.c index 91e842b58365..f6d2c1e30570 100644 --- a/arch/mips/loongson64/time.c +++ b/arch/mips/loongson64/time.c @@ -11,9 +11,33 @@ #include <asm/hpet.h> #include <loongson.h> +#include <linux/clk.h> +#include <linux/of_clk.h> void __init plat_time_init(void) { + struct clk *clk; + struct device_node *np; + + if (loongson_sysconf.fw_interface == LOONGSON_DTB) { + of_clk_init(NULL); + + np = of_get_cpu_node(0, NULL); + if (!np) { + pr_err("Failed to get CPU node\n"); + return; + } + + clk = of_clk_get(np, 0); + if (IS_ERR(clk)) { + pr_err("Failed to get CPU clock: %ld\n", PTR_ERR(clk)); + return; + } + + cpu_clock_freq = clk_get_rate(clk); + clk_put(clk); + } + /* setup mips r4k timer */ mips_hpt_frequency = cpu_clock_freq / 2; diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index 865926a37775..4acc4f3d31f8 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile @@ -22,6 +22,10 @@ else obj-y += uasm-mips.o endif +ifndef CONFIG_EVA +obj-y += maccess.o +endif + obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o obj-$(CONFIG_64BIT) += ioremap64.o pgtable-64.o obj-$(CONFIG_HIGHMEM) += highmem.o @@ -40,3 +44,5 @@ obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o obj-$(CONFIG_SCACHE_DEBUGFS) += sc-debugfs.o + +obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 
7719d632df8d..a7bf0c80371c 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -15,6 +15,7 @@ #include <linux/syscalls.h> #include <linux/mm.h> #include <linux/highmem.h> +#include <linux/pagemap.h> #include <asm/cacheflush.h> #include <asm/processor.h> diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c index b9f76f433617..7eaff5b07873 100644 --- a/arch/mips/mm/hugetlbpage.c +++ b/arch/mips/mm/hugetlbpage.c @@ -21,8 +21,8 @@ #include <asm/tlb.h> #include <asm/tlbflush.h> -pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, - unsigned long sz) +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) { pgd_t *pgd; p4d_t *p4d; diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 5cb73bf74a8b..c36358758969 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -467,7 +467,6 @@ void __init mem_init(void) memblock_free_all(); setup_zero_pages(); /* Setup zeroed pages. */ mem_init_free_highmem(); - mem_init_print_info(NULL); #ifdef CONFIG_64BIT if ((unsigned long) &_text > (unsigned long) CKSEG0) diff --git a/arch/mips/mm/maccess.c b/arch/mips/mm/maccess.c new file mode 100644 index 000000000000..58173842c6be --- /dev/null +++ b/arch/mips/mm/maccess.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/uaccess.h> +#include <linux/kernel.h> + +bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) +{ + /* highest bit set means kernel space */ + return (unsigned long)unsafe_src >> (BITS_PER_LONG - 1); +} diff --git a/arch/mips/mm/physaddr.c b/arch/mips/mm/physaddr.c new file mode 100644 index 000000000000..a1ced5e44951 --- /dev/null +++ b/arch/mips/mm/physaddr.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bug.h> +#include <linux/export.h> +#include <linux/types.h> +#include <linux/mmdebug.h> +#include <linux/mm.h> + +#include <asm/sections.h> +#include <asm/io.h> +#include <asm/page.h> +#include <asm/dma.h> + +static inline bool __debug_virt_addr_valid(unsigned long x) +{ + /* high_memory does not get immediately defined, and there + * are early callers of __pa() against PAGE_OFFSET + */ + if (!high_memory && x >= PAGE_OFFSET) + return true; + + if (high_memory && x >= PAGE_OFFSET && x < (unsigned long)high_memory) + return true; + + /* + * MAX_DMA_ADDRESS is a virtual address that may not correspond to an + * actual physical address. Enough code relies on + * virt_to_phys(MAX_DMA_ADDRESS) that we just need to work around it + * and always return true. + */ + if (x == MAX_DMA_ADDRESS) + return true; + + return false; +} + +phys_addr_t __virt_to_phys(volatile const void *x) +{ + WARN(!__debug_virt_addr_valid((unsigned long)x), + "virt_to_phys used for non-linear address: %pK (%pS)\n", + x, x); + + return __virt_to_phys_nodebug(x); +} +EXPORT_SYMBOL(__virt_to_phys); + +phys_addr_t __phys_addr_symbol(unsigned long x) +{ + /* This is bounds checking against the kernel image only. + * __pa_symbol should only be used on kernel symbol addresses. + */ + VIRTUAL_BUG_ON(x < (unsigned long)_text || + x > (unsigned long)_end); + + return __pa_symbol_nodebug(x); +} +EXPORT_SYMBOL(__phys_addr_symbol); diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 0fb1db8a8ef7..cd4afcdf3725 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -849,8 +849,8 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, /* Clear lower 23 bits of context. 
*/ uasm_i_dins(p, ptr, 0, 0, 23); - /* 1 0 1 0 1 << 6 xkphys cached */ - uasm_i_ori(p, ptr, ptr, 0x540); + /* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */ + uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53)); uasm_i_drotr(p, ptr, ptr, 11); #elif defined(CONFIG_SMP) UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG); @@ -1165,8 +1165,9 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, if (pgd_reg == -1) { vmalloc_branch_delay_filled = 1; - /* 1 0 1 0 1 << 6 xkphys cached */ - uasm_i_ori(p, ptr, ptr, 0x540); + /* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */ + uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53)); + uasm_i_drotr(p, ptr, ptr, 11); } diff --git a/arch/mips/mti-malta/Platform b/arch/mips/mti-malta/Platform index 41e0d2a2d325..f4616934d950 100644 --- a/arch/mips/mti-malta/Platform +++ b/arch/mips/mti-malta/Platform @@ -2,9 +2,5 @@ # MIPS Malta board # cflags-$(CONFIG_MIPS_MALTA) += -I$(srctree)/arch/mips/include/asm/mach-malta -ifdef CONFIG_KVM_GUEST - load-$(CONFIG_MIPS_MALTA) += 0x0000000040100000 -else - load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000 -endif +load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000 all-$(CONFIG_MIPS_MALTA) := $(COMPRESSION_FNAME).bin diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index 567720374d57..bbf1e38e1431 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -66,11 +66,6 @@ static void __init estimate_frequencies(void) int secs; u64 giccount = 0, gicstart = 0; -#if defined(CONFIG_KVM_GUEST) && CONFIG_KVM_GUEST_TIMER_FREQ - mips_hpt_frequency = CONFIG_KVM_GUEST_TIMER_FREQ * 1000000; - return; -#endif - local_irq_save(flags); if (mips_gic_present()) diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c index cf33dd8a487e..c25a2ce5e29f 100644 --- a/arch/mips/netlogic/common/irq.c +++ b/arch/mips/netlogic/common/irq.c @@ -276,10 +276,6 @@ asmlinkage void plat_irq_dispatch(void) } #ifdef CONFIG_CPU_XLP -static const struct irq_domain_ops xlp_pic_irq_domain_ops = { - .xlate = irq_domain_xlate_onetwocell, -}; - static int __init xlp_of_pic_init(struct device_node *node, struct device_node *parent) { @@ -324,7 +320,7 @@ static int __init xlp_of_pic_init(struct device_node *node, xlp_pic_domain = irq_domain_add_legacy(node, n_picirqs, nlm_irq_to_xirq(socid, PIC_IRQ_BASE), PIC_IRQ_BASE, - &xlp_pic_irq_domain_ops, NULL); + &irq_domain_simple_ops, NULL); if (xlp_pic_domain == NULL) { pr_err("PIC %pOFn: Creating legacy domain failed!\n", node); return -EINVAL; diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c index f741b8c528e4..c1a655aee599 100644 --- a/arch/mips/pci/pci-ar2315.c +++ b/arch/mips/pci/pci-ar2315.c @@ -31,6 +31,7 @@ #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/init.h> +#include <linux/dma-direct.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/bitops.h> diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c index 39052de915f3..468722c8a5c6 100644 --- a/arch/mips/pci/pci-legacy.c +++ b/arch/mips/pci/pci-legacy.c @@ -89,7 +89,6 @@ static void pcibios_scanbus(struct pci_controller *hose) hose->mem_resource, hose->mem_offset); pci_add_resource_offset(&resources, hose->io_resource, hose->io_offset); - pci_add_resource(&resources, hose->busn_resource); list_splice_init(&resources, &bridge->windows); bridge->dev.parent = NULL; bridge->sysdata = hose; @@ -140,7 +139,6 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node) struct 
of_pci_range range; struct of_pci_range_parser parser; - pr_info("PCI host bridge %pOF ranges:\n", node); hose->of_node = node; if (of_pci_range_parser_init(&parser, node)) @@ -151,23 +149,22 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node) switch (range.flags & IORESOURCE_TYPE_BITS) { case IORESOURCE_IO: - pr_info(" IO 0x%016llx..0x%016llx\n", - range.cpu_addr, - range.cpu_addr + range.size - 1); hose->io_map_base = (unsigned long)ioremap(range.cpu_addr, range.size); res = hose->io_resource; break; case IORESOURCE_MEM: - pr_info(" MEM 0x%016llx..0x%016llx\n", - range.cpu_addr, - range.cpu_addr + range.size - 1); res = hose->mem_resource; break; } - if (res != NULL) - of_pci_range_to_resource(&range, node, res); + if (res != NULL) { + res->name = node->full_name; + res->flags = range.flags; + res->start = range.cpu_addr; + res->end = range.cpu_addr + range.size - 1; + res->parent = res->child = res->sibling = NULL; + } } } @@ -252,7 +249,7 @@ static int pcibios_enable_resources(struct pci_dev *dev, int mask) pci_read_config_word(dev, PCI_COMMAND, &cmd); old_cmd = cmd; - for (idx=0; idx < PCI_NUM_RESOURCES; idx++) { + for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) { /* Only set up the requested stuff */ if (!(mask & (1<<idx))) continue; @@ -282,9 +279,9 @@ static int pcibios_enable_resources(struct pci_dev *dev, int mask) int pcibios_enable_device(struct pci_dev *dev, int mask) { - int err; + int err = pcibios_enable_resources(dev, mask); - if ((err = pcibios_enable_resources(dev, mask)) < 0) + if (err < 0) return err; return pcibios_plat_dev_init(dev); diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c index d36061603752..e032932348d6 100644 --- a/arch/mips/pci/pci-mt7620.c +++ b/arch/mips/pci/pci-mt7620.c @@ -30,6 +30,7 @@ #define RALINK_GPIOMODE 0x60 #define PPLL_CFG1 0x9c +#define PPLL_LD BIT(23) #define PPLL_DRV 0xa0 #define PDRV_SW_SET BIT(31) @@ -239,8 +240,8 @@ static int mt7620_pci_hw_init(struct platform_device *pdev) rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1); mdelay(100); - if (!(rt_sysc_r32(PPLL_CFG1) & PDRV_SW_SET)) { - dev_err(&pdev->dev, "MT7620 PPLL unlock\n"); + if (!(rt_sysc_r32(PPLL_CFG1) & PPLL_LD)) { + dev_err(&pdev->dev, "pcie PLL not locked, aborting init\n"); reset_control_assert(rstpcie0); rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1); return -1; diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c index e1f12e398136..e9dd01431f21 100644 --- a/arch/mips/pci/pci-rt2880.c +++ b/arch/mips/pci/pci-rt2880.c @@ -41,7 +41,6 @@ #define RT2880_PCI_REG_ARBCTL 0x80 static void __iomem *rt2880_pci_base; -static DEFINE_SPINLOCK(rt2880_pci_lock); static u32 rt2880_pci_reg_read(u32 reg) { @@ -63,17 +62,14 @@ static inline u32 rt2880_pci_get_cfgaddr(unsigned int bus, unsigned int slot, static int rt2880_pci_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { - unsigned long flags; u32 address; u32 data; address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where); - spin_lock_irqsave(&rt2880_pci_lock, flags); rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); - spin_unlock_irqrestore(&rt2880_pci_lock, flags); switch (size) { case 1: @@ -93,14 +89,12 @@ static int rt2880_pci_config_read(struct pci_bus *bus, unsigned int devfn, static int rt2880_pci_config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { - unsigned long flags; u32 address; u32 data; 
address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where); - spin_lock_irqsave(&rt2880_pci_lock, flags); rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); @@ -119,7 +113,6 @@ static int rt2880_pci_config_write(struct pci_bus *bus, unsigned int devfn, } rt2880_pci_reg_write(data, RT2880_PCI_REG_CONFIG_DATA); - spin_unlock_irqrestore(&rt2880_pci_lock, flags); return PCIBIOS_SUCCESSFUL; } @@ -151,36 +144,29 @@ static struct pci_controller rt2880_pci_controller = { static inline u32 rt2880_pci_read_u32(unsigned long reg) { - unsigned long flags; u32 address; u32 ret; address = rt2880_pci_get_cfgaddr(0, 0, 0, reg); - spin_lock_irqsave(&rt2880_pci_lock, flags); rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); ret = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); - spin_unlock_irqrestore(&rt2880_pci_lock, flags); return ret; } static inline void rt2880_pci_write_u32(unsigned long reg, u32 val) { - unsigned long flags; u32 address; address = rt2880_pci_get_cfgaddr(0, 0, 0, reg); - spin_lock_irqsave(&rt2880_pci_lock, flags); rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); rt2880_pci_reg_write(val, RT2880_PCI_REG_CONFIG_DATA); - spin_unlock_irqrestore(&rt2880_pci_lock, flags); } int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { - u16 cmd; int irq = -1; if (dev->bus->number != 0) @@ -188,8 +174,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) switch (PCI_SLOT(dev->devfn)) { case 0x00: - rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000); - (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0); break; case 0x11: irq = RT288X_CPU_IRQ_PCI; @@ -201,16 +185,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) break; } - pci_write_config_byte((struct pci_dev *) dev, - PCI_CACHE_LINE_SIZE, 0x14); - pci_write_config_byte((struct pci_dev *) dev, PCI_LATENCY_TIMER, 0xFF); - pci_read_config_word((struct pci_dev *) dev, PCI_COMMAND, &cmd); - cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | - PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK | - PCI_COMMAND_SERR | PCI_COMMAND_WAIT | PCI_COMMAND_PARITY; - pci_write_config_word((struct pci_dev *) dev, PCI_COMMAND, cmd); - pci_write_config_byte((struct pci_dev *) dev, PCI_INTERRUPT_LINE, - dev->irq); return irq; } @@ -251,6 +225,30 @@ static int rt288x_pci_probe(struct platform_device *pdev) int pcibios_plat_dev_init(struct pci_dev *dev) { + static bool slot0_init; + + /* + * Nobody seems to initialize slot 0, but this platform requires it, so + * do it once when some other slot is being enabled. The PCI subsystem + * should configure other slots properly, so no need to do anything + * special for those. 
+ */ + if (!slot0_init && dev->bus->number == 0) { + u16 cmd; + u32 bar0; + + slot0_init = true; + + pci_bus_write_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0, + 0x08000000); + pci_bus_read_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0, + &bar0); + + pci_bus_read_config_word(dev->bus, 0, PCI_COMMAND, &cmd); + cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY; + pci_bus_write_config_word(dev->bus, 0, PCI_COMMAND, cmd); + } + return 0; } diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c index 0ac6346026d0..aebd4964ea34 100644 --- a/arch/mips/pci/pci-rt3883.c +++ b/arch/mips/pci/pci-rt3883.c @@ -100,7 +100,6 @@ static u32 rt3883_pci_read_cfg32(struct rt3883_pci_controller *rpc, unsigned bus, unsigned slot, unsigned func, unsigned reg) { - unsigned long flags; u32 address; u32 ret; @@ -116,7 +115,6 @@ static void rt3883_pci_write_cfg32(struct rt3883_pci_controller *rpc, unsigned bus, unsigned slot, unsigned func, unsigned reg, u32 val) { - unsigned long flags; u32 address; address = rt3883_pci_get_cfgaddr(bus, slot, func, reg); @@ -229,7 +227,6 @@ static int rt3883_pci_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct rt3883_pci_controller *rpc; - unsigned long flags; u32 address; u32 data; @@ -263,7 +260,6 @@ static int rt3883_pci_config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct rt3883_pci_controller *rpc; - unsigned long flags; u32 address; u32 data; @@ -435,8 +431,7 @@ static int rt3883_pci_probe(struct platform_device *pdev) if (!rpc->intc_of_node) { dev_err(dev, "%pOF has no %s child node", - rpc->intc_of_node, - "interrupt controller"); + np, "interrupt controller"); return -EINVAL; } @@ -450,8 +445,7 @@ static int rt3883_pci_probe(struct platform_device *pdev) if (!rpc->pci_controller.of_node) { dev_err(dev, "%pOF has no %s child node", - rpc->intc_of_node, - "PCI host bridge"); + np, "PCI host bridge"); err = -EINVAL; goto err_put_intc_node; } diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c index 50f7d42cca5a..d2216942af18 100644 --- a/arch/mips/pci/pci-xtalk-bridge.c +++ b/arch/mips/pci/pci-xtalk-bridge.c @@ -385,7 +385,7 @@ static int bridge_domain_activate(struct irq_domain *domain, bridge_set(bc, b_int_enable, 0x7ffffe00); /* more stuff in int_enable */ /* - * Enable sending of an interrupt clear packt to the hub on a high to + * Enable sending of an interrupt clear packet to the hub on a high to * low transition of the interrupt pin. 
* * IRIX sets additional bits in the address which are documented as diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig index c10d8b233ab1..ec4daa63c5e3 100644 --- a/arch/mips/ralink/Kconfig +++ b/arch/mips/ralink/Kconfig @@ -26,6 +26,7 @@ choice config SOC_RT288X bool "RT288x" + select MIPS_AUTO_PFN_OFFSET select MIPS_L1_CACHE_SHIFT_4 select HAVE_LEGACY_CLK select HAVE_PCI diff --git a/arch/mips/ralink/clk.c b/arch/mips/ralink/clk.c index 2f9d5acb38ea..f0bcb1051c30 100644 --- a/arch/mips/ralink/clk.c +++ b/arch/mips/ralink/clk.c @@ -70,6 +70,20 @@ long clk_round_rate(struct clk *clk, unsigned long rate) } EXPORT_SYMBOL_GPL(clk_round_rate); +int clk_set_parent(struct clk *clk, struct clk *parent) +{ + WARN_ON(clk); + return -1; +} +EXPORT_SYMBOL_GPL(clk_set_parent); + +struct clk *clk_get_parent(struct clk *clk) +{ + WARN_ON(clk); + return NULL; +} +EXPORT_SYMBOL_GPL(clk_get_parent); + void __init plat_time_init(void) { struct clk *clk; diff --git a/arch/mips/ralink/common.h b/arch/mips/ralink/common.h index 4bc65b7a3241..87fc16751281 100644 --- a/arch/mips/ralink/common.h +++ b/arch/mips/ralink/common.h @@ -17,6 +17,7 @@ struct ralink_soc_info { unsigned long mem_size; unsigned long mem_size_min; unsigned long mem_size_max; + void (*mem_detect)(void); }; extern struct ralink_soc_info soc_info; @@ -27,7 +28,7 @@ extern void ralink_clk_add(const char *dev, unsigned long rate); extern void ralink_rst_init(void); -extern void prom_soc_init(struct ralink_soc_info *soc_info); +extern void __init prom_soc_init(struct ralink_soc_info *soc_info); __iomem void *plat_of_remap_node(const char *node); diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c index fcf010038054..53a5969e61af 100644 --- a/arch/mips/ralink/mt7620.c +++ b/arch/mips/ralink/mt7620.c @@ -639,7 +639,7 @@ mt7628_dram_init(struct ralink_soc_info *soc_info) } } -void prom_soc_init(struct ralink_soc_info *soc_info) +void __init prom_soc_init(struct ralink_soc_info *soc_info) { void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7620_SYSC_BASE); unsigned char *name = NULL; diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c index ca0ac607b0f3..f82ad2a621f6 100644 --- a/arch/mips/ralink/mt7621.c +++ b/arch/mips/ralink/mt7621.c @@ -9,7 +9,9 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/sys_soc.h> +#include <linux/memblock.h> +#include <asm/bootinfo.h> #include <asm/mipsregs.h> #include <asm/smp-ops.h> #include <asm/mips-cps.h> @@ -49,6 +51,8 @@ #define MT7621_GPIO_MODE_SDHCI_SHIFT 18 #define MT7621_GPIO_MODE_SDHCI_GPIO 1 +static void *detect_magic __initdata = detect_memory_region; + static struct rt2880_pmx_func uart1_grp[] = { FUNC("uart1", 0, 1, 2) }; static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 3, 2) }; static struct rt2880_pmx_func uart3_grp[] = { @@ -110,10 +114,30 @@ phys_addr_t mips_cpc_default_phys_base(void) panic("Cannot detect cpc address"); } +static void __init mt7621_memory_detect(void) +{ + void *dm = &detect_magic; + phys_addr_t size; + + for (size = 32 * SZ_1M; size < 256 * SZ_1M; size <<= 1) { + if (!__builtin_memcmp(dm, dm + size, sizeof(detect_magic))) + break; + } + + if ((size == 256 * SZ_1M) && + (CPHYSADDR(dm + size) < MT7621_LOWMEM_MAX_SIZE) && + __builtin_memcmp(dm, dm + size, sizeof(detect_magic))) { + memblock_add(MT7621_LOWMEM_BASE, MT7621_LOWMEM_MAX_SIZE); + memblock_add(MT7621_HIGHMEM_BASE, MT7621_HIGHMEM_SIZE); + } else { + memblock_add(MT7621_LOWMEM_BASE, size); + } +} + void __init ralink_of_remap(void) { - rt_sysc_membase = 
plat_of_remap_node("mtk,mt7621-sysc"); - rt_memc_membase = plat_of_remap_node("mtk,mt7621-memc"); + rt_sysc_membase = plat_of_remap_node("mediatek,mt7621-sysc"); + rt_memc_membase = plat_of_remap_node("mediatek,mt7621-memc"); if (!rt_sysc_membase || !rt_memc_membase) panic("Failed to remap core resources"); @@ -146,7 +170,7 @@ static void soc_dev_init(struct ralink_soc_info *soc_info, u32 rev) } } -void prom_soc_init(struct ralink_soc_info *soc_info) +void __init prom_soc_init(struct ralink_soc_info *soc_info) { void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7621_SYSC_BASE); unsigned char *name = NULL; @@ -181,7 +205,7 @@ void prom_soc_init(struct ralink_soc_info *soc_info) if (n0 == MT7621_CHIP_NAME0 && n1 == MT7621_CHIP_NAME1) { name = "MT7621"; - soc_info->compatible = "mtk,mt7621-soc"; + soc_info->compatible = "mediatek,mt7621-soc"; } else { panic("mt7621: unknown SoC, n0:%08x n1:%08x\n", n0, n1); } @@ -194,10 +218,7 @@ void prom_soc_init(struct ralink_soc_info *soc_info) (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK, (rev & CHIP_REV_ECO_MASK)); - soc_info->mem_size_min = MT7621_DDR2_SIZE_MIN; - soc_info->mem_size_max = MT7621_DDR2_SIZE_MAX; - soc_info->mem_base = MT7621_DRAM_BASE; - + soc_info->mem_detect = mt7621_memory_detect; rt2880_pinmux_data = mt7621_pinmux_data; soc_dev_init(soc_info, rev); diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index 8286c3521476..0c5de07da097 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c @@ -78,6 +78,8 @@ void __init plat_mem_setup(void) of_scan_flat_dt(early_init_dt_find_memory, NULL); if (memory_dtb) of_scan_flat_dt(early_init_dt_scan_memory, NULL); + else if (soc_info.mem_detect) + soc_info.mem_detect(); else if (soc_info.mem_size) memblock_add(soc_info.mem_base, soc_info.mem_size * SZ_1M); else diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c index 3f096897858c..34083c70ec68 100644 --- a/arch/mips/ralink/rt288x.c +++ b/arch/mips/ralink/rt288x.c @@ -77,7 +77,7 @@ void __init ralink_of_remap(void) panic("Failed to remap core resources"); } -void prom_soc_init(struct ralink_soc_info *soc_info) +void __init prom_soc_init(struct ralink_soc_info *soc_info) { void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT2880_SYSC_BASE); const char *name; diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c index 496f966c05f9..c5b63c142705 100644 --- a/arch/mips/ralink/rt305x.c +++ b/arch/mips/ralink/rt305x.c @@ -214,7 +214,7 @@ void __init ralink_of_remap(void) panic("Failed to remap core resources"); } -void prom_soc_init(struct ralink_soc_info *soc_info) +void __init prom_soc_init(struct ralink_soc_info *soc_info) { void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE); unsigned char *name; diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c index 8f3fe3106708..ff91f3531ad0 100644 --- a/arch/mips/ralink/rt3883.c +++ b/arch/mips/ralink/rt3883.c @@ -113,7 +113,7 @@ void __init ralink_of_remap(void) panic("Failed to remap core resources"); } -void prom_soc_init(struct ralink_soc_info *soc_info) +void __init prom_soc_init(struct ralink_soc_info *soc_info) { void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT3883_SYSC_BASE); const char *name; diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c index dd34f1b32b79..04684990e28e 100644 --- a/arch/mips/rb532/devices.c +++ b/arch/mips/rb532/devices.c @@ -58,37 +58,27 @@ EXPORT_SYMBOL(get_latch_u5); static struct resource korina_dev0_res[] = { { - .name = "korina_regs", + .name = "emac", .start = ETH0_BASE_ADDR, .end = 
ETH0_BASE_ADDR + sizeof(struct eth_regs), .flags = IORESOURCE_MEM, }, { - .name = "korina_rx", + .name = "rx", .start = ETH0_DMA_RX_IRQ, .end = ETH0_DMA_RX_IRQ, .flags = IORESOURCE_IRQ }, { - .name = "korina_tx", + .name = "tx", .start = ETH0_DMA_TX_IRQ, .end = ETH0_DMA_TX_IRQ, .flags = IORESOURCE_IRQ }, { - .name = "korina_ovr", - .start = ETH0_RX_OVR_IRQ, - .end = ETH0_RX_OVR_IRQ, - .flags = IORESOURCE_IRQ - }, { - .name = "korina_und", - .start = ETH0_TX_UND_IRQ, - .end = ETH0_TX_UND_IRQ, - .flags = IORESOURCE_IRQ - }, { - .name = "korina_dma_rx", + .name = "dma_rx", .start = ETH0_RX_DMA_ADDR, .end = ETH0_RX_DMA_ADDR + DMA_CHAN_OFFSET - 1, .flags = IORESOURCE_MEM, }, { - .name = "korina_dma_tx", + .name = "dma_tx", .start = ETH0_TX_DMA_ADDR, .end = ETH0_TX_DMA_ADDR + DMA_CHAN_OFFSET - 1, .flags = IORESOURCE_MEM, @@ -105,6 +95,9 @@ static struct platform_device korina_dev0 = { .name = "korina", .resource = korina_dev0_res, .num_resources = ARRAY_SIZE(korina_dev0_res), + .dev = { + .platform_data = &korina_dev0_data.mac, + } }; static struct resource cf_slot0_res[] = { @@ -299,8 +292,6 @@ static int __init plat_setup_devices(void) /* set the uart clock to the current cpu frequency */ rb532_uart_res[0].uartclk = idt_cpu_freq; - dev_set_drvdata(&korina_dev0.dev, &korina_dev0_data); - gpiod_add_lookup_table(&cf_slot0_gpio_table); return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs)); } diff --git a/arch/mips/sgi-ip27/TODO b/arch/mips/sgi-ip27/TODO deleted file mode 100644 index 160857ff1483..000000000000 --- a/arch/mips/sgi-ip27/TODO +++ /dev/null @@ -1,19 +0,0 @@ -1. Need to figure out why PCI writes to the IOC3 hang, and if it is okay -not to write to the IOC3 ever. -2. Need to figure out RRB allocation in bridge_startup(). -3. Need to figure out why address swaizzling is needed in inw/outw for -Qlogic scsi controllers. -4. Need to integrate ip27-klconfig.c:find_lboard and -ip27-init.c:find_lbaord_real. DONE -5. Is it okay to set calias space on all nodes as 0, instead of 8k as -in irix? -6. Investigate why things do not work without the setup_test() call -being invoked on all nodes in ip27-memory.c. -8. Too many do_page_faults invoked - investigate. -9. start_thread must turn off UX64 ... and define tlb_refill_debug. -10. Need a bad pmd table, bad pte table. __bad_pmd_table/__bad_pagetable -does not agree with pgd_bad/pmd_bad. -11. All intrs (ip27_do_irq handlers) are targeted at cpu A on the node. -This might need to change later. Only the timer intr is set up to be -received on both Cpu A and B. (ip27_do_irq()/bridge_startup()) -13. Cache flushing (specially the SMP version) has to be investigated. diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index 87bb6945ec25..6173684b5aaa 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -420,5 +420,4 @@ void __init mem_init(void) high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); memblock_free_all(); setup_zero_pages(); /* This comes from node 0 */ - mem_init_print_info(NULL); } diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c index 79c434fece52..444b5e0e935f 100644 --- a/arch/mips/sgi-ip27/ip27-timer.c +++ b/arch/mips/sgi-ip27/ip27-timer.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copytight (C) 1999, 2000, 05, 06 Ralf Baechle (ralf@linux-mips.org) - * Copytight (C) 1999, 2000 Silicon Graphics, Inc. 
+ * Copyright (C) 1999, 2000, 05, 06 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. */ #include <linux/bcd.h> #include <linux/clockchips.h> diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index 2131d3fd7333..1b2ea34c3d3b 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -46,7 +46,7 @@ CFLAGS_vgettimeofday-o32.o = -include $(srctree)/$(src)/config-n32-o32-env.c -in CFLAGS_vgettimeofday-n32.o = -include $(srctree)/$(src)/config-n32-o32-env.c -include $(c-gettimeofday-y) endif -CFLAGS_REMOVE_vgettimeofday.o = -pg +CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) ifdef CONFIG_MIPS_DISABLE_VDSO ifndef CONFIG_MIPS_LD_CAN_LINK_VDSO @@ -60,7 +60,7 @@ ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \ $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \ -G 0 --eh-frame-hdr --hash-style=sysv --build-id=sha1 -T -CFLAGS_REMOVE_vdso.o = -pg +CFLAGS_REMOVE_vdso.o = $(CC_FLAGS_FTRACE) GCOV_PROFILE := n UBSAN_SANITIZE := n diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h index 010ba5f1d7dd..d4cbf069dc22 100644 --- a/arch/nds32/include/asm/uaccess.h +++ b/arch/nds32/include/asm/uaccess.h @@ -260,7 +260,6 @@ do { \ extern unsigned long __arch_clear_user(void __user * addr, unsigned long n); extern long strncpy_from_user(char *dest, const char __user * src, long count); -extern __must_check long strlen_user(const char __user * str); extern __must_check long strnlen_user(const char __user * str, long n); extern unsigned long __arch_copy_from_user(void *to, const void __user * from, unsigned long n); diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c index 414f8a780cc3..0e23e3a8df6b 100644 --- a/arch/nds32/kernel/ftrace.c +++ b/arch/nds32/kernel/ftrace.c @@ -236,7 +236,7 @@ void __naked return_to_handler(void) "bal ftrace_return_to_handler\n\t" "move $lp, $r0 \n\t" - /* restore state nedded by the ABI */ + /* restore state needed by the ABI */ "lmw.bim $r0,[$sp],$r1,#0x0 \n\t"); } diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c index 6eb98a7ad27d..ad5344ef5d33 100644 --- a/arch/nds32/mm/cacheflush.c +++ b/arch/nds32/mm/cacheflush.c @@ -238,7 +238,7 @@ void flush_dcache_page(struct page *page) { struct address_space *mapping; - mapping = page_mapping(page); + mapping = page_mapping_file(page); if (mapping && !mapping_mapped(mapping)) set_bit(PG_dcache_dirty, &page->flags); else { diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c index fa86f7b2f416..f63f839738c4 100644 --- a/arch/nds32/mm/init.c +++ b/arch/nds32/mm/init.c @@ -191,7 +191,6 @@ void __init mem_init(void) /* this will put all low memory onto the freelists */ memblock_free_all(); - mem_init_print_info(NULL); pr_info("virtual kernel memory layout:\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h index a741abbed6fb..ba9340e96fd4 100644 --- a/arch/nios2/include/asm/uaccess.h +++ b/arch/nios2/include/asm/uaccess.h @@ -83,7 +83,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n); extern long strncpy_from_user(char *__to, const char __user *__from, long __len); -extern __must_check long strlen_user(const char __user *str); extern __must_check long strnlen_user(const char __user *s, long n); /* Optimized macros */ diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c index 65de1bd6a760..6aa9257c3ede 100644 --- a/arch/nios2/mm/cacheflush.c +++ b/arch/nios2/mm/cacheflush.c @@ -11,6 
+11,7 @@ #include <linux/sched.h> #include <linux/mm.h> #include <linux/fs.h> +#include <linux/pagemap.h> #include <asm/cacheflush.h> #include <asm/cpuinfo.h> diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c index 61862dbb0e32..613fcaa5988a 100644 --- a/arch/nios2/mm/init.c +++ b/arch/nios2/mm/init.c @@ -71,7 +71,6 @@ void __init mem_init(void) /* this will put all memory onto the freelists */ memblock_free_all(); - mem_init_print_info(NULL); } void __init mmu_init(void) diff --git a/arch/openrisc/configs/or1ksim_defconfig b/arch/openrisc/configs/or1ksim_defconfig index 75f2da324d0e..6e1e004047c7 100644 --- a/arch/openrisc/configs/or1ksim_defconfig +++ b/arch/openrisc/configs/or1ksim_defconfig @@ -43,7 +43,6 @@ CONFIG_MICREL_PHY=y # CONFIG_SERIO is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index bf9b2310fc93..d5641198b90c 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -211,8 +211,6 @@ void __init mem_init(void) /* this will put all low memory onto the freelists */ memblock_free_all(); - mem_init_print_info(NULL); - printk("mem_init_done ...........................................\n"); mem_init_done = 1; return; diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index afc3b8d03572..bde9907bc5b2 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -12,6 +12,7 @@ config PARISC select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_NO_SG_CHAIN + select ARCH_SUPPORTS_HUGETLBFS if PA20 select ARCH_SUPPORTS_MEMORY_FAILURE select DMA_OPS select RTC_CLASS @@ -138,10 +139,6 @@ config PGTABLE_LEVELS default 3 if 64BIT && PARISC_PAGE_SIZE_4KB default 2 -config SYS_SUPPORTS_HUGETLBFS - def_bool y if PA20 - - menu "Processor type and features" choice diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 7d9f71aa829a..aed8ea29268b 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -41,7 +41,7 @@ endif export LD_BFD -ifneq ($(SUBARCH),$(UTS_MACHINE)) +ifdef cross_compiling ifeq ($(CROSS_COMPILE),) CC_SUFFIXES = linux linux-gnu unknown-linux-gnu CROSS_COMPILE := $(call cc-cross-prefix, \ diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index 4406475a2304..e6e7f74c8ac9 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 generated-y += syscall_table_32.h generated-y += syscall_table_64.h -generated-y += syscall_table_c32.h generic-y += kvm_para.h generic-y += mcs_spinlock.h generic-y += user.h diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h index cf5ee9b0b393..84ee232278a6 100644 --- a/arch/parisc/include/asm/cmpxchg.h +++ b/arch/parisc/include/asm/cmpxchg.h @@ -72,7 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) #endif case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int)old, (unsigned int)new_); - case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_); + case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff); } __cmpxchg_called_with_bad_pointer(); return old; diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h index 8a11b8cf4719..0b5259102319 100644 --- a/arch/parisc/include/asm/io.h +++ b/arch/parisc/include/asm/io.h @@ -316,11 +316,6 @@ extern void iowrite64be(u64 val, void __iomem *addr); */ #define 
xlate_dev_mem_ptr(p) __va(p) -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - extern int devmem_is_allowed(unsigned long pfn); #endif diff --git a/arch/parisc/include/asm/pdc_chassis.h b/arch/parisc/include/asm/pdc_chassis.h index ae3e108d22ad..d6d82f53d3d0 100644 --- a/arch/parisc/include/asm/pdc_chassis.h +++ b/arch/parisc/include/asm/pdc_chassis.h @@ -365,4 +365,3 @@ void parisc_pdc_chassis_init(void); PDC_CHASSIS_EOM_SET ) #endif /* _PARISC_PDC_CHASSIS_H */ -/* vim: set ts=8 */ diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 11ece0d07374..b5fbcd2c1780 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h @@ -272,7 +272,6 @@ on downward growing arches, it looks like this: regs->gr[23] = 0; \ } while(0) -struct task_struct; struct mm_struct; /* Free all resources held by a thread. */ diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index e320bae501d3..3fb86ee507dd 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -268,7 +268,7 @@ static int __init parisc_init_resources(void) result = request_resource(&iomem_resource, &local_broadcast); if (result < 0) { printk(KERN_ERR - "%s: failed to claim %saddress space!\n", + "%s: failed to claim %s address space!\n", __FILE__, local_broadcast.name); return result; } diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 322503780db6..3f24a0af1e04 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -919,24 +919,24 @@ ENTRY(lws_table) END(lws_table) /* End of lws table */ +#ifdef CONFIG_64BIT +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) +#else +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) +#endif #define __SYSCALL(nr, entry) ASM_ULONG_INSN entry .align 8 ENTRY(sys_call_table) .export sys_call_table,data -#ifdef CONFIG_64BIT -#include <asm/syscall_table_c32.h> /* Compat syscalls */ -#else -#include <asm/syscall_table_32.h> /* 32-bit native syscalls */ -#endif +#include <asm/syscall_table_32.h> /* 32-bit syscalls */ END(sys_call_table) #ifdef CONFIG_64BIT .align 8 ENTRY(sys_call_table64) -#include <asm/syscall_table_64.h> /* 64-bit native syscalls */ +#include <asm/syscall_table_64.h> /* 64-bit syscalls */ END(sys_call_table64) #endif -#undef __SYSCALL /* All light-weight-syscall atomic operations @@ -961,5 +961,3 @@ END(lws_lock_start) .previous .end - - diff --git a/arch/parisc/kernel/syscalls/Makefile b/arch/parisc/kernel/syscalls/Makefile index 283f64407b07..0f2ea5bcb0d7 100644 --- a/arch/parisc/kernel/syscalls/Makefile +++ b/arch/parisc/kernel/syscalls/Makefile @@ -6,46 +6,34 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') syscall := $(src)/syscall.tbl -syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +syshdr := $(srctree)/scripts/syscallhdr.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ - cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ - '$(syshdr_abis_$(basetarget))' \ - '$(syshdr_pfx_$(basetarget))' \ - '$(syshdr_offset_$(basetarget))' + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr --abis $(abis) $< $@ quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))' \ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) 
$(systbl) --abis $(abis) $< $@ -syshdr_abis_unistd_32 := common,32 +$(uapi)/unistd_32.h: abis := common,32 $(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -syshdr_abis_unistd_64 := common,64 +$(uapi)/unistd_64.h: abis := common,64 $(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -systbl_abis_syscall_table_32 := common,32 +$(kapi)/syscall_table_32.h: abis := common,32 $(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) -systbl_abis_syscall_table_64 := common,64 +$(kapi)/syscall_table_64.h: abis := common,64 $(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) -systbl_abis_syscall_table_c32 := common,32 -systbl_abi_syscall_table_c32 := c32 -$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) FORCE - $(call if_changed,systbl) - uapisyshdr-y += unistd_32.h unistd_64.h kapisyshdr-y += syscall_table_32.h \ - syscall_table_64.h \ - syscall_table_c32.h + syscall_table_64.h uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index 271a92519683..5ac80b83d745 100644 --- a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -440,3 +440,7 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr +443 common quotactl_path sys_quotactl_path +444 common landlock_create_ruleset sys_landlock_create_ruleset +445 common landlock_add_rule sys_landlock_add_rule +446 common landlock_restrict_self sys_landlock_restrict_self diff --git a/arch/parisc/kernel/syscalls/syscallhdr.sh b/arch/parisc/kernel/syscalls/syscallhdr.sh deleted file mode 100644 index 730db288fe54..000000000000 --- a/arch/parisc/kernel/syscalls/syscallhdr.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -prefix="$4" -offset="$5" - -fileguard=_UAPI_ASM_PARISC_`basename "$out" | sed \ - -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ - -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - printf "#ifndef %s\n" "${fileguard}" - printf "#define %s\n" "${fileguard}" - printf "\n" - - nxt=0 - while read nr abi name entry compat ; do - if [ -z "$offset" ]; then - printf "#define __NR_%s%s\t%s\n" \ - "${prefix}" "${name}" "${nr}" - else - printf "#define __NR_%s%s\t(%s + %s)\n" \ - "${prefix}" "${name}" "${offset}" "${nr}" - fi - nxt=$((nr+1)) - done - - printf "\n" - printf "#ifdef __KERNEL__\n" - printf "#define __NR_syscalls\t%s\n" "${nxt}" - printf "#endif\n" - printf "\n" - printf "#endif /* %s */\n" "${fileguard}" -) > "$out" diff --git a/arch/parisc/kernel/syscalls/syscalltbl.sh b/arch/parisc/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index f7393a7b18aa..000000000000 --- a/arch/parisc/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" 
]; then - offset=0 - fi - - while read nr abi name entry compat ; do - if [ "$my_abi" = "c32" ] && [ ! -z "$compat" ]; then - emit $((nxt+offset)) $((nr+offset)) $compat - else - emit $((nxt+offset)) $((nr+offset)) $entry - fi - nxt=$((nr+1)) - done -) > "$out" diff --git a/arch/parisc/math-emu/fpu.h b/arch/parisc/math-emu/fpu.h index 853c19c03828..dec951d40286 100644 --- a/arch/parisc/math-emu/fpu.h +++ b/arch/parisc/math-emu/fpu.h @@ -5,34 +5,10 @@ * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> */ -/* - * BEGIN_DESC - * - * File: - * @(#) pa/fp/fpu.h $Revision: 1.1 $ - * - * Purpose: - * <<please update with a synopis of the functionality provided by this file>> - * - * - * END_DESC -*/ - -#ifdef __NO_PA_HDRS - PA header file -- do not include this header file for non-PA builds. -#endif - #ifndef _MACHINE_FPU_INCLUDED /* allows multiple inclusion */ #define _MACHINE_FPU_INCLUDED -#if 0 -#ifndef _SYS_STDSYMS_INCLUDED -# include <sys/stdsyms.h> -#endif /* _SYS_STDSYMS_INCLUDED */ -#include <machine/pdc/pdc_rqsts.h> -#endif - #define PA83_FPU_FLAG 0x00000001 #define PA89_FPU_FLAG 0x00000002 #define PA2_0_FPU_FLAG 0x00000010 @@ -43,21 +19,19 @@ #define COPR_FP 0x00000080 /* Floating point -- Coprocessor 0 */ #define SFU_MPY_DIVIDE 0x00008000 /* Multiply/Divide __ SFU 0 */ - #define EM_FPU_TYPE_OFFSET 272 /* version of EMULATION software for COPR,0,0 instruction */ #define EMULATION_VERSION 4 /* - * The only was to differeniate between TIMEX and ROLEX (or PCX-S and PCX-T) - * is thorough the potential type field from the PDC_MODEL call. The - * following flags are used at assist this differeniation. + * The only way to differentiate between TIMEX and ROLEX (or PCX-S and PCX-T) + * is through the potential type field from the PDC_MODEL call. + * The following flags are used to assist this differentiation. */ #define ROLEX_POTENTIAL_KEY_FLAGS PDC_MODEL_CPU_KEY_WORD_TO_IO #define TIMEX_POTENTIAL_KEY_FLAGS (PDC_MODEL_CPU_KEY_QUAD_STORE | \ PDC_MODEL_CPU_KEY_RECIP_SQRT) - #endif /* ! _MACHINE_FPU_INCLUDED */ diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c index 43652de5f139..d1d3990b83f6 100644 --- a/arch/parisc/mm/hugetlbpage.c +++ b/arch/parisc/mm/hugetlbpage.c @@ -44,7 +44,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, } -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgd; diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 9ca4e4ff6895..591a4e939415 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -573,8 +573,6 @@ void __init mem_init(void) #endif parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); - mem_init_print_info(NULL); - #if 0 /* * Do not expose the virtual kernel memory layout to userspace. diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 386ae12d8523..088dd2afcfe4 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -118,37 +118,43 @@ config PPC # Please keep this list sorted alphabetically. 
# select ARCH_32BIT_OFF_T if PPC32 + select ARCH_ENABLE_MEMORY_HOTPLUG + select ARCH_ENABLE_MEMORY_HOTREMOVE + select ARCH_HAS_COPY_MC if PPC64 select ARCH_HAS_DEBUG_VIRTUAL + select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DEVMEM_IS_ALLOWED + select ARCH_HAS_DMA_MAP_DIRECT if PPC_PSERIES select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL - select ARCH_HAS_KCOV select ARCH_HAS_HUGEPD if HUGETLB_PAGE + select ARCH_HAS_KCOV + select ARCH_HAS_MEMBARRIER_CALLBACKS + select ARCH_HAS_MEMBARRIER_SYNC_CORE select ARCH_HAS_MEMREMAP_COMPAT_ALIGN select ARCH_HAS_MMIOWB if PPC64 + select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE select ARCH_HAS_PHYS_TO_DMA select ARCH_HAS_PMEM_API - select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64 select ARCH_HAS_PTE_SPECIAL - select ARCH_HAS_MEMBARRIER_CALLBACKS - select ARCH_HAS_MEMBARRIER_SYNC_CORE select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64 - select ARCH_HAS_STRICT_KERNEL_RWX if (PPC32 && !HIBERNATION) + select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION) select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UACCESS_FLUSHCACHE - select ARCH_HAS_COPY_MC if PPC64 select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_KEEP_MEMBLOCK select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX + select ARCH_STACKWALK select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC32 || PPC_BOOK3S_64 select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF if PPC64 + select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS if PPC_QUEUED_SPINLOCKS select ARCH_USE_QUEUED_SPINLOCKS if PPC_QUEUED_SPINLOCKS select ARCH_WANT_IPC_PARSE_VERSION @@ -159,9 +165,8 @@ config PPC select BUILDTIME_TABLE_SORT select CLONE_BACKWARDS select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN - select DMA_OPS if PPC64 select DMA_OPS_BYPASS if PPC64 - select ARCH_HAS_DMA_MAP_DIRECT if PPC64 && PPC_PSERIES + select DMA_OPS if PPC64 select DYNAMIC_FTRACE if FUNCTION_TRACER select EDAC_ATOMIC_SCRUB select EDAC_SUPPORT @@ -171,6 +176,7 @@ config PPC select GENERIC_CPU_AUTOPROBE select GENERIC_CPU_VULNERABILITIES if PPC_BARRIER_NOSPEC select GENERIC_EARLY_IOREMAP + select GENERIC_GETTIMEOFDAY select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW_LEVEL select GENERIC_PCI_IOMAP if PCI @@ -178,12 +184,15 @@ config PPC select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER select GENERIC_TIME_VSYSCALL - select GENERIC_GETTIMEOFDAY + select GENERIC_VDSO_TIME_NS select HAVE_ARCH_AUDITSYSCALL + select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU select HAVE_ARCH_JUMP_LABEL + select HAVE_ARCH_JUMP_LABEL_RELATIVE select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14 select HAVE_ARCH_KASAN_VMALLOC if PPC32 && PPC_PAGE_SHIFT <= 14 + select HAVE_ARCH_KFENCE if PPC32 select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT @@ -191,16 +200,13 @@ config PPC select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK select HAVE_ASM_MODVERSIONS - select HAVE_C_RECORDMCOUNT - select HAVE_CBPF_JIT if !PPC64 - select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13) - select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2) select 
HAVE_CONTEXT_TRACKING if PPC64 + select HAVE_C_RECORDMCOUNT select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_STACKOVERFLOW select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL - select HAVE_EBPF_JIT if PPC64 + select HAVE_EBPF_JIT select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU) select HAVE_FAST_GUP select HAVE_FTRACE_MCOUNT_RECORD @@ -209,10 +215,13 @@ config PPC select HAVE_FUNCTION_TRACER select HAVE_GCC_PLUGINS if GCC_VERSION >= 50200 # plugin support on gcc <= 5.1 is buggy on PPC select HAVE_GENERIC_VDSO + select HAVE_HARDLOCKUP_DETECTOR_ARCH if PPC_BOOK3S_64 && SMP + select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx) select HAVE_IDE select HAVE_IOREMAP_PROT select HAVE_IRQ_EXIT_ON_IRQ_STACK + select HAVE_IRQ_TIME_ACCOUNTING select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZMA if DEFAULT_UIMAGE select HAVE_KERNEL_LZO if DEFAULT_UIMAGE @@ -224,25 +233,25 @@ config PPC select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S) - select HAVE_HARDLOCKUP_DETECTOR_ARCH if (PPC64 && PPC_BOOK3S) - select HAVE_OPTPROBES if PPC64 + select HAVE_OPTPROBES select HAVE_PERF_EVENTS select HAVE_PERF_EVENTS_NMI if PPC64 - select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP - select MMU_GATHER_RCU_TABLE_FREE - select MMU_GATHER_PAGE_SIZE select HAVE_REGS_AND_STACK_ACCESS_API - select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN + select HAVE_RELIABLE_STACKTRACE + select HAVE_RSEQ select HAVE_SOFTIRQ_ON_OWN_STACK + select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2) + select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13) select HAVE_SYSCALL_TRACEPOINTS select HAVE_VIRT_CPU_ACCOUNTING - select HAVE_IRQ_TIME_ACCOUNTING - select HAVE_RSEQ + select HUGETLB_PAGE_SIZE_VARIABLE if PPC_BOOK3S_64 && HUGETLB_PAGE select IOMMU_HELPER if PPC64 select IRQ_DOMAIN select IRQ_FORCED_THREADING + select MMU_GATHER_PAGE_SIZE + select MMU_GATHER_RCU_TABLE_FREE select MODULES_USE_ELF_RELA select NEED_DMA_MAP_STATE if PPC64 || NOT_COHERENT_CACHE select NEED_SG_DMA_LENGTH @@ -415,11 +424,6 @@ config HIGHMEM source "kernel/Kconfig.hz" -config HUGETLB_PAGE_SIZE_VARIABLE - bool - depends on HUGETLB_PAGE && PPC_BOOK3S_64 - default y - config MATH_EMULATION bool "Math emulation" depends on 4xx || PPC_8xx || PPC_MPC832x || BOOKE @@ -515,12 +519,6 @@ config ARCH_CPU_PROBE_RELEASE def_bool y depends on HOTPLUG_CPU -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y - -config ARCH_ENABLE_MEMORY_HOTREMOVE - def_bool y - config PPC64_SUPPORTS_MEMORY_FAILURE bool "Add support for memory hwpoison" depends on PPC_BOOK3S_64 @@ -548,7 +546,7 @@ config KEXEC config KEXEC_FILE bool "kexec file based system call" select KEXEC_CORE - select HAVE_IMA_KEXEC + select HAVE_IMA_KEXEC if IMA select BUILD_BIN2C select KEXEC_ELF depends on PPC64 @@ -700,9 +698,6 @@ config ARCH_SPARSEMEM_DEFAULT def_bool y depends on PPC_BOOK3S_64 -config SYS_SUPPORTS_HUGETLBFS - bool - config ILLEGAL_POINTER_VALUE hex # This is roughly half way between the top of user space and the bottom @@ -786,7 +781,7 @@ config THREAD_SHIFT config DATA_SHIFT_BOOL bool "Set custom data 
alignment" depends on ADVANCED_OPTIONS - depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC + depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX) help This option allows you to set the kernel data alignment. When @@ -798,13 +793,13 @@ config DATA_SHIFT_BOOL config DATA_SHIFT int "Data shift" if DATA_SHIFT_BOOL default 24 if STRICT_KERNEL_RWX && PPC64 - range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC) && PPC_BOOK3S_32 - range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC) && PPC_8xx + range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32 + range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32 - default 18 if DEBUG_PAGEALLOC && PPC_BOOK3S_32 + default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32 default 23 if STRICT_KERNEL_RWX && PPC_8xx - default 23 if DEBUG_PAGEALLOC && PPC_8xx && PIN_TLB_DATA - default 19 if DEBUG_PAGEALLOC && PPC_8xx + default 23 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && PIN_TLB_DATA + default 19 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx default PPC_PAGE_SHIFT help On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO. @@ -1217,7 +1212,7 @@ config TASK_SIZE_BOOL config TASK_SIZE hex "Size of user task space" if TASK_SIZE_BOOL default "0x80000000" if PPC_8xx - default "0xb0000000" if PPC_BOOK3S_32 && STRICT_KERNEL_RWX + default "0xb0000000" if PPC_BOOK3S_32 default "0xc0000000" endmenu diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index ae084357994e..6342f9da4545 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -353,6 +353,7 @@ config PPC_EARLY_DEBUG_CPM_ADDR config FAIL_IOMMU bool "Fault-injection capability for IOMMU" depends on FAULT_INJECTION + depends on PCI || IBMVIO help Provide fault-injection capability for IOMMU. Each device can be selectively enabled via the fail_iommu property. 
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 5f8544cf724a..3212d076ac6a 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -181,12 +181,6 @@ CC_FLAGS_FTRACE := -pg ifdef CONFIG_MPROFILE_KERNEL CC_FLAGS_FTRACE += -mprofile-kernel endif -# Work around gcc code-gen bugs with -pg / -fno-omit-frame-pointer in gcc <= 4.8 -# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=44199 -# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52828 -ifndef CONFIG_CC_IS_CLANG -CC_FLAGS_FTRACE += $(call cc-ifversion, -lt, 0409, -mno-sched-epilog) -endif endif CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU)) @@ -444,12 +438,15 @@ endif endif ifdef CONFIG_SMP +ifdef CONFIG_PPC32 prepare: task_cpu_prepare PHONY += task_cpu_prepare task_cpu_prepare: prepare0 $(eval KBUILD_CFLAGS += -D_TASK_CPU=$(shell awk '{if ($$2 == "TASK_CPU") print $$3;}' include/generated/asm-offsets.h)) -endif + +endif # CONFIG_PPC32 +endif # CONFIG_SMP PHONY += checkbin # Check toolchain versions: diff --git a/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi b/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi index 0c0efa94cfb4..2a677fd323eb 100644 --- a/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi @@ -170,8 +170,6 @@ timer@41100 { /include/ "pq3-etsec2-0.dtsi" enet0: ethernet@b0000 { queue-group@b0000 { - fsl,rx-bit-map = <0xff>; - fsl,tx-bit-map = <0xff>; interrupts = <26 2 0 0 27 2 0 0 28 2 0 0>; }; }; @@ -179,8 +177,6 @@ enet0: ethernet@b0000 { /include/ "pq3-etsec2-1.dtsi" enet1: ethernet@b1000 { queue-group@b1000 { - fsl,rx-bit-map = <0xff>; - fsl,tx-bit-map = <0xff>; interrupts = <33 2 0 0 34 2 0 0 35 2 0 0>; }; }; diff --git a/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi b/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi index b5f071574e83..b8e0edd1ac69 100644 --- a/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi @@ -190,8 +190,6 @@ crypto@30000 { /include/ "pq3-etsec2-0.dtsi" enet0: ethernet@b0000 { queue-group@b0000 { - fsl,rx-bit-map = <0xff>; - fsl,tx-bit-map = <0xff>; interrupts = <26 2 0 0 27 2 0 0 28 2 0 0>; }; }; @@ -199,8 +197,6 @@ enet0: ethernet@b0000 { /include/ "pq3-etsec2-1.dtsi" enet1: ethernet@b1000 { queue-group@b1000 { - fsl,rx-bit-map = <0xff>; - fsl,tx-bit-map = <0xff>; interrupts = <33 2 0 0 34 2 0 0 35 2 0 0>; }; }; diff --git a/arch/powerpc/boot/dts/fsl/c293si-post.dtsi b/arch/powerpc/boot/dts/fsl/c293si-post.dtsi index bd208320bff5..bec0fc36849d 100644 --- a/arch/powerpc/boot/dts/fsl/c293si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/c293si-post.dtsi @@ -171,8 +171,6 @@ enet0: ethernet@b0000 { queue-group@b0000 { reg = <0x10000 0x1000>; - fsl,rx-bit-map = <0xff>; - fsl,tx-bit-map = <0xff>; }; }; @@ -180,8 +178,6 @@ enet1: ethernet@b1000 { queue-group@b1000 { reg = <0x11000 0x1000>; - fsl,rx-bit-map = <0xff>; - fsl,tx-bit-map = <0xff>; }; }; diff --git a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi index 1b4aafc1f6a2..c2717f31925a 100644 --- a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi @@ -172,29 +172,8 @@ /include/ "pq3-mpic-timer-B.dtsi" /include/ "pq3-etsec2-0.dtsi" - enet0: ethernet@b0000 { - queue-group@b0000 { - fsl,rx-bit-map = <0xff>; - fsl,tx-bit-map = <0xff>; - }; - }; - /include/ "pq3-etsec2-1.dtsi" - enet1: ethernet@b1000 { - queue-group@b1000 { - fsl,rx-bit-map = <0xff>; - fsl,tx-bit-map = <0xff>; - }; - }; - /include/ "pq3-etsec2-2.dtsi" - enet2: 
ethernet@b2000 { - queue-group@b2000 { - fsl,rx-bit-map = <0xff>; - fsl,tx-bit-map = <0xff>; - }; - - }; global-utilities@e0000 { compatible = "fsl,p1010-guts"; diff --git a/arch/powerpc/boot/dts/icon.dts b/arch/powerpc/boot/dts/icon.dts index fbaa60b8f87a..4fd7a4fbb4fb 100644 --- a/arch/powerpc/boot/dts/icon.dts +++ b/arch/powerpc/boot/dts/icon.dts @@ -197,13 +197,6 @@ reg = <0x00fa0000 0x00060000>; }; }; - - SysACE_CompactFlash: sysace@1,0 { - compatible = "xlnx,sysace"; - interrupt-parent = <&UIC2>; - interrupts = <24 0x4>; - reg = <0x00000001 0x00000000 0x10000>; - }; }; UART0: serial@f0000200 { diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper index 41fa0a8715e3..cdb796b76e2e 100755 --- a/arch/powerpc/boot/wrapper +++ b/arch/powerpc/boot/wrapper @@ -191,7 +191,7 @@ if [ -z "$kernel" ]; then kernel=vmlinux fi -LANG=C elfformat="`${CROSS}objdump -p "$kernel" | grep 'file format' | awk '{print $4}'`" +LC_ALL=C elfformat="`${CROSS}objdump -p "$kernel" | grep 'file format' | awk '{print $4}'`" case "$elfformat" in elf64-powerpcle) format=elf64lppc ;; elf64-powerpc) format=elf32ppc ;; diff --git a/arch/powerpc/configs/44x/icon_defconfig b/arch/powerpc/configs/44x/icon_defconfig index 930948a1da76..fb9a15573546 100644 --- a/arch/powerpc/configs/44x/icon_defconfig +++ b/arch/powerpc/configs/44x/icon_defconfig @@ -28,7 +28,6 @@ CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_PHYSMAP_OF=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=35000 -CONFIG_XILINX_SYSACE=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_SCSI_CONSTANTS=y diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 4f05a6652478..701811c91a6f 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -50,6 +50,7 @@ CONFIG_PPC_TRANSACTIONAL_MEM=y CONFIG_KEXEC=y CONFIG_KEXEC_FILE=y CONFIG_CRASH_DUMP=y +CONFIG_FA_DUMP=y CONFIG_IRQ_ALL_CPUS=y CONFIG_PPC_64K_PAGES=y CONFIG_SCHED_SMT=y @@ -177,6 +178,7 @@ CONFIG_CHELSIO_T1=m CONFIG_BE2NET=m CONFIG_IBMVETH=m CONFIG_EHEA=m +CONFIG_IBMVNIC=m CONFIG_E100=y CONFIG_E1000=y CONFIG_E1000E=y diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 6677ac0da45a..1fd9d1260f9e 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -595,7 +595,6 @@ CONFIG_GAMEPORT_FM801=m # CONFIG_LEGACY_PTYS is not set CONFIG_SERIAL_NONSTANDARD=y CONFIG_ROCKETPORT=m -CONFIG_CYCLADES=m CONFIG_SYNCLINK_GT=m CONFIG_NOZOMI=m CONFIG_N_HDLC=m diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 777221775c83..50168dde4ea5 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -41,6 +41,7 @@ CONFIG_DTL=y CONFIG_SCANLOG=m CONFIG_PPC_SMLPAR=y CONFIG_IBMEBUS=y +CONFIG_PAPR_SCM=m CONFIG_PPC_SVM=y # CONFIG_PPC_PMAC is not set CONFIG_RTAS_FLASH=m @@ -159,6 +160,7 @@ CONFIG_BE2NET=m CONFIG_S2IO=m CONFIG_IBMVETH=y CONFIG_EHEA=y +CONFIG_IBMVNIC=y CONFIG_E100=y CONFIG_E1000=y CONFIG_E1000E=y diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c index b1e577cbf00c..88e8ea73bfa7 100644 --- a/arch/powerpc/crypto/sha1-spe-glue.c +++ b/arch/powerpc/crypto/sha1-spe-glue.c @@ -107,7 +107,7 @@ static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data, src += bytes; len -= bytes; - }; + } memcpy((char *)sctx->buffer, src, len); return 0; diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index e1f9b4ea1c53..bcf95ce0964f 
100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 generated-y += syscall_table_32.h generated-y += syscall_table_64.h -generated-y += syscall_table_c32.h generated-y += syscall_table_spu.h generic-y += export.h generic-y += kvm_types.h diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 939f3c94c8f3..1c7b75834e04 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -77,8 +77,6 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, u32 len_high, u32 len_low); long sys_switch_endian(void); -notrace unsigned int __check_irq_replay(void); -void notrace restore_interrupts(void); /* prom_init (OpenFirmware) */ unsigned long __init prom_init(unsigned long r3, unsigned long r4, diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index aecfde829d5d..7ae29cfb06c0 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -80,22 +80,6 @@ do { \ ___p1; \ }) -#ifdef CONFIG_PPC64 -#define smp_cond_load_relaxed(ptr, cond_expr) ({ \ - typeof(ptr) __PTR = (ptr); \ - __unqual_scalar_typeof(*ptr) VAL; \ - VAL = READ_ONCE(*__PTR); \ - if (unlikely(!(cond_expr))) { \ - spin_begin(); \ - do { \ - VAL = READ_ONCE(*__PTR); \ - } while (!(cond_expr)); \ - spin_end(); \ - } \ - (typeof(*ptr))VAL; \ -}) -#endif - #ifdef CONFIG_PPC_BOOK3S_64 #define NOSPEC_BARRIER_SLOT nop #elif defined(CONFIG_PPC_FSL_BOOK3E) diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h index 73bc5d2c431d..1670dfe9d4f1 100644 --- a/arch/powerpc/include/asm/book3s/32/kup.h +++ b/arch/powerpc/include/asm/book3s/32/kup.h @@ -5,86 +5,7 @@ #include <asm/bug.h> #include <asm/book3s/32/mmu-hash.h> -#ifdef __ASSEMBLY__ - -.macro kuep_update_sr gpr1, gpr2 /* NEVER use r0 as gpr2 due to addis */ -101: mtsrin \gpr1, \gpr2 - addi \gpr1, \gpr1, 0x111 /* next VSID */ - rlwinm \gpr1, \gpr1, 0, 0xf0ffffff /* clear VSID overflow */ - addis \gpr2, \gpr2, 0x1000 /* address of next segment */ - bdnz 101b - isync -.endm - -.macro kuep_lock gpr1, gpr2 -#ifdef CONFIG_PPC_KUEP - li \gpr1, NUM_USER_SEGMENTS - li \gpr2, 0 - mtctr \gpr1 - mfsrin \gpr1, \gpr2 - oris \gpr1, \gpr1, SR_NX@h /* set Nx */ - kuep_update_sr \gpr1, \gpr2 -#endif -.endm - -.macro kuep_unlock gpr1, gpr2 -#ifdef CONFIG_PPC_KUEP - li \gpr1, NUM_USER_SEGMENTS - li \gpr2, 0 - mtctr \gpr1 - mfsrin \gpr1, \gpr2 - rlwinm \gpr1, \gpr1, 0, ~SR_NX /* Clear Nx */ - kuep_update_sr \gpr1, \gpr2 -#endif -.endm - -#ifdef CONFIG_PPC_KUAP - -.macro kuap_update_sr gpr1, gpr2, gpr3 /* NEVER use r0 as gpr2 due to addis */ -101: mtsrin \gpr1, \gpr2 - addi \gpr1, \gpr1, 0x111 /* next VSID */ - rlwinm \gpr1, \gpr1, 0, 0xf0ffffff /* clear VSID overflow */ - addis \gpr2, \gpr2, 0x1000 /* address of next segment */ - cmplw \gpr2, \gpr3 - blt- 101b - isync -.endm - -.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3 - lwz \gpr2, KUAP(\thread) - rlwinm. \gpr3, \gpr2, 28, 0xf0000000 - stw \gpr2, STACK_REGS_KUAP(\sp) - beq+ 102f - li \gpr1, 0 - stw \gpr1, KUAP(\thread) - mfsrin \gpr1, \gpr2 - oris \gpr1, \gpr1, SR_KS@h /* set Ks */ - kuap_update_sr \gpr1, \gpr2, \gpr3 -102: -.endm - -.macro kuap_restore sp, current, gpr1, gpr2, gpr3 - lwz \gpr2, STACK_REGS_KUAP(\sp) - rlwinm. 
\gpr3, \gpr2, 28, 0xf0000000 - stw \gpr2, THREAD + KUAP(\current) - beq+ 102f - mfsrin \gpr1, \gpr2 - rlwinm \gpr1, \gpr1, 0, ~SR_KS /* Clear Ks */ - kuap_update_sr \gpr1, \gpr2, \gpr3 -102: -.endm - -.macro kuap_check current, gpr -#ifdef CONFIG_PPC_KUAP_DEBUG - lwz \gpr, THREAD + KUAP(\current) -999: twnei \gpr, 0 - EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE) -#endif -.endm - -#endif /* CONFIG_PPC_KUAP */ - -#else /* !__ASSEMBLY__ */ +#ifndef __ASSEMBLY__ #ifdef CONFIG_PPC_KUAP @@ -103,6 +24,51 @@ static inline void kuap_update_sr(u32 sr, u32 addr, u32 end) isync(); /* Context sync required after mtsr() */ } +static inline void kuap_save_and_lock(struct pt_regs *regs) +{ + unsigned long kuap = current->thread.kuap; + u32 addr = kuap & 0xf0000000; + u32 end = kuap << 28; + + regs->kuap = kuap; + if (unlikely(!kuap)) + return; + + current->thread.kuap = 0; + kuap_update_sr(mfsr(addr) | SR_KS, addr, end); /* Set Ks */ +} + +static inline void kuap_user_restore(struct pt_regs *regs) +{ +} + +static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap) +{ + u32 addr = regs->kuap & 0xf0000000; + u32 end = regs->kuap << 28; + + current->thread.kuap = regs->kuap; + + if (unlikely(regs->kuap == kuap)) + return; + + kuap_update_sr(mfsr(addr) & ~SR_KS, addr, end); /* Clear Ks */ +} + +static inline unsigned long kuap_get_and_assert_locked(void) +{ + unsigned long kuap = current->thread.kuap; + + WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != 0); + + return kuap; +} + +static inline void kuap_assert_locked(void) +{ + kuap_get_and_assert_locked(); +} + static __always_inline void allow_user_access(void __user *to, const void __user *from, u32 size, unsigned long dir) { diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 415ae29fa73a..83c65845a1a9 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -194,10 +194,8 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); #define VMALLOC_END ioremap_bot #endif -#ifdef CONFIG_STRICT_KERNEL_RWX #define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M) #define MODULES_VADDR (MODULES_END - SZ_256M) -#endif #ifndef __ASSEMBLY__ #include <linux/sched.h> diff --git a/arch/powerpc/include/asm/book3s/32/tlbflush.h b/arch/powerpc/include/asm/book3s/32/tlbflush.h index d941c06d4f2e..ba1743c52b56 100644 --- a/arch/powerpc/include/asm/book3s/32/tlbflush.h +++ b/arch/powerpc/include/asm/book3s/32/tlbflush.h @@ -79,4 +79,4 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) flush_tlb_mm(mm); } -#endif /* _ASM_POWERPC_TLBFLUSH_H */ +#endif /* _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H */ diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h index 8bd905050896..9700da3a4093 100644 --- a/arch/powerpc/include/asm/book3s/64/kup.h +++ b/arch/powerpc/include/asm/book3s/64/kup.h @@ -287,7 +287,7 @@ static inline void kuap_kernel_restore(struct pt_regs *regs, */ } -static inline unsigned long kuap_get_and_check_amr(void) +static inline unsigned long kuap_get_and_assert_locked(void) { if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) { unsigned long amr = mfspr(SPRN_AMR); @@ -298,27 +298,7 @@ static inline unsigned long kuap_get_and_check_amr(void) return 0; } -#else /* CONFIG_PPC_PKEY */ - -static inline void kuap_user_restore(struct pt_regs *regs) -{ -} - -static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) -{ -} - -static 
inline unsigned long kuap_get_and_check_amr(void) -{ - return 0; -} - -#endif /* CONFIG_PPC_PKEY */ - - -#ifdef CONFIG_PPC_KUAP - -static inline void kuap_check_amr(void) +static inline void kuap_assert_locked(void) { if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED); diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h index f911bdb68d8b..3004f3323144 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h @@ -18,7 +18,6 @@ * complete pgtable.h but only a portion of it. */ #include <asm/book3s/64/pgtable.h> -#include <asm/bug.h> #include <asm/task_size_64.h> #include <asm/cpu_has_feature.h> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 058601efbc8a..a666d561b44d 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -7,6 +7,7 @@ #ifndef __ASSEMBLY__ #include <linux/mmdebug.h> #include <linux/bug.h> +#include <linux/sizes.h> #endif /* @@ -116,6 +117,7 @@ */ #define _PAGE_KERNEL_RW (_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY) #define _PAGE_KERNEL_RO (_PAGE_PRIVILEGED | _PAGE_READ) +#define _PAGE_KERNEL_ROX (_PAGE_PRIVILEGED | _PAGE_READ | _PAGE_EXEC) #define _PAGE_KERNEL_RWX (_PAGE_PRIVILEGED | _PAGE_DIRTY | \ _PAGE_RW | _PAGE_EXEC) /* @@ -323,7 +325,8 @@ extern unsigned long pci_io_base; #define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE) #define IOREMAP_BASE (PHB_IO_END) #define IOREMAP_START (ioremap_bot) -#define IOREMAP_END (KERN_IO_END) +#define IOREMAP_END (KERN_IO_END - FIXADDR_SIZE) +#define FIXADDR_SIZE SZ_32M /* Advertise special mapping type for AGP */ #define HAVE_PAGE_AGP diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h index c7813dc628fc..59cab558e2f0 100644 --- a/arch/powerpc/include/asm/book3s/64/radix.h +++ b/arch/powerpc/include/asm/book3s/64/radix.h @@ -222,8 +222,10 @@ static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr, * from ptesync, it should probably go into update_mmu_cache, rather * than set_pte_at (which is used to set ptes unrelated to faults). * - * Spurious faults to vmalloc region are not tolerated, so there is - * a ptesync in flush_cache_vmap. + * Spurious faults from the kernel memory are not tolerated, so there + * is a ptesync in flush_cache_vmap, and __map_kernel_page() follows + * the pte update sequence from ISA Book III 6.10 Translation Table + * Update Synchronization Requirements. 
*/ } diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h index d1635ffbb179..0b2162890d8b 100644 --- a/arch/powerpc/include/asm/bug.h +++ b/arch/powerpc/include/asm/bug.h @@ -111,11 +111,8 @@ #ifndef __ASSEMBLY__ struct pt_regs; -long do_page_fault(struct pt_regs *); -long hash__do_page_fault(struct pt_regs *); +void hash__do_page_fault(struct pt_regs *); void bad_page_fault(struct pt_regs *, int); -void __bad_page_fault(struct pt_regs *regs, int sig); -void do_bad_page_fault_segv(struct pt_regs *regs); extern void _exception(int, struct pt_regs *, int, unsigned long); extern void _exception_pkey(struct pt_regs *, unsigned long, int); extern void die(const char *, struct pt_regs *, long); diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h index f63495109f63..7564dd4fd12b 100644 --- a/arch/powerpc/include/asm/cacheflush.h +++ b/arch/powerpc/include/asm/cacheflush.h @@ -30,7 +30,19 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end) #endif /* CONFIG_PPC_BOOK3S_64 */ #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 -extern void flush_dcache_page(struct page *page); +/* + * This is called when a page has been modified by the kernel. + * It just marks the page as not i-cache clean. We do the i-cache + * flush later when the page is given to a user process, if necessary. + */ +static inline void flush_dcache_page(struct page *page) +{ + if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) + return; + /* avoid an atomic op if possible */ + if (test_bit(PG_dcache_clean, &page->flags)) + clear_bit(PG_dcache_clean, &page->flags); +} void flush_icache_range(unsigned long start, unsigned long stop); #define flush_icache_range flush_icache_range @@ -40,7 +52,6 @@ void flush_icache_user_page(struct vm_area_struct *vma, struct page *page, #define flush_icache_user_page flush_icache_user_page void flush_dcache_icache_page(struct page *page); -void __flush_dcache_icache(void *page); /** * flush_dcache_range(): Write any modified data cache blocks out to memory and diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h index 2211b934ecb4..bda45788cfcc 100644 --- a/arch/powerpc/include/asm/cpm2.h +++ b/arch/powerpc/include/asm/cpm2.h @@ -594,7 +594,7 @@ typedef struct fcc_enet { uint fen_p256c; /* Total packets 256 < bytes <= 511 */ uint fen_p512c; /* Total packets 512 < bytes <= 1023 */ uint fen_p1024c; /* Total packets 1024 < bytes <= 1518 */ - uint fen_cambuf; /* Internal CAM buffer poiner */ + uint fen_cambuf; /* Internal CAM buffer pointer */ ushort fen_rfthr; /* Received frames threshold */ ushort fen_rfcnt; /* Received frames count */ } fcc_enet_t; diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h index 7897d16e0990..727d4b321937 100644 --- a/arch/powerpc/include/asm/cpu_has_feature.h +++ b/arch/powerpc/include/asm/cpu_has_feature.h @@ -7,7 +7,7 @@ #include <linux/bug.h> #include <asm/cputable.h> -static inline bool early_cpu_has_feature(unsigned long feature) +static __always_inline bool early_cpu_has_feature(unsigned long feature) { return !!((CPU_FTRS_ALWAYS & feature) || (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature)); @@ -46,7 +46,7 @@ static __always_inline bool cpu_has_feature(unsigned long feature) return static_branch_likely(&cpu_feature_keys[i]); } #else -static inline bool cpu_has_feature(unsigned long feature) +static __always_inline bool cpu_has_feature(unsigned long feature) { return early_cpu_has_feature(feature); } diff 
--git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h index 8d03c16a3663..947b5b9c4424 100644 --- a/arch/powerpc/include/asm/fixmap.h +++ b/arch/powerpc/include/asm/fixmap.h @@ -23,12 +23,17 @@ #include <asm/kmap_size.h> #endif +#ifdef CONFIG_PPC64 +#define FIXADDR_TOP (IOREMAP_END + FIXADDR_SIZE) +#else +#define FIXADDR_SIZE 0 #ifdef CONFIG_KASAN #include <asm/kasan.h> #define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE) #else #define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE)) #endif +#endif /* * Here we define all the compile-time 'special' virtual @@ -50,6 +55,7 @@ */ enum fixed_addresses { FIX_HOLE, +#ifdef CONFIG_PPC32 /* reserve the top 128K for early debugging purposes */ FIX_EARLY_DEBUG_TOP = FIX_HOLE, FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1, @@ -72,6 +78,7 @@ enum fixed_addresses { FIX_IMMR_SIZE, #endif /* FIX_PCIE_MCFG, */ +#endif /* CONFIG_PPC32 */ __end_of_permanent_fixed_addresses, #define NR_FIX_BTMAPS (SZ_256K / PAGE_SIZE) @@ -98,6 +105,8 @@ enum fixed_addresses { static inline void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) { + BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC64) && __FIXADDR_SIZE > FIXADDR_SIZE); + if (__builtin_constant_p(idx)) BUILD_BUG_ON(idx >= __end_of_fixed_addresses); else if (WARN_ON(idx >= __end_of_fixed_addresses)) diff --git a/arch/powerpc/include/asm/fsl_pamu_stash.h b/arch/powerpc/include/asm/fsl_pamu_stash.h index 30a31ad2123d..c0fbadb70b5d 100644 --- a/arch/powerpc/include/asm/fsl_pamu_stash.h +++ b/arch/powerpc/include/asm/fsl_pamu_stash.h @@ -7,6 +7,8 @@ #ifndef __FSL_PAMU_STASH_H #define __FSL_PAMU_STASH_H +struct iommu_domain; + /* cache stash targets */ enum pamu_stash_target { PAMU_ATTR_CACHE_L1 = 1, @@ -14,14 +16,6 @@ enum pamu_stash_target { PAMU_ATTR_CACHE_L3, }; -/* - * This attribute allows configuring stashig specific parameters - * in the PAMU hardware. 
- */ - -struct pamu_stash_attribute { - u32 cpu; /* cpu number */ - u32 cache; /* cache to stash to: L1,L2,L3 */ -}; +int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu); #endif /* __FSL_PAMU_STASH_H */ diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index bc76970b6ee5..debe8c4f7062 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -12,7 +12,7 @@ #ifdef __ASSEMBLY__ -/* Based off of objdump optput from glibc */ +/* Based off of objdump output from glibc */ #define MCOUNT_SAVE_FRAME \ stwu r1,-48(r1); \ @@ -52,7 +52,7 @@ extern void _mcount(void); static inline unsigned long ftrace_call_adjust(unsigned long addr) { - /* reloction of mcount call site is the same as the address */ + /* relocation of mcount call site is the same as the address */ return addr; } diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h index e93ee3202e4c..b3001f8b2c1e 100644 --- a/arch/powerpc/include/asm/futex.h +++ b/arch/powerpc/include/asm/futex.h @@ -33,9 +33,8 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, { int oldval = 0, ret; - if (!access_ok(uaddr, sizeof(u32))) + if (!user_access_begin(uaddr, sizeof(u32))) return -EFAULT; - allow_read_write_user(uaddr, uaddr, sizeof(*uaddr)); switch (op) { case FUTEX_OP_SET: @@ -56,10 +55,10 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, default: ret = -ENOSYS; } + user_access_end(); *oval = oldval; - prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr)); return ret; } @@ -70,11 +69,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, int ret = 0; u32 prev; - if (!access_ok(uaddr, sizeof(u32))) + if (!user_access_begin(uaddr, sizeof(u32))) return -EFAULT; - allow_read_write_user(uaddr, uaddr, sizeof(*uaddr)); - __asm__ __volatile__ ( PPC_ATOMIC_ENTRY_BARRIER "1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\ @@ -93,8 +90,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT) : "cc", "memory"); + user_access_end(); + *uval = prev; - prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr)); return ret; } diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index ed6086d57b22..443050906018 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -315,7 +315,8 @@ #define H_SCM_HEALTH 0x400 #define H_SCM_PERFORMANCE_STATS 0x418 #define H_RPT_INVALIDATE 0x448 -#define MAX_HCALL_OPCODE H_RPT_INVALIDATE +#define H_SCM_FLUSH 0x44C +#define MAX_HCALL_OPCODE H_SCM_FLUSH /* Scope args for H_SCM_UNBIND_ALL */ #define H_UNBIND_SCOPE_ALL (0x1) @@ -389,6 +390,7 @@ #define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0 #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1 #define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2 +#define H_CPU_BEHAV_FAVOUR_SECURITY_H (1ull << 60) // IBM bit 3 #define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5 #define H_CPU_BEHAV_FLUSH_LINK_STACK (1ull << 57) // IBM bit 6 diff --git a/arch/powerpc/include/asm/hvconsole.h b/arch/powerpc/include/asm/hvconsole.h index 999ed5ac9053..ccb2034506f0 100644 --- a/arch/powerpc/include/asm/hvconsole.h +++ b/arch/powerpc/include/asm/hvconsole.h @@ -24,5 +24,8 @@ extern int hvc_get_chars(uint32_t vtermno, char *buf, int count); extern int hvc_put_chars(uint32_t vtermno, const char *buf, int count); +/* Provided by HVC VIO */ +void hvc_vio_init_early(void); + #endif /* 
__KERNEL__ */ #endif /* _PPC64_HVCONSOLE_H */ diff --git a/arch/powerpc/include/asm/hydra.h b/arch/powerpc/include/asm/hydra.h index ae02eb53d6ef..d024447283a0 100644 --- a/arch/powerpc/include/asm/hydra.h +++ b/arch/powerpc/include/asm/hydra.h @@ -94,8 +94,6 @@ extern volatile struct Hydra __iomem *Hydra; #define HYDRA_INT_EXT7 18 /* Power Off Request */ #define HYDRA_INT_SPARE 19 -extern int hydra_init(void); - #endif /* __KERNEL__ */ #endif /* _ASMPPC_HYDRA_H */ diff --git a/arch/powerpc/include/asm/ima.h b/arch/powerpc/include/asm/ima.h deleted file mode 100644 index ead488cf3981..000000000000 --- a/arch/powerpc/include/asm/ima.h +++ /dev/null @@ -1,30 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_POWERPC_IMA_H -#define _ASM_POWERPC_IMA_H - -struct kimage; - -int ima_get_kexec_buffer(void **addr, size_t *size); -int ima_free_kexec_buffer(void); - -#ifdef CONFIG_IMA -void remove_ima_buffer(void *fdt, int chosen_node); -#else -static inline void remove_ima_buffer(void *fdt, int chosen_node) {} -#endif - -#ifdef CONFIG_IMA_KEXEC -int arch_ima_add_kexec_buffer(struct kimage *image, unsigned long load_addr, - size_t size); - -int setup_ima_buffer(const struct kimage *image, void *fdt, int chosen_node); -#else -static inline int setup_ima_buffer(const struct kimage *image, void *fdt, - int chosen_node) -{ - remove_ima_buffer(fdt, chosen_node); - return 0; -} -#endif /* CONFIG_IMA_KEXEC */ - -#endif /* _ASM_POWERPC_IMA_H */ diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h index cc73c1267572..268d3bd073c8 100644 --- a/arch/powerpc/include/asm/inst.h +++ b/arch/powerpc/include/asm/inst.h @@ -4,6 +4,40 @@ #include <asm/ppc-opcode.h> +#ifdef CONFIG_PPC64 + +#define ___get_user_instr(gu_op, dest, ptr) \ +({ \ + long __gui_ret = 0; \ + unsigned long __gui_ptr = (unsigned long)ptr; \ + struct ppc_inst __gui_inst; \ + unsigned int __prefix, __suffix; \ + __gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr); \ + if (__gui_ret == 0) { \ + if ((__prefix >> 26) == OP_PREFIX) { \ + __gui_ret = gu_op(__suffix, \ + (unsigned int __user *)__gui_ptr + 1); \ + __gui_inst = ppc_inst_prefix(__prefix, \ + __suffix); \ + } else { \ + __gui_inst = ppc_inst(__prefix); \ + } \ + if (__gui_ret == 0) \ + (dest) = __gui_inst; \ + } \ + __gui_ret; \ +}) +#else /* !CONFIG_PPC64 */ +#define ___get_user_instr(gu_op, dest, ptr) \ + gu_op((dest).val, (u32 __user *)(ptr)) +#endif /* CONFIG_PPC64 */ + +#define get_user_instr(x, ptr) \ + ___get_user_instr(get_user, x, ptr) + +#define __get_user_instr(x, ptr) \ + ___get_user_instr(__get_user, x, ptr) + /* * Instruction data type for POWER */ @@ -68,6 +102,8 @@ static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y) #define ppc_inst(x) ((struct ppc_inst){ .val = x }) +#define ppc_inst_prefix(x, y) ppc_inst(x) + static inline bool ppc_inst_prefixed(struct ppc_inst x) { return false; @@ -113,13 +149,14 @@ static inline struct ppc_inst *ppc_inst_next(void *location, struct ppc_inst *va return location + ppc_inst_len(tmp); } -static inline u64 ppc_inst_as_u64(struct ppc_inst x) +static inline unsigned long ppc_inst_as_ulong(struct ppc_inst x) { -#ifdef CONFIG_CPU_LITTLE_ENDIAN - return (u64)ppc_inst_suffix(x) << 32 | ppc_inst_val(x); -#else - return (u64)ppc_inst_val(x) << 32 | ppc_inst_suffix(x); -#endif + if (IS_ENABLED(CONFIG_PPC32)) + return ppc_inst_val(x); + else if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN)) + return (u64)ppc_inst_suffix(x) << 32 | ppc_inst_val(x); + else + return (u64)ppc_inst_val(x) << 32 | 
ppc_inst_suffix(x); } #define PPC_INST_STR_LEN sizeof("00000000 00000000") @@ -141,10 +178,6 @@ static inline char *__ppc_inst_as_str(char str[PPC_INST_STR_LEN], struct ppc_ins __str; \ }) -int probe_user_read_inst(struct ppc_inst *inst, - struct ppc_inst __user *nip); - -int probe_kernel_read_inst(struct ppc_inst *inst, - struct ppc_inst *src); +int copy_inst_from_kernel_nofault(struct ppc_inst *inst, struct ppc_inst *src); #endif /* _ASM_POWERPC_INST_H */ diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h index e8d09a841373..44cde2e129b8 100644 --- a/arch/powerpc/include/asm/interrupt.h +++ b/arch/powerpc/include/asm/interrupt.h @@ -2,6 +2,70 @@ #ifndef _ASM_POWERPC_INTERRUPT_H #define _ASM_POWERPC_INTERRUPT_H +/* BookE/4xx */ +#define INTERRUPT_CRITICAL_INPUT 0x100 + +/* BookE */ +#define INTERRUPT_DEBUG 0xd00 +#ifdef CONFIG_BOOKE +#define INTERRUPT_PERFMON 0x260 +#define INTERRUPT_DOORBELL 0x280 +#endif + +/* BookS/4xx/8xx */ +#define INTERRUPT_MACHINE_CHECK 0x200 + +/* BookS/8xx */ +#define INTERRUPT_SYSTEM_RESET 0x100 + +/* BookS */ +#define INTERRUPT_DATA_SEGMENT 0x380 +#define INTERRUPT_INST_SEGMENT 0x480 +#define INTERRUPT_TRACE 0xd00 +#define INTERRUPT_H_DATA_STORAGE 0xe00 +#define INTERRUPT_HMI 0xe60 +#define INTERRUPT_H_FAC_UNAVAIL 0xf80 +#ifdef CONFIG_PPC_BOOK3S +#define INTERRUPT_DOORBELL 0xa00 +#define INTERRUPT_PERFMON 0xf00 +#define INTERRUPT_ALTIVEC_UNAVAIL 0xf20 +#endif + +/* BookE/BookS/4xx/8xx */ +#define INTERRUPT_DATA_STORAGE 0x300 +#define INTERRUPT_INST_STORAGE 0x400 +#define INTERRUPT_EXTERNAL 0x500 +#define INTERRUPT_ALIGNMENT 0x600 +#define INTERRUPT_PROGRAM 0x700 +#define INTERRUPT_SYSCALL 0xc00 +#define INTERRUPT_TRACE 0xd00 + +/* BookE/BookS/44x */ +#define INTERRUPT_FP_UNAVAIL 0x800 + +/* BookE/BookS/44x/8xx */ +#define INTERRUPT_DECREMENTER 0x900 + +#ifndef INTERRUPT_PERFMON +#define INTERRUPT_PERFMON 0x0 +#endif + +/* 8xx */ +#define INTERRUPT_SOFT_EMU_8xx 0x1000 +#define INTERRUPT_INST_TLB_MISS_8xx 0x1100 +#define INTERRUPT_DATA_TLB_MISS_8xx 0x1200 +#define INTERRUPT_INST_TLB_ERROR_8xx 0x1300 +#define INTERRUPT_DATA_TLB_ERROR_8xx 0x1400 +#define INTERRUPT_DATA_BREAKPOINT_8xx 0x1c00 +#define INTERRUPT_INST_BREAKPOINT_8xx 0x1d00 + +/* 603 */ +#define INTERRUPT_INST_TLB_MISS_603 0x1000 +#define INTERRUPT_DATA_LOAD_TLB_MISS_603 0x1100 +#define INTERRUPT_DATA_STORE_TLB_MISS_603 0x1200 + +#ifndef __ASSEMBLY__ + #include <linux/context_tracking.h> #include <linux/hardirq.h> #include <asm/cputime.h> @@ -9,10 +73,18 @@ #include <asm/kprobes.h> #include <asm/runlatch.h> -struct interrupt_state { -#ifdef CONFIG_PPC_BOOK3E_64 - enum ctx_state ctx_state; +static inline void nap_adjust_return(struct pt_regs *regs) +{ +#ifdef CONFIG_PPC_970_NAP + if (unlikely(test_thread_local_flags(_TLF_NAPPING))) { + /* Can avoid a test-and-clear because NMIs do not call this */ + clear_thread_local_flags(_TLF_NAPPING); + regs->nip = (unsigned long)power4_idle_nap_return; + } #endif +} + +struct interrupt_state { }; static inline void booke_restore_dbcr0(void) @@ -29,10 +101,19 @@ static inline void booke_restore_dbcr0(void) static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state) { - /* - * Book3E reconciles irq soft mask in asm - */ -#ifdef CONFIG_PPC_BOOK3S_64 +#ifdef CONFIG_PPC32 + if (!arch_irq_disabled_regs(regs)) + trace_hardirqs_off(); + + if (user_mode(regs)) { + kuep_lock(); + account_cpu_user_entry(); + } else { + kuap_save_and_lock(regs); + } +#endif + +#ifdef CONFIG_PPC64 if 
(irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED) trace_hardirqs_off(); local_paca->irq_happened |= PACA_IRQ_HARD_DIS; @@ -48,16 +129,12 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup * CT_WARN_ON comes here via program_check_exception, * so avoid recursion. */ - if (TRAP(regs) != 0x700) + if (TRAP(regs) != INTERRUPT_PROGRAM) CT_WARN_ON(ct_state() != CONTEXT_KERNEL); } #endif -#ifdef CONFIG_PPC_BOOK3E_64 - state->ctx_state = exception_enter(); - if (user_mode(regs)) - account_cpu_user_entry(); -#endif + booke_restore_dbcr0(); } /* @@ -76,23 +153,8 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup */ static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state) { -#ifdef CONFIG_PPC_BOOK3E_64 - exception_exit(state->ctx_state); -#endif - - /* - * Book3S exits to user via interrupt_exit_user_prepare(), which does - * context tracking, which is a cleaner way to handle PREEMPT=y - * and avoid context entry/exit in e.g., preempt_schedule_irq()), - * which is likely to be where the core code wants to end up. - * - * The above comment explains why we can't do the - * - * if (user_mode(regs)) - * user_exit_irqoff(); - * - * sequence here. - */ + if (user_mode(regs)) + kuep_unlock(); } static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state) @@ -109,24 +171,46 @@ static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct in static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state) { + /* + * Adjust at exit so the main handler sees the true NIA. This must + * come before irq_exit() because irq_exit can enable interrupts, and + * if another interrupt is taken before nap_adjust_return has run + * here, then that interrupt would return directly to idle nap return. 
+ */ + nap_adjust_return(regs); + irq_exit(); interrupt_exit_prepare(regs, state); } struct interrupt_nmi_state { #ifdef CONFIG_PPC64 -#ifdef CONFIG_PPC_BOOK3S_64 u8 irq_soft_mask; u8 irq_happened; -#endif u8 ftrace_enabled; #endif }; +static inline bool nmi_disables_ftrace(struct pt_regs *regs) +{ + /* Allow DEC and PMI to be traced when they are soft-NMI */ + if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) { + if (TRAP(regs) == INTERRUPT_DECREMENTER) + return false; + if (TRAP(regs) == INTERRUPT_PERFMON) + return false; + } + if (IS_ENABLED(CONFIG_PPC_BOOK3E)) { + if (TRAP(regs) == INTERRUPT_PERFMON) + return false; + } + + return true; +} + static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state) { #ifdef CONFIG_PPC64 -#ifdef CONFIG_PPC_BOOK3S_64 state->irq_soft_mask = local_paca->irq_soft_mask; state->irq_happened = local_paca->irq_happened; @@ -139,9 +223,8 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte local_paca->irq_happened |= PACA_IRQ_HARD_DIS; /* Don't do any per-CPU operations until interrupt state is fixed */ -#endif - /* Allow DEC and PMI to be traced when they are soft-NMI */ - if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260) { + + if (nmi_disables_ftrace(regs)) { state->ftrace_enabled = this_cpu_get_ftrace_enabled(); this_cpu_set_ftrace_enabled(0); } @@ -164,17 +247,20 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter radix_enabled() || (mfmsr() & MSR_DR)) nmi_exit(); + /* + * nmi does not call nap_adjust_return because nmi should not create + * new work to do (must use irq_work for that). + */ + #ifdef CONFIG_PPC64 - if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260) + if (nmi_disables_ftrace(regs)) this_cpu_set_ftrace_enabled(state->ftrace_enabled); -#ifdef CONFIG_PPC_BOOK3S_64 /* Check we didn't change the pending interrupt mask. 
*/ WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened); local_paca->irq_happened = state->irq_happened; local_paca->irq_soft_mask = state->irq_soft_mask; #endif -#endif } /* @@ -387,6 +473,7 @@ DECLARE_INTERRUPT_HANDLER(SMIException); DECLARE_INTERRUPT_HANDLER(handle_hmi_exception); DECLARE_INTERRUPT_HANDLER(unknown_exception); DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception); +DECLARE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception); DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception); DECLARE_INTERRUPT_HANDLER(RunModeException); DECLARE_INTERRUPT_HANDLER(single_step_exception); @@ -410,7 +497,7 @@ DECLARE_INTERRUPT_HANDLER(altivec_assist_exception); DECLARE_INTERRUPT_HANDLER(CacheLockingException); DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException); DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException); -DECLARE_INTERRUPT_HANDLER(WatchdogException); +DECLARE_INTERRUPT_HANDLER_NMI(WatchdogException); DECLARE_INTERRUPT_HANDLER(kernel_bad_stack); /* slb.c */ @@ -421,7 +508,7 @@ DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault); DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault); /* fault.c */ -DECLARE_INTERRUPT_HANDLER_RET(do_page_fault); +DECLARE_INTERRUPT_HANDLER(do_page_fault); DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv); /* process.c */ @@ -436,7 +523,7 @@ DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode); DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException); -void unrecoverable_exception(struct pt_regs *regs); +void __noreturn unrecoverable_exception(struct pt_regs *regs); void replay_system_reset(void); void replay_soft_interrupts(void); @@ -447,4 +534,6 @@ static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs) local_irq_enable(); } +#endif /* __ASSEMBLY__ */ + #endif /* _ASM_POWERPC_INTERRUPT_H */ diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 273edd208ec5..f130783c8301 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -663,11 +663,6 @@ static inline void name at \ #define xlate_dev_mem_ptr(p) __va(p) /* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - -/* * We don't do relaxed operations yet, at least not with this semantic */ #define readb_relaxed(addr) readb(addr) diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index f3f264e441a7..b2bd58830430 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h @@ -53,8 +53,6 @@ extern void *mcheckirq_ctx[NR_CPUS]; extern void *hardirq_ctx[NR_CPUS]; extern void *softirq_ctx[NR_CPUS]; -void call_do_softirq(void *sp); -void call_do_irq(struct pt_regs *regs, void *sp); extern void do_IRQ(struct pt_regs *regs); extern void __init init_IRQ(void); extern void __do_irq(struct pt_regs *regs); diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h index 09297ec9fa52..2d5c6bec2b4f 100644 --- a/arch/powerpc/include/asm/jump_label.h +++ b/arch/powerpc/include/asm/jump_label.h @@ -20,7 +20,8 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran asm_volatile_goto("1:\n\t" "nop # arch_static_branch\n\t" ".pushsection __jump_table, \"aw\"\n\t" - JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" + ".long 1b - ., %l[l_yes] - .\n\t" + JUMP_ENTRY_TYPE "%c0 - .\n\t" ".popsection \n\t" : : "i" (&((char *)key)[branch]) : : l_yes); @@ -34,7 +35,8 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool asm_volatile_goto("1:\n\t" "b %l[l_yes] # 
arch_static_branch_jump\n\t" ".pushsection __jump_table, \"aw\"\n\t" - JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" + ".long 1b - ., %l[l_yes] - .\n\t" + JUMP_ENTRY_TYPE "%c0 - .\n\t" ".popsection \n\t" : : "i" (&((char *)key)[branch]) : : l_yes); @@ -43,23 +45,12 @@ l_yes: return true; } -#ifdef CONFIG_PPC64 -typedef u64 jump_label_t; -#else -typedef u32 jump_label_t; -#endif - -struct jump_entry { - jump_label_t code; - jump_label_t target; - jump_label_t key; -}; - #else #define ARCH_STATIC_BRANCH(LABEL, KEY) \ 1098: nop; \ .pushsection __jump_table, "aw"; \ - FTR_ENTRY_LONG 1098b, LABEL, KEY; \ + .long 1098b - ., LABEL - .; \ + FTR_ENTRY_LONG KEY; \ .popsection #endif diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h index 7355ed05e65e..3c478e5ef24c 100644 --- a/arch/powerpc/include/asm/kasan.h +++ b/arch/powerpc/include/asm/kasan.h @@ -19,7 +19,7 @@ #define KASAN_SHADOW_SCALE_SHIFT 3 -#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_MODULES) && defined(CONFIG_STRICT_KERNEL_RWX) +#ifdef CONFIG_MODULES #define KASAN_KERN_START ALIGN_DOWN(PAGE_OFFSET - SZ_256M, SZ_256M) #else #define KASAN_KERN_START PAGE_OFFSET diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index 9ab344d29a54..88d0d7cf3a79 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -107,15 +107,7 @@ struct kimage_arch { unsigned long backup_start; void *backup_buf; - - unsigned long elfcorehdr_addr; - unsigned long elf_headers_sz; - void *elf_headers; - -#ifdef CONFIG_IMA_KEXEC - phys_addr_t ima_buffer_addr; - size_t ima_buffer_size; -#endif + void *fdt; }; char *setup_kdump_cmdline(struct kimage *image, char *cmdline, @@ -123,10 +115,6 @@ char *setup_kdump_cmdline(struct kimage *image, char *cmdline, int setup_purgatory(struct kimage *image, const void *slave_code, const void *fdt, unsigned long kernel_load_addr, unsigned long fdt_load_addr); -int setup_new_fdt(const struct kimage *image, void *fdt, - unsigned long initrd_load_addr, unsigned long initrd_len, - const char *cmdline); -int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size); #ifdef CONFIG_PPC64 struct kexec_buf; @@ -136,7 +124,7 @@ int load_crashdump_segments_ppc64(struct kimage *image, int setup_purgatory_ppc64(struct kimage *image, const void *slave_code, const void *fdt, unsigned long kernel_load_addr, unsigned long fdt_load_addr); -unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image); +unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image); int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, unsigned long initrd_load_addr, unsigned long initrd_len, const char *cmdline); diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h new file mode 100644 index 000000000000..a9846b68c6b9 --- /dev/null +++ b/arch/powerpc/include/asm/kfence.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * powerpc KFENCE support. 
+ * + * Copyright (C) 2020 CS GROUP France + */ + +#ifndef __ASM_POWERPC_KFENCE_H +#define __ASM_POWERPC_KFENCE_H + +#include <linux/mm.h> +#include <asm/pgtable.h> + +static inline bool arch_kfence_init_pool(void) +{ + return true; +} + +static inline bool kfence_protect_page(unsigned long addr, bool protect) +{ + pte_t *kpte = virt_to_kpte(addr); + + if (protect) { + pte_update(&init_mm, addr, kpte, _PAGE_PRESENT, 0, 0); + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + } else { + pte_update(&init_mm, addr, kpte, 0, _PAGE_PRESENT, 0); + } + + return true; +} + +#endif /* __ASM_POWERPC_KFENCE_H */ diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h index 7ec21af49a45..ec96232529ac 100644 --- a/arch/powerpc/include/asm/kup.h +++ b/arch/powerpc/include/asm/kup.h @@ -28,15 +28,6 @@ #ifdef __ASSEMBLY__ #ifndef CONFIG_PPC_KUAP -.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3 -.endm - -.macro kuap_restore sp, current, gpr1, gpr2, gpr3 -.endm - -.macro kuap_check current, gpr -.endm - .macro kuap_check_amr gpr1, gpr2 .endm @@ -55,6 +46,14 @@ void setup_kuep(bool disabled); static inline void setup_kuep(bool disabled) { } #endif /* CONFIG_PPC_KUEP */ +#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32) +void kuep_lock(void); +void kuep_unlock(void); +#else +static inline void kuep_lock(void) { } +static inline void kuep_unlock(void) { } +#endif + #ifdef CONFIG_PPC_KUAP void setup_kuap(bool disabled); #else @@ -66,7 +65,15 @@ bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) return false; } -static inline void kuap_check_amr(void) { } +static inline void kuap_assert_locked(void) { } +static inline void kuap_save_and_lock(struct pt_regs *regs) { } +static inline void kuap_user_restore(struct pt_regs *regs) { } +static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { } + +static inline unsigned long kuap_get_and_assert_locked(void) +{ + return 0; +} /* * book3s/64/kup-radix.h defines these functions for the !KUAP case to flush diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 2f5f919f6cd3..e6b53c6e21e3 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -210,12 +210,12 @@ extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid); extern int kvmppc_radix_init(void); extern void kvmppc_radix_exit(void); -extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, - unsigned long gfn); -extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, - unsigned long gfn); -extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, - unsigned long gfn); +extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, + unsigned long gfn); +extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, + unsigned long gfn); +extern bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, + unsigned long gfn); extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long *map); extern void kvmppc_radix_flush_memslot(struct kvm *kvm, @@ -258,6 +258,8 @@ extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm, extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa, struct kvm_memory_slot *memslot, unsigned long *map); +extern unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm, + unsigned long lpcr); extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned 
long lpcr, unsigned long mask); extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr); diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 05fb00d37609..1e83359f286b 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -56,13 +56,6 @@ #define KVM_ARCH_WANT_MMU_NOTIFIER -extern int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end, - unsigned flags); -extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); -extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); -extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); - #define HPTEG_CACHE_NUM (1 << 15) #define HPTEG_HASH_BITS_PTE 13 #define HPTEG_HASH_BITS_PTE_LONG 12 diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 8aacd76bb702..5bf8ae9bb2cc 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -281,11 +281,10 @@ struct kvmppc_ops { const struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change); - int (*unmap_hva_range)(struct kvm *kvm, unsigned long start, - unsigned long end); - int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end); - int (*test_age_hva)(struct kvm *kvm, unsigned long hva); - void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte); + bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range); + bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range); + bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range); + bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range); void (*free_memslot)(struct kvm_memory_slot *slot); int (*init_vm)(struct kvm *kvm); void (*destroy_vm)(struct kvm *kvm); @@ -767,8 +766,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long pte_index, unsigned long avpn); long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu); long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags, - unsigned long pte_index, unsigned long avpn, - unsigned long va); + unsigned long pte_index, unsigned long avpn); long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long pte_index); long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags, diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 652ce85f9410..4bc45d3ed8b0 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -263,7 +263,7 @@ extern void arch_exit_mmap(struct mm_struct *mm); static inline void arch_unmap(struct mm_struct *mm, unsigned long start, unsigned long end) { - unsigned long vdso_base = (unsigned long)mm->context.vdso - PAGE_SIZE; + unsigned long vdso_base = (unsigned long)mm->context.vdso; if (start <= vdso_base && vdso_base < end) mm->context.vdso = NULL; diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h index 17a4a616436f..295ef5639609 100644 --- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h @@ -7,33 +7,41 @@ #ifdef CONFIG_PPC_KUAP -#ifdef __ASSEMBLY__ - -.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3 - lis \gpr2, MD_APG_KUAP@h /* only APG0 and APG1 are used */ - mfspr \gpr1, SPRN_MD_AP - mtspr SPRN_MD_AP, \gpr2 - stw \gpr1, STACK_REGS_KUAP(\sp) -.endm - -.macro kuap_restore sp, current, gpr1, gpr2, gpr3 - lwz \gpr1, STACK_REGS_KUAP(\sp) - 
mtspr SPRN_MD_AP, \gpr1 -.endm - -.macro kuap_check current, gpr -#ifdef CONFIG_PPC_KUAP_DEBUG - mfspr \gpr, SPRN_MD_AP - rlwinm \gpr, \gpr, 16, 0xffff -999: twnei \gpr, MD_APG_KUAP@h - EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE) -#endif -.endm - -#else /* !__ASSEMBLY__ */ +#ifndef __ASSEMBLY__ #include <asm/reg.h> +static inline void kuap_save_and_lock(struct pt_regs *regs) +{ + regs->kuap = mfspr(SPRN_MD_AP); + mtspr(SPRN_MD_AP, MD_APG_KUAP); +} + +static inline void kuap_user_restore(struct pt_regs *regs) +{ +} + +static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap) +{ + mtspr(SPRN_MD_AP, regs->kuap); +} + +static inline unsigned long kuap_get_and_assert_locked(void) +{ + unsigned long kuap = mfspr(SPRN_MD_AP); + + if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) + WARN_ON_ONCE(kuap >> 16 != MD_APG_KUAP >> 16); + + return kuap; +} + +static inline void kuap_assert_locked(void) +{ + if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) + kuap_get_and_assert_locked(); +} + static inline void allow_user_access(void __user *to, const void __user *from, unsigned long size, unsigned long dir) { diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h index 478249959baa..6e4faa0a9b35 100644 --- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h @@ -172,6 +172,9 @@ #define mmu_linear_psize MMU_PAGE_8M +#define MODULES_VADDR (PAGE_OFFSET - SZ_256M) +#define MODULES_END PAGE_OFFSET + #ifndef __ASSEMBLY__ #include <linux/mmdebug.h> diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 6cb8aa357191..57cd3892bfe0 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -6,6 +6,8 @@ * the ppc64 non-hashed page table. 
*/ +#include <linux/sizes.h> + #include <asm/nohash/64/pgtable-4k.h> #include <asm/barrier.h> #include <asm/asm-const.h> @@ -54,7 +56,8 @@ #define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE) #define IOREMAP_BASE (PHB_IO_END) #define IOREMAP_START (ioremap_bot) -#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE) +#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE - FIXADDR_SIZE) +#define FIXADDR_SIZE SZ_32M /* diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 9986ac34b8e2..6ea9001de9a9 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -28,9 +28,6 @@ extern struct device_node *opal_node; /* API functions */ int64_t opal_invalid_call(void); -int64_t opal_npu_destroy_context(uint64_t phb_id, uint64_t pid, uint64_t bdf); -int64_t opal_npu_init_context(uint64_t phb_id, int pasid, uint64_t msr, - uint64_t bdf); int64_t opal_npu_map_lpar(uint64_t phb_id, uint64_t bdf, uint64_t lparid, uint64_t lpcr); int64_t opal_npu_spa_setup(uint64_t phb_id, uint32_t bdfn, @@ -307,7 +304,7 @@ int opal_secvar_enqueue_update(const char *key, uint64_t key_len, u8 *data, s64 opal_mpipl_update(enum opal_mpipl_ops op, u64 src, u64 dest, u64 size); s64 opal_mpipl_register_tag(enum opal_mpipl_tags tag, u64 addr); -s64 opal_mpipl_query_tag(enum opal_mpipl_tags tag, u64 *addr); +s64 opal_mpipl_query_tag(enum opal_mpipl_tags tag, __be64 *addr); s64 opal_signal_system_reset(s32 cpu); s64 opal_quiesce(u64 shutdown_type, s32 cpu); diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index d2a2a14e56f9..74424c14515c 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -126,7 +126,6 @@ struct pci_controller { #endif /* CONFIG_PPC64 */ void *private_data; - struct npu *npu; }; /* These are used for config access before all the PCI probing diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 6436f0b41539..d1f53260725c 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -119,11 +119,4 @@ extern void pcibios_scan_phb(struct pci_controller *hose); #endif /* __KERNEL__ */ -extern struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev); -extern struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index); -extern int pnv_npu2_init(struct pci_controller *hose); -extern int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid, - unsigned long msr); -extern int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev); - #endif /* __ASM_POWERPC_PCI_H */ diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 00e7e671bb4b..f4c3428e816b 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -43,7 +43,7 @@ struct power_pmu { u64 alt[]); void (*get_mem_data_src)(union perf_mem_data_src *dsrc, u32 flags, struct pt_regs *regs); - void (*get_mem_weight)(u64 *weight); + void (*get_mem_weight)(u64 *weight, u64 type); unsigned long group_constraint_mask; unsigned long group_constraint_val; u64 (*bhrb_filter_map)(u64 branch_sample_type); @@ -67,6 +67,12 @@ struct power_pmu { * the pmu supports extended perf regs capability */ int capabilities; + /* + * Function to check event code for values which are + * reserved. 
Function takes struct perf_event as input, + * since event code could be spread in attr.config* + */ + int (*check_attr_config)(struct perf_event *ev); }; /* diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 4eed82172e33..c6a676714f04 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -41,8 +41,6 @@ struct mm_struct; #ifndef __ASSEMBLY__ -#include <asm/tlbflush.h> - /* Keep these as a macros to avoid include dependency mess */ #define pte_page(x) pfn_to_page(pte_pfn(x)) #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index ed161ef2b3ca..ac41776661e9 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -265,6 +265,7 @@ #define PPC_INST_ORI 0x60000000 #define PPC_INST_ORIS 0x64000000 #define PPC_INST_BRANCH 0x48000000 +#define PPC_INST_BL 0x48000001 #define PPC_INST_BRANCH_COND 0x40800000 /* Prefixes */ @@ -437,6 +438,9 @@ #define PPC_RAW_STFDX(s, a, b) (0x7c0005ae | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_LVX(t, a, b) (0x7c0000ce | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_STVX(s, a, b) (0x7c0001ce | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_ADDE(t, a, b) (0x7c000114 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_ADDZE(t, a) (0x7c000194 | ___PPC_RT(t) | ___PPC_RA(a)) +#define PPC_RAW_ADDME(t, a) (0x7c0001d4 | ___PPC_RT(t) | ___PPC_RA(a)) #define PPC_RAW_ADD(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_ADD_DOT(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1) #define PPC_RAW_ADDC(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) @@ -445,11 +449,14 @@ #define PPC_RAW_BLR() (PPC_INST_BLR) #define PPC_RAW_BLRL() (0x4e800021) #define PPC_RAW_MTLR(r) (0x7c0803a6 | ___PPC_RT(r)) +#define PPC_RAW_MFLR(t) (PPC_INST_MFLR | ___PPC_RT(t)) #define PPC_RAW_BCTR() (PPC_INST_BCTR) #define PPC_RAW_MTCTR(r) (PPC_INST_MTCTR | ___PPC_RT(r)) #define PPC_RAW_ADDI(d, a, i) (PPC_INST_ADDI | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i)) #define PPC_RAW_LI(r, i) PPC_RAW_ADDI(r, 0, i) #define PPC_RAW_ADDIS(d, a, i) (PPC_INST_ADDIS | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i)) +#define PPC_RAW_ADDIC(d, a, i) (0x30000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i)) +#define PPC_RAW_ADDIC_DOT(d, a, i) (0x34000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i)) #define PPC_RAW_LIS(r, i) PPC_RAW_ADDIS(r, 0, i) #define PPC_RAW_STDX(r, base, b) (0x7c00012a | ___PPC_RS(r) | ___PPC_RA(base) | ___PPC_RB(b)) #define PPC_RAW_STDU(r, base, i) (0xf8000001 | ___PPC_RS(r) | ___PPC_RA(base) | ((i) & 0xfffc)) @@ -472,6 +479,10 @@ #define PPC_RAW_CMPLW(a, b) (0x7c000040 | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_CMPLD(a, b) (0x7c200040 | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_SUB(d, a, b) (0x7c000050 | ___PPC_RT(d) | ___PPC_RB(a) | ___PPC_RA(b)) +#define PPC_RAW_SUBFC(d, a, b) (0x7c000010 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_SUBFE(d, a, b) (0x7c000110 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_SUBFIC(d, a, i) (0x20000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i)) +#define PPC_RAW_SUBFZE(d, a) (0x7c000190 | ___PPC_RT(d) | ___PPC_RA(a)) #define PPC_RAW_MULD(d, a, b) (0x7c0001d2 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_MULW(d, a, b) (0x7c0001d6 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) #define 
PPC_RAW_MULHWU(d, a, b) (0x7c000016 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) @@ -484,11 +495,13 @@ #define PPC_RAW_DIVDEU_DOT(t, a, b) (0x7c000312 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1) #define PPC_RAW_AND(d, a, b) (0x7c000038 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b)) #define PPC_RAW_ANDI(d, a, i) (0x70000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) +#define PPC_RAW_ANDIS(d, a, i) (0x74000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) #define PPC_RAW_AND_DOT(d, a, b) (0x7c000039 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b)) #define PPC_RAW_OR(d, a, b) (0x7c000378 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b)) #define PPC_RAW_MR(d, a) PPC_RAW_OR(d, a, a) #define PPC_RAW_ORI(d, a, i) (PPC_INST_ORI | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) #define PPC_RAW_ORIS(d, a, i) (PPC_INST_ORIS | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) +#define PPC_RAW_NOR(d, a, b) (0x7c0000f8 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b)) #define PPC_RAW_XOR(d, a, b) (0x7c000278 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b)) #define PPC_RAW_XORI(d, a, i) (0x68000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) #define PPC_RAW_XORIS(d, a, i) (0x6c000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 3dceb64fc9af..d6739d700f0a 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -16,36 +16,6 @@ #define SZL (BITS_PER_LONG/8) /* - * Stuff for accurate CPU time accounting. - * These macros handle transitions between user and system state - * in exception entry and exit and accumulate time to the - * user_time and system_time fields in the paca. - */ - -#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE -#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb) -#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb) -#else -#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb) \ - MFTB(ra); /* get timebase */ \ - PPC_LL rb, ACCOUNT_STARTTIME_USER(ptr); \ - PPC_STL ra, ACCOUNT_STARTTIME(ptr); \ - subf rb,rb,ra; /* subtract start value */ \ - PPC_LL ra, ACCOUNT_USER_TIME(ptr); \ - add ra,ra,rb; /* add on to user time */ \ - PPC_STL ra, ACCOUNT_USER_TIME(ptr); \ - -#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb) \ - MFTB(ra); /* get timebase */ \ - PPC_LL rb, ACCOUNT_STARTTIME(ptr); \ - PPC_STL ra, ACCOUNT_STARTTIME_USER(ptr); \ - subf rb,rb,ra; /* subtract start value */ \ - PPC_LL ra, ACCOUNT_SYSTEM_TIME(ptr); \ - add ra,ra,rb; /* add on to system time */ \ - PPC_STL ra, ACCOUNT_SYSTEM_TIME(ptr) -#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ - -/* * Macros for storing registers into and loading registers from * exception frames. 
*/ diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 8acc3590c971..7bf8a15af224 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -144,15 +144,12 @@ struct thread_struct { #endif #ifdef CONFIG_PPC32 void *pgdir; /* root of page-table tree */ - unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */ #ifdef CONFIG_PPC_RTAS unsigned long rtas_sp; /* stack pointer for when in RTAS */ #endif -#endif #if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP) unsigned long kuap; /* opened segments for user access */ #endif -#ifdef CONFIG_VMAP_STACK unsigned long srr0; unsigned long srr1; unsigned long dar; @@ -161,7 +158,7 @@ struct thread_struct { unsigned long r0, r3, r4, r5, r6, r8, r9, r11; unsigned long lr, ctr; #endif -#endif +#endif /* CONFIG_PPC32 */ /* Debug Registers */ struct debug_reg debug; #ifdef CONFIG_PPC_FPU_REGS @@ -282,7 +279,6 @@ struct thread_struct { #ifdef CONFIG_PPC32 #define INIT_THREAD { \ .ksp = INIT_SP, \ - .ksp_limit = INIT_SP_LIMIT, \ .pgdir = swapper_pg_dir, \ .fpexc_mode = MSR_FE0 | MSR_FE1, \ SPEFSCR_INIT \ @@ -393,6 +389,7 @@ extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val); extern unsigned long isa206_idle_insn_mayloss(unsigned long type); #ifdef CONFIG_PPC_970_NAP extern void power4_idle_nap(void); +void power4_idle_nap_return(void); #endif extern unsigned long cpuidle_disable; @@ -417,6 +414,8 @@ extern int fix_alignment(struct pt_regs *); #define NET_IP_ALIGN 0 #endif +int do_mathemu(struct pt_regs *regs); + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_PROCESSOR_H */ diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 1499e928ea6a..9c9ab2746168 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -185,44 +185,27 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc) #define current_pt_regs() \ ((struct pt_regs *)((unsigned long)task_stack_page(current) + THREAD_SIZE) - 1) +/* + * The 4 low bits (0xf) are available as flags to overload the trap word, + * because interrupt vectors have minimum alignment of 0x10. TRAP_FLAGS_MASK + * must cover the bits used as flags, including bit 0 which is used as the + * "norestart" bit. + */ #ifdef __powerpc64__ -#ifdef CONFIG_PPC_BOOK3S -#define TRAP_FLAGS_MASK 0x10 -#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK) -#define FULL_REGS(regs) true -#define SET_FULL_REGS(regs) do { } while (0) -#else -#define TRAP_FLAGS_MASK 0x11 -#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK) -#define FULL_REGS(regs) (((regs)->trap & 1) == 0) -#define SET_FULL_REGS(regs) ((regs)->trap &= ~1) -#endif -#define CHECK_FULL_REGS(regs) BUG_ON(!FULL_REGS(regs)) -#define NV_REG_POISON 0xdeadbeefdeadbeefUL +#define TRAP_FLAGS_MASK 0x1 #else /* - * We use the least-significant bit of the trap field to indicate - * whether we have saved the full set of registers, or only a - * partial set. A 1 there means the partial set. - * On 4xx we use the next bit to indicate whether the exception + * On 4xx we use bit 1 in the trap word to indicate whether the exception * is a critical exception (1 means it is). 
*/ -#define TRAP_FLAGS_MASK 0x1F -#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK) -#define FULL_REGS(regs) (((regs)->trap & 1) == 0) -#define SET_FULL_REGS(regs) ((regs)->trap &= ~1) +#define TRAP_FLAGS_MASK 0xf #define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) != 0) #define IS_MCHECK_EXC(regs) (((regs)->trap & 4) != 0) #define IS_DEBUG_EXC(regs) (((regs)->trap & 8) != 0) -#define NV_REG_POISON 0xdeadbeef -#define CHECK_FULL_REGS(regs) \ -do { \ - if ((regs)->trap & 1) \ - printk(KERN_CRIT "%s: partial register set\n", __func__); \ -} while (0) #endif /* __powerpc64__ */ +#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK) -static inline void set_trap(struct pt_regs *regs, unsigned long val) +static __always_inline void set_trap(struct pt_regs *regs, unsigned long val) { regs->trap = (regs->trap & TRAP_FLAGS_MASK) | (val & ~TRAP_FLAGS_MASK); } @@ -244,12 +227,12 @@ static inline bool trap_is_syscall(struct pt_regs *regs) static inline bool trap_norestart(struct pt_regs *regs) { - return regs->trap & 0x10; + return regs->trap & 0x1; } -static inline void set_trap_norestart(struct pt_regs *regs) +static __always_inline void set_trap_norestart(struct pt_regs *regs) { - regs->trap |= 0x10; + regs->trap |= 0x1; } #define arch_has_single_step() (1) diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h index b752d34517b3..07318bc63e3d 100644 --- a/arch/powerpc/include/asm/qspinlock.h +++ b/arch/powerpc/include/asm/qspinlock.h @@ -44,20 +44,6 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock) } #define queued_spin_lock queued_spin_lock -#define smp_mb__after_spinlock() smp_mb() - -static __always_inline int queued_spin_is_locked(struct qspinlock *lock) -{ - /* - * This barrier was added to simple spinlocks by commit 51d7d5205d338, - * but it should now be possible to remove it, asm arm64 has done with - * commit c6f5d02b6a0f. - */ - smp_mb(); - return atomic_read(&lock->val); -} -#define queued_spin_is_locked queued_spin_is_locked - #ifdef CONFIG_PARAVIRT_SPINLOCKS #define SPIN_THRESHOLD (1<<15) /* not tuned */ @@ -86,6 +72,13 @@ static inline void pv_spinlocks_init(void) #endif +/* + * Queued spinlocks rely heavily on smp_cond_load_relaxed() to busy-wait, + * which was found to have performance problems if implemented with + * the preferred spin_begin()/spin_end() SMT priority pattern. Use the + * generic version instead. + */ + #include <asm-generic/qspinlock.h> #endif /* _ASM_POWERPC_QSPINLOCK_H */ diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index da103e92c112..7c81d3e563b2 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -124,7 +124,7 @@ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM #define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? 
*/ #else -#define MSR_TM_ACTIVE(x) 0 +#define MSR_TM_ACTIVE(x) ((void)(x), 0) #endif #if defined(CONFIG_PPC_BOOK3S_64) @@ -441,6 +441,7 @@ #define LPCR_VRMA_LP1 ASM_CONST(0x0000800000000000) #define LPCR_RMLS 0x1C000000 /* Implementation dependent RMO limit sel */ #define LPCR_RMLS_SH 26 +#define LPCR_HAIL ASM_CONST(0x0000000004000000) /* HV AIL (ISAv3.1) */ #define LPCR_ILE ASM_CONST(0x0000000002000000) /* !HV irqs set MSR:LE */ #define LPCR_AIL ASM_CONST(0x0000000001800000) /* Alternate interrupt location */ #define LPCR_AIL_0 ASM_CONST(0x0000000000000000) /* MMU off exception offset 0x0 */ @@ -1393,8 +1394,7 @@ static inline void mtmsr_isync(unsigned long val) : "r" ((unsigned long)(v)) \ : "memory") #endif -#define wrtspr(rn) asm volatile("mtspr " __stringify(rn) ",0" : \ - : : "memory") +#define wrtspr(rn) asm volatile("mtspr " __stringify(rn) ",2" : : : "memory") static inline void wrtee(unsigned long val) { diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 658448ca5b8a..9dc97d2f9d27 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -19,8 +19,8 @@ #define RTAS_UNKNOWN_SERVICE (-1) #define RTAS_INSTANTIATE_MAX (1ULL<<30) /* Don't instantiate rtas at/above this value */ -/* Buffer size for ppc_rtas system call. */ -#define RTAS_RMOBUF_MAX (64 * 1024) +/* Memory set aside for sys_rtas to use with calls that need a work area. */ +#define RTAS_USER_REGION_SIZE (64 * 1024) /* RTAS return status codes */ #define RTAS_BUSY -2 /* RTAS Busy */ @@ -357,7 +357,7 @@ extern void rtas_take_timebase(void); static inline int page_is_rtas_user_buf(unsigned long pfn) { unsigned long paddr = (pfn << PAGE_SHIFT); - if (paddr >= rtas_rmo_buf && paddr < (rtas_rmo_buf + RTAS_RMOBUF_MAX)) + if (paddr >= rtas_rmo_buf && paddr < (rtas_rmo_buf + RTAS_USER_REGION_SIZE)) return 1; return 0; } diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h index 5b862de29dff..552f325412cc 100644 --- a/arch/powerpc/include/asm/simple_spinlock.h +++ b/arch/powerpc/include/asm/simple_spinlock.h @@ -38,8 +38,7 @@ static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) static inline int arch_spin_is_locked(arch_spinlock_t *lock) { - smp_mb(); - return !arch_spin_value_unlocked(*lock); + return !arch_spin_value_unlocked(READ_ONCE(*lock)); } /* @@ -282,7 +281,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) #define arch_read_relax(lock) rw_yield(lock) #define arch_write_relax(lock) rw_yield(lock) -/* See include/linux/spinlock.h */ -#define smp_mb__after_spinlock() smp_mb() - #endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_H */ diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 7a13bc20f0a0..03b3d010cbab 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -31,6 +31,7 @@ extern u32 *cpu_to_phys_id; extern bool coregroup_enabled; extern int cpu_to_chip_id(int cpu); +extern int *chip_id_lookup_table; #ifdef CONFIG_SMP @@ -121,6 +122,11 @@ static inline struct cpumask *cpu_sibling_mask(int cpu) return per_cpu(cpu_sibling_map, cpu); } +static inline struct cpumask *cpu_core_mask(int cpu) +{ + return per_cpu(cpu_core_map, cpu); +} + static inline struct cpumask *cpu_l2_cache_mask(int cpu) { return per_cpu(cpu_l2_cache_map, cpu); diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 6ec72282888d..bd75872a6334 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h 
@@ -10,6 +10,9 @@ #include <asm/simple_spinlock.h> #endif +/* See include/linux/spinlock.h */ +#define smp_mb__after_spinlock() smp_mb() + #ifndef CONFIG_PARAVIRT_SPINLOCKS static inline void pv_spinlocks_init(void) { } #endif diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 386d576673a1..b4ec6c7dd72e 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -38,7 +38,6 @@ #ifndef __ASSEMBLY__ #include <linux/cache.h> #include <asm/processor.h> -#include <asm/page.h> #include <asm/accounting.h> #define SLB_PRELOAD_NR 16U @@ -152,6 +151,12 @@ void arch_setup_new_exec(void); #ifndef __ASSEMBLY__ +static inline void clear_thread_local_flags(unsigned int flags) +{ + struct thread_info *ti = current_thread_info(); + ti->local_flags &= ~flags; +} + static inline bool test_thread_local_flags(unsigned int flags) { struct thread_info *ti = current_thread_info(); diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index 3beeb030cd78..e4db64c0e184 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -126,7 +126,7 @@ static inline int cpu_to_coregroup_id(int cpu) #define topology_physical_package_id(cpu) (cpu_to_chip_id(cpu)) #define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) -#define topology_core_cpumask(cpu) (cpu_cpu_mask(cpu)) +#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) #define topology_core_id(cpu) (cpu_to_core_id(cpu)) #endif diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 78e2a3990eab..a09e4240c5b1 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -43,129 +43,39 @@ static inline bool __access_ok(unsigned long addr, unsigned long size) * exception handling means that it's no longer "just"...) 
* */ -#define get_user(x, ptr) \ - __get_user_check((x), (ptr), sizeof(*(ptr))) -#define put_user(x, ptr) \ - __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) - -#define __get_user(x, ptr) \ - __get_user_nocheck((x), (ptr), sizeof(*(ptr)), true) -#define __put_user(x, ptr) \ - __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) - -#define __get_user_allowed(x, ptr) \ - __get_user_nocheck((x), (ptr), sizeof(*(ptr)), false) - -#define __get_user_inatomic(x, ptr) \ - __get_user_nosleep((x), (ptr), sizeof(*(ptr))) -#define __put_user_inatomic(x, ptr) \ - __put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) - -#ifdef CONFIG_PPC64 - -#define ___get_user_instr(gu_op, dest, ptr) \ -({ \ - long __gui_ret = 0; \ - unsigned long __gui_ptr = (unsigned long)ptr; \ - struct ppc_inst __gui_inst; \ - unsigned int __prefix, __suffix; \ - __gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr); \ - if (__gui_ret == 0) { \ - if ((__prefix >> 26) == OP_PREFIX) { \ - __gui_ret = gu_op(__suffix, \ - (unsigned int __user *)__gui_ptr + 1); \ - __gui_inst = ppc_inst_prefix(__prefix, \ - __suffix); \ - } else { \ - __gui_inst = ppc_inst(__prefix); \ - } \ - if (__gui_ret == 0) \ - (dest) = __gui_inst; \ - } \ - __gui_ret; \ -}) - -#define get_user_instr(x, ptr) \ - ___get_user_instr(get_user, x, ptr) - -#define __get_user_instr(x, ptr) \ - ___get_user_instr(__get_user, x, ptr) - -#define __get_user_instr_inatomic(x, ptr) \ - ___get_user_instr(__get_user_inatomic, x, ptr) - -#else /* !CONFIG_PPC64 */ -#define get_user_instr(x, ptr) \ - get_user((x).val, (u32 __user *)(ptr)) - -#define __get_user_instr(x, ptr) \ - __get_user_nocheck((x).val, (u32 __user *)(ptr), sizeof(u32), true) - -#define __get_user_instr_inatomic(x, ptr) \ - __get_user_nosleep((x).val, (u32 __user *)(ptr), sizeof(u32)) - -#endif /* CONFIG_PPC64 */ - -extern long __put_user_bad(void); - -#define __put_user_size(x, ptr, size, retval) \ -do { \ - __label__ __pu_failed; \ - \ - retval = 0; \ - allow_write_to_user(ptr, size); \ - __put_user_size_goto(x, ptr, size, __pu_failed); \ - prevent_write_to_user(ptr, size); \ - break; \ - \ -__pu_failed: \ - retval = -EFAULT; \ - prevent_write_to_user(ptr, size); \ -} while (0) - -#define __put_user_nocheck(x, ptr, size) \ +#define __put_user(x, ptr) \ ({ \ long __pu_err; \ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ - __typeof__(*(ptr)) __pu_val = (x); \ - __typeof__(size) __pu_size = (size); \ + __typeof__(*(ptr)) __pu_val = (__typeof__(*(ptr)))(x); \ + __typeof__(sizeof(*(ptr))) __pu_size = sizeof(*(ptr)); \ \ - if (!is_kernel_addr((unsigned long)__pu_addr)) \ - might_fault(); \ - __chk_user_ptr(__pu_addr); \ - __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \ + might_fault(); \ + do { \ + __label__ __pu_failed; \ + \ + allow_write_to_user(__pu_addr, __pu_size); \ + __put_user_size_goto(__pu_val, __pu_addr, __pu_size, __pu_failed); \ + prevent_write_to_user(__pu_addr, __pu_size); \ + __pu_err = 0; \ + break; \ + \ +__pu_failed: \ + prevent_write_to_user(__pu_addr, __pu_size); \ + __pu_err = -EFAULT; \ + } while (0); \ \ __pu_err; \ }) -#define __put_user_check(x, ptr, size) \ +#define put_user(x, ptr) \ ({ \ - long __pu_err = -EFAULT; \ - __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ - __typeof__(*(ptr)) __pu_val = (x); \ - __typeof__(size) __pu_size = (size); \ - \ - might_fault(); \ - if (access_ok(__pu_addr, __pu_size)) \ - __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \ + __typeof__(*(ptr)) __user *_pu_addr = (ptr); \ \ - 
__pu_err; \ + access_ok(_pu_addr, sizeof(*(ptr))) ? \ + __put_user(x, _pu_addr) : -EFAULT; \ }) -#define __put_user_nosleep(x, ptr, size) \ -({ \ - long __pu_err; \ - __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ - __typeof__(*(ptr)) __pu_val = (x); \ - __typeof__(size) __pu_size = (size); \ - \ - __chk_user_ptr(__pu_addr); \ - __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \ - \ - __pu_err; \ -}) - - /* * We don't tell gcc that we are accessing memory, but this is OK * because we do not write to any memory gcc knows about, so there @@ -198,25 +108,17 @@ __pu_failed: \ #define __put_user_size_goto(x, ptr, size, label) \ do { \ + __typeof__(*(ptr)) __user *__pus_addr = (ptr); \ + \ switch (size) { \ - case 1: __put_user_asm_goto(x, ptr, label, "stb"); break; \ - case 2: __put_user_asm_goto(x, ptr, label, "sth"); break; \ - case 4: __put_user_asm_goto(x, ptr, label, "stw"); break; \ - case 8: __put_user_asm2_goto(x, ptr, label); break; \ - default: __put_user_bad(); \ + case 1: __put_user_asm_goto(x, __pus_addr, label, "stb"); break; \ + case 2: __put_user_asm_goto(x, __pus_addr, label, "sth"); break; \ + case 4: __put_user_asm_goto(x, __pus_addr, label, "stw"); break; \ + case 8: __put_user_asm2_goto(x, __pus_addr, label); break; \ + default: BUILD_BUG(); \ } \ } while (0) -#define __unsafe_put_user_goto(x, ptr, size, label) \ -do { \ - __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ - __chk_user_ptr(ptr); \ - __put_user_size_goto((x), __pu_addr, (size), label); \ -} while (0) - - -extern long __get_user_bad(void); - /* * This does an atomic 128 byte aligned load from userspace. * Upto caller to do enable_kernel_vmx() before calling! @@ -234,6 +136,59 @@ extern long __get_user_bad(void); : "=r" (err) \ : "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err)) +#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT + +#define __get_user_asm_goto(x, addr, label, op) \ + asm_volatile_goto( \ + "1: "op"%U1%X1 %0, %1 # get_user\n" \ + EX_TABLE(1b, %l2) \ + : "=r" (x) \ + : "m"UPD_CONSTR (*addr) \ + : \ + : label) + +#ifdef __powerpc64__ +#define __get_user_asm2_goto(x, addr, label) \ + __get_user_asm_goto(x, addr, label, "ld") +#else /* __powerpc64__ */ +#define __get_user_asm2_goto(x, addr, label) \ + asm_volatile_goto( \ + "1: lwz%X1 %0, %1\n" \ + "2: lwz%X1 %L0, %L1\n" \ + EX_TABLE(1b, %l2) \ + EX_TABLE(2b, %l2) \ + : "=r" (x) \ + : "m" (*addr) \ + : \ + : label) +#endif /* __powerpc64__ */ + +#define __get_user_size_goto(x, ptr, size, label) \ +do { \ + BUILD_BUG_ON(size > sizeof(x)); \ + switch (size) { \ + case 1: __get_user_asm_goto(x, (u8 __user *)ptr, label, "lbz"); break; \ + case 2: __get_user_asm_goto(x, (u16 __user *)ptr, label, "lhz"); break; \ + case 4: __get_user_asm_goto(x, (u32 __user *)ptr, label, "lwz"); break; \ + case 8: __get_user_asm2_goto(x, (u64 __user *)ptr, label); break; \ + default: x = 0; BUILD_BUG(); \ + } \ +} while (0) + +#define __get_user_size_allowed(x, ptr, size, retval) \ +do { \ + __label__ __gus_failed; \ + \ + __get_user_size_goto(x, ptr, size, __gus_failed); \ + retval = 0; \ + break; \ +__gus_failed: \ + x = 0; \ + retval = -EFAULT; \ +} while (0) + +#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */ + #define __get_user_asm(x, addr, err, op) \ __asm__ __volatile__( \ "1: "op"%U2%X2 %1, %2 # get_user\n" \ @@ -271,25 +226,27 @@ extern long __get_user_bad(void); #define __get_user_size_allowed(x, ptr, size, retval) \ do { \ retval = 0; \ - __chk_user_ptr(ptr); \ - if (size > sizeof(x)) \ - (x) = __get_user_bad(); \ + BUILD_BUG_ON(size > sizeof(x)); \ switch (size) { \ 
case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break; \ case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break; \ case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break; \ case 8: __get_user_asm2(x, (u64 __user *)ptr, retval); break; \ - default: (x) = __get_user_bad(); \ + default: x = 0; BUILD_BUG(); \ } \ } while (0) -#define __get_user_size(x, ptr, size, retval) \ +#define __get_user_size_goto(x, ptr, size, label) \ do { \ - allow_read_from_user(ptr, size); \ - __get_user_size_allowed(x, ptr, size, retval); \ - prevent_read_from_user(ptr, size); \ + long __gus_retval; \ + \ + __get_user_size_allowed(x, ptr, size, __gus_retval); \ + if (__gus_retval) \ + goto label; \ } while (0) +#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */ + /* * This is a type: either unsigned long, if the argument fits into * that type, or otherwise unsigned long long. @@ -297,86 +254,36 @@ do { \ #define __long_type(x) \ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) -#define __get_user_nocheck(x, ptr, size, do_allow) \ +#define __get_user(x, ptr) \ ({ \ long __gu_err; \ __long_type(*(ptr)) __gu_val; \ __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ - __typeof__(size) __gu_size = (size); \ + __typeof__(sizeof(*(ptr))) __gu_size = sizeof(*(ptr)); \ \ - __chk_user_ptr(__gu_addr); \ - if (do_allow && !is_kernel_addr((unsigned long)__gu_addr)) \ - might_fault(); \ - if (do_allow) \ - __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \ - else \ - __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \ + might_fault(); \ + allow_read_from_user(__gu_addr, __gu_size); \ + __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \ + prevent_read_from_user(__gu_addr, __gu_size); \ (x) = (__typeof__(*(ptr)))__gu_val; \ \ __gu_err; \ }) -#define __get_user_check(x, ptr, size) \ +#define get_user(x, ptr) \ ({ \ - long __gu_err = -EFAULT; \ - __long_type(*(ptr)) __gu_val = 0; \ - __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ - __typeof__(size) __gu_size = (size); \ - \ - might_fault(); \ - if (access_ok(__gu_addr, __gu_size)) \ - __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \ - (x) = (__force __typeof__(*(ptr)))__gu_val; \ + __typeof__(*(ptr)) __user *_gu_addr = (ptr); \ \ - __gu_err; \ + access_ok(_gu_addr, sizeof(*(ptr))) ? 
\ + __get_user(x, _gu_addr) : \ + ((x) = (__force __typeof__(*(ptr)))0, -EFAULT); \ }) -#define __get_user_nosleep(x, ptr, size) \ -({ \ - long __gu_err; \ - __long_type(*(ptr)) __gu_val; \ - __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ - __typeof__(size) __gu_size = (size); \ - \ - __chk_user_ptr(__gu_addr); \ - __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \ - (x) = (__force __typeof__(*(ptr)))__gu_val; \ - \ - __gu_err; \ -}) - - /* more complex routines */ extern unsigned long __copy_tofrom_user(void __user *to, const void __user *from, unsigned long size); -#ifdef CONFIG_ARCH_HAS_COPY_MC -unsigned long __must_check -copy_mc_generic(void *to, const void *from, unsigned long size); - -static inline unsigned long __must_check -copy_mc_to_kernel(void *to, const void *from, unsigned long size) -{ - return copy_mc_generic(to, from, size); -} -#define copy_mc_to_kernel copy_mc_to_kernel - -static inline unsigned long __must_check -copy_mc_to_user(void __user *to, const void *from, unsigned long n) -{ - if (likely(check_copy_size(from, n, true))) { - if (access_ok(to, n)) { - allow_write_to_user(to, n); - n = copy_mc_generic((void *)to, from, n); - prevent_write_to_user(to, n); - } - } - - return n; -} -#endif - #ifdef __powerpc64__ static inline unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) @@ -414,26 +321,51 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) unsigned long __arch_clear_user(void __user *addr, unsigned long size); -static inline unsigned long clear_user(void __user *addr, unsigned long size) +static inline unsigned long __clear_user(void __user *addr, unsigned long size) { - unsigned long ret = size; + unsigned long ret; + might_fault(); - if (likely(access_ok(addr, size))) { - allow_write_to_user(addr, size); - ret = __arch_clear_user(addr, size); - prevent_write_to_user(addr, size); - } + allow_write_to_user(addr, size); + ret = __arch_clear_user(addr, size); + prevent_write_to_user(addr, size); return ret; } -static inline unsigned long __clear_user(void __user *addr, unsigned long size) +static inline unsigned long clear_user(void __user *addr, unsigned long size) { - return clear_user(addr, size); + return likely(access_ok(addr, size)) ? 
__clear_user(addr, size) : size; } extern long strncpy_from_user(char *dst, const char __user *src, long count); extern __must_check long strnlen_user(const char __user *str, long n); +#ifdef CONFIG_ARCH_HAS_COPY_MC +unsigned long __must_check +copy_mc_generic(void *to, const void *from, unsigned long size); + +static inline unsigned long __must_check +copy_mc_to_kernel(void *to, const void *from, unsigned long size) +{ + return copy_mc_generic(to, from, size); +} +#define copy_mc_to_kernel copy_mc_to_kernel + +static inline unsigned long __must_check +copy_mc_to_user(void __user *to, const void *from, unsigned long n) +{ + if (likely(check_copy_size(from, n, true))) { + if (access_ok(to, n)) { + allow_write_to_user(to, n); + n = copy_mc_generic((void *)to, from, n); + prevent_write_to_user(to, n); + } + } + + return n; +} +#endif + extern long __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size); extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset, @@ -482,10 +414,37 @@ user_write_access_begin(const void __user *ptr, size_t len) #define user_write_access_begin user_write_access_begin #define user_write_access_end prevent_current_write_to_user -#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0) -#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e) +#define unsafe_get_user(x, p, e) do { \ + __long_type(*(p)) __gu_val; \ + __typeof__(*(p)) __user *__gu_addr = (p); \ + \ + __get_user_size_goto(__gu_val, __gu_addr, sizeof(*(p)), e); \ + (x) = (__typeof__(*(p)))__gu_val; \ +} while (0) + #define unsafe_put_user(x, p, e) \ - __unsafe_put_user_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e) + __put_user_size_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e) + +#define unsafe_copy_from_user(d, s, l, e) \ +do { \ + u8 *_dst = (u8 *)(d); \ + const u8 __user *_src = (const u8 __user *)(s); \ + size_t _len = (l); \ + int _i; \ + \ + for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64)) \ + unsafe_get_user(*(u64 *)(_dst + _i), (u64 __user *)(_src + _i), e); \ + if (_len & 4) { \ + unsafe_get_user(*(u32 *)(_dst + _i), (u32 __user *)(_src + _i), e); \ + _i += 4; \ + } \ + if (_len & 2) { \ + unsafe_get_user(*(u16 *)(_dst + _i), (u16 __user *)(_src + _i), e); \ + _i += 2; \ + } \ + if (_len & 1) \ + unsafe_get_user(*(u8 *)(_dst + _i), (u8 __user *)(_src + _i), e); \ +} while (0) #define unsafe_copy_to_user(d, s, l, e) \ do { \ @@ -494,9 +453,9 @@ do { \ size_t _len = (l); \ int _i; \ \ - for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long)) \ - unsafe_put_user(*(long*)(_src + _i), (long __user *)(_dst + _i), e); \ - if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) { \ + for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64)) \ + unsafe_put_user(*(u64 *)(_src + _i), (u64 __user *)(_dst + _i), e); \ + if (_len & 4) { \ unsafe_put_user(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e); \ _i += 4; \ } \ @@ -511,14 +470,8 @@ do { \ #define HAVE_GET_KERNEL_NOFAULT #define __get_kernel_nofault(dst, src, type, err_label) \ -do { \ - int __kr_err; \ - \ - __get_user_size_allowed(*((type *)(dst)), (__force type __user *)(src),\ - sizeof(type), __kr_err); \ - if (unlikely(__kr_err)) \ - goto err_label; \ -} while (0) + __get_user_size_goto(*((type *)(dst)), \ + (__force type __user *)(src), sizeof(type), err_label) #define __put_kernel_nofault(dst, src, type, err_label) \ __put_user_size_goto(*((type *)(src)), \ diff --git a/arch/powerpc/include/asm/unistd.h 
b/arch/powerpc/include/asm/unistd.h index 700fcdac2e3c..b541c690a31c 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -40,6 +40,7 @@ #define __ARCH_WANT_SYS_SIGPROCMASK #ifdef CONFIG_PPC32 #define __ARCH_WANT_OLD_STAT +#define __ARCH_WANT_SYS_OLD_SELECT #endif #ifdef CONFIG_PPC64 #define __ARCH_WANT_SYS_TIME diff --git a/arch/powerpc/include/asm/vdso/gettimeofday.h b/arch/powerpc/include/asm/vdso/gettimeofday.h index 77c635c2c90d..1faff0be1111 100644 --- a/arch/powerpc/include/asm/vdso/gettimeofday.h +++ b/arch/powerpc/include/asm/vdso/gettimeofday.h @@ -2,6 +2,8 @@ #ifndef _ASM_POWERPC_VDSO_GETTIMEOFDAY_H #define _ASM_POWERPC_VDSO_GETTIMEOFDAY_H +#include <asm/page.h> + #ifdef __ASSEMBLY__ #include <asm/ppc_asm.h> @@ -154,6 +156,14 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_data *__arch_get_vdso_data(void); +#ifdef CONFIG_TIME_NS +static __always_inline +const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd) +{ + return (void *)vd + PAGE_SIZE; +} +#endif + static inline bool vdso_clocksource_ok(const struct vdso_data *vd) { return true; diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h index 3f958ecf2beb..a585c8e538ff 100644 --- a/arch/powerpc/include/asm/vdso_datapage.h +++ b/arch/powerpc/include/asm/vdso_datapage.h @@ -107,9 +107,7 @@ extern struct vdso_arch_data *vdso_data; bcl 20, 31, .+4 999: mflr \ptr -#if CONFIG_PPC_PAGE_SHIFT > 14 addis \ptr, \ptr, (_vdso_datapage - 999b)@ha -#endif addi \ptr, \ptr, (_vdso_datapage - 999b)@l .endm diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h index 721c0d6715ac..e7479a4abf96 100644 --- a/arch/powerpc/include/asm/vio.h +++ b/arch/powerpc/include/asm/vio.h @@ -114,6 +114,7 @@ struct vio_driver { const struct vio_device_id *id_table; int (*probe)(struct vio_dev *dev, const struct vio_device_id *id); void (*remove)(struct vio_dev *dev); + void (*shutdown)(struct vio_dev *dev); /* A driver must have a get_desired_dma() function to * be loaded in a CMO environment if it uses DMA. 
*/ diff --git a/arch/powerpc/include/asm/vmalloc.h b/arch/powerpc/include/asm/vmalloc.h index b992dfaaa161..4c69ece52a31 100644 --- a/arch/powerpc/include/asm/vmalloc.h +++ b/arch/powerpc/include/asm/vmalloc.h @@ -1,4 +1,24 @@ #ifndef _ASM_POWERPC_VMALLOC_H #define _ASM_POWERPC_VMALLOC_H +#include <asm/mmu.h> +#include <asm/page.h> + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP + +#define arch_vmap_pud_supported arch_vmap_pud_supported +static inline bool arch_vmap_pud_supported(pgprot_t prot) +{ + /* HPT does not cope with large pages in the vmalloc area */ + return radix_enabled(); +} + +#define arch_vmap_pmd_supported arch_vmap_pmd_supported +static inline bool arch_vmap_pmd_supported(pgprot_t prot) +{ + return radix_enabled(); +} + +#endif + #endif /* _ASM_POWERPC_VMALLOC_H */ diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h index 9a312b975ca8..aa094a8655b0 100644 --- a/arch/powerpc/include/asm/xive.h +++ b/arch/powerpc/include/asm/xive.h @@ -102,6 +102,7 @@ void xive_flush_interrupt(void); /* xmon hook */ void xmon_xive_do_dump(int cpu); int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d); +void xmon_xive_get_irq_all(void); /* APIs used by KVM */ u32 xive_native_default_eq_shift(void); diff --git a/arch/powerpc/include/uapi/asm/errno.h b/arch/powerpc/include/uapi/asm/errno.h index cc79856896a1..4ba87de32be0 100644 --- a/arch/powerpc/include/uapi/asm/errno.h +++ b/arch/powerpc/include/uapi/asm/errno.h @@ -2,6 +2,7 @@ #ifndef _ASM_POWERPC_ERRNO_H #define _ASM_POWERPC_ERRNO_H +#undef EDEADLOCK #include <asm-generic/errno.h> #undef EDEADLOCK diff --git a/arch/powerpc/include/uapi/asm/posix_types.h b/arch/powerpc/include/uapi/asm/posix_types.h index f698400e4bb0..9c0342312544 100644 --- a/arch/powerpc/include/uapi/asm/posix_types.h +++ b/arch/powerpc/include/uapi/asm/posix_types.h @@ -12,11 +12,6 @@ typedef unsigned long __kernel_old_dev_t; #define __kernel_old_dev_t __kernel_old_dev_t #else -typedef unsigned int __kernel_size_t; -typedef int __kernel_ssize_t; -typedef long __kernel_ptrdiff_t; -#define __kernel_size_t __kernel_size_t - typedef short __kernel_ipc_pid_t; #define __kernel_ipc_pid_t __kernel_ipc_pid_t #endif diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 6084fa499aa3..f66b63e81c3b 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -191,3 +191,7 @@ $(obj)/prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o FORCE targets += prom_init_check clean-files := vmlinux.lds + +# Force dependency (incbin is bad) +$(obj)/vdso32_wrapper.o : $(obj)/vdso32/vdso32.so.dbg +$(obj)/vdso64_wrapper.o : $(obj)/vdso64/vdso64.so.dbg diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index c7797eb958c7..bbb4181621dd 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -107,7 +107,6 @@ static struct aligninfo spe_aligninfo[32] = { static int emulate_spe(struct pt_regs *regs, unsigned int reg, struct ppc_inst ppc_instr) { - int ret; union { u64 ll; u32 w[2]; @@ -127,11 +126,6 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg, nb = spe_aligninfo[instr].len; flags = spe_aligninfo[instr].flags; - /* Verify the address of the operand */ - if (unlikely(user_mode(regs) && - !access_ok(addr, nb))) - return -EFAULT; - /* userland only */ if (unlikely(!user_mode(regs))) return 0; @@ -169,26 +163,27 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg, } } else { temp.ll = data.ll = 0; - ret = 0; p = addr; + if (!user_read_access_begin(addr, 
nb)) + return -EFAULT; + switch (nb) { case 8: - ret |= __get_user_inatomic(temp.v[0], p++); - ret |= __get_user_inatomic(temp.v[1], p++); - ret |= __get_user_inatomic(temp.v[2], p++); - ret |= __get_user_inatomic(temp.v[3], p++); + unsafe_get_user(temp.v[0], p++, Efault_read); + unsafe_get_user(temp.v[1], p++, Efault_read); + unsafe_get_user(temp.v[2], p++, Efault_read); + unsafe_get_user(temp.v[3], p++, Efault_read); fallthrough; case 4: - ret |= __get_user_inatomic(temp.v[4], p++); - ret |= __get_user_inatomic(temp.v[5], p++); + unsafe_get_user(temp.v[4], p++, Efault_read); + unsafe_get_user(temp.v[5], p++, Efault_read); fallthrough; case 2: - ret |= __get_user_inatomic(temp.v[6], p++); - ret |= __get_user_inatomic(temp.v[7], p++); - if (unlikely(ret)) - return -EFAULT; + unsafe_get_user(temp.v[6], p++, Efault_read); + unsafe_get_user(temp.v[7], p++, Efault_read); } + user_read_access_end(); switch (instr) { case EVLDD: @@ -255,31 +250,41 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg, /* Store result to memory or update registers */ if (flags & ST) { - ret = 0; p = addr; + + if (!user_write_access_begin(addr, nb)) + return -EFAULT; + switch (nb) { case 8: - ret |= __put_user_inatomic(data.v[0], p++); - ret |= __put_user_inatomic(data.v[1], p++); - ret |= __put_user_inatomic(data.v[2], p++); - ret |= __put_user_inatomic(data.v[3], p++); + unsafe_put_user(data.v[0], p++, Efault_write); + unsafe_put_user(data.v[1], p++, Efault_write); + unsafe_put_user(data.v[2], p++, Efault_write); + unsafe_put_user(data.v[3], p++, Efault_write); fallthrough; case 4: - ret |= __put_user_inatomic(data.v[4], p++); - ret |= __put_user_inatomic(data.v[5], p++); + unsafe_put_user(data.v[4], p++, Efault_write); + unsafe_put_user(data.v[5], p++, Efault_write); fallthrough; case 2: - ret |= __put_user_inatomic(data.v[6], p++); - ret |= __put_user_inatomic(data.v[7], p++); + unsafe_put_user(data.v[6], p++, Efault_write); + unsafe_put_user(data.v[7], p++, Efault_write); } - if (unlikely(ret)) - return -EFAULT; + user_write_access_end(); } else { *evr = data.w[0]; regs->gpr[reg] = data.w[1]; } return 1; + +Efault_read: + user_read_access_end(); + return -EFAULT; + +Efault_write: + user_write_access_end(); + return -EFAULT; } #endif /* CONFIG_SPE */ @@ -299,13 +304,12 @@ int fix_alignment(struct pt_regs *regs) struct instruction_op op; int r, type; - /* - * We require a complete register set, if not, then our assembly - * is broken - */ - CHECK_FULL_REGS(regs); + if (is_kernel_addr(regs->nip)) + r = copy_inst_from_kernel_nofault(&instr, (void *)regs->nip); + else + r = __get_user_instr(instr, (void __user *)regs->nip); - if (unlikely(__get_user_instr(instr, (void __user *)regs->nip))) + if (unlikely(r)) return -EFAULT; if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) { /* We don't handle PPC little-endian any more... 
*/ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index f3a662201a9f..28af4efb4587 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -91,7 +91,6 @@ int main(void) DEFINE(SIGSEGV, SIGSEGV); DEFINE(NMI_MASK, NMI_MASK); #else - OFFSET(KSP_LIMIT, thread_struct, ksp_limit); #ifdef CONFIG_PPC_RTAS OFFSET(RTAS_SP, thread_struct, rtas_sp); #endif @@ -132,7 +131,6 @@ int main(void) OFFSET(KSP_VSID, thread_struct, ksp_vsid); #else /* CONFIG_PPC64 */ OFFSET(PGDIR, thread_struct, pgdir); -#ifdef CONFIG_VMAP_STACK OFFSET(SRR0, thread_struct, srr0); OFFSET(SRR1, thread_struct, srr1); OFFSET(DAR, thread_struct, dar); @@ -149,7 +147,6 @@ int main(void) OFFSET(THLR, thread_struct, lr); OFFSET(THCTR, thread_struct, ctr); #endif -#endif #ifdef CONFIG_SPE OFFSET(THREAD_EVR0, thread_struct, evr[0]); OFFSET(THREAD_ACC, thread_struct, acc); @@ -285,21 +282,11 @@ int main(void) OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id); OFFSET(PACAKEXECSTATE, paca_struct, kexec_state); OFFSET(PACA_DSCR_DEFAULT, paca_struct, dscr_default); - OFFSET(ACCOUNT_STARTTIME, paca_struct, accounting.starttime); - OFFSET(ACCOUNT_STARTTIME_USER, paca_struct, accounting.starttime_user); - OFFSET(ACCOUNT_USER_TIME, paca_struct, accounting.utime); - OFFSET(ACCOUNT_SYSTEM_TIME, paca_struct, accounting.stime); #ifdef CONFIG_PPC_BOOK3E OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save); #endif OFFSET(PACA_SPRG_VDSO, paca_struct, sprg_vdso); #else /* CONFIG_PPC64 */ -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE - OFFSET(ACCOUNT_STARTTIME, thread_info, accounting.starttime); - OFFSET(ACCOUNT_STARTTIME_USER, thread_info, accounting.starttime_user); - OFFSET(ACCOUNT_USER_TIME, thread_info, accounting.utime); - OFFSET(ACCOUNT_SYSTEM_TIME, thread_info, accounting.stime); -#endif #endif /* CONFIG_PPC64 */ /* RTAS */ @@ -323,9 +310,6 @@ int main(void) STACK_PT_REGS_OFFSET(GPR11, gpr[11]); STACK_PT_REGS_OFFSET(GPR12, gpr[12]); STACK_PT_REGS_OFFSET(GPR13, gpr[13]); -#ifndef CONFIG_PPC64 - STACK_PT_REGS_OFFSET(GPR14, gpr[14]); -#endif /* CONFIG_PPC64 */ /* * Note: these symbols include _ because they overlap with special * register names @@ -381,7 +365,6 @@ int main(void) DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1)); DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0)); DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1)); - DEFINE(SAVED_KSP_LIMIT, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, saved_ksp_limit)); #endif #endif diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index cd60bc1c8701..f24cd53ff26e 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -362,14 +362,11 @@ static inline unsigned long eeh_token_to_phys(unsigned long token) pa = pte_pfn(*ptep); /* On radix we can do hugepage mappings for io, so handle that */ - if (hugepage_shift) { - pa <<= hugepage_shift; - pa |= token & ((1ul << hugepage_shift) - 1); - } else { - pa <<= PAGE_SHIFT; - pa |= token & (PAGE_SIZE - 1); - } + if (!hugepage_shift) + hugepage_shift = PAGE_SHIFT; + pa <<= PAGE_SHIFT; + pa |= token & ((1ul << hugepage_shift) - 1); return pa; } @@ -779,7 +776,7 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat default: eeh_pe_state_clear(pe, EEH_PE_ISOLATED | EEH_PE_CFG_BLOCKED, true); return -EINVAL; - }; + } return 0; } @@ -1568,6 +1565,7 @@ int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func, } EXPORT_SYMBOL_GPL(eeh_pe_inject_err); +#ifdef 
CONFIG_PROC_FS static int proc_eeh_show(struct seq_file *m, void *v) { if (!eeh_enabled()) { @@ -1594,6 +1592,7 @@ static int proc_eeh_show(struct seq_file *m, void *v) return 0; } +#endif /* CONFIG_PROC_FS */ #ifdef CONFIG_DEBUG_FS diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 78c430b7f9d9..9160285cb2f4 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -48,195 +48,16 @@ */ .align 12 -#ifdef CONFIG_BOOKE - .globl mcheck_transfer_to_handler -mcheck_transfer_to_handler: - mfspr r0,SPRN_DSRR0 - stw r0,_DSRR0(r11) - mfspr r0,SPRN_DSRR1 - stw r0,_DSRR1(r11) - /* fall through */ -_ASM_NOKPROBE_SYMBOL(mcheck_transfer_to_handler) - - .globl debug_transfer_to_handler -debug_transfer_to_handler: - mfspr r0,SPRN_CSRR0 - stw r0,_CSRR0(r11) - mfspr r0,SPRN_CSRR1 - stw r0,_CSRR1(r11) - /* fall through */ -_ASM_NOKPROBE_SYMBOL(debug_transfer_to_handler) - - .globl crit_transfer_to_handler -crit_transfer_to_handler: -#ifdef CONFIG_PPC_BOOK3E_MMU - mfspr r0,SPRN_MAS0 - stw r0,MAS0(r11) - mfspr r0,SPRN_MAS1 - stw r0,MAS1(r11) - mfspr r0,SPRN_MAS2 - stw r0,MAS2(r11) - mfspr r0,SPRN_MAS3 - stw r0,MAS3(r11) - mfspr r0,SPRN_MAS6 - stw r0,MAS6(r11) -#ifdef CONFIG_PHYS_64BIT - mfspr r0,SPRN_MAS7 - stw r0,MAS7(r11) -#endif /* CONFIG_PHYS_64BIT */ -#endif /* CONFIG_PPC_BOOK3E_MMU */ -#ifdef CONFIG_44x - mfspr r0,SPRN_MMUCR - stw r0,MMUCR(r11) -#endif - mfspr r0,SPRN_SRR0 - stw r0,_SRR0(r11) - mfspr r0,SPRN_SRR1 - stw r0,_SRR1(r11) - - /* set the stack limit to the current stack */ - mfspr r8,SPRN_SPRG_THREAD - lwz r0,KSP_LIMIT(r8) - stw r0,SAVED_KSP_LIMIT(r11) - rlwinm r0,r1,0,0,(31 - THREAD_SHIFT) - stw r0,KSP_LIMIT(r8) - /* fall through */ -_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler) -#endif - -#ifdef CONFIG_40x - .globl crit_transfer_to_handler -crit_transfer_to_handler: - lwz r0,crit_r10@l(0) - stw r0,GPR10(r11) - lwz r0,crit_r11@l(0) - stw r0,GPR11(r11) - mfspr r0,SPRN_SRR0 - stw r0,crit_srr0@l(0) - mfspr r0,SPRN_SRR1 - stw r0,crit_srr1@l(0) - - /* set the stack limit to the current stack */ - mfspr r8,SPRN_SPRG_THREAD - lwz r0,KSP_LIMIT(r8) - stw r0,saved_ksp_limit@l(0) - rlwinm r0,r1,0,0,(31 - THREAD_SHIFT) - stw r0,KSP_LIMIT(r8) - /* fall through */ -_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler) -#endif - -/* - * This code finishes saving the registers to the exception frame - * and jumps to the appropriate handler for the exception, turning - * on address translation. - * Note that we rely on the caller having set cr0.eq iff the exception - * occurred in kernel mode (i.e. MSR:PR = 0). - */ - .globl transfer_to_handler_full -transfer_to_handler_full: - SAVE_NVGPRS(r11) -_ASM_NOKPROBE_SYMBOL(transfer_to_handler_full) - /* fall through */ - - .globl transfer_to_handler -transfer_to_handler: - stw r2,GPR2(r11) - stw r12,_NIP(r11) - stw r9,_MSR(r11) - andi. r2,r9,MSR_PR - mfctr r12 - mfspr r2,SPRN_XER - stw r12,_CTR(r11) - stw r2,_XER(r11) - mfspr r12,SPRN_SPRG_THREAD - tovirt_vmstack r12, r12 - beq 2f /* if from user, fix up THREAD.regs */ - addi r2, r12, -THREAD - addi r11,r1,STACK_FRAME_OVERHEAD - stw r11,PT_REGS(r12) -#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) - /* Check to see if the dbcr0 register is set up to debug. Use the - internal debug mode bit to do this. */ - lwz r12,THREAD_DBCR0(r12) - andis. 
r12,r12,DBCR0_IDM@h -#endif - ACCOUNT_CPU_USER_ENTRY(r2, r11, r12) -#ifdef CONFIG_PPC_BOOK3S_32 - kuep_lock r11, r12 -#endif -#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) - beq+ 3f - /* From user and task is ptraced - load up global dbcr0 */ - li r12,-1 /* clear all pending debug events */ - mtspr SPRN_DBSR,r12 - lis r11,global_dbcr0@ha - tophys(r11,r11) - addi r11,r11,global_dbcr0@l -#ifdef CONFIG_SMP - lwz r9,TASK_CPU(r2) - slwi r9,r9,2 - add r11,r11,r9 -#endif - lwz r12,0(r11) - mtspr SPRN_DBCR0,r12 -#endif - - b 3f - -2: /* if from kernel, check interrupted DOZE/NAP mode and - * check for stack overflow - */ - kuap_save_and_lock r11, r12, r9, r2, r6 - addi r2, r12, -THREAD -#ifndef CONFIG_VMAP_STACK - lwz r9,KSP_LIMIT(r12) - cmplw r1,r9 /* if r1 <= ksp_limit */ - ble- stack_ovf /* then the kernel stack overflowed */ -#endif -5: #if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500) + .globl prepare_transfer_to_handler +prepare_transfer_to_handler: + /* if from kernel, check interrupted DOZE/NAP mode */ lwz r12,TI_LOCAL_FLAGS(r2) mtcrf 0x01,r12 bt- 31-TLF_NAPPING,4f bt- 31-TLF_SLEEPING,7f -#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */ - .globl transfer_to_handler_cont -transfer_to_handler_cont: -3: - mflr r9 - tovirt_novmstack r2, r2 /* set r2 to current */ - tovirt_vmstack r9, r9 - lwz r11,0(r9) /* virtual address of handler */ - lwz r9,4(r9) /* where to go when done */ -#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS) - mtspr SPRN_NRI, r0 -#endif -#ifdef CONFIG_TRACE_IRQFLAGS - /* - * When tracing IRQ state (lockdep) we enable the MMU before we call - * the IRQ tracing functions as they might access vmalloc space or - * perform IOs for console output. - * - * To speed up the syscall path where interrupts stay on, let's check - * first if we are changing the MSR value at all. - */ - tophys_novmstack r12, r1 - lwz r12,_MSR(r12) - andi. r12,r12,MSR_EE - bne 1f - - /* MSR isn't changing, just transition directly */ -#endif - mtspr SPRN_SRR0,r11 - mtspr SPRN_SRR1,r10 - mtlr r9 - rfi /* jump to handler, enable MMU */ -#ifdef CONFIG_40x - b . /* Prevent prefetch past rfi */ -#endif + blr -#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500) 4: rlwinm r12,r12,0,~_TLF_NAPPING stw r12,TI_LOCAL_FLAGS(r2) b power_save_ppc32_restore @@ -246,97 +67,18 @@ transfer_to_handler_cont: lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */ rlwinm r9,r9,0,~MSR_EE lwz r12,_LINK(r11) /* and return to address in LR */ - kuap_restore r11, r2, r3, r4, r5 lwz r2, GPR2(r11) b fast_exception_return -#endif -_ASM_NOKPROBE_SYMBOL(transfer_to_handler) -_ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont) - -#ifdef CONFIG_TRACE_IRQFLAGS -1: /* MSR is changing, re-enable MMU so we can notify lockdep. We need to - * keep interrupts disabled at this point otherwise we might risk - * taking an interrupt before we tell lockdep they are enabled. - */ - lis r12,reenable_mmu@h - ori r12,r12,reenable_mmu@l - LOAD_REG_IMMEDIATE(r0, MSR_KERNEL) - mtspr SPRN_SRR0,r12 - mtspr SPRN_SRR1,r0 - rfi -#ifdef CONFIG_40x - b . /* Prevent prefetch past rfi */ -#endif - -reenable_mmu: - /* - * We save a bunch of GPRs, - * r3 can be different from GPR3(r1) at this point, r9 and r11 - * contains the old MSR and handler address respectively, - * r0, r4-r8, r12, CCR, CTR, XER etc... are left - * clobbered as they aren't useful past this point. 
- */ - - stwu r1,-32(r1) - stw r9,8(r1) - stw r11,12(r1) - stw r3,16(r1) - - /* If we are disabling interrupts (normal case), simply log it with - * lockdep - */ -1: bl trace_hardirqs_off - lwz r3,16(r1) - lwz r11,12(r1) - lwz r9,8(r1) - addi r1,r1,32 - mtctr r11 - mtlr r9 - bctr /* jump to handler */ -#endif /* CONFIG_TRACE_IRQFLAGS */ - -#ifndef CONFIG_VMAP_STACK -/* - * On kernel stack overflow, load up an initial stack pointer - * and call StackOverflow(regs), which should not return. - */ -stack_ovf: - /* sometimes we use a statically-allocated stack, which is OK. */ - lis r12,_end@h - ori r12,r12,_end@l - cmplw r1,r12 - ble 5b /* r1 <= &_end is OK */ - SAVE_NVGPRS(r11) - addi r3,r1,STACK_FRAME_OVERHEAD - lis r1,init_thread_union@ha - addi r1,r1,init_thread_union@l - addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD - lis r9,StackOverflow@ha - addi r9,r9,StackOverflow@l - LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) -#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS) - mtspr SPRN_NRI, r0 -#endif - mtspr SPRN_SRR0,r9 - mtspr SPRN_SRR1,r10 - rfi -#ifdef CONFIG_40x - b . /* Prevent prefetch past rfi */ -#endif -_ASM_NOKPROBE_SYMBOL(stack_ovf) -#endif +_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler) +#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */ .globl transfer_to_syscall transfer_to_syscall: SAVE_NVGPRS(r1) -#ifdef CONFIG_PPC_BOOK3S_32 - kuep_lock r11, r12 -#endif /* Calling convention has r9 = orig r0, r10 = regs */ addi r10,r1,STACK_FRAME_OVERHEAD mr r9,r0 - stw r10,THREAD+PT_REGS(r2) bl system_call_exception ret_from_syscall: @@ -349,10 +91,6 @@ ret_from_syscall: cmplwi cr0,r5,0 bne- 2f #endif /* CONFIG_PPC_47x */ -#ifdef CONFIG_PPC_BOOK3S_32 - kuep_unlock r5, r7 -#endif - kuap_check r2, r4 lwz r4,_LINK(r1) lwz r5,_CCR(r1) mtlr r4 @@ -412,27 +150,6 @@ ret_from_kernel_thread: b ret_from_syscall /* - * Top-level page fault handling. - * This is in assembler because if do_page_fault tells us that - * it is a bad kernel page fault, we want to save the non-volatile - * registers before calling bad_page_fault. - */ - .globl handle_page_fault -handle_page_fault: - addi r3,r1,STACK_FRAME_OVERHEAD - bl do_page_fault - cmpwi r3,0 - beq+ ret_from_except - SAVE_NVGPRS(r1) - lwz r0,_TRAP(r1) - clrrwi r0,r0,1 - stw r0,_TRAP(r1) - mr r4,r3 /* err arg for bad_page_fault */ - addi r3,r1,STACK_FRAME_OVERHEAD - bl __bad_page_fault - b ret_from_except_full - -/* * This routine switches between two different tasks. The process * state of one is saved on its kernel stack. Then the state * of the other is restored from its kernel stack. The memory @@ -485,7 +202,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE) stw r10,_CCR(r1) stw r1,KSP(r3) /* Set old stack pointer */ - kuap_check r2, r0 #ifdef CONFIG_SMP /* We need a sync somewhere here to make sure that if the * previous task gets rescheduled on another CPU, it sees all @@ -529,12 +245,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE) fast_exception_return: #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) andi. 
r10,r9,MSR_RI /* check for recoverable interrupt */ - beq 1f /* if not, we've got problems */ + beq 3f /* if not, we've got problems */ #endif 2: REST_4GPRS(3, r11) lwz r10,_CCR(r11) - REST_GPR(1, r11) + REST_2GPRS(1, r11) mtcr r10 lwz r10,_LINK(r11) mtlr r10 @@ -556,257 +272,147 @@ fast_exception_return: #endif _ASM_NOKPROBE_SYMBOL(fast_exception_return) -#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) -/* check if the exception happened in a restartable section */ -1: lis r3,exc_exit_restart_end@ha - addi r3,r3,exc_exit_restart_end@l - cmplw r12,r3 - bge 3f - lis r4,exc_exit_restart@ha - addi r4,r4,exc_exit_restart@l - cmplw r12,r4 - blt 3f - lis r3,fee_restarts@ha - tophys(r3,r3) - lwz r5,fee_restarts@l(r3) - addi r5,r5,1 - stw r5,fee_restarts@l(r3) - mr r12,r4 /* restart at exc_exit_restart */ - b 2b - - .section .bss - .align 2 -fee_restarts: - .space 4 - .previous - /* aargh, a nonrecoverable interrupt, panic */ /* aargh, we don't know which trap this is */ 3: li r10,-1 stw r10,_TRAP(r11) - addi r3,r1,STACK_FRAME_OVERHEAD - lis r10,MSR_KERNEL@h - ori r10,r10,MSR_KERNEL@l - bl transfer_to_handler_full - .long unrecoverable_exception - .long ret_from_except -#endif - - .globl ret_from_except_full -ret_from_except_full: - REST_NVGPRS(r1) - /* fall through */ - - .globl ret_from_except -ret_from_except: - /* Hard-disable interrupts so that current_thread_info()->flags - * can't change between when we test it and when we return - * from the interrupt. */ - /* Note: We don't bother telling lockdep about it */ - LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) - mtmsr r10 /* disable interrupts */ - - lwz r3,_MSR(r1) /* Returning to user mode? */ - andi. r0,r3,MSR_PR - beq resume_kernel - -user_exc_return: /* r10 contains MSR_KERNEL here */ - /* Check current_thread_info()->flags */ - lwz r9,TI_FLAGS(r2) - andi. r0,r9,_TIF_USER_WORK_MASK - bne do_work + prepare_transfer_to_handler + bl unrecoverable_exception + trap /* should not get here */ -restore_user: -#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) - /* Check whether this process has its own DBCR0 value. The internal - debug mode bit tells us that dbcr0 should be loaded. */ - lwz r0,THREAD+THREAD_DBCR0(r2) - andis. r10,r0,DBCR0_IDM@h - bnel- load_dbcr0 -#endif - ACCOUNT_CPU_USER_EXIT(r2, r10, r11) -#ifdef CONFIG_PPC_BOOK3S_32 - kuep_unlock r10, r11 -#endif + .globl interrupt_return +interrupt_return: + lwz r4,_MSR(r1) + addi r3,r1,STACK_FRAME_OVERHEAD + andi. r0,r4,MSR_PR + beq .Lkernel_interrupt_return + bl interrupt_exit_user_prepare + cmpwi r3,0 + bne- .Lrestore_nvgprs - b restore +.Lfast_user_interrupt_return: + lwz r11,_NIP(r1) + lwz r12,_MSR(r1) + mtspr SPRN_SRR0,r11 + mtspr SPRN_SRR1,r12 -/* N.B. the only way to get here is from the beq following ret_from_except. */ -resume_kernel: - /* check current_thread_info, _TIF_EMULATE_STACK_STORE */ - lwz r8,TI_FLAGS(r2) - andis. r0,r8,_TIF_EMULATE_STACK_STORE@h - beq+ 1f +BEGIN_FTR_SECTION + stwcx. r0,0,r1 /* to clear the reservation */ +FTR_SECTION_ELSE + lwarx r0,0,r1 +ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) - addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */ + lwz r3,_CCR(r1) + lwz r4,_LINK(r1) + lwz r5,_CTR(r1) + lwz r6,_XER(r1) + li r0,0 - lwz r3,GPR1(r1) - subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */ - mr r4,r1 /* src: current exception frame */ - mr r1,r3 /* Reroute the trampoline frame to r1 */ + /* + * Leaving a stale exception_marker on the stack can confuse + * the reliable stack unwinder later on. Clear it. 
+ */ + stw r0,8(r1) + REST_4GPRS(7, r1) + REST_2GPRS(11, r1) - /* Copy from the original to the trampoline. */ - li r5,INT_FRAME_SIZE/4 /* size: INT_FRAME_SIZE */ - li r6,0 /* start offset: 0 */ + mtcr r3 + mtlr r4 mtctr r5 -2: lwzx r0,r6,r4 - stwx r0,r6,r3 - addi r6,r6,4 - bdnz 2b - - /* Do real store operation to complete stwu */ - lwz r5,GPR1(r1) - stw r8,0(r5) + mtspr SPRN_XER,r6 - /* Clear _TIF_EMULATE_STACK_STORE flag */ - lis r11,_TIF_EMULATE_STACK_STORE@h - addi r5,r2,TI_FLAGS -0: lwarx r8,0,r5 - andc r8,r8,r11 - stwcx. r8,0,r5 - bne- 0b -1: - -#ifdef CONFIG_PREEMPTION - /* check current_thread_info->preempt_count */ - lwz r0,TI_PREEMPT(r2) - cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ - bne restore_kuap - andi. r8,r8,_TIF_NEED_RESCHED - beq+ restore_kuap - lwz r3,_MSR(r1) - andi. r0,r3,MSR_EE /* interrupts off? */ - beq restore_kuap /* don't schedule if so */ -#ifdef CONFIG_TRACE_IRQFLAGS - /* Lockdep thinks irqs are enabled, we need to call - * preempt_schedule_irq with IRQs off, so we inform lockdep - * now that we -did- turn them off already - */ - bl trace_hardirqs_off -#endif - bl preempt_schedule_irq -#ifdef CONFIG_TRACE_IRQFLAGS - /* And now, to properly rebalance the above, we tell lockdep they - * are being turned back on, which will happen when we return - */ - bl trace_hardirqs_on + REST_4GPRS(2, r1) + REST_GPR(6, r1) + REST_GPR(0, r1) + REST_GPR(1, r1) + rfi +#ifdef CONFIG_40x + b . /* Prevent prefetch past rfi */ #endif -#endif /* CONFIG_PREEMPTION */ -restore_kuap: - kuap_restore r1, r2, r9, r10, r0 - - /* interrupts are hard-disabled at this point */ -restore: -#if defined(CONFIG_44x) && !defined(CONFIG_PPC_47x) - lis r4,icache_44x_need_flush@ha - lwz r5,icache_44x_need_flush@l(r4) - cmplwi cr0,r5,0 - beq+ 1f - li r6,0 - iccci r0,r0 - stw r6,icache_44x_need_flush@l(r4) -1: -#endif /* CONFIG_44x */ - lwz r9,_MSR(r1) -#ifdef CONFIG_TRACE_IRQFLAGS - /* Lockdep doesn't know about the fact that IRQs are temporarily turned - * off in this assembly code while peeking at TI_FLAGS() and such. However - * we need to inform it if the exception turned interrupts off, and we - * are about to trun them back on. - */ - andi. r10,r9,MSR_EE - beq 1f - stwu r1,-32(r1) - mflr r0 - stw r0,4(r1) - bl trace_hardirqs_on - addi r1, r1, 32 - lwz r9,_MSR(r1) -1: -#endif /* CONFIG_TRACE_IRQFLAGS */ +.Lrestore_nvgprs: + REST_NVGPRS(r1) + b .Lfast_user_interrupt_return - lwz r0,GPR0(r1) - lwz r2,GPR2(r1) - REST_4GPRS(3, r1) - REST_2GPRS(7, r1) +.Lkernel_interrupt_return: + bl interrupt_exit_kernel_prepare - lwz r10,_XER(r1) - lwz r11,_CTR(r1) - mtspr SPRN_XER,r10 - mtctr r11 +.Lfast_kernel_interrupt_return: + cmpwi cr1,r3,0 + lwz r11,_NIP(r1) + lwz r12,_MSR(r1) + mtspr SPRN_SRR0,r11 + mtspr SPRN_SRR1,r12 BEGIN_FTR_SECTION - lwarx r11,0,r1 -END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) - stwcx. r0,0,r1 /* to clear the reservation */ + stwcx. r0,0,r1 /* to clear the reservation */ +FTR_SECTION_ELSE + lwarx r0,0,r1 +ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) -#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) - andi. 
r10,r9,MSR_RI /* check if this exception occurred */ - beql nonrecoverable /* at a bad place (MSR:RI = 0) */ + lwz r3,_LINK(r1) + lwz r4,_CTR(r1) + lwz r5,_XER(r1) + lwz r6,_CCR(r1) + li r0,0 + + REST_4GPRS(7, r1) + REST_2GPRS(11, r1) - lwz r10,_CCR(r1) - lwz r11,_LINK(r1) - mtcrf 0xFF,r10 - mtlr r11 + mtlr r3 + mtctr r4 + mtspr SPRN_XER,r5 - /* Clear the exception_marker on the stack to avoid confusing stacktrace */ - li r10, 0 - stw r10, 8(r1) /* - * Once we put values in SRR0 and SRR1, we are in a state - * where exceptions are not recoverable, since taking an - * exception will trash SRR0 and SRR1. Therefore we clear the - * MSR:RI bit to indicate this. If we do take an exception, - * we can't return to the point of the exception but we - * can restart the exception exit path at the label - * exc_exit_restart below. -- paulus + * Leaving a stale exception_marker on the stack can confuse + * the reliable stack unwinder later on. Clear it. */ - LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI) - mtmsr r10 /* clear the RI bit */ - .globl exc_exit_restart -exc_exit_restart: - lwz r12,_NIP(r1) - mtspr SPRN_SRR0,r12 - mtspr SPRN_SRR1,r9 - REST_4GPRS(9, r1) - lwz r1,GPR1(r1) - .globl exc_exit_restart_end -exc_exit_restart_end: + stw r0,8(r1) + + REST_4GPRS(2, r1) + + bne- cr1,1f /* emulate stack store */ + mtcr r6 + REST_GPR(6, r1) + REST_GPR(0, r1) + REST_GPR(1, r1) rfi -_ASM_NOKPROBE_SYMBOL(exc_exit_restart) -_ASM_NOKPROBE_SYMBOL(exc_exit_restart_end) +#ifdef CONFIG_40x + b . /* Prevent prefetch past rfi */ +#endif -#else /* !(CONFIG_4xx || CONFIG_BOOKE) */ - /* - * This is a bit different on 4xx/Book-E because it doesn't have - * the RI bit in the MSR. - * The TLB miss handler checks if we have interrupted - * the exception exit path and restarts it if so - * (well maybe one day it will... :). +1: /* + * Emulate stack store with update. New r1 value was already calculated + * and updated in our interrupt regs by emulate_loadstore, but we can't + * store the previous value of r1 to the stack before re-loading our + * registers from it, otherwise they could be clobbered. Use + * SPRG Scratch0 as temporary storage to hold the store + * data, as interrupts are disabled here so it won't be clobbered. */ - lwz r11,_LINK(r1) - mtlr r11 - lwz r10,_CCR(r1) - mtcrf 0xff,r10 - /* Clear the exception_marker on the stack to avoid confusing stacktrace */ - li r10, 0 - stw r10, 8(r1) - REST_2GPRS(9, r1) - .globl exc_exit_restart -exc_exit_restart: - lwz r11,_NIP(r1) - lwz r12,_MSR(r1) - mtspr SPRN_SRR0,r11 - mtspr SPRN_SRR1,r12 - REST_2GPRS(11, r1) - lwz r1,GPR1(r1) - .globl exc_exit_restart_end -exc_exit_restart_end: + mtcr r6 +#ifdef CONFIG_BOOKE + mtspr SPRN_SPRG_WSCRATCH0, r9 +#else + mtspr SPRN_SPRG_SCRATCH0, r9 +#endif + addi r9,r1,INT_FRAME_SIZE /* get original r1 */ + REST_GPR(6, r1) + REST_GPR(0, r1) + REST_GPR(1, r1) + stw r9,0(r1) /* perform store component of stwu */ +#ifdef CONFIG_BOOKE + mfspr r9, SPRN_SPRG_RSCRATCH0 +#else + mfspr r9, SPRN_SPRG_SCRATCH0 +#endif rfi - b . /* prevent prefetch past rfi */ -_ASM_NOKPROBE_SYMBOL(exc_exit_restart) +#ifdef CONFIG_40x + b . /* Prevent prefetch past rfi */ +#endif +_ASM_NOKPROBE_SYMBOL(interrupt_return) + +#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) /* * Returning from a critical interrupt in user mode doesn't need @@ -837,8 +443,7 @@ _ASM_NOKPROBE_SYMBOL(exc_exit_restart) REST_NVGPRS(r1); \ lwz r3,_MSR(r1); \ andi. 
r3,r3,MSR_PR; \ - LOAD_REG_IMMEDIATE(r10,MSR_KERNEL); \ - bne user_exc_return; \ + bne interrupt_return; \ lwz r0,GPR0(r1); \ lwz r2,GPR2(r1); \ REST_4GPRS(3, r1); \ @@ -906,11 +511,6 @@ _ASM_NOKPROBE_SYMBOL(exc_exit_restart) #ifdef CONFIG_40x .globl ret_from_crit_exc ret_from_crit_exc: - mfspr r9,SPRN_SPRG_THREAD - lis r10,saved_ksp_limit@ha; - lwz r10,saved_ksp_limit@l(r10); - tovirt(r9,r9); - stw r10,KSP_LIMIT(r9) lis r9,crit_srr0@ha; lwz r9,crit_srr0@l(r9); lis r10,crit_srr1@ha; @@ -924,9 +524,6 @@ _ASM_NOKPROBE_SYMBOL(ret_from_crit_exc) #ifdef CONFIG_BOOKE .globl ret_from_crit_exc ret_from_crit_exc: - mfspr r9,SPRN_SPRG_THREAD - lwz r10,SAVED_KSP_LIMIT(r1) - stw r10,KSP_LIMIT(r9) RESTORE_xSRR(SRR0,SRR1); RESTORE_MMU_REGS; RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI) @@ -934,9 +531,6 @@ _ASM_NOKPROBE_SYMBOL(ret_from_crit_exc) .globl ret_from_debug_exc ret_from_debug_exc: - mfspr r9,SPRN_SPRG_THREAD - lwz r10,SAVED_KSP_LIMIT(r1) - stw r10,KSP_LIMIT(r9) RESTORE_xSRR(SRR0,SRR1); RESTORE_xSRR(CSRR0,CSRR1); RESTORE_MMU_REGS; @@ -945,9 +539,6 @@ _ASM_NOKPROBE_SYMBOL(ret_from_debug_exc) .globl ret_from_mcheck_exc ret_from_mcheck_exc: - mfspr r9,SPRN_SPRG_THREAD - lwz r10,SAVED_KSP_LIMIT(r1) - stw r10,KSP_LIMIT(r9) RESTORE_xSRR(SRR0,SRR1); RESTORE_xSRR(CSRR0,CSRR1); RESTORE_xSRR(DSRR0,DSRR1); @@ -955,121 +546,8 @@ ret_from_mcheck_exc: RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI) _ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc) #endif /* CONFIG_BOOKE */ - -/* - * Load the DBCR0 value for a task that is being ptraced, - * having first saved away the global DBCR0. Note that r0 - * has the dbcr0 value to set upon entry to this. - */ -load_dbcr0: - mfmsr r10 /* first disable debug exceptions */ - rlwinm r10,r10,0,~MSR_DE - mtmsr r10 - isync - mfspr r10,SPRN_DBCR0 - lis r11,global_dbcr0@ha - addi r11,r11,global_dbcr0@l -#ifdef CONFIG_SMP - lwz r9,TASK_CPU(r2) - slwi r9,r9,2 - add r11,r11,r9 -#endif - stw r10,0(r11) - mtspr SPRN_DBCR0,r0 - li r11,-1 - mtspr SPRN_DBSR,r11 /* clear all pending debug events */ - blr - - .section .bss - .align 4 - .global global_dbcr0 -global_dbcr0: - .space 4*NR_CPUS - .previous #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */ -do_work: /* r10 contains MSR_KERNEL here */ - andi. r0,r9,_TIF_NEED_RESCHED - beq do_user_signal - -do_resched: /* r10 contains MSR_KERNEL here */ -#ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_on - mfmsr r10 -#endif - ori r10,r10,MSR_EE - mtmsr r10 /* hard-enable interrupts */ - bl schedule -recheck: - /* Note: And we don't tell it we are disabling them again - * neither. Those disable/enable cycles used to peek at - * TI_FLAGS aren't advertised. - */ - LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) - mtmsr r10 /* disable interrupts */ - lwz r9,TI_FLAGS(r2) - andi. r0,r9,_TIF_NEED_RESCHED - bne- do_resched - andi. r0,r9,_TIF_USER_WORK_MASK - beq restore_user -do_user_signal: /* r10 contains MSR_KERNEL here */ - ori r10,r10,MSR_EE - mtmsr r10 /* hard-enable interrupts */ - /* save r13-r31 in the exception frame, if not already done */ - lwz r3,_TRAP(r1) - andi. r0,r3,1 - beq 2f - SAVE_NVGPRS(r1) - rlwinm r3,r3,0,0,30 - stw r3,_TRAP(r1) -2: addi r3,r1,STACK_FRAME_OVERHEAD - mr r4,r9 - bl do_notify_resume - REST_NVGPRS(r1) - b recheck - -/* - * We come here when we are at the end of handling an exception - * that occurred at a place where taking an exception will lose - * state information, such as the contents of SRR0 and SRR1. 
- */ -nonrecoverable: - lis r10,exc_exit_restart_end@ha - addi r10,r10,exc_exit_restart_end@l - cmplw r12,r10 - bge 3f - lis r11,exc_exit_restart@ha - addi r11,r11,exc_exit_restart@l - cmplw r12,r11 - blt 3f - lis r10,ee_restarts@ha - lwz r12,ee_restarts@l(r10) - addi r12,r12,1 - stw r12,ee_restarts@l(r10) - mr r12,r11 /* restart at exc_exit_restart */ - blr -3: /* OK, we can't recover, kill this process */ - lwz r3,_TRAP(r1) - andi. r0,r3,1 - beq 5f - SAVE_NVGPRS(r1) - rlwinm r3,r3,0,0,30 - stw r3,_TRAP(r1) -5: mfspr r2,SPRN_SPRG_THREAD - addi r2,r2,-THREAD - tovirt(r2,r2) /* set back r2 to current */ -4: addi r3,r1,STACK_FRAME_OVERHEAD - bl unrecoverable_exception - /* shouldn't return */ - b 4b -_ASM_NOKPROBE_SYMBOL(nonrecoverable) - - .section .bss - .align 2 -ee_restarts: - .space 4 - .previous - /* * PROM code for specific machines follows. Put it * here so it's easy to add arch-specific sections later. @@ -1088,7 +566,6 @@ _GLOBAL(enter_rtas) lis r6,1f@ha /* physical return address for rtas */ addi r6,r6,1f@l tophys(r6,r6) - tophys_novmstack r7, r1 lwz r8,RTASENTRY(r4) lwz r4,RTASBASE(r4) mfmsr r9 @@ -1097,24 +574,25 @@ _GLOBAL(enter_rtas) mtmsr r0 /* disable interrupts so SRR0/1 don't get trashed */ li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR) mtlr r6 - stw r7, THREAD + RTAS_SP(r2) + stw r1, THREAD + RTAS_SP(r2) mtspr SPRN_SRR0,r8 mtspr SPRN_SRR1,r9 rfi -1: tophys_novmstack r9, r1 -#ifdef CONFIG_VMAP_STACK - li r0, MSR_KERNEL & ~MSR_IR /* can take DTLB miss */ - mtmsr r0 - isync -#endif - lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */ - lwz r9,8(r9) /* original msr value */ - addi r1,r1,INT_FRAME_SIZE - li r0,0 - tophys_novmstack r7, r2 - stw r0, THREAD + RTAS_SP(r7) +1: + lis r8, 1f@h + ori r8, r8, 1f@l + LOAD_REG_IMMEDIATE(r9,MSR_KERNEL) mtspr SPRN_SRR0,r8 mtspr SPRN_SRR1,r9 - rfi /* return to caller */ + rfi /* Reactivate MMU translation */ +1: + lwz r8,INT_FRAME_SIZE+4(r1) /* get return address */ + lwz r9,8(r1) /* original msr value */ + addi r1,r1,INT_FRAME_SIZE + li r0,0 + stw r0, THREAD + RTAS_SP(r2) + mtlr r8 + mtmsr r9 + blr /* return to caller */ _ASM_NOKPROBE_SYMBOL(enter_rtas) #endif /* CONFIG_PPC_RTAS */ diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 6c4d9e276c4d..03727308d8cc 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -117,13 +117,12 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) /* - * RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which - * would clobber syscall parameters. Also we always enter with IRQs - * enabled and nothing pending. system_call_exception() will call - * trace_hardirqs_off(). - * - * scv enters with MSR[EE]=1, so don't set PACA_IRQ_HARD_DIS. The - * entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED. + * scv enters with MSR[EE]=1 and is immediately considered soft-masked. + * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED, + * and interrupts may be masked and pending already. + * system_call_exception() will call trace_hardirqs_off() which means + * interrupts could already have been blocked before trace_hardirqs_off, + * but this is the best we can do. */ /* Calling convention has r9 = orig r0, r10 = regs */ @@ -288,9 +287,8 @@ END_BTB_FLUSH_SECTION std r11,-16(r10) /* "regshere" marker */ /* - * RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which - * would clobber syscall parameters. Also we always enter with IRQs - * enabled and nothing pending. 
system_call_exception() will call + * We always enter kernel from userspace with irq soft-mask enabled and + * nothing pending. system_call_exception() will call * trace_hardirqs_off(). */ li r11,IRQS_ALL_DISABLED @@ -417,19 +415,6 @@ _GLOBAL(ret_from_kernel_thread) li r3,0 b .Lsyscall_exit -#ifdef CONFIG_PPC_BOOK3E -/* Save non-volatile GPRs, if not already saved. */ -_GLOBAL(save_nvgprs) - ld r11,_TRAP(r1) - andi. r0,r11,1 - beqlr- - SAVE_NVGPRS(r1) - clrrdi r0,r11,1 - std r0,_TRAP(r1) - blr -_ASM_NOKPROBE_SYMBOL(save_nvgprs); -#endif - #ifdef CONFIG_PPC_BOOK3S_64 #define FLUSH_COUNT_CACHE \ @@ -645,7 +630,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) addi r1,r1,SWITCH_FRAME_SIZE blr -#ifdef CONFIG_PPC_BOOK3S /* * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not * touched, no exit work created, then this can be used. @@ -657,6 +641,7 @@ _ASM_NOKPROBE_SYMBOL(fast_interrupt_return) kuap_check_amr r3, r4 ld r5,_MSR(r1) andi. r0,r5,MSR_PR +#ifdef CONFIG_PPC_BOOK3S bne .Lfast_user_interrupt_return_amr kuap_kernel_restore r3, r4 andi. r0,r5,MSR_RI @@ -665,6 +650,10 @@ _ASM_NOKPROBE_SYMBOL(fast_interrupt_return) addi r3,r1,STACK_FRAME_OVERHEAD bl unrecoverable_exception b . /* should not get here */ +#else + bne .Lfast_user_interrupt_return + b .Lfast_kernel_interrupt_return +#endif .balign IFETCH_ALIGN_BYTES .globl interrupt_return @@ -678,8 +667,10 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return) cmpdi r3,0 bne- .Lrestore_nvgprs +#ifdef CONFIG_PPC_BOOK3S .Lfast_user_interrupt_return_amr: kuap_user_restore r3, r4 +#endif .Lfast_user_interrupt_return: ld r11,_NIP(r1) ld r12,_MSR(r1) @@ -788,7 +779,6 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) RFI_TO_KERNEL b . /* prevent speculative execution */ -#endif /* CONFIG_PPC_BOOK3S */ #ifdef CONFIG_PPC_RTAS /* diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index e8eb9992a270..7c3654b0d0f4 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -63,9 +63,6 @@ ld reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1) special_reg_save: - lbz r9,PACAIRQHAPPENED(r13) - RECONCILE_IRQ_STATE(r3,r4) - /* * We only need (or have stack space) to save this stuff if * we interrupted the kernel. @@ -119,15 +116,11 @@ BEGIN_FTR_SECTION mtspr SPRN_MAS5,r10 mtspr SPRN_MAS8,r10 END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) - SPECIAL_EXC_STORE(r9,IRQHAPPENED) - mfspr r10,SPRN_DEAR SPECIAL_EXC_STORE(r10,DEAR) mfspr r10,SPRN_ESR SPECIAL_EXC_STORE(r10,ESR) - lbz r10,PACAIRQSOFTMASK(r13) - SPECIAL_EXC_STORE(r10,SOFTE) ld r10,_NIP(r1) SPECIAL_EXC_STORE(r10,CSRR0) ld r10,_MSR(r1) @@ -139,7 +132,8 @@ ret_from_level_except: ld r3,_MSR(r1) andi. r3,r3,MSR_PR beq 1f - b ret_from_except + REST_NVGPRS(r1) + b interrupt_return 1: LOAD_REG_ADDR(r11,extlb_level_exc) @@ -193,27 +187,6 @@ BEGIN_FTR_SECTION mtspr SPRN_MAS8,r10 END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) - lbz r6,PACAIRQSOFTMASK(r13) - ld r5,SOFTE(r1) - - /* Interrupts had better not already be enabled... */ - tweqi r6,IRQS_ENABLED - - andi. r6,r5,IRQS_DISABLED - bne 1f - - TRACE_ENABLE_INTS - stb r5,PACAIRQSOFTMASK(r13) -1: - /* - * Restore PACAIRQHAPPENED rather than setting it based on - * the return MSR[EE], since we could have interrupted - * __check_irq_replay() or other inconsistent transitory - * states that must remain that way. 
- */ - SPECIAL_EXC_LOAD(r10,IRQHAPPENED) - stb r10,PACAIRQHAPPENED(r13) - SPECIAL_EXC_LOAD(r10,DEAR) mtspr SPRN_DEAR,r10 SPECIAL_EXC_LOAD(r10,ESR) @@ -417,14 +390,15 @@ exc_##n##_common: \ std r6,_LINK(r1); \ std r7,_CTR(r1); \ std r8,_XER(r1); \ - li r3,(n)+1; /* indicate partial regs in trap */ \ + li r3,(n); /* regs.trap vector */ \ std r9,0(r1); /* store stack frame back link */ \ std r10,_CCR(r1); /* store orig CR in stackframe */ \ std r9,GPR1(r1); /* store stack frame back link */ \ std r11,SOFTE(r1); /* and save it to stackframe */ \ std r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \ std r3,_TRAP(r1); /* set trap number */ \ - std r0,RESULT(r1); /* clear regs->result */ + std r0,RESULT(r1); /* clear regs->result */ \ + SAVE_NVGPRS(r1); #define EXCEPTION_COMMON(n) \ EXCEPTION_COMMON_LVL(n, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN) @@ -435,28 +409,6 @@ exc_##n##_common: \ #define EXCEPTION_COMMON_DBG(n) \ EXCEPTION_COMMON_LVL(n, SPRN_SPRG_DBG_SCRATCH, PACA_EXDBG) -/* - * This is meant for exceptions that don't immediately hard-enable. We - * set a bit in paca->irq_happened to ensure that a subsequent call to - * arch_local_irq_restore() will properly hard-enable and avoid the - * fast-path, and then reconcile irq state. - */ -#define INTS_DISABLE RECONCILE_IRQ_STATE(r3,r4) - -/* - * This is called by exceptions that don't use INTS_DISABLE (that did not - * touch irq indicators in the PACA). This will restore MSR:EE to it's - * previous value - * - * XXX In the long run, we may want to open-code it in order to separate the - * load from the wrtee, thus limiting the latency caused by the dependency - * but at this point, I'll favor code clarity until we have a near to final - * implementation - */ -#define INTS_RESTORE_HARD \ - ld r11,_MSR(r1); \ - wrtee r11; - /* XXX FIXME: Restore r14/r15 when necessary */ #define BAD_STACK_TRAMPOLINE(n) \ exc_##n##_bad_stack: \ @@ -505,12 +457,11 @@ exc_##n##_bad_stack: \ START_EXCEPTION(label); \ NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\ EXCEPTION_COMMON(trapnum) \ - INTS_DISABLE; \ ack(r8); \ CHECK_NAPPING(); \ addi r3,r1,STACK_FRAME_OVERHEAD; \ bl hdlr; \ - b ret_from_except_lite; + b interrupt_return /* This value is used to mark exception frames on the stack. 
*/ .section ".toc","aw" @@ -561,11 +512,10 @@ __end_interrupts: CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_CRIT(0x100) - bl save_nvgprs bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD - bl unknown_exception + bl unknown_nmi_exception b ret_from_crit_except /* Machine Check Interrupt */ @@ -573,7 +523,6 @@ __end_interrupts: MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_MC(0x000) - bl save_nvgprs bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD @@ -587,7 +536,6 @@ __end_interrupts: mfspr r14,SPRN_DEAR mfspr r15,SPRN_ESR EXCEPTION_COMMON(0x300) - INTS_DISABLE b storage_fault_common /* Instruction Storage Interrupt */ @@ -597,7 +545,6 @@ __end_interrupts: li r15,0 mr r14,r10 EXCEPTION_COMMON(0x400) - INTS_DISABLE b storage_fault_common /* External Input Interrupt */ @@ -619,13 +566,12 @@ __end_interrupts: PROLOG_ADDITION_1REG) mfspr r14,SPRN_ESR EXCEPTION_COMMON(0x700) - INTS_DISABLE std r14,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD ld r14,PACA_EXGEN+EX_R14(r13) - bl save_nvgprs bl program_check_exception - b ret_from_except + REST_NVGPRS(r1) + b interrupt_return /* Floating Point Unavailable Interrupt */ START_EXCEPTION(fp_unavailable); @@ -637,12 +583,10 @@ __end_interrupts: andi. r0,r12,MSR_PR; beq- 1f bl load_up_fpu - b fast_exception_return -1: INTS_DISABLE - bl save_nvgprs - addi r3,r1,STACK_FRAME_OVERHEAD + b fast_interrupt_return +1: addi r3,r1,STACK_FRAME_OVERHEAD bl kernel_fp_unavailable_exception - b ret_from_except + b interrupt_return /* Altivec Unavailable Interrupt */ START_EXCEPTION(altivec_unavailable); @@ -656,15 +600,13 @@ BEGIN_FTR_SECTION andi. r0,r12,MSR_PR; beq- 1f bl load_up_altivec - b fast_exception_return + b fast_interrupt_return 1: END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif - INTS_DISABLE - bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD bl altivec_unavailable_exception - b ret_from_except + b interrupt_return /* AltiVec Assist */ START_EXCEPTION(altivec_assist); @@ -672,17 +614,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) BOOKE_INTERRUPT_ALTIVEC_ASSIST, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x220) - INTS_DISABLE - bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION bl altivec_assist_exception END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) + REST_NVGPRS(r1) #else bl unknown_exception #endif - b ret_from_except + b interrupt_return /* Decrementer Interrupt */ @@ -698,14 +639,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_CRIT(0x9f0) - bl save_nvgprs bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_BOOKE_WDT bl WatchdogException #else - bl unknown_exception + bl unknown_nmi_exception #endif b ret_from_crit_except @@ -722,11 +662,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0xf20) - INTS_DISABLE - bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD bl unknown_exception - b ret_from_except + b interrupt_return /* Debug exception as a critical interrupt*/ START_EXCEPTION(debug_crit); @@ -792,9 +730,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) addi r3,r1,STACK_FRAME_OVERHEAD ld r14,PACA_EXCRIT+EX_R14(r13) ld r15,PACA_EXCRIT+EX_R15(r13) - bl save_nvgprs bl DebugException - b ret_from_except + REST_NVGPRS(r1) + b interrupt_return kernel_dbg_exc: b . 
/* NYI */ @@ -859,24 +797,22 @@ kernel_dbg_exc: */ mfspr r14,SPRN_DBSR EXCEPTION_COMMON_DBG(0xd08) - INTS_DISABLE std r14,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD ld r14,PACA_EXDBG+EX_R14(r13) ld r15,PACA_EXDBG+EX_R15(r13) - bl save_nvgprs bl DebugException - b ret_from_except + REST_NVGPRS(r1) + b interrupt_return START_EXCEPTION(perfmon); NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x260) - INTS_DISABLE CHECK_NAPPING() addi r3,r1,STACK_FRAME_OVERHEAD bl performance_monitor_exception - b ret_from_except_lite + b interrupt_return /* Doorbell interrupt */ MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL, @@ -887,11 +823,10 @@ kernel_dbg_exc: CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_CRIT(0x2a0) - bl save_nvgprs bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD - bl unknown_exception + bl unknown_nmi_exception b ret_from_crit_except /* @@ -903,21 +838,18 @@ kernel_dbg_exc: PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x2c0) addi r3,r1,STACK_FRAME_OVERHEAD - bl save_nvgprs - INTS_RESTORE_HARD bl unknown_exception - b ret_from_except + b interrupt_return /* Guest Doorbell critical Interrupt */ START_EXCEPTION(guest_doorbell_crit); CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_CRIT(0x2e0) - bl save_nvgprs bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD - bl unknown_exception + bl unknown_nmi_exception b ret_from_crit_except /* Hypervisor call */ @@ -926,10 +858,8 @@ kernel_dbg_exc: PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x310) addi r3,r1,STACK_FRAME_OVERHEAD - bl save_nvgprs - INTS_RESTORE_HARD bl unknown_exception - b ret_from_except + b interrupt_return /* Embedded Hypervisor priviledged */ START_EXCEPTION(ehpriv); @@ -937,10 +867,8 @@ kernel_dbg_exc: PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x320) addi r3,r1,STACK_FRAME_OVERHEAD - bl save_nvgprs - INTS_RESTORE_HARD bl unknown_exception - b ret_from_except + b interrupt_return /* LRAT Error interrupt */ START_EXCEPTION(lrat_error); @@ -948,10 +876,8 @@ kernel_dbg_exc: PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x340) addi r3,r1,STACK_FRAME_OVERHEAD - bl save_nvgprs - INTS_RESTORE_HARD bl unknown_exception - b ret_from_except + b interrupt_return /* * An interrupt came in while soft-disabled; We mark paca->irq_happened @@ -1011,14 +937,7 @@ storage_fault_common: ld r14,PACA_EXGEN+EX_R14(r13) ld r15,PACA_EXGEN+EX_R15(r13) bl do_page_fault - cmpdi r3,0 - bne- 1f - b ret_from_except_lite -1: bl save_nvgprs - mr r4,r3 - addi r3,r1,STACK_FRAME_OVERHEAD - bl __bad_page_fault - b ret_from_except + b interrupt_return /* * Alignment exception doesn't fit entirely in the 0x100 bytes so it @@ -1030,291 +949,9 @@ alignment_more: addi r3,r1,STACK_FRAME_OVERHEAD ld r14,PACA_EXGEN+EX_R14(r13) ld r15,PACA_EXGEN+EX_R15(r13) - bl save_nvgprs - INTS_RESTORE_HARD bl alignment_exception - b ret_from_except - - .align 7 -_GLOBAL(ret_from_except) - ld r11,_TRAP(r1) - andi. r0,r11,1 - bne ret_from_except_lite REST_NVGPRS(r1) - -_GLOBAL(ret_from_except_lite) - /* - * Disable interrupts so that current_thread_info()->flags - * can't change between when we test it and when we return - * from the interrupt. - */ - wrteei 0 - - ld r9, PACA_THREAD_INFO(r13) - ld r3,_MSR(r1) - ld r10,PACACURRENT(r13) - ld r4,TI_FLAGS(r9) - andi. r3,r3,MSR_PR - beq resume_kernel - lwz r3,(THREAD+THREAD_DBCR0)(r10) - - /* Check current_thread_info()->flags */ - andi. 
r0,r4,_TIF_USER_WORK_MASK - bne 1f - /* - * Check to see if the dbcr0 register is set up to debug. - * Use the internal debug mode bit to do this. - */ - andis. r0,r3,DBCR0_IDM@h - beq restore - mfmsr r0 - rlwinm r0,r0,0,~MSR_DE /* Clear MSR.DE */ - mtmsr r0 - mtspr SPRN_DBCR0,r3 - li r10, -1 - mtspr SPRN_DBSR,r10 - b restore -1: andi. r0,r4,_TIF_NEED_RESCHED - beq 2f - bl restore_interrupts - SCHEDULE_USER - b ret_from_except_lite -2: - bl save_nvgprs - /* - * Use a non volatile GPR to save and restore our thread_info flags - * across the call to restore_interrupts. - */ - mr r30,r4 - bl restore_interrupts - mr r4,r30 - addi r3,r1,STACK_FRAME_OVERHEAD - bl do_notify_resume - b ret_from_except - -resume_kernel: - /* check current_thread_info, _TIF_EMULATE_STACK_STORE */ - andis. r8,r4,_TIF_EMULATE_STACK_STORE@h - beq+ 1f - - addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */ - - ld r3,GPR1(r1) - subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */ - mr r4,r1 /* src: current exception frame */ - mr r1,r3 /* Reroute the trampoline frame to r1 */ - - /* Copy from the original to the trampoline. */ - li r5,INT_FRAME_SIZE/8 /* size: INT_FRAME_SIZE */ - li r6,0 /* start offset: 0 */ - mtctr r5 -2: ldx r0,r6,r4 - stdx r0,r6,r3 - addi r6,r6,8 - bdnz 2b - - /* Do real store operation to complete stdu */ - ld r5,GPR1(r1) - std r8,0(r5) - - /* Clear _TIF_EMULATE_STACK_STORE flag */ - lis r11,_TIF_EMULATE_STACK_STORE@h - addi r5,r9,TI_FLAGS -0: ldarx r4,0,r5 - andc r4,r4,r11 - stdcx. r4,0,r5 - bne- 0b -1: - -#ifdef CONFIG_PREEMPT - /* Check if we need to preempt */ - andi. r0,r4,_TIF_NEED_RESCHED - beq+ restore - /* Check that preempt_count() == 0 and interrupts are enabled */ - lwz r8,TI_PREEMPT(r9) - cmpwi cr0,r8,0 - bne restore - ld r0,SOFTE(r1) - andi. r0,r0,IRQS_DISABLED - bne restore - - /* - * Here we are preempting the current task. We want to make - * sure we are soft-disabled first and reconcile irq state. - */ - RECONCILE_IRQ_STATE(r3,r4) - bl preempt_schedule_irq - - /* - * arch_local_irq_restore() from preempt_schedule_irq above may - * enable hard interrupt but we really should disable interrupts - * when we return from the interrupt, and so that we don't get - * interrupted after loading SRR0/1. - */ - wrteei 0 -#endif /* CONFIG_PREEMPT */ - -restore: - /* - * This is the main kernel exit path. First we check if we - * are about to re-enable interrupts - */ - ld r5,SOFTE(r1) - lbz r6,PACAIRQSOFTMASK(r13) - andi. r5,r5,IRQS_DISABLED - bne .Lrestore_irq_off - - /* We are enabling, were we already enabled ? Yes, just return */ - andi. r6,r6,IRQS_DISABLED - beq cr0,fast_exception_return - - /* - * We are about to soft-enable interrupts (we are hard disabled - * at this point). We check if there's anything that needs to - * be replayed first. - */ - lbz r0,PACAIRQHAPPENED(r13) - cmpwi cr0,r0,0 - bne- .Lrestore_check_irq_replay - - /* - * Get here when nothing happened while soft-disabled, just - * soft-enable and move-on. We will hard-enable as a side - * effect of rfi - */ -.Lrestore_no_replay: - TRACE_ENABLE_INTS - li r0,IRQS_ENABLED - stb r0,PACAIRQSOFTMASK(r13); - -/* This is the return from load_up_fpu fast path which could do with - * less GPR restores in fact, but for now we have a single return path - */ -fast_exception_return: - wrteei 0 -1: mr r0,r13 - ld r10,_MSR(r1) - REST_4GPRS(2, r1) - andi. r6,r10,MSR_PR - REST_2GPRS(6, r1) - beq 1f - ACCOUNT_CPU_USER_EXIT(r13, r10, r11) - ld r0,GPR13(r1) - -1: stdcx. 
r0,0,r1 /* to clear the reservation */ - - ld r8,_CCR(r1) - ld r9,_LINK(r1) - ld r10,_CTR(r1) - ld r11,_XER(r1) - mtcr r8 - mtlr r9 - mtctr r10 - mtxer r11 - REST_2GPRS(8, r1) - ld r10,GPR10(r1) - ld r11,GPR11(r1) - ld r12,GPR12(r1) - mtspr SPRN_SPRG_GEN_SCRATCH,r0 - - std r10,PACA_EXGEN+EX_R10(r13); - std r11,PACA_EXGEN+EX_R11(r13); - ld r10,_NIP(r1) - ld r11,_MSR(r1) - ld r0,GPR0(r1) - ld r1,GPR1(r1) - mtspr SPRN_SRR0,r10 - mtspr SPRN_SRR1,r11 - ld r10,PACA_EXGEN+EX_R10(r13) - ld r11,PACA_EXGEN+EX_R11(r13) - mfspr r13,SPRN_SPRG_GEN_SCRATCH - rfi - - /* - * We are returning to a context with interrupts soft disabled. - * - * However, we may also about to hard enable, so we need to - * make sure that in this case, we also clear PACA_IRQ_HARD_DIS - * or that bit can get out of sync and bad things will happen - */ -.Lrestore_irq_off: - ld r3,_MSR(r1) - lbz r7,PACAIRQHAPPENED(r13) - andi. r0,r3,MSR_EE - beq 1f - rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS - stb r7,PACAIRQHAPPENED(r13) -1: -#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG) - /* The interrupt should not have soft enabled. */ - lbz r7,PACAIRQSOFTMASK(r13) -1: tdeqi r7,IRQS_ENABLED - EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING -#endif - b fast_exception_return - - /* - * Something did happen, check if a re-emit is needed - * (this also clears paca->irq_happened) - */ -.Lrestore_check_irq_replay: - /* XXX: We could implement a fast path here where we check - * for irq_happened being just 0x01, in which case we can - * clear it and return. That means that we would potentially - * miss a decrementer having wrapped all the way around. - * - * Still, this might be useful for things like hash_page - */ - bl __check_irq_replay - cmpwi cr0,r3,0 - beq .Lrestore_no_replay - - /* - * We need to re-emit an interrupt. We do so by re-using our - * existing exception frame. We first change the trap value, - * but we need to ensure we preserve the low nibble of it - */ - ld r4,_TRAP(r1) - clrldi r4,r4,60 - or r4,r4,r3 - std r4,_TRAP(r1) - - /* - * PACA_IRQ_HARD_DIS won't always be set here, so set it now - * to reconcile the IRQ state. Tracing is already accounted for. - */ - lbz r4,PACAIRQHAPPENED(r13) - ori r4,r4,PACA_IRQ_HARD_DIS - stb r4,PACAIRQHAPPENED(r13) - - /* - * Then find the right handler and call it. Interrupts are - * still soft-disabled and we keep them that way. - */ - cmpwi cr0,r3,0x500 - bne 1f - addi r3,r1,STACK_FRAME_OVERHEAD; - bl do_IRQ - b ret_from_except -1: cmpwi cr0,r3,0x900 - bne 1f - addi r3,r1,STACK_FRAME_OVERHEAD; - bl timer_interrupt - b ret_from_except -#ifdef CONFIG_PPC_DOORBELL -1: - cmpwi cr0,r3,0x280 - bne 1f - addi r3,r1,STACK_FRAME_OVERHEAD; - bl doorbell_exception -#endif /* CONFIG_PPC_DOORBELL */ -1: b ret_from_except /* What else to do here ? 
*/ - -_ASM_NOKPROBE_SYMBOL(ret_from_except); -_ASM_NOKPROBE_SYMBOL(ret_from_except_lite); -_ASM_NOKPROBE_SYMBOL(resume_kernel); -_ASM_NOKPROBE_SYMBOL(restore); -_ASM_NOKPROBE_SYMBOL(fast_exception_return); + b interrupt_return /* * Trampolines used when spotting a bad kernel stack pointer in diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 8082b690e874..fa8e52a0239e 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -693,25 +693,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) .endm /* - * When the idle code in power4_idle puts the CPU into NAP mode, - * it has to do so in a loop, and relies on the external interrupt - * and decrementer interrupt entry code to get it out of the loop. - * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags - * to signal that it is in the loop and needs help to get out. - */ -#ifdef CONFIG_PPC_970_NAP -#define FINISH_NAP \ -BEGIN_FTR_SECTION \ - ld r11, PACA_THREAD_INFO(r13); \ - ld r9,TI_LOCAL_FLAGS(r11); \ - andi. r10,r9,_TLF_NAPPING; \ - bnel power4_fixup_nap; \ -END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) -#else -#define FINISH_NAP -#endif - -/* * There are a few constraints to be concerned with. * - Real mode exceptions code/data must be located at their physical location. * - Virtual mode exceptions must be mapped at their 0xc000... location. @@ -1248,7 +1229,6 @@ EXC_COMMON_BEGIN(machine_check_common) */ GEN_COMMON machine_check - FINISH_NAP /* Enable MSR_RI when finished with PACA_EXMC */ li r10,MSR_RI mtmsrd r10,1 @@ -1571,7 +1551,6 @@ EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100) EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100) EXC_COMMON_BEGIN(hardware_interrupt_common) GEN_COMMON hardware_interrupt - FINISH_NAP addi r3,r1,STACK_FRAME_OVERHEAD bl do_IRQ b interrupt_return @@ -1801,7 +1780,6 @@ EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80) EXC_VIRT_END(decrementer, 0x4900, 0x80) EXC_COMMON_BEGIN(decrementer_common) GEN_COMMON decrementer - FINISH_NAP addi r3,r1,STACK_FRAME_OVERHEAD bl timer_interrupt b interrupt_return @@ -1886,7 +1864,6 @@ EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100) EXC_VIRT_END(doorbell_super, 0x4a00, 0x100) EXC_COMMON_BEGIN(doorbell_super_common) GEN_COMMON doorbell_super - FINISH_NAP addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_PPC_DOORBELL bl doorbell_exception @@ -2237,7 +2214,6 @@ EXC_COMMON_BEGIN(hmi_exception_early_common) EXC_COMMON_BEGIN(hmi_exception_common) GEN_COMMON hmi_exception - FINISH_NAP addi r3,r1,STACK_FRAME_OVERHEAD bl handle_hmi_exception b interrupt_return @@ -2266,7 +2242,6 @@ EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20) EXC_VIRT_END(h_doorbell, 0x4e80, 0x20) EXC_COMMON_BEGIN(h_doorbell_common) GEN_COMMON h_doorbell - FINISH_NAP addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_PPC_DOORBELL bl doorbell_exception @@ -2299,7 +2274,6 @@ EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20) EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20) EXC_COMMON_BEGIN(h_virt_irq_common) GEN_COMMON h_virt_irq - FINISH_NAP addi r3,r1,STACK_FRAME_OVERHEAD bl do_IRQ b interrupt_return @@ -2345,7 +2319,6 @@ EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20) EXC_VIRT_END(performance_monitor, 0x4f00, 0x20) EXC_COMMON_BEGIN(performance_monitor_common) GEN_COMMON performance_monitor - FINISH_NAP addi r3,r1,STACK_FRAME_OVERHEAD bl performance_monitor_exception b interrupt_return @@ -2530,8 +2503,6 @@ EXC_VIRT_NONE(0x5100, 0x100) INT_DEFINE_BEGIN(cbe_system_error) IVEC=0x1200 IHSRR=1 - IKVM_SKIP=1 - IKVM_REAL=1 INT_DEFINE_END(cbe_system_error) 
EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100) @@ -2551,11 +2522,16 @@ EXC_REAL_NONE(0x1200, 0x100) EXC_VIRT_NONE(0x5200, 0x100) #endif - +/** + * Interrupt 0x1300 - Instruction Address Breakpoint Interrupt. + * This has been removed from the ISA before 2.01, which is the earliest + * 64-bit BookS ISA supported, however the G5 / 970 implements this + * interrupt with a non-architected feature available through the support + * processor interface. + */ INT_DEFINE_BEGIN(instruction_breakpoint) IVEC=0x1300 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE - IKVM_SKIP=1 IKVM_REAL=1 #endif INT_DEFINE_END(instruction_breakpoint) @@ -2701,8 +2677,6 @@ EXC_COMMON_BEGIN(denorm_exception_common) INT_DEFINE_BEGIN(cbe_maintenance) IVEC=0x1600 IHSRR=1 - IKVM_SKIP=1 - IKVM_REAL=1 INT_DEFINE_END(cbe_maintenance) EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100) @@ -2754,8 +2728,6 @@ EXC_COMMON_BEGIN(altivec_assist_common) INT_DEFINE_BEGIN(cbe_thermal) IVEC=0x1800 IHSRR=1 - IKVM_SKIP=1 - IKVM_REAL=1 INT_DEFINE_END(cbe_thermal) EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100) @@ -3096,24 +3068,6 @@ USE_FIXED_SECTION(virt_trampolines) __end_interrupts: DEFINE_FIXED_SYMBOL(__end_interrupts) -#ifdef CONFIG_PPC_970_NAP - /* - * Called by exception entry code if _TLF_NAPPING was set, this clears - * the NAPPING flag, and redirects the exception exit to - * power4_fixup_nap_return. - */ - .globl power4_fixup_nap -EXC_COMMON_BEGIN(power4_fixup_nap) - andc r9,r9,r10 - std r9,TI_LOCAL_FLAGS(r11) - LOAD_REG_ADDR(r10, power4_idle_nap_return) - std r10,_NIP(r1) - blr - -power4_idle_nap_return: - blr -#endif - CLOSE_FIXED_SECTION(real_vectors); CLOSE_FIXED_SECTION(real_trampolines); CLOSE_FIXED_SECTION(virt_vectors); diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 8482739d42f3..b990075285f5 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -31,6 +31,7 @@ #include <asm/fadump.h> #include <asm/fadump-internal.h> #include <asm/setup.h> +#include <asm/interrupt.h> /* * The CPU who acquired the lock to trigger the fadump crash should @@ -44,22 +45,21 @@ static struct fw_dump fw_dump; static void __init fadump_reserve_crash_area(u64 base); -struct kobject *fadump_kobj; - #ifndef CONFIG_PRESERVE_FA_DUMP +static struct kobject *fadump_kobj; + static atomic_t cpus_in_fadump; static DEFINE_MUTEX(fadump_mutex); -struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false }; +static struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false }; #define RESERVED_RNGS_SZ 16384 /* 16K - 128 entries */ #define RESERVED_RNGS_CNT (RESERVED_RNGS_SZ / \ sizeof(struct fadump_memory_range)) static struct fadump_memory_range rngs[RESERVED_RNGS_CNT]; -struct fadump_mrange_info reserved_mrange_info = { "reserved", rngs, - RESERVED_RNGS_SZ, 0, - RESERVED_RNGS_CNT, true }; +static struct fadump_mrange_info +reserved_mrange_info = { "reserved", rngs, RESERVED_RNGS_SZ, 0, RESERVED_RNGS_CNT, true }; static void __init early_init_dt_scan_reserved_ranges(unsigned long node); @@ -79,7 +79,7 @@ static struct cma *fadump_cma; * But for some reason even if it fails we still have the memory reservation * with us and we can still continue doing fadump. */ -int __init fadump_cma_init(void) +static int __init fadump_cma_init(void) { unsigned long long base, size; int rc; @@ -292,7 +292,7 @@ static void fadump_show_config(void) * that is required for a kernel to boot successfully. 
* */ -static inline u64 fadump_calculate_reserve_size(void) +static __init u64 fadump_calculate_reserve_size(void) { u64 base, size, bootmem_min; int ret; @@ -728,7 +728,7 @@ void crash_fadump(struct pt_regs *regs, const char *str) * If we came in via system reset, wait a while for the secondary * CPUs to enter. */ - if (TRAP(&(fdh->regs)) == 0x100) { + if (TRAP(&(fdh->regs)) == INTERRUPT_SYSTEM_RESET) { msecs = CRASH_TIMEOUT; while ((atomic_read(&cpus_in_fadump) < ncpus) && (--msecs > 0)) mdelay(1); diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S index 3ff9a8fafa46..2c57ece6671c 100644 --- a/arch/powerpc/kernel/fpu.S +++ b/arch/powerpc/kernel/fpu.S @@ -92,9 +92,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) /* enable use of FP after return */ #ifdef CONFIG_PPC32 mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */ -#ifdef CONFIG_VMAP_STACK tovirt(r5, r5) -#endif lwz r4,THREAD_FPEXC_MODE(r5) ori r9,r9,MSR_FP /* enable FP for current */ or r9,r9,r4 diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h index 5d4706c14572..a8221ddcbd66 100644 --- a/arch/powerpc/kernel/head_32.h +++ b/arch/powerpc/kernel/head_32.h @@ -10,36 +10,39 @@ * We assume sprg3 has the physical address of the current * task's thread_struct. */ -.macro EXCEPTION_PROLOG handle_dar_dsisr=0 +.macro EXCEPTION_PROLOG trapno name handle_dar_dsisr=0 EXCEPTION_PROLOG_0 handle_dar_dsisr=\handle_dar_dsisr EXCEPTION_PROLOG_1 - EXCEPTION_PROLOG_2 handle_dar_dsisr=\handle_dar_dsisr + EXCEPTION_PROLOG_2 \trapno \name handle_dar_dsisr=\handle_dar_dsisr .endm .macro EXCEPTION_PROLOG_0 handle_dar_dsisr=0 mtspr SPRN_SPRG_SCRATCH0,r10 mtspr SPRN_SPRG_SCRATCH1,r11 -#ifdef CONFIG_VMAP_STACK mfspr r10, SPRN_SPRG_THREAD .if \handle_dar_dsisr +#ifdef CONFIG_40x + mfspr r11, SPRN_DEAR +#else mfspr r11, SPRN_DAR +#endif stw r11, DAR(r10) +#ifdef CONFIG_40x + mfspr r11, SPRN_ESR +#else mfspr r11, SPRN_DSISR +#endif stw r11, DSISR(r10) .endif mfspr r11, SPRN_SRR0 stw r11, SRR0(r10) -#endif mfspr r11, SPRN_SRR1 /* check whether user or kernel */ -#ifdef CONFIG_VMAP_STACK stw r11, SRR1(r10) -#endif mfcr r10 andi. 
r11, r11, MSR_PR .endm -.macro EXCEPTION_PROLOG_1 for_rtas=0 -#ifdef CONFIG_VMAP_STACK +.macro EXCEPTION_PROLOG_1 mtspr SPRN_SPRG_SCRATCH2,r1 subi r1, r1, INT_FRAME_SIZE /* use r1 if kernel */ beq 1f @@ -47,32 +50,33 @@ lwz r1,TASK_STACK-THREAD(r1) addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE 1: +#ifdef CONFIG_VMAP_STACK mtcrf 0x3f, r1 - bt 32 - THREAD_ALIGN_SHIFT, stack_overflow -#else - subi r11, r1, INT_FRAME_SIZE /* use r1 if kernel */ - beq 1f - mfspr r11,SPRN_SPRG_THREAD - lwz r11,TASK_STACK-THREAD(r11) - addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE -1: tophys(r11, r11) + bt 32 - THREAD_ALIGN_SHIFT, vmap_stack_overflow #endif .endm -.macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0 -#ifdef CONFIG_VMAP_STACK - li r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */ - mtmsr r11 - isync +.macro EXCEPTION_PROLOG_2 trapno name handle_dar_dsisr=0 +#ifdef CONFIG_PPC_8xx + .if \handle_dar_dsisr + li r11, RPN_PATTERN + mtspr SPRN_DAR, r11 /* Tag DAR, to be used in DTLB Error */ + .endif +#endif + LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~MSR_RI) /* re-enable MMU */ + mtspr SPRN_SRR1, r11 + lis r11, 1f@h + ori r11, r11, 1f@l + mtspr SPRN_SRR0, r11 mfspr r11, SPRN_SPRG_SCRATCH2 + rfi + + .text +\name\()_virt: +1: stw r11,GPR1(r1) stw r11,0(r1) mr r11, r1 -#else - stw r1,GPR1(r11) - stw r1,0(r11) - tovirt(r1, r11) /* set new kernel sp */ -#endif stw r10,_CCR(r11) /* save registers */ stw r12,GPR12(r11) stw r9,GPR9(r11) @@ -82,7 +86,6 @@ stw r12,GPR11(r11) mflr r10 stw r10,_LINK(r11) -#ifdef CONFIG_VMAP_STACK mfspr r12, SPRN_SPRG_THREAD tovirt(r12, r12) .if \handle_dar_dsisr @@ -93,26 +96,48 @@ .endif lwz r9, SRR1(r12) lwz r12, SRR0(r12) -#else - mfspr r12,SPRN_SRR0 - mfspr r9,SPRN_SRR1 -#endif #ifdef CONFIG_40x rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */ +#elif defined(CONFIG_PPC_8xx) + mtspr SPRN_EID, r2 /* Set MSR_RI */ #else -#ifdef CONFIG_VMAP_STACK - li r10, MSR_KERNEL & ~MSR_IR /* can take exceptions */ -#else - li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR) /* can take exceptions */ -#endif + li r10, MSR_KERNEL /* can take exceptions */ mtmsr r10 /* (except for mach check in rtas) */ #endif - stw r0,GPR0(r11) + COMMON_EXCEPTION_PROLOG_END \trapno +_ASM_NOKPROBE_SYMBOL(\name\()_virt) +.endm + +.macro COMMON_EXCEPTION_PROLOG_END trapno + stw r0,GPR0(r1) lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */ addi r10,r10,STACK_FRAME_REGS_MARKER@l - stw r10,8(r11) - SAVE_4GPRS(3, r11) - SAVE_2GPRS(7, r11) + stw r10,8(r1) + li r10, \trapno + stw r10,_TRAP(r1) + SAVE_4GPRS(3, r1) + SAVE_2GPRS(7, r1) + SAVE_NVGPRS(r1) + stw r2,GPR2(r1) + stw r12,_NIP(r1) + stw r9,_MSR(r1) + mfctr r10 + mfspr r2,SPRN_SPRG_THREAD + stw r10,_CTR(r1) + tovirt(r2, r2) + mfspr r10,SPRN_XER + addi r2, r2, -THREAD + stw r10,_XER(r1) + addi r3,r1,STACK_FRAME_OVERHEAD +.endm + +.macro prepare_transfer_to_handler +#ifdef CONFIG_PPC_BOOK3S_32 + andi. 
r12,r9,MSR_PR + bne 777f + bl prepare_transfer_to_handler +777: +#endif .endm .macro SYSCALL_ENTRY trapno @@ -156,54 +181,6 @@ b transfer_to_syscall /* jump to handler */ .endm -.macro save_dar_dsisr_on_stack reg1, reg2, sp -#ifndef CONFIG_VMAP_STACK - mfspr \reg1, SPRN_DAR - mfspr \reg2, SPRN_DSISR - stw \reg1, _DAR(\sp) - stw \reg2, _DSISR(\sp) -#endif -.endm - -.macro get_and_save_dar_dsisr_on_stack reg1, reg2, sp -#ifdef CONFIG_VMAP_STACK - lwz \reg1, _DAR(\sp) - lwz \reg2, _DSISR(\sp) -#else - save_dar_dsisr_on_stack \reg1, \reg2, \sp -#endif -.endm - -.macro tovirt_vmstack dst, src -#ifdef CONFIG_VMAP_STACK - tovirt(\dst, \src) -#else - .ifnc \dst, \src - mr \dst, \src - .endif -#endif -.endm - -.macro tovirt_novmstack dst, src -#ifndef CONFIG_VMAP_STACK - tovirt(\dst, \src) -#else - .ifnc \dst, \src - mr \dst, \src - .endif -#endif -.endm - -.macro tophys_novmstack dst, src -#ifndef CONFIG_VMAP_STACK - tophys(\dst, \src) -#else - .ifnc \dst, \src - mr \dst, \src - .endif -#endif -.endm - /* * Note: code which follows this uses cr0.eq (set if from kernel), * r11, r12 (SRR0), and r9 (SRR1). @@ -217,41 +194,29 @@ */ #ifdef CONFIG_PPC_BOOK3S #define START_EXCEPTION(n, label) \ + __HEAD; \ . = n; \ DO_KVM n; \ label: #else #define START_EXCEPTION(n, label) \ + __HEAD; \ . = n; \ label: #endif -#define EXCEPTION(n, label, hdlr, xfer) \ +#define EXCEPTION(n, label, hdlr) \ START_EXCEPTION(n, label) \ - EXCEPTION_PROLOG; \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - xfer(n, hdlr) - -#define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret) \ - li r10,trap; \ - stw r10,_TRAP(r11); \ - LOAD_REG_IMMEDIATE(r10, msr); \ - bl tfer; \ - .long hdlr; \ - .long ret - -#define EXC_XFER_STD(n, hdlr) \ - EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full, \ - ret_from_except_full) - -#define EXC_XFER_LITE(n, hdlr) \ - EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \ - ret_from_except) + EXCEPTION_PROLOG n label; \ + prepare_transfer_to_handler; \ + bl hdlr; \ + b interrupt_return .macro vmap_stack_overflow_exception -#ifdef CONFIG_VMAP_STACK + __HEAD +vmap_stack_overflow: #ifdef CONFIG_SMP mfspr r1, SPRN_SPRG_THREAD lwz r1, TASK_CPU - THREAD(r1) @@ -261,16 +226,11 @@ label: lis r1, emergency_ctx@ha #endif lwz r1, emergency_ctx@l(r1) - cmpwi cr1, r1, 0 - bne cr1, 1f - lis r1, init_thread_union@ha - addi r1, r1, init_thread_union@l -1: addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE - EXCEPTION_PROLOG_2 - SAVE_NVGPRS(r11) - addi r3, r1, STACK_FRAME_OVERHEAD - EXC_XFER_STD(0, stack_overflow_exception) -#endif + addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE + EXCEPTION_PROLOG_2 0 vmap_stack_overflow + prepare_transfer_to_handler + bl stack_overflow_exception + b interrupt_return .endm #endif /* __HEAD_32_H__ */ diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index 24724a7dad49..e1360b88b6cb 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -89,7 +89,11 @@ _ENTRY(crit_srr0) .space 4 _ENTRY(crit_srr1) .space 4 -_ENTRY(saved_ksp_limit) +_ENTRY(crit_r1) + .space 4 +_ENTRY(crit_dear) + .space 4 +_ENTRY(crit_esr) .space 4 /* @@ -100,42 +104,62 @@ _ENTRY(saved_ksp_limit) * Instead we use a couple of words of memory at low physical addresses. * This is OK since we don't support SMP on these processors. */ -#define CRITICAL_EXCEPTION_PROLOG \ - stw r10,crit_r10@l(0); /* save two registers to work with */\ - stw r11,crit_r11@l(0); \ - mfcr r10; /* save CR in r10 for now */\ - mfspr r11,SPRN_SRR3; /* check whether user or kernel */\ - andi. 
r11,r11,MSR_PR; \ - lis r11,critirq_ctx@ha; \ - tophys(r11,r11); \ - lwz r11,critirq_ctx@l(r11); \ - beq 1f; \ - /* COMING FROM USER MODE */ \ - mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\ - lwz r11,TASK_STACK-THREAD(r11); /* this thread's kernel stack */\ -1: addi r11,r11,THREAD_SIZE-INT_FRAME_SIZE; /* Alloc an excpt frm */\ - tophys(r11,r11); \ - stw r10,_CCR(r11); /* save various registers */\ - stw r12,GPR12(r11); \ - stw r9,GPR9(r11); \ - mflr r10; \ - stw r10,_LINK(r11); \ - mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\ - stw r12,_DEAR(r11); /* since they may have had stuff */\ - mfspr r9,SPRN_ESR; /* in them at the point where the */\ - stw r9,_ESR(r11); /* exception was taken */\ - mfspr r12,SPRN_SRR2; \ - stw r1,GPR1(r11); \ - mfspr r9,SPRN_SRR3; \ - stw r1,0(r11); \ - tovirt(r1,r11); \ - rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ - stw r0,GPR0(r11); \ - lis r10, STACK_FRAME_REGS_MARKER@ha; /* exception frame marker */\ - addi r10, r10, STACK_FRAME_REGS_MARKER@l; \ - stw r10, 8(r11); \ - SAVE_4GPRS(3, r11); \ - SAVE_2GPRS(7, r11) +.macro CRITICAL_EXCEPTION_PROLOG trapno name + stw r10,crit_r10@l(0) /* save two registers to work with */ + stw r11,crit_r11@l(0) + mfspr r10,SPRN_SRR0 + mfspr r11,SPRN_SRR1 + stw r10,crit_srr0@l(0) + stw r11,crit_srr1@l(0) + mfspr r10,SPRN_DEAR + mfspr r11,SPRN_ESR + stw r10,crit_dear@l(0) + stw r11,crit_esr@l(0) + mfcr r10 /* save CR in r10 for now */ + mfspr r11,SPRN_SRR3 /* check whether user or kernel */ + andi. r11,r11,MSR_PR + lis r11,(critirq_ctx-PAGE_OFFSET)@ha + lwz r11,(critirq_ctx-PAGE_OFFSET)@l(r11) + beq 1f + /* COMING FROM USER MODE */ + mfspr r11,SPRN_SPRG_THREAD /* if from user, start at top of */ + lwz r11,TASK_STACK-THREAD(r11) /* this thread's kernel stack */ +1: stw r1,crit_r1@l(0) + addi r1,r11,THREAD_SIZE-INT_FRAME_SIZE /* Alloc an excpt frm */ + LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)) /* re-enable MMU */ + mtspr SPRN_SRR1, r11 + lis r11, 1f@h + ori r11, r11, 1f@l + mtspr SPRN_SRR0, r11 + rfi + + .text +1: +\name\()_virt: + lwz r11,crit_r1@l(0) + stw r11,GPR1(r1) + stw r11,0(r1) + mr r11,r1 + stw r10,_CCR(r11) /* save various registers */ + stw r12,GPR12(r11) + stw r9,GPR9(r11) + mflr r10 + stw r10,_LINK(r11) + lis r9,PAGE_OFFSET@ha + lwz r10,crit_r10@l(r9) + lwz r12,crit_r11@l(r9) + stw r10,GPR10(r11) + stw r12,GPR11(r11) + lwz r12,crit_dear@l(r9) + lwz r9,crit_esr@l(r9) + stw r12,_DEAR(r11) /* since they may have had stuff */ + stw r9,_ESR(r11) /* exception was taken */ + mfspr r12,SPRN_SRR2 + mfspr r9,SPRN_SRR3 + rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */ + COMMON_EXCEPTION_PROLOG_END \trapno + 2 +_ASM_NOKPROBE_SYMBOL(\name\()_virt) +.endm /* * State at this point: @@ -155,10 +179,10 @@ _ENTRY(saved_ksp_limit) */ #define CRITICAL_EXCEPTION(n, label, hdlr) \ START_EXCEPTION(n, label); \ - CRITICAL_EXCEPTION_PROLOG; \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ - crit_transfer_to_handler, ret_from_crit_exc) + CRITICAL_EXCEPTION_PROLOG n label; \ + prepare_transfer_to_handler; \ + bl hdlr; \ + b ret_from_crit_exc /* * 0x0100 - Critical Interrupt Exception @@ -178,69 +202,67 @@ _ENTRY(saved_ksp_limit) * if they can't resolve the lightweight TLB fault. 
*/ START_EXCEPTION(0x0300, DataStorage) - EXCEPTION_PROLOG - mfspr r5, SPRN_ESR /* Grab the ESR, save it */ - stw r5, _ESR(r11) - mfspr r4, SPRN_DEAR /* Grab the DEAR, save it */ - stw r4, _DEAR(r11) - EXC_XFER_LITE(0x300, handle_page_fault) + EXCEPTION_PROLOG 0x300 DataStorage handle_dar_dsisr=1 + prepare_transfer_to_handler + bl do_page_fault + b interrupt_return /* * 0x0400 - Instruction Storage Exception * This is caused by a fetch from non-execute or guarded pages. */ START_EXCEPTION(0x0400, InstructionAccess) - EXCEPTION_PROLOG + EXCEPTION_PROLOG 0x400 InstructionAccess li r5,0 stw r5, _ESR(r11) /* Zero ESR */ stw r12, _DEAR(r11) /* SRR0 as DEAR */ - EXC_XFER_LITE(0x400, handle_page_fault) + prepare_transfer_to_handler + bl do_page_fault + b interrupt_return /* 0x0500 - External Interrupt Exception */ - EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) + EXCEPTION(0x0500, HardwareInterrupt, do_IRQ) /* 0x0600 - Alignment Exception */ START_EXCEPTION(0x0600, Alignment) - EXCEPTION_PROLOG - mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */ - stw r4,_DEAR(r11) - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_STD(0x600, alignment_exception) + EXCEPTION_PROLOG 0x600 Alignment handle_dar_dsisr=1 + prepare_transfer_to_handler + bl alignment_exception + REST_NVGPRS(r1) + b interrupt_return /* 0x0700 - Program Exception */ START_EXCEPTION(0x0700, ProgramCheck) - EXCEPTION_PROLOG - mfspr r4,SPRN_ESR /* Grab the ESR and save it */ - stw r4,_ESR(r11) - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_STD(0x700, program_check_exception) + EXCEPTION_PROLOG 0x700 ProgramCheck handle_dar_dsisr=1 + prepare_transfer_to_handler + bl program_check_exception + REST_NVGPRS(r1) + b interrupt_return - EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_STD) + EXCEPTION(0x0800, Trap_08, unknown_exception) + EXCEPTION(0x0900, Trap_09, unknown_exception) + EXCEPTION(0x0A00, Trap_0A, unknown_exception) + EXCEPTION(0x0B00, Trap_0B, unknown_exception) /* 0x0C00 - System Call Exception */ START_EXCEPTION(0x0C00, SystemCall) SYSCALL_ENTRY 0xc00 /* Trap_0D is commented out to get more space for system call exception */ -/* EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_STD) */ - EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_STD) +/* EXCEPTION(0x0D00, Trap_0D, unknown_exception) */ + EXCEPTION(0x0E00, Trap_0E, unknown_exception) + EXCEPTION(0x0F00, Trap_0F, unknown_exception) /* 0x1000 - Programmable Interval Timer (PIT) Exception */ - . = 0x1000 + START_EXCEPTION(0x1000, DecrementerTrap) b Decrementer -/* 0x1010 - Fixed Interval Timer (FIT) Exception -*/ - . = 0x1010 +/* 0x1010 - Fixed Interval Timer (FIT) Exception */ + START_EXCEPTION(0x1010, FITExceptionTrap) b FITException -/* 0x1020 - Watchdog Timer (WDT) Exception -*/ - . = 0x1020 +/* 0x1020 - Watchdog Timer (WDT) Exception */ + START_EXCEPTION(0x1020, WDTExceptionTrap) b WDTException /* 0x1100 - Data TLB Miss Exception @@ -249,13 +271,13 @@ _ENTRY(saved_ksp_limit) * load TLB entries from the page table if they exist. 
*/ START_EXCEPTION(0x1100, DTLBMiss) - mtspr SPRN_SPRG_SCRATCH0, r10 /* Save some working registers */ - mtspr SPRN_SPRG_SCRATCH1, r11 + mtspr SPRN_SPRG_SCRATCH5, r10 /* Save some working registers */ + mtspr SPRN_SPRG_SCRATCH6, r11 mtspr SPRN_SPRG_SCRATCH3, r12 mtspr SPRN_SPRG_SCRATCH4, r9 mfcr r12 mfspr r9, SPRN_PID - mtspr SPRN_SPRG_SCRATCH5, r9 + rlwimi r12, r9, 0, 0xff mfspr r10, SPRN_DEAR /* Get faulting address */ /* If we are faulting a kernel address, we have to use the @@ -316,13 +338,12 @@ _ENTRY(saved_ksp_limit) /* The bailout. Restore registers to pre-exception conditions * and call the heavyweights to help us out. */ - mfspr r9, SPRN_SPRG_SCRATCH5 - mtspr SPRN_PID, r9 - mtcr r12 + mtspr SPRN_PID, r12 + mtcrf 0x80, r12 mfspr r9, SPRN_SPRG_SCRATCH4 mfspr r12, SPRN_SPRG_SCRATCH3 - mfspr r11, SPRN_SPRG_SCRATCH1 - mfspr r10, SPRN_SPRG_SCRATCH0 + mfspr r11, SPRN_SPRG_SCRATCH6 + mfspr r10, SPRN_SPRG_SCRATCH5 b DataStorage /* 0x1200 - Instruction TLB Miss Exception @@ -330,13 +351,13 @@ _ENTRY(saved_ksp_limit) * registers and bailout to a different point. */ START_EXCEPTION(0x1200, ITLBMiss) - mtspr SPRN_SPRG_SCRATCH0, r10 /* Save some working registers */ - mtspr SPRN_SPRG_SCRATCH1, r11 + mtspr SPRN_SPRG_SCRATCH5, r10 /* Save some working registers */ + mtspr SPRN_SPRG_SCRATCH6, r11 mtspr SPRN_SPRG_SCRATCH3, r12 mtspr SPRN_SPRG_SCRATCH4, r9 mfcr r12 mfspr r9, SPRN_PID - mtspr SPRN_SPRG_SCRATCH5, r9 + rlwimi r12, r9, 0, 0xff mfspr r10, SPRN_SRR0 /* Get faulting address */ /* If we are faulting a kernel address, we have to use the @@ -397,28 +418,27 @@ _ENTRY(saved_ksp_limit) /* The bailout. Restore registers to pre-exception conditions * and call the heavyweights to help us out. */ - mfspr r9, SPRN_SPRG_SCRATCH5 - mtspr SPRN_PID, r9 - mtcr r12 + mtspr SPRN_PID, r12 + mtcrf 0x80, r12 mfspr r9, SPRN_SPRG_SCRATCH4 mfspr r12, SPRN_SPRG_SCRATCH3 - mfspr r11, SPRN_SPRG_SCRATCH1 - mfspr r10, SPRN_SPRG_SCRATCH0 + mfspr r11, SPRN_SPRG_SCRATCH6 + mfspr r10, SPRN_SPRG_SCRATCH5 b InstructionAccess - EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_STD) + EXCEPTION(0x1300, Trap_13, unknown_exception) + EXCEPTION(0x1400, Trap_14, unknown_exception) + EXCEPTION(0x1500, Trap_15, unknown_exception) + EXCEPTION(0x1600, Trap_16, unknown_exception) + EXCEPTION(0x1700, Trap_17, unknown_exception) + EXCEPTION(0x1800, Trap_18, unknown_exception) + EXCEPTION(0x1900, Trap_19, unknown_exception) + EXCEPTION(0x1A00, Trap_1A, unknown_exception) + EXCEPTION(0x1B00, Trap_1B, unknown_exception) + EXCEPTION(0x1C00, Trap_1C, unknown_exception) + EXCEPTION(0x1D00, Trap_1D, unknown_exception) + EXCEPTION(0x1E00, Trap_1E, unknown_exception) + EXCEPTION(0x1F00, Trap_1F, unknown_exception) /* Check for a single step debug exception while in an exception * handler before state has been 
saved. This is to catch the case @@ -435,7 +455,7 @@ _ENTRY(saved_ksp_limit) */ /* 0x2000 - Debug Exception */ START_EXCEPTION(0x2000, DebugTrap) - CRITICAL_EXCEPTION_PROLOG + CRITICAL_EXCEPTION_PROLOG 0x2000 DebugTrap /* * If this is a single step or branch-taken exception in an @@ -477,32 +497,35 @@ _ENTRY(saved_ksp_limit) /* continue normal handling for a critical exception... */ 2: mfspr r4,SPRN_DBSR stw r4,_ESR(r11) /* DebugException takes DBSR in _ESR */ - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_TEMPLATE(DebugException, 0x2002, \ - (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ - crit_transfer_to_handler, ret_from_crit_exc) + prepare_transfer_to_handler + bl DebugException + b ret_from_crit_exc /* Programmable Interval Timer (PIT) Exception. (from 0x1000) */ + __HEAD Decrementer: - EXCEPTION_PROLOG + EXCEPTION_PROLOG 0x1000 Decrementer lis r0,TSR_PIS@h mtspr SPRN_TSR,r0 /* Clear the PIT exception */ - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_LITE(0x1000, timer_interrupt) + prepare_transfer_to_handler + bl timer_interrupt + b interrupt_return /* Fixed Interval Timer (FIT) Exception. (from 0x1010) */ + __HEAD FITException: - EXCEPTION_PROLOG - addi r3,r1,STACK_FRAME_OVERHEAD; - EXC_XFER_STD(0x1010, unknown_exception) + EXCEPTION_PROLOG 0x1010 FITException + prepare_transfer_to_handler + bl unknown_exception + b interrupt_return /* Watchdog Timer (WDT) Exception. (from 0x1020) */ + __HEAD WDTException: - CRITICAL_EXCEPTION_PROLOG; - addi r3,r1,STACK_FRAME_OVERHEAD; - EXC_XFER_TEMPLATE(WatchdogException, 0x1020+2, - (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), - crit_transfer_to_handler, ret_from_crit_exc) + CRITICAL_EXCEPTION_PROLOG 0x1020 WDTException + prepare_transfer_to_handler + bl WatchdogException + b ret_from_crit_exc /* Other PowerPC processors, namely those derived from the 6xx-series * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved. @@ -510,6 +533,7 @@ WDTException: * reserved. */ + __HEAD /* Damn, I came up one instruction too many to fit into the * exception space :-). Both the instruction and data TLB * miss get to this point to load the TLB. @@ -543,13 +567,12 @@ finish_tlb_load: /* Done...restore registers and get out of here. */ - mfspr r9, SPRN_SPRG_SCRATCH5 - mtspr SPRN_PID, r9 - mtcr r12 + mtspr SPRN_PID, r12 + mtcrf 0x80, r12 mfspr r9, SPRN_SPRG_SCRATCH4 mfspr r12, SPRN_SPRG_SCRATCH3 - mfspr r11, SPRN_SPRG_SCRATCH1 - mfspr r10, SPRN_SPRG_SCRATCH0 + mfspr r11, SPRN_SPRG_SCRATCH6 + mfspr r10, SPRN_SPRG_SCRATCH5 rfi /* Should sync shadow TLBs */ b . 
/* prevent prefetch past rfi */ diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index 813fa305c33b..5c106ac36626 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -263,8 +263,7 @@ interrupt_base: INSTRUCTION_STORAGE_EXCEPTION /* External Input Interrupt */ - EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, \ - do_IRQ, EXC_XFER_LITE) + EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, do_IRQ) /* Alignment Interrupt */ ALIGNMENT_EXCEPTION @@ -277,7 +276,7 @@ interrupt_base: FP_UNAVAILABLE_EXCEPTION #else EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \ - FloatingPointUnavailable, unknown_exception, EXC_XFER_STD) + FloatingPointUnavailable, unknown_exception) #endif /* System Call Interrupt */ START_EXCEPTION(SystemCall) @@ -285,15 +284,14 @@ interrupt_base: /* Auxiliary Processor Unavailable Interrupt */ EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \ - AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_STD) + AuxillaryProcessorUnavailable, unknown_exception) /* Decrementer Interrupt */ DECREMENTER_EXCEPTION /* Fixed Internal Timer Interrupt */ /* TODO: Add FIT support */ - EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, \ - unknown_exception, EXC_XFER_STD) + EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, unknown_exception) /* Watchdog Timer Interrupt */ /* TODO: Add watchdog support */ diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 46dff3f9c31f..7d445e4342c0 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -29,6 +29,13 @@ #include <asm/ptrace.h> #include <asm/export.h> #include <asm/code-patching-asm.h> +#include <asm/interrupt.h> + +/* + * Value for the bits that have fixed value in RPN entries. + * Also used for tagging DAR for DTLBerror. + */ +#define RPN_PATTERN 0x00f0 #include "head_32.h" @@ -42,12 +49,6 @@ #endif .endm -/* - * Value for the bits that have fixed value in RPN entries. - * Also used for tagging DAR for DTLBerror. - */ -#define RPN_PATTERN 0x00f0 - #define PAGE_SHIFT_512K 19 #define PAGE_SHIFT_8M 23 @@ -118,56 +119,54 @@ instruction_counter: #endif /* System reset */ - EXCEPTION(0x100, Reset, system_reset_exception, EXC_XFER_STD) + EXCEPTION(INTERRUPT_SYSTEM_RESET, Reset, system_reset_exception) /* Machine check */ - . = 0x200 -MachineCheck: - EXCEPTION_PROLOG handle_dar_dsisr=1 - save_dar_dsisr_on_stack r4, r5, r11 - li r6, RPN_PATTERN - mtspr SPRN_DAR, r6 /* Tag DAR, to be used in DTLB Error */ - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_STD(0x200, machine_check_exception) + START_EXCEPTION(INTERRUPT_MACHINE_CHECK, MachineCheck) + EXCEPTION_PROLOG INTERRUPT_MACHINE_CHECK MachineCheck handle_dar_dsisr=1 + prepare_transfer_to_handler + bl machine_check_exception + b interrupt_return /* External interrupt */ - EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) + EXCEPTION(INTERRUPT_EXTERNAL, HardwareInterrupt, do_IRQ) /* Alignment exception */ - . 
= 0x600 -Alignment: - EXCEPTION_PROLOG handle_dar_dsisr=1 - save_dar_dsisr_on_stack r4, r5, r11 - li r6, RPN_PATTERN - mtspr SPRN_DAR, r6 /* Tag DAR, to be used in DTLB Error */ - addi r3,r1,STACK_FRAME_OVERHEAD - b .Lalignment_exception_ool + START_EXCEPTION(INTERRUPT_ALIGNMENT, Alignment) + EXCEPTION_PROLOG INTERRUPT_ALIGNMENT Alignment handle_dar_dsisr=1 + prepare_transfer_to_handler + bl alignment_exception + REST_NVGPRS(r1) + b interrupt_return /* Program check exception */ - EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD) + START_EXCEPTION(INTERRUPT_PROGRAM, ProgramCheck) + EXCEPTION_PROLOG INTERRUPT_PROGRAM ProgramCheck + prepare_transfer_to_handler + bl program_check_exception + REST_NVGPRS(r1) + b interrupt_return /* Decrementer */ - EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE) - - /* With VMAP_STACK there's not enough room for this at 0x600 */ - . = 0xa00 -.Lalignment_exception_ool: - EXC_XFER_STD(0x600, alignment_exception) + EXCEPTION(INTERRUPT_DECREMENTER, Decrementer, timer_interrupt) /* System call */ - . = 0xc00 -SystemCall: - SYSCALL_ENTRY 0xc00 + START_EXCEPTION(INTERRUPT_SYSCALL, SystemCall) + SYSCALL_ENTRY INTERRUPT_SYSCALL /* Single step - not used on 601 */ - EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD) + EXCEPTION(INTERRUPT_TRACE, SingleStep, single_step_exception) /* On the MPC8xx, this is a software emulation interrupt. It occurs * for all unimplemented and illegal instructions. */ - EXCEPTION(0x1000, SoftEmu, emulation_assist_interrupt, EXC_XFER_STD) + START_EXCEPTION(INTERRUPT_SOFT_EMU_8xx, SoftEmu) + EXCEPTION_PROLOG INTERRUPT_SOFT_EMU_8xx SoftEmu + prepare_transfer_to_handler + bl emulation_assist_interrupt + REST_NVGPRS(r1) + b interrupt_return - . = 0x1100 /* * For the MPC8xx, this is a software tablewalk to load the instruction * TLB. The task switch loads the M_TWB register with the pointer to the first @@ -189,7 +188,7 @@ SystemCall: #define INVALIDATE_ADJACENT_PAGES_CPU15(addr, tmp) #endif -InstructionTLBMiss: + START_EXCEPTION(INTERRUPT_INST_TLB_MISS_8xx, InstructionTLBMiss) mtspr SPRN_SPRG_SCRATCH2, r10 mtspr SPRN_M_TW, r11 @@ -245,8 +244,7 @@ InstructionTLBMiss: rfi #endif - . = 0x1200 -DataStoreTLBMiss: + START_EXCEPTION(INTERRUPT_DATA_TLB_MISS_8xx, DataStoreTLBMiss) mtspr SPRN_SPRG_SCRATCH2, r10 mtspr SPRN_M_TW, r11 mfcr r11 @@ -309,83 +307,74 @@ DataStoreTLBMiss: * to many reasons, such as executing guarded memory or illegal instruction * addresses. There is nothing to do but handle a big time error fault. */ - . = 0x1300 -InstructionTLBError: - EXCEPTION_PROLOG + START_EXCEPTION(INTERRUPT_INST_TLB_ERROR_8xx, InstructionTLBError) + /* 0x400 is InstructionAccess exception, needed by bad_page_fault() */ + EXCEPTION_PROLOG INTERRUPT_INST_STORAGE InstructionTLBError andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */ andis. r10,r9,SRR1_ISI_NOPT@h beq+ .Litlbie tlbie r12 - /* 0x400 is InstructionAccess exception, needed by bad_page_fault() */ .Litlbie: stw r12, _DAR(r11) stw r5, _DSISR(r11) - EXC_XFER_LITE(0x400, handle_page_fault) + prepare_transfer_to_handler + bl do_page_fault + b interrupt_return /* This is the data TLB error on the MPC8xx. This could be due to * many reasons, including a dirty update to a pte. We bail out to * a higher level function that can handle it. */ - . 
= 0x1400 -DataTLBError: + START_EXCEPTION(INTERRUPT_DATA_TLB_ERROR_8xx, DataTLBError) EXCEPTION_PROLOG_0 handle_dar_dsisr=1 mfspr r11, SPRN_DAR cmpwi cr1, r11, RPN_PATTERN beq- cr1, FixupDAR /* must be a buggy dcbX, icbi insn. */ DARFixed:/* Return from dcbx instruction bug workaround */ -#ifdef CONFIG_VMAP_STACK - li r11, RPN_PATTERN - mtspr SPRN_DAR, r11 /* Tag DAR, to be used in DTLB Error */ -#endif EXCEPTION_PROLOG_1 - EXCEPTION_PROLOG_2 handle_dar_dsisr=1 - get_and_save_dar_dsisr_on_stack r4, r5, r11 + /* 0x300 is DataAccess exception, needed by bad_page_fault() */ + EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataTLBError handle_dar_dsisr=1 + lwz r4, _DAR(r11) + lwz r5, _DSISR(r11) andis. r10,r5,DSISR_NOHPTE@h beq+ .Ldtlbie tlbie r4 .Ldtlbie: -#ifndef CONFIG_VMAP_STACK - li r10,RPN_PATTERN - mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */ -#endif - /* 0x300 is DataAccess exception, needed by bad_page_fault() */ - EXC_XFER_LITE(0x300, handle_page_fault) + prepare_transfer_to_handler + bl do_page_fault + b interrupt_return -stack_overflow: +#ifdef CONFIG_VMAP_STACK vmap_stack_overflow_exception +#endif /* On the MPC8xx, these next four traps are used for development * support of breakpoints and such. Someday I will get around to * using them. */ -do_databreakpoint: - EXCEPTION_PROLOG_1 - EXCEPTION_PROLOG_2 handle_dar_dsisr=1 - addi r3,r1,STACK_FRAME_OVERHEAD - mfspr r4,SPRN_BAR - stw r4,_DAR(r11) -#ifndef CONFIG_VMAP_STACK - mfspr r5,SPRN_DSISR - stw r5,_DSISR(r11) -#endif - EXC_XFER_STD(0x1c00, do_break) - - . = 0x1c00 -DataBreakpoint: + START_EXCEPTION(INTERRUPT_DATA_BREAKPOINT_8xx, DataBreakpoint) EXCEPTION_PROLOG_0 handle_dar_dsisr=1 mfspr r11, SPRN_SRR0 cmplwi cr1, r11, (.Ldtlbie - PAGE_OFFSET)@l cmplwi cr7, r11, (.Litlbie - PAGE_OFFSET)@l cror 4*cr1+eq, 4*cr1+eq, 4*cr7+eq - bne cr1, do_databreakpoint + bne cr1, 1f mtcr r10 mfspr r10, SPRN_SPRG_SCRATCH0 mfspr r11, SPRN_SPRG_SCRATCH1 rfi +1: EXCEPTION_PROLOG_1 + EXCEPTION_PROLOG_2 INTERRUPT_DATA_BREAKPOINT_8xx DataBreakpoint handle_dar_dsisr=1 + mfspr r4,SPRN_BAR + stw r4,_DAR(r11) + prepare_transfer_to_handler + bl do_break + REST_NVGPRS(r1) + b interrupt_return + #ifdef CONFIG_PERF_EVENTS - . = 0x1d00 -InstructionBreakpoint: + START_EXCEPTION(INTERRUPT_INST_BREAKPOINT_8xx, InstructionBreakpoint) mtspr SPRN_SPRG_SCRATCH0, r10 lwz r10, (instruction_counter - PAGE_OFFSET)@l(0) addi r10, r10, -1 @@ -396,11 +385,12 @@ InstructionBreakpoint: mfspr r10, SPRN_SPRG_SCRATCH0 rfi #else - EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD) + EXCEPTION(INTERRUPT_INST_BREAKPOINT_8xx, Trap_1d, unknown_exception) #endif - EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD) + EXCEPTION(0x1e00, Trap_1e, unknown_exception) + EXCEPTION(0x1f00, Trap_1f, unknown_exception) + __HEAD . = 0x2000 /* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions @@ -510,14 +500,10 @@ FixupDAR:/* Entry point for dcbx workaround. 
*/ 152: mfdar r11 mtctr r11 /* restore ctr reg from DAR */ -#ifdef CONFIG_VMAP_STACK mfspr r11, SPRN_SPRG_THREAD stw r10, DAR(r11) mfspr r10, SPRN_DSISR stw r10, DSISR(r11) -#else - mtdar r10 /* save fault EA to DAR */ -#endif mfspr r10,SPRN_M_TW b DARFixed /* Go back to normal TLB handling */ @@ -819,7 +805,7 @@ EXPORT_SYMBOL(empty_zero_page) swapper_pg_dir: .space PGD_TABLE_SIZE -/* Room for two PTE table poiners, usually the kernel and current user +/* Room for two PTE table pointers, usually the kernel and current user * pointer to their respective root page table (pgdir). */ .globl abatron_pteptrs diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S index 565e84e20a72..065178f19a3d 100644 --- a/arch/powerpc/kernel/head_book3s_32.S +++ b/arch/powerpc/kernel/head_book3s_32.S @@ -31,6 +31,7 @@ #include <asm/kvm_book3s_asm.h> #include <asm/export.h> #include <asm/feature-fixups.h> +#include <asm/interrupt.h> #include "head_32.h" @@ -239,7 +240,7 @@ __secondary_hold_acknowledge: /* System reset */ /* core99 pmac starts the seconary here by changing the vector, and putting it back to what it was (unknown_async_exception) when done. */ - EXCEPTION(0x100, Reset, unknown_async_exception, EXC_XFER_STD) + EXCEPTION(INTERRUPT_SYSTEM_RESET, Reset, unknown_async_exception) /* Machine check */ /* @@ -255,40 +256,28 @@ __secondary_hold_acknowledge: * pointer when we take an exception from supervisor mode.) * -- paulus. */ - . = 0x200 - DO_KVM 0x200 -MachineCheck: + START_EXCEPTION(INTERRUPT_MACHINE_CHECK, MachineCheck) EXCEPTION_PROLOG_0 #ifdef CONFIG_PPC_CHRP -#ifdef CONFIG_VMAP_STACK mtspr SPRN_SPRG_SCRATCH2,r1 mfspr r1, SPRN_SPRG_THREAD lwz r1, RTAS_SP(r1) cmpwi cr1, r1, 0 bne cr1, 7f mfspr r1, SPRN_SPRG_SCRATCH2 -#else - mfspr r11, SPRN_SPRG_THREAD - lwz r11, RTAS_SP(r11) - cmpwi cr1, r11, 0 - bne cr1, 7f -#endif #endif /* CONFIG_PPC_CHRP */ - EXCEPTION_PROLOG_1 for_rtas=1 -7: EXCEPTION_PROLOG_2 - addi r3,r1,STACK_FRAME_OVERHEAD + EXCEPTION_PROLOG_1 +7: EXCEPTION_PROLOG_2 0x200 MachineCheck #ifdef CONFIG_PPC_CHRP - beq cr1, machine_check_tramp + beq cr1, 1f twi 31, 0, 0 -#else - b machine_check_tramp #endif +1: prepare_transfer_to_handler + bl machine_check_exception + b interrupt_return /* Data access exception. */ - . = 0x300 - DO_KVM 0x300 -DataAccess: -#ifdef CONFIG_VMAP_STACK + START_EXCEPTION(INTERRUPT_DATA_STORAGE, DataAccess) #ifdef CONFIG_PPC_BOOK3S_604 BEGIN_MMU_FTR_SECTION mtspr SPRN_SPRG_SCRATCH2,r10 @@ -309,30 +298,20 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE) #endif 1: EXCEPTION_PROLOG_0 handle_dar_dsisr=1 EXCEPTION_PROLOG_1 - b handle_page_fault_tramp_1 -#else /* CONFIG_VMAP_STACK */ - EXCEPTION_PROLOG handle_dar_dsisr=1 - get_and_save_dar_dsisr_on_stack r4, r5, r11 -#ifdef CONFIG_PPC_BOOK3S_604 -BEGIN_MMU_FTR_SECTION - andis. r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h - bne handle_page_fault_tramp_2 /* if not, try to put a PTE */ - rlwinm r3, r5, 32 - 15, 21, 21 /* DSISR_STORE -> _PAGE_RW */ - bl hash_page - b handle_page_fault_tramp_1 -MMU_FTR_SECTION_ELSE -#endif - b handle_page_fault_tramp_2 -#ifdef CONFIG_PPC_BOOK3S_604 -ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE) -#endif -#endif /* CONFIG_VMAP_STACK */ + EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1 + prepare_transfer_to_handler + lwz r5, _DSISR(r11) + andis. r0, r5, DSISR_DABRMATCH@h + bne- 1f + bl do_page_fault + b interrupt_return +1: bl do_break + REST_NVGPRS(r1) + b interrupt_return + /* Instruction access exception. */ - . 
= 0x400 - DO_KVM 0x400 -InstructionAccess: -#ifdef CONFIG_VMAP_STACK + START_EXCEPTION(INTERRUPT_INST_STORAGE, InstructionAccess) mtspr SPRN_SPRG_SCRATCH0,r10 mtspr SPRN_SPRG_SCRATCH1,r11 mfspr r10, SPRN_SPRG_THREAD @@ -352,43 +331,35 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) andi. r11, r11, MSR_PR EXCEPTION_PROLOG_1 - EXCEPTION_PROLOG_2 -#else /* CONFIG_VMAP_STACK */ - EXCEPTION_PROLOG - andis. r0,r9,SRR1_ISI_NOPT@h /* no pte found? */ - beq 1f /* if so, try to put a PTE */ - li r3,0 /* into the hash table */ - mr r4,r12 /* SRR0 is fault address */ -#ifdef CONFIG_PPC_BOOK3S_604 -BEGIN_MMU_FTR_SECTION - bl hash_page -END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) -#endif -#endif /* CONFIG_VMAP_STACK */ + EXCEPTION_PROLOG_2 INTERRUPT_INST_STORAGE InstructionAccess andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */ stw r5, _DSISR(r11) stw r12, _DAR(r11) - EXC_XFER_LITE(0x400, handle_page_fault) + prepare_transfer_to_handler + bl do_page_fault + b interrupt_return /* External interrupt */ - EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) + EXCEPTION(INTERRUPT_EXTERNAL, HardwareInterrupt, do_IRQ) /* Alignment exception */ - . = 0x600 - DO_KVM 0x600 -Alignment: - EXCEPTION_PROLOG handle_dar_dsisr=1 - save_dar_dsisr_on_stack r4, r5, r11 - addi r3,r1,STACK_FRAME_OVERHEAD - b alignment_exception_tramp + START_EXCEPTION(INTERRUPT_ALIGNMENT, Alignment) + EXCEPTION_PROLOG INTERRUPT_ALIGNMENT Alignment handle_dar_dsisr=1 + prepare_transfer_to_handler + bl alignment_exception + REST_NVGPRS(r1) + b interrupt_return /* Program check exception */ - EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD) + START_EXCEPTION(INTERRUPT_PROGRAM, ProgramCheck) + EXCEPTION_PROLOG INTERRUPT_PROGRAM ProgramCheck + prepare_transfer_to_handler + bl program_check_exception + REST_NVGPRS(r1) + b interrupt_return /* Floating-point unavailable */ - . = 0x800 - DO_KVM 0x800 -FPUnavailable: + START_EXCEPTION(0x800, FPUnavailable) #ifdef CONFIG_PPC_FPU BEGIN_FTR_SECTION /* @@ -397,30 +368,29 @@ BEGIN_FTR_SECTION */ b ProgramCheck END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE) - EXCEPTION_PROLOG + EXCEPTION_PROLOG INTERRUPT_FP_UNAVAIL FPUnavailable beq 1f bl load_up_fpu /* if from user, just load it up */ b fast_exception_return -1: addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_LITE(0x800, kernel_fp_unavailable_exception) +1: prepare_transfer_to_handler + bl kernel_fp_unavailable_exception + b interrupt_return #else b ProgramCheck #endif /* Decrementer */ - EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE) + EXCEPTION(INTERRUPT_DECREMENTER, Decrementer, timer_interrupt) - EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD) - EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD) + EXCEPTION(0xa00, Trap_0a, unknown_exception) + EXCEPTION(0xb00, Trap_0b, unknown_exception) /* System call */ - . = 0xc00 - DO_KVM 0xc00 -SystemCall: - SYSCALL_ENTRY 0xc00 + START_EXCEPTION(INTERRUPT_SYSCALL, SystemCall) + SYSCALL_ENTRY INTERRUPT_SYSCALL - EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD) - EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD) + EXCEPTION(INTERRUPT_TRACE, SingleStep, single_step_exception) + EXCEPTION(0xe00, Trap_0e, unknown_exception) /* * The Altivec unavailable trap is at 0x0f20. Foo. @@ -430,19 +400,18 @@ SystemCall: * non-altivec kernel running on a machine with altivec just * by executing an altivec instruction. */ - . 
= 0xf00 - DO_KVM 0xf00 + START_EXCEPTION(INTERRUPT_PERFMON, PerformanceMonitorTrap) b PerformanceMonitor - . = 0xf20 - DO_KVM 0xf20 + START_EXCEPTION(INTERRUPT_ALTIVEC_UNAVAIL, AltiVecUnavailableTrap) b AltiVecUnavailable + __HEAD /* * Handle TLB miss for instruction on 603/603e. * Note: we get an alternate set of r0 - r3 to use automatically. */ - . = 0x1000 + . = INTERRUPT_INST_TLB_MISS_603 InstructionTLBMiss: /* * r0: scratch @@ -508,7 +477,7 @@ InstructionAddressInvalid: /* * Handle TLB miss for DATA Load operation on 603/603e */ - . = 0x1100 + . = INTERRUPT_DATA_LOAD_TLB_MISS_603 DataLoadTLBMiss: /* * r0: scratch @@ -586,7 +555,7 @@ DataAddressInvalid: /* * Handle TLB miss for DATA Store on 603/603e */ - . = 0x1200 + . = INTERRUPT_DATA_STORE_TLB_MISS_603 DataStoreTLBMiss: /* * r0: scratch @@ -650,57 +619,39 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU) #define TAUException unknown_async_exception #endif - EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD) - EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_STD) - EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_STD) - EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD) - EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD) - EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_STD) - EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_STD) + EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception) + EXCEPTION(0x1400, SMI, SMIException) + EXCEPTION(0x1500, Trap_15, unknown_exception) + EXCEPTION(0x1600, Trap_16, altivec_assist_exception) + EXCEPTION(0x1700, Trap_17, TAUException) + EXCEPTION(0x1800, Trap_18, unknown_exception) + EXCEPTION(0x1900, Trap_19, unknown_exception) + EXCEPTION(0x1a00, Trap_1a, unknown_exception) + EXCEPTION(0x1b00, Trap_1b, unknown_exception) + EXCEPTION(0x1c00, Trap_1c, unknown_exception) + EXCEPTION(0x1d00, Trap_1d, unknown_exception) + EXCEPTION(0x1e00, Trap_1e, unknown_exception) + EXCEPTION(0x1f00, Trap_1f, unknown_exception) + EXCEPTION(0x2000, RunMode, RunModeException) + EXCEPTION(0x2100, Trap_21, unknown_exception) + EXCEPTION(0x2200, Trap_22, unknown_exception) + EXCEPTION(0x2300, Trap_23, 
unknown_exception) + EXCEPTION(0x2400, Trap_24, unknown_exception) + EXCEPTION(0x2500, Trap_25, unknown_exception) + EXCEPTION(0x2600, Trap_26, unknown_exception) + EXCEPTION(0x2700, Trap_27, unknown_exception) + EXCEPTION(0x2800, Trap_28, unknown_exception) + EXCEPTION(0x2900, Trap_29, unknown_exception) + EXCEPTION(0x2a00, Trap_2a, unknown_exception) + EXCEPTION(0x2b00, Trap_2b, unknown_exception) + EXCEPTION(0x2c00, Trap_2c, unknown_exception) + EXCEPTION(0x2d00, Trap_2d, unknown_exception) + EXCEPTION(0x2e00, Trap_2e, unknown_exception) + EXCEPTION(0x2f00, Trap_2f, unknown_exception) + __HEAD . = 0x3000 -machine_check_tramp: - EXC_XFER_STD(0x200, machine_check_exception) - -alignment_exception_tramp: - EXC_XFER_STD(0x600, alignment_exception) - -handle_page_fault_tramp_1: -#ifdef CONFIG_VMAP_STACK - EXCEPTION_PROLOG_2 handle_dar_dsisr=1 -#endif - lwz r5, _DSISR(r11) - /* fall through */ -handle_page_fault_tramp_2: - andis. r0, r5, DSISR_DABRMATCH@h - bne- 1f - EXC_XFER_LITE(0x300, handle_page_fault) -1: EXC_XFER_STD(0x300, do_break) - -#ifdef CONFIG_VMAP_STACK #ifdef CONFIG_PPC_BOOK3S_604 .macro save_regs_thread thread stw r0, THR0(\thread) @@ -775,26 +726,31 @@ fast_hash_page_return: rfi #endif /* CONFIG_PPC_BOOK3S_604 */ -stack_overflow: +#ifdef CONFIG_VMAP_STACK vmap_stack_overflow_exception #endif + __HEAD AltiVecUnavailable: - EXCEPTION_PROLOG + EXCEPTION_PROLOG 0xf20 AltiVecUnavailable #ifdef CONFIG_ALTIVEC beq 1f bl load_up_altivec /* if from user, just load it up */ b fast_exception_return #endif /* CONFIG_ALTIVEC */ -1: addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_LITE(0xf20, altivec_unavailable_exception) +1: prepare_transfer_to_handler + bl altivec_unavailable_exception + b interrupt_return + __HEAD PerformanceMonitor: - EXCEPTION_PROLOG - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_STD(0xf00, performance_monitor_exception) + EXCEPTION_PROLOG 0xf00 PerformanceMonitor + prepare_transfer_to_handler + bl performance_monitor_exception + b interrupt_return + __HEAD /* * This code is jumped to from the startup code to copy * the kernel image to physical address PHYSICAL_START. diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index 47857795f50a..f82470091697 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -44,7 +44,7 @@ END_BTB_FLUSH_SECTION #endif -#define NORMAL_EXCEPTION_PROLOG(intno) \ +#define NORMAL_EXCEPTION_PROLOG(trapno, intno) \ mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \ mfspr r10, SPRN_SPRG_THREAD; \ stw r11, THREAD_NORMSAVE(0)(r10); \ @@ -53,6 +53,8 @@ END_BTB_FLUSH_SECTION mfspr r11, SPRN_SRR1; \ DO_KVM BOOKE_INTERRUPT_##intno SPRN_SRR1; \ andi. r11, r11, MSR_PR; /* check whether user or kernel */\ + LOAD_REG_IMMEDIATE(r11, MSR_KERNEL); \ + mtmsr r11; \ mr r11, r1; \ beq 1f; \ BOOKE_CLEAR_BTB(r11) \ @@ -76,12 +78,39 @@ END_BTB_FLUSH_SECTION stw r1, 0(r11); \ mr r1, r11; \ rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) 
*/\ - stw r0,GPR0(r11); \ - lis r10, STACK_FRAME_REGS_MARKER@ha;/* exception frame marker */ \ - addi r10, r10, STACK_FRAME_REGS_MARKER@l; \ - stw r10, 8(r11); \ - SAVE_4GPRS(3, r11); \ - SAVE_2GPRS(7, r11) + COMMON_EXCEPTION_PROLOG_END trapno + +.macro COMMON_EXCEPTION_PROLOG_END trapno + stw r0,GPR0(r1) + lis r10, STACK_FRAME_REGS_MARKER@ha /* exception frame marker */ + addi r10, r10, STACK_FRAME_REGS_MARKER@l + stw r10, 8(r1) + li r10, \trapno + stw r10,_TRAP(r1) + SAVE_4GPRS(3, r1) + SAVE_2GPRS(7, r1) + SAVE_NVGPRS(r1) + stw r2,GPR2(r1) + stw r12,_NIP(r1) + stw r9,_MSR(r1) + mfctr r10 + mfspr r2,SPRN_SPRG_THREAD + stw r10,_CTR(r1) + tovirt(r2, r2) + mfspr r10,SPRN_XER + addi r2, r2, -THREAD + stw r10,_XER(r1) + addi r3,r1,STACK_FRAME_OVERHEAD +.endm + +.macro prepare_transfer_to_handler +#ifdef CONFIG_E500 + andi. r12,r9,MSR_PR + bne 777f + bl prepare_transfer_to_handler +777: +#endif +.endm .macro SYSCALL_ENTRY trapno intno srr1 mfspr r10, SPRN_SPRG_THREAD @@ -180,7 +209,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV) * registers as the normal prolog above. Instead we use a portion of the * critical/machine check exception stack at low physical addresses. */ -#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, intno, exc_level_srr0, exc_level_srr1) \ +#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, trapno, intno, exc_level_srr0, exc_level_srr1) \ mtspr SPRN_SPRG_WSCRATCH_##exc_level,r8; \ BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);/* r8 points to the exc_level stack*/ \ stw r9,GPR9(r8); /* save various registers */\ @@ -192,6 +221,8 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV) DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \ BOOKE_CLEAR_BTB(r10) \ andi. r11,r11,MSR_PR; \ + LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)); \ + mtmsr r11; \ mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\ lwz r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\ addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\ @@ -221,16 +252,44 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV) stw r1,0(r11); \ mr r1,r11; \ rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) 
*/\ - stw r0,GPR0(r11); \ - SAVE_4GPRS(3, r11); \ - SAVE_2GPRS(7, r11) + COMMON_EXCEPTION_PROLOG_END trapno + +#define SAVE_xSRR(xSRR) \ + mfspr r0,SPRN_##xSRR##0; \ + stw r0,_##xSRR##0(r1); \ + mfspr r0,SPRN_##xSRR##1; \ + stw r0,_##xSRR##1(r1) + + +.macro SAVE_MMU_REGS +#ifdef CONFIG_PPC_BOOK3E_MMU + mfspr r0,SPRN_MAS0 + stw r0,MAS0(r1) + mfspr r0,SPRN_MAS1 + stw r0,MAS1(r1) + mfspr r0,SPRN_MAS2 + stw r0,MAS2(r1) + mfspr r0,SPRN_MAS3 + stw r0,MAS3(r1) + mfspr r0,SPRN_MAS6 + stw r0,MAS6(r1) +#ifdef CONFIG_PHYS_64BIT + mfspr r0,SPRN_MAS7 + stw r0,MAS7(r1) +#endif /* CONFIG_PHYS_64BIT */ +#endif /* CONFIG_PPC_BOOK3E_MMU */ +#ifdef CONFIG_44x + mfspr r0,SPRN_MMUCR + stw r0,MMUCR(r1) +#endif +.endm -#define CRITICAL_EXCEPTION_PROLOG(intno) \ - EXC_LEVEL_EXCEPTION_PROLOG(CRIT, intno, SPRN_CSRR0, SPRN_CSRR1) -#define DEBUG_EXCEPTION_PROLOG \ - EXC_LEVEL_EXCEPTION_PROLOG(DBG, DEBUG, SPRN_DSRR0, SPRN_DSRR1) -#define MCHECK_EXCEPTION_PROLOG \ - EXC_LEVEL_EXCEPTION_PROLOG(MC, MACHINE_CHECK, \ +#define CRITICAL_EXCEPTION_PROLOG(trapno, intno) \ + EXC_LEVEL_EXCEPTION_PROLOG(CRIT, trapno+2, intno, SPRN_CSRR0, SPRN_CSRR1) +#define DEBUG_EXCEPTION_PROLOG(trapno) \ + EXC_LEVEL_EXCEPTION_PROLOG(DBG, trapno+8, DEBUG, SPRN_DSRR0, SPRN_DSRR1) +#define MCHECK_EXCEPTION_PROLOG(trapno) \ + EXC_LEVEL_EXCEPTION_PROLOG(MC, trapno+4, MACHINE_CHECK, \ SPRN_MCSRR0, SPRN_MCSRR1) /* @@ -257,44 +316,34 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV) .align 5; \ label: -#define EXCEPTION(n, intno, label, hdlr, xfer) \ +#define EXCEPTION(n, intno, label, hdlr) \ START_EXCEPTION(label); \ - NORMAL_EXCEPTION_PROLOG(intno); \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - xfer(n, hdlr) + NORMAL_EXCEPTION_PROLOG(n, intno); \ + prepare_transfer_to_handler; \ + bl hdlr; \ + b interrupt_return #define CRITICAL_EXCEPTION(n, intno, label, hdlr) \ START_EXCEPTION(label); \ - CRITICAL_EXCEPTION_PROLOG(intno); \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ - crit_transfer_to_handler, ret_from_crit_exc) + CRITICAL_EXCEPTION_PROLOG(n, intno); \ + SAVE_MMU_REGS; \ + SAVE_xSRR(SRR); \ + prepare_transfer_to_handler; \ + bl hdlr; \ + b ret_from_crit_exc #define MCHECK_EXCEPTION(n, label, hdlr) \ START_EXCEPTION(label); \ - MCHECK_EXCEPTION_PROLOG; \ + MCHECK_EXCEPTION_PROLOG(n); \ mfspr r5,SPRN_ESR; \ stw r5,_ESR(r11); \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - EXC_XFER_TEMPLATE(hdlr, n+4, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ - mcheck_transfer_to_handler, ret_from_mcheck_exc) - -#define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret) \ - li r10,trap; \ - stw r10,_TRAP(r11); \ - lis r10,msr@h; \ - ori r10,r10,msr@l; \ - bl tfer; \ - .long hdlr; \ - .long ret - -#define EXC_XFER_STD(n, hdlr) \ - EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full, \ - ret_from_except_full) - -#define EXC_XFER_LITE(n, hdlr) \ - EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \ - ret_from_except) + SAVE_xSRR(DSRR); \ + SAVE_xSRR(CSRR); \ + SAVE_MMU_REGS; \ + SAVE_xSRR(SRR); \ + prepare_transfer_to_handler; \ + bl hdlr; \ + b ret_from_mcheck_exc /* Check for a single step debug exception while in an exception * handler before state has been saved. This is to catch the case @@ -311,7 +360,7 @@ label: */ #define DEBUG_DEBUG_EXCEPTION \ START_EXCEPTION(DebugDebug); \ - DEBUG_EXCEPTION_PROLOG; \ + DEBUG_EXCEPTION_PROLOG(2000); \ \ /* \ * If there is a single step or branch-taken exception in an \ @@ -360,12 +409,16 @@ label: /* continue normal handling for a debug exception... 
*/ \ 2: mfspr r4,SPRN_DBSR; \ stw r4,_ESR(r11); /* DebugException takes DBSR in _ESR */\ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - EXC_XFER_TEMPLATE(DebugException, 0x2008, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), debug_transfer_to_handler, ret_from_debug_exc) + SAVE_xSRR(CSRR); \ + SAVE_MMU_REGS; \ + SAVE_xSRR(SRR); \ + prepare_transfer_to_handler; \ + bl DebugException; \ + b ret_from_debug_exc #define DEBUG_CRIT_EXCEPTION \ START_EXCEPTION(DebugCrit); \ - CRITICAL_EXCEPTION_PROLOG(DEBUG); \ + CRITICAL_EXCEPTION_PROLOG(2000,DEBUG); \ \ /* \ * If there is a single step or branch-taken exception in an \ @@ -414,58 +467,71 @@ label: /* continue normal handling for a critical exception... */ \ 2: mfspr r4,SPRN_DBSR; \ stw r4,_ESR(r11); /* DebugException takes DBSR in _ESR */\ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), crit_transfer_to_handler, ret_from_crit_exc) + SAVE_MMU_REGS; \ + SAVE_xSRR(SRR); \ + prepare_transfer_to_handler; \ + bl DebugException; \ + b ret_from_crit_exc #define DATA_STORAGE_EXCEPTION \ START_EXCEPTION(DataStorage) \ - NORMAL_EXCEPTION_PROLOG(DATA_STORAGE); \ + NORMAL_EXCEPTION_PROLOG(0x300, DATA_STORAGE); \ mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \ stw r5,_ESR(r11); \ mfspr r4,SPRN_DEAR; /* Grab the DEAR */ \ stw r4, _DEAR(r11); \ - EXC_XFER_LITE(0x0300, handle_page_fault) + prepare_transfer_to_handler; \ + bl do_page_fault; \ + b interrupt_return #define INSTRUCTION_STORAGE_EXCEPTION \ START_EXCEPTION(InstructionStorage) \ - NORMAL_EXCEPTION_PROLOG(INST_STORAGE); \ + NORMAL_EXCEPTION_PROLOG(0x400, INST_STORAGE); \ mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \ stw r5,_ESR(r11); \ stw r12, _DEAR(r11); /* Pass SRR0 as arg2 */ \ - EXC_XFER_LITE(0x0400, handle_page_fault) + prepare_transfer_to_handler; \ + bl do_page_fault; \ + b interrupt_return #define ALIGNMENT_EXCEPTION \ START_EXCEPTION(Alignment) \ - NORMAL_EXCEPTION_PROLOG(ALIGNMENT); \ + NORMAL_EXCEPTION_PROLOG(0x600, ALIGNMENT); \ mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \ stw r4,_DEAR(r11); \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - EXC_XFER_STD(0x0600, alignment_exception) + prepare_transfer_to_handler; \ + bl alignment_exception; \ + REST_NVGPRS(r1); \ + b interrupt_return #define PROGRAM_EXCEPTION \ START_EXCEPTION(Program) \ - NORMAL_EXCEPTION_PROLOG(PROGRAM); \ + NORMAL_EXCEPTION_PROLOG(0x700, PROGRAM); \ mfspr r4,SPRN_ESR; /* Grab the ESR and save it */ \ stw r4,_ESR(r11); \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - EXC_XFER_STD(0x0700, program_check_exception) + prepare_transfer_to_handler; \ + bl program_check_exception; \ + REST_NVGPRS(r1); \ + b interrupt_return #define DECREMENTER_EXCEPTION \ START_EXCEPTION(Decrementer) \ - NORMAL_EXCEPTION_PROLOG(DECREMENTER); \ + NORMAL_EXCEPTION_PROLOG(0x900, DECREMENTER); \ lis r0,TSR_DIS@h; /* Setup the DEC interrupt mask */ \ mtspr SPRN_TSR,r0; /* Clear the DEC interrupt */ \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - EXC_XFER_LITE(0x0900, timer_interrupt) + prepare_transfer_to_handler; \ + bl timer_interrupt; \ + b interrupt_return #define FP_UNAVAILABLE_EXCEPTION \ START_EXCEPTION(FloatingPointUnavailable) \ - NORMAL_EXCEPTION_PROLOG(FP_UNAVAIL); \ + NORMAL_EXCEPTION_PROLOG(0x800, FP_UNAVAIL); \ beq 1f; \ bl load_up_fpu; /* if from user, just load it up */ \ b fast_exception_return; \ -1: addi r3,r1,STACK_FRAME_OVERHEAD; \ - EXC_XFER_STD(0x800, kernel_fp_unavailable_exception) +1: prepare_transfer_to_handler; \ + bl kernel_fp_unavailable_exception; \ + b 
interrupt_return #else /* __ASSEMBLY__ */ struct exception_regs { @@ -481,7 +547,6 @@ struct exception_regs { unsigned long csrr1; unsigned long dsrr0; unsigned long dsrr1; - unsigned long saved_ksp_limit; }; /* ensure this structure is always sized to a multiple of the stack alignment */ diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 3f4a40cccef5..a1a5c3f10dc4 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -113,7 +113,7 @@ _ENTRY(_start); 1: /* - * We have the runtime (virutal) address of our base. + * We have the runtime (virtual) address of our base. * We calculate our shift of offset from a 64M page. * We could map the 64M page we belong to at PAGE_OFFSET and * get going from there. @@ -363,23 +363,26 @@ interrupt_base: /* Data Storage Interrupt */ START_EXCEPTION(DataStorage) - NORMAL_EXCEPTION_PROLOG(DATA_STORAGE) + NORMAL_EXCEPTION_PROLOG(0x300, DATA_STORAGE) mfspr r5,SPRN_ESR /* Grab the ESR, save it */ stw r5,_ESR(r11) mfspr r4,SPRN_DEAR /* Grab the DEAR, save it */ stw r4, _DEAR(r11) andis. r10,r5,(ESR_ILK|ESR_DLK)@h bne 1f - EXC_XFER_LITE(0x0300, handle_page_fault) + prepare_transfer_to_handler + bl do_page_fault + b interrupt_return 1: - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_LITE(0x0300, CacheLockingException) + prepare_transfer_to_handler + bl CacheLockingException + b interrupt_return /* Instruction Storage Interrupt */ INSTRUCTION_STORAGE_EXCEPTION /* External Input Interrupt */ - EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ, EXC_XFER_LITE) + EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ) /* Alignment Interrupt */ ALIGNMENT_EXCEPTION @@ -391,8 +394,7 @@ interrupt_base: #ifdef CONFIG_PPC_FPU FP_UNAVAILABLE_EXCEPTION #else - EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \ - unknown_exception, EXC_XFER_STD) + EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, unknown_exception) #endif /* System Call Interrupt */ @@ -400,16 +402,14 @@ interrupt_base: SYSCALL_ENTRY 0xc00 BOOKE_INTERRUPT_SYSCALL SPRN_SRR1 /* Auxiliary Processor Unavailable Interrupt */ - EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, \ - unknown_exception, EXC_XFER_STD) + EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, unknown_exception) /* Decrementer Interrupt */ DECREMENTER_EXCEPTION /* Fixed Internal Timer Interrupt */ /* TODO: Add FIT support */ - EXCEPTION(0x3100, FIT, FixedIntervalTimer, \ - unknown_exception, EXC_XFER_STD) + EXCEPTION(0x3100, FIT, FixedIntervalTimer, unknown_exception) /* Watchdog Timer Interrupt */ #ifdef CONFIG_BOOKE_WDT @@ -497,7 +497,7 @@ END_BTB_FLUSH_SECTION #endif #endif - bne 2f /* Bail if permission/valid mismach */ + bne 2f /* Bail if permission/valid mismatch */ /* Jump to common tlb load */ b finish_tlb_load @@ -592,7 +592,7 @@ END_BTB_FLUSH_SECTION #endif #endif - bne 2f /* Bail if permission mismach */ + bne 2f /* Bail if permission mismatch */ /* Jump to common TLB load point */ b finish_tlb_load @@ -614,38 +614,44 @@ END_BTB_FLUSH_SECTION #ifdef CONFIG_SPE /* SPE Unavailable */ START_EXCEPTION(SPEUnavailable) - NORMAL_EXCEPTION_PROLOG(SPE_UNAVAIL) + NORMAL_EXCEPTION_PROLOG(0x2010, SPE_UNAVAIL) beq 1f bl load_up_spe b fast_exception_return -1: addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_LITE(0x2010, KernelSPE) +1: prepare_transfer_to_handler + bl KernelSPE + b interrupt_return #elif defined(CONFIG_SPE_POSSIBLE) - EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \ - unknown_exception, EXC_XFER_STD) + EXCEPTION(0x2020, 
SPE_UNAVAIL, SPEUnavailable, unknown_exception) #endif /* CONFIG_SPE_POSSIBLE */ /* SPE Floating Point Data */ #ifdef CONFIG_SPE - EXCEPTION(0x2030, SPE_FP_DATA, SPEFloatingPointData, - SPEFloatingPointException, EXC_XFER_STD) + START_EXCEPTION(SPEFloatingPointData) + NORMAL_EXCEPTION_PROLOG(0x2030, SPE_FP_DATA) + prepare_transfer_to_handler + bl SPEFloatingPointException + REST_NVGPRS(r1) + b interrupt_return /* SPE Floating Point Round */ - EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \ - SPEFloatingPointRoundException, EXC_XFER_STD) + START_EXCEPTION(SPEFloatingPointRound) + NORMAL_EXCEPTION_PROLOG(0x2050, SPE_FP_ROUND) + prepare_transfer_to_handler + bl SPEFloatingPointRoundException + REST_NVGPRS(r1) + b interrupt_return #elif defined(CONFIG_SPE_POSSIBLE) - EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, - unknown_exception, EXC_XFER_STD) - EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \ - unknown_exception, EXC_XFER_STD) + EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, unknown_exception) + EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, unknown_exception) #endif /* CONFIG_SPE_POSSIBLE */ /* Performance Monitor */ EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \ - performance_monitor_exception, EXC_XFER_STD) + performance_monitor_exception) - EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception, EXC_XFER_STD) + EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception) CRITICAL_EXCEPTION(0x2080, DOORBELL_CRITICAL, \ CriticalDoorbell, unknown_exception) @@ -660,10 +666,10 @@ END_BTB_FLUSH_SECTION unknown_exception) /* Hypercall */ - EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception, EXC_XFER_STD) + EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception) /* Embedded Hypervisor Privilege */ - EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception, EXC_XFER_STD) + EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception) interrupt_end: @@ -854,7 +860,7 @@ KernelSPE: lwz r5,_NIP(r1) bl printk #endif - b ret_from_except + b interrupt_return #ifdef CONFIG_PRINTK 87: .string "SPE used in kernel (task=%p, pc=%x) \n" #endif diff --git a/arch/powerpc/kernel/hw_breakpoint_constraints.c b/arch/powerpc/kernel/hw_breakpoint_constraints.c index 867ee4aa026a..675d1f66ab72 100644 --- a/arch/powerpc/kernel/hw_breakpoint_constraints.c +++ b/arch/powerpc/kernel/hw_breakpoint_constraints.c @@ -141,7 +141,7 @@ void wp_get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr, { struct instruction_op op; - if (__get_user_instr_inatomic(*instr, (void __user *)regs->nip)) + if (__get_user_instr(*instr, (void __user *)regs->nip)) return; analyse_instr(&op, regs, *instr); diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S index 69df840f7253..13cad9297d82 100644 --- a/arch/powerpc/kernel/idle_6xx.S +++ b/arch/powerpc/kernel/idle_6xx.S @@ -145,9 +145,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) /* * Return from NAP/DOZE mode, restore some CPU specific registers, - * we are called with DR/IR still off and r2 containing physical - * address of current. R11 points to the exception frame (physical - * address). We have to preserve r10. + * R11 points to the exception frame. We have to preserve r10. */ _GLOBAL(power_save_ppc32_restore) lwz r9,_LINK(r11) /* interrupted in ppc6xx_idle: */ @@ -166,11 +164,7 @@ BEGIN_FTR_SECTION mfspr r9,SPRN_HID0 andis. 
r9,r9,HID0_NAP@h beq 1f -#ifdef CONFIG_VMAP_STACK addis r9, r11, nap_save_msscr0@ha -#else - addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha -#endif lwz r9,nap_save_msscr0@l(r9) mtspr SPRN_MSSCR0, r9 sync @@ -178,15 +172,11 @@ BEGIN_FTR_SECTION 1: END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR) BEGIN_FTR_SECTION -#ifdef CONFIG_VMAP_STACK addis r9, r11, nap_save_hid1@ha -#else - addis r9,r11,(nap_save_hid1-KERNELBASE)@ha -#endif lwz r9,nap_save_hid1@l(r9) mtspr SPRN_HID1, r9 END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX) - b transfer_to_handler_cont + blr _ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore) .data diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index f9e6d83e6720..abb719b21cae 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -209,4 +209,8 @@ _GLOBAL(power4_idle_nap) mtmsrd r7 isync b 1b + + .globl power4_idle_nap_return +power4_idle_nap_return: + blr #endif diff --git a/arch/powerpc/kernel/idle_e500.S b/arch/powerpc/kernel/idle_e500.S index 72c85b6f3898..9e1bc4502c50 100644 --- a/arch/powerpc/kernel/idle_e500.S +++ b/arch/powerpc/kernel/idle_e500.S @@ -74,20 +74,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) /* * Return from NAP/DOZE mode, restore some CPU specific registers, - * r2 containing physical address of current. - * r11 points to the exception frame (physical address). + * r2 containing address of current. + * r11 points to the exception frame. * We have to preserve r10. */ _GLOBAL(power_save_ppc32_restore) lwz r9,_LINK(r11) /* interrupted in e500_idle */ stw r9,_NIP(r11) /* make it do a blr */ - -#ifdef CONFIG_SMP - lwz r11,TASK_CPU(r2) /* get cpu number * 4 */ - slwi r11,r11,2 -#else - li r11,0 -#endif - - b transfer_to_handler_cont + blr _ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore) diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c index c475a229a42a..e4559f8914eb 100644 --- a/arch/powerpc/kernel/interrupt.c +++ b/arch/powerpc/kernel/interrupt.c @@ -20,6 +20,10 @@ #include <asm/time.h> #include <asm/unistd.h> +#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32) +unsigned long global_dbcr0[NR_CPUS]; +#endif + typedef long (*syscall_fn)(long, long, long, long, long, long); /* Has to run notrace because it is entered not completely "reconciled" */ @@ -29,20 +33,24 @@ notrace long system_call_exception(long r3, long r4, long r5, { syscall_fn f; + kuep_lock(); +#ifdef CONFIG_PPC32 + kuap_save_and_lock(regs); +#endif + regs->orig_gpr3 = r3; if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED); + trace_hardirqs_off(); /* finish reconciling */ + CT_WARN_ON(ct_state() == CONTEXT_KERNEL); user_exit_irqoff(); - trace_hardirqs_off(); /* finish reconciling */ - if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x)) BUG_ON(!(regs->msr & MSR_RI)); BUG_ON(!(regs->msr & MSR_PR)); - BUG_ON(!FULL_REGS(regs)); BUG_ON(arch_irq_disabled_regs(regs)); #ifdef CONFIG_PPC_PKEY @@ -69,9 +77,7 @@ notrace long system_call_exception(long r3, long r4, long r5, isync(); } else #endif -#ifdef CONFIG_PPC64 - kuap_check_amr(); -#endif + kuap_assert_locked(); booke_restore_dbcr0(); @@ -247,9 +253,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3, CT_WARN_ON(ct_state() == CONTEXT_USER); -#ifdef CONFIG_PPC64 - kuap_check_amr(); -#endif + kuap_assert_locked(); regs->result = r3; @@ -344,16 +348,13 @@ again: account_cpu_user_exit(); -#ifdef CONFIG_PPC_BOOK3S_64 /* BOOK3E and ppc32 not using this */ - /* - * We do this at the end so 
that we do context switch with KERNEL AMR - */ + /* Restore user access locks last */ kuap_user_restore(regs); -#endif + kuep_unlock(); + return ret; } -#ifndef CONFIG_PPC_BOOK3E_64 /* BOOK3E not yet using this */ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr) { unsigned long ti_flags; @@ -363,7 +364,6 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x)) BUG_ON(!(regs->msr & MSR_RI)); BUG_ON(!(regs->msr & MSR_PR)); - BUG_ON(!FULL_REGS(regs)); BUG_ON(arch_irq_disabled_regs(regs)); CT_WARN_ON(ct_state() == CONTEXT_USER); @@ -371,9 +371,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned * We don't need to restore AMR on the way back to userspace for KUAP. * AMR can only have been unlocked if we interrupted the kernel. */ -#ifdef CONFIG_PPC64 - kuap_check_amr(); -#endif + kuap_assert_locked(); local_irq_save(flags); @@ -392,7 +390,7 @@ again: ti_flags = READ_ONCE(current_thread_info()->flags); } - if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) { + if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) { if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) && unlikely((ti_flags & _TIF_RESTORE_TM))) { restore_tm_state(regs); @@ -427,12 +425,9 @@ again: account_cpu_user_exit(); - /* - * We do this at the end so that we do context switch with KERNEL AMR - */ -#ifdef CONFIG_PPC64 + /* Restore user access locks last */ kuap_user_restore(regs); -#endif + return ret; } @@ -442,25 +437,20 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign { unsigned long flags; unsigned long ret = 0; -#ifdef CONFIG_PPC64 - unsigned long amr; -#endif + unsigned long kuap; if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x) && unlikely(!(regs->msr & MSR_RI))) unrecoverable_exception(regs); BUG_ON(regs->msr & MSR_PR); - BUG_ON(!FULL_REGS(regs)); /* * CT_WARN_ON comes here via program_check_exception, * so avoid recursion. */ - if (TRAP(regs) != 0x700) + if (TRAP(regs) != INTERRUPT_PROGRAM) CT_WARN_ON(ct_state() == CONTEXT_USER); -#ifdef CONFIG_PPC64 - amr = kuap_get_and_check_amr(); -#endif + kuap = kuap_get_and_assert_locked(); if (unlikely(current_thread_info()->flags & _TIF_EMULATE_STACK_STORE)) { clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags); @@ -498,14 +488,11 @@ again: #endif /* - * Don't want to mfspr(SPRN_AMR) here, because this comes after mtmsr, - * which would cause Read-After-Write stalls. Hence, we take the AMR - * value from the check above. + * 64s does not want to mfspr(SPRN_AMR) here, because this comes after + * mtmsr, which would cause Read-After-Write stalls. Hence, take the + * AMR value from the check above. 
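A pattern that repeats through the interrupt.c hunks above is dropping the '#ifdef CONFIG_PPC64' guards around kuap_check_amr()/kuap_get_and_check_amr() and calling kuap_assert_locked()/kuap_get_and_assert_locked() unconditionally. That only works if every configuration supplies a definition; the sketch below shows the general stub idea and is not the actual powerpc kup header (the function names come from the diff, the CONFIG guard and stub bodies are assumptions):

/* Hand-written sketch of the stub approach, not the kernel's header. */
#ifdef CONFIG_PPC_KUAP
void kuap_assert_locked(void);			/* real check, may warn on mismatch */
unsigned long kuap_get_and_assert_locked(void);	/* returns the saved KUAP state */
#else
static inline void kuap_assert_locked(void) { }
static inline unsigned long kuap_get_and_assert_locked(void) { return 0; }
#endif

With stubs like these the callers shrink to a single line and the compiler drops the call entirely on configurations without KUAP.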
*/ -#ifdef CONFIG_PPC64 - kuap_kernel_restore(regs, amr); -#endif + kuap_kernel_restore(regs, kuap); return ret; } -#endif diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index c00214a4355c..57d6b85e9b96 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -72,8 +72,7 @@ static void iommu_debugfs_del(struct iommu_table *tbl) sprintf(name, "%08lx", tbl->it_index); liobn_entry = debugfs_lookup(name, iommu_debugfs_dir); - if (liobn_entry) - debugfs_remove(liobn_entry); + debugfs_remove(liobn_entry); } #else static void iommu_debugfs_add(struct iommu_table *tbl){} @@ -297,6 +296,15 @@ again: pass++; goto again; + } else if (pass == tbl->nr_pools + 1) { + /* Last resort: try largepool */ + spin_unlock(&pool->lock); + pool = &tbl->large_pool; + spin_lock(&pool->lock); + pool->hint = pool->start; + pass++; + goto again; + } else { /* Give up */ spin_unlock_irqrestore(&(pool->lock), flags); @@ -719,7 +727,6 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid, { unsigned long sz; static int welcomed = 0; - struct page *page; unsigned int i; struct iommu_pool *p; @@ -728,11 +735,11 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid, /* number of bytes needed for the bitmap */ sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); - page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz)); - if (!page) - panic("iommu_init_table: Can't allocate %ld bytes\n", sz); - tbl->it_map = page_address(page); - memset(tbl->it_map, 0, sz); + tbl->it_map = vzalloc_node(sz, nid); + if (!tbl->it_map) { + pr_err("%s: Can't allocate %ld bytes\n", __func__, sz); + return NULL; + } iommu_table_reserve_pages(tbl, res_start, res_end); @@ -774,8 +781,6 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid, static void iommu_table_free(struct kref *kref) { - unsigned long bitmap_sz; - unsigned int order; struct iommu_table *tbl; tbl = container_of(kref, struct iommu_table, it_kref); @@ -796,12 +801,8 @@ static void iommu_table_free(struct kref *kref) if (!bitmap_empty(tbl->it_map, tbl->it_size)) pr_warn("%s: Unexpected TCEs\n", __func__); - /* calculate bitmap size in bytes */ - bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); - /* free bitmap */ - order = get_order(bitmap_sz); - free_pages((unsigned long) tbl->it_map, order); + vfree(tbl->it_map); /* free table */ kfree(tbl); @@ -897,6 +898,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, unsigned int order; unsigned int nio_pages, io_order; struct page *page; + size_t size_io = size; size = PAGE_ALIGN(size); order = get_order(size); @@ -923,8 +925,9 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, memset(ret, 0, size); /* Set up tces to cover the allocated range */ - nio_pages = size >> tbl->it_page_shift; - io_order = get_iommu_order(size, tbl); + size_io = IOMMU_PAGE_ALIGN(size_io, tbl); + nio_pages = size_io >> tbl->it_page_shift; + io_order = get_iommu_order(size_io, tbl); mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, mask >> tbl->it_page_shift, io_order, 0); if (mapping == DMA_MAPPING_ERROR) { @@ -939,10 +942,9 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size, void *vaddr, dma_addr_t dma_handle) { if (tbl) { - unsigned int nio_pages; + size_t size_io = IOMMU_PAGE_ALIGN(size, tbl); + unsigned int nio_pages = size_io >> tbl->it_page_shift; - size = PAGE_ALIGN(size); - nio_pages = size >> tbl->it_page_shift; iommu_free(tbl, dma_handle, nio_pages); size = 
PAGE_ALIGN(size); free_pages((unsigned long)vaddr, get_order(size)); @@ -1096,7 +1098,7 @@ int iommu_take_ownership(struct iommu_table *tbl) spin_lock_irqsave(&tbl->large_pool.lock, flags); for (i = 0; i < tbl->nr_pools; i++) - spin_lock(&tbl->pools[i].lock); + spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); iommu_table_release_pages(tbl); @@ -1124,7 +1126,7 @@ void iommu_release_ownership(struct iommu_table *tbl) spin_lock_irqsave(&tbl->large_pool.lock, flags); for (i = 0; i < tbl->nr_pools; i++) - spin_lock(&tbl->pools[i].lock); + spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); memset(tbl->it_map, 0, sz); diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index d71fd10a1dd4..72cb45393ef2 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -104,82 +104,6 @@ static inline notrace unsigned long get_irq_happened(void) return happened; } -#ifdef CONFIG_PPC_BOOK3E - -/* This is called whenever we are re-enabling interrupts - * and returns either 0 (nothing to do) or 500/900/280 if - * there's an EE, DEC or DBELL to generate. - * - * This is called in two contexts: From arch_local_irq_restore() - * before soft-enabling interrupts, and from the exception exit - * path when returning from an interrupt from a soft-disabled to - * a soft enabled context. In both case we have interrupts hard - * disabled. - * - * We take care of only clearing the bits we handled in the - * PACA irq_happened field since we can only re-emit one at a - * time and we don't want to "lose" one. - */ -notrace unsigned int __check_irq_replay(void) -{ - /* - * We use local_paca rather than get_paca() to avoid all - * the debug_smp_processor_id() business in this low level - * function - */ - unsigned char happened = local_paca->irq_happened; - - /* - * We are responding to the next interrupt, so interrupt-off - * latencies should be reset here. - */ - trace_hardirqs_on(); - trace_hardirqs_off(); - - if (happened & PACA_IRQ_DEC) { - local_paca->irq_happened &= ~PACA_IRQ_DEC; - return 0x900; - } - - if (happened & PACA_IRQ_EE) { - local_paca->irq_happened &= ~PACA_IRQ_EE; - return 0x500; - } - - if (happened & PACA_IRQ_DBELL) { - local_paca->irq_happened &= ~PACA_IRQ_DBELL; - return 0x280; - } - - if (happened & PACA_IRQ_HARD_DIS) - local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS; - - /* There should be nothing left ! */ - BUG_ON(local_paca->irq_happened != 0); - - return 0; -} - -/* - * This is specifically called by assembly code to re-enable interrupts - * if they are currently disabled. This is typically called before - * schedule() or do_signal() when returning to userspace. We do it - * in C to avoid the burden of dealing with lockdep etc... - * - * NOTE: This is called with interrupts hard disabled but not marked - * as such in paca->irq_happened, so we need to resync this. 
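In the iommu_take_ownership()/iommu_release_ownership() hunks above, the per-pool locks are now taken with spin_lock_nest_lock() so lockdep treats them as nested under large_pool.lock instead of reporting a same-class recursion. A compact illustration of that annotation (the structure and function names are invented for the example):

#include <linux/spinlock.h>

struct demo_table {
	spinlock_t big_lock;		/* outer lock, always taken first */
	spinlock_t pool_lock[4];	/* same lock class, nested inside */
};

static void demo_lock_all_pools(struct demo_table *t)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&t->big_lock, flags);
	for (i = 0; i < 4; i++)
		/* tells lockdep the nesting is serialized by big_lock */
		spin_lock_nest_lock(&t->pool_lock[i], &t->big_lock);

	/* ... work that needs every pool quiesced ... */

	for (i = 0; i < 4; i++)
		spin_unlock(&t->pool_lock[i]);
	spin_unlock_irqrestore(&t->big_lock, flags);
}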
- */ -void notrace restore_interrupts(void) -{ - if (irqs_disabled()) { - local_paca->irq_happened |= PACA_IRQ_HARD_DIS; - local_irq_enable(); - } else - __hard_irq_enable(); -} - -#endif /* CONFIG_PPC_BOOK3E */ - void replay_soft_interrupts(void) { struct pt_regs regs; @@ -218,7 +142,7 @@ again: */ if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) { local_paca->irq_happened &= ~PACA_IRQ_HMI; - regs.trap = 0xe60; + regs.trap = INTERRUPT_HMI; handle_hmi_exception(&regs); if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) hard_irq_disable(); @@ -226,7 +150,7 @@ again: if (local_paca->irq_happened & PACA_IRQ_DEC) { local_paca->irq_happened &= ~PACA_IRQ_DEC; - regs.trap = 0x900; + regs.trap = INTERRUPT_DECREMENTER; timer_interrupt(&regs); if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) hard_irq_disable(); @@ -234,7 +158,7 @@ again: if (local_paca->irq_happened & PACA_IRQ_EE) { local_paca->irq_happened &= ~PACA_IRQ_EE; - regs.trap = 0x500; + regs.trap = INTERRUPT_EXTERNAL; do_IRQ(&regs); if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) hard_irq_disable(); @@ -242,10 +166,7 @@ again: if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) { local_paca->irq_happened &= ~PACA_IRQ_DBELL; - if (IS_ENABLED(CONFIG_PPC_BOOK3E)) - regs.trap = 0x280; - else - regs.trap = 0xa00; + regs.trap = INTERRUPT_DOORBELL; doorbell_exception(&regs); if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) hard_irq_disable(); @@ -254,7 +175,7 @@ again: /* Book3E does not support soft-masking PMI interrupts */ if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) { local_paca->irq_happened &= ~PACA_IRQ_PMI; - regs.trap = 0xf00; + regs.trap = INTERRUPT_PERFMON; performance_monitor_exception(&regs); if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) hard_irq_disable(); @@ -282,7 +203,7 @@ static inline void replay_soft_interrupts_irqrestore(void) * and re-locking AMR but we shouldn't get here in the first place, * hence the warning. */ - kuap_check_amr(); + kuap_assert_locked(); if (kuap_state != AMR_KUAP_BLOCKED) set_kuap(AMR_KUAP_BLOCKED); @@ -667,6 +588,47 @@ static inline void check_stack_overflow(void) } } +static __always_inline void call_do_softirq(const void *sp) +{ + /* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */ + asm volatile ( + PPC_STLU " %%r1, %[offset](%[sp]) ;" + "mr %%r1, %[sp] ;" + "bl %[callee] ;" + PPC_LL " %%r1, 0(%%r1) ;" + : // Outputs + : // Inputs + [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD), + [callee] "i" (__do_softirq) + : // Clobbers + "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6", + "cr7", "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", + "r11", "r12" + ); +} + +static __always_inline void call_do_irq(struct pt_regs *regs, void *sp) +{ + register unsigned long r3 asm("r3") = (unsigned long)regs; + + /* Temporarily switch r1 to sp, call __do_irq() then restore r1. 
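replay_soft_interrupts() above now stamps the synthesized pt_regs with named interrupt constants rather than raw vector numbers. Conceptually that is just a table of named values; the sketch below reuses the literals this hunk removes and is illustrative only - the kernel's real definitions live in asm/interrupt.h and the doorbell vector differs between Book3S and Book3E:

/* Illustrative only; see arch/powerpc/include/asm/interrupt.h. */
#define INTERRUPT_EXTERNAL	0x500
#define INTERRUPT_DECREMENTER	0x900
#define INTERRUPT_DOORBELL	0xa00	/* 0x280 on Book3E */
#define INTERRUPT_HMI		0xe60
#define INTERRUPT_PERFMON	0xf00

Call sites then read regs.trap = INTERRUPT_DECREMENTER instead of a magic 0x900, and later comparisons such as TRAP(regs) != INTERRUPT_PROGRAM become self-describing.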
*/ + asm volatile ( + PPC_STLU " %%r1, %[offset](%[sp]) ;" + "mr %%r1, %[sp] ;" + "bl %[callee] ;" + PPC_LL " %%r1, 0(%%r1) ;" + : // Outputs + "+r" (r3) + : // Inputs + [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD), + [callee] "i" (__do_irq) + : // Clobbers + "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6", + "cr7", "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", + "r11", "r12" + ); +} + void __do_irq(struct pt_regs *regs) { unsigned int irq; diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c index 2257d24e6a26..39c625737c09 100644 --- a/arch/powerpc/kernel/isa-bridge.c +++ b/arch/powerpc/kernel/isa-bridge.c @@ -48,7 +48,7 @@ static void remap_isa_base(phys_addr_t pa, unsigned long size) if (slab_is_available()) { if (ioremap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa, pgprot_noncached(PAGE_KERNEL))) - unmap_kernel_range(ISA_IO_BASE, size); + vunmap_range(ISA_IO_BASE, ISA_IO_BASE + size); } else { early_ioremap_range(ISA_IO_BASE, pa, size, pgprot_noncached(PAGE_KERNEL)); @@ -311,7 +311,7 @@ static void isa_bridge_remove(void) isa_bridge_pcidev = NULL; /* Unmap the ISA area */ - unmap_kernel_range(ISA_IO_BASE, 0x10000); + vunmap_range(ISA_IO_BASE, ISA_IO_BASE + 0x10000); } /** diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c index 144858027fa3..ce87dc5ea23c 100644 --- a/arch/powerpc/kernel/jump_label.c +++ b/arch/powerpc/kernel/jump_label.c @@ -11,10 +11,10 @@ void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { - struct ppc_inst *addr = (struct ppc_inst *)(unsigned long)entry->code; + struct ppc_inst *addr = (struct ppc_inst *)jump_entry_code(entry); if (type == JUMP_LABEL_JMP) - patch_branch(addr, entry->target, 0); + patch_branch(addr, jump_entry_target(entry), 0); else patch_instruction(addr, ppc_inst(PPC_INST_NOP)); } diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index 409080208a6c..7dd2ad3603ad 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -376,7 +376,7 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) } /* - * This function does PowerPC specific procesing for interfacing to gdb. + * This function does PowerPC specific processing for interfacing to gdb. 
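isa-bridge.c above (and pci_64.c further down) converts unmap_kernel_range(), which took a start address and a length, to vunmap_range(), which takes a start and an end address. The conversion is mechanical; a minimal sketch, where demo_unmap() is an invented wrapper name:

#include <linux/vmalloc.h>

/* Tear down a kernel virtual mapping of @size bytes at @addr. */
static void demo_unmap(unsigned long addr, unsigned long size)
{
	vunmap_range(addr, addr + size);	/* old call: unmap_kernel_range(addr, size) */
}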
*/ int kgdb_arch_handle_exception(int vector, int signo, int err_code, char *remcom_in_buffer, char *remcom_out_buffer, diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index f061e06e9f51..8b2c1a8553a0 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -15,6 +15,7 @@ #include <asm/udbg.h> #include <asm/pci-bridge.h> #include <asm/ppc-pci.h> +#include <asm/early_ioremap.h> #undef DEBUG @@ -34,6 +35,7 @@ static struct legacy_serial_info { unsigned int clock; int irq_check_parent; phys_addr_t taddr; + void __iomem *early_addr; } legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS]; static const struct of_device_id legacy_serial_parents[] __initconst = { @@ -325,17 +327,16 @@ static void __init setup_legacy_serial_console(int console) { struct legacy_serial_info *info = &legacy_serial_infos[console]; struct plat_serial8250_port *port = &legacy_serial_ports[console]; - void __iomem *addr; unsigned int stride; stride = 1 << port->regshift; /* Check if a translated MMIO address has been found */ if (info->taddr) { - addr = ioremap(info->taddr, 0x1000); - if (addr == NULL) + info->early_addr = early_ioremap(info->taddr, 0x1000); + if (info->early_addr == NULL) return; - udbg_uart_init_mmio(addr, stride); + udbg_uart_init_mmio(info->early_addr, stride); } else { /* Check if it's PIO and we support untranslated PIO */ if (port->iotype == UPIO_PORT && isa_io_special) @@ -353,6 +354,30 @@ static void __init setup_legacy_serial_console(int console) udbg_uart_setup(info->speed, info->clock); } +static int __init ioremap_legacy_serial_console(void) +{ + struct legacy_serial_info *info = &legacy_serial_infos[legacy_serial_console]; + struct plat_serial8250_port *port = &legacy_serial_ports[legacy_serial_console]; + void __iomem *vaddr; + + if (legacy_serial_console < 0) + return 0; + + if (!info->early_addr) + return 0; + + vaddr = ioremap(info->taddr, 0x1000); + if (WARN_ON(!vaddr)) + return -ENOMEM; + + udbg_uart_init_mmio(vaddr, 1 << port->regshift); + early_iounmap(info->early_addr, 0x1000); + info->early_addr = NULL; + + return 0; +} +early_initcall(ioremap_legacy_serial_console); + /* * This is called very early, as part of setup_system() or eventually * setup_arch(), basically before anything else in this file. This function diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index 11f0cae086ed..9a3c2a84a2ac 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -40,7 +40,7 @@ static struct irq_work mce_ue_event_irq_work = { .func = machine_check_ue_irq_work, }; -DECLARE_WORK(mce_ue_event_work, machine_process_ue_event); +static DECLARE_WORK(mce_ue_event_work, machine_process_ue_event); static BLOCKING_NOTIFIER_HEAD(mce_notifier_list); @@ -131,6 +131,8 @@ void save_mce_event(struct pt_regs *regs, long handled, * Populate the mce error_type and type-specific error_type. 
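The legacy_serial.c change above maps the UART through early_ioremap() when the console is set up, then swaps in a permanent ioremap() mapping from an early_initcall once the normal ioremap machinery is usable. A condensed sketch of that two-stage pattern (all demo_* names and the 0x1000 length are just for illustration):

#include <linux/init.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <asm/early_ioremap.h>

static void __iomem *demo_early_addr;
static phys_addr_t demo_taddr;

static void __init demo_setup_early_console(phys_addr_t taddr)
{
	demo_taddr = taddr;
	demo_early_addr = early_ioremap(taddr, 0x1000);	/* temporary, fixmap-backed */
	/* ... point the early console at demo_early_addr ... */
}

static int __init demo_switch_to_ioremap(void)
{
	void __iomem *vaddr;

	if (!demo_early_addr)
		return 0;

	vaddr = ioremap(demo_taddr, 0x1000);		/* permanent mapping */
	if (WARN_ON(!vaddr))
		return -ENOMEM;

	/* ... repoint users at vaddr ... */
	early_iounmap(demo_early_addr, 0x1000);
	demo_early_addr = NULL;
	return 0;
}
early_initcall(demo_switch_to_ioremap);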
*/ mce_set_error_info(mce, mce_err); + if (mce->error_type == MCE_ERROR_TYPE_UE) + mce->u.ue_error.ignore_event = mce_err->ignore_event; if (!addr) return; @@ -159,7 +161,6 @@ void save_mce_event(struct pt_regs *regs, long handled, if (phys_addr != ULONG_MAX) { mce->u.ue_error.physical_address_provided = true; mce->u.ue_error.physical_address = phys_addr; - mce->u.ue_error.ignore_event = mce_err->ignore_event; machine_check_ue_event(mce); } } diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 717e658b90fd..6a076bef2932 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -28,45 +28,6 @@ .text /* - * We store the saved ksp_limit in the unused part - * of the STACK_FRAME_OVERHEAD - */ -_GLOBAL(call_do_softirq) - mflr r0 - stw r0,4(r1) - lwz r10,THREAD+KSP_LIMIT(r2) - stw r3, THREAD+KSP_LIMIT(r2) - stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3) - mr r1,r3 - stw r10,8(r1) - bl __do_softirq - lwz r10,8(r1) - lwz r1,0(r1) - lwz r0,4(r1) - stw r10,THREAD+KSP_LIMIT(r2) - mtlr r0 - blr - -/* - * void call_do_irq(struct pt_regs *regs, void *sp); - */ -_GLOBAL(call_do_irq) - mflr r0 - stw r0,4(r1) - lwz r10,THREAD+KSP_LIMIT(r2) - stw r4, THREAD+KSP_LIMIT(r2) - stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) - mr r1,r4 - stw r10,8(r1) - bl __do_irq - lwz r10,8(r1) - lwz r1,0(r1) - lwz r0,4(r1) - stw r10,THREAD+KSP_LIMIT(r2) - mtlr r0 - blr - -/* * This returns the high 64 bits of the product of two 64-bit numbers. */ _GLOBAL(mulhdu) diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 070465825c21..4b761a18a74d 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -27,28 +27,6 @@ .text -_GLOBAL(call_do_softirq) - mflr r0 - std r0,16(r1) - stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3) - mr r1,r3 - bl __do_softirq - ld r1,0(r1) - ld r0,16(r1) - mtlr r0 - blr - -_GLOBAL(call_do_irq) - mflr r0 - std r0,16(r1) - stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) - mr r1,r4 - bl __do_irq - ld r1,0(r1) - ld r0,16(r1) - mtlr r0 - blr - _GLOBAL(__bswapdi2) EXPORT_SYMBOL(__bswapdi2) srdi r8,r3,32 diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index a211b0253cdb..3f35c8d20be7 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c @@ -8,12 +8,14 @@ #include <linux/moduleloader.h> #include <linux/err.h> #include <linux/vmalloc.h> +#include <linux/mm.h> #include <linux/bug.h> #include <asm/module.h> #include <linux/uaccess.h> #include <asm/firmware.h> #include <linux/sort.h> #include <asm/setup.h> +#include <asm/sections.h> static LIST_HEAD(module_bug_list); @@ -87,13 +89,36 @@ int module_finalize(const Elf_Ehdr *hdr, return 0; } -#ifdef MODULES_VADDR +static __always_inline void * +__module_alloc(unsigned long size, unsigned long start, unsigned long end) +{ + /* + * Don't do huge page allocations for modules yet until more testing + * is done. STRICT_MODULE_RWX may require extra work to support this + * too. 
+ */ + return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, PAGE_KERNEL_EXEC, + VM_FLUSH_RESET_PERMS | VM_NO_HUGE_VMAP, + NUMA_NO_NODE, __builtin_return_address(0)); +} + void *module_alloc(unsigned long size) { +#ifdef MODULES_VADDR + unsigned long limit = (unsigned long)_etext - SZ_32M; + void *ptr = NULL; + BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); - return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, GFP_KERNEL, - PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, NUMA_NO_NODE, - __builtin_return_address(0)); -} + /* First try within 32M limit from _etext to avoid branch trampolines */ + if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit) + ptr = __module_alloc(size, limit, MODULES_END); + + if (!ptr) + ptr = __module_alloc(size, MODULES_VADDR, MODULES_END); + + return ptr; +#else + return __module_alloc(size, VMALLOC_START, VMALLOC_END); #endif +} diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index 532f22637783..3c8d9bbb51cf 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -647,6 +647,7 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, { struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf; static unsigned int oops_count = 0; + static struct kmsg_dump_iter iter; static bool panicking = false; static DEFINE_SPINLOCK(lock); unsigned long flags; @@ -681,13 +682,14 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, return; if (big_oops_buf) { - kmsg_dump_get_buffer(dumper, false, + kmsg_dump_rewind(&iter); + kmsg_dump_get_buffer(&iter, false, big_oops_buf, big_oops_buf_sz, &text_len); rc = zip_oops(text_len); } if (rc != 0) { - kmsg_dump_rewind(dumper); - kmsg_dump_get_buffer(dumper, false, + kmsg_dump_rewind(&iter); + kmsg_dump_get_buffer(&iter, false, oops_data, oops_data_sz, &text_len); err_type = ERR_TYPE_KERNEL_PANIC; oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 7f7cdbeacd1a..cdf87086fa33 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -141,11 +141,21 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe *op) } } +static void patch_imm32_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr) +{ + patch_instruction((struct ppc_inst *)addr, + ppc_inst(PPC_RAW_LIS(reg, IMM_H(val)))); + addr++; + + patch_instruction((struct ppc_inst *)addr, + ppc_inst(PPC_RAW_ORI(reg, reg, IMM_L(val)))); +} + /* * Generate instructions to load provided immediate 64-bit value * to register 'reg' and patch these instructions at 'addr'. */ -static void patch_imm64_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr) +static void patch_imm64_load_insns(unsigned long long val, int reg, kprobe_opcode_t *addr) { /* lis reg,(op)@highest */ patch_instruction((struct ppc_inst *)addr, @@ -177,6 +187,14 @@ static void patch_imm64_load_insns(unsigned long val, int reg, kprobe_opcode_t * ___PPC_RS(reg) | (val & 0xffff))); } +static void patch_imm_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr) +{ + if (IS_ENABLED(CONFIG_PPC64)) + patch_imm64_load_insns(val, reg, addr); + else + patch_imm32_load_insns(val, reg, addr); +} + int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) { struct ppc_inst branch_op_callback, branch_emulate_step, temp; @@ -230,7 +248,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) * Fixup the template with instructions to: * 1. 
load the address of the actual probepoint */ - patch_imm64_load_insns((unsigned long)op, 3, buff + TMPL_OP_IDX); + patch_imm_load_insns((unsigned long)op, 3, buff + TMPL_OP_IDX); /* * 2. branch to optimized_callback() and emulate_step() @@ -264,7 +282,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) * 3. load instruction to be emulated into relevant register, and */ temp = ppc_inst_read((struct ppc_inst *)p->ainsn.insn); - patch_imm64_load_insns(ppc_inst_as_u64(temp), 4, buff + TMPL_INSN_IDX); + patch_imm_load_insns(ppc_inst_as_ulong(temp), 4, buff + TMPL_INSN_IDX); /* * 4. branch back from trampoline diff --git a/arch/powerpc/kernel/optprobes_head.S b/arch/powerpc/kernel/optprobes_head.S index ff8ba4d3824d..19ea3312403c 100644 --- a/arch/powerpc/kernel/optprobes_head.S +++ b/arch/powerpc/kernel/optprobes_head.S @@ -9,6 +9,16 @@ #include <asm/ptrace.h> #include <asm/asm-offsets.h> +#ifdef CONFIG_PPC64 +#define SAVE_30GPRS(base) SAVE_10GPRS(2,base); SAVE_10GPRS(12,base); SAVE_10GPRS(22,base) +#define REST_30GPRS(base) REST_10GPRS(2,base); REST_10GPRS(12,base); REST_10GPRS(22,base) +#define TEMPLATE_FOR_IMM_LOAD_INSNS nop; nop; nop; nop; nop +#else +#define SAVE_30GPRS(base) stmw r2, GPR2(base) +#define REST_30GPRS(base) lmw r2, GPR2(base) +#define TEMPLATE_FOR_IMM_LOAD_INSNS nop; nop; nop +#endif + #define OPT_SLOT_SIZE 65536 .balign 4 @@ -30,39 +40,41 @@ optinsn_slot: .global optprobe_template_entry optprobe_template_entry: /* Create an in-memory pt_regs */ - stdu r1,-INT_FRAME_SIZE(r1) + PPC_STLU r1,-INT_FRAME_SIZE(r1) SAVE_GPR(0,r1) /* Save the previous SP into stack */ addi r0,r1,INT_FRAME_SIZE - std r0,GPR1(r1) - SAVE_10GPRS(2,r1) - SAVE_10GPRS(12,r1) - SAVE_10GPRS(22,r1) + PPC_STL r0,GPR1(r1) + SAVE_30GPRS(r1) /* Save SPRS */ mfmsr r5 - std r5,_MSR(r1) + PPC_STL r5,_MSR(r1) li r5,0x700 - std r5,_TRAP(r1) + PPC_STL r5,_TRAP(r1) li r5,0 - std r5,ORIG_GPR3(r1) - std r5,RESULT(r1) + PPC_STL r5,ORIG_GPR3(r1) + PPC_STL r5,RESULT(r1) mfctr r5 - std r5,_CTR(r1) + PPC_STL r5,_CTR(r1) mflr r5 - std r5,_LINK(r1) + PPC_STL r5,_LINK(r1) mfspr r5,SPRN_XER - std r5,_XER(r1) + PPC_STL r5,_XER(r1) mfcr r5 - std r5,_CCR(r1) + PPC_STL r5,_CCR(r1) +#ifdef CONFIG_PPC64 lbz r5,PACAIRQSOFTMASK(r13) std r5,SOFTE(r1) +#endif /* * We may get here from a module, so load the kernel TOC in r2. * The original TOC gets restored when pt_regs is restored * further below. */ +#ifdef CONFIG_PPC64 ld r2,PACATOC(r13) +#endif .global optprobe_template_op_address optprobe_template_op_address: @@ -70,11 +82,8 @@ optprobe_template_op_address: * Parameters to optimized_callback(): * 1. optimized_kprobe structure in r3 */ - nop - nop - nop - nop - nop + TEMPLATE_FOR_IMM_LOAD_INSNS + /* 2. pt_regs pointer in r4 */ addi r4,r1,STACK_FRAME_OVERHEAD @@ -92,11 +101,7 @@ optprobe_template_call_handler: .global optprobe_template_insn optprobe_template_insn: /* 2, Pass instruction to be emulated in r4 */ - nop - nop - nop - nop - nop + TEMPLATE_FOR_IMM_LOAD_INSNS .global optprobe_template_call_emulate optprobe_template_call_emulate: @@ -107,20 +112,18 @@ optprobe_template_call_emulate: * All done. * Now, restore the registers... 
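patch_imm32_load_insns(), added in optprobes.c above, materializes a 32-bit constant with a lis/ori pair, which means splitting the value into its high and low 16-bit halves. A small user-space check of that split; IMM_H/IMM_L are redefined locally here purely for the demonstration:

#include <assert.h>
#include <stdint.h>

#define IMM_H(v)	(((v) >> 16) & 0xffff)	/* half that lis loads into the top */
#define IMM_L(v)	((v) & 0xffff)		/* half that ori merges into the bottom */

int main(void)
{
	uint32_t val = 0xc0de1234;
	/* lis writes IMM_H << 16, ori then ORs in IMM_L */
	uint32_t rebuilt = (IMM_H(val) << 16) | IMM_L(val);

	assert(rebuilt == val);
	return 0;
}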
*/ - ld r5,_MSR(r1) + PPC_LL r5,_MSR(r1) mtmsr r5 - ld r5,_CTR(r1) + PPC_LL r5,_CTR(r1) mtctr r5 - ld r5,_LINK(r1) + PPC_LL r5,_LINK(r1) mtlr r5 - ld r5,_XER(r1) + PPC_LL r5,_XER(r1) mtxer r5 - ld r5,_CCR(r1) + PPC_LL r5,_CCR(r1) mtcr r5 REST_GPR(0,r1) - REST_10GPRS(2,r1) - REST_10GPRS(12,r1) - REST_10GPRS(22,r1) + REST_30GPRS(r1) /* Restore the previous SP */ addi r1,r1,INT_FRAME_SIZE diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 9312e6eda7ff..3fb7e572abed 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -140,7 +140,7 @@ void __iomem *ioremap_phb(phys_addr_t paddr, unsigned long size) addr = (unsigned long)area->addr; if (ioremap_page_range(addr, addr + size, paddr, pgprot_noncached(PAGE_KERNEL))) { - unmap_kernel_range(addr, size); + vunmap_range(addr, addr + size); return NULL; } diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 3231c2df9e26..89e34aa273e2 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1117,9 +1117,10 @@ void restore_tm_state(struct pt_regs *regs) regs->msr |= msr_diff; } -#else +#else /* !CONFIG_PPC_TRANSACTIONAL_MEM */ #define tm_recheckpoint_new_task(new) #define __switch_to_tm(prev, new) +void tm_reclaim_current(uint8_t cause) {} #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ static inline void save_sprs(struct thread_struct *t) @@ -1255,6 +1256,9 @@ struct task_struct *__switch_to(struct task_struct *prev, */ restore_sprs(old_thread, new_thread); +#ifdef CONFIG_PPC32 + kuap_assert_locked(); +#endif last = _switch(old_thread, new_thread); #ifdef CONFIG_PPC_BOOK3S_64 @@ -1444,11 +1448,9 @@ static void print_msr_bits(unsigned long val) #ifdef CONFIG_PPC64 #define REG "%016lx" #define REGS_PER_LINE 4 -#define LAST_VOLATILE 13 #else #define REG "%08lx" #define REGS_PER_LINE 8 -#define LAST_VOLATILE 12 #endif static void __show_regs(struct pt_regs *regs) @@ -1465,7 +1467,9 @@ static void __show_regs(struct pt_regs *regs) trap = TRAP(regs); if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR)) pr_cont("CFAR: "REG" ", regs->orig_gpr3); - if (trap == 0x200 || trap == 0x300 || trap == 0x600) { + if (trap == INTERRUPT_MACHINE_CHECK || + trap == INTERRUPT_DATA_STORAGE || + trap == INTERRUPT_ALIGNMENT) { if (IS_ENABLED(CONFIG_4xx) || IS_ENABLED(CONFIG_BOOKE)) pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); else @@ -1484,8 +1488,6 @@ static void __show_regs(struct pt_regs *regs) if ((i % REGS_PER_LINE) == 0) pr_cont("\nGPR%02d: ", i); pr_cont(REG " ", regs->gpr[i]); - if (i == LAST_VOLATILE && !FULL_REGS(regs)) - break; } pr_cont("\n"); /* @@ -1688,7 +1690,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, } else { /* user thread */ struct pt_regs *regs = current_pt_regs(); - CHECK_FULL_REGS(regs); *childregs = *regs; if (usp) childregs->gpr[1] = usp; @@ -1724,9 +1725,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, kregs = (struct pt_regs *) sp; sp -= STACK_FRAME_OVERHEAD; p->thread.ksp = sp; -#ifdef CONFIG_PPC32 - p->thread.ksp_limit = (unsigned long)end_of_stack(p); -#endif #ifdef CONFIG_HAVE_HW_BREAKPOINT for (i = 0; i < nr_wp_slots(); i++) p->thread.ptrace_bps[i] = NULL; @@ -1796,13 +1794,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) regs->ccr = 0; regs->gpr[1] = sp; - /* - * We have just cleared all the nonvolatile GPRs, so make - * FULL_REGS(regs) return true. This is necessary to allow - * ptrace to examine the thread immediately after exec. 
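The optprobes_head.S template above swaps bare std/ld/stdu for PPC_STL/PPC_LL/PPC_STLU so one template assembles on both 32-bit and 64-bit kernels. Roughly, such width-agnostic macros reduce to a config-dependent substitution; this is a simplified sketch, not the kernel's asm-compat.h (which additionally stringifies the mnemonics so the same macros work from inline asm in C):

/* Simplified idea only. */
#ifdef CONFIG_PPC64
#define PPC_LL		ld	/* load pointer-sized value */
#define PPC_STL		std	/* store pointer-sized value */
#define PPC_STLU	stdu	/* store with update (push a frame) */
#else
#define PPC_LL		lwz
#define PPC_STL		stw
#define PPC_STLU	stwu
#endif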
- */ - SET_FULL_REGS(regs); - #ifdef CONFIG_PPC32 regs->mq = 0; regs->nip = start; diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 9a4797d1d40d..fbe9deebc8e1 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -65,6 +65,8 @@ #define DBG(fmt...) #endif +int *chip_id_lookup_table; + #ifdef CONFIG_PPC64 int __initdata iommu_is_off; int __initdata iommu_force_on; @@ -267,7 +269,7 @@ static struct feature_property { }; #if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU) -static inline void identical_pvr_fixup(unsigned long node) +static __init void identical_pvr_fixup(unsigned long node) { unsigned int pvr; const char *model = of_get_flat_dt_prop(node, "model", NULL); @@ -914,13 +916,22 @@ EXPORT_SYMBOL(of_get_ibm_chip_id); int cpu_to_chip_id(int cpu) { struct device_node *np; + int ret = -1, idx; + + idx = cpu / threads_per_core; + if (chip_id_lookup_table && chip_id_lookup_table[idx] != -1) + return chip_id_lookup_table[idx]; np = of_get_cpu_node(cpu, NULL); - if (!np) - return -1; + if (np) { + ret = of_get_ibm_chip_id(np); + of_node_put(np); + + if (chip_id_lookup_table) + chip_id_lookup_table[idx] = ret; + } - of_node_put(np); - return of_get_ibm_chip_id(np); + return ret; } EXPORT_SYMBOL(cpu_to_chip_id); diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index ccf77b985c8f..41ed7e33d897 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -2983,7 +2983,7 @@ static void __init fixup_device_tree_efika_add_phy(void) " 0x3 encode-int encode+" " s\" interrupts\" property" " finish-device"); - }; + } /* Check for a PHY device node - if missing then create one and * give it's phandle to the ethernet node */ diff --git a/arch/powerpc/kernel/ptrace/Makefile b/arch/powerpc/kernel/ptrace/Makefile index 8ebc11d1168d..77abd1a5a508 100644 --- a/arch/powerpc/kernel/ptrace/Makefile +++ b/arch/powerpc/kernel/ptrace/Makefile @@ -6,11 +6,11 @@ CFLAGS_ptrace-view.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' obj-y += ptrace.o ptrace-view.o -obj-$(CONFIG_PPC_FPU_REGS) += ptrace-fpu.o +obj-y += ptrace-fpu.o obj-$(CONFIG_COMPAT) += ptrace32.o obj-$(CONFIG_VSX) += ptrace-vsx.o ifneq ($(CONFIG_VSX),y) -obj-$(CONFIG_PPC_FPU_REGS) += ptrace-novsx.o +obj-y += ptrace-novsx.o endif obj-$(CONFIG_ALTIVEC) += ptrace-altivec.o obj-$(CONFIG_SPE) += ptrace-spe.o diff --git a/arch/powerpc/kernel/ptrace/ptrace-decl.h b/arch/powerpc/kernel/ptrace/ptrace-decl.h index 3487f2c9735c..eafe5f0f6289 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-decl.h +++ b/arch/powerpc/kernel/ptrace/ptrace-decl.h @@ -165,22 +165,8 @@ int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data); extern const struct user_regset_view user_ppc_native_view; /* ptrace-fpu */ -#ifdef CONFIG_PPC_FPU_REGS int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data); int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data); -#else -static inline int -ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data) -{ - return -EIO; -} - -static inline int -ptrace_put_fpr(struct task_struct *child, int index, unsigned long data) -{ - return -EIO; -} -#endif /* ptrace-(no)adv */ void ppc_gethwdinfo(struct ppc_debug_info *dbginfo); diff --git a/arch/powerpc/kernel/ptrace/ptrace-fpu.c b/arch/powerpc/kernel/ptrace/ptrace-fpu.c index 8301cb52dd99..5dca19361316 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-fpu.c +++ b/arch/powerpc/kernel/ptrace/ptrace-fpu.c @@ -8,32 +8,42 @@ int ptrace_get_fpr(struct 
task_struct *child, int index, unsigned long *data) { +#ifdef CONFIG_PPC_FPU_REGS unsigned int fpidx = index - PT_FPR0; +#endif if (index > PT_FPSCR) return -EIO; +#ifdef CONFIG_PPC_FPU_REGS flush_fp_to_thread(child); if (fpidx < (PT_FPSCR - PT_FPR0)) memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long)); else *data = child->thread.fp_state.fpscr; +#else + *data = 0; +#endif return 0; } int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data) { +#ifdef CONFIG_PPC_FPU_REGS unsigned int fpidx = index - PT_FPR0; +#endif if (index > PT_FPSCR) return -EIO; +#ifdef CONFIG_PPC_FPU_REGS flush_fp_to_thread(child); if (fpidx < (PT_FPSCR - PT_FPR0)) memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long)); else child->thread.fp_state.fpscr = data; +#endif return 0; } diff --git a/arch/powerpc/kernel/ptrace/ptrace-novsx.c b/arch/powerpc/kernel/ptrace/ptrace-novsx.c index b3b36835658a..7433f3db979a 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-novsx.c +++ b/arch/powerpc/kernel/ptrace/ptrace-novsx.c @@ -21,12 +21,16 @@ int fpr_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { +#ifdef CONFIG_PPC_FPU_REGS BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != offsetof(struct thread_fp_state, fpr[32])); flush_fp_to_thread(target); return membuf_write(&to, &target->thread.fp_state, 33 * sizeof(u64)); +#else + return membuf_write(&to, &empty_zero_page, 33 * sizeof(u64)); +#endif } /* @@ -46,6 +50,7 @@ int fpr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { +#ifdef CONFIG_PPC_FPU_REGS BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != offsetof(struct thread_fp_state, fpr[32])); @@ -53,4 +58,7 @@ int fpr_set(struct task_struct *target, const struct user_regset *regset, return user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.fp_state, 0, -1); +#else + return 0; +#endif } diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c index 2bad8068f598..773bcc4ca843 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-view.c +++ b/arch/powerpc/kernel/ptrace/ptrace-view.c @@ -111,7 +111,7 @@ static unsigned long get_user_msr(struct task_struct *task) return task->thread.regs->msr | task->thread.fpexc_mode; } -static int set_user_msr(struct task_struct *task, unsigned long msr) +static __always_inline int set_user_msr(struct task_struct *task, unsigned long msr) { task->thread.regs->msr &= ~MSR_DEBUGCHANGE; task->thread.regs->msr |= msr & MSR_DEBUGCHANGE; @@ -147,7 +147,7 @@ static int set_user_dscr(struct task_struct *task, unsigned long dscr) * We prevent mucking around with the reserved area of trap * which are used internally by the kernel. */ -static int set_user_trap(struct task_struct *task, unsigned long trap) +static __always_inline int set_user_trap(struct task_struct *task, unsigned long trap) { set_trap(task->thread.regs, trap); return 0; @@ -221,17 +221,9 @@ static int gpr_get(struct task_struct *target, const struct user_regset *regset, #ifdef CONFIG_PPC64 struct membuf to_softe = membuf_at(&to, offsetof(struct pt_regs, softe)); #endif - int i; - if (target->thread.regs == NULL) return -EIO; - if (!FULL_REGS(target->thread.regs)) { - /* We have a partial register set. 
Fill 14-31 with bogus values */ - for (i = 14; i < 32; i++) - target->thread.regs->gpr[i] = NV_REG_POISON; - } - membuf_write(&to, target->thread.regs, sizeof(struct user_pt_regs)); membuf_store(&to_msr, get_user_msr(target)); @@ -252,8 +244,6 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset, if (target->thread.regs == NULL) return -EIO; - CHECK_FULL_REGS(target->thread.regs); - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, target->thread.regs, 0, PT_MSR * sizeof(reg)); @@ -522,13 +512,11 @@ static const struct user_regset native_regsets[] = { .size = sizeof(long), .align = sizeof(long), .regset_get = gpr_get, .set = gpr_set }, -#ifdef CONFIG_PPC_FPU_REGS [REGSET_FPR] = { .core_note_type = NT_PRFPREG, .n = ELF_NFPREG, .size = sizeof(double), .align = sizeof(double), .regset_get = fpr_get, .set = fpr_set }, -#endif #ifdef CONFIG_ALTIVEC [REGSET_VMX] = { .core_note_type = NT_PPC_VMX, .n = 34, @@ -661,6 +649,9 @@ int gpr32_set_common(struct task_struct *target, const compat_ulong_t __user *u = ubuf; compat_ulong_t reg; + if (!kbuf && !user_read_access_begin(u, count)) + return -EFAULT; + pos /= sizeof(reg); count /= sizeof(reg); @@ -669,8 +660,7 @@ int gpr32_set_common(struct task_struct *target, regs[pos++] = *k++; else for (; count > 0 && pos < PT_MSR; --count) { - if (__get_user(reg, u++)) - return -EFAULT; + unsafe_get_user(reg, u++, Efault); regs[pos++] = reg; } @@ -678,8 +668,8 @@ int gpr32_set_common(struct task_struct *target, if (count > 0 && pos == PT_MSR) { if (kbuf) reg = *k++; - else if (__get_user(reg, u++)) - return -EFAULT; + else + unsafe_get_user(reg, u++, Efault); set_user_msr(target, reg); ++pos; --count; @@ -692,24 +682,24 @@ int gpr32_set_common(struct task_struct *target, ++k; } else { for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) { - if (__get_user(reg, u++)) - return -EFAULT; + unsafe_get_user(reg, u++, Efault); regs[pos++] = reg; } for (; count > 0 && pos < PT_TRAP; --count, ++pos) - if (__get_user(reg, u++)) - return -EFAULT; + unsafe_get_user(reg, u++, Efault); } if (count > 0 && pos == PT_TRAP) { if (kbuf) reg = *k++; - else if (__get_user(reg, u++)) - return -EFAULT; + else + unsafe_get_user(reg, u++, Efault); set_user_trap(target, reg); ++pos; --count; } + if (!kbuf) + user_read_access_end(); kbuf = k; ubuf = u; @@ -717,25 +707,19 @@ int gpr32_set_common(struct task_struct *target, count *= sizeof(reg); return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, (PT_TRAP + 1) * sizeof(reg), -1); + +Efault: + user_read_access_end(); + return -EFAULT; } static int gpr32_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { - int i; - if (target->thread.regs == NULL) return -EIO; - if (!FULL_REGS(target->thread.regs)) { - /* - * We have a partial register set. - * Fill 14-31 with bogus values. 
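gpr32_set_common() above replaces per-word __get_user() calls with a single user_read_access_begin()/user_read_access_end() window containing unsafe_get_user(), so the user-access window (KUAP, and now the uaccess flush) is opened once per regset rather than once per register. The general shape of that pattern, reduced to a sketch with an invented helper name:

#include <linux/uaccess.h>

static int demo_copy_words(unsigned long *dst, const unsigned long __user *src,
			   int count)
{
	int i;

	if (!user_read_access_begin(src, count * sizeof(*src)))
		return -EFAULT;

	for (i = 0; i < count; i++)
		unsafe_get_user(dst[i], &src[i], Efault);	/* no per-access toggle */

	user_read_access_end();
	return 0;

Efault:
	user_read_access_end();
	return -EFAULT;
}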
- */ - for (i = 14; i < 32; i++) - target->thread.regs->gpr[i] = NV_REG_POISON; - } return gpr32_get_common(target, regset, to, &target->thread.regs->gpr[0]); } @@ -748,7 +732,6 @@ static int gpr32_set(struct task_struct *target, if (target->thread.regs == NULL) return -EIO; - CHECK_FULL_REGS(target->thread.regs); return gpr32_set_common(target, regset, pos, count, kbuf, ubuf, &target->thread.regs->gpr[0]); } diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c index 4f3d4ff3728c..0a0a33eb0d28 100644 --- a/arch/powerpc/kernel/ptrace/ptrace.c +++ b/arch/powerpc/kernel/ptrace/ptrace.c @@ -59,7 +59,6 @@ long arch_ptrace(struct task_struct *child, long request, if ((addr & (sizeof(long) - 1)) || !child->thread.regs) break; - CHECK_FULL_REGS(child->thread.regs); if (index < PT_FPR0) ret = ptrace_get_reg(child, (int) index, &tmp); else @@ -81,7 +80,6 @@ long arch_ptrace(struct task_struct *child, long request, if ((addr & (sizeof(long) - 1)) || !child->thread.regs) break; - CHECK_FULL_REGS(child->thread.regs); if (index < PT_FPR0) ret = ptrace_put_reg(child, index, data); else @@ -354,8 +352,6 @@ void __init pt_regs_check(void) offsetof(struct user_pt_regs, nip)); BUILD_BUG_ON(offsetof(struct pt_regs, msr) != offsetof(struct user_pt_regs, msr)); - BUILD_BUG_ON(offsetof(struct pt_regs, msr) != - offsetof(struct user_pt_regs, msr)); BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) != offsetof(struct user_pt_regs, orig_gpr3)); BUILD_BUG_ON(offsetof(struct pt_regs, ctr) != diff --git a/arch/powerpc/kernel/ptrace/ptrace32.c b/arch/powerpc/kernel/ptrace/ptrace32.c index d30b9ad70edc..19c224808982 100644 --- a/arch/powerpc/kernel/ptrace/ptrace32.c +++ b/arch/powerpc/kernel/ptrace/ptrace32.c @@ -83,7 +83,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, if ((addr & 3) || (index > PT_FPSCR32)) break; - CHECK_FULL_REGS(child->thread.regs); if (index < PT_FPR0) { ret = ptrace_get_reg(child, index, &tmp); if (ret) @@ -133,7 +132,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, if ((addr & 3) || numReg > PT_FPSCR) break; - CHECK_FULL_REGS(child->thread.regs); if (numReg >= PT_FPR0) { flush_fp_to_thread(child); /* get 64 bit FPR */ @@ -187,7 +185,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, if ((addr & 3) || (index > PT_FPSCR32)) break; - CHECK_FULL_REGS(child->thread.regs); if (index < PT_FPR0) { ret = ptrace_put_reg(child, index, data); } else { @@ -226,7 +223,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, */ if ((addr & 3) || (numReg > PT_FPSCR)) break; - CHECK_FULL_REGS(child->thread.regs); if (numReg < PT_FPR0) { unsigned long freg; ret = ptrace_get_reg(child, numReg, &freg); diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c index 2d33f342a293..6857a5b0a1c3 100644 --- a/arch/powerpc/kernel/rtas-proc.c +++ b/arch/powerpc/kernel/rtas-proc.c @@ -755,11 +755,18 @@ static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v) return 0; } -#define RMO_READ_BUF_MAX 30 - -/* RTAS Userspace access */ +/** + * ppc_rtas_rmo_buf_show() - Describe RTAS-addressable region for user space. + * + * Base + size description of a range of RTAS-addressable memory set + * aside for user space to use as work area(s) for certain RTAS + * functions. User space accesses this region via /dev/mem. 
Apart from + * security policies, the kernel does not arbitrate or serialize + * access to this region, and user space must ensure that concurrent + * users do not interfere with each other. + */ static int ppc_rtas_rmo_buf_show(struct seq_file *m, void *v) { - seq_printf(m, "%016lx %x\n", rtas_rmo_buf, RTAS_RMOBUF_MAX); + seq_printf(m, "%016lx %x\n", rtas_rmo_buf, RTAS_USER_REGION_SIZE); return 0; } diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index d126d71ea5bd..6bada744402b 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -828,7 +828,6 @@ void rtas_activate_firmware(void) pr_err("ibm,activate-firmware failed (%i)\n", fwrc); } -static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE; #ifdef CONFIG_PPC_PSERIES /** * rtas_call_reentrant() - Used for reentrant rtas calls @@ -988,10 +987,10 @@ static struct rtas_filter rtas_filters[] __ro_after_init = { static bool in_rmo_buf(u32 base, u32 end) { return base >= rtas_rmo_buf && - base < (rtas_rmo_buf + RTAS_RMOBUF_MAX) && + base < (rtas_rmo_buf + RTAS_USER_REGION_SIZE) && base <= end && end >= rtas_rmo_buf && - end < (rtas_rmo_buf + RTAS_RMOBUF_MAX); + end < (rtas_rmo_buf + RTAS_USER_REGION_SIZE); } static bool block_rtas_call(int token, int nargs, @@ -1052,6 +1051,14 @@ err: return true; } +static void __init rtas_syscall_filter_init(void) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) + rtas_filters[i].token = rtas_token(rtas_filters[i].name); +} + #else static bool block_rtas_call(int token, int nargs, @@ -1060,6 +1067,10 @@ static bool block_rtas_call(int token, int nargs, return false; } +static void __init rtas_syscall_filter_init(void) +{ +} + #endif /* CONFIG_PPC_RTAS_FILTER */ /* We assume to be passed big endian arguments */ @@ -1103,7 +1114,7 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) return -EINVAL; /* Need to handle ibm,suspend_me call specially */ - if (token == ibm_suspend_me_token) { + if (token == rtas_token("ibm,suspend-me")) { /* * rtas_ibm_suspend_me assumes the streamid handle is in cpu @@ -1163,9 +1174,6 @@ void __init rtas_initialize(void) unsigned long rtas_region = RTAS_INSTANTIATE_MAX; u32 base, size, entry; int no_base, no_size, no_entry; -#ifdef CONFIG_PPC_RTAS_FILTER - int i; -#endif /* Get RTAS dev node and fill up our "rtas" structure with infos * about it. 
@@ -1191,12 +1199,10 @@ void __init rtas_initialize(void) * the stop-self token if any */ #ifdef CONFIG_PPC64 - if (firmware_has_feature(FW_FEATURE_LPAR)) { + if (firmware_has_feature(FW_FEATURE_LPAR)) rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX); - ibm_suspend_me_token = rtas_token("ibm,suspend-me"); - } #endif - rtas_rmo_buf = memblock_phys_alloc_range(RTAS_RMOBUF_MAX, PAGE_SIZE, + rtas_rmo_buf = memblock_phys_alloc_range(RTAS_USER_REGION_SIZE, PAGE_SIZE, 0, rtas_region); if (!rtas_rmo_buf) panic("ERROR: RTAS: Failed to allocate %lx bytes below %pa\n", @@ -1206,11 +1212,7 @@ void __init rtas_initialize(void) rtas_last_error_token = rtas_token("rtas-last-error"); #endif -#ifdef CONFIG_PPC_RTAS_FILTER - for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) { - rtas_filters[i].token = rtas_token(rtas_filters[i].name); - } -#endif + rtas_syscall_filter_init(); } int __init early_init_dt_scan_rtas(unsigned long node, diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index e4e1a94ccf6a..0fdfcdd9d880 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -7,6 +7,7 @@ #include <linux/cpu.h> #include <linux/kernel.h> #include <linux/device.h> +#include <linux/memblock.h> #include <linux/nospec.h> #include <linux/prctl.h> #include <linux/seq_buf.h> @@ -18,6 +19,7 @@ #include <asm/setup.h> #include <asm/inst.h> +#include "setup.h" u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT; @@ -250,7 +252,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c static enum stf_barrier_type stf_enabled_flush_types; static bool no_stf_barrier; -bool stf_barrier; +static bool stf_barrier; static int __init handle_no_stf_barrier(char *p) { @@ -541,6 +543,178 @@ void setup_count_cache_flush(void) toggle_branch_cache_flush(enable); } +static enum l1d_flush_type enabled_flush_types; +static void *l1d_flush_fallback_area; +static bool no_rfi_flush; +static bool no_entry_flush; +static bool no_uaccess_flush; +bool rfi_flush; +static bool entry_flush; +static bool uaccess_flush; +DEFINE_STATIC_KEY_FALSE(uaccess_flush_key); +EXPORT_SYMBOL(uaccess_flush_key); + +static int __init handle_no_rfi_flush(char *p) +{ + pr_info("rfi-flush: disabled on command line."); + no_rfi_flush = true; + return 0; +} +early_param("no_rfi_flush", handle_no_rfi_flush); + +static int __init handle_no_entry_flush(char *p) +{ + pr_info("entry-flush: disabled on command line."); + no_entry_flush = true; + return 0; +} +early_param("no_entry_flush", handle_no_entry_flush); + +static int __init handle_no_uaccess_flush(char *p) +{ + pr_info("uaccess-flush: disabled on command line."); + no_uaccess_flush = true; + return 0; +} +early_param("no_uaccess_flush", handle_no_uaccess_flush); + +/* + * The RFI flush is not KPTI, but because users will see doco that says to use + * nopti we hijack that option here to also disable the RFI flush. + */ +static int __init handle_no_pti(char *p) +{ + pr_info("rfi-flush: disabling due to 'nopti' on command line.\n"); + handle_no_rfi_flush(NULL); + return 0; +} +early_param("nopti", handle_no_pti); + +static void do_nothing(void *unused) +{ + /* + * We don't need to do the flush explicitly, just enter+exit kernel is + * sufficient, the RFI exit handlers will do the right thing. 
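The no_rfi_flush/no_entry_flush/no_uaccess_flush switches above are parsed with early_param(), which runs the handler during early command-line parsing, long before initcalls, so a mitigation can be disabled before it is ever patched in. A minimal example of the same hookup (the demo_* names are invented):

#include <linux/init.h>
#include <linux/printk.h>

static bool demo_no_flush;

/* "demo_no_flush" on the kernel command line sets the flag very early. */
static int __init handle_demo_no_flush(char *p)
{
	pr_info("demo-flush: disabled on command line.\n");
	demo_no_flush = true;
	return 0;
}
early_param("demo_no_flush", handle_demo_no_flush);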
+ */ +} + +void rfi_flush_enable(bool enable) +{ + if (enable) { + do_rfi_flush_fixups(enabled_flush_types); + on_each_cpu(do_nothing, NULL, 1); + } else + do_rfi_flush_fixups(L1D_FLUSH_NONE); + + rfi_flush = enable; +} + +static void entry_flush_enable(bool enable) +{ + if (enable) { + do_entry_flush_fixups(enabled_flush_types); + on_each_cpu(do_nothing, NULL, 1); + } else { + do_entry_flush_fixups(L1D_FLUSH_NONE); + } + + entry_flush = enable; +} + +static void uaccess_flush_enable(bool enable) +{ + if (enable) { + do_uaccess_flush_fixups(enabled_flush_types); + static_branch_enable(&uaccess_flush_key); + on_each_cpu(do_nothing, NULL, 1); + } else { + static_branch_disable(&uaccess_flush_key); + do_uaccess_flush_fixups(L1D_FLUSH_NONE); + } + + uaccess_flush = enable; +} + +static void __ref init_fallback_flush(void) +{ + u64 l1d_size, limit; + int cpu; + + /* Only allocate the fallback flush area once (at boot time). */ + if (l1d_flush_fallback_area) + return; + + l1d_size = ppc64_caches.l1d.size; + + /* + * If there is no d-cache-size property in the device tree, l1d_size + * could be zero. That leads to the loop in the asm wrapping around to + * 2^64-1, and then walking off the end of the fallback area and + * eventually causing a page fault which is fatal. Just default to + * something vaguely sane. + */ + if (!l1d_size) + l1d_size = (64 * 1024); + + limit = min(ppc64_bolted_size(), ppc64_rma_size); + + /* + * Align to L1d size, and size it at 2x L1d size, to catch possible + * hardware prefetch runoff. We don't have a recipe for load patterns to + * reliably avoid the prefetcher. + */ + l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2, + l1d_size, MEMBLOCK_LOW_LIMIT, + limit, NUMA_NO_NODE); + if (!l1d_flush_fallback_area) + panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n", + __func__, l1d_size * 2, l1d_size, &limit); + + + for_each_possible_cpu(cpu) { + struct paca_struct *paca = paca_ptrs[cpu]; + paca->rfi_flush_fallback_area = l1d_flush_fallback_area; + paca->l1d_flush_size = l1d_size; + } +} + +void setup_rfi_flush(enum l1d_flush_type types, bool enable) +{ + if (types & L1D_FLUSH_FALLBACK) { + pr_info("rfi-flush: fallback displacement flush available\n"); + init_fallback_flush(); + } + + if (types & L1D_FLUSH_ORI) + pr_info("rfi-flush: ori type flush available\n"); + + if (types & L1D_FLUSH_MTTRIG) + pr_info("rfi-flush: mttrig type flush available\n"); + + enabled_flush_types = types; + + if (!cpu_mitigations_off() && !no_rfi_flush) + rfi_flush_enable(enable); +} + +void setup_entry_flush(bool enable) +{ + if (cpu_mitigations_off()) + return; + + if (!no_entry_flush) + entry_flush_enable(enable); +} + +void setup_uaccess_flush(bool enable) +{ + if (cpu_mitigations_off()) + return; + + if (!no_uaccess_flush) + uaccess_flush_enable(enable); +} + #ifdef CONFIG_DEBUG_FS static int count_cache_flush_set(void *data, u64 val) { @@ -579,5 +753,92 @@ static __init int count_cache_flush_debugfs_init(void) return 0; } device_initcall(count_cache_flush_debugfs_init); + +static int rfi_flush_set(void *data, u64 val) +{ + bool enable; + + if (val == 1) + enable = true; + else if (val == 0) + enable = false; + else + return -EINVAL; + + /* Only do anything if we're changing state */ + if (enable != rfi_flush) + rfi_flush_enable(enable); + + return 0; +} + +static int rfi_flush_get(void *data, u64 *val) +{ + *val = rfi_flush ? 
1 : 0; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n"); + +static int entry_flush_set(void *data, u64 val) +{ + bool enable; + + if (val == 1) + enable = true; + else if (val == 0) + enable = false; + else + return -EINVAL; + + /* Only do anything if we're changing state */ + if (enable != entry_flush) + entry_flush_enable(enable); + + return 0; +} + +static int entry_flush_get(void *data, u64 *val) +{ + *val = entry_flush ? 1 : 0; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n"); + +static int uaccess_flush_set(void *data, u64 val) +{ + bool enable; + + if (val == 1) + enable = true; + else if (val == 0) + enable = false; + else + return -EINVAL; + + /* Only do anything if we're changing state */ + if (enable != uaccess_flush) + uaccess_flush_enable(enable); + + return 0; +} + +static int uaccess_flush_get(void *data, u64 *val) +{ + *val = uaccess_flush ? 1 : 0; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n"); + +static __init int rfi_flush_debugfs_init(void) +{ + debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush); + debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush); + debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush); + return 0; +} +device_initcall(rfi_flush_debugfs_init); #endif /* CONFIG_DEBUG_FS */ #endif /* CONFIG_PPC_BOOK3S_64 */ diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index bee984b1887b..74a98fff2c2f 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -69,7 +69,6 @@ #include "setup.h" #ifdef DEBUG -#include <asm/udbg.h> #define DBG(fmt...) udbg_printf(fmt) #else #define DBG(fmt...) @@ -829,7 +828,7 @@ static __init void print_system_info(void) } #ifdef CONFIG_SMP -static void smp_setup_pacas(void) +static void __init smp_setup_pacas(void) { int cpu; diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 8ba49a6bf515..d7c1f92152af 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -164,7 +164,7 @@ void __init irqstack_early_init(void) } #ifdef CONFIG_VMAP_STACK -void *emergency_ctx[NR_CPUS] __ro_after_init; +void *emergency_ctx[NR_CPUS] __ro_after_init = {[0] = &init_stack}; void __init emergency_stack_init(void) { diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 560ed8b975e7..b779d25761cf 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -232,10 +232,23 @@ static void cpu_ready_for_interrupts(void) * If we are not in hypervisor mode the job is done once for * the whole partition in configure_exceptions(). 
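The debugfs hunk added to security.c above follows the usual simple-attribute recipe: a get/set pair taking u64, wrapped by DEFINE_SIMPLE_ATTRIBUTE() and exposed with debugfs_create_file(). A hedged sketch of the same toggle shape as a standalone module, with hypothetical names (demo, demo_flush) and no claim to match the powerpc files beyond the pattern:

#include <linux/debugfs.h>
#include <linux/module.h>

static bool demo_flush;
static struct dentry *demo_dir;

static int demo_flush_set(void *data, u64 val)
{
	if (val != 0 && val != 1)
		return -EINVAL;

	/* Only act on a real state change, as the powerpc code does. */
	if ((bool)val != demo_flush)
		demo_flush = val;

	return 0;
}

static int demo_flush_get(void *data, u64 *val)
{
	*val = demo_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_demo_flush, demo_flush_get, demo_flush_set, "%llu\n");

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	debugfs_create_file("demo_flush", 0600, demo_dir, NULL, &fops_demo_flush);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");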
*/ - if (cpu_has_feature(CPU_FTR_HVMODE) && - cpu_has_feature(CPU_FTR_ARCH_207S)) { + if (cpu_has_feature(CPU_FTR_HVMODE)) { unsigned long lpcr = mfspr(SPRN_LPCR); - mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); + unsigned long new_lpcr = lpcr; + + if (cpu_has_feature(CPU_FTR_ARCH_31)) { + /* P10 DD1 does not have HAIL */ + if (pvr_version_is(PVR_POWER10) && + (mfspr(SPRN_PVR) & 0xf00) == 0x100) + new_lpcr |= LPCR_AIL_3; + else + new_lpcr |= LPCR_HAIL; + } else if (cpu_has_feature(CPU_FTR_ARCH_207S)) { + new_lpcr |= LPCR_AIL_3; + } + + if (new_lpcr != lpcr) + mtspr(SPRN_LPCR, new_lpcr); } /* @@ -941,266 +954,3 @@ static int __init disable_hardlockup_detector(void) return 0; } early_initcall(disable_hardlockup_detector); - -#ifdef CONFIG_PPC_BOOK3S_64 -static enum l1d_flush_type enabled_flush_types; -static void *l1d_flush_fallback_area; -static bool no_rfi_flush; -static bool no_entry_flush; -static bool no_uaccess_flush; -bool rfi_flush; -bool entry_flush; -bool uaccess_flush; -DEFINE_STATIC_KEY_FALSE(uaccess_flush_key); -EXPORT_SYMBOL(uaccess_flush_key); - -static int __init handle_no_rfi_flush(char *p) -{ - pr_info("rfi-flush: disabled on command line."); - no_rfi_flush = true; - return 0; -} -early_param("no_rfi_flush", handle_no_rfi_flush); - -static int __init handle_no_entry_flush(char *p) -{ - pr_info("entry-flush: disabled on command line."); - no_entry_flush = true; - return 0; -} -early_param("no_entry_flush", handle_no_entry_flush); - -static int __init handle_no_uaccess_flush(char *p) -{ - pr_info("uaccess-flush: disabled on command line."); - no_uaccess_flush = true; - return 0; -} -early_param("no_uaccess_flush", handle_no_uaccess_flush); - -/* - * The RFI flush is not KPTI, but because users will see doco that says to use - * nopti we hijack that option here to also disable the RFI flush. - */ -static int __init handle_no_pti(char *p) -{ - pr_info("rfi-flush: disabling due to 'nopti' on command line.\n"); - handle_no_rfi_flush(NULL); - return 0; -} -early_param("nopti", handle_no_pti); - -static void do_nothing(void *unused) -{ - /* - * We don't need to do the flush explicitly, just enter+exit kernel is - * sufficient, the RFI exit handlers will do the right thing. - */ -} - -void rfi_flush_enable(bool enable) -{ - if (enable) { - do_rfi_flush_fixups(enabled_flush_types); - on_each_cpu(do_nothing, NULL, 1); - } else - do_rfi_flush_fixups(L1D_FLUSH_NONE); - - rfi_flush = enable; -} - -static void entry_flush_enable(bool enable) -{ - if (enable) { - do_entry_flush_fixups(enabled_flush_types); - on_each_cpu(do_nothing, NULL, 1); - } else { - do_entry_flush_fixups(L1D_FLUSH_NONE); - } - - entry_flush = enable; -} - -static void uaccess_flush_enable(bool enable) -{ - if (enable) { - do_uaccess_flush_fixups(enabled_flush_types); - static_branch_enable(&uaccess_flush_key); - on_each_cpu(do_nothing, NULL, 1); - } else { - static_branch_disable(&uaccess_flush_key); - do_uaccess_flush_fixups(L1D_FLUSH_NONE); - } - - uaccess_flush = enable; -} - -static void __ref init_fallback_flush(void) -{ - u64 l1d_size, limit; - int cpu; - - /* Only allocate the fallback flush area once (at boot time). */ - if (l1d_flush_fallback_area) - return; - - l1d_size = ppc64_caches.l1d.size; - - /* - * If there is no d-cache-size property in the device tree, l1d_size - * could be zero. That leads to the loop in the asm wrapping around to - * 2^64-1, and then walking off the end of the fallback area and - * eventually causing a page fault which is fatal. Just default to - * something vaguely sane. 
- */ - if (!l1d_size) - l1d_size = (64 * 1024); - - limit = min(ppc64_bolted_size(), ppc64_rma_size); - - /* - * Align to L1d size, and size it at 2x L1d size, to catch possible - * hardware prefetch runoff. We don't have a recipe for load patterns to - * reliably avoid the prefetcher. - */ - l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2, - l1d_size, MEMBLOCK_LOW_LIMIT, - limit, NUMA_NO_NODE); - if (!l1d_flush_fallback_area) - panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n", - __func__, l1d_size * 2, l1d_size, &limit); - - - for_each_possible_cpu(cpu) { - struct paca_struct *paca = paca_ptrs[cpu]; - paca->rfi_flush_fallback_area = l1d_flush_fallback_area; - paca->l1d_flush_size = l1d_size; - } -} - -void setup_rfi_flush(enum l1d_flush_type types, bool enable) -{ - if (types & L1D_FLUSH_FALLBACK) { - pr_info("rfi-flush: fallback displacement flush available\n"); - init_fallback_flush(); - } - - if (types & L1D_FLUSH_ORI) - pr_info("rfi-flush: ori type flush available\n"); - - if (types & L1D_FLUSH_MTTRIG) - pr_info("rfi-flush: mttrig type flush available\n"); - - enabled_flush_types = types; - - if (!cpu_mitigations_off() && !no_rfi_flush) - rfi_flush_enable(enable); -} - -void setup_entry_flush(bool enable) -{ - if (cpu_mitigations_off()) - return; - - if (!no_entry_flush) - entry_flush_enable(enable); -} - -void setup_uaccess_flush(bool enable) -{ - if (cpu_mitigations_off()) - return; - - if (!no_uaccess_flush) - uaccess_flush_enable(enable); -} - -#ifdef CONFIG_DEBUG_FS -static int rfi_flush_set(void *data, u64 val) -{ - bool enable; - - if (val == 1) - enable = true; - else if (val == 0) - enable = false; - else - return -EINVAL; - - /* Only do anything if we're changing state */ - if (enable != rfi_flush) - rfi_flush_enable(enable); - - return 0; -} - -static int rfi_flush_get(void *data, u64 *val) -{ - *val = rfi_flush ? 1 : 0; - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n"); - -static int entry_flush_set(void *data, u64 val) -{ - bool enable; - - if (val == 1) - enable = true; - else if (val == 0) - enable = false; - else - return -EINVAL; - - /* Only do anything if we're changing state */ - if (enable != entry_flush) - entry_flush_enable(enable); - - return 0; -} - -static int entry_flush_get(void *data, u64 *val) -{ - *val = entry_flush ? 1 : 0; - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n"); - -static int uaccess_flush_set(void *data, u64 val) -{ - bool enable; - - if (val == 1) - enable = true; - else if (val == 0) - enable = false; - else - return -EINVAL; - - /* Only do anything if we're changing state */ - if (enable != uaccess_flush) - uaccess_flush_enable(enable); - - return 0; -} - -static int uaccess_flush_get(void *data, u64 *val) -{ - *val = uaccess_flush ? 
1 : 0; - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n"); - -static __init int rfi_flush_debugfs_init(void) -{ - debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush); - debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush); - debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush); - return 0; -} -device_initcall(rfi_flush_debugfs_init); -#endif -#endif /* CONFIG_PPC_BOOK3S_64 */ diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h index 2559a681536e..f4aafa337c2e 100644 --- a/arch/powerpc/kernel/signal.h +++ b/arch/powerpc/kernel/signal.h @@ -19,6 +19,15 @@ extern int handle_signal32(struct ksignal *ksig, sigset_t *oldset, extern int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset, struct task_struct *tsk); +static inline int __get_user_sigset(sigset_t *dst, const sigset_t __user *src) +{ + BUILD_BUG_ON(sizeof(sigset_t) != sizeof(u64)); + + return __get_user(dst->sig[0], (u64 __user *)&src->sig[0]); +} +#define unsafe_get_user_sigset(dst, src, label) \ + unsafe_get_user((dst)->sig[0], (u64 __user *)&(src)->sig[0], label) + #ifdef CONFIG_VSX extern unsigned long copy_vsx_to_user(void __user *to, struct task_struct *task); @@ -53,6 +62,26 @@ unsigned long copy_ckfpr_from_user(struct task_struct *task, void __user *from); &buf[i], label);\ } while (0) +#define unsafe_copy_fpr_from_user(task, from, label) do { \ + struct task_struct *__t = task; \ + u64 __user *buf = (u64 __user *)from; \ + int i; \ + \ + for (i = 0; i < ELF_NFPREG - 1; i++) \ + unsafe_get_user(__t->thread.TS_FPR(i), &buf[i], label); \ + unsafe_get_user(__t->thread.fp_state.fpscr, &buf[i], label); \ +} while (0) + +#define unsafe_copy_vsx_from_user(task, from, label) do { \ + struct task_struct *__t = task; \ + u64 __user *buf = (u64 __user *)from; \ + int i; \ + \ + for (i = 0; i < ELF_NVSRHALFREG ; i++) \ + unsafe_get_user(__t->thread.fp_state.fpr[i][TS_VSRLOWOFFSET], \ + &buf[i], label); \ +} while (0) + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM #define unsafe_copy_ckfpr_to_user(to, task, label) do { \ struct task_struct *__t = task; \ @@ -73,6 +102,26 @@ unsigned long copy_ckfpr_from_user(struct task_struct *task, void __user *from); unsafe_put_user(__t->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET], \ &buf[i], label);\ } while (0) + +#define unsafe_copy_ckfpr_from_user(task, from, label) do { \ + struct task_struct *__t = task; \ + u64 __user *buf = (u64 __user *)from; \ + int i; \ + \ + for (i = 0; i < ELF_NFPREG - 1 ; i++) \ + unsafe_get_user(__t->thread.TS_CKFPR(i), &buf[i], label);\ + unsafe_get_user(__t->thread.ckfp_state.fpscr, &buf[i], failed); \ +} while (0) + +#define unsafe_copy_ckvsx_from_user(task, from, label) do { \ + struct task_struct *__t = task; \ + u64 __user *buf = (u64 __user *)from; \ + int i; \ + \ + for (i = 0; i < ELF_NVSRHALFREG ; i++) \ + unsafe_get_user(__t->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET], \ + &buf[i], label); \ +} while (0) #endif #elif defined(CONFIG_PPC_FPU_REGS) @@ -80,6 +129,10 @@ unsigned long copy_ckfpr_from_user(struct task_struct *task, void __user *from); unsafe_copy_to_user(to, (task)->thread.fp_state.fpr, \ ELF_NFPREG * sizeof(double), label) +#define unsafe_copy_fpr_from_user(task, from, label) \ + unsafe_copy_from_user((task)->thread.fp_state.fpr, from, \ + ELF_NFPREG * sizeof(double), label) + static inline unsigned long copy_fpr_to_user(void __user *to, struct task_struct *task) { @@ 
-115,6 +168,8 @@ copy_ckfpr_from_user(struct task_struct *task, void __user *from) #else #define unsafe_copy_fpr_to_user(to, task, label) do { } while (0) +#define unsafe_copy_fpr_from_user(task, from, label) do { } while (0) + static inline unsigned long copy_fpr_to_user(void __user *to, struct task_struct *task) { diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 75ee918a120a..8f05ed0da292 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -83,24 +83,17 @@ * implementation that makes things simple for little endian only) */ #define unsafe_put_sigset_t unsafe_put_compat_sigset - -static inline int get_sigset_t(sigset_t *set, - const compat_sigset_t __user *uset) -{ - return get_compat_sigset(set, uset); -} +#define unsafe_get_sigset_t unsafe_get_compat_sigset #define to_user_ptr(p) ptr_to_compat(p) #define from_user_ptr(p) compat_ptr(p) static __always_inline int -save_general_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame) +__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame) { elf_greg_t64 *gregs = (elf_greg_t64 *)regs; int val, i; - WARN_ON(!FULL_REGS(regs)); - for (i = 0; i <= PT_RESULT; i ++) { /* Force usr to alway see softe as 1 (interrupts enabled) */ if (i == PT_SOFTE) @@ -116,8 +109,8 @@ failed: return 1; } -static inline int restore_general_regs(struct pt_regs *regs, - struct mcontext __user *sr) +static __always_inline int +__unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr) { elf_greg_t64 *gregs = (elf_greg_t64 *)regs; int i; @@ -125,10 +118,12 @@ static inline int restore_general_regs(struct pt_regs *regs, for (i = 0; i <= PT_RESULT; i++) { if ((i == PT_MSR) || (i == PT_SOFTE)) continue; - if (__get_user(gregs[i], &sr->mc_gregs[i])) - return -EFAULT; + unsafe_get_user(gregs[i], &sr->mc_gregs[i], failed); } return 0; + +failed: + return 1; } #else /* CONFIG_PPC64 */ @@ -142,18 +137,14 @@ static inline int restore_general_regs(struct pt_regs *regs, unsafe_copy_to_user(__us, __s, sizeof(*__us), label); \ } while (0) -static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset) -{ - return copy_from_user(set, uset, sizeof(*uset)); -} +#define unsafe_get_sigset_t unsafe_get_user_sigset #define to_user_ptr(p) ((unsigned long)(p)) #define from_user_ptr(p) ((void __user *)(p)) static __always_inline int -save_general_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame) +__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame) { - WARN_ON(!FULL_REGS(regs)); unsafe_copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE, failed); return 0; @@ -161,23 +152,30 @@ failed: return 1; } -static inline int restore_general_regs(struct pt_regs *regs, - struct mcontext __user *sr) +static __always_inline +int __unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr) { /* copy up to but not including MSR */ - if (__copy_from_user(regs, &sr->mc_gregs, - PT_MSR * sizeof(elf_greg_t))) - return -EFAULT; + unsafe_copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t), failed); + /* copy from orig_r3 (the word after the MSR) up to the end */ - if (__copy_from_user(®s->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3], - GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t))) - return -EFAULT; + unsafe_copy_from_user(®s->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3], + GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t), failed); + return 0; + +failed: + return 1; } #endif #define unsafe_save_general_regs(regs, frame, 
label) do { \ - if (save_general_regs_unsafe(regs, frame)) \ + if (__unsafe_save_general_regs(regs, frame)) \ + goto label; \ +} while (0) + +#define unsafe_restore_general_regs(regs, frame, label) do { \ + if (__unsafe_restore_general_regs(regs, frame)) \ goto label; \ } while (0) @@ -260,8 +258,8 @@ static void prepare_save_user_regs(int ctx_has_vsx_region) #endif } -static int save_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame, - struct mcontext __user *tm_frame, int ctx_has_vsx_region) +static int __unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, + struct mcontext __user *tm_frame, int ctx_has_vsx_region) { unsigned long msr = regs->msr; @@ -338,7 +336,7 @@ failed: } #define unsafe_save_user_regs(regs, frame, tm_frame, has_vsx, label) do { \ - if (save_user_regs_unsafe(regs, frame, tm_frame, has_vsx)) \ + if (__unsafe_save_user_regs(regs, frame, tm_frame, has_vsx)) \ goto label; \ } while (0) @@ -350,7 +348,7 @@ failed: * We also save the transactional registers to a second ucontext in the * frame. * - * See save_user_regs_unsafe() and signal_64.c:setup_tm_sigcontexts(). + * See __unsafe_save_user_regs() and signal_64.c:setup_tm_sigcontexts(). */ static void prepare_save_tm_user_regs(void) { @@ -441,7 +439,7 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user #endif /* CONFIG_VSX */ #ifdef CONFIG_SPE /* SPE regs are not checkpointed with TM, so this section is - * simply the same as in save_user_regs_unsafe(). + * simply the same as in __unsafe_save_user_regs(). */ if (current->thread.used_spe) { unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr, @@ -485,26 +483,25 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user static long restore_user_regs(struct pt_regs *regs, struct mcontext __user *sr, int sig) { - long err; unsigned int save_r2 = 0; unsigned long msr; #ifdef CONFIG_VSX int i; #endif + if (!user_read_access_begin(sr, sizeof(*sr))) + return 1; /* * restore general registers but not including MSR or SOFTE. 
Also * take care of keeping r2 (TLS) intact if not a signal */ if (!sig) save_r2 = (unsigned int)regs->gpr[2]; - err = restore_general_regs(regs, sr); + unsafe_restore_general_regs(regs, sr, failed); set_trap_norestart(regs); - err |= __get_user(msr, &sr->mc_gregs[PT_MSR]); + unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed); if (!sig) regs->gpr[2] = (unsigned long) save_r2; - if (err) - return 1; /* if doing signal return, restore the previous little-endian mode */ if (sig) @@ -518,22 +515,19 @@ static long restore_user_regs(struct pt_regs *regs, regs->msr &= ~MSR_VEC; if (msr & MSR_VEC) { /* restore altivec registers from the stack */ - if (__copy_from_user(¤t->thread.vr_state, &sr->mc_vregs, - sizeof(sr->mc_vregs))) - return 1; + unsafe_copy_from_user(¤t->thread.vr_state, &sr->mc_vregs, + sizeof(sr->mc_vregs), failed); current->thread.used_vr = true; } else if (current->thread.used_vr) memset(¤t->thread.vr_state, 0, ELF_NVRREG * sizeof(vector128)); /* Always get VRSAVE back */ - if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32])) - return 1; + unsafe_get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32], failed); if (cpu_has_feature(CPU_FTR_ALTIVEC)) mtspr(SPRN_VRSAVE, current->thread.vrsave); #endif /* CONFIG_ALTIVEC */ - if (copy_fpr_from_user(current, &sr->mc_fregs)) - return 1; + unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed); #ifdef CONFIG_VSX /* @@ -546,8 +540,7 @@ static long restore_user_regs(struct pt_regs *regs, * Restore altivec registers from the stack to a local * buffer, then write this out to the thread_struct */ - if (copy_vsx_from_user(current, &sr->mc_vsregs)) - return 1; + unsafe_copy_vsx_from_user(current, &sr->mc_vsregs, failed); current->thread.used_vsr = true; } else if (current->thread.used_vsr) for (i = 0; i < 32 ; i++) @@ -565,19 +558,22 @@ static long restore_user_regs(struct pt_regs *regs, regs->msr &= ~MSR_SPE; if (msr & MSR_SPE) { /* restore spe registers from the stack */ - if (__copy_from_user(current->thread.evr, &sr->mc_vregs, - ELF_NEVRREG * sizeof(u32))) - return 1; + unsafe_copy_from_user(current->thread.evr, &sr->mc_vregs, + ELF_NEVRREG * sizeof(u32), failed); current->thread.used_spe = true; } else if (current->thread.used_spe) memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32)); /* Always get SPEFSCR back */ - if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG)) - return 1; + unsafe_get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed); #endif /* CONFIG_SPE */ + user_read_access_end(); return 0; + +failed: + user_read_access_end(); + return 1; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM @@ -590,7 +586,6 @@ static long restore_tm_user_regs(struct pt_regs *regs, struct mcontext __user *sr, struct mcontext __user *tm_sr) { - long err; unsigned long msr, msr_hi; #ifdef CONFIG_VSX int i; @@ -605,15 +600,13 @@ static long restore_tm_user_regs(struct pt_regs *regs, * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR * were set by the signal delivery. 
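The signal_32.c rework above is built around a single user-access window: restore_user_regs() now calls user_read_access_begin() once, uses the unsafe_get_user()/unsafe_copy_from_user() variants that branch to a local failed: label on a fault, and closes the window on both the success and failure paths. A hedged sketch of that shape with a made-up two-field context structure:

#include <linux/uaccess.h>

struct demo_ctx { unsigned long a; unsigned long b; };

/* Returns 0 on success, 1 on fault, mirroring restore_user_regs(). */
static long demo_restore(struct demo_ctx *dst, const struct demo_ctx __user *src)
{
	if (!user_read_access_begin(src, sizeof(*src)))
		return 1;

	unsafe_get_user(dst->a, &src->a, failed);
	unsafe_get_user(dst->b, &src->b, failed);

	user_read_access_end();
	return 0;

failed:
	user_read_access_end();
	return 1;
}

Opening the window once avoids repeatedly enabling and disabling user access around every individual __get_user() call, which is the point of the conversion.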
*/ - err = restore_general_regs(regs, tm_sr); - err |= restore_general_regs(¤t->thread.ckpt_regs, sr); - - err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]); - - err |= __get_user(msr, &sr->mc_gregs[PT_MSR]); - if (err) + if (!user_read_access_begin(sr, sizeof(*sr))) return 1; + unsafe_restore_general_regs(¤t->thread.ckpt_regs, sr, failed); + unsafe_get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP], failed); + unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed); + /* Restore the previous little-endian mode */ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); @@ -621,12 +614,8 @@ static long restore_tm_user_regs(struct pt_regs *regs, regs->msr &= ~MSR_VEC; if (msr & MSR_VEC) { /* restore altivec registers from the stack */ - if (__copy_from_user(¤t->thread.ckvr_state, &sr->mc_vregs, - sizeof(sr->mc_vregs)) || - __copy_from_user(¤t->thread.vr_state, - &tm_sr->mc_vregs, - sizeof(sr->mc_vregs))) - return 1; + unsafe_copy_from_user(¤t->thread.ckvr_state, &sr->mc_vregs, + sizeof(sr->mc_vregs), failed); current->thread.used_vr = true; } else if (current->thread.used_vr) { memset(¤t->thread.vr_state, 0, @@ -636,20 +625,15 @@ static long restore_tm_user_regs(struct pt_regs *regs, } /* Always get VRSAVE back */ - if (__get_user(current->thread.ckvrsave, - (u32 __user *)&sr->mc_vregs[32]) || - __get_user(current->thread.vrsave, - (u32 __user *)&tm_sr->mc_vregs[32])) - return 1; + unsafe_get_user(current->thread.ckvrsave, + (u32 __user *)&sr->mc_vregs[32], failed); if (cpu_has_feature(CPU_FTR_ALTIVEC)) mtspr(SPRN_VRSAVE, current->thread.ckvrsave); #endif /* CONFIG_ALTIVEC */ regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1); - if (copy_fpr_from_user(current, &sr->mc_fregs) || - copy_ckfpr_from_user(current, &tm_sr->mc_fregs)) - return 1; + unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed); #ifdef CONFIG_VSX regs->msr &= ~MSR_VSX; @@ -658,9 +642,7 @@ static long restore_tm_user_regs(struct pt_regs *regs, * Restore altivec registers from the stack to a local * buffer, then write this out to the thread_struct */ - if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) || - copy_ckvsx_from_user(current, &sr->mc_vsregs)) - return 1; + unsafe_copy_ckvsx_from_user(current, &sr->mc_vsregs, failed); current->thread.used_vsr = true; } else if (current->thread.used_vsr) for (i = 0; i < 32 ; i++) { @@ -675,23 +657,54 @@ static long restore_tm_user_regs(struct pt_regs *regs, */ regs->msr &= ~MSR_SPE; if (msr & MSR_SPE) { - if (__copy_from_user(current->thread.evr, &sr->mc_vregs, - ELF_NEVRREG * sizeof(u32))) - return 1; + unsafe_copy_from_user(current->thread.evr, &sr->mc_vregs, + ELF_NEVRREG * sizeof(u32), failed); current->thread.used_spe = true; } else if (current->thread.used_spe) memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32)); /* Always get SPEFSCR back */ - if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs - + ELF_NEVRREG)) - return 1; + unsafe_get_user(current->thread.spefscr, + (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed); #endif /* CONFIG_SPE */ - /* Get the top half of the MSR from the user context */ - if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR])) + user_read_access_end(); + + if (!user_read_access_begin(tm_sr, sizeof(*tm_sr))) return 1; + + unsafe_restore_general_regs(regs, tm_sr, failed); + +#ifdef CONFIG_ALTIVEC + /* restore altivec registers from the stack */ + if (msr & MSR_VEC) + unsafe_copy_from_user(¤t->thread.vr_state, &tm_sr->mc_vregs, + sizeof(sr->mc_vregs), failed); + + /* Always get VRSAVE back */ + 
unsafe_get_user(current->thread.vrsave, + (u32 __user *)&tm_sr->mc_vregs[32], failed); +#endif /* CONFIG_ALTIVEC */ + + unsafe_copy_ckfpr_from_user(current, &tm_sr->mc_fregs, failed); + +#ifdef CONFIG_VSX + if (msr & MSR_VSX) { + /* + * Restore altivec registers from the stack to a local + * buffer, then write this out to the thread_struct + */ + unsafe_copy_vsx_from_user(current, &tm_sr->mc_vsregs, failed); + current->thread.used_vsr = true; + } +#endif /* CONFIG_VSX */ + + /* Get the top half of the MSR from the user context */ + unsafe_get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR], failed); msr_hi <<= 32; + + user_read_access_end(); + /* If TM bits are set to the reserved value, it's an invalid context */ if (MSR_TM_RESV(msr_hi)) return 1; @@ -739,6 +752,16 @@ static long restore_tm_user_regs(struct pt_regs *regs, preempt_enable(); return 0; + +failed: + user_read_access_end(); + return 1; +} +#else +static long restore_tm_user_regs(struct pt_regs *regs, struct mcontext __user *sr, + struct mcontext __user *tm_sr) +{ + return 0; } #endif @@ -775,7 +798,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset, else prepare_save_user_regs(1); - if (!user_write_access_begin(frame, sizeof(*frame))) + if (!user_access_begin(frame, sizeof(*frame))) goto badframe; /* Put the siginfo & fill in most of the ucontext */ @@ -809,17 +832,15 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset, unsafe_put_user(PPC_INST_ADDI + __NR_rt_sigreturn, &mctx->mc_pad[0], failed); unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed); + asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0])); } unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed); - user_write_access_end(); + user_access_end(); if (copy_siginfo_to_user(&frame->info, &ksig->info)) goto badframe; - if (tramp == (unsigned long)mctx->mc_pad) - flush_icache_range(tramp, tramp + 2 * sizeof(unsigned long)); - regs->link = tramp; #ifdef CONFIG_PPC_FPU_REGS @@ -844,7 +865,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset, return 0; failed: - user_write_access_end(); + user_access_end(); badframe: signal_fault(tsk, regs, "handle_rt_signal32", frame); @@ -879,7 +900,7 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset, else prepare_save_user_regs(1); - if (!user_write_access_begin(frame, sizeof(*frame))) + if (!user_access_begin(frame, sizeof(*frame))) goto badframe; sc = (struct sigcontext __user *) &frame->sctx; @@ -908,11 +929,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset, /* Set up the sigreturn trampoline: li r0,sigret; sc */ unsafe_put_user(PPC_INST_ADDI + __NR_sigreturn, &mctx->mc_pad[0], failed); unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed); + asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0])); } - user_write_access_end(); - - if (tramp == (unsigned long)mctx->mc_pad) - flush_icache_range(tramp, tramp + 2 * sizeof(unsigned long)); + user_access_end(); regs->link = tramp; @@ -935,7 +954,7 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset, return 0; failed: - user_write_access_end(); + user_access_end(); badframe: signal_fault(tsk, regs, "handle_signal32", frame); @@ -948,28 +967,31 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sigset_t set; struct mcontext __user *mcp; - if (get_sigset_t(&set, &ucp->uc_sigmask)) + if (!user_read_access_begin(ucp, sizeof(*ucp))) return -EFAULT; + + unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed); #ifdef CONFIG_PPC64 { u32 cmcp; - if (__get_user(cmcp, 
&ucp->uc_regs)) - return -EFAULT; + unsafe_get_user(cmcp, &ucp->uc_regs, failed); mcp = (struct mcontext __user *)(u64)cmcp; - /* no need to check access_ok(mcp), since mcp < 4GB */ } #else - if (__get_user(mcp, &ucp->uc_regs)) - return -EFAULT; - if (!access_ok(mcp, sizeof(*mcp))) - return -EFAULT; + unsafe_get_user(mcp, &ucp->uc_regs, failed); #endif + user_read_access_end(); + set_current_blocked(&set); if (restore_user_regs(regs, mcp, sig)) return -EFAULT; return 0; + +failed: + user_read_access_end(); + return -EFAULT; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM @@ -983,11 +1005,15 @@ static int do_setcontext_tm(struct ucontext __user *ucp, u32 cmcp; u32 tm_cmcp; - if (get_sigset_t(&set, &ucp->uc_sigmask)) + if (!user_read_access_begin(ucp, sizeof(*ucp))) return -EFAULT; - if (__get_user(cmcp, &ucp->uc_regs) || - __get_user(tm_cmcp, &tm_ucp->uc_regs)) + unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed); + unsafe_get_user(cmcp, &ucp->uc_regs, failed); + + user_read_access_end(); + + if (__get_user(tm_cmcp, &tm_ucp->uc_regs)) return -EFAULT; mcp = (struct mcontext __user *)(u64)cmcp; tm_mcp = (struct mcontext __user *)(u64)tm_cmcp; @@ -998,6 +1024,10 @@ static int do_setcontext_tm(struct ucontext __user *ucp, return -EFAULT; return 0; + +failed: + user_read_access_end(); + return -EFAULT; } #endif @@ -1315,19 +1345,16 @@ SYSCALL_DEFINE0(sigreturn) struct sigcontext __user *sc; struct sigcontext sigctx; struct mcontext __user *sr; - void __user *addr; sigset_t set; -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM - struct mcontext __user *mcp, *tm_mcp; - unsigned long msr_hi; -#endif + struct mcontext __user *mcp; + struct mcontext __user *tm_mcp = NULL; + unsigned long long msr_hi = 0; /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE); sc = &sf->sctx; - addr = sc; if (copy_from_user(&sigctx, sc, sizeof(sigctx))) goto badframe; @@ -1343,31 +1370,32 @@ SYSCALL_DEFINE0(sigreturn) #endif set_current_blocked(&set); -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM mcp = (struct mcontext __user *)&sf->mctx; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM tm_mcp = (struct mcontext __user *)&sf->mctx_transact; if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR])) goto badframe; +#endif if (MSR_TM_ACTIVE(msr_hi<<32)) { if (!cpu_has_feature(CPU_FTR_TM)) goto badframe; if (restore_tm_user_regs(regs, mcp, tm_mcp)) goto badframe; - } else -#endif - { + } else { sr = (struct mcontext __user *)from_user_ptr(sigctx.regs); - addr = sr; - if (!access_ok(sr, sizeof(*sr)) - || restore_user_regs(regs, sr, 1)) - goto badframe; + if (restore_user_regs(regs, sr, 1)) { + signal_fault(current, regs, "sys_sigreturn", sr); + + force_sig(SIGSEGV); + return 0; + } } set_thread_flag(TIF_RESTOREALL); return 0; badframe: - signal_fault(current, regs, "sys_sigreturn", addr); + signal_fault(current, regs, "sys_sigreturn", sc); force_sig(SIGSEGV); return 0; diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index f9e4a1ac440f..dca66481d0c2 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -79,13 +79,36 @@ static elf_vrreg_t __user *sigcontext_vmx_regs(struct sigcontext __user *sc) } #endif +static void prepare_setup_sigcontext(struct task_struct *tsk) +{ +#ifdef CONFIG_ALTIVEC + /* save altivec registers */ + if (tsk->thread.used_vr) + flush_altivec_to_thread(tsk); + if (cpu_has_feature(CPU_FTR_ALTIVEC)) + tsk->thread.vrsave = mfspr(SPRN_VRSAVE); +#endif /* 
CONFIG_ALTIVEC */ + + flush_fp_to_thread(tsk); + +#ifdef CONFIG_VSX + if (tsk->thread.used_vsr) + flush_vsx_to_thread(tsk); +#endif /* CONFIG_VSX */ +} + /* * Set up the sigcontext for the signal frame. */ -static long setup_sigcontext(struct sigcontext __user *sc, - struct task_struct *tsk, int signr, sigset_t *set, - unsigned long handler, int ctx_has_vsx_region) +#define unsafe_setup_sigcontext(sc, tsk, signr, set, handler, ctx_has_vsx_region, label)\ +do { \ + if (__unsafe_setup_sigcontext(sc, tsk, signr, set, handler, ctx_has_vsx_region))\ + goto label; \ +} while (0) +static long notrace __unsafe_setup_sigcontext(struct sigcontext __user *sc, + struct task_struct *tsk, int signr, sigset_t *set, + unsigned long handler, int ctx_has_vsx_region) { /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the * process never used altivec yet (MSR_VEC is zero in pt_regs of @@ -97,25 +120,22 @@ static long setup_sigcontext(struct sigcontext __user *sc, */ #ifdef CONFIG_ALTIVEC elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc); - unsigned long vrsave; #endif struct pt_regs *regs = tsk->thread.regs; unsigned long msr = regs->msr; - long err = 0; /* Force usr to alway see softe as 1 (interrupts enabled) */ unsigned long softe = 0x1; BUG_ON(tsk != current); #ifdef CONFIG_ALTIVEC - err |= __put_user(v_regs, &sc->v_regs); + unsafe_put_user(v_regs, &sc->v_regs, efault_out); /* save altivec registers */ if (tsk->thread.used_vr) { - flush_altivec_to_thread(tsk); /* Copy 33 vec registers (vr0..31 and vscr) to the stack */ - err |= __copy_to_user(v_regs, &tsk->thread.vr_state, - 33 * sizeof(vector128)); + unsafe_copy_to_user(v_regs, &tsk->thread.vr_state, + 33 * sizeof(vector128), efault_out); /* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg) * contains valid data. */ @@ -124,19 +144,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, /* We always copy to/from vrsave, it's 0 if we don't have or don't * use altivec. */ - vrsave = 0; - if (cpu_has_feature(CPU_FTR_ALTIVEC)) { - vrsave = mfspr(SPRN_VRSAVE); - tsk->thread.vrsave = vrsave; - } - - err |= __put_user(vrsave, (u32 __user *)&v_regs[33]); + unsafe_put_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out); #else /* CONFIG_ALTIVEC */ - err |= __put_user(0, &sc->v_regs); + unsafe_put_user(0, &sc->v_regs, efault_out); #endif /* CONFIG_ALTIVEC */ - flush_fp_to_thread(tsk); /* copy fpr regs and fpscr */ - err |= copy_fpr_to_user(&sc->fp_regs, tsk); + unsafe_copy_fpr_to_user(&sc->fp_regs, tsk, efault_out); /* * Clear the MSR VSX bit to indicate there is no valid state attached @@ -150,26 +163,27 @@ static long setup_sigcontext(struct sigcontext __user *sc, * VMX data. */ if (tsk->thread.used_vsr && ctx_has_vsx_region) { - flush_vsx_to_thread(tsk); v_regs += ELF_NVRREG; - err |= copy_vsx_to_user(v_regs, tsk); + unsafe_copy_vsx_to_user(v_regs, tsk, efault_out); /* set MSR_VSX in the MSR value in the frame to * indicate that sc->vs_reg) contains valid data. 
*/ msr |= MSR_VSX; } #endif /* CONFIG_VSX */ - err |= __put_user(&sc->gp_regs, &sc->regs); - WARN_ON(!FULL_REGS(regs)); - err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE); - err |= __put_user(msr, &sc->gp_regs[PT_MSR]); - err |= __put_user(softe, &sc->gp_regs[PT_SOFTE]); - err |= __put_user(signr, &sc->signal); - err |= __put_user(handler, &sc->handler); + unsafe_put_user(&sc->gp_regs, &sc->regs, efault_out); + unsafe_copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE, efault_out); + unsafe_put_user(msr, &sc->gp_regs[PT_MSR], efault_out); + unsafe_put_user(softe, &sc->gp_regs[PT_SOFTE], efault_out); + unsafe_put_user(signr, &sc->signal, efault_out); + unsafe_put_user(handler, &sc->handler, efault_out); if (set != NULL) - err |= __put_user(set->sig[0], &sc->oldmask); + unsafe_put_user(set->sig[0], &sc->oldmask, efault_out); - return err; + return 0; + +efault_out: + return -EFAULT; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM @@ -294,7 +308,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc, err |= __put_user(&sc->gp_regs, &sc->regs); err |= __put_user(&tm_sc->gp_regs, &tm_sc->regs); - WARN_ON(!FULL_REGS(regs)); err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE); err |= __copy_to_user(&sc->gp_regs, &tsk->thread.ckpt_regs, GP_REGS_SIZE); @@ -312,14 +325,16 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc, /* * Restore the sigcontext from the signal frame. */ - -static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig, - struct sigcontext __user *sc) +#define unsafe_restore_sigcontext(tsk, set, sig, sc, label) do { \ + if (__unsafe_restore_sigcontext(tsk, set, sig, sc)) \ + goto label; \ +} while (0) +static long notrace __unsafe_restore_sigcontext(struct task_struct *tsk, sigset_t *set, + int sig, struct sigcontext __user *sc) { #ifdef CONFIG_ALTIVEC elf_vrreg_t __user *v_regs; #endif - unsigned long err = 0; unsigned long save_r13 = 0; unsigned long msr; struct pt_regs *regs = tsk->thread.regs; @@ -334,27 +349,27 @@ static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig, save_r13 = regs->gpr[13]; /* copy the GPRs */ - err |= __copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr)); - err |= __get_user(regs->nip, &sc->gp_regs[PT_NIP]); + unsafe_copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr), efault_out); + unsafe_get_user(regs->nip, &sc->gp_regs[PT_NIP], efault_out); /* get MSR separately, transfer the LE bit if doing signal return */ - err |= __get_user(msr, &sc->gp_regs[PT_MSR]); + unsafe_get_user(msr, &sc->gp_regs[PT_MSR], efault_out); if (sig) regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); - err |= __get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3]); - err |= __get_user(regs->ctr, &sc->gp_regs[PT_CTR]); - err |= __get_user(regs->link, &sc->gp_regs[PT_LNK]); - err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]); - err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]); + unsafe_get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3], efault_out); + unsafe_get_user(regs->ctr, &sc->gp_regs[PT_CTR], efault_out); + unsafe_get_user(regs->link, &sc->gp_regs[PT_LNK], efault_out); + unsafe_get_user(regs->xer, &sc->gp_regs[PT_XER], efault_out); + unsafe_get_user(regs->ccr, &sc->gp_regs[PT_CCR], efault_out); /* Don't allow userspace to set SOFTE */ set_trap_norestart(regs); - err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]); - err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]); - err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]); + unsafe_get_user(regs->dar, &sc->gp_regs[PT_DAR], efault_out); + 
unsafe_get_user(regs->dsisr, &sc->gp_regs[PT_DSISR], efault_out); + unsafe_get_user(regs->result, &sc->gp_regs[PT_RESULT], efault_out); if (!sig) regs->gpr[13] = save_r13; if (set != NULL) - err |= __get_user(set->sig[0], &sc->oldmask); + unsafe_get_user(set->sig[0], &sc->oldmask, efault_out); /* * Force reload of FP/VEC. @@ -364,29 +379,27 @@ static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig, regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX); #ifdef CONFIG_ALTIVEC - err |= __get_user(v_regs, &sc->v_regs); - if (err) - return err; + unsafe_get_user(v_regs, &sc->v_regs, efault_out); if (v_regs && !access_ok(v_regs, 34 * sizeof(vector128))) return -EFAULT; /* Copy 33 vec registers (vr0..31 and vscr) from the stack */ if (v_regs != NULL && (msr & MSR_VEC) != 0) { - err |= __copy_from_user(&tsk->thread.vr_state, v_regs, - 33 * sizeof(vector128)); + unsafe_copy_from_user(&tsk->thread.vr_state, v_regs, + 33 * sizeof(vector128), efault_out); tsk->thread.used_vr = true; } else if (tsk->thread.used_vr) { memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128)); } /* Always get VRSAVE back */ if (v_regs != NULL) - err |= __get_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33]); + unsafe_get_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out); else tsk->thread.vrsave = 0; if (cpu_has_feature(CPU_FTR_ALTIVEC)) mtspr(SPRN_VRSAVE, tsk->thread.vrsave); #endif /* CONFIG_ALTIVEC */ /* restore floating point */ - err |= copy_fpr_from_user(tsk, &sc->fp_regs); + unsafe_copy_fpr_from_user(tsk, &sc->fp_regs, efault_out); #ifdef CONFIG_VSX /* * Get additional VSX data. Update v_regs to point after the @@ -395,14 +408,17 @@ static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig, */ v_regs += ELF_NVRREG; if ((msr & MSR_VSX) != 0) { - err |= copy_vsx_from_user(tsk, v_regs); + unsafe_copy_vsx_from_user(tsk, v_regs, efault_out); tsk->thread.used_vsr = true; } else { for (i = 0; i < 32 ; i++) tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; } #endif - return err; + return 0; + +efault_out: + return -EFAULT; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM @@ -586,6 +602,12 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, return err; } +#else /* !CONFIG_PPC_TRANSACTIONAL_MEM */ +static long restore_tm_sigcontexts(struct task_struct *tsk, struct sigcontext __user *sc, + struct sigcontext __user *tm_sc) +{ + return -EINVAL; +} #endif /* @@ -655,12 +677,16 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, ctx_has_vsx_region = 1; if (old_ctx != NULL) { - if (!access_ok(old_ctx, ctx_size) - || setup_sigcontext(&old_ctx->uc_mcontext, current, 0, NULL, 0, - ctx_has_vsx_region) - || __copy_to_user(&old_ctx->uc_sigmask, - ¤t->blocked, sizeof(sigset_t))) + prepare_setup_sigcontext(current); + if (!user_write_access_begin(old_ctx, ctx_size)) return -EFAULT; + + unsafe_setup_sigcontext(&old_ctx->uc_mcontext, current, 0, NULL, + 0, ctx_has_vsx_region, efault_out); + unsafe_copy_to_user(&old_ctx->uc_sigmask, ¤t->blocked, + sizeof(sigset_t), efault_out); + + user_write_access_end(); } if (new_ctx == NULL) return 0; @@ -680,15 +706,25 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, * We kill the task with a SIGSEGV in this situation. 
*/ - if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set))) + if (__get_user_sigset(&set, &new_ctx->uc_sigmask)) do_exit(SIGSEGV); set_current_blocked(&set); - if (restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext)) + + if (!user_read_access_begin(new_ctx, ctx_size)) + return -EFAULT; + if (__unsafe_restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext)) { + user_read_access_end(); do_exit(SIGSEGV); + } + user_read_access_end(); /* This returns like rt_sigreturn */ set_thread_flag(TIF_RESTOREALL); return 0; + +efault_out: + user_write_access_end(); + return -EFAULT; } @@ -701,9 +737,7 @@ SYSCALL_DEFINE0(rt_sigreturn) struct pt_regs *regs = current_pt_regs(); struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1]; sigset_t set; -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM unsigned long msr; -#endif /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; @@ -711,52 +745,54 @@ SYSCALL_DEFINE0(rt_sigreturn) if (!access_ok(uc, sizeof(*uc))) goto badframe; - if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set))) + if (__get_user_sigset(&set, &uc->uc_sigmask)) goto badframe; set_current_blocked(&set); -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM - /* - * If there is a transactional state then throw it away. - * The purpose of a sigreturn is to destroy all traces of the - * signal frame, this includes any transactional state created - * within in. We only check for suspended as we can never be - * active in the kernel, we are active, there is nothing better to - * do than go ahead and Bad Thing later. - * The cause is not important as there will never be a - * recheckpoint so it's not user visible. - */ - if (MSR_TM_SUSPENDED(mfmsr())) - tm_reclaim_current(0); + if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM)) { + /* + * If there is a transactional state then throw it away. + * The purpose of a sigreturn is to destroy all traces of the + * signal frame, this includes any transactional state created + * within in. We only check for suspended as we can never be + * active in the kernel, we are active, there is nothing better to + * do than go ahead and Bad Thing later. + * The cause is not important as there will never be a + * recheckpoint so it's not user visible. + */ + if (MSR_TM_SUSPENDED(mfmsr())) + tm_reclaim_current(0); - /* - * Disable MSR[TS] bit also, so, if there is an exception in the - * code below (as a page fault in copy_ckvsx_to_user()), it does - * not recheckpoint this task if there was a context switch inside - * the exception. - * - * A major page fault can indirectly call schedule(). A reschedule - * process in the middle of an exception can have a side effect - * (Changing the CPU MSR[TS] state), since schedule() is called - * with the CPU MSR[TS] disable and returns with MSR[TS]=Suspended - * (switch_to() calls tm_recheckpoint() for the 'new' process). In - * this case, the process continues to be the same in the CPU, but - * the CPU state just changed. - * - * This can cause a TM Bad Thing, since the MSR in the stack will - * have the MSR[TS]=0, and this is what will be used to RFID. - * - * Clearing MSR[TS] state here will avoid a recheckpoint if there - * is any process reschedule in kernel space. The MSR[TS] state - * does not need to be saved also, since it will be replaced with - * the MSR[TS] that came from user context later, at - * restore_tm_sigcontexts. 
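The rt_sigreturn() hunk above trades the #ifdef CONFIG_PPC_TRANSACTIONAL_MEM block for if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM)), so the TM path is still parsed and type-checked in every configuration and simply compiled out as dead code when the option is off. A small illustrative sketch of the idiom with a hypothetical CONFIG_DEMO_FEATURE:

#include <linux/kconfig.h>
#include <linux/printk.h>

static void demo(void)
{
	/*
	 * Both branches are always parsed; when CONFIG_DEMO_FEATURE is not
	 * set, IS_ENABLED() evaluates to 0 and the block is optimised away.
	 */
	if (IS_ENABLED(CONFIG_DEMO_FEATURE))
		pr_info("feature path\n");
	else
		pr_info("fallback path\n");
}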
- */ - regs->msr &= ~MSR_TS_MASK; + /* + * Disable MSR[TS] bit also, so, if there is an exception in the + * code below (as a page fault in copy_ckvsx_to_user()), it does + * not recheckpoint this task if there was a context switch inside + * the exception. + * + * A major page fault can indirectly call schedule(). A reschedule + * process in the middle of an exception can have a side effect + * (Changing the CPU MSR[TS] state), since schedule() is called + * with the CPU MSR[TS] disable and returns with MSR[TS]=Suspended + * (switch_to() calls tm_recheckpoint() for the 'new' process). In + * this case, the process continues to be the same in the CPU, but + * the CPU state just changed. + * + * This can cause a TM Bad Thing, since the MSR in the stack will + * have the MSR[TS]=0, and this is what will be used to RFID. + * + * Clearing MSR[TS] state here will avoid a recheckpoint if there + * is any process reschedule in kernel space. The MSR[TS] state + * does not need to be saved also, since it will be replaced with + * the MSR[TS] that came from user context later, at + * restore_tm_sigcontexts. + */ + regs->msr &= ~MSR_TS_MASK; - if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR])) - goto badframe; - if (MSR_TM_ACTIVE(msr)) { + if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR])) + goto badframe; + } + + if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) && MSR_TM_ACTIVE(msr)) { /* We recheckpoint on return. */ struct ucontext __user *uc_transact; @@ -769,9 +805,7 @@ SYSCALL_DEFINE0(rt_sigreturn) if (restore_tm_sigcontexts(current, &uc->uc_mcontext, &uc_transact->uc_mcontext)) goto badframe; - } else -#endif - { + } else { /* * Fall through, for non-TM restore * @@ -785,8 +819,13 @@ SYSCALL_DEFINE0(rt_sigreturn) * causing a TM bad thing. */ current->thread.regs->msr &= ~MSR_TS_MASK; - if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext)) + if (!user_read_access_begin(&uc->uc_mcontext, sizeof(uc->uc_mcontext))) goto badframe; + + unsafe_restore_sigcontext(current, NULL, 1, &uc->uc_mcontext, + badframe_block); + + user_read_access_end(); } if (restore_altstack(&uc->uc_stack)) @@ -795,6 +834,8 @@ SYSCALL_DEFINE0(rt_sigreturn) set_thread_flag(TIF_RESTOREALL); return 0; +badframe_block: + user_read_access_end(); badframe: signal_fault(current, regs, "rt_sigreturn", uc); @@ -809,46 +850,57 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, unsigned long newsp = 0; long err = 0; struct pt_regs *regs = tsk->thread.regs; -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* Save the thread's msr before get_tm_stackpointer() changes it */ unsigned long msr = regs->msr; -#endif frame = get_sigframe(ksig, tsk, sizeof(*frame), 0); - if (!access_ok(frame, sizeof(*frame))) - goto badframe; - err |= __put_user(&frame->info, &frame->pinfo); - err |= __put_user(&frame->uc, &frame->puc); - err |= copy_siginfo_to_user(&frame->info, &ksig->info); - if (err) + /* + * This only applies when calling unsafe_setup_sigcontext() and must be + * called before opening the uaccess window. + */ + if (!MSR_TM_ACTIVE(msr)) + prepare_setup_sigcontext(tsk); + + if (!user_write_access_begin(frame, sizeof(*frame))) goto badframe; + unsafe_put_user(&frame->info, &frame->pinfo, badframe_block); + unsafe_put_user(&frame->uc, &frame->puc, badframe_block); + /* Create the ucontext. 
*/ - err |= __put_user(0, &frame->uc.uc_flags); - err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]); -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + unsafe_put_user(0, &frame->uc.uc_flags, badframe_block); + unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], badframe_block); + if (MSR_TM_ACTIVE(msr)) { +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* The ucontext_t passed to userland points to the second * ucontext_t (for transactional state) with its uc_link ptr. */ - err |= __put_user(&frame->uc_transact, &frame->uc.uc_link); + unsafe_put_user(&frame->uc_transact, &frame->uc.uc_link, badframe_block); + + user_write_access_end(); + err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext, &frame->uc_transact.uc_mcontext, tsk, ksig->sig, NULL, (unsigned long)ksig->ka.sa.sa_handler, msr); - } else + + if (!user_write_access_begin(&frame->uc.uc_sigmask, + sizeof(frame->uc.uc_sigmask))) + goto badframe; + #endif - { - err |= __put_user(0, &frame->uc.uc_link); - err |= setup_sigcontext(&frame->uc.uc_mcontext, tsk, ksig->sig, + } else { + unsafe_put_user(0, &frame->uc.uc_link, badframe_block); + unsafe_setup_sigcontext(&frame->uc.uc_mcontext, tsk, ksig->sig, NULL, (unsigned long)ksig->ka.sa.sa_handler, - 1); + 1, badframe_block); } - err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); - if (err) - goto badframe; + + unsafe_copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set), badframe_block); + user_write_access_end(); /* Make sure signal handler doesn't get spurious FP exceptions */ tsk->thread.fp_state.fpscr = 0; @@ -863,6 +915,11 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, regs->nip = (unsigned long) &frame->tramp[0]; } + + /* Save the siginfo outside of the unsafe block. */ + if (copy_siginfo_to_user(&frame->info, &ksig->info)) + goto badframe; + /* Allocate a dummy caller frame for the signal handler. */ newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE; err |= put_user(regs->gpr[1], (unsigned long __user *)newsp); @@ -902,6 +959,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, return 0; +badframe_block: + user_write_access_end(); badframe: signal_fault(current, regs, "handle_rt_signal64", frame); diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 5a4d59a1070d..2e05c783440a 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -83,7 +83,7 @@ DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map); DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map); DEFINE_PER_CPU(cpumask_var_t, cpu_core_map); -DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map); +static DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map); EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map); @@ -122,14 +122,14 @@ static struct thread_groups_list tgl[NR_CPUS] __initdata; * On big-cores system, thread_group_l1_cache_map for each CPU corresponds to * the set its siblings that share the L1-cache. */ -DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map); +static DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map); /* * On some big-cores system, thread_group_l2_cache_map for each CPU * corresponds to the set its siblings within the core that share the * L2-cache. 
*/ -DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map); +static DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map); /* SMP operations for this machine */ struct smp_ops_t *smp_ops; @@ -1057,17 +1057,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus) local_memory_node(numa_cpu_lookup_table[cpu])); } #endif - /* - * cpu_core_map is now more updated and exists only since - * its been exported for long. It only will have a snapshot - * of cpu_cpu_mask. - */ - cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu)); } /* Init the cpumasks so the boot CPU is related to itself */ cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid)); cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid)); + cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid)); if (has_coregroup_support()) cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid)); @@ -1078,6 +1073,20 @@ void __init smp_prepare_cpus(unsigned int max_cpus) cpu_smallcore_mask(boot_cpuid)); } + if (cpu_to_chip_id(boot_cpuid) != -1) { + int idx = num_possible_cpus() / threads_per_core; + + /* + * All threads of a core will all belong to the same core, + * chip_id_lookup_table will have one entry per core. + * Assumption: if boot_cpuid doesn't have a chip-id, then no + * other CPUs, will also not have chip-id. + */ + chip_id_lookup_table = kcalloc(idx, sizeof(int), GFP_KERNEL); + if (chip_id_lookup_table) + memset(chip_id_lookup_table, -1, sizeof(int) * idx); + } + if (smp_ops && smp_ops->probe) smp_ops->probe(); } @@ -1408,6 +1417,9 @@ static void remove_cpu_from_masks(int cpu) set_cpus_unrelated(cpu, i, cpu_smallcore_mask); } + for_each_cpu(i, cpu_core_mask(cpu)) + set_cpus_unrelated(cpu, i, cpu_core_mask); + if (has_coregroup_support()) { for_each_cpu(i, cpu_coregroup_mask(cpu)) set_cpus_unrelated(cpu, i, cpu_coregroup_mask); @@ -1468,8 +1480,11 @@ static void update_coregroup_mask(int cpu, cpumask_var_t *mask) static void add_cpu_to_masks(int cpu) { + struct cpumask *(*submask_fn)(int) = cpu_sibling_mask; int first_thread = cpu_first_thread_sibling(cpu); cpumask_var_t mask; + int chip_id = -1; + bool ret; int i; /* @@ -1485,12 +1500,39 @@ static void add_cpu_to_masks(int cpu) add_cpu_to_smallcore_masks(cpu); /* In CPU-hotplug path, hence use GFP_ATOMIC */ - alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu)); + ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu)); update_mask_by_l2(cpu, &mask); if (has_coregroup_support()) update_coregroup_mask(cpu, &mask); + if (chip_id_lookup_table && ret) + chip_id = cpu_to_chip_id(cpu); + + if (chip_id == -1) { + cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu)); + goto out; + } + + if (shared_caches) + submask_fn = cpu_l2_cache_mask; + + /* Update core_mask with all the CPUs that are part of submask */ + or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask); + + /* Skip all CPUs already part of current CPU core mask */ + cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu)); + + for_each_cpu(i, mask) { + if (chip_id == cpu_to_chip_id(i)) { + or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask); + cpumask_andnot(mask, mask, submask_fn(i)); + } else { + cpumask_andnot(mask, mask, cpu_core_mask(i)); + } + } + +out: free_cpumask_var(mask); } @@ -1521,6 +1563,9 @@ void start_secondary(void *unused) vdso_getcpu_init(); #endif + set_numa_node(numa_cpu_lookup_table[cpu]); + set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu])); + /* Update topology CPU masks */ add_cpu_to_masks(cpu); @@ -1539,9 +1584,6 @@ void start_secondary(void *unused) 
shared_caches = true; } - set_numa_node(numa_cpu_lookup_table[cpu]); - set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu])); - smp_wmb(); notify_cpu_starting(cpu); set_cpu_online(cpu, true); diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c index b6440657ef92..1deb1bf331dd 100644 --- a/arch/powerpc/kernel/stacktrace.c +++ b/arch/powerpc/kernel/stacktrace.c @@ -23,90 +23,56 @@ #include <asm/paca.h> -/* - * Save stack-backtrace addresses into a stack_trace buffer. - */ -static void save_context_stack(struct stack_trace *trace, unsigned long sp, - struct task_struct *tsk, int savesched) +void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, + struct task_struct *task, struct pt_regs *regs) { + unsigned long sp; + + if (regs && !consume_entry(cookie, regs->nip)) + return; + + if (regs) + sp = regs->gpr[1]; + else if (task == current) + sp = current_stack_frame(); + else + sp = task->thread.ksp; + for (;;) { unsigned long *stack = (unsigned long *) sp; unsigned long newsp, ip; - if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD)) + if (!validate_sp(sp, task, STACK_FRAME_OVERHEAD)) return; newsp = stack[0]; ip = stack[STACK_FRAME_LR_SAVE]; - if (savesched || !in_sched_functions(ip)) { - if (!trace->skip) - trace->entries[trace->nr_entries++] = ip; - else - trace->skip--; - } - - if (trace->nr_entries >= trace->max_entries) + if (!consume_entry(cookie, ip)) return; sp = newsp; } } -void save_stack_trace(struct stack_trace *trace) -{ - unsigned long sp; - - sp = current_stack_frame(); - - save_context_stack(trace, sp, current, 1); -} -EXPORT_SYMBOL_GPL(save_stack_trace); - -void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) -{ - unsigned long sp; - - if (!try_get_task_stack(tsk)) - return; - - if (tsk == current) - sp = current_stack_frame(); - else - sp = tsk->thread.ksp; - - save_context_stack(trace, sp, tsk, 0); - - put_task_stack(tsk); -} -EXPORT_SYMBOL_GPL(save_stack_trace_tsk); - -void -save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) -{ - save_context_stack(trace, regs->gpr[1], current, 0); -} -EXPORT_SYMBOL_GPL(save_stack_trace_regs); - -#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE /* * This function returns an error if it detects any unreliable features of the * stack. Otherwise it guarantees that the stack trace is reliable. * * If the task is not 'current', the caller *must* ensure the task is inactive. 
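The add_cpu_to_masks() change above builds a CPU's core mask from a smaller submask (SMT siblings, or the L2 mask once shared caches are detected) and then folds in every other online CPU that reports the same chip-id, pruning CPUs that are already covered. Roughly the same set algebra in a stand-alone user-space sketch, using plain bitmasks and a made-up two-chip topology rather than the kernel's cpumask API:

#include <stdio.h>
#include <stdint.h>

#define NCPUS 8

/* Hypothetical topology: CPUs 0-3 on chip 0, CPUs 4-7 on chip 1, SMT2 pairs. */
static const int chip_id[NCPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

int main(void)
{
	uint32_t online = 0xff;              /* all eight CPUs online */
	uint32_t submask[NCPUS];             /* stand-in for cpu_sibling_mask() */
	uint32_t core_mask[NCPUS] = { 0 };
	uint32_t todo;
	int cpu = 5, i;

	for (i = 0; i < NCPUS; i++)
		submask[i] = 3u << (i & ~1);     /* each CPU's SMT sibling pair */

	/* Pretend chip 0's CPUs already have their core masks built. */
	for (i = 0; i < 4; i++)
		core_mask[i] = 0x0f;

	/* Seed the new CPU's core mask with its own submask. */
	core_mask[cpu] |= submask[cpu];

	/* Candidates: online CPUs not already covered by the core mask. */
	todo = online & ~core_mask[cpu];

	for (i = 0; i < NCPUS; i++) {
		if (!(todo & (1u << i)))
			continue;
		if (chip_id[i] == chip_id[cpu]) {
			core_mask[cpu] |= submask[i]; /* same chip: merge its submask */
			todo &= ~submask[i];
		} else {
			todo &= ~core_mask[i];        /* other chip: skip its whole core */
		}
	}

	printf("core mask of cpu %d: 0x%02x\n", cpu, (unsigned)core_mask[cpu]);
	return 0;
}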
*/ -static int __save_stack_trace_tsk_reliable(struct task_struct *tsk, - struct stack_trace *trace) +int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, + void *cookie, struct task_struct *task) { unsigned long sp; unsigned long newsp; - unsigned long stack_page = (unsigned long)task_stack_page(tsk); + unsigned long stack_page = (unsigned long)task_stack_page(task); unsigned long stack_end; int graph_idx = 0; bool firstframe; stack_end = stack_page + THREAD_SIZE; - if (!is_idle_task(tsk)) { + if (!is_idle_task(task)) { /* * For user tasks, this is the SP value loaded on * kernel entry, see "PACAKSAVE(r13)" in _switch() and @@ -130,10 +96,10 @@ static int __save_stack_trace_tsk_reliable(struct task_struct *tsk, stack_end -= STACK_FRAME_OVERHEAD; } - if (tsk == current) + if (task == current) sp = current_stack_frame(); else - sp = tsk->thread.ksp; + sp = task->thread.ksp; if (sp < stack_page + sizeof(struct thread_struct) || sp > stack_end - STACK_FRAME_MIN_SIZE) { @@ -182,7 +148,7 @@ static int __save_stack_trace_tsk_reliable(struct task_struct *tsk, * FIXME: IMHO these tests do not belong in * arch-dependent code, they are generic. */ - ip = ftrace_graph_ret_addr(tsk, &graph_idx, ip, stack); + ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack); #ifdef CONFIG_KPROBES /* * Mark stacktraces with kretprobed functions on them @@ -192,36 +158,12 @@ static int __save_stack_trace_tsk_reliable(struct task_struct *tsk, return -EINVAL; #endif - if (trace->nr_entries >= trace->max_entries) - return -E2BIG; - if (!trace->skip) - trace->entries[trace->nr_entries++] = ip; - else - trace->skip--; + if (!consume_entry(cookie, ip)) + return -EINVAL; } return 0; } -int save_stack_trace_tsk_reliable(struct task_struct *tsk, - struct stack_trace *trace) -{ - int ret; - - /* - * If the task doesn't have a stack (e.g., a zombie), the stack is - * "reliably" empty. 
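The stacktrace.c rewrite above drops the old fill-a-buffer save_stack_trace() entry points in favour of arch_stack_walk(), which hands each return address to a consume_entry() callback and stops as soon as the callback returns false. A minimal user-space sketch of that consumer-driven pattern; the "stack" here is just a fake array of addresses, not a real PowerPC frame chain:

#include <stdbool.h>
#include <stdio.h>

typedef bool (*consume_fn)(void *cookie, unsigned long ip);

struct trace_buf {
	unsigned long entries[4];
	unsigned int nr;
};

/* Stop the walk once the fixed-size buffer is full. */
static bool store_entry(void *cookie, unsigned long ip)
{
	struct trace_buf *buf = cookie;

	if (buf->nr >= 4)
		return false;
	buf->entries[buf->nr++] = ip;
	return true;
}

/* Walk a pretend call chain, handing each "return address" to the consumer. */
static void fake_stack_walk(consume_fn consume, void *cookie)
{
	static const unsigned long chain[] = {
		0xc0000001, 0xc0000002, 0xc0000003,
		0xc0000004, 0xc0000005, 0xc0000006,
	};

	for (size_t i = 0; i < sizeof(chain) / sizeof(chain[0]); i++)
		if (!consume(cookie, chain[i]))
			return;
}

int main(void)
{
	struct trace_buf buf = { .nr = 0 };

	fake_stack_walk(store_entry, &buf);
	for (unsigned int i = 0; i < buf.nr; i++)
		printf("frame %u: 0x%lx\n", i, buf.entries[i]);
	return 0;
}

The same callback shape is what lets the generic stacktrace code handle entry skipping and bounded buffers without any arch-specific logic.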
- */ - if (!try_get_task_stack(tsk)) - return 0; - - ret = __save_stack_trace_tsk_reliable(tsk, trace); - - put_task_stack(tsk); - - return ret; -} -#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */ - #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) static void handle_backtrace_ipi(struct pt_regs *regs) { diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index 078608ec2e92..a552c9e68d7e 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c @@ -82,16 +82,8 @@ int ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct __kernel_old_timeval __user *tvp) { if ( (unsigned long)n >= 4096 ) - { - unsigned long __user *buffer = (unsigned long __user *)n; - if (!access_ok(buffer, 5*sizeof(unsigned long)) - || __get_user(n, buffer) - || __get_user(inp, ((fd_set __user * __user *)(buffer+1))) - || __get_user(outp, ((fd_set __user * __user *)(buffer+2))) - || __get_user(exp, ((fd_set __user * __user *)(buffer+3))) - || __get_user(tvp, ((struct __kernel_old_timeval __user * __user *)(buffer+4)))) - return -EFAULT; - } + return sys_old_select((void __user *)n); + return sys_select(n, inp, outp, exp, tvp); } #endif diff --git a/arch/powerpc/kernel/syscalls/Makefile b/arch/powerpc/kernel/syscalls/Makefile index 9e3be295dbba..5476f62eb80f 100644 --- a/arch/powerpc/kernel/syscalls/Makefile +++ b/arch/powerpc/kernel/syscalls/Makefile @@ -6,53 +6,38 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') syscall := $(src)/syscall.tbl -syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +syshdr := $(srctree)/scripts/syscallhdr.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ - cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ - '$(syshdr_abis_$(basetarget))' \ - '$(syshdr_pfx_$(basetarget))' \ - '$(syshdr_offset_$(basetarget))' + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr --abis $(abis) $< $@ quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))' \ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis $(abis) $< $@ -syshdr_abis_unistd_32 := common,nospu,32 +$(uapi)/unistd_32.h: abis := common,nospu,32 $(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -syshdr_abis_unistd_64 := common,nospu,64 +$(uapi)/unistd_64.h: abis := common,nospu,64 $(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -systbl_abis_syscall_table_32 := common,nospu,32 -systbl_abi_syscall_table_32 := 32 +$(kapi)/syscall_table_32.h: abis := common,nospu,32 $(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) -systbl_abis_syscall_table_64 := common,nospu,64 -systbl_abi_syscall_table_64 := 64 +$(kapi)/syscall_table_64.h: abis := common,nospu,64 $(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) -systbl_abis_syscall_table_c32 := common,nospu,32 -systbl_abi_syscall_table_c32 := c32 -$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) FORCE - $(call if_changed,systbl) - -systbl_abis_syscall_table_spu := common,spu -systbl_abi_syscall_table_spu := spu +$(kapi)/syscall_table_spu.h: abis := common,spu $(kapi)/syscall_table_spu.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_32.h unistd_64.h kapisyshdr-y += syscall_table_32.h \ syscall_table_64.h \ - syscall_table_c32.h \ 
syscall_table_spu.h uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index 0b2480cf3e47..2e68fbb57cc6 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -522,3 +522,7 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr +443 common quotactl_path sys_quotactl_path +444 common landlock_create_ruleset sys_landlock_create_ruleset +445 common landlock_add_rule sys_landlock_add_rule +446 common landlock_restrict_self sys_landlock_restrict_self diff --git a/arch/powerpc/kernel/syscalls/syscallhdr.sh b/arch/powerpc/kernel/syscalls/syscallhdr.sh deleted file mode 100644 index 02d6751f3be3..000000000000 --- a/arch/powerpc/kernel/syscalls/syscallhdr.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -prefix="$4" -offset="$5" - -fileguard=_UAPI_ASM_POWERPC_`basename "$out" | sed \ - -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ - -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - printf "#ifndef %s\n" "${fileguard}" - printf "#define %s\n" "${fileguard}" - printf "\n" - - nxt=0 - while read nr abi name entry compat ; do - if [ -z "$offset" ]; then - printf "#define __NR_%s%s\t%s\n" \ - "${prefix}" "${name}" "${nr}" - else - printf "#define __NR_%s%s\t(%s + %s)\n" \ - "${prefix}" "${name}" "${offset}" "${nr}" - fi - nxt=$((nr+1)) - done - - printf "\n" - printf "#ifdef __KERNEL__\n" - printf "#define __NR_syscalls\t%s\n" "${nxt}" - printf "#endif\n" - printf "\n" - printf "#endif /* %s */\n" "${fileguard}" -) > "$out" diff --git a/arch/powerpc/kernel/syscalls/syscalltbl.sh b/arch/powerpc/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index f7393a7b18aa..000000000000 --- a/arch/powerpc/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry compat ; do - if [ "$my_abi" = "c32" ] && [ ! 
-z "$compat" ]; then - emit $((nxt+offset)) $((nr+offset)) $compat - else - emit $((nxt+offset)) $((nr+offset)) $entry - fi - nxt=$((nr+1)) - done -) > "$out" diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S index d34276f3c495..cb3358886203 100644 --- a/arch/powerpc/kernel/systbl.S +++ b/arch/powerpc/kernel/systbl.S @@ -21,6 +21,7 @@ #define __SYSCALL(nr, entry) .long entry #endif +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) .globl sys_call_table sys_call_table: #ifdef CONFIG_PPC64 @@ -30,8 +31,10 @@ sys_call_table: #endif #ifdef CONFIG_COMPAT +#undef __SYSCALL_WITH_COMPAT +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) .globl compat_sys_call_table compat_sys_call_table: #define compat_sys_sigsuspend sys_sigsuspend -#include <asm/syscall_table_c32.h> +#include <asm/syscall_table_32.h> #endif diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index 42761ebec9f7..ffe9537195aa 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -68,7 +68,7 @@ ftrace_modify_code(unsigned long ip, struct ppc_inst old, struct ppc_inst new) */ /* read the text we want to modify */ - if (probe_kernel_read_inst(&replaced, (void *)ip)) + if (copy_inst_from_kernel_nofault(&replaced, (void *)ip)) return -EFAULT; /* Make sure it is what we expect it to be */ @@ -130,7 +130,7 @@ __ftrace_make_nop(struct module *mod, struct ppc_inst op, pop; /* read where this goes */ - if (probe_kernel_read_inst(&op, (void *)ip)) { + if (copy_inst_from_kernel_nofault(&op, (void *)ip)) { pr_err("Fetching opcode failed.\n"); return -EFAULT; } @@ -164,7 +164,7 @@ __ftrace_make_nop(struct module *mod, /* When using -mkernel_profile there is no load to jump over */ pop = ppc_inst(PPC_INST_NOP); - if (probe_kernel_read_inst(&op, (void *)(ip - 4))) { + if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) { pr_err("Fetching instruction at %lx failed.\n", ip - 4); return -EFAULT; } @@ -197,7 +197,7 @@ __ftrace_make_nop(struct module *mod, * Check what is in the next instruction. We can see ld r2,40(r1), but * on first pass after boot we will see mflr r0. 
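In the systbl.S hunk above, both tables are now generated from the single asm/syscall_table_32.h by redefining __SYSCALL_WITH_COMPAT between the two table definitions: it picks the native entry for sys_call_table and the compat entry for compat_sys_call_table. A small user-space sketch of the same redefine-and-expand trick, with a made-up three-entry table standing in for the generated header and strings standing in for the entry points:

#include <stdio.h>

/* Stand-in for the generated syscall_table_32.h: one macro call per syscall. */
#define EMIT_TABLE \
	__SYSCALL(0, sys_restart_syscall) \
	__SYSCALL_WITH_COMPAT(1, sys_exit, compat_sys_exit) \
	__SYSCALL_WITH_COMPAT(2, sys_fork, compat_sys_fork)

#define __SYSCALL(nr, entry) [nr] = #entry,

/* Native table: compat-capable entries pick their native half. */
#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
static const char *const sys_call_table[] = { EMIT_TABLE };
#undef __SYSCALL_WITH_COMPAT

/* Compat table: same "header", but compat-capable entries pick the compat half. */
#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat)
static const char *const compat_sys_call_table[] = { EMIT_TABLE };

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("%d: %-20s %s\n", i, sys_call_table[i],
		       compat_sys_call_table[i]);
	return 0;
}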
*/ - if (probe_kernel_read_inst(&op, (void *)(ip + 4))) { + if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) { pr_err("Fetching op failed.\n"); return -EFAULT; } @@ -349,7 +349,7 @@ static int setup_mcount_compiler_tramp(unsigned long tramp) return -1; /* New trampoline -- read where this goes */ - if (probe_kernel_read_inst(&op, (void *)tramp)) { + if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) { pr_debug("Fetching opcode failed.\n"); return -1; } @@ -399,7 +399,7 @@ static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr) struct ppc_inst op; /* Read where this goes */ - if (probe_kernel_read_inst(&op, (void *)ip)) { + if (copy_inst_from_kernel_nofault(&op, (void *)ip)) { pr_err("Fetching opcode failed.\n"); return -EFAULT; } @@ -526,10 +526,10 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) struct module *mod = rec->arch.mod; /* read where this goes */ - if (probe_kernel_read_inst(op, ip)) + if (copy_inst_from_kernel_nofault(op, ip)) return -EFAULT; - if (probe_kernel_read_inst(op + 1, ip + 4)) + if (copy_inst_from_kernel_nofault(op + 1, ip + 4)) return -EFAULT; if (!expected_nop_sequence(ip, op[0], op[1])) { @@ -592,7 +592,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) unsigned long ip = rec->ip; /* read where this goes */ - if (probe_kernel_read_inst(&op, (void *)ip)) + if (copy_inst_from_kernel_nofault(&op, (void *)ip)) return -EFAULT; /* It should be pointing to a nop */ @@ -648,7 +648,7 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr) } /* Make sure we have a nop */ - if (probe_kernel_read_inst(&op, ip)) { + if (copy_inst_from_kernel_nofault(&op, ip)) { pr_err("Unable to read ftrace location %p\n", ip); return -EFAULT; } @@ -726,7 +726,7 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, } /* read where this goes */ - if (probe_kernel_read_inst(&op, (void *)ip)) { + if (copy_inst_from_kernel_nofault(&op, (void *)ip)) { pr_err("Fetching opcode failed.\n"); return -EFAULT; } diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index a44a30b0688c..b4ab95c9e94a 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -53,7 +53,6 @@ #ifdef CONFIG_PPC64 #include <asm/firmware.h> #include <asm/processor.h> -#include <asm/tm.h> #endif #include <asm/kexec.h> #include <asm/ppc-opcode.h> @@ -222,7 +221,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, /* * system_reset_excption handles debugger, crash dump, panic, for 0x100 */ - if (TRAP(regs) == 0x100) + if (TRAP(regs) == INTERRUPT_SYSTEM_RESET) return; crash_fadump(regs, "die oops"); @@ -290,7 +289,7 @@ void die(const char *str, struct pt_regs *regs, long err) /* * system_reset_excption handles debugger, crash dump, panic, for 0x100 */ - if (TRAP(regs) != 0x100) { + if (TRAP(regs) != INTERRUPT_SYSTEM_RESET) { if (debugger(regs)) return; } @@ -405,7 +404,7 @@ void hv_nmi_check_nonrecoverable(struct pt_regs *regs) * Now test if the interrupt has hit a range that may be using * HSPRG1 without having RI=0 (i.e., an HSRR interrupt). The * problem ranges all run un-relocated. Test real and virt modes - * at the same time by droping the high bit of the nip (virt mode + * at the same time by dropping the high bit of the nip (virt mode * entry points still have the +0x4000 offset). 
*/ nip &= ~0xc000000000000000ULL; @@ -864,7 +863,7 @@ static void p9_hmi_special_emu(struct pt_regs *regs) unsigned long ea, msr, msr_mask; bool swap; - if (__get_user_inatomic(instr, (unsigned int __user *)regs->nip)) + if (__get_user(instr, (unsigned int __user *)regs->nip)) return; /* @@ -1079,6 +1078,16 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception) _exception(SIGTRAP, regs, TRAP_UNK, 0); } +DEFINE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception) +{ + printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", + regs->nip, regs->msr, regs->trap); + + _exception(SIGTRAP, regs, TRAP_UNK, 0); + + return 0; +} + DEFINE_INTERRUPT_HANDLER(instruction_breakpoint_exception) { if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, @@ -1309,7 +1318,6 @@ static int emulate_instruction(struct pt_regs *regs) if (!user_mode(regs)) return -EINVAL; - CHECK_FULL_REGS(regs); if (get_user(instword, (u32 __user *)(regs->nip))) return -EFAULT; @@ -1406,7 +1414,6 @@ int is_valid_bugaddr(unsigned long addr) static int emulate_math(struct pt_regs *regs) { int ret; - extern int do_mathemu(struct pt_regs *regs); ret = do_mathemu(regs); if (ret >= 0) @@ -1606,15 +1613,6 @@ bad: bad_page_fault(regs, sig); } -DEFINE_INTERRUPT_HANDLER(StackOverflow) -{ - pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n", - current->comm, task_pid_nr(current), regs->gpr[1]); - debugger(regs); - show_regs(regs); - panic("kernel stack overflow"); -} - DEFINE_INTERRUPT_HANDLER(stack_overflow_exception) { die("Kernel stack overflow", regs, SIGSEGV); @@ -1693,7 +1691,7 @@ DEFINE_INTERRUPT_HANDLER(facility_unavailable_exception) u8 status; bool hv; - hv = (TRAP(regs) == 0xf80); + hv = (TRAP(regs) == INTERRUPT_H_FAC_UNAVAIL); if (hv) value = mfspr(SPRN_HFSCR); else @@ -2170,11 +2168,14 @@ DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException) * in the MSR is 0. This indicates that SRR0/1 are live, and that * we therefore lost state by taking this exception. */ -void unrecoverable_exception(struct pt_regs *regs) +void __noreturn unrecoverable_exception(struct pt_regs *regs) { pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n", regs->trap, regs->nip, regs->msr); die("Unrecoverable exception", regs, SIGABRT); + /* die() should not return */ + for (;;) + ; } #if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x) @@ -2189,10 +2190,11 @@ void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs) return; } -DEFINE_INTERRUPT_HANDLER(WatchdogException) /* XXX NMI? async? 
*/ +DEFINE_INTERRUPT_HANDLER_NMI(WatchdogException) { printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n"); WatchdogHandler(regs); + return 0; } #endif diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c index e8a63713e655..186f69b11e94 100644 --- a/arch/powerpc/kernel/uprobes.c +++ b/arch/powerpc/kernel/uprobes.c @@ -41,6 +41,13 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, if (addr & 0x03) return -EINVAL; + if (cpu_has_feature(CPU_FTR_ARCH_31) && + ppc_inst_prefixed(auprobe->insn) && + (addr & 0x3f) == 60) { + pr_info_ratelimited("Cannot register a uprobe on 64 byte unaligned prefixed instruction\n"); + return -EINVAL; + } + return 0; } diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index e839a906fdf2..717f2c9a7573 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -18,6 +18,7 @@ #include <linux/security.h> #include <linux/memblock.h> #include <linux/syscalls.h> +#include <linux/time_namespace.h> #include <vdso/datapage.h> #include <asm/syscall.h> @@ -50,15 +51,21 @@ static union { } vdso_data_store __page_aligned_data; struct vdso_arch_data *vdso_data = &vdso_data_store.data; +enum vvar_pages { + VVAR_DATA_PAGE_OFFSET, + VVAR_TIMENS_PAGE_OFFSET, + VVAR_NR_PAGES, +}; + static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma, unsigned long text_size) { unsigned long new_size = new_vma->vm_end - new_vma->vm_start; - if (new_size != text_size + PAGE_SIZE) + if (new_size != text_size) return -EINVAL; - current->mm->context.vdso = (void __user *)new_vma->vm_start + PAGE_SIZE; + current->mm->context.vdso = (void __user *)new_vma->vm_start; return 0; } @@ -73,6 +80,14 @@ static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_str return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start); } +static vm_fault_t vvar_fault(const struct vm_special_mapping *sm, + struct vm_area_struct *vma, struct vm_fault *vmf); + +static struct vm_special_mapping vvar_spec __ro_after_init = { + .name = "[vvar]", + .fault = vvar_fault, +}; + static struct vm_special_mapping vdso32_spec __ro_after_init = { .name = "[vdso]", .mremap = vdso32_mremap, @@ -83,17 +98,105 @@ static struct vm_special_mapping vdso64_spec __ro_after_init = { .mremap = vdso64_mremap, }; +#ifdef CONFIG_TIME_NS +struct vdso_data *arch_get_vdso_data(void *vvar_page) +{ + return ((struct vdso_arch_data *)vvar_page)->data; +} + +/* + * The vvar mapping contains data for a specific time namespace, so when a task + * changes namespace we must unmap its vvar data for the old namespace. + * Subsequent faults will map in data for the new namespace. + * + * For more details see timens_setup_vdso_data(). + */ +int vdso_join_timens(struct task_struct *task, struct time_namespace *ns) +{ + struct mm_struct *mm = task->mm; + struct vm_area_struct *vma; + + mmap_read_lock(mm); + + for (vma = mm->mmap; vma; vma = vma->vm_next) { + unsigned long size = vma->vm_end - vma->vm_start; + + if (vma_is_special_mapping(vma, &vvar_spec)) + zap_page_range(vma, vma->vm_start, size); + } + + mmap_read_unlock(mm); + return 0; +} + +static struct page *find_timens_vvar_page(struct vm_area_struct *vma) +{ + if (likely(vma->vm_mm == current->mm)) + return current->nsproxy->time_ns->vvar_page; + + /* + * VM_PFNMAP | VM_IO protect .fault() handler from being called + * through interfaces like /proc/$pid/mem or + * process_vm_{readv,writev}() as long as there's no .access() + * in special_mapping_vmops. 
+ * For more details check_vma_flags() and __access_remote_vm() + */ + WARN(1, "vvar_page accessed remotely"); + + return NULL; +} +#else +static struct page *find_timens_vvar_page(struct vm_area_struct *vma) +{ + return NULL; +} +#endif + +static vm_fault_t vvar_fault(const struct vm_special_mapping *sm, + struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct page *timens_page = find_timens_vvar_page(vma); + unsigned long pfn; + + switch (vmf->pgoff) { + case VVAR_DATA_PAGE_OFFSET: + if (timens_page) + pfn = page_to_pfn(timens_page); + else + pfn = virt_to_pfn(vdso_data); + break; +#ifdef CONFIG_TIME_NS + case VVAR_TIMENS_PAGE_OFFSET: + /* + * If a task belongs to a time namespace then a namespace + * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and + * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET + * offset. + * See also the comment near timens_setup_vdso_data(). + */ + if (!timens_page) + return VM_FAULT_SIGBUS; + pfn = virt_to_pfn(vdso_data); + break; +#endif /* CONFIG_TIME_NS */ + default: + return VM_FAULT_SIGBUS; + } + + return vmf_insert_pfn(vma, vmf->address, pfn); +} + /* * This is called from binfmt_elf, we create the special vma for the * vDSO and insert it into the mm struct tree */ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { - struct mm_struct *mm = current->mm; + unsigned long vdso_size, vdso_base, mappings_size; struct vm_special_mapping *vdso_spec; + unsigned long vvar_size = VVAR_NR_PAGES * PAGE_SIZE; + struct mm_struct *mm = current->mm; struct vm_area_struct *vma; - unsigned long vdso_size; - unsigned long vdso_base; if (is_32bit_task()) { vdso_spec = &vdso32_spec; @@ -110,8 +213,8 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int vdso_base = 0; } - /* Add a page to the vdso size for the data page */ - vdso_size += PAGE_SIZE; + mappings_size = vdso_size + vvar_size; + mappings_size += (VDSO_ALIGNMENT - 1) & PAGE_MASK; /* * pick a base address for the vDSO in process space. We try to put it @@ -119,9 +222,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int * and end up putting it elsewhere. * Add enough to the size so that the result can be aligned. */ - vdso_base = get_unmapped_area(NULL, vdso_base, - vdso_size + ((VDSO_ALIGNMENT - 1) & PAGE_MASK), - 0, 0); + vdso_base = get_unmapped_area(NULL, vdso_base, mappings_size, 0, 0); if (IS_ERR_VALUE(vdso_base)) return vdso_base; @@ -133,7 +234,13 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int * install_special_mapping or the perf counter mmap tracking code * will fail to recognise it as a vDSO. */ - mm->context.vdso = (void __user *)vdso_base + PAGE_SIZE; + mm->context.vdso = (void __user *)vdso_base + vvar_size; + + vma = _install_special_mapping(mm, vdso_base, vvar_size, + VM_READ | VM_MAYREAD | VM_IO | + VM_DONTDUMP | VM_PFNMAP, &vvar_spec); + if (IS_ERR(vma)) + return PTR_ERR(vma); /* * our vma flags don't have VM_WRITE so by default, the process isn't @@ -145,9 +252,12 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int * It's fine to use that for setting breakpoints in the vDSO code * pages though. 
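With the vdso.c and linker-script changes above, the mapping is laid out as VVAR_NR_PAGES of vvar (the data page first, then the optional time-namespace page) followed by the vDSO text, which is why mm->context.vdso now points vvar_size past vdso_base and why _vdso_datapage moves to ". - 2 * PAGE_SIZE". A sketch of that address arithmetic with a made-up base address and an assumed 64K page size:

#include <stdio.h>

#define PAGE_SIZE	0x10000UL	/* hypothetical 64K pages */
#define VVAR_NR_PAGES	2		/* data page + time-namespace page */

int main(void)
{
	unsigned long vdso_base = 0x7fff80000000UL;	 /* made-up get_unmapped_area() result */
	unsigned long vvar_size = VVAR_NR_PAGES * PAGE_SIZE;
	unsigned long vdso_text = vdso_base + vvar_size; /* what mm->context.vdso points at */

	/* _vdso_datapage = text start - 2 * PAGE_SIZE, i.e. the first vvar page. */
	unsigned long datapage = vdso_text - 2 * PAGE_SIZE;
	unsigned long timens_page = vdso_text - 1 * PAGE_SIZE;

	printf("[vvar] data page:   0x%lx\n", datapage);
	printf("[vvar] timens page: 0x%lx\n", timens_page);
	printf("[vdso] text:        0x%lx\n", vdso_text);
	return 0;
}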
*/ - vma = _install_special_mapping(mm, vdso_base, vdso_size, + vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, vdso_spec); + if (IS_ERR(vma)) + do_munmap(mm, vdso_base, vvar_size, NULL); + return PTR_ERR_OR_ZERO(vma); } @@ -249,10 +359,8 @@ static struct page ** __init vdso_setup_pages(void *start, void *end) if (!pagelist) panic("%s: Cannot allocate page list for VDSO", __func__); - pagelist[0] = virt_to_page(vdso_data); - for (i = 0; i < pages; i++) - pagelist[i + 1] = virt_to_page(start + i * PAGE_SIZE); + pagelist[i] = virt_to_page(start + i * PAGE_SIZE); return pagelist; } diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S index a6e29f880e0e..d21d08140a5e 100644 --- a/arch/powerpc/kernel/vdso32/gettimeofday.S +++ b/arch/powerpc/kernel/vdso32/gettimeofday.S @@ -65,3 +65,14 @@ V_FUNCTION_END(__kernel_clock_getres) V_FUNCTION_BEGIN(__kernel_time) cvdso_call_time __c_kernel_time V_FUNCTION_END(__kernel_time) + +/* Routines for restoring integer registers, called by the compiler. */ +/* Called with r11 pointing to the stack header word of the caller of the */ +/* function, just beyond the end of the integer restore area. */ +_GLOBAL(_restgpr_31_x) +_GLOBAL(_rest32gpr_31_x) + lwz r0,4(r11) + lwz r31,-4(r11) + mtlr r0 + mr r1,r11 + blr diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S index a4b806b0d618..58e0099f70f4 100644 --- a/arch/powerpc/kernel/vdso32/vdso32.lds.S +++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S @@ -17,7 +17,7 @@ ENTRY(_start) SECTIONS { - PROVIDE(_vdso_datapage = . - PAGE_SIZE); + PROVIDE(_vdso_datapage = . - 2 * PAGE_SIZE); . = SIZEOF_HEADERS; .hash : { *(.hash) } :text diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S index 2f3c359cacd3..0288cad428b0 100644 --- a/arch/powerpc/kernel/vdso64/vdso64.lds.S +++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S @@ -17,7 +17,7 @@ ENTRY(_start) SECTIONS { - PROVIDE(_vdso_datapage = . - PAGE_SIZE); + PROVIDE(_vdso_datapage = . - 2 * PAGE_SIZE); . = SIZEOF_HEADERS; .hash : { *(.hash) } :text diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S index 801dc28fdcca..f5a52f444e36 100644 --- a/arch/powerpc/kernel/vector.S +++ b/arch/powerpc/kernel/vector.S @@ -67,9 +67,7 @@ _GLOBAL(load_up_altivec) #ifdef CONFIG_PPC32 mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */ oris r9,r9,MSR_VEC@h -#ifdef CONFIG_VMAP_STACK tovirt(r5, r5) -#endif #else ld r4,PACACURRENT(r13) addi r5,r4,THREAD /* Get THREAD */ diff --git a/arch/powerpc/kexec/Makefile b/arch/powerpc/kexec/Makefile index 4aff6846c772..b6c52608cb49 100644 --- a/arch/powerpc/kexec/Makefile +++ b/arch/powerpc/kexec/Makefile @@ -9,13 +9,6 @@ obj-$(CONFIG_PPC32) += relocate_32.o obj-$(CONFIG_KEXEC_FILE) += file_load.o ranges.o file_load_$(BITS).o elf_$(BITS).o -ifdef CONFIG_HAVE_IMA_KEXEC -ifdef CONFIG_IMA -obj-y += ima.o -endif -endif - - # Disable GCOV, KCOV & sanitizers in odd or sensitive code GCOV_PROFILE_core_$(BITS).o := n KCOV_INSTRUMENT_core_$(BITS).o := n diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c index c9a889880214..0196d0c211ac 100644 --- a/arch/powerpc/kexec/crash.c +++ b/arch/powerpc/kexec/crash.c @@ -24,6 +24,7 @@ #include <asm/smp.h> #include <asm/setjmp.h> #include <asm/debug.h> +#include <asm/interrupt.h> /* * The primary CPU waits a while for all secondary CPUs to enter. 
This is to @@ -336,7 +337,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs) * If we came in via system reset, wait a while for the secondary * CPUs to enter. */ - if (TRAP(regs) == 0x100) + if (TRAP(regs) == INTERRUPT_SYSTEM_RESET) mdelay(PRIMARY_TIMEOUT); crash_kexec_prepare_cpus(crashing_cpu); diff --git a/arch/powerpc/kexec/elf_64.c b/arch/powerpc/kexec/elf_64.c index 9842e33533df..eeb258002d1e 100644 --- a/arch/powerpc/kexec/elf_64.c +++ b/arch/powerpc/kexec/elf_64.c @@ -19,6 +19,7 @@ #include <linux/kexec.h> #include <linux/libfdt.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/of_fdt.h> #include <linux/slab.h> #include <linux/types.h> @@ -29,7 +30,6 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, unsigned long cmdline_len) { int ret; - unsigned int fdt_size; unsigned long kernel_load_addr; unsigned long initrd_load_addr = 0, fdt_load_addr; void *fdt; @@ -45,7 +45,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, ret = kexec_build_elf_info(kernel_buf, kernel_len, &ehdr, &elf_info); if (ret) - goto out; + return ERR_PTR(ret); if (image->type == KEXEC_TYPE_CRASH) { /* min & max buffer values for kdump case */ @@ -102,15 +102,10 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, pr_debug("Loaded initrd at 0x%lx\n", initrd_load_addr); } - fdt_size = kexec_fdt_totalsize_ppc64(image); - fdt = kmalloc(fdt_size, GFP_KERNEL); + fdt = of_kexec_alloc_and_setup_fdt(image, initrd_load_addr, + initrd_len, cmdline, + kexec_extra_fdt_size_ppc64(image)); if (!fdt) { - pr_err("Not enough memory for the device tree.\n"); - ret = -ENOMEM; - goto out; - } - ret = fdt_open_into(initial_boot_params, fdt, fdt_size); - if (ret < 0) { pr_err("Error setting up the new device tree.\n"); ret = -EINVAL; goto out; @@ -119,18 +114,22 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, ret = setup_new_fdt_ppc64(image, fdt, initrd_load_addr, initrd_len, cmdline); if (ret) - goto out; + goto out_free_fdt; fdt_pack(fdt); kbuf.buffer = fdt; - kbuf.bufsz = kbuf.memsz = fdt_size; + kbuf.bufsz = kbuf.memsz = fdt_totalsize(fdt); kbuf.buf_align = PAGE_SIZE; kbuf.top_down = true; kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; ret = kexec_add_buffer(&kbuf); if (ret) - goto out; + goto out_free_fdt; + + /* FDT will be freed in arch_kimage_file_post_load_cleanup */ + image->arch.fdt = fdt; + fdt_load_addr = kbuf.mem; pr_debug("Loaded device tree at 0x%lx\n", fdt_load_addr); @@ -141,12 +140,15 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, if (ret) pr_err("Error setting up the purgatory.\n"); + goto out; + +out_free_fdt: + kvfree(fdt); out: kfree(modified_cmdline); kexec_free_elf_info(&elf_info); - /* Make kimage_file_post_load_cleanup free the fdt buffer for us. */ - return ret ? ERR_PTR(ret) : fdt; + return ret ? 
ERR_PTR(ret) : NULL; } const struct kexec_file_ops kexec_elf64_ops = { diff --git a/arch/powerpc/kexec/file_load.c b/arch/powerpc/kexec/file_load.c index 9a232bc36c8f..4284f76cbef5 100644 --- a/arch/powerpc/kexec/file_load.c +++ b/arch/powerpc/kexec/file_load.c @@ -19,7 +19,6 @@ #include <linux/of_fdt.h> #include <linux/libfdt.h> #include <asm/setup.h> -#include <asm/ima.h> #define SLAVE_CODE_SIZE 256 /* First 0x100 bytes */ @@ -45,7 +44,7 @@ char *setup_kdump_cmdline(struct kimage *image, char *cmdline, return NULL; elfcorehdr_strlen = sprintf(cmdline_ptr, "elfcorehdr=0x%lx ", - image->arch.elfcorehdr_addr); + image->elf_load_addr); if (elfcorehdr_strlen + cmdline_len > COMMAND_LINE_SIZE) { pr_err("Appending elfcorehdr=<addr> exceeds cmdline size\n"); @@ -108,183 +107,3 @@ int setup_purgatory(struct kimage *image, const void *slave_code, return 0; } - -/** - * delete_fdt_mem_rsv - delete memory reservation with given address and size - * - * Return: 0 on success, or negative errno on error. - */ -int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size) -{ - int i, ret, num_rsvs = fdt_num_mem_rsv(fdt); - - for (i = 0; i < num_rsvs; i++) { - uint64_t rsv_start, rsv_size; - - ret = fdt_get_mem_rsv(fdt, i, &rsv_start, &rsv_size); - if (ret) { - pr_err("Malformed device tree.\n"); - return -EINVAL; - } - - if (rsv_start == start && rsv_size == size) { - ret = fdt_del_mem_rsv(fdt, i); - if (ret) { - pr_err("Error deleting device tree reservation.\n"); - return -EINVAL; - } - - return 0; - } - } - - return -ENOENT; -} - -/* - * setup_new_fdt - modify /chosen and memory reservation for the next kernel - * @image: kexec image being loaded. - * @fdt: Flattened device tree for the next kernel. - * @initrd_load_addr: Address where the next initrd will be loaded. - * @initrd_len: Size of the next initrd, or 0 if there will be none. - * @cmdline: Command line for the next kernel, or NULL if there will - * be none. - * - * Return: 0 on success, or negative errno on error. - */ -int setup_new_fdt(const struct kimage *image, void *fdt, - unsigned long initrd_load_addr, unsigned long initrd_len, - const char *cmdline) -{ - int ret, chosen_node; - const void *prop; - - /* Remove memory reservation for the current device tree. */ - ret = delete_fdt_mem_rsv(fdt, __pa(initial_boot_params), - fdt_totalsize(initial_boot_params)); - if (ret == 0) - pr_debug("Removed old device tree reservation.\n"); - else if (ret != -ENOENT) - return ret; - - chosen_node = fdt_path_offset(fdt, "/chosen"); - if (chosen_node == -FDT_ERR_NOTFOUND) { - chosen_node = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), - "chosen"); - if (chosen_node < 0) { - pr_err("Error creating /chosen.\n"); - return -EINVAL; - } - } else if (chosen_node < 0) { - pr_err("Malformed device tree: error reading /chosen.\n"); - return -EINVAL; - } - - /* Did we boot using an initrd? */ - prop = fdt_getprop(fdt, chosen_node, "linux,initrd-start", NULL); - if (prop) { - uint64_t tmp_start, tmp_end, tmp_size; - - tmp_start = fdt64_to_cpu(*((const fdt64_t *) prop)); - - prop = fdt_getprop(fdt, chosen_node, "linux,initrd-end", NULL); - if (!prop) { - pr_err("Malformed device tree.\n"); - return -EINVAL; - } - tmp_end = fdt64_to_cpu(*((const fdt64_t *) prop)); - - /* - * kexec reserves exact initrd size, while firmware may - * reserve a multiple of PAGE_SIZE, so check for both. 
- */ - tmp_size = tmp_end - tmp_start; - ret = delete_fdt_mem_rsv(fdt, tmp_start, tmp_size); - if (ret == -ENOENT) - ret = delete_fdt_mem_rsv(fdt, tmp_start, - round_up(tmp_size, PAGE_SIZE)); - if (ret == 0) - pr_debug("Removed old initrd reservation.\n"); - else if (ret != -ENOENT) - return ret; - - /* If there's no new initrd, delete the old initrd's info. */ - if (initrd_len == 0) { - ret = fdt_delprop(fdt, chosen_node, - "linux,initrd-start"); - if (ret) { - pr_err("Error deleting linux,initrd-start.\n"); - return -EINVAL; - } - - ret = fdt_delprop(fdt, chosen_node, "linux,initrd-end"); - if (ret) { - pr_err("Error deleting linux,initrd-end.\n"); - return -EINVAL; - } - } - } - - if (initrd_len) { - ret = fdt_setprop_u64(fdt, chosen_node, - "linux,initrd-start", - initrd_load_addr); - if (ret < 0) - goto err; - - /* initrd-end is the first address after the initrd image. */ - ret = fdt_setprop_u64(fdt, chosen_node, "linux,initrd-end", - initrd_load_addr + initrd_len); - if (ret < 0) - goto err; - - ret = fdt_add_mem_rsv(fdt, initrd_load_addr, initrd_len); - if (ret) { - pr_err("Error reserving initrd memory: %s\n", - fdt_strerror(ret)); - return -EINVAL; - } - } - - if (cmdline != NULL) { - ret = fdt_setprop_string(fdt, chosen_node, "bootargs", cmdline); - if (ret < 0) - goto err; - } else { - ret = fdt_delprop(fdt, chosen_node, "bootargs"); - if (ret && ret != -FDT_ERR_NOTFOUND) { - pr_err("Error deleting bootargs.\n"); - return -EINVAL; - } - } - - if (image->type == KEXEC_TYPE_CRASH) { - /* - * Avoid elfcorehdr from being stomped on in kdump kernel by - * setting up memory reserve map. - */ - ret = fdt_add_mem_rsv(fdt, image->arch.elfcorehdr_addr, - image->arch.elf_headers_sz); - if (ret) { - pr_err("Error reserving elfcorehdr memory: %s\n", - fdt_strerror(ret)); - goto err; - } - } - - ret = setup_ima_buffer(image, fdt, chosen_node); - if (ret) { - pr_err("Error setting up the new device tree.\n"); - return ret; - } - - ret = fdt_setprop(fdt, chosen_node, "linux,booted-from-kexec", NULL, 0); - if (ret) - goto err; - - return 0; - -err: - pr_err("Error setting up the new device tree.\n"); - return -EINVAL; -} diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c index 02b9e4d0dc40..5056e175ca2c 100644 --- a/arch/powerpc/kexec/file_load_64.c +++ b/arch/powerpc/kexec/file_load_64.c @@ -816,9 +816,9 @@ static int load_elfcorehdr_segment(struct kimage *image, struct kexec_buf *kbuf) goto out; } - image->arch.elfcorehdr_addr = kbuf->mem; - image->arch.elf_headers_sz = headers_sz; - image->arch.elf_headers = headers; + image->elf_load_addr = kbuf->mem; + image->elf_headers_sz = headers_sz; + image->elf_headers = headers; out: kfree(cmem); return ret; @@ -852,7 +852,7 @@ int load_crashdump_segments_ppc64(struct kimage *image, return ret; } pr_debug("Loaded elf core header at 0x%lx, bufsz=0x%lx memsz=0x%lx\n", - image->arch.elfcorehdr_addr, kbuf->bufsz, kbuf->memsz); + image->elf_load_addr, kbuf->bufsz, kbuf->memsz); return 0; } @@ -927,37 +927,114 @@ out: } /** - * kexec_fdt_totalsize_ppc64 - Return the estimated size needed to setup FDT - * for kexec/kdump kernel. - * @image: kexec image being loaded. + * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to + * setup FDT for kexec/kdump kernel. + * @image: kexec image being loaded. * - * Returns the estimated size needed for kexec/kdump kernel FDT. + * Returns the estimated extra size needed for kexec/kdump kernel FDT. 
*/ -unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image) +unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image) { - unsigned int fdt_size; u64 usm_entries; - /* - * The below estimate more than accounts for a typical kexec case where - * the additional space is to accommodate things like kexec cmdline, - * chosen node with properties for initrd start & end addresses and - * a property to indicate kexec boot.. - */ - fdt_size = fdt_totalsize(initial_boot_params) + (2 * COMMAND_LINE_SIZE); if (image->type != KEXEC_TYPE_CRASH) - return fdt_size; + return 0; /* - * For kdump kernel, also account for linux,usable-memory and + * For kdump kernel, account for linux,usable-memory and * linux,drconf-usable-memory properties. Get an approximate on the * number of usable memory entries and use for FDT size estimation. */ usm_entries = ((memblock_end_of_DRAM() / drmem_lmb_size()) + (2 * (resource_size(&crashk_res) / drmem_lmb_size()))); - fdt_size += (unsigned int)(usm_entries * sizeof(u64)); + return (unsigned int)(usm_entries * sizeof(u64)); +} + +/** + * add_node_props - Reads node properties from device node structure and add + * them to fdt. + * @fdt: Flattened device tree of the kernel + * @node_offset: offset of the node to add a property at + * @dn: device node pointer + * + * Returns 0 on success, negative errno on error. + */ +static int add_node_props(void *fdt, int node_offset, const struct device_node *dn) +{ + int ret = 0; + struct property *pp; - return fdt_size; + if (!dn) + return -EINVAL; + + for_each_property_of_node(dn, pp) { + ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length); + if (ret < 0) { + pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret)); + return ret; + } + } + return ret; +} + +/** + * update_cpus_node - Update cpus node of flattened device tree using of_root + * device node. + * @fdt: Flattened device tree of the kernel. + * + * Returns 0 on success, negative errno on error. 
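kexec_extra_fdt_size_ppc64() above now returns only the kdump-specific extra space, estimated from the number of usable-memory entries: one per LMB of RAM plus two per LMB of the crashkernel region, eight bytes each. A worked example of that estimate with hypothetical sizes:

#include <stdio.h>

int main(void)
{
	/* Hypothetical machine: 64 GiB of RAM, 256 MiB LMBs, 1 GiB crashkernel area. */
	unsigned long long dram_end = 64ULL << 30;
	unsigned long long lmb_size = 256ULL << 20;
	unsigned long long crashk_size = 1ULL << 30;

	unsigned long long usm_entries = dram_end / lmb_size +
					 2 * (crashk_size / lmb_size);
	unsigned long long extra_bytes = usm_entries * sizeof(unsigned long long);

	/* 256 + 8 entries -> 2112 extra bytes on top of the copied FDT. */
	printf("usm_entries = %llu, extra FDT bytes = %llu\n",
	       usm_entries, extra_bytes);
	return 0;
}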
+ */ +static int update_cpus_node(void *fdt) +{ + struct device_node *cpus_node, *dn; + int cpus_offset, cpus_subnode_offset, ret = 0; + + cpus_offset = fdt_path_offset(fdt, "/cpus"); + if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) { + pr_err("Malformed device tree: error reading /cpus node: %s\n", + fdt_strerror(cpus_offset)); + return cpus_offset; + } + + if (cpus_offset > 0) { + ret = fdt_del_node(fdt, cpus_offset); + if (ret < 0) { + pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret)); + return -EINVAL; + } + } + + /* Add cpus node to fdt */ + cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus"); + if (cpus_offset < 0) { + pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset)); + return -EINVAL; + } + + /* Add cpus node properties */ + cpus_node = of_find_node_by_path("/cpus"); + ret = add_node_props(fdt, cpus_offset, cpus_node); + of_node_put(cpus_node); + if (ret < 0) + return ret; + + /* Loop through all subnodes of cpus and add them to fdt */ + for_each_node_by_type(dn, "cpu") { + cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name); + if (cpus_subnode_offset < 0) { + pr_err("Unable to add %s subnode: %s\n", dn->full_name, + fdt_strerror(cpus_subnode_offset)); + ret = cpus_subnode_offset; + goto out; + } + + ret = add_node_props(fdt, cpus_subnode_offset, dn); + if (ret < 0) + goto out; + } +out: + of_node_put(dn); + return ret; } /** @@ -979,10 +1056,6 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, struct crash_mem *umem = NULL, *rmem = NULL; int i, nr_ranges, ret; - ret = setup_new_fdt(image, fdt, initrd_load_addr, initrd_len, cmdline); - if (ret) - goto out; - /* * Restrict memory usage for kdump kernel by setting up * usable memory ranges and memory reserve map. @@ -1020,6 +1093,11 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, } } + /* Update cpus nodes information to account hotplug CPUs. 
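update_cpus_node() above rebuilds the /cpus node of the kexec FDT from the live device tree so that hot-added CPUs are visible to the next kernel. A minimal user-space sketch of the same libfdt calls (fdt_add_subnode()/fdt_setprop()), assuming the system libfdt is available (link with -lfdt) and building a toy tree in a local buffer instead of copying properties from of_root:

#include <stdio.h>
#include <libfdt.h>	/* from the dtc/libfdt package */

int main(void)
{
	static char buf[4096];
	int cpus, cpu, ret;
	fdt32_t reg = cpu_to_fdt32(0);

	ret = fdt_create_empty_tree(buf, sizeof(buf));
	if (ret)
		return 1;

	/* Add a /cpus node, then one cpu subnode with a couple of properties. */
	cpus = fdt_add_subnode(buf, 0, "cpus");
	if (cpus < 0)
		return 1;

	cpu = fdt_add_subnode(buf, cpus, "cpu@0");
	if (cpu < 0)
		return 1;

	ret = fdt_setprop_string(buf, cpu, "device_type", "cpu");
	if (!ret)
		ret = fdt_setprop(buf, cpu, "reg", &reg, sizeof(reg));
	if (ret < 0) {
		fprintf(stderr, "fdt_setprop: %s\n", fdt_strerror(ret));
		return 1;
	}

	printf("built a %d-byte FDT with a /cpus/cpu@0 node\n",
	       (int)fdt_totalsize(buf));
	return 0;
}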
*/ + ret = update_cpus_node(fdt); + if (ret < 0) + goto out; + /* Update memory reserve map */ ret = get_reserved_memory_ranges(&rmem); if (ret) @@ -1142,9 +1220,12 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image) vfree(image->arch.backup_buf); image->arch.backup_buf = NULL; - vfree(image->arch.elf_headers); - image->arch.elf_headers = NULL; - image->arch.elf_headers_sz = 0; + vfree(image->elf_headers); + image->elf_headers = NULL; + image->elf_headers_sz = 0; + + kvfree(image->arch.fdt); + image->arch.fdt = NULL; return kexec_image_post_load_cleanup_default(image); } diff --git a/arch/powerpc/kexec/ima.c b/arch/powerpc/kexec/ima.c deleted file mode 100644 index 720e50e490b6..000000000000 --- a/arch/powerpc/kexec/ima.c +++ /dev/null @@ -1,219 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2016 IBM Corporation - * - * Authors: - * Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com> - */ - -#include <linux/slab.h> -#include <linux/kexec.h> -#include <linux/of.h> -#include <linux/memblock.h> -#include <linux/libfdt.h> - -static int get_addr_size_cells(int *addr_cells, int *size_cells) -{ - struct device_node *root; - - root = of_find_node_by_path("/"); - if (!root) - return -EINVAL; - - *addr_cells = of_n_addr_cells(root); - *size_cells = of_n_size_cells(root); - - of_node_put(root); - - return 0; -} - -static int do_get_kexec_buffer(const void *prop, int len, unsigned long *addr, - size_t *size) -{ - int ret, addr_cells, size_cells; - - ret = get_addr_size_cells(&addr_cells, &size_cells); - if (ret) - return ret; - - if (len < 4 * (addr_cells + size_cells)) - return -ENOENT; - - *addr = of_read_number(prop, addr_cells); - *size = of_read_number(prop + 4 * addr_cells, size_cells); - - return 0; -} - -/** - * ima_get_kexec_buffer - get IMA buffer from the previous kernel - * @addr: On successful return, set to point to the buffer contents. - * @size: On successful return, set to the buffer size. - * - * Return: 0 on success, negative errno on error. - */ -int ima_get_kexec_buffer(void **addr, size_t *size) -{ - int ret, len; - unsigned long tmp_addr; - size_t tmp_size; - const void *prop; - - prop = of_get_property(of_chosen, "linux,ima-kexec-buffer", &len); - if (!prop) - return -ENOENT; - - ret = do_get_kexec_buffer(prop, len, &tmp_addr, &tmp_size); - if (ret) - return ret; - - *addr = __va(tmp_addr); - *size = tmp_size; - - return 0; -} - -/** - * ima_free_kexec_buffer - free memory used by the IMA buffer - */ -int ima_free_kexec_buffer(void) -{ - int ret; - unsigned long addr; - size_t size; - struct property *prop; - - prop = of_find_property(of_chosen, "linux,ima-kexec-buffer", NULL); - if (!prop) - return -ENOENT; - - ret = do_get_kexec_buffer(prop->value, prop->length, &addr, &size); - if (ret) - return ret; - - ret = of_remove_property(of_chosen, prop); - if (ret) - return ret; - - return memblock_free(addr, size); - -} - -/** - * remove_ima_buffer - remove the IMA buffer property and reservation from @fdt - * - * The IMA measurement buffer is of no use to a subsequent kernel, so we always - * remove it from the device tree. 
- */ -void remove_ima_buffer(void *fdt, int chosen_node) -{ - int ret, len; - unsigned long addr; - size_t size; - const void *prop; - - prop = fdt_getprop(fdt, chosen_node, "linux,ima-kexec-buffer", &len); - if (!prop) - return; - - ret = do_get_kexec_buffer(prop, len, &addr, &size); - fdt_delprop(fdt, chosen_node, "linux,ima-kexec-buffer"); - if (ret) - return; - - ret = delete_fdt_mem_rsv(fdt, addr, size); - if (!ret) - pr_debug("Removed old IMA buffer reservation.\n"); -} - -#ifdef CONFIG_IMA_KEXEC -/** - * arch_ima_add_kexec_buffer - do arch-specific steps to add the IMA buffer - * - * Architectures should use this function to pass on the IMA buffer - * information to the next kernel. - * - * Return: 0 on success, negative errno on error. - */ -int arch_ima_add_kexec_buffer(struct kimage *image, unsigned long load_addr, - size_t size) -{ - image->arch.ima_buffer_addr = load_addr; - image->arch.ima_buffer_size = size; - - return 0; -} - -static int write_number(void *p, u64 value, int cells) -{ - if (cells == 1) { - u32 tmp; - - if (value > U32_MAX) - return -EINVAL; - - tmp = cpu_to_be32(value); - memcpy(p, &tmp, sizeof(tmp)); - } else if (cells == 2) { - u64 tmp; - - tmp = cpu_to_be64(value); - memcpy(p, &tmp, sizeof(tmp)); - } else - return -EINVAL; - - return 0; -} - -/** - * setup_ima_buffer - add IMA buffer information to the fdt - * @image: kexec image being loaded. - * @fdt: Flattened device tree for the next kernel. - * @chosen_node: Offset to the chosen node. - * - * Return: 0 on success, or negative errno on error. - */ -int setup_ima_buffer(const struct kimage *image, void *fdt, int chosen_node) -{ - int ret, addr_cells, size_cells, entry_size; - u8 value[16]; - - remove_ima_buffer(fdt, chosen_node); - if (!image->arch.ima_buffer_size) - return 0; - - ret = get_addr_size_cells(&addr_cells, &size_cells); - if (ret) - return ret; - - entry_size = 4 * (addr_cells + size_cells); - - if (entry_size > sizeof(value)) - return -EINVAL; - - ret = write_number(value, image->arch.ima_buffer_addr, addr_cells); - if (ret) - return ret; - - ret = write_number(value + 4 * addr_cells, image->arch.ima_buffer_size, - size_cells); - if (ret) - return ret; - - ret = fdt_setprop(fdt, chosen_node, "linux,ima-kexec-buffer", value, - entry_size); - if (ret < 0) - return -EINVAL; - - ret = fdt_add_mem_rsv(fdt, image->arch.ima_buffer_addr, - image->arch.ima_buffer_size); - if (ret) - return -EINVAL; - - pr_debug("IMA buffer at 0x%llx, size = 0x%zx\n", - image->arch.ima_buffer_addr, image->arch.ima_buffer_size); - - return 0; -} -#endif /* CONFIG_IMA_KEXEC */ diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 44bf567b6589..2b691f4d1f26 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -834,26 +834,24 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm, kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change); } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, - unsigned flags) +bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) { - return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end); + return kvm->arch.kvm_ops->unmap_gfn_range(kvm, range); } -int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) +bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - return kvm->arch.kvm_ops->age_hva(kvm, start, end); + return kvm->arch.kvm_ops->age_gfn(kvm, range); } -int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) +bool kvm_test_age_gfn(struct kvm *kvm, 
struct kvm_gfn_range *range) { - return kvm->arch.kvm_ops->test_age_hva(kvm, hva); + return kvm->arch.kvm_ops->test_age_gfn(kvm, range); } -int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte); - return 0; + return kvm->arch.kvm_ops->set_spte_gfn(kvm, range); } int kvmppc_core_init_vm(struct kvm *kvm) diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h index 9b6323ec8e60..740e51def5a5 100644 --- a/arch/powerpc/kvm/book3s.h +++ b/arch/powerpc/kvm/book3s.h @@ -9,12 +9,10 @@ extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm, struct kvm_memory_slot *memslot); -extern int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, - unsigned long end); -extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, - unsigned long end); -extern int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva); -extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte); +extern bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range); +extern bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range); +extern bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range); +extern bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range); extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu); extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu); diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index e452158a18d7..c3e31fef0be1 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -8,6 +8,7 @@ */ #include <linux/kvm_host.h> +#include <linux/pkeys.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> @@ -133,6 +134,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, else kvmppc_mmu_flush_icache(pfn); + rflags |= pte_to_hpte_pkey_bits(0, HPTE_USE_KERNEL_KEY); rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg; /* diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index bb6773594cf8..2d9193cd73be 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -752,51 +752,6 @@ void kvmppc_rmap_reset(struct kvm *kvm) srcu_read_unlock(&kvm->srcu, srcu_idx); } -typedef int (*hva_handler_fn)(struct kvm *kvm, struct kvm_memory_slot *memslot, - unsigned long gfn); - -static int kvm_handle_hva_range(struct kvm *kvm, - unsigned long start, - unsigned long end, - hva_handler_fn handler) -{ - int ret; - int retval = 0; - struct kvm_memslots *slots; - struct kvm_memory_slot *memslot; - - slots = kvm_memslots(kvm); - kvm_for_each_memslot(memslot, slots) { - unsigned long hva_start, hva_end; - gfn_t gfn, gfn_end; - - hva_start = max(start, memslot->userspace_addr); - hva_end = min(end, memslot->userspace_addr + - (memslot->npages << PAGE_SHIFT)); - if (hva_start >= hva_end) - continue; - /* - * {gfn(page) | page intersects with [hva_start, hva_end)} = - * {gfn, gfn+1, ..., gfn_end-1}. 
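The deleted kvm_handle_hva_range() above is where an HVA interval was clipped against each memslot and converted to a guest-frame range; after this change the unmap/age hooks receive the gfn range directly from common KVM code. A stand-alone sketch of that clipping and conversion arithmetic, assuming 4K pages and a made-up memslot:

#include <stdio.h>

#define PAGE_SHIFT 12

struct memslot {
	unsigned long userspace_addr;	/* HVA where the slot is mapped */
	unsigned long npages;
	unsigned long base_gfn;
};

int main(void)
{
	/* Hypothetical slot: 1024 guest pages at HVA 0x7f0000000000, base gfn 0x100. */
	struct memslot slot = { 0x7f0000000000UL, 1024, 0x100 };
	unsigned long start = 0x7f0000003000UL, end = 0x7f0000006800UL;

	unsigned long hva_start = start > slot.userspace_addr ? start : slot.userspace_addr;
	unsigned long slot_end = slot.userspace_addr + (slot.npages << PAGE_SHIFT);
	unsigned long hva_end = end < slot_end ? end : slot_end;

	if (hva_start >= hva_end)
		return 0;	/* no overlap with this slot */

	/* gfn range covering every page that intersects [hva_start, hva_end) */
	unsigned long gfn = slot.base_gfn +
		((hva_start - slot.userspace_addr) >> PAGE_SHIFT);
	unsigned long gfn_end = slot.base_gfn +
		((hva_end + (1UL << PAGE_SHIFT) - 1 - slot.userspace_addr) >> PAGE_SHIFT);

	printf("gfn range: 0x%lx .. 0x%lx\n", gfn, gfn_end - 1);
	return 0;
}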
- */ - gfn = hva_to_gfn_memslot(hva_start, memslot); - gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); - - for (; gfn < gfn_end; ++gfn) { - ret = handler(kvm, memslot, gfn); - retval |= ret; - } - } - - return retval; -} - -static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, - hva_handler_fn handler) -{ - return kvm_handle_hva_range(kvm, hva, hva + 1, handler); -} - /* Must be called with both HPTE and rmap locked */ static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i, struct kvm_memory_slot *memslot, @@ -840,8 +795,8 @@ static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i, } } -static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, - unsigned long gfn) +static void kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, + unsigned long gfn) { unsigned long i; __be64 *hptep; @@ -874,16 +829,21 @@ static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unlock_rmap(rmapp); __unlock_hpte(hptep, be64_to_cpu(hptep[0])); } - return 0; } -int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end) +bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range) { - hva_handler_fn handler; + gfn_t gfn; - handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp; - kvm_handle_hva_range(kvm, start, end, handler); - return 0; + if (kvm_is_radix(kvm)) { + for (gfn = range->start; gfn < range->end; gfn++) + kvm_unmap_radix(kvm, range->slot, gfn); + } else { + for (gfn = range->start; gfn < range->end; gfn++) + kvm_unmap_rmapp(kvm, range->slot, range->start); + } + + return false; } void kvmppc_core_flush_memslot_hv(struct kvm *kvm, @@ -913,8 +873,8 @@ void kvmppc_core_flush_memslot_hv(struct kvm *kvm, } } -static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, - unsigned long gfn) +static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, + unsigned long gfn) { struct revmap_entry *rev = kvm->arch.hpt.rev; unsigned long head, i, j; @@ -968,26 +928,34 @@ static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, return ret; } -int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end) +bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) { - hva_handler_fn handler; + gfn_t gfn; + bool ret = false; - handler = kvm_is_radix(kvm) ? 
kvm_age_radix : kvm_age_rmapp; - return kvm_handle_hva_range(kvm, start, end, handler); + if (kvm_is_radix(kvm)) { + for (gfn = range->start; gfn < range->end; gfn++) + ret |= kvm_age_radix(kvm, range->slot, gfn); + } else { + for (gfn = range->start; gfn < range->end; gfn++) + ret |= kvm_age_rmapp(kvm, range->slot, gfn); + } + + return ret; } -static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, - unsigned long gfn) +static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, + unsigned long gfn) { struct revmap_entry *rev = kvm->arch.hpt.rev; unsigned long head, i, j; unsigned long *hp; - int ret = 1; + bool ret = true; unsigned long *rmapp; rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; if (*rmapp & KVMPPC_RMAP_REFERENCED) - return 1; + return true; lock_rmap(rmapp); if (*rmapp & KVMPPC_RMAP_REFERENCED) @@ -1002,27 +970,33 @@ static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, goto out; } while ((i = j) != head); } - ret = 0; + ret = false; out: unlock_rmap(rmapp); return ret; } -int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva) +bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) { - hva_handler_fn handler; + WARN_ON(range->start + 1 != range->end); - handler = kvm_is_radix(kvm) ? kvm_test_age_radix : kvm_test_age_rmapp; - return kvm_handle_hva(kvm, hva, handler); + if (kvm_is_radix(kvm)) + return kvm_test_age_radix(kvm, range->slot, range->start); + else + return kvm_test_age_rmapp(kvm, range->slot, range->start); } -void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte) +bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) { - hva_handler_fn handler; + WARN_ON(range->start + 1 != range->end); + + if (kvm_is_radix(kvm)) + kvm_unmap_radix(kvm, range->slot, range->start); + else + kvm_unmap_rmapp(kvm, range->slot, range->start); - handler = kvm_is_radix(kvm) ? 
kvm_unmap_radix : kvm_unmap_rmapp; - kvm_handle_hva(kvm, hva, handler); + return false; } static int vcpus_running(struct kvm *kvm) diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index e603de7ade52..d909c069363e 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -993,8 +993,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu, } /* Called with kvm->mmu_lock held */ -int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, - unsigned long gfn) +void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, + unsigned long gfn) { pte_t *ptep; unsigned long gpa = gfn << PAGE_SHIFT; @@ -1002,24 +1002,23 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) { uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT); - return 0; + return; } ptep = find_kvm_secondary_pte(kvm, gpa, &shift); if (ptep && pte_present(*ptep)) kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot, kvm->arch.lpid); - return 0; } /* Called with kvm->mmu_lock held */ -int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, - unsigned long gfn) +bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, + unsigned long gfn) { pte_t *ptep; unsigned long gpa = gfn << PAGE_SHIFT; unsigned int shift; - int ref = 0; + bool ref = false; unsigned long old, *rmapp; if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) @@ -1035,26 +1034,27 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0, old & PTE_RPN_MASK, 1UL << shift); - ref = 1; + ref = true; } return ref; } /* Called with kvm->mmu_lock held */ -int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, - unsigned long gfn) +bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, + unsigned long gfn) + { pte_t *ptep; unsigned long gpa = gfn << PAGE_SHIFT; unsigned int shift; - int ref = 0; + bool ref = false; if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) return ref; ptep = find_kvm_secondary_pte(kvm, gpa, &shift); if (ptep && pte_present(*ptep) && pte_young(*ptep)) - ref = 1; + ref = true; return ref; } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 13bad6bf4c95..28a80d240b76 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -803,7 +803,10 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags, vcpu->arch.dawrx1 = value2; return H_SUCCESS; case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE: - /* KVM does not support mflags=2 (AIL=2) */ + /* + * KVM does not support mflags=2 (AIL=2) and AIL=1 is reserved. + * Keep this in synch with kvmppc_filter_guest_lpcr_hv. + */ if (mflags != 0 && mflags != 3) return H_UNSUPPORTED_FLAG_START; return H_TOO_HARD; @@ -1635,6 +1638,41 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu, return 0; } +/* + * Enforce limits on guest LPCR values based on hardware availability, + * guest configuration, and possibly hypervisor support and security + * concerns. 
+ */ +unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm, unsigned long lpcr) +{ + /* LPCR_TC only applies to HPT guests */ + if (kvm_is_radix(kvm)) + lpcr &= ~LPCR_TC; + + /* On POWER8 and above, userspace can modify AIL */ + if (!cpu_has_feature(CPU_FTR_ARCH_207S)) + lpcr &= ~LPCR_AIL; + if ((lpcr & LPCR_AIL) != LPCR_AIL_3) + lpcr &= ~LPCR_AIL; /* LPCR[AIL]=1/2 is disallowed */ + + /* + * On POWER9, allow userspace to enable large decrementer for the + * guest, whether or not the host has it enabled. + */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + lpcr &= ~LPCR_LD; + + return lpcr; +} + +static void verify_lpcr(struct kvm *kvm, unsigned long lpcr) +{ + if (lpcr != kvmppc_filter_lpcr_hv(kvm, lpcr)) { + WARN_ONCE(1, "lpcr 0x%lx differs from filtered 0x%lx\n", + lpcr, kvmppc_filter_lpcr_hv(kvm, lpcr)); + } +} + static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, bool preserve_top32) { @@ -1643,6 +1681,23 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, u64 mask; spin_lock(&vc->lock); + + /* + * Userspace can only modify + * DPFD (default prefetch depth), ILE (interrupt little-endian), + * TC (translation control), AIL (alternate interrupt location), + * LD (large decrementer). + * These are subject to restrictions from kvmppc_filter_lcpr_hv(). + */ + mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD; + + /* Broken 32-bit version of LPCR must not clear top bits */ + if (preserve_top32) + mask &= 0xFFFFFFFF; + + new_lpcr = kvmppc_filter_lpcr_hv(kvm, + (vc->lpcr & ~mask) | (new_lpcr & mask)); + /* * If ILE (interrupt little-endian) has changed, update the * MSR_LE bit in the intr_msr for each vcpu in this vcore. @@ -1661,25 +1716,8 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, } } - /* - * Userspace can only modify DPFD (default prefetch depth), - * ILE (interrupt little-endian) and TC (translation control). - * On POWER8 and POWER9 userspace can also modify AIL (alt. interrupt loc.). - */ - mask = LPCR_DPFD | LPCR_ILE | LPCR_TC; - if (cpu_has_feature(CPU_FTR_ARCH_207S)) - mask |= LPCR_AIL; - /* - * On POWER9, allow userspace to enable large decrementer for the - * guest, whether or not the host has it enabled. 
- */ - if (cpu_has_feature(CPU_FTR_ARCH_300)) - mask |= LPCR_LD; + vc->lpcr = new_lpcr; - /* Broken 32-bit version of LPCR must not clear top bits */ - if (preserve_top32) - mask &= 0xFFFFFFFF; - vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); spin_unlock(&vc->lock); } @@ -3728,7 +3766,10 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, vcpu->arch.dec_expires = dec + tb; vcpu->cpu = -1; vcpu->arch.thread_cpu = -1; + /* Save guest CTRL register, set runlatch to 1 */ vcpu->arch.ctrl = mfspr(SPRN_CTRLF); + if (!(vcpu->arch.ctrl & 1)) + mtspr(SPRN_CTRLT, vcpu->arch.ctrl | 1); vcpu->arch.iamr = mfspr(SPRN_IAMR); vcpu->arch.pspb = mfspr(SPRN_PSPB); @@ -3749,7 +3790,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, mtspr(SPRN_DSCR, host_dscr); mtspr(SPRN_TIDR, host_tidr); mtspr(SPRN_IAMR, host_iamr); - mtspr(SPRN_PSPB, 0); if (host_amr != vcpu->arch.amr) mtspr(SPRN_AMR, host_amr); @@ -4641,8 +4681,10 @@ void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask) struct kvmppc_vcore *vc = kvm->arch.vcores[i]; if (!vc) continue; + spin_lock(&vc->lock); vc->lpcr = (vc->lpcr & ~mask) | lpcr; + verify_lpcr(kvm, vc->lpcr); spin_unlock(&vc->lock); if (++cores_done >= kvm->arch.online_vcores) break; @@ -4770,7 +4812,7 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm) kvmhv_release_all_nested(kvm); kvmppc_rmap_reset(kvm); kvm->arch.process_table = 0; - /* Mutual exclusion with kvm_unmap_hva_range etc. */ + /* Mutual exclusion with kvm_unmap_gfn_range etc. */ spin_lock(&kvm->mmu_lock); kvm->arch.radix = 0; spin_unlock(&kvm->mmu_lock); @@ -4792,7 +4834,7 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm) if (err) return err; kvmppc_rmap_reset(kvm); - /* Mutual exclusion with kvm_unmap_hva_range etc. */ + /* Mutual exclusion with kvm_unmap_gfn_range etc. */ spin_lock(&kvm->mmu_lock); kvm->arch.radix = 1; spin_unlock(&kvm->mmu_lock); @@ -4970,6 +5012,7 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) kvmppc_setup_partition_table(kvm); } + verify_lpcr(kvm, lpcr); kvm->arch.lpcr = lpcr; /* Initialization for future HPT resizes */ @@ -5369,8 +5412,10 @@ static unsigned int default_hcall_list[] = { H_READ, H_PROTECT, H_BULK_REMOVE, +#ifdef CONFIG_SPAPR_TCE_IOMMU H_GET_TCE, H_PUT_TCE, +#endif H_SET_DABR, H_SET_XDABR, H_CEDE, @@ -5654,10 +5699,10 @@ static struct kvmppc_ops kvm_ops_hv = { .flush_memslot = kvmppc_core_flush_memslot_hv, .prepare_memory_region = kvmppc_core_prepare_memory_region_hv, .commit_memory_region = kvmppc_core_commit_memory_region_hv, - .unmap_hva_range = kvm_unmap_hva_range_hv, - .age_hva = kvm_age_hva_hv, - .test_age_hva = kvm_test_age_hva_hv, - .set_spte_hva = kvm_set_spte_hva_hv, + .unmap_gfn_range = kvm_unmap_gfn_range_hv, + .age_gfn = kvm_age_gfn_hv, + .test_age_gfn = kvm_test_age_gfn_hv, + .set_spte_gfn = kvm_set_spte_gfn_hv, .free_memslot = kvmppc_core_free_memslot_hv, .init_vm = kvmppc_core_init_vm_hv, .destroy_vm = kvmppc_core_destroy_vm_hv, diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 158d309b42a3..7a0e33a9c980 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -662,6 +662,9 @@ static void kvmppc_end_cede(struct kvm_vcpu *vcpu) void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) { + /* Guest must always run with ME enabled, HV disabled. */ + msr = (msr | MSR_ME) & ~MSR_HV; + /* * Check for illegal transactional state bit combination * and if we find it, force the TS field to a safe state. 
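Illustrative sketch (editor's addition, not part of the patch): the Book3S HV hunks above replace the old hva_handler_fn plumbing with per-memslot gfn-range callbacks. Each callback now receives a struct kvm_gfn_range (one memslot plus a [start, end) guest-frame span) and returns a bool that the generic MMU-notifier code interprets as "page was referenced" for the age callbacks or "flush needed" for unmap/set_spte. A minimal handler of that shape, with the hypothetical example_gfn_was_referenced() standing in for the real kvm_age_radix()/kvm_age_rmapp() walkers used by the patch, could look roughly like:

	#include <linux/kvm_host.h>

	/* Hypothetical per-gfn predicate; the patch calls kvm_age_radix()/kvm_age_rmapp() here. */
	static bool example_gfn_was_referenced(struct kvm *kvm,
					       struct kvm_memory_slot *slot, gfn_t gfn)
	{
		return false;
	}

	static bool example_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
	{
		gfn_t gfn;
		bool young = false;

		/* Walk every guest frame number in the notifier's [start, end) window. */
		for (gfn = range->start; gfn < range->end; gfn++)
			young |= example_gfn_was_referenced(kvm, range->slot, gfn);

		return young;
	}

The loop mirrors the kvm_age_gfn_hv() conversion above: the slot and gfn bounds come pre-resolved from common KVM code, so the arch handler no longer walks memslots or translates HVAs itself.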
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index 0cd0e7aad588..60724f674421 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -132,8 +132,33 @@ static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap, } } +/* + * This can result in some L0 HV register state being leaked to an L1 + * hypervisor when the hv_guest_state is copied back to the guest after + * being modified here. + * + * There is no known problem with such a leak, and in many cases these + * register settings could be derived by the guest by observing behaviour + * and timing, interrupts, etc., but it is an issue to consider. + */ static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) { + struct kvmppc_vcore *vc = vcpu->arch.vcore; + u64 mask; + + /* + * Don't let L1 change LPCR bits for the L2 except these: + */ + mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD | + LPCR_LPES | LPCR_MER; + + /* + * Additional filtering is required depending on hardware + * and configuration. + */ + hr->lpcr = kvmppc_filter_lpcr_hv(vcpu->kvm, + (vc->lpcr & ~mask) | (hr->lpcr & mask)); + /* * Don't let L1 enable features for L2 which we've disabled for L1, * but preserve the interrupt cause field. @@ -271,8 +296,6 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) u64 hv_ptr, regs_ptr; u64 hdec_exp; s64 delta_purr, delta_spurr, delta_ic, delta_vtb; - u64 mask; - unsigned long lpcr; if (vcpu->kvm->arch.l1_ptcr == 0) return H_NOT_AVAILABLE; @@ -320,10 +343,10 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) vcpu->arch.nested = l2; vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token; vcpu->arch.regs = l2_regs; - vcpu->arch.shregs.msr = vcpu->arch.regs.msr; - mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD | - LPCR_LPES | LPCR_MER; - lpcr = (vc->lpcr & ~mask) | (l2_hv.lpcr & mask); + + /* Guest must always run with ME enabled, HV disabled. 
*/ + vcpu->arch.shregs.msr = (vcpu->arch.regs.msr | MSR_ME) & ~MSR_HV; + sanitise_hv_regs(vcpu, &l2_hv); restore_hv_regs(vcpu, &l2_hv); @@ -335,7 +358,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) r = RESUME_HOST; break; } - r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr); + r = kvmhv_run_single_vcpu(vcpu, hdec_exp, l2_hv.lpcr); } while (is_kvmppc_resume_guest(r)); /* save L2 state for return */ diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 88da2764c1bb..7af7c70f1468 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -673,8 +673,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) } long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags, - unsigned long pte_index, unsigned long avpn, - unsigned long va) + unsigned long pte_index, unsigned long avpn) { struct kvm *kvm = vcpu->kvm; __be64 *hpte; diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 913944dc3620..d7733b07f489 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -425,61 +425,39 @@ static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) } /************* MMU Notifiers *************/ -static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start, - unsigned long end) +static bool do_kvm_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { long i; struct kvm_vcpu *vcpu; - struct kvm_memslots *slots; - struct kvm_memory_slot *memslot; - slots = kvm_memslots(kvm); - kvm_for_each_memslot(memslot, slots) { - unsigned long hva_start, hva_end; - gfn_t gfn, gfn_end; + kvm_for_each_vcpu(i, vcpu, kvm) + kvmppc_mmu_pte_pflush(vcpu, range->start << PAGE_SHIFT, + range->end << PAGE_SHIFT); - hva_start = max(start, memslot->userspace_addr); - hva_end = min(end, memslot->userspace_addr + - (memslot->npages << PAGE_SHIFT)); - if (hva_start >= hva_end) - continue; - /* - * {gfn(page) | page intersects with [hva_start, hva_end)} = - * {gfn, gfn+1, ..., gfn_end-1}. 
- */ - gfn = hva_to_gfn_memslot(hva_start, memslot); - gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); - kvm_for_each_vcpu(i, vcpu, kvm) - kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT, - gfn_end << PAGE_SHIFT); - } + return false; } -static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start, - unsigned long end) +static bool kvm_unmap_gfn_range_pr(struct kvm *kvm, struct kvm_gfn_range *range) { - do_kvm_unmap_hva(kvm, start, end); - - return 0; + return do_kvm_unmap_gfn(kvm, range); } -static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start, - unsigned long end) +static bool kvm_age_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range) { /* XXX could be more clever ;) */ - return 0; + return false; } -static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva) +static bool kvm_test_age_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range) { /* XXX could be more clever ;) */ - return 0; + return false; } -static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte) +static bool kvm_set_spte_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range) { /* The page will get remapped properly on its next fault */ - do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE); + return do_kvm_unmap_gfn(kvm, range); } /*****************************************/ @@ -2079,10 +2057,10 @@ static struct kvmppc_ops kvm_ops_pr = { .flush_memslot = kvmppc_core_flush_memslot_pr, .prepare_memory_region = kvmppc_core_prepare_memory_region_pr, .commit_memory_region = kvmppc_core_commit_memory_region_pr, - .unmap_hva_range = kvm_unmap_hva_range_pr, - .age_hva = kvm_age_hva_pr, - .test_age_hva = kvm_test_age_hva_pr, - .set_spte_hva = kvm_set_spte_hva_pr, + .unmap_gfn_range = kvm_unmap_gfn_range_pr, + .age_gfn = kvm_age_gfn_pr, + .test_age_gfn = kvm_test_age_gfn_pr, + .set_spte_gfn = kvm_set_spte_gfn_pr, .free_memslot = kvmppc_core_free_memslot_pr, .init_vm = kvmppc_core_init_vm_pr, .destroy_vm = kvmppc_core_destroy_vm_pr, diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index ed0c9c43d0cf..7f16afc331ef 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -721,45 +721,36 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, /************* MMU Notifiers *************/ -static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) +static bool kvm_e500_mmu_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - trace_kvm_unmap_hva(hva); - /* * Flush all shadow tlb entries everywhere. 
This is slow, but * we are 100% sure that we catch the to be unmapped page */ - kvm_flush_remote_tlbs(kvm); - - return 0; + return true; } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, - unsigned flags) +bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) { - /* kvm_unmap_hva flushes everything anyways */ - kvm_unmap_hva(kvm, start); - - return 0; + return kvm_e500_mmu_unmap_gfn(kvm, range); } -int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) +bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { /* XXX could be more clever ;) */ - return 0; + return false; } -int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) +bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { /* XXX could be more clever ;) */ - return 0; + return false; } -int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { /* The page will get remapped properly on its next fault */ - kvm_unmap_hva(kvm, hva); - return 0; + return kvm_e500_mmu_unmap_gfn(kvm, range); } /*****************************************/ diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h index 3837842986aa..eff6e82dbcd4 100644 --- a/arch/powerpc/kvm/trace_booke.h +++ b/arch/powerpc/kvm/trace_booke.h @@ -69,21 +69,6 @@ TRACE_EVENT(kvm_exit, ) ); -TRACE_EVENT(kvm_unmap_hva, - TP_PROTO(unsigned long hva), - TP_ARGS(hva), - - TP_STRUCT__entry( - __field( unsigned long, hva ) - ), - - TP_fast_assign( - __entry->hva = hva; - ), - - TP_printk("unmap hva 0x%lx\n", __entry->hva) -); - TRACE_EVENT(kvm_booke206_stlb_write, TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3), TP_ARGS(mas0, mas8, mas1, mas2, mas7_3), diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index d4efc182662a..cc1a8a0f311e 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -5,6 +5,9 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) +CFLAGS_code-patching.o += -fno-stack-protector +CFLAGS_feature-fixups.o += -fno-stack-protector + CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE) @@ -16,7 +19,7 @@ CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING endif -obj-y += alloc.o code-patching.o feature-fixups.o pmem.o inst.o test_code-patching.o +obj-y += alloc.o code-patching.o feature-fixups.o pmem.o test_code-patching.o ifndef CONFIG_KASAN obj-y += string.o memcmp_$(BITS).o diff --git a/arch/powerpc/lib/checksum_wrappers.c b/arch/powerpc/lib/checksum_wrappers.c index b895166afc82..f3999cbb2fcc 100644 --- a/arch/powerpc/lib/checksum_wrappers.c +++ b/arch/powerpc/lib/checksum_wrappers.c @@ -16,16 +16,12 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst, { __wsum csum; - might_sleep(); - - if (unlikely(!access_ok(src, len))) + if (unlikely(!user_read_access_begin(src, len))) return 0; - allow_read_from_user(src, len); - csum = csum_partial_copy_generic((void __force *)src, dst, len); - prevent_read_from_user(src, len); + user_read_access_end(); return csum; } EXPORT_SYMBOL(csum_and_copy_from_user); @@ -34,15 +30,12 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len) { __wsum csum; - might_sleep(); - if (unlikely(!access_ok(dst, len))) + if (unlikely(!user_write_access_begin(dst, len))) return 0; - allow_write_to_user(dst, len); - csum = csum_partial_copy_generic(src, (void __force 
*)dst, len); - prevent_write_to_user(dst, len); + user_write_access_end(); return csum; } EXPORT_SYMBOL(csum_and_copy_to_user); diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index 2333625b5e31..870b30d9be2f 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -21,10 +21,15 @@ static int __patch_instruction(struct ppc_inst *exec_addr, struct ppc_inst instr, struct ppc_inst *patch_addr) { - if (!ppc_inst_prefixed(instr)) - __put_user_asm_goto(ppc_inst_val(instr), patch_addr, failed, "stw"); - else - __put_user_asm_goto(ppc_inst_as_u64(instr), patch_addr, failed, "std"); + if (!ppc_inst_prefixed(instr)) { + u32 val = ppc_inst_val(instr); + + __put_kernel_nofault(patch_addr, &val, u32, failed); + } else { + u64 val = ppc_inst_as_ulong(instr); + + __put_kernel_nofault(patch_addr, &val, u64, failed); + } asm ("dcbst 0, %0; sync; icbi 0,%1; sync; isync" :: "r" (patch_addr), "r" (exec_addr)); diff --git a/arch/powerpc/lib/inst.c b/arch/powerpc/lib/inst.c deleted file mode 100644 index 9cc17eb62462..000000000000 --- a/arch/powerpc/lib/inst.c +++ /dev/null @@ -1,73 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2020, IBM Corporation. - */ - -#include <linux/uaccess.h> -#include <asm/disassemble.h> -#include <asm/inst.h> -#include <asm/ppc-opcode.h> - -#ifdef CONFIG_PPC64 -int probe_user_read_inst(struct ppc_inst *inst, - struct ppc_inst __user *nip) -{ - unsigned int val, suffix; - int err; - - err = copy_from_user_nofault(&val, nip, sizeof(val)); - if (err) - return err; - if (get_op(val) == OP_PREFIX) { - err = copy_from_user_nofault(&suffix, (void __user *)nip + 4, 4); - *inst = ppc_inst_prefix(val, suffix); - } else { - *inst = ppc_inst(val); - } - return err; -} - -int probe_kernel_read_inst(struct ppc_inst *inst, - struct ppc_inst *src) -{ - unsigned int val, suffix; - int err; - - err = copy_from_kernel_nofault(&val, src, sizeof(val)); - if (err) - return err; - if (get_op(val) == OP_PREFIX) { - err = copy_from_kernel_nofault(&suffix, (void *)src + 4, 4); - *inst = ppc_inst_prefix(val, suffix); - } else { - *inst = ppc_inst(val); - } - return err; -} -#else /* !CONFIG_PPC64 */ -int probe_user_read_inst(struct ppc_inst *inst, - struct ppc_inst __user *nip) -{ - unsigned int val; - int err; - - err = copy_from_user_nofault(&val, nip, sizeof(val)); - if (!err) - *inst = ppc_inst(val); - - return err; -} - -int probe_kernel_read_inst(struct ppc_inst *inst, - struct ppc_inst *src) -{ - unsigned int val; - int err; - - err = copy_from_kernel_nofault(&val, src, sizeof(val)); - if (!err) - *inst = ppc_inst(val); - - return err; -} -#endif /* CONFIG_PPC64 */ diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index c6aebc149d14..45bda2520755 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -1401,10 +1401,6 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; } - /* Following cases refer to regs->gpr[], so we need all regs */ - if (!FULL_REGS(regs)) - return -1; - rd = (word >> 21) & 0x1f; ra = (word >> 16) & 0x1f; rb = (word >> 11) & 0x1f; @@ -3086,15 +3082,6 @@ NOKPROBE_SYMBOL(analyse_instr); */ static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs) { -#ifdef CONFIG_PPC32 - /* - * Check if we will touch kernel stack overflow - */ - if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) { - printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n"); - return -EINVAL; - } -#endif /* 
CONFIG_PPC32 */ /* * Check if we already set since that means we'll * lose the previous value. diff --git a/arch/powerpc/math-emu/math.c b/arch/powerpc/math-emu/math.c index 30b4b69c6941..327165f26ca6 100644 --- a/arch/powerpc/math-emu/math.c +++ b/arch/powerpc/math-emu/math.c @@ -225,7 +225,7 @@ record_exception(struct pt_regs *regs, int eflag) int do_mathemu(struct pt_regs *regs) { - void *op0 = 0, *op1 = 0, *op2 = 0, *op3 = 0; + void *op0 = NULL, *op1 = NULL, *op2 = NULL, *op3 = NULL; unsigned long pc = regs->nip; signed short sdisp; u32 insn = 0; @@ -234,7 +234,7 @@ do_mathemu(struct pt_regs *regs) int type = 0; int eflag, trap; - if (get_user(insn, (u32 *)pc)) + if (get_user(insn, (u32 __user *)pc)) return -EFAULT; switch (insn >> 26) { diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index 3b4e9e4e25ea..c3df3a8501d4 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -8,7 +8,8 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) obj-y := fault.o mem.o pgtable.o mmap.o maccess.o \ init_$(BITS).o pgtable_$(BITS).o \ pgtable-frag.o ioremap.o ioremap_$(BITS).o \ - init-common.o mmu_context.o drmem.o + init-common.o mmu_context.o drmem.o \ + cacheflush.o obj-$(CONFIG_PPC_MMU_NOHASH) += nohash/ obj-$(CONFIG_PPC_BOOK3S_32) += book3s32/ obj-$(CONFIG_PPC_BOOK3S_64) += book3s64/ diff --git a/arch/powerpc/mm/book3s32/Makefile b/arch/powerpc/mm/book3s32/Makefile index 446d9de88ce4..7f0c8a78ba0c 100644 --- a/arch/powerpc/mm/book3s32/Makefile +++ b/arch/powerpc/mm/book3s32/Makefile @@ -9,3 +9,4 @@ endif obj-y += mmu.o mmu_context.o obj-$(CONFIG_PPC_BOOK3S_603) += nohash_low.o obj-$(CONFIG_PPC_BOOK3S_604) += hash_low.o tlb.o +obj-$(CONFIG_PPC_KUEP) += kuep.o diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S index 0e6dc830c38b..fb4233a5bdf7 100644 --- a/arch/powerpc/mm/book3s32/hash_low.S +++ b/arch/powerpc/mm/book3s32/hash_low.S @@ -140,10 +140,6 @@ _GLOBAL(hash_page) bne- .Lretry /* retry if someone got there first */ mfsrin r3,r4 /* get segment reg for segment */ -#ifndef CONFIG_VMAP_STACK - mfctr r0 - stw r0,_CTR(r11) -#endif bl create_hpte /* add the hash table entry */ #ifdef CONFIG_SMP @@ -152,17 +148,7 @@ _GLOBAL(hash_page) li r0,0 stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8) #endif - -#ifdef CONFIG_VMAP_STACK b fast_hash_page_return -#else - /* Return from the exception */ - lwz r5,_CTR(r11) - mtctr r5 - lwz r0,GPR0(r11) - lwz r8,GPR8(r11) - b fast_exception_return -#endif #ifdef CONFIG_SMP .Lhash_page_out: diff --git a/arch/powerpc/mm/book3s32/kuep.c b/arch/powerpc/mm/book3s32/kuep.c new file mode 100644 index 000000000000..8ed1b8634839 --- /dev/null +++ b/arch/powerpc/mm/book3s32/kuep.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <asm/kup.h> +#include <asm/reg.h> +#include <asm/task_size_32.h> +#include <asm/mmu.h> + +#define KUEP_UPDATE_TWO_USER_SEGMENTS(n) do { \ + if (TASK_SIZE > ((n) << 28)) \ + mtsr(val1, (n) << 28); \ + if (TASK_SIZE > (((n) + 1) << 28)) \ + mtsr(val2, ((n) + 1) << 28); \ + val1 = (val1 + 0x222) & 0xf0ffffff; \ + val2 = (val2 + 0x222) & 0xf0ffffff; \ +} while (0) + +static __always_inline void kuep_update(u32 val) +{ + int val1 = val; + int val2 = (val + 0x111) & 0xf0ffffff; + + KUEP_UPDATE_TWO_USER_SEGMENTS(0); + KUEP_UPDATE_TWO_USER_SEGMENTS(2); + KUEP_UPDATE_TWO_USER_SEGMENTS(4); + KUEP_UPDATE_TWO_USER_SEGMENTS(6); + KUEP_UPDATE_TWO_USER_SEGMENTS(8); + KUEP_UPDATE_TWO_USER_SEGMENTS(10); + KUEP_UPDATE_TWO_USER_SEGMENTS(12); + KUEP_UPDATE_TWO_USER_SEGMENTS(14); +} + 
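/*
 * Editor's illustration, not part of the new kuep.c above: kuep_update()
 * rewrites the user segment registers two at a time.  val2 starts at
 * val + 0x111 because, by construction here, the value written for
 * segment n+1 is segment n's value plus 0x111 in the low 24-bit VSID
 * field; the & 0xf0ffffff mask makes that addition wrap inside the VSID
 * while leaving the upper control bits (including the no-execute bit
 * carried in 'val') untouched.  Assuming the segment registers were laid
 * out with that stride at context-switch time, the function behaves like
 * this simpler but slower per-segment sketch, which does one mfsr per
 * segment instead of deriving every value from mfsr(0):
 *
 *	unsigned long addr;
 *
 *	for (addr = 0; addr < TASK_SIZE; addr += SZ_256M)
 *		mtsr(mfsr(addr) | SR_NX, addr);	-- kuep_lock(); kuep_unlock() clears SR_NX instead
 */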
+void kuep_lock(void) +{ + kuep_update(mfsr(0) | SR_NX); +} + +void kuep_unlock(void) +{ + kuep_update(mfsr(0) & ~SR_NX); +} diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index d7eb266a3f7a..159930351d9f 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -162,7 +162,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET; - if (debug_pagealloc_enabled() || __map_without_bats) { + if (debug_pagealloc_enabled_or_kfence() || __map_without_bats) { pr_debug_once("Read-Write memory mapped without BATs\n"); if (base >= border) return base; @@ -184,17 +184,10 @@ static bool is_module_segment(unsigned long addr) { if (!IS_ENABLED(CONFIG_MODULES)) return false; -#ifdef MODULES_VADDR if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M)) return false; if (addr > ALIGN(MODULES_END, SZ_256M) - 1) return false; -#else - if (addr < ALIGN_DOWN(VMALLOC_START, SZ_256M)) - return false; - if (addr > ALIGN(VMALLOC_END, SZ_256M) - 1) - return false; -#endif return true; } diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c index 567e0c6b3978..ad5eff097d31 100644 --- a/arch/powerpc/mm/book3s64/hash_pgtable.c +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c @@ -8,6 +8,7 @@ #include <linux/sched.h> #include <linux/mm_types.h> #include <linux/mm.h> +#include <linux/stop_machine.h> #include <asm/sections.h> #include <asm/mmu.h> @@ -400,10 +401,103 @@ EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage); #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #ifdef CONFIG_STRICT_KERNEL_RWX + +struct change_memory_parms { + unsigned long start, end, newpp; + unsigned int step, nr_cpus, master_cpu; + atomic_t cpu_counter; +}; + +// We'd rather this was on the stack but it has to be in the RMO +static struct change_memory_parms chmem_parms; + +// And therefore we need a lock to protect it from concurrent use +static DEFINE_MUTEX(chmem_lock); + +static void change_memory_range(unsigned long start, unsigned long end, + unsigned int step, unsigned long newpp) +{ + unsigned long idx; + + pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n", + start, end, newpp, step); + + for (idx = start; idx < end; idx += step) + /* Not sure if we can do much with the return value */ + mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize, + mmu_kernel_ssize); +} + +static int notrace chmem_secondary_loop(struct change_memory_parms *parms) +{ + unsigned long msr, tmp, flags; + int *p; + + p = &parms->cpu_counter.counter; + + local_irq_save(flags); + hard_irq_disable(); + + asm volatile ( + // Switch to real mode and leave interrupts off + "mfmsr %[msr] ;" + "li %[tmp], %[MSR_IR_DR] ;" + "andc %[tmp], %[msr], %[tmp] ;" + "mtmsrd %[tmp] ;" + + // Tell the master we are in real mode + "1: " + "lwarx %[tmp], 0, %[p] ;" + "addic %[tmp], %[tmp], -1 ;" + "stwcx. 
%[tmp], 0, %[p] ;" + "bne- 1b ;" + + // Spin until the counter goes to zero + "2: ;" + "lwz %[tmp], 0(%[p]) ;" + "cmpwi %[tmp], 0 ;" + "bne- 2b ;" + + // Switch back to virtual mode + "mtmsrd %[msr] ;" + + : // outputs + [msr] "=&r" (msr), [tmp] "=&b" (tmp), "+m" (*p) + : // inputs + [p] "b" (p), [MSR_IR_DR] "i" (MSR_IR | MSR_DR) + : // clobbers + "cc", "xer" + ); + + local_irq_restore(flags); + + return 0; +} + +static int change_memory_range_fn(void *data) +{ + struct change_memory_parms *parms = data; + + if (parms->master_cpu != smp_processor_id()) + return chmem_secondary_loop(parms); + + // Wait for all but one CPU (this one) to call-in + while (atomic_read(&parms->cpu_counter) > 1) + barrier(); + + change_memory_range(parms->start, parms->end, parms->step, parms->newpp); + + mb(); + + // Signal the other CPUs that we're done + atomic_dec(&parms->cpu_counter); + + return 0; +} + static bool hash__change_memory_range(unsigned long start, unsigned long end, unsigned long newpp) { - unsigned long idx; unsigned int step, shift; shift = mmu_psize_defs[mmu_linear_psize].shift; @@ -415,25 +509,43 @@ static bool hash__change_memory_range(unsigned long start, unsigned long end, if (start >= end) return false; - pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n", - start, end, newpp, step); + if (firmware_has_feature(FW_FEATURE_LPAR)) { + mutex_lock(&chmem_lock); - for (idx = start; idx < end; idx += step) - /* Not sure if we can do much with the return value */ - mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize, - mmu_kernel_ssize); + chmem_parms.start = start; + chmem_parms.end = end; + chmem_parms.step = step; + chmem_parms.newpp = newpp; + chmem_parms.master_cpu = smp_processor_id(); + + cpus_read_lock(); + + atomic_set(&chmem_parms.cpu_counter, num_online_cpus()); + + // Ensure state is consistent before we call the other CPUs + mb(); + + stop_machine_cpuslocked(change_memory_range_fn, &chmem_parms, + cpu_online_mask); + + cpus_read_unlock(); + mutex_unlock(&chmem_lock); + } else + change_memory_range(start, end, step, newpp); return true; } void hash__mark_rodata_ro(void) { - unsigned long start, end; + unsigned long start, end, pp; start = (unsigned long)_stext; end = (unsigned long)__init_begin; - WARN_ON(!hash__change_memory_range(start, end, PP_RXXX)); + pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL_ROX), HPTE_USE_KERNEL_KEY); + + WARN_ON(!hash__change_memory_range(start, end, pp)); } void hash__mark_initmem_nx(void) diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 581b20a2feaf..96d9aa164007 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -338,7 +338,7 @@ repeat: int htab_remove_mapping(unsigned long vstart, unsigned long vend, int psize, int ssize) { - unsigned long vaddr; + unsigned long vaddr, time_limit; unsigned int step, shift; int rc; int ret = 0; @@ -351,8 +351,19 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend, /* Unmap the full range specificied */ vaddr = ALIGN_DOWN(vstart, step); + time_limit = jiffies + HZ; + for (;vaddr < vend; vaddr += step) { rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize); + + /* + * For large number of mappings introduce a cond_resched() + * to prevent softlockup warnings. 
+ */ + if (time_after(jiffies, time_limit)) { + cond_resched(); + time_limit = jiffies + HZ; + } if (rc == -ENOENT) { ret = -ENOENT; continue; @@ -1145,7 +1156,7 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap) /* page is dirty */ if (!test_bit(PG_dcache_clean, &page->flags) && !PageReserved(page)) { - if (trap == 0x400) { + if (trap == INTERRUPT_INST_STORAGE) { flush_dcache_icache_page(page); set_bit(PG_dcache_clean, &page->flags); } else @@ -1545,10 +1556,10 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault) if (user_mode(regs) || (region_id == USER_REGION_ID)) access &= ~_PAGE_PRIVILEGED; - if (regs->trap == 0x400) + if (TRAP(regs) == INTERRUPT_INST_STORAGE) access |= _PAGE_EXEC; - err = hash_page_mm(mm, ea, access, regs->trap, flags); + err = hash_page_mm(mm, ea, access, TRAP(regs), flags); if (unlikely(err < 0)) { // failed to instert a hash PTE due to an hypervisor error if (user_mode(regs)) { @@ -1572,10 +1583,11 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault) DEFINE_INTERRUPT_HANDLER_RAW(do_hash_fault) { unsigned long dsisr = regs->dsisr; - long err; - if (unlikely(dsisr & (DSISR_BAD_FAULT_64S | DSISR_KEYFAULT))) - goto page_fault; + if (unlikely(dsisr & (DSISR_BAD_FAULT_64S | DSISR_KEYFAULT))) { + hash__do_page_fault(regs); + return 0; + } /* * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then @@ -1595,13 +1607,10 @@ DEFINE_INTERRUPT_HANDLER_RAW(do_hash_fault) return 0; } - err = __do_hash_fault(regs); - if (err) { -page_fault: - err = hash__do_page_fault(regs); - } + if (__do_hash_fault(regs)) + hash__do_page_fault(regs); - return err; + return 0; } #ifdef CONFIG_PPC_MM_SLICES diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c index 0c8557220ae2..c10fc8a72fb3 100644 --- a/arch/powerpc/mm/book3s64/mmu_context.c +++ b/arch/powerpc/mm/book3s64/mmu_context.c @@ -119,7 +119,7 @@ static int hash__init_new_context(struct mm_struct *mm) /* This is fork. Copy hash_context details from current->mm */ memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context)); #ifdef CONFIG_PPC_SUBPAGE_PROT - /* inherit subpage prot detalis if we have one. */ + /* inherit subpage prot details if we have one. 
*/ if (current->mm->context.hash_context->spt) { mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table), GFP_KERNEL); diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index 15dcc5ad91c5..a2d9ad138709 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -301,19 +301,6 @@ void setup_kuap(bool disabled) } #endif -static inline void update_current_thread_amr(u64 value) -{ - current->thread.regs->amr = value; -} - -static inline void update_current_thread_iamr(u64 value) -{ - if (!likely(pkey_execute_disable_supported)) - return; - - current->thread.regs->iamr = value; -} - #ifdef CONFIG_PPC_MEM_KEYS void pkey_mm_init(struct mm_struct *mm) { @@ -328,7 +315,7 @@ static inline void init_amr(int pkey, u8 init_bits) u64 new_amr_bits = (((u64)init_bits & 0x3UL) << pkeyshift(pkey)); u64 old_amr = current_thread_amr() & ~((u64)(0x3ul) << pkeyshift(pkey)); - update_current_thread_amr(old_amr | new_amr_bits); + current->thread.regs->amr = old_amr | new_amr_bits; } static inline void init_iamr(int pkey, u8 init_bits) @@ -336,7 +323,10 @@ static inline void init_iamr(int pkey, u8 init_bits) u64 new_iamr_bits = (((u64)init_bits & 0x1UL) << pkeyshift(pkey)); u64 old_iamr = current_thread_iamr() & ~((u64)(0x1ul) << pkeyshift(pkey)); - update_current_thread_iamr(old_iamr | new_iamr_bits); + if (!likely(pkey_execute_disable_supported)) + return; + + current->thread.regs->iamr = old_iamr | new_iamr_bits; } /* diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 98f0b243c1ab..5fef8db3b463 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -108,7 +108,7 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa, set_the_pte: set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags)); - smp_wmb(); + asm volatile("ptesync": : :"memory"); return 0; } @@ -168,7 +168,7 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa, set_the_pte: set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags)); - smp_wmb(); + asm volatile("ptesync": : :"memory"); return 0; } @@ -180,8 +180,8 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa, } #ifdef CONFIG_STRICT_KERNEL_RWX -void radix__change_memory_range(unsigned long start, unsigned long end, - unsigned long clear) +static void radix__change_memory_range(unsigned long start, unsigned long end, + unsigned long clear) { unsigned long idx; pgd_t *pgdp; @@ -1058,7 +1058,7 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, * Book3S does not require a TLB flush when relaxing access * restrictions when the address space is not attached to a * NMMU, because the core MMU will reload the pte after taking - * an access fault, which is defined by the architectue. + * an access fault, which is defined by the architecture. 
*/ } /* See ptesync comment in radix__set_pte_at */ @@ -1082,22 +1082,6 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma, set_pte_at(mm, addr, ptep, pte); } -int __init arch_ioremap_pud_supported(void) -{ - /* HPT does not cope with large pages in the vmalloc area */ - return radix_enabled(); -} - -int __init arch_ioremap_pmd_supported(void) -{ - return radix_enabled(); -} - -int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) -{ - return 0; -} - int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) { pte_t *ptep = (pte_t *)pud; @@ -1181,8 +1165,3 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) return 1; } - -int __init arch_ioremap_p4d_supported(void) -{ - return 0; -} diff --git a/arch/powerpc/mm/cacheflush.c b/arch/powerpc/mm/cacheflush.c new file mode 100644 index 000000000000..63363787e000 --- /dev/null +++ b/arch/powerpc/mm/cacheflush.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <linux/highmem.h> +#include <linux/kprobes.h> + +/** + * flush_coherent_icache() - if a CPU has a coherent icache, flush it + * Return true if the cache was flushed, false otherwise + */ +static inline bool flush_coherent_icache(void) +{ + /* + * For a snooping icache, we still need a dummy icbi to purge all the + * prefetched instructions from the ifetch buffers. We also need a sync + * before the icbi to order the the actual stores to memory that might + * have modified instructions with the icbi. + */ + if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) { + mb(); /* sync */ + icbi((void *)PAGE_OFFSET); + mb(); /* sync */ + isync(); + return true; + } + + return false; +} + +/** + * invalidate_icache_range() - Flush the icache by issuing icbi across an address range + * @start: the start address + * @stop: the stop address (exclusive) + */ +static void invalidate_icache_range(unsigned long start, unsigned long stop) +{ + unsigned long shift = l1_icache_shift(); + unsigned long bytes = l1_icache_bytes(); + char *addr = (char *)(start & ~(bytes - 1)); + unsigned long size = stop - (unsigned long)addr + (bytes - 1); + unsigned long i; + + for (i = 0; i < size >> shift; i++, addr += bytes) + icbi(addr); + + mb(); /* sync */ + isync(); +} + +/** + * flush_icache_range: Write any modified data cache blocks out to memory + * and invalidate the corresponding blocks in the instruction cache + * + * Generic code will call this after writing memory, before executing from it. + * + * @start: the start address + * @stop: the stop address (exclusive) + */ +void flush_icache_range(unsigned long start, unsigned long stop) +{ + if (flush_coherent_icache()) + return; + + clean_dcache_range(start, stop); + + if (IS_ENABLED(CONFIG_44x)) { + /* + * Flash invalidate on 44x because we are passed kmapped + * addresses and this doesn't work for userspace pages due to + * the virtually tagged icache. 
+ */ + iccci((void *)start); + mb(); /* sync */ + isync(); + } else + invalidate_icache_range(start, stop); +} +EXPORT_SYMBOL(flush_icache_range); + +#ifdef CONFIG_HIGHMEM +/** + * flush_dcache_icache_phys() - Flush a page by it's physical address + * @physaddr: the physical address of the page + */ +static void flush_dcache_icache_phys(unsigned long physaddr) +{ + unsigned long bytes = l1_dcache_bytes(); + unsigned long nb = PAGE_SIZE / bytes; + unsigned long addr = physaddr & PAGE_MASK; + unsigned long msr, msr0; + unsigned long loop1 = addr, loop2 = addr; + + msr0 = mfmsr(); + msr = msr0 & ~MSR_DR; + /* + * This must remain as ASM to prevent potential memory accesses + * while the data MMU is disabled + */ + asm volatile( + " mtctr %2;\n" + " mtmsr %3;\n" + " isync;\n" + "0: dcbst 0, %0;\n" + " addi %0, %0, %4;\n" + " bdnz 0b;\n" + " sync;\n" + " mtctr %2;\n" + "1: icbi 0, %1;\n" + " addi %1, %1, %4;\n" + " bdnz 1b;\n" + " sync;\n" + " mtmsr %5;\n" + " isync;\n" + : "+&r" (loop1), "+&r" (loop2) + : "r" (nb), "r" (msr), "i" (bytes), "r" (msr0) + : "ctr", "memory"); +} +NOKPROBE_SYMBOL(flush_dcache_icache_phys) +#else +static void flush_dcache_icache_phys(unsigned long physaddr) +{ +} +#endif + +/** + * __flush_dcache_icache(): Flush a particular page from the data cache to RAM. + * Note: this is necessary because the instruction cache does *not* + * snoop from the data cache. + * + * @p: the address of the page to flush + */ +static void __flush_dcache_icache(void *p) +{ + unsigned long addr = (unsigned long)p & PAGE_MASK; + + clean_dcache_range(addr, addr + PAGE_SIZE); + + /* + * We don't flush the icache on 44x. Those have a virtual icache and we + * don't have access to the virtual address here (it's not the page + * vaddr but where it's mapped in user space). The flushing of the + * icache on these is handled elsewhere, when a change in the address + * space occurs, before returning to user space. + */ + + if (mmu_has_feature(MMU_FTR_TYPE_44x)) + return; + + invalidate_icache_range(addr, addr + PAGE_SIZE); +} + +static void flush_dcache_icache_hugepage(struct page *page) +{ + int i; + int nr = compound_nr(page); + + if (!PageHighMem(page)) { + for (i = 0; i < nr; i++) + __flush_dcache_icache(lowmem_page_address(page + i)); + } else { + for (i = 0; i < nr; i++) { + void *start = kmap_local_page(page + i); + + __flush_dcache_icache(start); + kunmap_local(start); + } + } +} + +void flush_dcache_icache_page(struct page *page) +{ + if (flush_coherent_icache()) + return; + + if (PageCompound(page)) + return flush_dcache_icache_hugepage(page); + + if (!PageHighMem(page)) { + __flush_dcache_icache(lowmem_page_address(page)); + } else if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) { + void *start = kmap_local_page(page); + + __flush_dcache_icache(start); + kunmap_local(start); + } else { + flush_dcache_icache_phys(page_to_phys(page)); + } +} +EXPORT_SYMBOL(flush_dcache_icache_page); + +void clear_user_page(void *page, unsigned long vaddr, struct page *pg) +{ + clear_page(page); + + /* + * We shouldn't have to do this, but some versions of glibc + * require it (ld.so assumes zero filled pages are icache clean) + * - Anton + */ + flush_dcache_page(pg); +} +EXPORT_SYMBOL(clear_user_page); + +void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, + struct page *pg) +{ + copy_page(vto, vfrom); + + /* + * We should be able to use the following optimisation, however + * there are two problems. 
+ * Firstly a bug in some versions of binutils meant PLT sections + * were not marked executable. + * Secondly the first word in the GOT section is blrl, used + * to establish the GOT address. Until recently the GOT was + * not marked executable. + * - Anton + */ +#if 0 + if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0)) + return; +#endif + + flush_dcache_page(pg); +} + +void flush_icache_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long addr, int len) +{ + void *maddr; + + maddr = kmap_local_page(page) + (addr & ~PAGE_MASK); + flush_icache_range((unsigned long)maddr, (unsigned long)maddr + len); + kunmap_local(maddr); +} diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index bb368257b55c..34f641d4a2fe 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -32,6 +32,8 @@ #include <linux/context_tracking.h> #include <linux/hugetlb.h> #include <linux/uaccess.h> +#include <linux/kfence.h> +#include <linux/pkeys.h> #include <asm/firmware.h> #include <asm/interrupt.h> @@ -87,7 +89,6 @@ static noinline int bad_area(struct pt_regs *regs, unsigned long address) return __bad_area(regs, address, SEGV_MAPERR); } -#ifdef CONFIG_PPC_MEM_KEYS static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address, struct vm_area_struct *vma) { @@ -127,7 +128,6 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address, return 0; } -#endif static noinline int bad_access(struct pt_regs *regs, unsigned long address) { @@ -197,7 +197,7 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address, bool is_write) { - int is_exec = TRAP(regs) == 0x400; + int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE; /* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */ if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT | @@ -234,7 +234,6 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code, return false; } -#ifdef CONFIG_PPC_MEM_KEYS static bool access_pkey_error(bool is_write, bool is_exec, bool is_pkey, struct vm_area_struct *vma) { @@ -248,7 +247,6 @@ static bool access_pkey_error(bool is_write, bool is_exec, bool is_pkey, return false; } -#endif static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma) { @@ -393,7 +391,7 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address, struct vm_area_struct * vma; struct mm_struct *mm = current->mm; unsigned int flags = FAULT_FLAG_DEFAULT; - int is_exec = TRAP(regs) == 0x400; + int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE; int is_user = user_mode(regs); int is_write = page_fault_is_write(error_code); vm_fault_t fault, major = 0; @@ -418,8 +416,12 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address, * take a page fault to a kernel address or a page fault to a user * address outside of dedicated places */ - if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write))) + if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write))) { + if (kfence_handle_page_fault(address, is_write, regs)) + return 0; + return SIGSEGV; + } /* * If we're in an interrupt, have no user context or are running @@ -492,11 +494,9 @@ retry: return bad_area(regs, address); } -#ifdef CONFIG_PPC_MEM_KEYS if (unlikely(access_pkey_error(is_write, is_exec, (error_code & DSISR_KEYFAULT), vma))) return bad_access_pkey(regs, address, vma); -#endif 
/* CONFIG_PPC_MEM_KEYS */ if (unlikely(access_error(is_write, is_exec, vma))) return bad_access(regs, address); @@ -539,39 +539,25 @@ retry: } NOKPROBE_SYMBOL(___do_page_fault); -static long __do_page_fault(struct pt_regs *regs) +static __always_inline void __do_page_fault(struct pt_regs *regs) { - const struct exception_table_entry *entry; long err; err = ___do_page_fault(regs, regs->dar, regs->dsisr); - if (likely(!err)) - return err; - - entry = search_exception_tables(regs->nip); - if (likely(entry)) { - instruction_pointer_set(regs, extable_fixup(entry)); - return 0; - } else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) { - __bad_page_fault(regs, err); - return 0; - } else { - /* 32 and 64e handle the bad page fault in asm */ - return err; - } + if (unlikely(err)) + bad_page_fault(regs, err); } -NOKPROBE_SYMBOL(__do_page_fault); -DEFINE_INTERRUPT_HANDLER_RET(do_page_fault) +DEFINE_INTERRUPT_HANDLER(do_page_fault) { - return __do_page_fault(regs); + __do_page_fault(regs); } #ifdef CONFIG_PPC_BOOK3S_64 /* Same as do_page_fault but interrupt entry has already run in do_hash_fault */ -long hash__do_page_fault(struct pt_regs *regs) +void hash__do_page_fault(struct pt_regs *regs) { - return __do_page_fault(regs); + __do_page_fault(regs); } NOKPROBE_SYMBOL(hash__do_page_fault); #endif @@ -581,27 +567,27 @@ NOKPROBE_SYMBOL(hash__do_page_fault); * It is called from the DSI and ISI handlers in head.S and from some * of the procedures in traps.c. */ -void __bad_page_fault(struct pt_regs *regs, int sig) +static void __bad_page_fault(struct pt_regs *regs, int sig) { int is_write = page_fault_is_write(regs->dsisr); /* kernel has accessed a bad area */ switch (TRAP(regs)) { - case 0x300: - case 0x380: - case 0xe00: + case INTERRUPT_DATA_STORAGE: + case INTERRUPT_DATA_SEGMENT: + case INTERRUPT_H_DATA_STORAGE: pr_alert("BUG: %s on %s at 0x%08lx\n", regs->dar < PAGE_SIZE ? "Kernel NULL pointer dereference" : "Unable to handle kernel data access", is_write ? "write" : "read", regs->dar); break; - case 0x400: - case 0x480: + case INTERRUPT_INST_STORAGE: + case INTERRUPT_INST_SEGMENT: pr_alert("BUG: Unable to handle kernel instruction fetch%s", regs->nip < PAGE_SIZE ? " (NULL pointer?)\n" : "\n"); break; - case 0x600: + case INTERRUPT_ALIGNMENT: pr_alert("BUG: Unable to handle kernel unaligned access at 0x%08lx\n", regs->dar); break; diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index d142b76d507d..9a75ba078e1b 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -106,7 +106,8 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, * At this point we do the placement change only for BOOK3S 64. This would * possibly work on other subarchs. 
*/ -pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) { pgd_t *pg; p4d_t *p4; diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 02c7db4087cb..3d690be48e84 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -97,6 +97,9 @@ static void __init MMU_setup(void) if (IS_ENABLED(CONFIG_PPC_8xx)) return; + if (IS_ENABLED(CONFIG_KFENCE)) + __map_without_ltlbs = 1; + if (debug_pagealloc_enabled()) __map_without_ltlbs = 1; diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c index b1a0aebe8c48..57342154d2b0 100644 --- a/arch/powerpc/mm/ioremap.c +++ b/arch/powerpc/mm/ioremap.c @@ -93,7 +93,7 @@ void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size, if (!ret) return (void __iomem *)area->addr + offset; - unmap_kernel_range(va, size); + vunmap_range(va, va + size); free_vm_area(area); return NULL; diff --git a/arch/powerpc/mm/maccess.c b/arch/powerpc/mm/maccess.c index fa9a7a718fc6..a3c30a884076 100644 --- a/arch/powerpc/mm/maccess.c +++ b/arch/powerpc/mm/maccess.c @@ -3,7 +3,28 @@ #include <linux/uaccess.h> #include <linux/kernel.h> +#include <asm/disassemble.h> +#include <asm/inst.h> +#include <asm/ppc-opcode.h> + bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) { return is_kernel_addr((unsigned long)unsafe_src); } + +int copy_inst_from_kernel_nofault(struct ppc_inst *inst, struct ppc_inst *src) +{ + unsigned int val, suffix; + int err; + + err = copy_from_kernel_nofault(&val, src, sizeof(val)); + if (err) + return err; + if (IS_ENABLED(CONFIG_PPC64) && get_op(val) == OP_PREFIX) { + err = copy_from_kernel_nofault(&suffix, (void *)src + 4, 4); + *inst = ppc_inst_prefix(val, suffix); + } else { + *inst = ppc_inst(val); + } + return err; +} diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 4e8ce6d85232..043bbeaf407c 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -12,49 +12,18 @@ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds */ -#include <linux/export.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/gfp.h> -#include <linux/types.h> -#include <linux/mm.h> -#include <linux/stddef.h> -#include <linux/init.h> #include <linux/memblock.h> #include <linux/highmem.h> -#include <linux/initrd.h> -#include <linux/pagemap.h> #include <linux/suspend.h> -#include <linux/hugetlb.h> -#include <linux/slab.h> -#include <linux/vmalloc.h> -#include <linux/memremap.h> #include <linux/dma-direct.h> -#include <linux/kprobes.h> -#include <asm/prom.h> -#include <asm/io.h> -#include <asm/mmu_context.h> -#include <asm/mmu.h> -#include <asm/smp.h> #include <asm/machdep.h> -#include <asm/btext.h> -#include <asm/tlb.h> -#include <asm/sections.h> -#include <asm/sparsemem.h> -#include <asm/vdso.h> -#include <asm/fixmap.h> -#include <asm/swiotlb.h> #include <asm/rtas.h> #include <asm/kasan.h> #include <asm/svm.h> -#include <asm/mmzone.h> #include <mm/mmu_decl.h> -static DEFINE_MUTEX(linear_mapping_mutex); unsigned long long memory_limit; bool init_mem_is_free; @@ -72,6 +41,7 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, EXPORT_SYMBOL(phys_mem_access_prot); #ifdef CONFIG_MEMORY_HOTPLUG +static DEFINE_MUTEX(linear_mapping_mutex); #ifdef CONFIG_NUMA int memory_add_physaddr_to_nid(u64 start) @@ -312,7 +282,6 @@ void __init mem_init(void) 
(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1; #endif - mem_init_print_info(NULL); #ifdef CONFIG_PPC32 pr_info("Kernel virtual memory layout:\n"); #ifdef CONFIG_KASAN @@ -340,257 +309,6 @@ void free_initmem(void) free_initmem_default(POISON_FREE_INITMEM); } -/** - * flush_coherent_icache() - if a CPU has a coherent icache, flush it - * @addr: The base address to use (can be any valid address, the whole cache will be flushed) - * Return true if the cache was flushed, false otherwise - */ -static inline bool flush_coherent_icache(unsigned long addr) -{ - /* - * For a snooping icache, we still need a dummy icbi to purge all the - * prefetched instructions from the ifetch buffers. We also need a sync - * before the icbi to order the the actual stores to memory that might - * have modified instructions with the icbi. - */ - if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) { - mb(); /* sync */ - allow_read_from_user((const void __user *)addr, L1_CACHE_BYTES); - icbi((void *)addr); - prevent_read_from_user((const void __user *)addr, L1_CACHE_BYTES); - mb(); /* sync */ - isync(); - return true; - } - - return false; -} - -/** - * invalidate_icache_range() - Flush the icache by issuing icbi across an address range - * @start: the start address - * @stop: the stop address (exclusive) - */ -static void invalidate_icache_range(unsigned long start, unsigned long stop) -{ - unsigned long shift = l1_icache_shift(); - unsigned long bytes = l1_icache_bytes(); - char *addr = (char *)(start & ~(bytes - 1)); - unsigned long size = stop - (unsigned long)addr + (bytes - 1); - unsigned long i; - - for (i = 0; i < size >> shift; i++, addr += bytes) - icbi(addr); - - mb(); /* sync */ - isync(); -} - -/** - * flush_icache_range: Write any modified data cache blocks out to memory - * and invalidate the corresponding blocks in the instruction cache - * - * Generic code will call this after writing memory, before executing from it. - * - * @start: the start address - * @stop: the stop address (exclusive) - */ -void flush_icache_range(unsigned long start, unsigned long stop) -{ - if (flush_coherent_icache(start)) - return; - - clean_dcache_range(start, stop); - - if (IS_ENABLED(CONFIG_44x)) { - /* - * Flash invalidate on 44x because we are passed kmapped - * addresses and this doesn't work for userspace pages due to - * the virtually tagged icache. 
- */ - iccci((void *)start); - mb(); /* sync */ - isync(); - } else - invalidate_icache_range(start, stop); -} -EXPORT_SYMBOL(flush_icache_range); - -#if !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64) -/** - * flush_dcache_icache_phys() - Flush a page by it's physical address - * @physaddr: the physical address of the page - */ -static void flush_dcache_icache_phys(unsigned long physaddr) -{ - unsigned long bytes = l1_dcache_bytes(); - unsigned long nb = PAGE_SIZE / bytes; - unsigned long addr = physaddr & PAGE_MASK; - unsigned long msr, msr0; - unsigned long loop1 = addr, loop2 = addr; - - msr0 = mfmsr(); - msr = msr0 & ~MSR_DR; - /* - * This must remain as ASM to prevent potential memory accesses - * while the data MMU is disabled - */ - asm volatile( - " mtctr %2;\n" - " mtmsr %3;\n" - " isync;\n" - "0: dcbst 0, %0;\n" - " addi %0, %0, %4;\n" - " bdnz 0b;\n" - " sync;\n" - " mtctr %2;\n" - "1: icbi 0, %1;\n" - " addi %1, %1, %4;\n" - " bdnz 1b;\n" - " sync;\n" - " mtmsr %5;\n" - " isync;\n" - : "+&r" (loop1), "+&r" (loop2) - : "r" (nb), "r" (msr), "i" (bytes), "r" (msr0) - : "ctr", "memory"); -} -NOKPROBE_SYMBOL(flush_dcache_icache_phys) -#endif // !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64) - -/* - * This is called when a page has been modified by the kernel. - * It just marks the page as not i-cache clean. We do the i-cache - * flush later when the page is given to a user process, if necessary. - */ -void flush_dcache_page(struct page *page) -{ - if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) - return; - /* avoid an atomic op if possible */ - if (test_bit(PG_dcache_clean, &page->flags)) - clear_bit(PG_dcache_clean, &page->flags); -} -EXPORT_SYMBOL(flush_dcache_page); - -static void flush_dcache_icache_hugepage(struct page *page) -{ - int i; - void *start; - - BUG_ON(!PageCompound(page)); - - for (i = 0; i < compound_nr(page); i++) { - if (!PageHighMem(page)) { - __flush_dcache_icache(page_address(page+i)); - } else { - start = kmap_atomic(page+i); - __flush_dcache_icache(start); - kunmap_atomic(start); - } - } -} - -void flush_dcache_icache_page(struct page *page) -{ - - if (PageCompound(page)) - return flush_dcache_icache_hugepage(page); - -#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64) - /* On 8xx there is no need to kmap since highmem is not supported */ - __flush_dcache_icache(page_address(page)); -#else - if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) { - void *start = kmap_atomic(page); - __flush_dcache_icache(start); - kunmap_atomic(start); - } else { - unsigned long addr = page_to_pfn(page) << PAGE_SHIFT; - - if (flush_coherent_icache(addr)) - return; - flush_dcache_icache_phys(addr); - } -#endif -} -EXPORT_SYMBOL(flush_dcache_icache_page); - -/** - * __flush_dcache_icache(): Flush a particular page from the data cache to RAM. - * Note: this is necessary because the instruction cache does *not* - * snoop from the data cache. - * - * @page: the address of the page to flush - */ -void __flush_dcache_icache(void *p) -{ - unsigned long addr = (unsigned long)p; - - if (flush_coherent_icache(addr)) - return; - - clean_dcache_range(addr, addr + PAGE_SIZE); - - /* - * We don't flush the icache on 44x. Those have a virtual icache and we - * don't have access to the virtual address here (it's not the page - * vaddr but where it's mapped in user space). The flushing of the - * icache on these is handled elsewhere, when a change in the address - * space occurs, before returning to user space. 
- */ - - if (mmu_has_feature(MMU_FTR_TYPE_44x)) - return; - - invalidate_icache_range(addr, addr + PAGE_SIZE); -} - -void clear_user_page(void *page, unsigned long vaddr, struct page *pg) -{ - clear_page(page); - - /* - * We shouldn't have to do this, but some versions of glibc - * require it (ld.so assumes zero filled pages are icache clean) - * - Anton - */ - flush_dcache_page(pg); -} -EXPORT_SYMBOL(clear_user_page); - -void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, - struct page *pg) -{ - copy_page(vto, vfrom); - - /* - * We should be able to use the following optimisation, however - * there are two problems. - * Firstly a bug in some versions of binutils meant PLT sections - * were not marked executable. - * Secondly the first word in the GOT section is blrl, used - * to establish the GOT address. Until recently the GOT was - * not marked executable. - * - Anton - */ -#if 0 - if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0)) - return; -#endif - - flush_dcache_page(pg); -} - -void flush_icache_user_page(struct vm_area_struct *vma, struct page *page, - unsigned long addr, int len) -{ - unsigned long maddr; - - maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK); - flush_icache_range(maddr, maddr + len); - kunmap(page); -} - /* * System memory should not be in /proc/iomem but various tools expect it * (eg kdump). diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c index 18f20da0d348..a857af401738 100644 --- a/arch/powerpc/mm/mmu_context.c +++ b/arch/powerpc/mm/mmu_context.c @@ -43,24 +43,26 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, /* * This full barrier orders the store to the cpumask above vs - * a subsequent operation which allows this CPU to begin loading - * translations for next. + * a subsequent load which allows this CPU/MMU to begin loading + * translations for 'next' from page table PTEs into the TLB. * - * When using the radix MMU that operation is the load of the + * When using the radix MMU, that operation is the load of the * MMU context id, which is then moved to SPRN_PID. * * For the hash MMU it is either the first load from slb_cache - * in switch_slb(), and/or the store of paca->mm_ctx_id in - * copy_mm_to_paca(). + * in switch_slb() to preload the SLBs, or the load of + * get_user_context which loads the context for the VSID hash + * to insert a new SLB, in the SLB fault handler. * * On the other side, the barrier is in mm/tlb-radix.c for - * radix which orders earlier stores to clear the PTEs vs - * the load of mm_cpumask. And pte_xchg which does the same - * thing for hash. + * radix which orders earlier stores to clear the PTEs before + * the load of mm_cpumask to check which CPU TLBs should be + * flushed. For hash, pte_xchg to clear the PTE includes the + * barrier. * - * This full barrier is needed by membarrier when switching - * between processes after store to rq->curr, before user-space - * memory accesses. + * This full barrier is also needed by membarrier when + * switching between processes after store to rq->curr, before + * user-space memory accesses. 
*/ smp_mb(); diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 998810e68562..7dac910c0b21 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -185,3 +185,8 @@ void ptdump_check_wx(void); #else static inline void ptdump_check_wx(void) { } #endif + +static inline bool debug_pagealloc_enabled_or_kfence(void) +{ + return IS_ENABLED(CONFIG_KFENCE) || debug_pagealloc_enabled(); +} diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index 19a3eec1d8c5..71bfdbedacee 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -149,7 +149,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) { unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M); unsigned long sinittext = __pa(_sinittext); - bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled(); + bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled_or_kfence(); unsigned long boundary = strict_boundary ? sinittext : etext8; unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M); @@ -161,7 +161,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) return 0; mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true); - if (debug_pagealloc_enabled()) { + if (debug_pagealloc_enabled_or_kfence()) { top = boundary; } else { mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true); diff --git a/arch/powerpc/net/Makefile b/arch/powerpc/net/Makefile index c2dec3a68d4c..8e60af32e51e 100644 --- a/arch/powerpc/net/Makefile +++ b/arch/powerpc/net/Makefile @@ -2,8 +2,4 @@ # # Arch-specific network modules # -ifdef CONFIG_PPC64 -obj-$(CONFIG_BPF_JIT) += bpf_jit_comp64.o -else -obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o -endif +obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o bpf_jit_comp$(BITS).o diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index d0a67a1bbaf1..99fad093f43e 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -26,6 +26,9 @@ /* Long jump; (unconditional 'branch') */ #define PPC_JMP(dest) EMIT(PPC_INST_BRANCH | \ (((dest) - (ctx->idx * 4)) & 0x03fffffc)) +/* blr; (unconditional 'branch' with link) to absolute address */ +#define PPC_BL_ABS(dest) EMIT(PPC_INST_BL | \ + (((dest) - (unsigned long)(image + ctx->idx)) & 0x03fffffc)) /* "cond" here covers BO:BI fields. */ #define PPC_BCC_SHORT(cond, dest) EMIT(PPC_INST_BRANCH_COND | \ (((cond) & 0x3ff) << 16) | \ @@ -42,6 +45,10 @@ EMIT(PPC_RAW_ORI(d, d, IMM_L(i))); \ } } while(0) +#ifdef CONFIG_PPC32 +#define PPC_EX32(r, i) EMIT(PPC_RAW_LI((r), (i) < 0 ? -1 : 0)) +#endif + #define PPC_LI64(d, i) do { \ if ((long)(i) >= -2147483648 && \ (long)(i) < 2147483648) \ @@ -108,6 +115,63 @@ static inline bool is_nearbranch(int offset) #define COND_LT (CR0_LT | COND_CMP_TRUE) #define COND_LE (CR0_GT | COND_CMP_FALSE) +#define SEEN_FUNC 0x20000000 /* might call external helpers */ +#define SEEN_STACK 0x40000000 /* uses BPF stack */ +#define SEEN_TAILCALL 0x80000000 /* uses tail calls */ + +#define SEEN_VREG_MASK 0x1ff80000 /* Volatile registers r3-r12 */ +#define SEEN_NVREG_MASK 0x0003ffff /* Non volatile registers r14-r31 */ + +#ifdef CONFIG_PPC64 +extern const int b2p[MAX_BPF_JIT_REG + 2]; +#else +extern const int b2p[MAX_BPF_JIT_REG + 1]; +#endif + +struct codegen_context { + /* + * This is used to track register usage as well + * as calls to external helpers. 
+ * - register usage is tracked with corresponding + * bits (r3-r31) + * - rest of the bits can be used to track other + * things -- for now, we use bits 0 to 2 + * encoded in SEEN_* macros above + */ + unsigned int seen; + unsigned int idx; + unsigned int stack_size; + int b2p[ARRAY_SIZE(b2p)]; +}; + +static inline void bpf_flush_icache(void *start, void *end) +{ + smp_wmb(); /* smp write barrier */ + flush_icache_range((unsigned long)start, (unsigned long)end); +} + +static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i) +{ + return ctx->seen & (1 << (31 - i)); +} + +static inline void bpf_set_seen_register(struct codegen_context *ctx, int i) +{ + ctx->seen |= 1 << (31 - i); +} + +static inline void bpf_clear_seen_register(struct codegen_context *ctx, int i) +{ + ctx->seen &= ~(1 << (31 - i)); +} + +void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func); +int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx, + u32 *addrs, bool extra_pass); +void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx); +void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx); +void bpf_jit_realloc_regs(struct codegen_context *ctx); + #endif #endif diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h deleted file mode 100644 index 448dfd4d98e1..000000000000 --- a/arch/powerpc/net/bpf_jit32.h +++ /dev/null @@ -1,139 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * bpf_jit32.h: BPF JIT compiler for PPC - * - * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation - * - * Split from bpf_jit.h - */ -#ifndef _BPF_JIT32_H -#define _BPF_JIT32_H - -#include <asm/asm-compat.h> -#include "bpf_jit.h" - -#ifdef CONFIG_PPC64 -#define BPF_PPC_STACK_R3_OFF 48 -#define BPF_PPC_STACK_LOCALS 32 -#define BPF_PPC_STACK_BASIC (48+64) -#define BPF_PPC_STACK_SAVE (18*8) -#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \ - BPF_PPC_STACK_SAVE) -#define BPF_PPC_SLOWPATH_FRAME (48+64) -#else -#define BPF_PPC_STACK_R3_OFF 24 -#define BPF_PPC_STACK_LOCALS 16 -#define BPF_PPC_STACK_BASIC (24+32) -#define BPF_PPC_STACK_SAVE (18*4) -#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \ - BPF_PPC_STACK_SAVE) -#define BPF_PPC_SLOWPATH_FRAME (24+32) -#endif - -#define REG_SZ (BITS_PER_LONG/8) - -/* - * Generated code register usage: - * - * As normal PPC C ABI (e.g. r1=sp, r2=TOC), with: - * - * skb r3 (Entry parameter) - * A register r4 - * X register r5 - * addr param r6 - * r7-r10 scratch - * skb->data r14 - * skb headlen r15 (skb->len - skb->data_len) - * m[0] r16 - * m[...] ... 
- * m[15] r31 - */ -#define r_skb 3 -#define r_ret 3 -#define r_A 4 -#define r_X 5 -#define r_addr 6 -#define r_scratch1 7 -#define r_scratch2 8 -#define r_D 14 -#define r_HL 15 -#define r_M 16 - -#ifndef __ASSEMBLY__ - -/* - * Assembly helpers from arch/powerpc/net/bpf_jit.S: - */ -#define DECLARE_LOAD_FUNC(func) \ - extern u8 func[], func##_negative_offset[], func##_positive_offset[] - -DECLARE_LOAD_FUNC(sk_load_word); -DECLARE_LOAD_FUNC(sk_load_half); -DECLARE_LOAD_FUNC(sk_load_byte); -DECLARE_LOAD_FUNC(sk_load_byte_msh); - -#define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) EMIT(PPC_RAW_LBZ(r, base, i)); \ - else { EMIT(PPC_RAW_ADDIS(r, base, IMM_HA(i))); \ - EMIT(PPC_RAW_LBZ(r, r, IMM_L(i))); } } while(0) - -#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) EMIT(PPC_RAW_LD(r, base, i)); \ - else { EMIT(PPC_RAW_ADDIS(r, base, IMM_HA(i))); \ - EMIT(PPC_RAW_LD(r, r, IMM_L(i))); } } while(0) - -#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) EMIT(PPC_RAW_LWZ(r, base, i)); \ - else { EMIT(PPC_RAW_ADDIS(r, base, IMM_HA(i))); \ - EMIT(PPC_RAW_LWZ(r, r, IMM_L(i))); } } while(0) - -#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) EMIT(PPC_RAW_LHZ(r, base, i)); \ - else { EMIT(PPC_RAW_ADDIS(r, base, IMM_HA(i))); \ - EMIT(PPC_RAW_LHZ(r, r, IMM_L(i))); } } while(0) - -#ifdef CONFIG_PPC64 -#define PPC_LL_OFFS(r, base, i) do { PPC_LD_OFFS(r, base, i); } while(0) -#else -#define PPC_LL_OFFS(r, base, i) do { PPC_LWZ_OFFS(r, base, i); } while(0) -#endif - -#ifdef CONFIG_SMP -#ifdef CONFIG_PPC64 -#define PPC_BPF_LOAD_CPU(r) \ - do { BUILD_BUG_ON(sizeof_field(struct paca_struct, paca_index) != 2); \ - PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index)); \ - } while (0) -#else -#define PPC_BPF_LOAD_CPU(r) \ - do { BUILD_BUG_ON(sizeof_field(struct task_struct, cpu) != 4); \ - PPC_LHZ_OFFS(r, 2, offsetof(struct task_struct, cpu)); \ - } while(0) -#endif -#else -#define PPC_BPF_LOAD_CPU(r) do { EMIT(PPC_RAW_LI(r, 0)); } while(0) -#endif - -#define PPC_LHBRX_OFFS(r, base, i) \ - do { PPC_LI32(r, i); EMIT(PPC_RAW_LHBRX(r, r, base)); } while(0) -#ifdef __LITTLE_ENDIAN__ -#define PPC_NTOHS_OFFS(r, base, i) PPC_LHBRX_OFFS(r, base, i) -#else -#define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i) -#endif - -#define PPC_BPF_LL(r, base, i) do { EMIT(PPC_RAW_LWZ(r, base, i)); } while(0) -#define PPC_BPF_STL(r, base, i) do { EMIT(PPC_RAW_STW(r, base, i)); } while(0) -#define PPC_BPF_STLU(r, base, i) do { EMIT(PPC_RAW_STWU(r, base, i)); } while(0) - -#define SEEN_DATAREF 0x10000 /* might call external helpers */ -#define SEEN_XREG 0x20000 /* X reg is used */ -#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary - * storage */ -#define SEEN_MEM_MSK 0x0ffff - -struct codegen_context { - unsigned int seen; - unsigned int idx; - int pc_ret0; /* bpf index of first RET #0 instruction (if any) */ -}; - -#endif - -#endif diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h index 2e33c6673ff9..7b713edfa7e2 100644 --- a/arch/powerpc/net/bpf_jit64.h +++ b/arch/powerpc/net/bpf_jit64.h @@ -39,7 +39,7 @@ #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* BPF to ppc register mappings */ -static const int b2p[] = { +const int b2p[MAX_BPF_JIT_REG + 2] = { /* function return value */ [BPF_REG_0] = 8, /* function arguments */ @@ -86,25 +86,6 @@ static const int b2p[] = { } while(0) #define PPC_BPF_STLU(r, base, i) do { EMIT(PPC_RAW_STDU(r, base, i)); } while(0) -#define SEEN_FUNC 0x1000 /* might call external helpers */ -#define SEEN_STACK 0x2000 /* uses BPF stack */ 
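/*
 * For illustration only -- a standalone userspace sketch, not code from this
 * patch: the replacement scheme in bpf_jit.h above tracks GPR usage with one
 * bit per register, bit (31 - regno), so SEEN_VREG_MASK covers r3-r12 (bits
 * 19..28) and SEEN_NVREG_MASK covers r14-r31 (bits 0..17), while the new
 * SEEN_FUNC/SEEN_STACK/SEEN_TAILCALL values reuse the bit positions of
 * r2/r1/r0, which are never tracked as BPF registers.
 */
#include <stdio.h>

#define SEEN_VREG_MASK	0x1ff80000	/* volatile r3..r12: bits 19..28 */
#define SEEN_NVREG_MASK	0x0003ffff	/* non-volatile r14..r31: bits 0..17 */

static unsigned int seen_bit(int reg)
{
	return 1u << (31 - reg);	/* same mapping as bpf_set_seen_register() */
}

int main(void)
{
	unsigned int seen = 0;

	seen |= seen_bit(3);	/* r3 used  -> 0x10000000 */
	seen |= seen_bit(31);	/* r31 used -> 0x00000001 */

	printf("volatile GPR used:     %s\n", (seen & SEEN_VREG_MASK) ? "yes" : "no");
	printf("non-volatile GPR used: %s\n", (seen & SEEN_NVREG_MASK) ? "yes" : "no");
	return 0;
}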
-#define SEEN_TAILCALL 0x4000 /* uses tail calls */ - -struct codegen_context { - /* - * This is used to track register usage as well - * as calls to external helpers. - * - register usage is tracked with corresponding - * bits (r3-r10 and r27-r31) - * - rest of the bits can be used to track other - * things -- for now, we use bits 16 to 23 - * encoded in SEEN_* macros above - */ - unsigned int seen; - unsigned int idx; - unsigned int stack_size; -}; - #endif /* !__ASSEMBLY__ */ #endif diff --git a/arch/powerpc/net/bpf_jit_asm.S b/arch/powerpc/net/bpf_jit_asm.S deleted file mode 100644 index 2f5030d8383f..000000000000 --- a/arch/powerpc/net/bpf_jit_asm.S +++ /dev/null @@ -1,226 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* bpf_jit.S: Packet/header access helper functions - * for PPC64 BPF compiler. - * - * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation - */ - -#include <asm/ppc_asm.h> -#include <asm/asm-compat.h> -#include "bpf_jit32.h" - -/* - * All of these routines are called directly from generated code, - * whose register usage is: - * - * r3 skb - * r4,r5 A,X - * r6 *** address parameter to helper *** - * r7-r10 scratch - * r14 skb->data - * r15 skb headlen - * r16-31 M[] - */ - -/* - * To consider: These helpers are so small it could be better to just - * generate them inline. Inline code can do the simple headlen check - * then branch directly to slow_path_XXX if required. (In fact, could - * load a spare GPR with the address of slow_path_generic and pass size - * as an argument, making the call site a mtlr, li and bllr.) - */ - .globl sk_load_word -sk_load_word: - PPC_LCMPI r_addr, 0 - blt bpf_slow_path_word_neg - .globl sk_load_word_positive_offset -sk_load_word_positive_offset: - /* Are we accessing past headlen? */ - subi r_scratch1, r_HL, 4 - PPC_LCMP r_scratch1, r_addr - blt bpf_slow_path_word - /* Nope, just hitting the header. cr0 here is eq or gt! */ -#ifdef __LITTLE_ENDIAN__ - lwbrx r_A, r_D, r_addr -#else - lwzx r_A, r_D, r_addr -#endif - blr /* Return success, cr0 != LT */ - - .globl sk_load_half -sk_load_half: - PPC_LCMPI r_addr, 0 - blt bpf_slow_path_half_neg - .globl sk_load_half_positive_offset -sk_load_half_positive_offset: - subi r_scratch1, r_HL, 2 - PPC_LCMP r_scratch1, r_addr - blt bpf_slow_path_half -#ifdef __LITTLE_ENDIAN__ - lhbrx r_A, r_D, r_addr -#else - lhzx r_A, r_D, r_addr -#endif - blr - - .globl sk_load_byte -sk_load_byte: - PPC_LCMPI r_addr, 0 - blt bpf_slow_path_byte_neg - .globl sk_load_byte_positive_offset -sk_load_byte_positive_offset: - PPC_LCMP r_HL, r_addr - ble bpf_slow_path_byte - lbzx r_A, r_D, r_addr - blr - -/* - * BPF_LDX | BPF_B | BPF_MSH: ldxb 4*([offset]&0xf) - * r_addr is the offset value - */ - .globl sk_load_byte_msh -sk_load_byte_msh: - PPC_LCMPI r_addr, 0 - blt bpf_slow_path_byte_msh_neg - .globl sk_load_byte_msh_positive_offset -sk_load_byte_msh_positive_offset: - PPC_LCMP r_HL, r_addr - ble bpf_slow_path_byte_msh - lbzx r_X, r_D, r_addr - rlwinm r_X, r_X, 2, 32-4-2, 31-2 - blr - -/* Call out to skb_copy_bits: - * We'll need to back up our volatile regs first; we have - * local variable space at r1+(BPF_PPC_STACK_BASIC). - * Allocate a new stack frame here to remain ABI-compliant in - * stashing LR. 
- */ -#define bpf_slow_path_common(SIZE) \ - mflr r0; \ - PPC_STL r0, PPC_LR_STKOFF(r1); \ - /* R3 goes in parameter space of caller's frame */ \ - PPC_STL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \ - PPC_STL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \ - PPC_STL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \ - addi r5, r1, BPF_PPC_STACK_BASIC+(2*REG_SZ); \ - PPC_STLU r1, -BPF_PPC_SLOWPATH_FRAME(r1); \ - /* R3 = r_skb, as passed */ \ - mr r4, r_addr; \ - li r6, SIZE; \ - bl skb_copy_bits; \ - nop; \ - /* R3 = 0 on success */ \ - addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \ - PPC_LL r0, PPC_LR_STKOFF(r1); \ - PPC_LL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \ - PPC_LL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \ - mtlr r0; \ - PPC_LCMPI r3, 0; \ - blt bpf_error; /* cr0 = LT */ \ - PPC_LL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \ - /* Great success! */ - -bpf_slow_path_word: - bpf_slow_path_common(4) - /* Data value is on stack, and cr0 != LT */ - lwz r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1) - blr - -bpf_slow_path_half: - bpf_slow_path_common(2) - lhz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1) - blr - -bpf_slow_path_byte: - bpf_slow_path_common(1) - lbz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1) - blr - -bpf_slow_path_byte_msh: - bpf_slow_path_common(1) - lbz r_X, BPF_PPC_STACK_BASIC+(2*8)(r1) - rlwinm r_X, r_X, 2, 32-4-2, 31-2 - blr - -/* Call out to bpf_internal_load_pointer_neg_helper: - * We'll need to back up our volatile regs first; we have - * local variable space at r1+(BPF_PPC_STACK_BASIC). - * Allocate a new stack frame here to remain ABI-compliant in - * stashing LR. - */ -#define sk_negative_common(SIZE) \ - mflr r0; \ - PPC_STL r0, PPC_LR_STKOFF(r1); \ - /* R3 goes in parameter space of caller's frame */ \ - PPC_STL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \ - PPC_STL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \ - PPC_STL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \ - PPC_STLU r1, -BPF_PPC_SLOWPATH_FRAME(r1); \ - /* R3 = r_skb, as passed */ \ - mr r4, r_addr; \ - li r5, SIZE; \ - bl bpf_internal_load_pointer_neg_helper; \ - nop; \ - /* R3 != 0 on success */ \ - addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \ - PPC_LL r0, PPC_LR_STKOFF(r1); \ - PPC_LL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \ - PPC_LL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \ - mtlr r0; \ - PPC_LCMPLI r3, 0; \ - beq bpf_error_slow; /* cr0 = EQ */ \ - mr r_addr, r3; \ - PPC_LL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \ - /* Great success! 
*/ - -bpf_slow_path_word_neg: - lis r_scratch1,-32 /* SKF_LL_OFF */ - PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */ - blt bpf_error /* cr0 = LT */ - .globl sk_load_word_negative_offset -sk_load_word_negative_offset: - sk_negative_common(4) - lwz r_A, 0(r_addr) - blr - -bpf_slow_path_half_neg: - lis r_scratch1,-32 /* SKF_LL_OFF */ - PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */ - blt bpf_error /* cr0 = LT */ - .globl sk_load_half_negative_offset -sk_load_half_negative_offset: - sk_negative_common(2) - lhz r_A, 0(r_addr) - blr - -bpf_slow_path_byte_neg: - lis r_scratch1,-32 /* SKF_LL_OFF */ - PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */ - blt bpf_error /* cr0 = LT */ - .globl sk_load_byte_negative_offset -sk_load_byte_negative_offset: - sk_negative_common(1) - lbz r_A, 0(r_addr) - blr - -bpf_slow_path_byte_msh_neg: - lis r_scratch1,-32 /* SKF_LL_OFF */ - PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */ - blt bpf_error /* cr0 = LT */ - .globl sk_load_byte_msh_negative_offset -sk_load_byte_msh_negative_offset: - sk_negative_common(1) - lbz r_X, 0(r_addr) - rlwinm r_X, r_X, 2, 32-4-2, 31-2 - blr - -bpf_error_slow: - /* fabricate a cr0 = lt */ - li r_scratch1, -1 - PPC_LCMPI r_scratch1, 0 -bpf_error: - /* Entered with cr0 = lt */ - li r3, 0 - /* Generated code will 'blt epilogue', returning 0. */ - blr diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index e809cb5a1631..798ac4350a82 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -1,10 +1,11 @@ // SPDX-License-Identifier: GPL-2.0-only -/* bpf_jit_comp.c: BPF JIT compiler +/* + * eBPF JIT compiler * - * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation + * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> + * IBM Corporation * - * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com) - * Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org> + * Based on the powerpc classic BPF JIT compiler by Matt Evans */ #include <linux/moduleloader.h> #include <asm/cacheflush.h> @@ -12,639 +13,204 @@ #include <linux/netdevice.h> #include <linux/filter.h> #include <linux/if_vlan.h> +#include <asm/kprobes.h> +#include <linux/bpf.h> -#include "bpf_jit32.h" +#include "bpf_jit.h" -static inline void bpf_flush_icache(void *start, void *end) +static void bpf_jit_fill_ill_insns(void *area, unsigned int size) { - smp_wmb(); - flush_icache_range((unsigned long)start, (unsigned long)end); + memset32(area, BREAKPOINT_INSTRUCTION, size / 4); } -static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image, - struct codegen_context *ctx) +/* Fix the branch target addresses for subprog calls */ +static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image, + struct codegen_context *ctx, u32 *addrs) { - int i; - const struct sock_filter *filter = fp->insns; - - if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) { - /* Make stackframe */ - if (ctx->seen & SEEN_DATAREF) { - /* If we call any helpers (for loads), save LR */ - EMIT(PPC_INST_MFLR | __PPC_RT(R0)); - PPC_BPF_STL(0, 1, PPC_LR_STKOFF); - - /* Back up non-volatile regs. */ - PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D))); - PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL))); - } - if (ctx->seen & SEEN_MEM) { - /* - * Conditionally save regs r15-r31 as some will be used - * for M[] data. 
- */ - for (i = r_M; i < (r_M+16); i++) { - if (ctx->seen & (1 << (i-r_M))) - PPC_BPF_STL(i, 1, -(REG_SZ*(32-i))); - } - } - PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME); - } - - if (ctx->seen & SEEN_DATAREF) { - /* - * If this filter needs to access skb data, - * prepare r_D and r_HL: - * r_HL = skb->len - skb->data_len - * r_D = skb->data - */ - PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, - data_len)); - PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len)); - EMIT(PPC_RAW_SUB(r_HL, r_HL, r_scratch1)); - PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data)); - } + const struct bpf_insn *insn = fp->insnsi; + bool func_addr_fixed; + u64 func_addr; + u32 tmp_idx; + int i, ret; - if (ctx->seen & SEEN_XREG) { + for (i = 0; i < fp->len; i++) { /* - * TODO: Could also detect whether first instr. sets X and - * avoid this (as below, with A). + * During the extra pass, only the branch target addresses for + * the subprog calls need to be fixed. All other instructions + * can left untouched. + * + * The JITed image length does not change because we already + * ensure that the JITed instruction sequence for these calls + * are of fixed length by padding them with NOPs. */ - EMIT(PPC_RAW_LI(r_X, 0)); - } - - /* make sure we dont leak kernel information to user */ - if (bpf_needs_clear_a(&filter[0])) - EMIT(PPC_RAW_LI(r_A, 0)); -} - -static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) -{ - int i; - - if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) { - EMIT(PPC_RAW_ADDI(1, 1, BPF_PPC_STACKFRAME)); - if (ctx->seen & SEEN_DATAREF) { - PPC_BPF_LL(0, 1, PPC_LR_STKOFF); - EMIT(PPC_RAW_MTLR(0)); - PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D))); - PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL))); - } - if (ctx->seen & SEEN_MEM) { - /* Restore any saved non-vol registers */ - for (i = r_M; i < (r_M+16); i++) { - if (ctx->seen & (1 << (i-r_M))) - PPC_BPF_LL(i, 1, -(REG_SZ*(32-i))); - } - } - } - /* The RETs have left a return value in R3. */ - - EMIT(PPC_RAW_BLR()); -} - -#define CHOOSE_LOAD_FUNC(K, func) \ - ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) - -/* Assemble the body code between the prologue & epilogue. */ -static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, - struct codegen_context *ctx, - unsigned int *addrs) -{ - const struct sock_filter *filter = fp->insns; - int flen = fp->len; - u8 *func; - unsigned int true_cond; - int i; - - /* Start of epilogue code */ - unsigned int exit_addr = addrs[flen]; - - for (i = 0; i < flen; i++) { - unsigned int K = filter[i].k; - u16 code = bpf_anc_helper(&filter[i]); + if (insn[i].code == (BPF_JMP | BPF_CALL) && + insn[i].src_reg == BPF_PSEUDO_CALL) { + ret = bpf_jit_get_func_addr(fp, &insn[i], true, + &func_addr, + &func_addr_fixed); + if (ret < 0) + return ret; - /* - * addrs[] maps a BPF bytecode address into a real offset from - * the start of the body code. 
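 *
 * (Worked example, not literal code: with this mapping, a conditional jump
 *  at BPF instruction i whose true branch skips jt instructions is emitted
 *  as a PPC branch to addrs[i + 1 + jt] -- the byte offset, within the body,
 *  of the first instruction generated for that BPF target -- which is how
 *  the cond_branch cases below resolve their destinations.)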
- */ - addrs[i] = ctx->idx * 4; - - switch (code) { - /*** ALU ops ***/ - case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */ - ctx->seen |= SEEN_XREG; - EMIT(PPC_RAW_ADD(r_A, r_A, r_X)); - break; - case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */ - if (!K) - break; - EMIT(PPC_RAW_ADDI(r_A, r_A, IMM_L(K))); - if (K >= 32768) - EMIT(PPC_RAW_ADDIS(r_A, r_A, IMM_HA(K))); - break; - case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */ - ctx->seen |= SEEN_XREG; - EMIT(PPC_RAW_SUB(r_A, r_A, r_X)); - break; - case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */ - if (!K) - break; - EMIT(PPC_RAW_ADDI(r_A, r_A, IMM_L(-K))); - if (K >= 32768) - EMIT(PPC_RAW_ADDIS(r_A, r_A, IMM_HA(-K))); - break; - case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */ - ctx->seen |= SEEN_XREG; - EMIT(PPC_RAW_MULW(r_A, r_A, r_X)); - break; - case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */ - if (K < 32768) - EMIT(PPC_RAW_MULI(r_A, r_A, K)); - else { - PPC_LI32(r_scratch1, K); - EMIT(PPC_RAW_MULW(r_A, r_A, r_scratch1)); - } - break; - case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */ - case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */ - ctx->seen |= SEEN_XREG; - EMIT(PPC_RAW_CMPWI(r_X, 0)); - if (ctx->pc_ret0 != -1) { - PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]); - } else { - PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12); - EMIT(PPC_RAW_LI(r_ret, 0)); - PPC_JMP(exit_addr); - } - if (code == (BPF_ALU | BPF_MOD | BPF_X)) { - EMIT(PPC_RAW_DIVWU(r_scratch1, r_A, r_X)); - EMIT(PPC_RAW_MULW(r_scratch1, r_X, r_scratch1)); - EMIT(PPC_RAW_SUB(r_A, r_A, r_scratch1)); - } else { - EMIT(PPC_RAW_DIVWU(r_A, r_A, r_X)); - } - break; - case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */ - PPC_LI32(r_scratch2, K); - EMIT(PPC_RAW_DIVWU(r_scratch1, r_A, r_scratch2)); - EMIT(PPC_RAW_MULW(r_scratch1, r_scratch2, r_scratch1)); - EMIT(PPC_RAW_SUB(r_A, r_A, r_scratch1)); - break; - case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */ - if (K == 1) - break; - PPC_LI32(r_scratch1, K); - EMIT(PPC_RAW_DIVWU(r_A, r_A, r_scratch1)); - break; - case BPF_ALU | BPF_AND | BPF_X: - ctx->seen |= SEEN_XREG; - EMIT(PPC_RAW_AND(r_A, r_A, r_X)); - break; - case BPF_ALU | BPF_AND | BPF_K: - if (!IMM_H(K)) - EMIT(PPC_RAW_ANDI(r_A, r_A, K)); - else { - PPC_LI32(r_scratch1, K); - EMIT(PPC_RAW_AND(r_A, r_A, r_scratch1)); - } - break; - case BPF_ALU | BPF_OR | BPF_X: - ctx->seen |= SEEN_XREG; - EMIT(PPC_RAW_OR(r_A, r_A, r_X)); - break; - case BPF_ALU | BPF_OR | BPF_K: - if (IMM_L(K)) - EMIT(PPC_RAW_ORI(r_A, r_A, IMM_L(K))); - if (K >= 65536) - EMIT(PPC_RAW_ORIS(r_A, r_A, IMM_H(K))); - break; - case BPF_ANC | SKF_AD_ALU_XOR_X: - case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */ - ctx->seen |= SEEN_XREG; - EMIT(PPC_RAW_XOR(r_A, r_A, r_X)); - break; - case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */ - if (IMM_L(K)) - EMIT(PPC_RAW_XORI(r_A, r_A, IMM_L(K))); - if (K >= 65536) - EMIT(PPC_RAW_XORIS(r_A, r_A, IMM_H(K))); - break; - case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */ - ctx->seen |= SEEN_XREG; - EMIT(PPC_RAW_SLW(r_A, r_A, r_X)); - break; - case BPF_ALU | BPF_LSH | BPF_K: - if (K == 0) - break; - else - EMIT(PPC_RAW_SLWI(r_A, r_A, K)); - break; - case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */ - ctx->seen |= SEEN_XREG; - EMIT(PPC_RAW_SRW(r_A, r_A, r_X)); - break; - case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */ - if (K == 0) - break; - else - EMIT(PPC_RAW_SRWI(r_A, r_A, K)); - break; - case BPF_ALU | BPF_NEG: - EMIT(PPC_RAW_NEG(r_A, r_A)); - break; - case BPF_RET | BPF_K: - PPC_LI32(r_ret, K); - if (!K) { - if (ctx->pc_ret0 == -1) - ctx->pc_ret0 = i; - } - /* - * If this isn't the very last instruction, branch to - * the epilogue if we've 
stuff to clean up. Otherwise, - * if there's nothing to tidy, just return. If we /are/ - * the last instruction, we're about to fall through to - * the epilogue to return. - */ - if (i != flen - 1) { - /* - * Note: 'seen' is properly valid only on pass - * #2. Both parts of this conditional are the - * same instruction size though, meaning the - * first pass will still correctly determine the - * code size/addresses. - */ - if (ctx->seen) - PPC_JMP(exit_addr); - else - EMIT(PPC_RAW_BLR()); - } - break; - case BPF_RET | BPF_A: - EMIT(PPC_RAW_MR(r_ret, r_A)); - if (i != flen - 1) { - if (ctx->seen) - PPC_JMP(exit_addr); - else - EMIT(PPC_RAW_BLR()); - } - break; - case BPF_MISC | BPF_TAX: /* X = A */ - EMIT(PPC_RAW_MR(r_X, r_A)); - break; - case BPF_MISC | BPF_TXA: /* A = X */ - ctx->seen |= SEEN_XREG; - EMIT(PPC_RAW_MR(r_A, r_X)); - break; - - /*** Constant loads/M[] access ***/ - case BPF_LD | BPF_IMM: /* A = K */ - PPC_LI32(r_A, K); - break; - case BPF_LDX | BPF_IMM: /* X = K */ - PPC_LI32(r_X, K); - break; - case BPF_LD | BPF_MEM: /* A = mem[K] */ - EMIT(PPC_RAW_MR(r_A, r_M + (K & 0xf))); - ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); - break; - case BPF_LDX | BPF_MEM: /* X = mem[K] */ - EMIT(PPC_RAW_MR(r_X, r_M + (K & 0xf))); - ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); - break; - case BPF_ST: /* mem[K] = A */ - EMIT(PPC_RAW_MR(r_M + (K & 0xf), r_A)); - ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); - break; - case BPF_STX: /* mem[K] = X */ - EMIT(PPC_RAW_MR(r_M + (K & 0xf), r_X)); - ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf)); - break; - case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */ - BUILD_BUG_ON(sizeof_field(struct sk_buff, len) != 4); - PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len)); - break; - case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */ - PPC_LWZ_OFFS(r_A, r_skb, K); - break; - case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */ - PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len)); - break; - - /*** Ancillary info loads ***/ - case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */ - BUILD_BUG_ON(sizeof_field(struct sk_buff, - protocol) != 2); - PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff, - protocol)); - break; - case BPF_ANC | SKF_AD_IFINDEX: - case BPF_ANC | SKF_AD_HATYPE: - BUILD_BUG_ON(sizeof_field(struct net_device, - ifindex) != 4); - BUILD_BUG_ON(sizeof_field(struct net_device, - type) != 2); - PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, - dev)); - EMIT(PPC_RAW_CMPDI(r_scratch1, 0)); - if (ctx->pc_ret0 != -1) { - PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]); - } else { - /* Exit, returning 0; first pass hits here. 
*/ - PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12); - EMIT(PPC_RAW_LI(r_ret, 0)); - PPC_JMP(exit_addr); - } - if (code == (BPF_ANC | SKF_AD_IFINDEX)) { - PPC_LWZ_OFFS(r_A, r_scratch1, - offsetof(struct net_device, ifindex)); - } else { - PPC_LHZ_OFFS(r_A, r_scratch1, - offsetof(struct net_device, type)); - } - - break; - case BPF_ANC | SKF_AD_MARK: - BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4); - PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, - mark)); - break; - case BPF_ANC | SKF_AD_RXHASH: - BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4); - PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, - hash)); - break; - case BPF_ANC | SKF_AD_VLAN_TAG: - BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2); - - PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, - vlan_tci)); - break; - case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: - PPC_LBZ_OFFS(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET()); - if (PKT_VLAN_PRESENT_BIT) - EMIT(PPC_RAW_SRWI(r_A, r_A, PKT_VLAN_PRESENT_BIT)); - if (PKT_VLAN_PRESENT_BIT < 7) - EMIT(PPC_RAW_ANDI(r_A, r_A, 1)); - break; - case BPF_ANC | SKF_AD_QUEUE: - BUILD_BUG_ON(sizeof_field(struct sk_buff, - queue_mapping) != 2); - PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, - queue_mapping)); - break; - case BPF_ANC | SKF_AD_PKTTYPE: - PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET()); - EMIT(PPC_RAW_ANDI(r_A, r_A, PKT_TYPE_MAX)); - EMIT(PPC_RAW_SRWI(r_A, r_A, 5)); - break; - case BPF_ANC | SKF_AD_CPU: - PPC_BPF_LOAD_CPU(r_A); - break; - /*** Absolute loads from packet header/data ***/ - case BPF_LD | BPF_W | BPF_ABS: - func = CHOOSE_LOAD_FUNC(K, sk_load_word); - goto common_load; - case BPF_LD | BPF_H | BPF_ABS: - func = CHOOSE_LOAD_FUNC(K, sk_load_half); - goto common_load; - case BPF_LD | BPF_B | BPF_ABS: - func = CHOOSE_LOAD_FUNC(K, sk_load_byte); - common_load: - /* Load from [K]. */ - ctx->seen |= SEEN_DATAREF; - PPC_FUNC_ADDR(r_scratch1, func); - EMIT(PPC_RAW_MTLR(r_scratch1)); - PPC_LI32(r_addr, K); - EMIT(PPC_RAW_BLRL()); /* - * Helper returns 'lt' condition on error, and an - * appropriate return value in r3 + * Save ctx->idx as this would currently point to the + * end of the JITed image and set it to the offset of + * the instruction sequence corresponding to the + * subprog call temporarily. */ - PPC_BCC(COND_LT, exit_addr); - break; - - /*** Indirect loads from packet header/data ***/ - case BPF_LD | BPF_W | BPF_IND: - func = sk_load_word; - goto common_load_ind; - case BPF_LD | BPF_H | BPF_IND: - func = sk_load_half; - goto common_load_ind; - case BPF_LD | BPF_B | BPF_IND: - func = sk_load_byte; - common_load_ind: + tmp_idx = ctx->idx; + ctx->idx = addrs[i] / 4; + bpf_jit_emit_func_call_rel(image, ctx, func_addr); + /* - * Load from [X + K]. Negative offsets are tested for - * in the helper functions. 
- */ - ctx->seen |= SEEN_DATAREF | SEEN_XREG; - PPC_FUNC_ADDR(r_scratch1, func); - EMIT(PPC_RAW_MTLR(r_scratch1)); - EMIT(PPC_RAW_ADDI(r_addr, r_X, IMM_L(K))); - if (K >= 32768) - EMIT(PPC_RAW_ADDIS(r_addr, r_addr, IMM_HA(K))); - EMIT(PPC_RAW_BLRL()); - /* If error, cr0.LT set */ - PPC_BCC(COND_LT, exit_addr); - break; - - case BPF_LDX | BPF_B | BPF_MSH: - func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh); - goto common_load; - break; - - /*** Jump and branches ***/ - case BPF_JMP | BPF_JA: - if (K != 0) - PPC_JMP(addrs[i + 1 + K]); - break; - - case BPF_JMP | BPF_JGT | BPF_K: - case BPF_JMP | BPF_JGT | BPF_X: - true_cond = COND_GT; - goto cond_branch; - case BPF_JMP | BPF_JGE | BPF_K: - case BPF_JMP | BPF_JGE | BPF_X: - true_cond = COND_GE; - goto cond_branch; - case BPF_JMP | BPF_JEQ | BPF_K: - case BPF_JMP | BPF_JEQ | BPF_X: - true_cond = COND_EQ; - goto cond_branch; - case BPF_JMP | BPF_JSET | BPF_K: - case BPF_JMP | BPF_JSET | BPF_X: - true_cond = COND_NE; - cond_branch: - /* same targets, can avoid doing the test :) */ - if (filter[i].jt == filter[i].jf) { - if (filter[i].jt > 0) - PPC_JMP(addrs[i + 1 + filter[i].jt]); - break; - } - - switch (code) { - case BPF_JMP | BPF_JGT | BPF_X: - case BPF_JMP | BPF_JGE | BPF_X: - case BPF_JMP | BPF_JEQ | BPF_X: - ctx->seen |= SEEN_XREG; - EMIT(PPC_RAW_CMPLW(r_A, r_X)); - break; - case BPF_JMP | BPF_JSET | BPF_X: - ctx->seen |= SEEN_XREG; - EMIT(PPC_RAW_AND_DOT(r_scratch1, r_A, r_X)); - break; - case BPF_JMP | BPF_JEQ | BPF_K: - case BPF_JMP | BPF_JGT | BPF_K: - case BPF_JMP | BPF_JGE | BPF_K: - if (K < 32768) - EMIT(PPC_RAW_CMPLWI(r_A, K)); - else { - PPC_LI32(r_scratch1, K); - EMIT(PPC_RAW_CMPLW(r_A, r_scratch1)); - } - break; - case BPF_JMP | BPF_JSET | BPF_K: - if (K < 32768) - /* PPC_ANDI is /only/ dot-form */ - EMIT(PPC_RAW_ANDI(r_scratch1, r_A, K)); - else { - PPC_LI32(r_scratch1, K); - EMIT(PPC_RAW_AND_DOT(r_scratch1, r_A, - r_scratch1)); - } - break; - } - /* Sometimes branches are constructed "backward", with - * the false path being the branch and true path being - * a fallthrough to the next instruction. + * Restore ctx->idx here. This is safe as the length + * of the JITed sequence remains unchanged. */ - if (filter[i].jt == 0) - /* Swap the sense of the branch */ - PPC_BCC(true_cond ^ COND_CMP_TRUE, - addrs[i + 1 + filter[i].jf]); - else { - PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]); - if (filter[i].jf != 0) - PPC_JMP(addrs[i + 1 + filter[i].jf]); - } - break; - default: - /* The filter contains something cruel & unusual. - * We don't handle it, but also there shouldn't be - * anything missing from our list. - */ - if (printk_ratelimit()) - pr_err("BPF filter opcode %04x (@%d) unsupported\n", - filter[i].code, i); - return -ENOTSUPP; + ctx->idx = tmp_idx; } - } - /* Set end-of-body-code address for exit. 
*/ - addrs[i] = ctx->idx * 4; return 0; } -void bpf_jit_compile(struct bpf_prog *fp) +struct powerpc64_jit_data { + struct bpf_binary_header *header; + u32 *addrs; + u8 *image; + u32 proglen; + struct codegen_context ctx; +}; + +bool bpf_jit_needs_zext(void) +{ + return true; +} + +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) { - unsigned int proglen; - unsigned int alloclen; - u32 *image = NULL; + u32 proglen; + u32 alloclen; + u8 *image = NULL; u32 *code_base; - unsigned int *addrs; + u32 *addrs; + struct powerpc64_jit_data *jit_data; struct codegen_context cgctx; int pass; - int flen = fp->len; + int flen; + struct bpf_binary_header *bpf_hdr; + struct bpf_prog *org_fp = fp; + struct bpf_prog *tmp_fp; + bool bpf_blinded = false; + bool extra_pass = false; + + if (!fp->jit_requested) + return org_fp; + + tmp_fp = bpf_jit_blind_constants(org_fp); + if (IS_ERR(tmp_fp)) + return org_fp; + + if (tmp_fp != org_fp) { + bpf_blinded = true; + fp = tmp_fp; + } + + jit_data = fp->aux->jit_data; + if (!jit_data) { + jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); + if (!jit_data) { + fp = org_fp; + goto out; + } + fp->aux->jit_data = jit_data; + } - if (!bpf_jit_enable) - return; + flen = fp->len; + addrs = jit_data->addrs; + if (addrs) { + cgctx = jit_data->ctx; + image = jit_data->image; + bpf_hdr = jit_data->header; + proglen = jit_data->proglen; + alloclen = proglen + FUNCTION_DESCR_SIZE; + extra_pass = true; + goto skip_init_ctx; + } addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL); - if (addrs == NULL) - return; + if (addrs == NULL) { + fp = org_fp; + goto out_addrs; + } - /* - * There are multiple assembly passes as the generated code will change - * size as it settles down, figuring out the max branch offsets/exit - * paths required. - * - * The range of standard conditional branches is +/- 32Kbytes. Since - * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to - * finish with 8 bytes/instruction. Not feasible, so long jumps are - * used, distinct from short branches. - * - * Current: - * - * For now, both branch types assemble to 2 words (short branches padded - * with a NOP); this is less efficient, but assembly will always complete - * after exactly 3 passes: - * - * First pass: No code buffer; Program is "faux-generated" -- no code - * emitted but maximum size of output determined (and addrs[] filled - * in). Also, we note whether we use M[], whether we use skb data, etc. - * All generation choices assumed to be 'worst-case', e.g. branches all - * far (2 instructions), return path code reduction not available, etc. - * - * Second pass: Code buffer allocated with size determined previously. - * Prologue generated to support features we have seen used. Exit paths - * determined and addrs[] is filled in again, as code may be slightly - * smaller as a result. - * - * Third pass: Code generated 'for real', and branch destinations - * determined from now-accurate addrs[] map. - * - * Ideal: - * - * If we optimise this, near branches will be shorter. On the - * first assembly pass, we should err on the side of caution and - * generate the biggest code. On subsequent passes, branches will be - * generated short or long and code size will reduce. With smaller - * code, more branches may fall into the short category, and code will - * reduce more. - * - * Finally, if we see one pass generate code the same size as the - * previous pass we have converged and should now generate code for - * real. 
Allocating at the end will also save the memory that would - * otherwise be wasted by the (small) current code shrinkage. - * Preferably, we should do a small number of passes (e.g. 5) and if we - * haven't converged by then, get impatient and force code to generate - * as-is, even if the odd branch would be left long. The chances of a - * long jump are tiny with all but the most enormous of BPF filter - * inputs, so we should usually converge on the third pass. - */ + memset(&cgctx, 0, sizeof(struct codegen_context)); + memcpy(cgctx.b2p, b2p, sizeof(cgctx.b2p)); + + /* Make sure that the stack is quadword aligned. */ + cgctx.stack_size = round_up(fp->aux->stack_depth, 16); - cgctx.idx = 0; - cgctx.seen = 0; - cgctx.pc_ret0 = -1; /* Scouting faux-generate pass 0 */ - if (bpf_jit_build_body(fp, 0, &cgctx, addrs)) + if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) { /* We hit something illegal or unsupported. */ - goto out; + fp = org_fp; + goto out_addrs; + } + + /* + * If we have seen a tail call, we need a second pass. + * This is because bpf_jit_emit_common_epilogue() is called + * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen. + */ + if (cgctx.seen & SEEN_TAILCALL) { + cgctx.idx = 0; + if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) { + fp = org_fp; + goto out_addrs; + } + } + bpf_jit_realloc_regs(&cgctx); /* * Pretend to build prologue, given the features we've seen. This will * update ctgtx.idx as it pretends to output instructions, then we can * calculate total size from idx. */ - bpf_jit_build_prologue(fp, 0, &cgctx); + bpf_jit_build_prologue(0, &cgctx); bpf_jit_build_epilogue(0, &cgctx); proglen = cgctx.idx * 4; alloclen = proglen + FUNCTION_DESCR_SIZE; - image = module_alloc(alloclen); - if (!image) - goto out; - code_base = image + (FUNCTION_DESCR_SIZE/4); + bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4, bpf_jit_fill_ill_insns); + if (!bpf_hdr) { + fp = org_fp; + goto out_addrs; + } + +skip_init_ctx: + code_base = (u32 *)(image + FUNCTION_DESCR_SIZE); + + if (extra_pass) { + /* + * Do not touch the prologue and epilogue as they will remain + * unchanged. Only fix the branch target address for subprog + * calls in the body. + * + * This does not change the offsets and lengths of the subprog + * call instruction sequences and hence, the size of the JITed + * image as well. + */ + bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs); + + /* There is no need to perform the usual passes. */ + goto skip_codegen_passes; + } /* Code generation passes 1-2 */ for (pass = 1; pass < 3; pass++) { /* Now build the prologue, body code & epilogue for real. */ cgctx.idx = 0; - bpf_jit_build_prologue(fp, code_base, &cgctx); - bpf_jit_build_body(fp, code_base, &cgctx, addrs); + bpf_jit_build_prologue(code_base, &cgctx); + bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass); bpf_jit_build_epilogue(code_base, &cgctx); if (bpf_jit_enable > 1) @@ -652,15 +218,15 @@ void bpf_jit_compile(struct bpf_prog *fp) proglen - (cgctx.idx * 4), cgctx.seen); } +skip_codegen_passes: if (bpf_jit_enable > 1) - /* Note that we output the base address of the code_base + /* + * Note that we output the base address of the code_base * rather than image, since opcodes are in code_base. 
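 *
 * (Illustration of the image layout this refers to, assuming an ELF ABI v1
 *  kernel where FUNCTION_DESCR_SIZE is non-zero:
 *
 *	image                       code_base = image + FUNCTION_DESCR_SIZE
 *	| function descriptor     | JITed instructions ...
 *	| [0] entry    [1] TOC    |
 *
 *  fp->bpf_func points at image, so callers go through the descriptor,
 *  while the opcodes bpf_jit_dump() prints start at code_base.  On ABI v2
 *  and on ppc32 the descriptor size is zero and the two addresses match.)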
*/ bpf_jit_dump(flen, proglen, pass, code_base); - bpf_flush_icache(code_base, code_base + (proglen/4)); - -#ifdef CONFIG_PPC64 +#ifdef PPC64_ELF_ABI_v1 /* Function descriptor nastiness: Address + TOC */ ((u64 *)image)[0] = (u64)code_base; ((u64 *)image)[1] = local_paca->kernel_toc; @@ -668,16 +234,38 @@ void bpf_jit_compile(struct bpf_prog *fp) fp->bpf_func = (void *)image; fp->jited = 1; + fp->jited_len = alloclen; + + bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE)); + if (!fp->is_func || extra_pass) { + bpf_prog_fill_jited_linfo(fp, addrs); +out_addrs: + kfree(addrs); + kfree(jit_data); + fp->aux->jit_data = NULL; + } else { + jit_data->addrs = addrs; + jit_data->ctx = cgctx; + jit_data->proglen = proglen; + jit_data->image = image; + jit_data->header = bpf_hdr; + } out: - kfree(addrs); - return; + if (bpf_blinded) + bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp); + + return fp; } +/* Overriding bpf_jit_free() as we don't set images read-only. */ void bpf_jit_free(struct bpf_prog *fp) { + unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; + struct bpf_binary_header *bpf_hdr = (void *)addr; + if (fp->jited) - module_memfree(fp->bpf_func); + bpf_jit_binary_free(bpf_hdr); bpf_prog_unlock_free(fp); } diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c new file mode 100644 index 000000000000..bbb16099e8c7 --- /dev/null +++ b/arch/powerpc/net/bpf_jit_comp32.c @@ -0,0 +1,1100 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * eBPF JIT compiler for PPC32 + * + * Copyright 2020 Christophe Leroy <christophe.leroy@csgroup.eu> + * CS GROUP France + * + * Based on PPC64 eBPF JIT compiler by Naveen N. Rao + */ +#include <linux/moduleloader.h> +#include <asm/cacheflush.h> +#include <asm/asm-compat.h> +#include <linux/netdevice.h> +#include <linux/filter.h> +#include <linux/if_vlan.h> +#include <asm/kprobes.h> +#include <linux/bpf.h> + +#include "bpf_jit.h" + +/* + * Stack layout: + * + * [ prev sp ] <------------- + * [ nv gpr save area ] 16 * 4 | + * fp (r31) --> [ ebpf stack space ] upto 512 | + * [ frame header ] 16 | + * sp (r1) ---> [ stack pointer ] -------------- + */ + +/* for gpr non volatile registers r17 to r31 (14) + tail call */ +#define BPF_PPC_STACK_SAVE (15 * 4 + 4) +/* stack frame, ensure this is quadword aligned */ +#define BPF_PPC_STACKFRAME(ctx) (STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_SAVE + (ctx)->stack_size) + +/* BPF register usage */ +#define TMP_REG (MAX_BPF_JIT_REG + 0) + +/* BPF to ppc register mappings */ +const int b2p[MAX_BPF_JIT_REG + 1] = { + /* function return value */ + [BPF_REG_0] = 12, + /* function arguments */ + [BPF_REG_1] = 4, + [BPF_REG_2] = 6, + [BPF_REG_3] = 8, + [BPF_REG_4] = 10, + [BPF_REG_5] = 22, + /* non volatile registers */ + [BPF_REG_6] = 24, + [BPF_REG_7] = 26, + [BPF_REG_8] = 28, + [BPF_REG_9] = 30, + /* frame pointer aka BPF_REG_10 */ + [BPF_REG_FP] = 18, + /* eBPF jit internal registers */ + [BPF_REG_AX] = 20, + [TMP_REG] = 31, /* 32 bits */ +}; + +static int bpf_to_ppc(struct codegen_context *ctx, int reg) +{ + return ctx->b2p[reg]; +} + +/* PPC NVR range -- update this if we ever use NVRs below r17 */ +#define BPF_PPC_NVR_MIN 17 +#define BPF_PPC_TC 16 + +static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg) +{ + if ((reg >= BPF_PPC_NVR_MIN && reg < 32) || reg == BPF_PPC_TC) + return BPF_PPC_STACKFRAME(ctx) - 4 * (32 - reg); + + WARN(true, "BPF JIT is asking about unknown registers, will crash the stack"); + /* Use the hole we have left for 
alignment */ + return BPF_PPC_STACKFRAME(ctx) - 4; +} + +void bpf_jit_realloc_regs(struct codegen_context *ctx) +{ + if (ctx->seen & SEEN_FUNC) + return; + + while (ctx->seen & SEEN_NVREG_MASK && + (ctx->seen & SEEN_VREG_MASK) != SEEN_VREG_MASK) { + int old = 32 - fls(ctx->seen & (SEEN_NVREG_MASK & 0xaaaaaaab)); + int new = 32 - fls(~ctx->seen & (SEEN_VREG_MASK & 0xaaaaaaaa)); + int i; + + for (i = BPF_REG_0; i <= TMP_REG; i++) { + if (ctx->b2p[i] != old) + continue; + ctx->b2p[i] = new; + bpf_set_seen_register(ctx, new); + bpf_clear_seen_register(ctx, old); + if (i != TMP_REG) { + bpf_set_seen_register(ctx, new - 1); + bpf_clear_seen_register(ctx, old - 1); + } + break; + } + } +} + +void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx) +{ + int i; + + /* First arg comes in as a 32 bits pointer. */ + EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_1), __REG_R3)); + EMIT(PPC_RAW_LI(bpf_to_ppc(ctx, BPF_REG_1) - 1, 0)); + EMIT(PPC_RAW_STWU(__REG_R1, __REG_R1, -BPF_PPC_STACKFRAME(ctx))); + + /* + * Initialize tail_call_cnt in stack frame if we do tail calls. + * Otherwise, put in NOPs so that it can be skipped when we are + * invoked through a tail call. + */ + if (ctx->seen & SEEN_TAILCALL) { + EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_1) - 1, __REG_R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC))); + } else { + EMIT(PPC_RAW_NOP()); + } + +#define BPF_TAILCALL_PROLOGUE_SIZE 16 + + /* + * We need a stack frame, but we don't necessarily need to + * save/restore LR unless we call other functions + */ + if (ctx->seen & SEEN_FUNC) + EMIT(PPC_RAW_MFLR(__REG_R0)); + + /* + * Back up non-volatile regs -- registers r18-r31 + */ + for (i = BPF_PPC_NVR_MIN; i <= 31; i++) + if (bpf_is_seen_register(ctx, i)) + EMIT(PPC_RAW_STW(i, __REG_R1, bpf_jit_stack_offsetof(ctx, i))); + + /* If needed retrieve arguments 9 and 10, ie 5th 64 bits arg.*/ + if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_5))) { + EMIT(PPC_RAW_LWZ(bpf_to_ppc(ctx, BPF_REG_5) - 1, __REG_R1, BPF_PPC_STACKFRAME(ctx)) + 8); + EMIT(PPC_RAW_LWZ(bpf_to_ppc(ctx, BPF_REG_5), __REG_R1, BPF_PPC_STACKFRAME(ctx)) + 12); + } + + /* Setup frame pointer to point to the bpf stack area */ + if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_FP))) { + EMIT(PPC_RAW_LI(bpf_to_ppc(ctx, BPF_REG_FP) - 1, 0)); + EMIT(PPC_RAW_ADDI(bpf_to_ppc(ctx, BPF_REG_FP), __REG_R1, + STACK_FRAME_MIN_SIZE + ctx->stack_size)); + } + + if (ctx->seen & SEEN_FUNC) + EMIT(PPC_RAW_STW(__REG_R0, __REG_R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF)); +} + +static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx) +{ + int i; + + /* Restore NVRs */ + for (i = BPF_PPC_NVR_MIN; i <= 31; i++) + if (bpf_is_seen_register(ctx, i)) + EMIT(PPC_RAW_LWZ(i, __REG_R1, bpf_jit_stack_offsetof(ctx, i))); +} + +void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) +{ + EMIT(PPC_RAW_MR(__REG_R3, bpf_to_ppc(ctx, BPF_REG_0))); + + bpf_jit_emit_common_epilogue(image, ctx); + + /* Tear down our stack frame */ + + if (ctx->seen & SEEN_FUNC) + EMIT(PPC_RAW_LWZ(__REG_R0, __REG_R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF)); + + EMIT(PPC_RAW_ADDI(__REG_R1, __REG_R1, BPF_PPC_STACKFRAME(ctx))); + + if (ctx->seen & SEEN_FUNC) + EMIT(PPC_RAW_MTLR(__REG_R0)); + + EMIT(PPC_RAW_BLR()); +} + +void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func) +{ + s32 rel = (s32)func - (s32)(image + ctx->idx); + + if (image && rel < 0x2000000 && rel >= -0x2000000) { + PPC_BL_ABS(func); + } else { + /* Load function address into r0 */ + 
EMIT(PPC_RAW_LIS(__REG_R0, IMM_H(func))); + EMIT(PPC_RAW_ORI(__REG_R0, __REG_R0, IMM_L(func))); + EMIT(PPC_RAW_MTLR(__REG_R0)); + EMIT(PPC_RAW_BLRL()); + } +} + +static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out) +{ + /* + * By now, the eBPF program has already setup parameters in r3-r6 + * r3-r4/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program + * r5-r6/BPF_REG_2 - pointer to bpf_array + * r7-r8/BPF_REG_3 - index in bpf_array + */ + int b2p_bpf_array = bpf_to_ppc(ctx, BPF_REG_2); + int b2p_index = bpf_to_ppc(ctx, BPF_REG_3); + + /* + * if (index >= array->map.max_entries) + * goto out; + */ + EMIT(PPC_RAW_LWZ(__REG_R0, b2p_bpf_array, offsetof(struct bpf_array, map.max_entries))); + EMIT(PPC_RAW_CMPLW(b2p_index, __REG_R0)); + EMIT(PPC_RAW_LWZ(__REG_R0, __REG_R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC))); + PPC_BCC(COND_GE, out); + + /* + * if (tail_call_cnt > MAX_TAIL_CALL_CNT) + * goto out; + */ + EMIT(PPC_RAW_CMPLWI(__REG_R0, MAX_TAIL_CALL_CNT)); + /* tail_call_cnt++; */ + EMIT(PPC_RAW_ADDIC(__REG_R0, __REG_R0, 1)); + PPC_BCC(COND_GT, out); + + /* prog = array->ptrs[index]; */ + EMIT(PPC_RAW_RLWINM(__REG_R3, b2p_index, 2, 0, 29)); + EMIT(PPC_RAW_ADD(__REG_R3, __REG_R3, b2p_bpf_array)); + EMIT(PPC_RAW_LWZ(__REG_R3, __REG_R3, offsetof(struct bpf_array, ptrs))); + EMIT(PPC_RAW_STW(__REG_R0, __REG_R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC))); + + /* + * if (prog == NULL) + * goto out; + */ + EMIT(PPC_RAW_CMPLWI(__REG_R3, 0)); + PPC_BCC(COND_EQ, out); + + /* goto *(prog->bpf_func + prologue_size); */ + EMIT(PPC_RAW_LWZ(__REG_R3, __REG_R3, offsetof(struct bpf_prog, bpf_func))); + + if (ctx->seen & SEEN_FUNC) + EMIT(PPC_RAW_LWZ(__REG_R0, __REG_R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF)); + + EMIT(PPC_RAW_ADDIC(__REG_R3, __REG_R3, BPF_TAILCALL_PROLOGUE_SIZE)); + + if (ctx->seen & SEEN_FUNC) + EMIT(PPC_RAW_MTLR(__REG_R0)); + + EMIT(PPC_RAW_MTCTR(__REG_R3)); + + EMIT(PPC_RAW_MR(__REG_R3, bpf_to_ppc(ctx, BPF_REG_1))); + + /* tear restore NVRs, ... */ + bpf_jit_emit_common_epilogue(image, ctx); + + EMIT(PPC_RAW_BCTR()); + /* out: */ +} + +/* Assemble the body code between the prologue & epilogue */ +int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx, + u32 *addrs, bool extra_pass) +{ + const struct bpf_insn *insn = fp->insnsi; + int flen = fp->len; + int i, ret; + + /* Start of epilogue code - will only be valid 2nd pass onwards */ + u32 exit_addr = addrs[flen]; + + for (i = 0; i < flen; i++) { + u32 code = insn[i].code; + u32 dst_reg = bpf_to_ppc(ctx, insn[i].dst_reg); + u32 dst_reg_h = dst_reg - 1; + u32 src_reg = bpf_to_ppc(ctx, insn[i].src_reg); + u32 src_reg_h = src_reg - 1; + u32 tmp_reg = bpf_to_ppc(ctx, TMP_REG); + s16 off = insn[i].off; + s32 imm = insn[i].imm; + bool func_addr_fixed; + u64 func_addr; + u32 true_cond; + + /* + * addrs[] maps a BPF bytecode address into a real offset from + * the start of the body code. + */ + addrs[i] = ctx->idx * 4; + + /* + * As an optimization, we note down which registers + * are used so that we can only save/restore those in our + * prologue and epilogue. We do this here regardless of whether + * the actual BPF instruction uses src/dst registers or not + * (for instance, BPF_CALL does not use them). The expectation + * is that those instructions will have src_reg/dst_reg set to + * 0. Even otherwise, we just lose some prologue/epilogue + * optimization but everything else should work without + * any issues. 
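 *
 * (For reference, a sketch of the register-pair convention used below:
 *  each 64-bit BPF register maps to two consecutive GPRs, the low 32 bits
 *  in b2p[reg] and the high 32 bits in b2p[reg] - 1, which is why
 *  dst_reg_h/src_reg_h above are simply dst_reg - 1/src_reg - 1.  For
 *  example BPF_REG_1 lives in r4 (low)/r3 (high), so the prologue does:
 *
 *	EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_1), __REG_R3));   // r4 = low word (the ctx pointer)
 *	EMIT(PPC_RAW_LI(bpf_to_ppc(ctx, BPF_REG_1) - 1, 0));      // r3 = high word, zeroed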
+ */ + if (dst_reg >= 3 && dst_reg < 32) { + bpf_set_seen_register(ctx, dst_reg); + bpf_set_seen_register(ctx, dst_reg_h); + } + + if (src_reg >= 3 && src_reg < 32) { + bpf_set_seen_register(ctx, src_reg); + bpf_set_seen_register(ctx, src_reg_h); + } + + switch (code) { + /* + * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG + */ + case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */ + EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg)); + break; + case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */ + EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, src_reg)); + EMIT(PPC_RAW_ADDE(dst_reg_h, dst_reg_h, src_reg_h)); + break; + case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */ + EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg)); + break; + case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */ + EMIT(PPC_RAW_SUBFC(dst_reg, src_reg, dst_reg)); + EMIT(PPC_RAW_SUBFE(dst_reg_h, src_reg_h, dst_reg_h)); + break; + case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */ + imm = -imm; + fallthrough; + case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */ + if (IMM_HA(imm) & 0xffff) + EMIT(PPC_RAW_ADDIS(dst_reg, dst_reg, IMM_HA(imm))); + if (IMM_L(imm)) + EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm))); + break; + case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */ + imm = -imm; + fallthrough; + case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */ + if (!imm) + break; + + if (imm >= -32768 && imm < 32768) { + EMIT(PPC_RAW_ADDIC(dst_reg, dst_reg, imm)); + } else { + PPC_LI32(__REG_R0, imm); + EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, __REG_R0)); + } + if (imm >= 0) + EMIT(PPC_RAW_ADDZE(dst_reg_h, dst_reg_h)); + else + EMIT(PPC_RAW_ADDME(dst_reg_h, dst_reg_h)); + break; + case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */ + bpf_set_seen_register(ctx, tmp_reg); + EMIT(PPC_RAW_MULW(__REG_R0, dst_reg, src_reg_h)); + EMIT(PPC_RAW_MULW(dst_reg_h, dst_reg_h, src_reg)); + EMIT(PPC_RAW_MULHWU(tmp_reg, dst_reg, src_reg)); + EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg)); + EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, __REG_R0)); + EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, tmp_reg)); + break; + case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */ + EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg)); + break; + case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */ + if (imm >= -32768 && imm < 32768) { + EMIT(PPC_RAW_MULI(dst_reg, dst_reg, imm)); + } else { + PPC_LI32(__REG_R0, imm); + EMIT(PPC_RAW_MULW(dst_reg, dst_reg, __REG_R0)); + } + break; + case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */ + if (!imm) { + PPC_LI32(dst_reg, 0); + PPC_LI32(dst_reg_h, 0); + break; + } + if (imm == 1) + break; + if (imm == -1) { + EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0)); + EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h)); + break; + } + bpf_set_seen_register(ctx, tmp_reg); + PPC_LI32(tmp_reg, imm); + EMIT(PPC_RAW_MULW(dst_reg_h, dst_reg_h, tmp_reg)); + if (imm < 0) + EMIT(PPC_RAW_SUB(dst_reg_h, dst_reg_h, dst_reg)); + EMIT(PPC_RAW_MULHWU(__REG_R0, dst_reg, tmp_reg)); + EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp_reg)); + EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, __REG_R0)); + break; + case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */ + EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg)); + break; + case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */ + EMIT(PPC_RAW_DIVWU(__REG_R0, dst_reg, src_reg)); + EMIT(PPC_RAW_MULW(__REG_R0, src_reg, __REG_R0)); + EMIT(PPC_RAW_SUB(dst_reg, dst_reg, __REG_R0)); + break; + case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */ + return -EOPNOTSUPP; + case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src 
*/ + return -EOPNOTSUPP; + case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */ + if (!imm) + return -EINVAL; + if (imm == 1) + break; + + PPC_LI32(__REG_R0, imm); + EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, __REG_R0)); + break; + case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */ + if (!imm) + return -EINVAL; + + if (!is_power_of_2((u32)imm)) { + bpf_set_seen_register(ctx, tmp_reg); + PPC_LI32(tmp_reg, imm); + EMIT(PPC_RAW_DIVWU(__REG_R0, dst_reg, tmp_reg)); + EMIT(PPC_RAW_MULW(__REG_R0, tmp_reg, __REG_R0)); + EMIT(PPC_RAW_SUB(dst_reg, dst_reg, __REG_R0)); + break; + } + if (imm == 1) + EMIT(PPC_RAW_LI(dst_reg, 0)); + else + EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2((u32)imm), 31)); + + break; + case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */ + if (!imm) + return -EINVAL; + if (imm < 0) + imm = -imm; + if (!is_power_of_2(imm)) + return -EOPNOTSUPP; + if (imm == 1) + EMIT(PPC_RAW_LI(dst_reg, 0)); + else + EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2(imm), 31)); + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + break; + case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */ + if (!imm) + return -EINVAL; + if (!is_power_of_2(abs(imm))) + return -EOPNOTSUPP; + + if (imm < 0) { + EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0)); + EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h)); + imm = -imm; + } + if (imm == 1) + break; + imm = ilog2(imm); + EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31)); + EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1)); + EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, imm)); + break; + case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */ + EMIT(PPC_RAW_NEG(dst_reg, dst_reg)); + break; + case BPF_ALU64 | BPF_NEG: /* dst = -dst */ + EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0)); + EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h)); + break; + + /* + * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH + */ + case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */ + EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg)); + EMIT(PPC_RAW_AND(dst_reg_h, dst_reg_h, src_reg_h)); + break; + case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */ + EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg)); + break; + case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */ + if (imm >= 0) + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + fallthrough; + case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */ + if (!IMM_H(imm)) { + EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm))); + } else if (!IMM_L(imm)) { + EMIT(PPC_RAW_ANDIS(dst_reg, dst_reg, IMM_H(imm))); + } else if (imm == (((1 << fls(imm)) - 1) ^ ((1 << (ffs(i) - 1)) - 1))) { + EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, + 32 - fls(imm), 32 - ffs(imm))); + } else { + PPC_LI32(__REG_R0, imm); + EMIT(PPC_RAW_AND(dst_reg, dst_reg, __REG_R0)); + } + break; + case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */ + EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg)); + EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, src_reg_h)); + break; + case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */ + EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg)); + break; + case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */ + /* Sign-extended */ + if (imm < 0) + EMIT(PPC_RAW_LI(dst_reg_h, -1)); + fallthrough; + case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */ + if (IMM_L(imm)) + EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm))); + if (IMM_H(imm)) + EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm))); + break; + case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */ + if (dst_reg == src_reg) { + EMIT(PPC_RAW_LI(dst_reg, 0)); + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + } else { + 
EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg)); + EMIT(PPC_RAW_XOR(dst_reg_h, dst_reg_h, src_reg_h)); + } + break; + case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */ + if (dst_reg == src_reg) + EMIT(PPC_RAW_LI(dst_reg, 0)); + else + EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg)); + break; + case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */ + if (imm < 0) + EMIT(PPC_RAW_NOR(dst_reg_h, dst_reg_h, dst_reg_h)); + fallthrough; + case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */ + if (IMM_L(imm)) + EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm))); + if (IMM_H(imm)) + EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm))); + break; + case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */ + EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg)); + break; + case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */ + bpf_set_seen_register(ctx, tmp_reg); + EMIT(PPC_RAW_SUBFIC(__REG_R0, src_reg, 32)); + EMIT(PPC_RAW_SLW(dst_reg_h, dst_reg_h, src_reg)); + EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32)); + EMIT(PPC_RAW_SRW(__REG_R0, dst_reg, __REG_R0)); + EMIT(PPC_RAW_SLW(tmp_reg, dst_reg, tmp_reg)); + EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, __REG_R0)); + EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg)); + EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, tmp_reg)); + break; + case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */ + if (!imm) + break; + EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm)); + break; + case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */ + if (imm < 0) + return -EINVAL; + if (!imm) + break; + if (imm < 32) { + EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg_h, imm, 0, 31 - imm)); + EMIT(PPC_RAW_RLWIMI(dst_reg_h, dst_reg, imm, 32 - imm, 31)); + EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, imm, 0, 31 - imm)); + break; + } + if (imm < 64) + EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg, imm, 0, 31 - imm)); + else + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + EMIT(PPC_RAW_LI(dst_reg, 0)); + break; + case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */ + EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg)); + break; + case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */ + bpf_set_seen_register(ctx, tmp_reg); + EMIT(PPC_RAW_SUBFIC(__REG_R0, src_reg, 32)); + EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg)); + EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32)); + EMIT(PPC_RAW_SLW(__REG_R0, dst_reg_h, __REG_R0)); + EMIT(PPC_RAW_SRW(tmp_reg, dst_reg_h, tmp_reg)); + EMIT(PPC_RAW_OR(dst_reg, dst_reg, __REG_R0)); + EMIT(PPC_RAW_SRW(dst_reg_h, dst_reg_h, src_reg)); + EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg)); + break; + case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */ + if (!imm) + break; + EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm)); + break; + case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */ + if (imm < 0) + return -EINVAL; + if (!imm) + break; + if (imm < 32) { + EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31)); + EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1)); + EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg_h, 32 - imm, imm, 31)); + break; + } + if (imm < 64) + EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg_h, 64 - imm, imm - 32, 31)); + else + EMIT(PPC_RAW_LI(dst_reg, 0)); + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + break; + case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */ + EMIT(PPC_RAW_SRAW(dst_reg_h, dst_reg, src_reg)); + break; + case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */ + bpf_set_seen_register(ctx, tmp_reg); + EMIT(PPC_RAW_SUBFIC(__REG_R0, src_reg, 32)); + EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg)); + EMIT(PPC_RAW_SLW(__REG_R0, dst_reg_h, __REG_R0)); + EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32)); + 
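The 64-bit shift cases above compose the result from the two 32-bit halves: for counts below 32 the vacated bits of one half are refilled from the other half, and for counts of 32 or more one half simply moves across. The JIT does this branchlessly by exploiting that SLW/SRW return zero for shift amounts of 32-63; the standalone C sketch below uses an explicit branch instead, purely as an illustration (the struct and function names are made up here).

#include <stdint.h>
#include <assert.h>

struct u64_pair { uint32_t lo, hi; };

/* Shift a (hi:lo) pair left by 0..63 using only 32-bit operations. */
static struct u64_pair shl64(struct u64_pair v, unsigned int n)
{
	struct u64_pair r;

	if (n == 0)
		return v;
	if (n < 32) {
		r.hi = (v.hi << n) | (v.lo >> (32 - n));
		r.lo = v.lo << n;
	} else {        /* 32..63: the low word moves entirely into the high word */
		r.hi = v.lo << (n - 32);
		r.lo = 0;
	}
	return r;
}

int main(void)
{
	struct u64_pair v = { 0x89abcdefu, 0x01234567u };
	uint64_t full = ((uint64_t)v.hi << 32) | v.lo;

	for (unsigned int n = 0; n < 64; n++) {
		struct u64_pair r = shl64(v, n);
		uint64_t want = full << n;

		assert(r.lo == (uint32_t)want && r.hi == (uint32_t)(want >> 32));
	}
	return 0;
}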
EMIT(PPC_RAW_OR(dst_reg, dst_reg, __REG_R0)); + EMIT(PPC_RAW_RLWINM(__REG_R0, tmp_reg, 0, 26, 26)); + EMIT(PPC_RAW_SRAW(tmp_reg, dst_reg_h, tmp_reg)); + EMIT(PPC_RAW_SRAW(dst_reg_h, dst_reg_h, src_reg)); + EMIT(PPC_RAW_SLW(tmp_reg, tmp_reg, __REG_R0)); + EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg)); + break; + case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */ + if (!imm) + break; + EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm)); + break; + case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */ + if (imm < 0) + return -EINVAL; + if (!imm) + break; + if (imm < 32) { + EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31)); + EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1)); + EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, imm)); + break; + } + if (imm < 64) + EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg_h, imm - 32)); + else + EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg_h, 31)); + EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, 31)); + break; + + /* + * MOV + */ + case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */ + if (dst_reg == src_reg) + break; + EMIT(PPC_RAW_MR(dst_reg, src_reg)); + EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h)); + break; + case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */ + /* special mov32 for zext */ + if (imm == 1) + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + else if (dst_reg != src_reg) + EMIT(PPC_RAW_MR(dst_reg, src_reg)); + break; + case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */ + PPC_LI32(dst_reg, imm); + PPC_EX32(dst_reg_h, imm); + break; + case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */ + PPC_LI32(dst_reg, imm); + break; + + /* + * BPF_FROM_BE/LE + */ + case BPF_ALU | BPF_END | BPF_FROM_LE: + switch (imm) { + case 16: + /* Copy 16 bits to upper part */ + EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg, 16, 0, 15)); + /* Rotate 8 bits right & mask */ + EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 24, 16, 31)); + break; + case 32: + /* + * Rotate word left by 8 bits: + * 2 bytes are already in their final position + * -- byte 2 and 4 (of bytes 1, 2, 3 and 4) + */ + EMIT(PPC_RAW_RLWINM(__REG_R0, dst_reg, 8, 0, 31)); + /* Rotate 24 bits and insert byte 1 */ + EMIT(PPC_RAW_RLWIMI(__REG_R0, dst_reg, 24, 0, 7)); + /* Rotate 24 bits and insert byte 3 */ + EMIT(PPC_RAW_RLWIMI(__REG_R0, dst_reg, 24, 16, 23)); + EMIT(PPC_RAW_MR(dst_reg, __REG_R0)); + break; + case 64: + bpf_set_seen_register(ctx, tmp_reg); + EMIT(PPC_RAW_RLWINM(tmp_reg, dst_reg, 8, 0, 31)); + EMIT(PPC_RAW_RLWINM(__REG_R0, dst_reg_h, 8, 0, 31)); + /* Rotate 24 bits and insert byte 1 */ + EMIT(PPC_RAW_RLWIMI(tmp_reg, dst_reg, 24, 0, 7)); + EMIT(PPC_RAW_RLWIMI(__REG_R0, dst_reg_h, 24, 0, 7)); + /* Rotate 24 bits and insert byte 3 */ + EMIT(PPC_RAW_RLWIMI(tmp_reg, dst_reg, 24, 16, 23)); + EMIT(PPC_RAW_RLWIMI(__REG_R0, dst_reg_h, 24, 16, 23)); + EMIT(PPC_RAW_MR(dst_reg, __REG_R0)); + EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg)); + break; + } + break; + case BPF_ALU | BPF_END | BPF_FROM_BE: + switch (imm) { + case 16: + /* zero-extend 16 bits into 32 bits */ + EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 16, 31)); + break; + case 32: + case 64: + /* nop */ + break; + } + break; + + /* + * BPF_ST(X) + */ + case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */ + EMIT(PPC_RAW_STB(src_reg, dst_reg, off)); + break; + case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */ + PPC_LI32(__REG_R0, imm); + EMIT(PPC_RAW_STB(__REG_R0, dst_reg, off)); + break; + case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */ + EMIT(PPC_RAW_STH(src_reg, dst_reg, off)); + break; + case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */ + 
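The 32-bit BPF_FROM_LE case above byte-swaps a word with one rotate plus two rotate-and-insert (rlwimi) steps: rotating left by 8 already places two of the four bytes, and a 24-bit rotation supplies the remaining two. Below is a standalone C illustration of the same idea; rotl32 and the helper name are inventions of this note, not kernel APIs.

#include <stdint.h>
#include <assert.h>

static uint32_t rotl32(uint32_t x, unsigned int n)
{
	return (x << n) | (x >> ((32 - n) & 31));
}

/* 32-bit byte swap built from one rotate plus two rotate-and-insert steps. */
static uint32_t swab32_by_rotates(uint32_t x)
{
	uint32_t r = rotl32(x, 8);      /* two bytes land in their final spots */

	/* insert the new top byte (bits 24-31) from a 24-bit rotation */
	r = (r & ~0xff000000u) | (rotl32(x, 24) & 0xff000000u);
	/* insert bits 8-15 from the same 24-bit rotation */
	r = (r & ~0x0000ff00u) | (rotl32(x, 24) & 0x0000ff00u);
	return r;
}

int main(void)
{
	assert(swab32_by_rotates(0x11223344u) == 0x44332211u);
	assert(swab32_by_rotates(0xdeadbeefu) == 0xefbeaddeu);
	return 0;
}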
PPC_LI32(__REG_R0, imm); + EMIT(PPC_RAW_STH(__REG_R0, dst_reg, off)); + break; + case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */ + EMIT(PPC_RAW_STW(src_reg, dst_reg, off)); + break; + case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */ + PPC_LI32(__REG_R0, imm); + EMIT(PPC_RAW_STW(__REG_R0, dst_reg, off)); + break; + case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */ + EMIT(PPC_RAW_STW(src_reg_h, dst_reg, off)); + EMIT(PPC_RAW_STW(src_reg, dst_reg, off + 4)); + break; + case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */ + PPC_LI32(__REG_R0, imm); + EMIT(PPC_RAW_STW(__REG_R0, dst_reg, off + 4)); + PPC_EX32(__REG_R0, imm); + EMIT(PPC_RAW_STW(__REG_R0, dst_reg, off)); + break; + + /* + * BPF_STX XADD (atomic_add) + */ + case BPF_STX | BPF_XADD | BPF_W: /* *(u32 *)(dst + off) += src */ + bpf_set_seen_register(ctx, tmp_reg); + /* Get offset into TMP_REG */ + EMIT(PPC_RAW_LI(tmp_reg, off)); + /* load value from memory into r0 */ + EMIT(PPC_RAW_LWARX(__REG_R0, tmp_reg, dst_reg, 0)); + /* add value from src_reg into this */ + EMIT(PPC_RAW_ADD(__REG_R0, __REG_R0, src_reg)); + /* store result back */ + EMIT(PPC_RAW_STWCX(__REG_R0, tmp_reg, dst_reg)); + /* we're done if this succeeded */ + PPC_BCC_SHORT(COND_NE, (ctx->idx - 3) * 4); + break; + + case BPF_STX | BPF_XADD | BPF_DW: /* *(u64 *)(dst + off) += src */ + return -EOPNOTSUPP; + + /* + * BPF_LDX + */ + case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */ + EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off)); + if (!fp->aux->verifier_zext) + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + break; + case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */ + EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off)); + if (!fp->aux->verifier_zext) + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + break; + case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */ + EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off)); + if (!fp->aux->verifier_zext) + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + break; + case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */ + EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off)); + EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4)); + break; + + /* + * Doubleword load + * 16 byte instruction that uses two 'struct bpf_insn' + */ + case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */ + PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm); + PPC_LI32(dst_reg, (u32)insn[i].imm); + /* Adjust for two bpf instructions */ + addrs[++i] = ctx->idx * 4; + break; + + /* + * Return/Exit + */ + case BPF_JMP | BPF_EXIT: + /* + * If this isn't the very last instruction, branch to + * the epilogue. If we _are_ the last instruction, + * we'll just fall through to the epilogue. 
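The BPF_XADD case above is the classic load-reserve/store-conditional retry loop: lwarx reads the word, the add happens in a register, and stwcx. only succeeds if nothing else touched the location, otherwise the conditional branch retries. As a rough portable analogue, a C11 compare-and-swap retry loop behaves the same way; the sketch below is for illustration only and is not kernel code.

#include <stdatomic.h>
#include <stdint.h>
#include <assert.h>

/* Retry the update until the compare-and-swap (the "reservation") succeeds. */
static void atomic_add_u32(_Atomic uint32_t *p, uint32_t inc)
{
	uint32_t old = atomic_load_explicit(p, memory_order_relaxed);

	while (!atomic_compare_exchange_weak_explicit(p, &old, old + inc,
						      memory_order_relaxed,
						      memory_order_relaxed))
		;       /* 'old' was refreshed by the failed CAS, try again */
}

int main(void)
{
	_Atomic uint32_t v = 40;

	atomic_add_u32(&v, 2);
	assert(atomic_load(&v) == 42);
	return 0;
}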
+ */ + if (i != flen - 1) + PPC_JMP(exit_addr); + /* else fall through to the epilogue */ + break; + + /* + * Call kernel helper or bpf function + */ + case BPF_JMP | BPF_CALL: + ctx->seen |= SEEN_FUNC; + + ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass, + &func_addr, &func_addr_fixed); + if (ret < 0) + return ret; + + if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_5))) { + EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5) - 1, __REG_R1, 8)); + EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5), __REG_R1, 12)); + } + + bpf_jit_emit_func_call_rel(image, ctx, func_addr); + + EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0) - 1, __REG_R3)); + EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0), __REG_R4)); + break; + + /* + * Jumps and branches + */ + case BPF_JMP | BPF_JA: + PPC_JMP(addrs[i + 1 + off]); + break; + + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_X: + true_cond = COND_GT; + goto cond_branch; + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_X: + true_cond = COND_LT; + goto cond_branch; + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_X: + true_cond = COND_GE; + goto cond_branch; + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_X: + true_cond = COND_LE; + goto cond_branch; + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_X: + true_cond = COND_EQ; + goto cond_branch; + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_X: + true_cond = COND_NE; + goto cond_branch; + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_X: + true_cond = COND_NE; + /* fallthrough; */ + +cond_branch: + switch (code) { + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JNE | BPF_X: + /* unsigned comparison */ + EMIT(PPC_RAW_CMPLW(dst_reg_h, src_reg_h)); + PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); + EMIT(PPC_RAW_CMPLW(dst_reg, src_reg)); + break; + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + /* unsigned comparison */ + EMIT(PPC_RAW_CMPLW(dst_reg, src_reg)); + break; + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: + /* signed comparison */ + 
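The conditional-jump cases above implement 64-bit comparisons as two 32-bit compares: the high words are compared first (signed or unsigned as the opcode requires), and only when they are equal does an unsigned low-word compare decide. A standalone C sketch of that ordering follows; the helper names are made up for this note.

#include <stdint.h>
#include <assert.h>

/* Unsigned 64-bit "less than" from 32-bit halves: the high words decide
 * unless they are equal, then the low words are compared unsigned. */
static int lt_u64(uint32_t a_hi, uint32_t a_lo, uint32_t b_hi, uint32_t b_lo)
{
	if (a_hi != b_hi)
		return a_hi < b_hi;
	return a_lo < b_lo;
}

/* Signed variant: only the high-word comparison becomes signed. */
static int lt_s64(int32_t a_hi, uint32_t a_lo, int32_t b_hi, uint32_t b_lo)
{
	if (a_hi != b_hi)
		return a_hi < b_hi;
	return a_lo < b_lo;
}

int main(void)
{
	assert(lt_u64(0, 5, 1, 0));            /* 5 < 0x100000000          */
	assert(!lt_u64(1, 0, 0, 0xffffffffu)); /* 0x100000000 > 0xffffffff */
	assert(lt_s64(-1, 0xffffffffu, 0, 0)); /* -1 < 0                   */
	return 0;
}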
EMIT(PPC_RAW_CMPW(dst_reg_h, src_reg_h)); + PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); + EMIT(PPC_RAW_CMPLW(dst_reg, src_reg)); + break; + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: + /* signed comparison */ + EMIT(PPC_RAW_CMPW(dst_reg, src_reg)); + break; + case BPF_JMP | BPF_JSET | BPF_X: + EMIT(PPC_RAW_AND_DOT(__REG_R0, dst_reg_h, src_reg_h)); + PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); + EMIT(PPC_RAW_AND_DOT(__REG_R0, dst_reg, src_reg)); + break; + case BPF_JMP32 | BPF_JSET | BPF_X: { + EMIT(PPC_RAW_AND_DOT(__REG_R0, dst_reg, src_reg)); + break; + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: + /* + * Need sign-extended load, so only positive + * values can be used as imm in cmplwi + */ + if (imm >= 0 && imm < 32768) { + EMIT(PPC_RAW_CMPLWI(dst_reg_h, 0)); + PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); + EMIT(PPC_RAW_CMPLWI(dst_reg, imm)); + } else { + /* sign-extending load ... but unsigned comparison */ + PPC_EX32(__REG_R0, imm); + EMIT(PPC_RAW_CMPLW(dst_reg_h, __REG_R0)); + PPC_LI32(__REG_R0, imm); + PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); + EMIT(PPC_RAW_CMPLW(dst_reg, __REG_R0)); + } + break; + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + if (imm >= 0 && imm < 65536) { + EMIT(PPC_RAW_CMPLWI(dst_reg, imm)); + } else { + PPC_LI32(__REG_R0, imm); + EMIT(PPC_RAW_CMPLW(dst_reg, __REG_R0)); + } + break; + } + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: + if (imm >= 0 && imm < 65536) { + EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0)); + PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); + EMIT(PPC_RAW_CMPLWI(dst_reg, imm)); + } else { + /* sign-extending load */ + EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? 
-1 : 0)); + PPC_LI32(__REG_R0, imm); + PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); + EMIT(PPC_RAW_CMPLW(dst_reg, __REG_R0)); + } + break; + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + /* + * signed comparison, so any 16-bit value + * can be used in cmpwi + */ + if (imm >= -32768 && imm < 32768) { + EMIT(PPC_RAW_CMPWI(dst_reg, imm)); + } else { + /* sign-extending load */ + PPC_LI32(__REG_R0, imm); + EMIT(PPC_RAW_CMPW(dst_reg, __REG_R0)); + } + break; + case BPF_JMP | BPF_JSET | BPF_K: + /* andi does not sign-extend the immediate */ + if (imm >= 0 && imm < 32768) { + /* PPC_ANDI is _only/always_ dot-form */ + EMIT(PPC_RAW_ANDI(__REG_R0, dst_reg, imm)); + } else { + PPC_LI32(__REG_R0, imm); + if (imm < 0) { + EMIT(PPC_RAW_CMPWI(dst_reg_h, 0)); + PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); + } + EMIT(PPC_RAW_AND_DOT(__REG_R0, dst_reg, __REG_R0)); + } + break; + case BPF_JMP32 | BPF_JSET | BPF_K: + /* andi does not sign-extend the immediate */ + if (imm >= -32768 && imm < 32768) { + /* PPC_ANDI is _only/always_ dot-form */ + EMIT(PPC_RAW_ANDI(__REG_R0, dst_reg, imm)); + } else { + PPC_LI32(__REG_R0, imm); + EMIT(PPC_RAW_AND_DOT(__REG_R0, dst_reg, __REG_R0)); + } + break; + } + PPC_BCC(true_cond, addrs[i + 1 + off]); + break; + + /* + * Tail call + */ + case BPF_JMP | BPF_TAIL_CALL: + ctx->seen |= SEEN_TAILCALL; + bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]); + break; + + default: + /* + * The filter contains something cruel & unusual. + * We don't handle it, but also there shouldn't be + * anything missing from our list. + */ + pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n", code, i); + return -EOPNOTSUPP; + } + if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext && + !insn_is_zext(&insn[i + 1])) + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + } + + /* Set end-of-body-code address for exit. 
*/ + addrs[i] = ctx->idx * 4; + + return 0; +} diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index aaf1a887f653..57a8c1153851 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -18,27 +18,6 @@ #include "bpf_jit64.h" -static void bpf_jit_fill_ill_insns(void *area, unsigned int size) -{ - memset32(area, BREAKPOINT_INSTRUCTION, size/4); -} - -static inline void bpf_flush_icache(void *start, void *end) -{ - smp_wmb(); - flush_icache_range((unsigned long)start, (unsigned long)end); -} - -static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i) -{ - return (ctx->seen & (1 << (31 - b2p[i]))); -} - -static inline void bpf_set_seen_register(struct codegen_context *ctx, int i) -{ - ctx->seen |= (1 << (31 - b2p[i])); -} - static inline bool bpf_has_stack_frame(struct codegen_context *ctx) { /* @@ -47,7 +26,7 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx) * - the bpf program uses its stack area * The latter condition is deduced from the usage of BPF_REG_FP */ - return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP); + return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, b2p[BPF_REG_FP]); } /* @@ -85,7 +64,11 @@ static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg) BUG(); } -static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx) +void bpf_jit_realloc_regs(struct codegen_context *ctx) +{ +} + +void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx) { int i; @@ -124,11 +107,11 @@ static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx) * in the protected zone below the previous stack frame */ for (i = BPF_REG_6; i <= BPF_REG_10; i++) - if (bpf_is_seen_register(ctx, i)) + if (bpf_is_seen_register(ctx, b2p[i])) PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i])); /* Setup frame pointer to point to the bpf stack area */ - if (bpf_is_seen_register(ctx, BPF_REG_FP)) + if (bpf_is_seen_register(ctx, b2p[BPF_REG_FP])) EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], 1, STACK_FRAME_MIN_SIZE + ctx->stack_size)); } @@ -139,7 +122,7 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx /* Restore NVRs */ for (i = BPF_REG_6; i <= BPF_REG_10; i++) - if (bpf_is_seen_register(ctx, i)) + if (bpf_is_seen_register(ctx, b2p[i])) PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i])); /* Tear down our stack frame */ @@ -152,7 +135,7 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx } } -static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) +void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) { bpf_jit_emit_common_epilogue(image, ctx); @@ -187,8 +170,7 @@ static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, EMIT(PPC_RAW_BLRL()); } -static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, - u64 func) +void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func) { unsigned int i, ctx_idx = ctx->idx; @@ -289,9 +271,8 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 } /* Assemble the body code between the prologue & epilogue */ -static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, - struct codegen_context *ctx, - u32 *addrs, bool extra_pass) +int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx, + u32 *addrs, bool extra_pass) { const struct bpf_insn *insn = fp->insnsi; int flen = 
fp->len; @@ -330,9 +311,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, * any issues. */ if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32) - bpf_set_seen_register(ctx, insn[i].dst_reg); + bpf_set_seen_register(ctx, dst_reg); if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32) - bpf_set_seen_register(ctx, insn[i].src_reg); + bpf_set_seen_register(ctx, src_reg); switch (code) { /* @@ -1026,249 +1007,3 @@ cond_branch: return 0; } - -/* Fix the branch target addresses for subprog calls */ -static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image, - struct codegen_context *ctx, u32 *addrs) -{ - const struct bpf_insn *insn = fp->insnsi; - bool func_addr_fixed; - u64 func_addr; - u32 tmp_idx; - int i, ret; - - for (i = 0; i < fp->len; i++) { - /* - * During the extra pass, only the branch target addresses for - * the subprog calls need to be fixed. All other instructions - * can left untouched. - * - * The JITed image length does not change because we already - * ensure that the JITed instruction sequence for these calls - * are of fixed length by padding them with NOPs. - */ - if (insn[i].code == (BPF_JMP | BPF_CALL) && - insn[i].src_reg == BPF_PSEUDO_CALL) { - ret = bpf_jit_get_func_addr(fp, &insn[i], true, - &func_addr, - &func_addr_fixed); - if (ret < 0) - return ret; - - /* - * Save ctx->idx as this would currently point to the - * end of the JITed image and set it to the offset of - * the instruction sequence corresponding to the - * subprog call temporarily. - */ - tmp_idx = ctx->idx; - ctx->idx = addrs[i] / 4; - bpf_jit_emit_func_call_rel(image, ctx, func_addr); - - /* - * Restore ctx->idx here. This is safe as the length - * of the JITed sequence remains unchanged. - */ - ctx->idx = tmp_idx; - } - } - - return 0; -} - -struct powerpc64_jit_data { - struct bpf_binary_header *header; - u32 *addrs; - u8 *image; - u32 proglen; - struct codegen_context ctx; -}; - -bool bpf_jit_needs_zext(void) -{ - return true; -} - -struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) -{ - u32 proglen; - u32 alloclen; - u8 *image = NULL; - u32 *code_base; - u32 *addrs; - struct powerpc64_jit_data *jit_data; - struct codegen_context cgctx; - int pass; - int flen; - struct bpf_binary_header *bpf_hdr; - struct bpf_prog *org_fp = fp; - struct bpf_prog *tmp_fp; - bool bpf_blinded = false; - bool extra_pass = false; - - if (!fp->jit_requested) - return org_fp; - - tmp_fp = bpf_jit_blind_constants(org_fp); - if (IS_ERR(tmp_fp)) - return org_fp; - - if (tmp_fp != org_fp) { - bpf_blinded = true; - fp = tmp_fp; - } - - jit_data = fp->aux->jit_data; - if (!jit_data) { - jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); - if (!jit_data) { - fp = org_fp; - goto out; - } - fp->aux->jit_data = jit_data; - } - - flen = fp->len; - addrs = jit_data->addrs; - if (addrs) { - cgctx = jit_data->ctx; - image = jit_data->image; - bpf_hdr = jit_data->header; - proglen = jit_data->proglen; - alloclen = proglen + FUNCTION_DESCR_SIZE; - extra_pass = true; - goto skip_init_ctx; - } - - addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL); - if (addrs == NULL) { - fp = org_fp; - goto out_addrs; - } - - memset(&cgctx, 0, sizeof(struct codegen_context)); - - /* Make sure that the stack is quadword aligned. */ - cgctx.stack_size = round_up(fp->aux->stack_depth, 16); - - /* Scouting faux-generate pass 0 */ - if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) { - /* We hit something illegal or unsupported. 
*/ - fp = org_fp; - goto out_addrs; - } - - /* - * If we have seen a tail call, we need a second pass. - * This is because bpf_jit_emit_common_epilogue() is called - * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen. - */ - if (cgctx.seen & SEEN_TAILCALL) { - cgctx.idx = 0; - if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) { - fp = org_fp; - goto out_addrs; - } - } - - /* - * Pretend to build prologue, given the features we've seen. This will - * update ctgtx.idx as it pretends to output instructions, then we can - * calculate total size from idx. - */ - bpf_jit_build_prologue(0, &cgctx); - bpf_jit_build_epilogue(0, &cgctx); - - proglen = cgctx.idx * 4; - alloclen = proglen + FUNCTION_DESCR_SIZE; - - bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4, - bpf_jit_fill_ill_insns); - if (!bpf_hdr) { - fp = org_fp; - goto out_addrs; - } - -skip_init_ctx: - code_base = (u32 *)(image + FUNCTION_DESCR_SIZE); - - if (extra_pass) { - /* - * Do not touch the prologue and epilogue as they will remain - * unchanged. Only fix the branch target address for subprog - * calls in the body. - * - * This does not change the offsets and lengths of the subprog - * call instruction sequences and hence, the size of the JITed - * image as well. - */ - bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs); - - /* There is no need to perform the usual passes. */ - goto skip_codegen_passes; - } - - /* Code generation passes 1-2 */ - for (pass = 1; pass < 3; pass++) { - /* Now build the prologue, body code & epilogue for real. */ - cgctx.idx = 0; - bpf_jit_build_prologue(code_base, &cgctx); - bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass); - bpf_jit_build_epilogue(code_base, &cgctx); - - if (bpf_jit_enable > 1) - pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass, - proglen - (cgctx.idx * 4), cgctx.seen); - } - -skip_codegen_passes: - if (bpf_jit_enable > 1) - /* - * Note that we output the base address of the code_base - * rather than image, since opcodes are in code_base. - */ - bpf_jit_dump(flen, proglen, pass, code_base); - -#ifdef PPC64_ELF_ABI_v1 - /* Function descriptor nastiness: Address + TOC */ - ((u64 *)image)[0] = (u64)code_base; - ((u64 *)image)[1] = local_paca->kernel_toc; -#endif - - fp->bpf_func = (void *)image; - fp->jited = 1; - fp->jited_len = alloclen; - - bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE)); - if (!fp->is_func || extra_pass) { - bpf_prog_fill_jited_linfo(fp, addrs); -out_addrs: - kfree(addrs); - kfree(jit_data); - fp->aux->jit_data = NULL; - } else { - jit_data->addrs = addrs; - jit_data->ctx = cgctx; - jit_data->proglen = proglen; - jit_data->image = image; - jit_data->header = bpf_hdr; - } - -out: - if (bpf_blinded) - bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp); - - return fp; -} - -/* Overriding bpf_jit_free() as we don't set images read-only. 
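The compile flow shown here relies on a "scouting" pass: bpf_jit_build_body() is first run with a NULL image so that only ctx->idx advances, the prologue and epilogue are sized the same way, the image is then allocated, and the real passes re-run the same emitters into memory. Below is a stripped-down standalone C sketch of that count-then-emit pattern; all names in it are invented for the illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Emit one 32-bit instruction, or just count it when no image is allocated. */
struct cg_ctx { uint32_t *image; unsigned int idx; };

static void emit(struct cg_ctx *ctx, uint32_t insn)
{
	if (ctx->image)
		ctx->image[ctx->idx] = insn;
	ctx->idx++;
}

static void build_body(struct cg_ctx *ctx)
{
	emit(ctx, 0x60000000);  /* stand-ins for real instructions (nop) */
	emit(ctx, 0x4e800020);  /* (blr) */
}

int main(void)
{
	struct cg_ctx ctx = { NULL, 0 };

	build_body(&ctx);                       /* pass 0: sizing only */
	ctx.image = calloc(ctx.idx, sizeof(uint32_t));
	if (!ctx.image)
		return 1;
	ctx.idx = 0;
	build_body(&ctx);                       /* later pass: real code generation */
	printf("emitted %u instructions\n", ctx.idx);
	free(ctx.image);
	return 0;
}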
*/ -void bpf_jit_free(struct bpf_prog *fp) -{ - unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; - struct bpf_binary_header *bpf_hdr = (void *)addr; - - if (fp->jited) - bpf_jit_binary_free(bpf_hdr); - - bpf_prog_unlock_free(fp); -} diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 766f064f00fb..16d4d1b6a1ff 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -17,6 +17,7 @@ #include <asm/firmware.h> #include <asm/ptrace.h> #include <asm/code-patching.h> +#include <asm/interrupt.h> #ifdef CONFIG_PPC64 #include "internal.h" @@ -168,7 +169,7 @@ static bool regs_use_siar(struct pt_regs *regs) * they have not been setup using perf_read_regs() and so regs->result * is something random. */ - return ((TRAP(regs) == 0xf00) && regs->result); + return ((TRAP(regs) == INTERRUPT_PERFMON) && regs->result); } /* @@ -347,7 +348,7 @@ static inline void perf_read_regs(struct pt_regs *regs) * hypervisor samples as well as samples in the kernel with * interrupts off hence the userspace check. */ - if (TRAP(regs) != 0xf00) + if (TRAP(regs) != INTERRUPT_PERFMON) use_siar = 0; else if ((ppmu->flags & PPMU_NO_SIAR)) use_siar = 0; @@ -1963,6 +1964,17 @@ static int power_pmu_event_init(struct perf_event *event) return -ENOENT; } + /* + * PMU config registers have fields that are + * reserved and some specific values for bit fields are reserved. + * For ex., MMCRA[61:62] is Randome Sampling Mode (SM) + * and value of 0b11 to this field is reserved. + * Check for invalid values in attr.config. + */ + if (ppmu->check_attr_config && + ppmu->check_attr_config(event)) + return -EINVAL; + event->hw.config_base = ev; event->hw.idx = 0; @@ -2206,9 +2218,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val, ppmu->get_mem_data_src) ppmu->get_mem_data_src(&data.data_src, ppmu->flags, regs); - if (event->attr.sample_type & PERF_SAMPLE_WEIGHT && + if (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE && ppmu->get_mem_weight) - ppmu->get_mem_weight(&data.weight.full); + ppmu->get_mem_weight(&data.weight.full, event->attr.sample_type); if (perf_event_overflow(event, &data, regs)) power_pmu_stop(event, 0); diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index e5eb33255066..1816f560a465 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -226,14 +226,14 @@ static struct attribute_group event_long_desc_group = { static struct kmem_cache *hv_page_cache; -DEFINE_PER_CPU(int, hv_24x7_txn_flags); -DEFINE_PER_CPU(int, hv_24x7_txn_err); +static DEFINE_PER_CPU(int, hv_24x7_txn_flags); +static DEFINE_PER_CPU(int, hv_24x7_txn_err); struct hv_24x7_hw { struct perf_event *events[255]; }; -DEFINE_PER_CPU(struct hv_24x7_hw, hv_24x7_hw); +static DEFINE_PER_CPU(struct hv_24x7_hw, hv_24x7_hw); /* * request_buffer and result_buffer are not required to be 4k aligned, @@ -241,8 +241,8 @@ DEFINE_PER_CPU(struct hv_24x7_hw, hv_24x7_hw); * the simplest way to ensure that. 
*/ #define H24x7_DATA_BUFFER_SIZE 4096 -DEFINE_PER_CPU(char, hv_24x7_reqb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096); -DEFINE_PER_CPU(char, hv_24x7_resb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096); +static DEFINE_PER_CPU(char, hv_24x7_reqb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096); +static DEFINE_PER_CPU(char, hv_24x7_resb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096); static unsigned int max_num_requests(int interface_version) { diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index e4f577da33d8..f92bf5f6b74f 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -21,7 +21,7 @@ PMU_FORMAT_ATTR(thresh_stop, "config:32-35"); PMU_FORMAT_ATTR(thresh_start, "config:36-39"); PMU_FORMAT_ATTR(thresh_cmp, "config:40-49"); -struct attribute *isa207_pmu_format_attr[] = { +static struct attribute *isa207_pmu_format_attr[] = { &format_attr_event.attr, &format_attr_pmcxsel.attr, &format_attr_mark.attr, @@ -275,17 +275,47 @@ void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags, sier = mfspr(SPRN_SIER); val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT; - if (val == 1 || val == 2) { - idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT; - sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT; + if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31))) + return; + + idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT; + sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT; + + dsrc->val = isa207_find_source(idx, sub_idx); + if (val == 7) { + u64 mmcra; + u32 op_type; - dsrc->val = isa207_find_source(idx, sub_idx); + /* + * Type 0b111 denotes either larx or stcx instruction. Use the + * MMCRA sampling bits [57:59] along with the type value + * to determine the exact instruction type. If the sampling + * criteria is neither load or store, set the type as default + * to NA. + */ + mmcra = mfspr(SPRN_MMCRA); + + op_type = (mmcra >> MMCRA_SAMP_ELIG_SHIFT) & MMCRA_SAMP_ELIG_MASK; + switch (op_type) { + case 5: + dsrc->val |= P(OP, LOAD); + break; + case 7: + dsrc->val |= P(OP, STORE); + break; + default: + dsrc->val |= P(OP, NA); + break; + } + } else { dsrc->val |= (val == 1) ? P(OP, LOAD) : P(OP, STORE); } } -void isa207_get_mem_weight(u64 *weight) +void isa207_get_mem_weight(u64 *weight, u64 type) { + union perf_sample_weight *weight_fields; + u64 weight_lat; u64 mmcra = mfspr(SPRN_MMCRA); u64 exp = MMCRA_THR_CTR_EXP(mmcra); u64 mantissa = MMCRA_THR_CTR_MANT(mmcra); @@ -295,10 +325,31 @@ void isa207_get_mem_weight(u64 *weight) if (cpu_has_feature(CPU_FTR_ARCH_31)) mantissa = P10_MMCRA_THR_CTR_MANT(mmcra); - if (val == 0 || val == 7) - *weight = 0; + if (val == 0 || (val == 7 && !cpu_has_feature(CPU_FTR_ARCH_31))) + weight_lat = 0; else - *weight = mantissa << (2 * exp); + weight_lat = mantissa << (2 * exp); + + /* + * Use 64 bit weight field (full) if sample type is + * WEIGHT. + * + * if sample type is WEIGHT_STRUCT: + * - store memory latency in the lower 32 bits. + * - For ISA v3.1, use remaining two 16 bit fields of + * perf_sample_weight to store cycle counter values + * from sier2. 
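isa207_get_mem_weight() above decodes the MMCRA threshold counter from a mantissa/exponent pair, i.e. latency = mantissa << (2 * exp), which is the mantissa times 4 to the power of the exponent. A one-line standalone C illustration (the helper name is made up here):

#include <stdint.h>
#include <stdio.h>

/* Decode a threshold-counter style mantissa/exponent pair: value = mant * 4^exp. */
static uint64_t thresh_counter_value(uint64_t mantissa, uint64_t exp)
{
	return mantissa << (2 * exp);
}

int main(void)
{
	printf("%llu\n", (unsigned long long)thresh_counter_value(100, 3)); /* 6400 */
	return 0;
}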
+ */ + weight_fields = (union perf_sample_weight *)weight; + if (type & PERF_SAMPLE_WEIGHT) + weight_fields->full = weight_lat; + else { + weight_fields->var1_dw = (u32)weight_lat; + if (cpu_has_feature(CPU_FTR_ARCH_31)) { + weight_fields->var2_w = P10_SIER2_FINISH_CYC(mfspr(SPRN_SIER2)); + weight_fields->var3_w = P10_SIER2_DISPATCH_CYC(mfspr(SPRN_SIER2)); + } + } } int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp, u64 event_config1) @@ -447,8 +498,8 @@ ebb_bhrb: * EBB events are pinned & exclusive, so this should never actually * hit, but we leave it as a fallback in case. */ - mask |= CNST_EBB_VAL(ebb); - value |= CNST_EBB_MASK; + mask |= CNST_EBB_MASK; + value |= CNST_EBB_VAL(ebb); *maskp = mask; *valp = value; @@ -694,3 +745,45 @@ int isa207_get_alternatives(u64 event, u64 alt[], int size, unsigned int flags, return num_alt; } + +int isa3XX_check_attr_config(struct perf_event *ev) +{ + u64 val, sample_mode; + u64 event = ev->attr.config; + + val = (event >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK; + sample_mode = val & 0x3; + + /* + * MMCRA[61:62] is Random Sampling Mode (SM). + * value of 0b11 is reserved. + */ + if (sample_mode == 0x3) + return -EINVAL; + + /* + * Check for all reserved value + * Source: Performance Monitoring Unit User Guide + */ + switch (val) { + case 0x5: + case 0x9: + case 0xD: + case 0x19: + case 0x1D: + case 0x1A: + case 0x1E: + return -EINVAL; + } + + /* + * MMCRA[48:51]/[52:55]) Threshold Start/Stop + * Events Selection. + * 0b11110000/0b00001111 is reserved. + */ + val = (event >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK; + if (((val & 0xF0) == 0xF0) || ((val & 0xF) == 0xF)) + return -EINVAL; + + return 0; +} diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h index 1af0e8c97ac7..4a2cbc3dc047 100644 --- a/arch/powerpc/perf/isa207-common.h +++ b/arch/powerpc/perf/isa207-common.h @@ -220,6 +220,7 @@ /* Bits in MMCRA for PowerISA v2.07 */ #define MMCRA_SAMP_MODE_SHIFT 1 #define MMCRA_SAMP_ELIG_SHIFT 4 +#define MMCRA_SAMP_ELIG_MASK 7 #define MMCRA_THR_CTL_SHIFT 8 #define MMCRA_THR_SEL_SHIFT 16 #define MMCRA_THR_CMP_SHIFT 32 @@ -265,6 +266,10 @@ #define ISA207_SIER_DATA_SRC_SHIFT 53 #define ISA207_SIER_DATA_SRC_MASK (0x7ull << ISA207_SIER_DATA_SRC_SHIFT) +/* Bits in SIER2/SIER3 for Power10 */ +#define P10_SIER2_FINISH_CYC(sier2) (((sier2) >> (63 - 37)) & 0x7fful) +#define P10_SIER2_DISPATCH_CYC(sier2) (((sier2) >> (63 - 13)) & 0x7fful) + #define P(a, b) PERF_MEM_S(a, b) #define PH(a, b) (P(LVL, HIT) | P(a, b)) #define PM(a, b) (P(LVL, MISS) | P(a, b)) @@ -278,6 +283,8 @@ int isa207_get_alternatives(u64 event, u64 alt[], int size, unsigned int flags, const unsigned int ev_alt[][MAX_ALT]); void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags, struct pt_regs *regs); -void isa207_get_mem_weight(u64 *weight); +void isa207_get_mem_weight(u64 *weight, u64 type); + +int isa3XX_check_attr_config(struct perf_event *ev); #endif diff --git a/arch/powerpc/perf/power10-events-list.h b/arch/powerpc/perf/power10-events-list.h index e45dafe818ed..93be7197d250 100644 --- a/arch/powerpc/perf/power10-events-list.h +++ b/arch/powerpc/perf/power10-events-list.h @@ -75,5 +75,5 @@ EVENT(PM_RUN_INST_CMPL_ALT, 0x00002); * thresh end (TE) */ -EVENT(MEM_LOADS, 0x34340401e0); -EVENT(MEM_STORES, 0x343c0401e0); +EVENT(MEM_LOADS, 0x35340401e0); +EVENT(MEM_STORES, 0x353c0401e0); diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c index a901c1348cad..f9d64c63bb4a 100644 --- 
a/arch/powerpc/perf/power10-pmu.c +++ b/arch/powerpc/perf/power10-pmu.c @@ -106,6 +106,18 @@ static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[]) return num_alt; } +static int power10_check_attr_config(struct perf_event *ev) +{ + u64 val; + u64 event = ev->attr.config; + + val = (event >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK; + if (val == 0x10 || isa3XX_check_attr_config(ev)) + return -EINVAL; + + return 0; +} + GENERIC_EVENT_ATTR(cpu-cycles, PM_RUN_CYC); GENERIC_EVENT_ATTR(instructions, PM_RUN_INST_CMPL); GENERIC_EVENT_ATTR(branch-instructions, PM_BR_CMPL); @@ -559,6 +571,7 @@ static struct power_pmu power10_pmu = { .attr_groups = power10_pmu_attr_groups, .bhrb_nr = 32, .capabilities = PERF_PMU_CAP_EXTENDED_REGS, + .check_attr_config = power10_check_attr_config, }; int init_power10_pmu(void) diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c index 2a57e93a79dc..ff3382140d7e 100644 --- a/arch/powerpc/perf/power9-pmu.c +++ b/arch/powerpc/perf/power9-pmu.c @@ -151,6 +151,18 @@ static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[]) return num_alt; } +static int power9_check_attr_config(struct perf_event *ev) +{ + u64 val; + u64 event = ev->attr.config; + + val = (event >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK; + if (val == 0xC || isa3XX_check_attr_config(ev)) + return -EINVAL; + + return 0; +} + GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC); GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_ICT_NOSLOT_CYC); GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL); @@ -437,6 +449,7 @@ static struct power_pmu power9_pmu = { .attr_groups = power9_pmu_attr_groups, .bhrb_nr = 32, .capabilities = PERF_PMU_CAP_EXTENDED_REGS, + .check_attr_config = power9_check_attr_config, }; int init_power9_pmu(void) diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index 7d41e9264510..83975ef50975 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig @@ -5,7 +5,7 @@ config PPC_47x select MPIC help This option enables support for the 47x family of processors and is - not currently compatible with other 44x or 46x varients + not currently compatible with other 44x or 46x variants config BAMBOO bool "Bamboo" diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S index 11475c58ea43..afee8b1515a8 100644 --- a/arch/powerpc/platforms/52xx/lite5200_sleep.S +++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S @@ -181,7 +181,7 @@ sram_code: udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */ mullw r12, r12, r11 mftb r13 /* start */ - addi r12, r13, r12 /* end */ + add r12, r13, r12 /* end */ 1: mftb r13 /* current */ cmp cr0, r13, r12 diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 3ce907523b1e..f998e655b570 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -40,8 +40,8 @@ config PPC_85xx config PPC_8xx bool "Freescale 8xx" + select ARCH_SUPPORTS_HUGETLBFS select FSL_SOC - select SYS_SUPPORTS_HUGETLBFS select PPC_HAVE_KUEP select PPC_HAVE_KUAP select HAVE_ARCH_VMAP_STACK @@ -95,12 +95,16 @@ config PPC_BOOK3S_64 bool "Server processors" select PPC_FPU select PPC_HAVE_PMU_SUPPORT - select SYS_SUPPORTS_HUGETLBFS select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION + select ARCH_ENABLE_PMD_SPLIT_PTLOCK select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE + select 
ARCH_SUPPORTS_HUGETLBFS select ARCH_SUPPORTS_NUMA_BALANCING select IRQ_WORK select PPC_MM_SLICES + select PPC_HAVE_KUEP + select PPC_HAVE_KUAP config PPC_BOOK3E_64 bool "Embedded processors" @@ -278,9 +282,9 @@ config FSL_BOOKE # this is for common code between PPC32 & PPC64 FSL BOOKE config PPC_FSL_BOOK3E bool + select ARCH_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64 select FSL_EMB_PERFMON select PPC_SMP_MUXED_IPI - select SYS_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64 select PPC_DOORBELL default y if FSL_BOOKE @@ -306,6 +310,7 @@ config PHYS_64BIT config ALTIVEC bool "AltiVec Support" depends on PPC_BOOK3S_32 || PPC_BOOK3S_64 || (PPC_E500MC && PPC64) + select PPC_FPU help This option enables kernel support for the Altivec extensions to the PowerPC processor. The kernel currently supports saving and restoring @@ -355,16 +360,10 @@ config SPE If in doubt, say Y here. -config ARCH_ENABLE_SPLIT_PMD_PTLOCK - def_bool y - depends on PPC_BOOK3S_64 - config PPC_RADIX_MMU bool "Radix MMU Support" depends on PPC_BOOK3S_64 select ARCH_HAS_GIGANTIC_PAGE - select PPC_HAVE_KUEP - select PPC_HAVE_KUAP default y help Enable support for the Power ISA 3.0 Radix style MMU. Currently this @@ -420,10 +419,6 @@ config PPC_PKEY depends on PPC_BOOK3S_64 depends on PPC_MEM_KEYS || PPC_KUAP || PPC_KUEP -config ARCH_ENABLE_HUGEPAGE_MIGRATION - def_bool y - depends on PPC_BOOK3S_64 && HUGETLB_PAGE && MIGRATION - config PPC_MMU_NOHASH def_bool y diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 2124831cf57c..fa08699aedeb 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -486,7 +486,8 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np, window->table.it_size = size >> window->table.it_page_shift; window->table.it_ops = &cell_iommu_ops; - iommu_init_table(&window->table, iommu->nid, 0, 0); + if (!iommu_init_table(&window->table, iommu->nid, 0, 0)) + panic("Failed to initialize iommu table"); pr_debug("\tioid %d\n", window->ioid); pr_debug("\tblocksize %ld\n", window->table.it_blocksize); diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c index abdef9bcf432..fe0d8797a00a 100644 --- a/arch/powerpc/platforms/cell/spu_callbacks.c +++ b/arch/powerpc/platforms/cell/spu_callbacks.c @@ -35,9 +35,9 @@ */ static void *spu_syscall_table[] = { +#define __SYSCALL_WITH_COMPAT(nr, entry, compat) __SYSCALL(nr, entry) #define __SYSCALL(nr, entry) [nr] = entry, #include <asm/syscall_table_spu.h> -#undef __SYSCALL }; long spu_sys_callback(struct spu_syscall_block *s) diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c index 60b5583e9eaf..1a587618015c 100644 --- a/arch/powerpc/platforms/cell/spufs/coredump.c +++ b/arch/powerpc/platforms/cell/spufs/coredump.c @@ -149,8 +149,7 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i, return -EIO; } - if (!dump_skip(cprm, roundup(cprm->pos - ret + sz, 4) - cprm->pos)) - return -EIO; + dump_skip_to(cprm, roundup(cprm->pos - ret + sz, 4)); return 0; } diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index b83a3670bd74..bed05b644c2c 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -236,10 +236,7 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags, if (!inode) return -ENOSPC; - if (dir->i_mode & S_ISGID) { - inode->i_gid = dir->i_gid; - 
inode->i_mode &= S_ISGID; - } + inode_init_owner(&init_user_ns, inode, dir, mode | S_IFDIR); ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */ SPUFS_I(inode)->i_ctx = ctx; if (!ctx) { @@ -470,10 +467,7 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode) goto out; ret = 0; - if (dir->i_mode & S_ISGID) { - inode->i_gid = dir->i_gid; - inode->i_mode &= S_ISGID; - } + inode_init_owner(&init_user_ns, inode, dir, mode | S_IFDIR); gang = alloc_spu_gang(); SPUFS_I(inode)->i_ctx = NULL; SPUFS_I(inode)->i_gang = gang; diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c index 8c421dc78b28..76e6256cb0a7 100644 --- a/arch/powerpc/platforms/chrp/pci.c +++ b/arch/powerpc/platforms/chrp/pci.c @@ -131,8 +131,7 @@ static struct pci_ops rtas_pci_ops = volatile struct Hydra __iomem *Hydra = NULL; -int __init -hydra_init(void) +static int __init hydra_init(void) { struct device_node *np; struct resource r; diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig index c1920961f410..4c6d703a4284 100644 --- a/arch/powerpc/platforms/embedded6xx/Kconfig +++ b/arch/powerpc/platforms/embedded6xx/Kconfig @@ -71,11 +71,6 @@ config MPC10X_BRIDGE bool select PPC_INDIRECT_PCI -config MV64X60 - bool - select PPC_INDIRECT_PCI - select CHECK_CACHE_COHERENCY - config GAMECUBE_COMMON bool diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c index a20b9576de22..37875e478b3a 100644 --- a/arch/powerpc/platforms/maple/pci.c +++ b/arch/powerpc/platforms/maple/pci.c @@ -34,7 +34,7 @@ static struct pci_controller *u3_agp, *u3_ht, *u4_pcie; static int __init fixup_one_level_bus_range(struct device_node *node, int higher) { - for (; node != 0;node = node->sibling) { + for (; node; node = node->sibling) { const int *bus_range; const unsigned int *class_code; int len; diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c index b500a6e47e6b..5be7242fbd86 100644 --- a/arch/powerpc/platforms/pasemi/iommu.c +++ b/arch/powerpc/platforms/pasemi/iommu.c @@ -146,7 +146,9 @@ static void iommu_table_iobmap_setup(void) */ iommu_table_iobmap.it_blocksize = 4; iommu_table_iobmap.it_ops = &iommu_table_iobmap_ops; - iommu_init_table(&iommu_table_iobmap, 0, 0, 0); + if (!iommu_init_table(&iommu_table_iobmap, 0, 0, 0)) + panic("Failed to initialize iommu table"); + pr_debug(" <- %s\n", __func__); } diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile index 2eb6ae150d1f..be2546b96816 100644 --- a/arch/powerpc/platforms/powernv/Makefile +++ b/arch/powerpc/platforms/powernv/Makefile @@ -10,7 +10,7 @@ obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o obj-$(CONFIG_FA_DUMP) += opal-fadump.o obj-$(CONFIG_PRESERVE_FA_DUMP) += opal-fadump.o obj-$(CONFIG_OPAL_CORE) += opal-core.o -obj-$(CONFIG_PCI) += pci.o pci-ioda.o npu-dma.o pci-ioda-tce.o +obj-$(CONFIG_PCI) += pci.o pci-ioda.o pci-ioda-tce.o obj-$(CONFIG_PCI_IOV) += pci-sriov.o obj-$(CONFIG_CXL_BASE) += pci-cxl.o obj-$(CONFIG_EEH) += eeh-powernv.o diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c index 019669eb21d2..537a4daed614 100644 --- a/arch/powerpc/platforms/powernv/memtrace.c +++ b/arch/powerpc/platforms/powernv/memtrace.c @@ -46,10 +46,26 @@ static ssize_t memtrace_read(struct file *filp, char __user *ubuf, return simple_read_from_buffer(ubuf, count, ppos, ent->mem, ent->size); } +static int memtrace_mmap(struct file *filp, struct 
vm_area_struct *vma) +{ + struct memtrace_entry *ent = filp->private_data; + + if (ent->size < vma->vm_end - vma->vm_start) + return -EINVAL; + + if (vma->vm_pgoff << PAGE_SHIFT >= ent->size) + return -EINVAL; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + return remap_pfn_range(vma, vma->vm_start, PHYS_PFN(ent->start) + vma->vm_pgoff, + vma->vm_end - vma->vm_start, vma->vm_page_prot); +} + static const struct file_operations memtrace_fops = { .llseek = default_llseek, .read = memtrace_read, .open = simple_open, + .mmap = memtrace_mmap, }; #define FLUSH_CHUNK_SIZE SZ_1G @@ -88,8 +104,8 @@ static void memtrace_clear_range(unsigned long start_pfn, * Before we go ahead and use this range as cache inhibited range * flush the cache. */ - flush_dcache_range_chunked(PFN_PHYS(start_pfn), - PFN_PHYS(start_pfn + nr_pages), + flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn), + (unsigned long)pfn_to_kaddr(start_pfn + nr_pages), FLUSH_CHUNK_SIZE); } @@ -187,7 +203,7 @@ static int memtrace_init_debugfs(void) dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir); ent->dir = dir; - debugfs_create_file("trace", 0400, dir, ent, &memtrace_fops); + debugfs_create_file_unsafe("trace", 0600, dir, ent, &memtrace_fops); debugfs_create_x64("start", 0400, dir, &ent->start); debugfs_create_x64("size", 0400, dir, &ent->size); } diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c deleted file mode 100644 index b711dc3262a3..000000000000 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ /dev/null @@ -1,705 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * This file implements the DMA operations for NVLink devices. The NPU - * devices all point to the same iommu table as the parent PCI device. - * - * Copyright Alistair Popple, IBM Corporation 2015. - */ - -#include <linux/mmu_notifier.h> -#include <linux/mmu_context.h> -#include <linux/of.h> -#include <linux/pci.h> -#include <linux/memblock.h> -#include <linux/sizes.h> - -#include <asm/debugfs.h> -#include <asm/powernv.h> -#include <asm/ppc-pci.h> -#include <asm/opal.h> - -#include "pci.h" - -static struct pci_dev *get_pci_dev(struct device_node *dn) -{ - struct pci_dn *pdn = PCI_DN(dn); - struct pci_dev *pdev; - - pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus), - pdn->busno, pdn->devfn); - - /* - * pci_get_domain_bus_and_slot() increased the reference count of - * the PCI device, but callers don't need that actually as the PE - * already holds a reference to the device. Since callers aren't - * aware of the reference count change, call pci_dev_put() now to - * avoid leaks. - */ - if (pdev) - pci_dev_put(pdev); - - return pdev; -} - -/* Given a NPU device get the associated PCI device. */ -struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev) -{ - struct device_node *dn; - struct pci_dev *gpdev; - - if (WARN_ON(!npdev)) - return NULL; - - if (WARN_ON(!npdev->dev.of_node)) - return NULL; - - /* Get assoicated PCI device */ - dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0); - if (!dn) - return NULL; - - gpdev = get_pci_dev(dn); - of_node_put(dn); - - return gpdev; -} -EXPORT_SYMBOL(pnv_pci_get_gpu_dev); - -/* Given the real PCI device get a linked NPU device. 
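With the new memtrace_mmap() handler above, the debugfs "trace" file can be mapped directly instead of read. A hedged userspace sketch follows; the debugfs mount point and region name are assumptions made for illustration (typically something like /sys/kernel/debug/powerpc/memtrace/<id>/trace), and the mapping length must not exceed the region size.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Path is an assumption for illustration; adjust to the actual region. */
	const char *path = "/sys/kernel/debug/powerpc/memtrace/00000000/trace";
	size_t len = 4096;              /* must be <= the region's size */
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	void *p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	printf("first word: 0x%lx\n", *(volatile unsigned long *)p);
	munmap(p, len);
	close(fd);
	return 0;
}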
*/ -struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index) -{ - struct device_node *dn; - struct pci_dev *npdev; - - if (WARN_ON(!gpdev)) - return NULL; - - /* Not all PCI devices have device-tree nodes */ - if (!gpdev->dev.of_node) - return NULL; - - /* Get assoicated PCI device */ - dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index); - if (!dn) - return NULL; - - npdev = get_pci_dev(dn); - of_node_put(dn); - - return npdev; -} -EXPORT_SYMBOL(pnv_pci_get_npu_dev); - -#ifdef CONFIG_IOMMU_API -/* - * Returns the PE assoicated with the PCI device of the given - * NPU. Returns the linked pci device if pci_dev != NULL. - */ -static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe, - struct pci_dev **gpdev) -{ - struct pnv_phb *phb; - struct pci_controller *hose; - struct pci_dev *pdev; - struct pnv_ioda_pe *pe; - struct pci_dn *pdn; - - pdev = pnv_pci_get_gpu_dev(npe->pdev); - if (!pdev) - return NULL; - - pdn = pci_get_pdn(pdev); - if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) - return NULL; - - hose = pci_bus_to_host(pdev->bus); - phb = hose->private_data; - pe = &phb->ioda.pe_array[pdn->pe_number]; - - if (gpdev) - *gpdev = pdev; - - return pe; -} - -static long pnv_npu_unset_window(struct iommu_table_group *table_group, - int num); - -static long pnv_npu_set_window(struct iommu_table_group *table_group, int num, - struct iommu_table *tbl) -{ - struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe, - table_group); - struct pnv_phb *phb = npe->phb; - int64_t rc; - const unsigned long size = tbl->it_indirect_levels ? - tbl->it_level_size : tbl->it_size; - const __u64 start_addr = tbl->it_offset << tbl->it_page_shift; - const __u64 win_size = tbl->it_size << tbl->it_page_shift; - int num2 = (num == 0) ? 1 : 0; - - /* NPU has just one TVE so if there is another table, remove it first */ - if (npe->table_group.tables[num2]) - pnv_npu_unset_window(&npe->table_group, num2); - - pe_info(npe, "Setting up window %llx..%llx pg=%lx\n", - start_addr, start_addr + win_size - 1, - IOMMU_PAGE_SIZE(tbl)); - - rc = opal_pci_map_pe_dma_window(phb->opal_id, - npe->pe_number, - npe->pe_number, - tbl->it_indirect_levels + 1, - __pa(tbl->it_base), - size << 3, - IOMMU_PAGE_SIZE(tbl)); - if (rc) { - pe_err(npe, "Failed to configure TCE table, err %lld\n", rc); - return rc; - } - pnv_pci_ioda2_tce_invalidate_entire(phb, false); - - /* Add the table to the list so its TCE cache will get invalidated */ - pnv_pci_link_table_and_group(phb->hose->node, num, - tbl, &npe->table_group); - - return 0; -} - -static long pnv_npu_unset_window(struct iommu_table_group *table_group, int num) -{ - struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe, - table_group); - struct pnv_phb *phb = npe->phb; - int64_t rc; - - if (!npe->table_group.tables[num]) - return 0; - - pe_info(npe, "Removing DMA window\n"); - - rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number, - npe->pe_number, - 0/* levels */, 0/* table address */, - 0/* table size */, 0/* page size */); - if (rc) { - pe_err(npe, "Unmapping failed, ret = %lld\n", rc); - return rc; - } - pnv_pci_ioda2_tce_invalidate_entire(phb, false); - - pnv_pci_unlink_table_and_group(npe->table_group.tables[num], - &npe->table_group); - - return 0; -} - -/* Switch ownership from platform code to external user (e.g. 
VFIO) */ -static void pnv_npu_take_ownership(struct iommu_table_group *table_group) -{ - struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe, - table_group); - struct pnv_phb *phb = npe->phb; - int64_t rc; - struct pci_dev *gpdev = NULL; - - /* - * Note: NPU has just a single TVE in the hardware which means that - * while used by the kernel, it can have either 32bit window or - * DMA bypass but never both. So we deconfigure 32bit window only - * if it was enabled at the moment of ownership change. - */ - if (npe->table_group.tables[0]) { - pnv_npu_unset_window(&npe->table_group, 0); - return; - } - - /* Disable bypass */ - rc = opal_pci_map_pe_dma_window_real(phb->opal_id, - npe->pe_number, npe->pe_number, - 0 /* bypass base */, 0); - if (rc) { - pe_err(npe, "Failed to disable bypass, err %lld\n", rc); - return; - } - pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false); - - get_gpu_pci_dev_and_pe(npe, &gpdev); - if (gpdev) - pnv_npu2_unmap_lpar_dev(gpdev); -} - -static void pnv_npu_release_ownership(struct iommu_table_group *table_group) -{ - struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe, - table_group); - struct pci_dev *gpdev = NULL; - - get_gpu_pci_dev_and_pe(npe, &gpdev); - if (gpdev) - pnv_npu2_map_lpar_dev(gpdev, 0, MSR_DR | MSR_PR | MSR_HV); -} - -static struct iommu_table_group_ops pnv_pci_npu_ops = { - .set_window = pnv_npu_set_window, - .unset_window = pnv_npu_unset_window, - .take_ownership = pnv_npu_take_ownership, - .release_ownership = pnv_npu_release_ownership, -}; -#endif /* !CONFIG_IOMMU_API */ - -/* - * NPU2 ATS - */ -/* Maximum possible number of ATSD MMIO registers per NPU */ -#define NV_NMMU_ATSD_REGS 8 -#define NV_NPU_MAX_PE_NUM 16 - -/* - * A compound NPU IOMMU group which might consist of 1 GPU + 2xNPUs (POWER8) or - * up to 3 x (GPU + 2xNPUs) (POWER9). 
- */ -struct npu_comp { - struct iommu_table_group table_group; - int pe_num; - struct pnv_ioda_pe *pe[NV_NPU_MAX_PE_NUM]; -}; - -/* An NPU descriptor, valid for POWER9 only */ -struct npu { - int index; - struct npu_comp npucomp; -}; - -#ifdef CONFIG_IOMMU_API -static long pnv_npu_peers_create_table_userspace( - struct iommu_table_group *table_group, - int num, __u32 page_shift, __u64 window_size, __u32 levels, - struct iommu_table **ptbl) -{ - struct npu_comp *npucomp = container_of(table_group, struct npu_comp, - table_group); - - if (!npucomp->pe_num || !npucomp->pe[0] || - !npucomp->pe[0]->table_group.ops || - !npucomp->pe[0]->table_group.ops->create_table) - return -EFAULT; - - return npucomp->pe[0]->table_group.ops->create_table( - &npucomp->pe[0]->table_group, num, page_shift, - window_size, levels, ptbl); -} - -static long pnv_npu_peers_set_window(struct iommu_table_group *table_group, - int num, struct iommu_table *tbl) -{ - int i, j; - long ret = 0; - struct npu_comp *npucomp = container_of(table_group, struct npu_comp, - table_group); - - for (i = 0; i < npucomp->pe_num; ++i) { - struct pnv_ioda_pe *pe = npucomp->pe[i]; - - if (!pe->table_group.ops->set_window) - continue; - - ret = pe->table_group.ops->set_window(&pe->table_group, - num, tbl); - if (ret) - break; - } - - if (ret) { - for (j = 0; j < i; ++j) { - struct pnv_ioda_pe *pe = npucomp->pe[j]; - - if (!pe->table_group.ops->unset_window) - continue; - - ret = pe->table_group.ops->unset_window( - &pe->table_group, num); - if (ret) - break; - } - } else { - table_group->tables[num] = iommu_tce_table_get(tbl); - } - - return ret; -} - -static long pnv_npu_peers_unset_window(struct iommu_table_group *table_group, - int num) -{ - int i, j; - long ret = 0; - struct npu_comp *npucomp = container_of(table_group, struct npu_comp, - table_group); - - for (i = 0; i < npucomp->pe_num; ++i) { - struct pnv_ioda_pe *pe = npucomp->pe[i]; - - WARN_ON(npucomp->table_group.tables[num] != - table_group->tables[num]); - if (!npucomp->table_group.tables[num]) - continue; - - if (!pe->table_group.ops->unset_window) - continue; - - ret = pe->table_group.ops->unset_window(&pe->table_group, num); - if (ret) - break; - } - - if (ret) { - for (j = 0; j < i; ++j) { - struct pnv_ioda_pe *pe = npucomp->pe[j]; - - if (!npucomp->table_group.tables[num]) - continue; - - if (!pe->table_group.ops->set_window) - continue; - - ret = pe->table_group.ops->set_window(&pe->table_group, - num, table_group->tables[num]); - if (ret) - break; - } - } else if (table_group->tables[num]) { - iommu_tce_table_put(table_group->tables[num]); - table_group->tables[num] = NULL; - } - - return ret; -} - -static void pnv_npu_peers_take_ownership(struct iommu_table_group *table_group) -{ - int i; - struct npu_comp *npucomp = container_of(table_group, struct npu_comp, - table_group); - - for (i = 0; i < npucomp->pe_num; ++i) { - struct pnv_ioda_pe *pe = npucomp->pe[i]; - - if (!pe->table_group.ops || - !pe->table_group.ops->take_ownership) - continue; - pe->table_group.ops->take_ownership(&pe->table_group); - } -} - -static void pnv_npu_peers_release_ownership( - struct iommu_table_group *table_group) -{ - int i; - struct npu_comp *npucomp = container_of(table_group, struct npu_comp, - table_group); - - for (i = 0; i < npucomp->pe_num; ++i) { - struct pnv_ioda_pe *pe = npucomp->pe[i]; - - if (!pe->table_group.ops || - !pe->table_group.ops->release_ownership) - continue; - pe->table_group.ops->release_ownership(&pe->table_group); - } -} - -static struct iommu_table_group_ops 
pnv_npu_peers_ops = { - .get_table_size = pnv_pci_ioda2_get_table_size, - .create_table = pnv_npu_peers_create_table_userspace, - .set_window = pnv_npu_peers_set_window, - .unset_window = pnv_npu_peers_unset_window, - .take_ownership = pnv_npu_peers_take_ownership, - .release_ownership = pnv_npu_peers_release_ownership, -}; - -static void pnv_comp_attach_table_group(struct npu_comp *npucomp, - struct pnv_ioda_pe *pe) -{ - if (WARN_ON(npucomp->pe_num == NV_NPU_MAX_PE_NUM)) - return; - - npucomp->pe[npucomp->pe_num] = pe; - ++npucomp->pe_num; -} - -static struct iommu_table_group * - pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe) -{ - struct iommu_table_group *compound_group; - struct npu_comp *npucomp; - struct pci_dev *gpdev = NULL; - struct pci_controller *hose; - struct pci_dev *npdev = NULL; - - list_for_each_entry(gpdev, &pe->pbus->devices, bus_list) { - npdev = pnv_pci_get_npu_dev(gpdev, 0); - if (npdev) - break; - } - - if (!npdev) - /* It is not an NPU attached device, skip */ - return NULL; - - hose = pci_bus_to_host(npdev->bus); - - if (hose->npu) { - /* P9 case: compound group is per-NPU (all gpus, all links) */ - npucomp = &hose->npu->npucomp; - } else { - /* P8 case: Compound group is per-GPU (1 gpu, 2 links) */ - npucomp = pe->npucomp = kzalloc(sizeof(*npucomp), GFP_KERNEL); - } - - compound_group = &npucomp->table_group; - if (!compound_group->group) { - compound_group->ops = &pnv_npu_peers_ops; - iommu_register_group(compound_group, hose->global_number, - pe->pe_number); - - /* Steal capabilities from a GPU PE */ - compound_group->max_dynamic_windows_supported = - pe->table_group.max_dynamic_windows_supported; - compound_group->tce32_start = pe->table_group.tce32_start; - compound_group->tce32_size = pe->table_group.tce32_size; - compound_group->max_levels = pe->table_group.max_levels; - if (!compound_group->pgsizes) - compound_group->pgsizes = pe->table_group.pgsizes; - } - - /* - * The gpu would have been added to the iommu group that's created - * for the PE. Pull it out now. - */ - iommu_del_device(&gpdev->dev); - - /* - * I'm not sure this is strictly required, but it's probably a good idea - * since the table_group for the PE is going to be attached to the - * compound table group. If we leave the PE's iommu group active then - * we might have the same table_group being modifiable via two sepeate - * iommu groups. - */ - iommu_group_put(pe->table_group.group); - - /* now put the GPU into the compound group */ - pnv_comp_attach_table_group(npucomp, pe); - iommu_add_device(compound_group, &gpdev->dev); - - return compound_group; -} - -static struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe) -{ - struct iommu_table_group *table_group; - struct npu_comp *npucomp; - struct pci_dev *gpdev = NULL; - struct pci_dev *npdev; - struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(pe, &gpdev); - - WARN_ON(!(pe->flags & PNV_IODA_PE_DEV)); - if (!gpe) - return NULL; - - /* - * IODA2 bridges get this set up from pci_controller_ops::setup_bridge - * but NPU bridges do not have this hook defined so we do it here. - * We do not setup other table group parameters as they won't be used - * anyway - NVLink bridges are subordinate PEs. - */ - pe->table_group.ops = &pnv_pci_npu_ops; - - table_group = iommu_group_get_iommudata( - iommu_group_get(&gpdev->dev)); - - /* - * On P9 NPU PHB and PCI PHB support different page sizes, - * keep only matching. We expect here that NVLink bridge PE pgsizes is - * initialized by the caller. 
- */ - table_group->pgsizes &= pe->table_group.pgsizes; - npucomp = container_of(table_group, struct npu_comp, table_group); - pnv_comp_attach_table_group(npucomp, pe); - - list_for_each_entry(npdev, &pe->phb->hose->bus->devices, bus_list) { - struct pci_dev *gpdevtmp = pnv_pci_get_gpu_dev(npdev); - - if (gpdevtmp != gpdev) - continue; - - iommu_add_device(table_group, &npdev->dev); - } - - return table_group; -} - -void pnv_pci_npu_setup_iommu_groups(void) -{ - struct pci_controller *hose; - struct pnv_phb *phb; - struct pnv_ioda_pe *pe; - - /* - * For non-nvlink devices the IOMMU group is registered when the PE is - * configured and devices are added to the group when the per-device - * DMA setup is run. That's done in hose->ops.dma_dev_setup() which is - * only initialise for "normal" IODA PHBs. - * - * For NVLink devices we need to ensure the NVLinks and the GPU end up - * in the same IOMMU group, so that's handled here. - */ - list_for_each_entry(hose, &hose_list, list_node) { - phb = hose->private_data; - - if (phb->type == PNV_PHB_IODA2) - list_for_each_entry(pe, &phb->ioda.pe_list, list) - pnv_try_setup_npu_table_group(pe); - } - - /* - * Now we have all PHBs discovered, time to add NPU devices to - * the corresponding IOMMU groups. - */ - list_for_each_entry(hose, &hose_list, list_node) { - unsigned long pgsizes; - - phb = hose->private_data; - - if (phb->type != PNV_PHB_NPU_NVLINK) - continue; - - pgsizes = pnv_ioda_parse_tce_sizes(phb); - list_for_each_entry(pe, &phb->ioda.pe_list, list) { - /* - * IODA2 bridges get this set up from - * pci_controller_ops::setup_bridge but NPU bridges - * do not have this hook defined so we do it here. - */ - pe->table_group.pgsizes = pgsizes; - pnv_npu_compound_attach(pe); - } - } -} -#endif /* CONFIG_IOMMU_API */ - -int pnv_npu2_init(struct pci_controller *hose) -{ - static int npu_index; - struct npu *npu; - int ret; - - npu = kzalloc(sizeof(*npu), GFP_KERNEL); - if (!npu) - return -ENOMEM; - - npu_index++; - if (WARN_ON(npu_index >= NV_MAX_NPUS)) { - ret = -ENOSPC; - goto fail_exit; - } - npu->index = npu_index; - hose->npu = npu; - - return 0; - -fail_exit: - kfree(npu); - return ret; -} - -int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid, - unsigned long msr) -{ - int ret; - struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0); - struct pci_controller *hose; - struct pnv_phb *nphb; - - if (!npdev) - return -ENODEV; - - hose = pci_bus_to_host(npdev->bus); - if (hose->npu == NULL) { - dev_info_once(&npdev->dev, "Nvlink1 does not support contexts"); - return 0; - } - - nphb = hose->private_data; - - dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=%u\n", - nphb->opal_id, lparid); - /* - * Currently we only support radix and non-zero LPCR only makes sense - * for hash tables so skiboot expects the LPCR parameter to be a zero. 
- */ - ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), lparid, - 0 /* LPCR bits */); - if (ret) { - dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret); - return ret; - } - - dev_dbg(&gpdev->dev, "init context opalid=%llu msr=%lx\n", - nphb->opal_id, msr); - ret = opal_npu_init_context(nphb->opal_id, 0/*__unused*/, msr, - pci_dev_id(gpdev)); - if (ret < 0) - dev_err(&gpdev->dev, "Failed to init context: %d\n", ret); - else - ret = 0; - - return 0; -} -EXPORT_SYMBOL_GPL(pnv_npu2_map_lpar_dev); - -void pnv_npu2_map_lpar(struct pnv_ioda_pe *gpe, unsigned long msr) -{ - struct pci_dev *gpdev; - - list_for_each_entry(gpdev, &gpe->pbus->devices, bus_list) - pnv_npu2_map_lpar_dev(gpdev, 0, msr); -} - -int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev) -{ - int ret; - struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0); - struct pci_controller *hose; - struct pnv_phb *nphb; - - if (!npdev) - return -ENODEV; - - hose = pci_bus_to_host(npdev->bus); - if (hose->npu == NULL) { - dev_info_once(&npdev->dev, "Nvlink1 does not support contexts"); - return 0; - } - - nphb = hose->private_data; - - dev_dbg(&gpdev->dev, "destroy context opalid=%llu\n", - nphb->opal_id); - ret = opal_npu_destroy_context(nphb->opal_id, 0/*__unused*/, - pci_dev_id(gpdev)); - if (ret < 0) { - dev_err(&gpdev->dev, "Failed to destroy context: %d\n", ret); - return ret; - } - - /* Set LPID to 0 anyway, just to be safe */ - dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=0\n", nphb->opal_id); - ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), 0 /*LPID*/, - 0 /* LPCR bits */); - if (ret) - dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret); - - return ret; -} -EXPORT_SYMBOL_GPL(pnv_npu2_unmap_lpar_dev); diff --git a/arch/powerpc/platforms/powernv/opal-call.c b/arch/powerpc/platforms/powernv/opal-call.c index 5cd0f52d258f..01401e3da7ca 100644 --- a/arch/powerpc/platforms/powernv/opal-call.c +++ b/arch/powerpc/platforms/powernv/opal-call.c @@ -267,8 +267,6 @@ OPAL_CALL(opal_xive_get_queue_state, OPAL_XIVE_GET_QUEUE_STATE); OPAL_CALL(opal_xive_set_queue_state, OPAL_XIVE_SET_QUEUE_STATE); OPAL_CALL(opal_xive_get_vp_state, OPAL_XIVE_GET_VP_STATE); OPAL_CALL(opal_signal_system_reset, OPAL_SIGNAL_SYSTEM_RESET); -OPAL_CALL(opal_npu_init_context, OPAL_NPU_INIT_CONTEXT); -OPAL_CALL(opal_npu_destroy_context, OPAL_NPU_DESTROY_CONTEXT); OPAL_CALL(opal_npu_map_lpar, OPAL_NPU_MAP_LPAR); OPAL_CALL(opal_imc_counters_init, OPAL_IMC_COUNTERS_INIT); OPAL_CALL(opal_imc_counters_start, OPAL_IMC_COUNTERS_START); diff --git a/arch/powerpc/platforms/powernv/opal-core.c b/arch/powerpc/platforms/powernv/opal-core.c index 0d9ba70f7251..5b9736bbc2aa 100644 --- a/arch/powerpc/platforms/powernv/opal-core.c +++ b/arch/powerpc/platforms/powernv/opal-core.c @@ -71,7 +71,7 @@ static LIST_HEAD(opalcore_list); static struct opalcore_config *oc_conf; static const struct opal_mpipl_fadump *opalc_metadata; static const struct opal_mpipl_fadump *opalc_cpu_metadata; -struct kobject *mpipl_kobj; +static struct kobject *mpipl_kobj; /* * Set crashing CPU's signal to SIGUSR1. 
if the kernel is triggered diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c index deddaebf8c14..a191f4c60ce7 100644 --- a/arch/powerpc/platforms/powernv/opal-prd.c +++ b/arch/powerpc/platforms/powernv/opal-prd.c @@ -105,7 +105,6 @@ static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma) { size_t addr, size; pgprot_t page_prot; - int rc; pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n", vma->vm_start, vma->vm_end, vma->vm_pgoff, @@ -121,10 +120,8 @@ static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma) page_prot = phys_mem_access_prot(file, vma->vm_pgoff, size, vma->vm_page_prot); - rc = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size, + return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size, page_prot); - - return rc; } static bool opal_msg_queue_empty(void) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index f0f901683a2f..7de464679292 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -47,8 +47,7 @@ #define PNV_IODA1_M64_SEGS 8 /* Segments per M64 BAR */ #define PNV_IODA1_DMA32_SEGSIZE 0x10000000 -static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_NVLINK", - "NPU_OCAPI" }; +static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_OCAPI" }; static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable); static void pnv_pci_configure_bus(struct pci_bus *bus); @@ -192,8 +191,6 @@ void pnv_ioda_free_pe(struct pnv_ioda_pe *pe) unsigned int pe_num = pe->pe_number; WARN_ON(pe->pdev); - WARN_ON(pe->npucomp); /* NPUs for nvlink are not supposed to be freed */ - kfree(pe->npucomp); memset(pe, 0, sizeof(struct pnv_ioda_pe)); mutex_lock(&phb->ioda.pe_alloc_mutex); @@ -875,7 +872,7 @@ int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) * Release from all parents PELT-V. NPUs don't have a PELTV * table */ - if (phb->type != PNV_PHB_NPU_NVLINK && phb->type != PNV_PHB_NPU_OCAPI) + if (phb->type != PNV_PHB_NPU_OCAPI) pnv_ioda_unset_peltv(phb, pe, parent); rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid, @@ -946,7 +943,7 @@ int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) * Configure PELTV. NPUs don't have a PELTV table so skip * configuration on them. */ - if (phb->type != PNV_PHB_NPU_NVLINK && phb->type != PNV_PHB_NPU_OCAPI) + if (phb->type != PNV_PHB_NPU_OCAPI) pnv_ioda_set_peltv(phb, pe, true); /* Setup reverse map */ @@ -1002,8 +999,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev) /* NOTE: We don't get a reference for the pointer in the PE * data structure, both the device and PE structures should be - * destroyed at the same time. However, removing nvlink - * devices will need some work. + * destroyed at the same time. 
* * At some point we want to remove the PDN completely anyways */ @@ -1099,113 +1095,6 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all) return pe; } -static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev) -{ - int pe_num, found_pe = false, rc; - long rid; - struct pnv_ioda_pe *pe; - struct pci_dev *gpu_pdev; - struct pci_dn *npu_pdn; - struct pnv_phb *phb = pci_bus_to_pnvhb(npu_pdev->bus); - - /* - * Intentionally leak a reference on the npu device (for - * nvlink only; this is not an opencapi path) to make sure it - * never goes away, as it's been the case all along and some - * work is needed otherwise. - */ - pci_dev_get(npu_pdev); - - /* - * Due to a hardware errata PE#0 on the NPU is reserved for - * error handling. This means we only have three PEs remaining - * which need to be assigned to four links, implying some - * links must share PEs. - * - * To achieve this we assign PEs such that NPUs linking the - * same GPU get assigned the same PE. - */ - gpu_pdev = pnv_pci_get_gpu_dev(npu_pdev); - for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) { - pe = &phb->ioda.pe_array[pe_num]; - if (!pe->pdev) - continue; - - if (pnv_pci_get_gpu_dev(pe->pdev) == gpu_pdev) { - /* - * This device has the same peer GPU so should - * be assigned the same PE as the existing - * peer NPU. - */ - dev_info(&npu_pdev->dev, - "Associating to existing PE %x\n", pe_num); - npu_pdn = pci_get_pdn(npu_pdev); - rid = npu_pdev->bus->number << 8 | npu_pdn->devfn; - npu_pdn->pe_number = pe_num; - phb->ioda.pe_rmap[rid] = pe->pe_number; - pe->device_count++; - - /* Map the PE to this link */ - rc = opal_pci_set_pe(phb->opal_id, pe_num, rid, - OpalPciBusAll, - OPAL_COMPARE_RID_DEVICE_NUMBER, - OPAL_COMPARE_RID_FUNCTION_NUMBER, - OPAL_MAP_PE); - WARN_ON(rc != OPAL_SUCCESS); - found_pe = true; - break; - } - } - - if (!found_pe) - /* - * Could not find an existing PE so allocate a new - * one. 
- */ - return pnv_ioda_setup_dev_PE(npu_pdev); - else - return pe; -} - -static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus) -{ - struct pci_dev *pdev; - - list_for_each_entry(pdev, &bus->devices, bus_list) - pnv_ioda_setup_npu_PE(pdev); -} - -static void pnv_pci_ioda_setup_nvlink(void) -{ - struct pci_controller *hose; - struct pnv_phb *phb; - struct pnv_ioda_pe *pe; - - list_for_each_entry(hose, &hose_list, list_node) { - phb = hose->private_data; - if (phb->type == PNV_PHB_NPU_NVLINK) { - /* PE#0 is needed for error reporting */ - pnv_ioda_reserve_pe(phb, 0); - pnv_ioda_setup_npu_PEs(hose->bus); - if (phb->model == PNV_PHB_MODEL_NPU2) - WARN_ON_ONCE(pnv_npu2_init(hose)); - } - } - list_for_each_entry(hose, &hose_list, list_node) { - phb = hose->private_data; - if (phb->type != PNV_PHB_IODA2) - continue; - - list_for_each_entry(pe, &phb->ioda.pe_list, list) - pnv_npu2_map_lpar(pe, MSR_DR | MSR_PR | MSR_HV); - } - -#ifdef CONFIG_IOMMU_API - /* setup iommu groups so we can do nvlink pass-thru */ - pnv_pci_npu_setup_iommu_groups(); -#endif -} - static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe); @@ -1468,18 +1357,6 @@ static struct iommu_table_ops pnv_ioda1_iommu_ops = { #define PHB3_TCE_KILL_INVAL_PE PPC_BIT(1) #define PHB3_TCE_KILL_INVAL_ONE PPC_BIT(2) -static void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm) -{ - __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(phb, rm); - const unsigned long val = PHB3_TCE_KILL_INVAL_ALL; - - mb(); /* Ensure previous TCE table stores are visible */ - if (rm) - __raw_rm_writeq_be(val, invalidate); - else - __raw_writeq_be(val, invalidate); -} - static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe) { /* 01xb - invalidate TCEs that match the specified PE# */ @@ -1539,20 +1416,6 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl, struct pnv_phb *phb = pe->phb; unsigned int shift = tbl->it_page_shift; - /* - * NVLink1 can use the TCE kill register directly as - * it's the same as PHB3. NVLink2 is different and - * should go via the OPAL call. - */ - if (phb->model == PNV_PHB_MODEL_NPU) { - /* - * The NVLink hardware does not support TCE kill - * per TCE entry so we have to invalidate - * the entire cache for it. 
- */ - pnv_pci_phb3_tce_invalidate_entire(phb, rm); - continue; - } if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs) pnv_pci_phb3_tce_invalidate(pe, rm, shift, index, npages); @@ -1564,14 +1427,6 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl, } } -void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm) -{ - if (phb->model == PNV_PHB_MODEL_NPU || phb->model == PNV_PHB_MODEL_PHB3) - pnv_pci_phb3_tce_invalidate_entire(phb, rm); - else - opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL, 0, 0, 0, 0); -} - static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, @@ -1762,7 +1617,8 @@ found: tbl->it_ops = &pnv_ioda1_iommu_ops; pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift; pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift; - iommu_init_table(tbl, phb->hose->node, 0, 0); + if (!iommu_init_table(tbl, phb->hose->node, 0, 0)) + panic("Failed to initialize iommu table"); pe->dma_setup_done = true; return; @@ -1930,16 +1786,16 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe) res_start = pe->phb->ioda.m32_pci_base >> tbl->it_page_shift; res_end = min(window_size, SZ_4G) >> tbl->it_page_shift; } - iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end); - rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl); + if (iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end)) + rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl); + else + rc = -ENOMEM; if (rc) { - pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n", - rc); + pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n", rc); iommu_tce_table_put(tbl); - return rc; + tbl = NULL; /* This clears iommu_table_base below */ } - if (!pnv_iommu_bypass_disabled) pnv_pci_ioda2_set_bypass(pe, true); @@ -2450,7 +2306,6 @@ static void pnv_pci_enable_bridges(void) static void pnv_pci_ioda_fixup(void) { - pnv_pci_ioda_setup_nvlink(); pnv_pci_ioda_create_dbgfs(); pnv_pci_enable_bridges(); @@ -2823,15 +2678,6 @@ static void pnv_pci_release_device(struct pci_dev *pdev) pnv_ioda_release_pe(pe); } -static void pnv_npu_disable_device(struct pci_dev *pdev) -{ - struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev); - struct eeh_pe *eehpe = edev ? 
edev->pe : NULL; - - if (eehpe && eeh_ops && eeh_ops->reset) - eeh_ops->reset(eehpe, EEH_RESET_HOT); -} - static void pnv_pci_ioda_shutdown(struct pci_controller *hose) { struct pnv_phb *phb = hose->private_data; @@ -2873,16 +2719,6 @@ static const struct pci_controller_ops pnv_pci_ioda_controller_ops = { .shutdown = pnv_pci_ioda_shutdown, }; -static const struct pci_controller_ops pnv_npu_ioda_controller_ops = { - .setup_msi_irqs = pnv_setup_msi_irqs, - .teardown_msi_irqs = pnv_teardown_msi_irqs, - .enable_device_hook = pnv_pci_enable_device_hook, - .window_alignment = pnv_pci_window_alignment, - .reset_secondary_bus = pnv_pci_reset_secondary_bus, - .shutdown = pnv_pci_ioda_shutdown, - .disable_device = pnv_npu_disable_device, -}; - static const struct pci_controller_ops pnv_npu_ocapi_ioda_controller_ops = { .enable_device_hook = pnv_ocapi_enable_device_hook, .release_device = pnv_pci_release_device, @@ -2956,10 +2792,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, phb->model = PNV_PHB_MODEL_P7IOC; else if (of_device_is_compatible(np, "ibm,power8-pciex")) phb->model = PNV_PHB_MODEL_PHB3; - else if (of_device_is_compatible(np, "ibm,power8-npu-pciex")) - phb->model = PNV_PHB_MODEL_NPU; - else if (of_device_is_compatible(np, "ibm,power9-npu-pciex")) - phb->model = PNV_PHB_MODEL_NPU2; else phb->model = PNV_PHB_MODEL_UNKNOWN; @@ -3117,9 +2949,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, ppc_md.pcibios_fixup = pnv_pci_ioda_fixup; switch (phb->type) { - case PNV_PHB_NPU_NVLINK: - hose->controller_ops = pnv_npu_ioda_controller_ops; - break; case PNV_PHB_NPU_OCAPI: hose->controller_ops = pnv_npu_ocapi_ioda_controller_ops; break; @@ -3172,11 +3001,6 @@ void __init pnv_pci_init_ioda2_phb(struct device_node *np) pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2); } -void __init pnv_pci_init_npu_phb(struct device_node *np) -{ - pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU_NVLINK); -} - void __init pnv_pci_init_npu2_opencapi_phb(struct device_node *np) { pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU_OCAPI); diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 9b9bca169275..b18468dc31ff 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -926,17 +926,6 @@ void __init pnv_pci_init(void) for_each_compatible_node(np, NULL, "ibm,ioda3-phb") pnv_pci_init_ioda2_phb(np); - /* Look for NPU PHBs */ - for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb") - pnv_pci_init_npu_phb(np); - - /* - * Look for NPU2 PHBs which we treat mostly as NPU PHBs with - * the exception of TCE kill which requires an OPAL call. 
- */ - for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-phb") - pnv_pci_init_npu_phb(np); - /* Look for NPU2 OpenCAPI PHBs */ for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb") pnv_pci_init_npu2_opencapi_phb(np); diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 36d22920f5a3..c8d4f222a86f 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -10,10 +10,9 @@ struct pci_dn; enum pnv_phb_type { - PNV_PHB_IODA1 = 0, - PNV_PHB_IODA2 = 1, - PNV_PHB_NPU_NVLINK = 2, - PNV_PHB_NPU_OCAPI = 3, + PNV_PHB_IODA1, + PNV_PHB_IODA2, + PNV_PHB_NPU_OCAPI, }; /* Precise PHB model for error management */ @@ -21,8 +20,6 @@ enum pnv_phb_model { PNV_PHB_MODEL_UNKNOWN, PNV_PHB_MODEL_P7IOC, PNV_PHB_MODEL_PHB3, - PNV_PHB_MODEL_NPU, - PNV_PHB_MODEL_NPU2, }; #define PNV_PCI_DIAG_BUF_SIZE 8192 @@ -81,7 +78,6 @@ struct pnv_ioda_pe { /* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */ struct iommu_table_group table_group; - struct npu_comp *npucomp; /* 64-bit TCE bypass region */ bool tce_bypass_enabled; @@ -289,9 +285,7 @@ extern struct iommu_table *pnv_pci_table_alloc(int nid); extern void pnv_pci_init_ioda_hub(struct device_node *np); extern void pnv_pci_init_ioda2_phb(struct device_node *np); -extern void pnv_pci_init_npu_phb(struct device_node *np); extern void pnv_pci_init_npu2_opencapi_phb(struct device_node *np); -extern void pnv_npu2_map_lpar(struct pnv_ioda_pe *gpe, unsigned long msr); extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev); extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option); @@ -314,11 +308,6 @@ extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level, #define pe_info(pe, fmt, ...) \ pe_level_printk(pe, KERN_INFO, fmt, ##__VA_ARGS__) -/* Nvlink functions */ -extern void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass); -extern void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm); -extern void pnv_pci_npu_setup_iommu_groups(void); - /* pci-ioda-tce.c */ #define POWERNV_IOMMU_DEFAULT_LEVELS 2 #define POWERNV_IOMMU_MAX_LEVELS 5 diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index aadf932c4e61..a8db3f153063 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -157,7 +157,7 @@ static void __init pnv_check_guarded_cores(void) for_each_node_by_type(dn, "cpu") { if (of_property_match_string(dn, "status", "bad") >= 0) bad_count++; - }; + } if (bad_count) { printk(" _ _______________\n"); diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 233503fcf8f0..3ac70790ec7a 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -329,6 +329,20 @@ int dlpar_release_drc(u32 drc_index) return 0; } +int dlpar_unisolate_drc(u32 drc_index) +{ + int dr_status, rc; + + rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status, + DR_ENTITY_SENSE, drc_index); + if (rc || dr_status != DR_ENTITY_PRESENT) + return -1; + + rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE); + + return 0; +} + int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) { int rc; diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 12cbffd3c2e3..7e970f81d8ff 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -47,9 +47,6 @@ static void rtas_stop_self(void) 
BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE); - printk("cpu %u (hwid %u) Ready to die...\n", - smp_processor_id(), hard_smp_processor_id()); - rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL); panic("Alas, I survived.\n"); @@ -271,6 +268,19 @@ static int dlpar_offline_cpu(struct device_node *dn) if (!cpu_online(cpu)) break; + /* + * device_offline() will return -EBUSY (via cpu_down()) if there + * is only one CPU left. Check it here to fail earlier and with a + * more informative error message, while also retaining the + * cpu_add_remove_lock to be sure that no CPUs are being + * online/offlined during this check. + */ + if (num_online_cpus() == 1) { + pr_warn("Unable to remove last online CPU %pOFn\n", dn); + rc = -EBUSY; + goto out_unlock; + } + cpu_maps_update_done(); rc = device_offline(get_cpu_device(cpu)); if (rc) @@ -283,6 +293,7 @@ static int dlpar_offline_cpu(struct device_node *dn) thread); } } +out_unlock: cpu_maps_update_done(); out: @@ -802,8 +813,16 @@ int dlpar_cpu(struct pseries_hp_errorlog *hp_elog) case PSERIES_HP_ELOG_ACTION_REMOVE: if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) rc = dlpar_cpu_remove_by_count(count); - else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) + else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) { rc = dlpar_cpu_remove_by_index(drc_index); + /* + * Setting the isolation state of an UNISOLATED/CONFIGURED + * device to UNISOLATE is a no-op, but the hypervisor can + * use it as a hint that the CPU removal failed. + */ + if (rc) + dlpar_unisolate_drc(drc_index); + } else rc = -EINVAL; break; diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c index 2c59b4986ea5..3a50612a78db 100644 --- a/arch/powerpc/platforms/pseries/hvCall_inst.c +++ b/arch/powerpc/platforms/pseries/hvCall_inst.c @@ -26,7 +26,7 @@ struct hcall_stats { }; #define HCALL_STAT_ARRAY_SIZE ((MAX_HCALL_OPCODE >> 2) + 1) -DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats); +static DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats); /* * Routines for displaying the statistics in debugfs diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 9fc5217f0c8e..0c55b991f665 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -638,7 +638,8 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus) iommu_table_setparms(pci->phb, dn, tbl); tbl->it_ops = &iommu_table_pseries_ops; - iommu_init_table(tbl, pci->phb->node, 0, 0); + if (!iommu_init_table(tbl, pci->phb->node, 0, 0)) + panic("Failed to initialize iommu table"); /* Divide the rest (1.75GB) among the children */ pci->phb->dma_window_size = 0x80000000ul; @@ -720,7 +721,8 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) iommu_table_setparms_lpar(ppci->phb, pdn, tbl, ppci->table_group, dma_window); tbl->it_ops = &iommu_table_lpar_multi_ops; - iommu_init_table(tbl, ppci->phb->node, 0, 0); + if (!iommu_init_table(tbl, ppci->phb->node, 0, 0)) + panic("Failed to initialize iommu table"); iommu_register_group(ppci->table_group, pci_domain_nr(bus), 0); pr_debug(" created table: %p\n", ppci->table_group); @@ -749,7 +751,9 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev) tbl = PCI_DN(dn)->table_group->tables[0]; iommu_table_setparms(phb, dn, tbl); tbl->it_ops = &iommu_table_pseries_ops; - iommu_init_table(tbl, phb->node, 0, 0); + if (!iommu_init_table(tbl, phb->node, 0, 0)) + panic("Failed to 
initialize iommu table"); + set_iommu_table_base(&dev->dev, tbl); return; } @@ -1099,6 +1103,33 @@ static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn) ret); } +/* Return largest page shift based on "IO Page Sizes" output of ibm,query-pe-dma-window. */ +static int iommu_get_page_shift(u32 query_page_size) +{ + /* Supported IO page-sizes according to LoPAR */ + const int shift[] = { + __builtin_ctzll(SZ_4K), __builtin_ctzll(SZ_64K), __builtin_ctzll(SZ_16M), + __builtin_ctzll(SZ_32M), __builtin_ctzll(SZ_64M), __builtin_ctzll(SZ_128M), + __builtin_ctzll(SZ_256M), __builtin_ctzll(SZ_16G) + }; + + int i = ARRAY_SIZE(shift) - 1; + + /* + * On LoPAR, ibm,query-pe-dma-window outputs "IO Page Sizes" using a bit field: + * - bit 31 means 4k pages are supported, + * - bit 30 means 64k pages are supported, and so on. + * Larger pagesizes map more memory with the same amount of TCEs, so start probing them. + */ + for (; i >= 0 ; i--) { + if (query_page_size & (1 << i)) + return shift[i]; + } + + /* No valid page size found. */ + return 0; +} + /* * If the PE supports dynamic dma windows, and there is space for a table * that can map all pages in a linear offset, then setup such a table, @@ -1206,13 +1237,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) goto out_failed; } } - if (query.page_size & 4) { - page_shift = 24; /* 16MB */ - } else if (query.page_size & 2) { - page_shift = 16; /* 64kB */ - } else if (query.page_size & 1) { - page_shift = 12; /* 4kB */ - } else { + + page_shift = iommu_get_page_shift(query.page_size); + if (!page_shift) { dev_dbg(&dev->dev, "no supported direct page size in mask %x", query.page_size); goto out_failed; @@ -1229,7 +1256,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) if (pmem_present) { if (query.largest_available_block >= (1ULL << (MAX_PHYSMEM_BITS - page_shift))) - len = MAX_PHYSMEM_BITS - page_shift; + len = MAX_PHYSMEM_BITS; else dev_info(&dev->dev, "Skipping ibm,pmemory"); } diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 764170fdb0f7..1f3152ad7213 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -887,7 +887,8 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot, want_v = hpte_encode_avpn(vpn, psize, ssize); - flags = (newpp & 7) | H_AVPN; + flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN; + flags |= (newpp & HPTE_R_KEY_HI) >> 48; if (mmu_has_feature(MMU_FTR_KERNEL_RO)) /* Move pp0 into bit 8 (IBM 55) */ flags |= (newpp & HPTE_R_PP0) >> 55; @@ -976,11 +977,13 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp, slot = pSeries_lpar_hpte_find(vpn, psize, ssize); BUG_ON(slot == -1); - flags = newpp & 7; + flags = newpp & (HPTE_R_PP | HPTE_R_N); if (mmu_has_feature(MMU_FTR_KERNEL_RO)) /* Move pp0 into bit 8 (IBM 55) */ flags |= (newpp & HPTE_R_PP0) >> 55; + flags |= ((newpp & HPTE_R_KEY_HI) >> 48) | (newpp & HPTE_R_KEY_LO); + lpar_rc = plpar_pte_protect(flags, slot, 0); BUG_ON(lpar_rc != H_SUCCESS); @@ -1629,7 +1632,7 @@ static int pseries_lpar_resize_hpt(unsigned long shift) } msleep(delay); rc = plpar_resize_hpt_prepare(0, shift); - }; + } switch (rc) { case H_SUCCESS: diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c index e278390ab28d..f71eac74ea92 100644 --- a/arch/powerpc/platforms/pseries/lparcfg.c +++ b/arch/powerpc/platforms/pseries/lparcfg.c @@ -537,6 +537,8 @@ static int 
pseries_lparcfg_data(struct seq_file *m, void *v) parse_em_data(m); maxmem_data(m); + seq_printf(m, "security_flavor=%u\n", pseries_security_flavor); + return 0; } diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index ea4d6a660e0d..e83e0891272d 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -452,12 +452,28 @@ static int do_suspend(void) return ret; } +/** + * struct pseries_suspend_info - State shared between CPUs for join/suspend. + * @counter: Threads are to increment this upon resuming from suspend + * or if an error is received from H_JOIN. The thread which performs + * the first increment (i.e. sets it to 1) is responsible for + * waking the other threads. + * @done: False if join/suspend is in progress. True if the operation is + * complete (successful or not). + */ +struct pseries_suspend_info { + atomic_t counter; + bool done; +}; + static int do_join(void *arg) { - atomic_t *counter = arg; + struct pseries_suspend_info *info = arg; + atomic_t *counter = &info->counter; long hvrc; int ret; +retry: /* Must ensure MSR.EE off for H_JOIN. */ hard_irq_disable(); hvrc = plpar_hcall_norets(H_JOIN); @@ -473,8 +489,20 @@ static int do_join(void *arg) case H_SUCCESS: /* * The suspend is complete and this cpu has received a - * prod. + * prod, or we've received a stray prod from unrelated + * code (e.g. paravirt spinlocks) and we need to join + * again. + * + * This barrier orders the return from H_JOIN above vs + * the load of info->done. It pairs with the barrier + * in the wakeup/prod path below. */ + smp_mb(); + if (READ_ONCE(info->done) == false) { + pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying", + smp_processor_id()); + goto retry; + } ret = 0; break; case H_BAD_MODE: @@ -488,6 +516,13 @@ static int do_join(void *arg) if (atomic_inc_return(counter) == 1) { pr_info("CPU %u waking all threads\n", smp_processor_id()); + WRITE_ONCE(info->done, true); + /* + * This barrier orders the store to info->done vs subsequent + * H_PRODs to wake the other CPUs. It pairs with the barrier + * in the H_SUCCESS case above. 
+ */ + smp_mb(); prod_others(); } /* @@ -535,11 +570,16 @@ static int pseries_suspend(u64 handle) int ret; while (true) { - atomic_t counter = ATOMIC_INIT(0); + struct pseries_suspend_info info; unsigned long vasi_state; int vasi_err; - ret = stop_machine(do_join, &counter, cpu_online_mask); + info = (struct pseries_suspend_info) { + .counter = ATOMIC_INIT(0), + .done = false, + }; + + ret = stop_machine(do_join, &info, cpu_online_mask); if (ret == 0) break; /* diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index 835163f54244..ef26fe40efb0 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -93,6 +93,7 @@ struct papr_scm_priv { uint64_t block_size; int metadata_size; bool is_volatile; + bool hcall_flush_required; uint64_t bound_addr; @@ -117,6 +118,38 @@ struct papr_scm_priv { size_t stat_buffer_len; }; +static int papr_scm_pmem_flush(struct nd_region *nd_region, + struct bio *bio __maybe_unused) +{ + struct papr_scm_priv *p = nd_region_provider_data(nd_region); + unsigned long ret_buf[PLPAR_HCALL_BUFSIZE], token = 0; + long rc; + + dev_dbg(&p->pdev->dev, "flush drc 0x%x", p->drc_index); + + do { + rc = plpar_hcall(H_SCM_FLUSH, ret_buf, p->drc_index, token); + token = ret_buf[0]; + + /* Check if we are stalled for some time */ + if (H_IS_LONG_BUSY(rc)) { + msleep(get_longbusy_msecs(rc)); + rc = H_BUSY; + } else if (rc == H_BUSY) { + cond_resched(); + } + } while (rc == H_BUSY); + + if (rc) { + dev_err(&p->pdev->dev, "flush error: %ld", rc); + rc = -EIO; + } else { + dev_dbg(&p->pdev->dev, "flush drc 0x%x complete", p->drc_index); + } + + return rc; +} + static LIST_HEAD(papr_nd_regions); static DEFINE_MUTEX(papr_ndr_lock); @@ -914,6 +947,15 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p) dimm_flags = 0; set_bit(NDD_LABELING, &dimm_flags); + /* + * Check if the nvdimm is unarmed. No locking needed as we are still + * initializing. Ignore error encountered if any. 
+ */ + __drc_pmem_query_health(p); + + if (p->health_bitmap & PAPR_PMEM_UNARMED_MASK) + set_bit(NDD_UNARMED, &dimm_flags); + p->nvdimm = nvdimm_create(p->bus, p, papr_nd_attr_groups, dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL); if (!p->nvdimm) { @@ -943,6 +985,11 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p) ndr_desc.num_mappings = 1; ndr_desc.nd_set = &p->nd_set; + if (p->hcall_flush_required) { + set_bit(ND_REGION_ASYNC, &ndr_desc.flags); + ndr_desc.flush = papr_scm_pmem_flush; + } + if (p->is_volatile) p->region = nvdimm_volatile_region_create(p->bus, &ndr_desc); else { @@ -1088,6 +1135,7 @@ static int papr_scm_probe(struct platform_device *pdev) p->block_size = block_size; p->blocks = blocks; p->is_volatile = !of_property_read_bool(dn, "ibm,cache-flush-required"); + p->hcall_flush_required = of_property_read_bool(dn, "ibm,hcall-flush-required"); /* We just need to ensure that set cookies are unique across */ uuid_parse(uuid_str, (uuid_t *) uuid); diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 1bffbd1c9a94..3b6800f774c2 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c @@ -224,8 +224,6 @@ static void __init pSeries_request_regions(void) void __init pSeries_final_fixup(void) { - struct pci_controller *hose; - pSeries_request_regions(); eeh_show_enabled(); @@ -234,27 +232,6 @@ void __init pSeries_final_fixup(void) ppc_md.pcibios_sriov_enable = pseries_pcibios_sriov_enable; ppc_md.pcibios_sriov_disable = pseries_pcibios_sriov_disable; #endif - list_for_each_entry(hose, &hose_list, list_node) { - struct device_node *dn = hose->dn, *nvdn; - - while (1) { - dn = of_find_all_nodes(dn); - if (!dn) - break; - nvdn = of_parse_phandle(dn, "ibm,nvlink", 0); - if (!nvdn) - continue; - if (!of_device_is_compatible(nvdn, "ibm,npu-link")) - continue; - if (!of_device_is_compatible(nvdn->parent, - "ibm,power9-npu")) - continue; -#ifdef CONFIG_PPC_POWERNV - WARN_ON_ONCE(pnv_npu2_init(hose)); -#endif - break; - } - } } /* diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c index f9ae17e8a0f4..a8f9140a24fa 100644 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c @@ -50,6 +50,7 @@ EXPORT_SYMBOL_GPL(init_phb_dynamic); int remove_phb_dynamic(struct pci_controller *phb) { struct pci_bus *b = phb->bus; + struct pci_host_bridge *host_bridge = to_pci_host_bridge(b->bridge); struct resource *res; int rc, i; @@ -76,7 +77,8 @@ int remove_phb_dynamic(struct pci_controller *phb) /* Remove the PCI bus and unregister the bridge device from sysfs */ phb->bus = NULL; pci_remove_bus(b); - device_unregister(b->bridge); + host_bridge->bus = NULL; + device_unregister(&host_bridge->dev); /* Now release the IO resource */ if (res->flags & IORESOURCE_IO) diff --git a/arch/powerpc/platforms/pseries/pmem.c b/arch/powerpc/platforms/pseries/pmem.c index e1dc5d3254df..439ac72c2470 100644 --- a/arch/powerpc/platforms/pseries/pmem.c +++ b/arch/powerpc/platforms/pseries/pmem.c @@ -139,7 +139,7 @@ int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog) return rc; } -const struct of_device_id drc_pmem_match[] = { +static const struct of_device_id drc_pmem_match[] = { { .type = "ibm,persistent-memory", }, {} }; diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index 4fe48c04c6c2..1f051a786fb3 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h @@ 
-43,9 +43,6 @@ extern void pSeries_final_fixup(void); /* Poweron flag used for enabling auto ups restart */ extern unsigned long rtas_poweron_auto; -/* Provided by HVC VIO */ -extern void hvc_vio_init_early(void); - /* Dynamic logical Partitioning/Mobility */ extern void dlpar_free_cc_nodes(struct device_node *); extern void dlpar_free_cc_property(struct property *); @@ -55,6 +52,7 @@ extern int dlpar_attach_node(struct device_node *, struct device_node *); extern int dlpar_detach_node(struct device_node *); extern int dlpar_acquire_drc(u32 drc_index); extern int dlpar_release_drc(u32 drc_index); +extern int dlpar_unisolate_drc(u32 drc_index); void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog); int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_errlog); @@ -111,6 +109,7 @@ static inline unsigned long cmo_get_page_size(void) int dlpar_workqueue_init(void); +extern u32 pseries_security_flavor; void pseries_setup_security_mitigations(void); void pseries_lpar_read_hblkrm_characteristics(void); diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index f8b390a9d9fb..9d4ef65da7f3 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -699,7 +699,7 @@ static int mce_handle_err_virtmode(struct pt_regs *regs, mce_err.error_type = MCE_ERROR_TYPE_DCACHE; break; case MC_ERROR_TYPE_I_CACHE: - mce_err.error_type = MCE_ERROR_TYPE_DCACHE; + mce_err.error_type = MCE_ERROR_TYPE_ICACHE; break; case MC_ERROR_TYPE_UNKNOWN: default: diff --git a/arch/powerpc/platforms/pseries/rtas-fadump.c b/arch/powerpc/platforms/pseries/rtas-fadump.c index 81343908ed33..f8f73b47b107 100644 --- a/arch/powerpc/platforms/pseries/rtas-fadump.c +++ b/arch/powerpc/platforms/pseries/rtas-fadump.c @@ -247,7 +247,7 @@ static inline int rtas_fadump_gpr_index(u64 id) return i; } -void rtas_fadump_set_regval(struct pt_regs *regs, u64 reg_id, u64 reg_val) +static void rtas_fadump_set_regval(struct pt_regs *regs, u64 reg_id, u64 reg_val) { int i; diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 46e1540abc22..754e493b7c05 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -71,6 +71,7 @@ #include <asm/swiotlb.h> #include <asm/svm.h> #include <asm/dtl.h> +#include <asm/hvconsole.h> #include "pseries.h" #include "../../../../drivers/pci/pci.h" @@ -85,6 +86,7 @@ EXPORT_SYMBOL(CMO_PageSize); int fwnmi_active; /* TRUE if an FWNMI handler is present */ int ibm_nmi_interlock_token; +u32 pseries_security_flavor; static void pSeries_show_cpuinfo(struct seq_file *m) { @@ -534,9 +536,15 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result) /* * The features below are enabled by default, so we instead look to see * if firmware has *disabled* them, and clear them if so. + * H_CPU_BEHAV_FAVOUR_SECURITY_H could be set only if + * H_CPU_BEHAV_FAVOUR_SECURITY is. 
*/ if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) security_ftr_clear(SEC_FTR_FAVOUR_SECURITY); + else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H) + pseries_security_flavor = 1; + else + pseries_security_flavor = 2; if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) security_ftr_clear(SEC_FTR_L1D_FLUSH_PR); diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c index 7b739cc7a8a9..1d829e257996 100644 --- a/arch/powerpc/platforms/pseries/svm.c +++ b/arch/powerpc/platforms/pseries/svm.c @@ -55,9 +55,9 @@ void __init svm_swiotlb_init(void) if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, false)) return; - if (io_tlb_start) - memblock_free_early(io_tlb_start, - PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); + + memblock_free_early(__pa(vstart), + PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); panic("SVM: Cannot allocate SWIOTLB buffer"); } diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index 9cb4fc839fd5..e00f3725ec96 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -22,6 +22,7 @@ #include <linux/mm.h> #include <linux/dma-map-ops.h> #include <linux/kobject.h> +#include <linux/kexec.h> #include <asm/iommu.h> #include <asm/dma.h> @@ -1278,6 +1279,20 @@ static int vio_bus_remove(struct device *dev) return 0; } +static void vio_bus_shutdown(struct device *dev) +{ + struct vio_dev *viodev = to_vio_dev(dev); + struct vio_driver *viodrv; + + if (dev->driver) { + viodrv = to_vio_driver(dev->driver); + if (viodrv->shutdown) + viodrv->shutdown(viodev); + else if (kexec_in_progress) + vio_bus_remove(dev); + } +} + /** * vio_register_driver: - Register a new vio driver * @viodrv: The vio_driver structure to be registered. 
@@ -1285,6 +1300,10 @@ static int vio_bus_remove(struct device *dev) int __vio_register_driver(struct vio_driver *viodrv, struct module *owner, const char *mod_name) { + // vio_bus_type is only initialised for pseries + if (!machine_is(pseries)) + return -ENODEV; + pr_debug("%s: driver %s registering\n", __func__, viodrv->name); /* fill in 'struct driver' fields */ @@ -1613,6 +1632,7 @@ struct bus_type vio_bus_type = { .match = vio_bus_match, .probe = vio_bus_probe, .remove = vio_bus_remove, + .shutdown = vio_bus_shutdown, }; /** diff --git a/arch/powerpc/purgatory/trampoline_64.S b/arch/powerpc/purgatory/trampoline_64.S index d956b8a35fd1..b35837c13852 100644 --- a/arch/powerpc/purgatory/trampoline_64.S +++ b/arch/powerpc/purgatory/trampoline_64.S @@ -12,7 +12,6 @@ #include <asm/asm-compat.h> #include <asm/crashdump-ppc64.h> - .machine ppc64 .balign 256 .globl purgatory_start purgatory_start: diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c index 6b4a34b36d98..1d33b7a5ea83 100644 --- a/arch/powerpc/sysdev/dart_iommu.c +++ b/arch/powerpc/sysdev/dart_iommu.c @@ -344,7 +344,8 @@ static void iommu_table_dart_setup(void) iommu_table_dart.it_index = 0; iommu_table_dart.it_blocksize = 1; iommu_table_dart.it_ops = &iommu_dart_ops; - iommu_init_table(&iommu_table_dart, -1, 0, 0); + if (!iommu_init_table(&iommu_table_dart, -1, 0, 0)) + panic("Failed to initialize iommu table"); /* Reserve the last page of the DART to avoid possible prefetch * past the DART mapped area diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index 040b9d01c079..69af73765783 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -455,7 +455,7 @@ static void setup_pci_atmu(struct pci_controller *hose) } } -static void __init setup_pci_cmd(struct pci_controller *hose) +static void setup_pci_cmd(struct pci_controller *hose) { u16 cmd; int cap_x; diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c index 0baec82510b9..4c4a6efd5e5f 100644 --- a/arch/powerpc/sysdev/tsi108_dev.c +++ b/arch/powerpc/sysdev/tsi108_dev.c @@ -73,7 +73,6 @@ static int __init tsi108_eth_of_init(void) struct device_node *phy, *mdio; hw_info tsi_eth_data; const unsigned int *phy_id; - const void *mac_addr; const phandle *ph; memset(r, 0, sizeof(r)); @@ -101,9 +100,7 @@ static int __init tsi108_eth_of_init(void) goto err; } - mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) - ether_addr_copy(tsi_eth_data.mac_addr, mac_addr); + of_get_mac_address(np, tsi_eth_data.mac_addr); ph = of_get_property(np, "mdio-handle", NULL); mdio = of_find_node_by_phandle(*ph); diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 595310e056f4..a8304327072d 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -63,8 +63,19 @@ static const struct xive_ops *xive_ops; static struct irq_domain *xive_irq_domain; #ifdef CONFIG_SMP -/* The IPIs all use the same logical irq number */ -static u32 xive_ipi_irq; +/* The IPIs use the same logical irq number when on the same chip */ +static struct xive_ipi_desc { + unsigned int irq; + char name[16]; +} *xive_ipis; + +/* + * Use early_cpu_to_node() for hot-plugged CPUs + */ +static unsigned int xive_ipi_cpu_to_irq(unsigned int cpu) +{ + return xive_ipis[early_cpu_to_node(cpu)].irq; +} #endif /* Xive state for each CPU */ @@ -253,17 +264,20 @@ notrace void xmon_xive_do_dump(int cpu) xmon_printf("\n"); } +static struct irq_data *xive_get_irq_data(u32 
hw_irq) +{ + unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq); + + return irq ? irq_get_irq_data(irq) : NULL; +} + int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d) { - struct irq_chip *chip = irq_data_get_irq_chip(d); int rc; u32 target; u8 prio; u32 lirq; - if (!is_xive_irq(chip)) - return -EINVAL; - rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq); if (rc) { xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc); @@ -273,6 +287,9 @@ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d) xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ", hw_irq, target, prio, lirq); + if (!d) + d = xive_get_irq_data(hw_irq); + if (d) { struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); u64 val = xive_esb_read(xd, XIVE_ESB_GET); @@ -289,6 +306,20 @@ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d) return 0; } +void xmon_xive_get_irq_all(void) +{ + unsigned int i; + struct irq_desc *desc; + + for_each_irq_desc(i, desc) { + struct irq_data *d = irq_desc_get_irq_data(desc); + unsigned int hwirq = (unsigned int)irqd_to_hwirq(d); + + if (d->domain == xive_irq_domain) + xmon_xive_get_irq_config(hwirq, d); + } +} + #endif /* CONFIG_XMON */ static unsigned int xive_get_irq(void) @@ -959,16 +990,12 @@ EXPORT_SYMBOL_GPL(is_xive_irq); void xive_cleanup_irq_data(struct xive_irq_data *xd) { if (xd->eoi_mmio) { - unmap_kernel_range((unsigned long)xd->eoi_mmio, - 1u << xd->esb_shift); iounmap(xd->eoi_mmio); if (xd->eoi_mmio == xd->trig_mmio) xd->trig_mmio = NULL; xd->eoi_mmio = NULL; } if (xd->trig_mmio) { - unmap_kernel_range((unsigned long)xd->trig_mmio, - 1u << xd->esb_shift); iounmap(xd->trig_mmio); xd->trig_mmio = NULL; } @@ -1067,28 +1094,94 @@ static struct irq_chip xive_ipi_chip = { .irq_unmask = xive_ipi_do_nothing, }; -static void __init xive_request_ipi(void) +/* + * IPIs are marked per-cpu. 
We use separate HW interrupts under the + * hood but associated with the same "linux" interrupt + */ +struct xive_ipi_alloc_info { + irq_hw_number_t hwirq; +}; + +static int xive_ipi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) { - unsigned int virq; + struct xive_ipi_alloc_info *info = arg; + int i; - /* - * Initialization failed, move on, we might manage to - * reach the point where we display our errors before - * the system falls appart - */ - if (!xive_irq_domain) - return; + for (i = 0; i < nr_irqs; i++) { + irq_domain_set_info(domain, virq + i, info->hwirq + i, &xive_ipi_chip, + domain->host_data, handle_percpu_irq, + NULL, NULL); + } + return 0; +} - /* Initialize it */ - virq = irq_create_mapping(xive_irq_domain, XIVE_IPI_HW_IRQ); - xive_ipi_irq = virq; +static const struct irq_domain_ops xive_ipi_irq_domain_ops = { + .alloc = xive_ipi_irq_domain_alloc, +}; - WARN_ON(request_irq(virq, xive_muxed_ipi_action, - IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL)); +static int __init xive_request_ipi(void) +{ + struct fwnode_handle *fwnode; + struct irq_domain *ipi_domain; + unsigned int node; + int ret = -ENOMEM; + + fwnode = irq_domain_alloc_named_fwnode("XIVE-IPI"); + if (!fwnode) + goto out; + + ipi_domain = irq_domain_create_linear(fwnode, nr_node_ids, + &xive_ipi_irq_domain_ops, NULL); + if (!ipi_domain) + goto out_free_fwnode; + + xive_ipis = kcalloc(nr_node_ids, sizeof(*xive_ipis), GFP_KERNEL | __GFP_NOFAIL); + if (!xive_ipis) + goto out_free_domain; + + for_each_node(node) { + struct xive_ipi_desc *xid = &xive_ipis[node]; + struct xive_ipi_alloc_info info = { node }; + + /* Skip nodes without CPUs */ + if (cpumask_empty(cpumask_of_node(node))) + continue; + + /* + * Map one IPI interrupt per node for all cpus of that node. + * Since the HW interrupt number doesn't have any meaning, + * simply use the node number. + */ + xid->irq = irq_domain_alloc_irqs(ipi_domain, 1, node, &info); + if (xid->irq < 0) { + ret = xid->irq; + goto out_free_xive_ipis; + } + + snprintf(xid->name, sizeof(xid->name), "IPI-%d", node); + + ret = request_irq(xid->irq, xive_muxed_ipi_action, + IRQF_PERCPU | IRQF_NO_THREAD, xid->name, NULL); + + WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret); + } + + return ret; + +out_free_xive_ipis: + kfree(xive_ipis); +out_free_domain: + irq_domain_remove(ipi_domain); +out_free_fwnode: + irq_domain_free_fwnode(fwnode); +out: + return ret; } static int xive_setup_cpu_ipi(unsigned int cpu) { + unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu); struct xive_cpu *xc; int rc; @@ -1131,6 +1224,8 @@ static int xive_setup_cpu_ipi(unsigned int cpu) static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc) { + unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu); + /* Disable the IPI and free the IRQ data */ /* Already cleaned up ? */ @@ -1178,19 +1273,6 @@ static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq, */ irq_clear_status_flags(virq, IRQ_LEVEL); -#ifdef CONFIG_SMP - /* IPIs are special and come up with HW number 0 */ - if (hw == XIVE_IPI_HW_IRQ) { - /* - * IPIs are marked per-cpu. 
We use separate HW interrupts under - * the hood but associated with the same "linux" interrupt - */ - irq_set_chip_and_handler(virq, &xive_ipi_chip, - handle_percpu_irq); - return 0; - } -#endif - rc = xive_irq_alloc_data(virq, hw); if (rc) return rc; @@ -1202,15 +1284,7 @@ static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq, static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq) { - struct irq_data *data = irq_get_irq_data(virq); - unsigned int hw_irq; - - /* XXX Assign BAD number */ - if (!data) - return; - hw_irq = (unsigned int)irqd_to_hwirq(data); - if (hw_irq != XIVE_IPI_HW_IRQ) - xive_irq_free_data(virq); + xive_irq_free_data(virq); } static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct, @@ -1335,17 +1409,14 @@ static int xive_prepare_cpu(unsigned int cpu) xc = per_cpu(xive_cpu, cpu); if (!xc) { - struct device_node *np; - xc = kzalloc_node(sizeof(struct xive_cpu), GFP_KERNEL, cpu_to_node(cpu)); if (!xc) return -ENOMEM; - np = of_get_cpu_node(cpu, NULL); - if (np) - xc->chip_id = of_get_ibm_chip_id(np); - of_node_put(np); xc->hw_ipi = XIVE_BAD_IRQ; + xc->chip_id = XIVE_INVALID_CHIP_ID; + if (xive_ops->prepare_cpu) + xive_ops->prepare_cpu(cpu, xc); per_cpu(xive_cpu, cpu) = xc; } @@ -1408,13 +1479,12 @@ static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc) struct irq_desc *desc = irq_to_desc(irq); struct irq_data *d = irq_desc_get_irq_data(desc); struct xive_irq_data *xd; - unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); /* * Ignore anything that isn't a XIVE irq and ignore * IPIs, so can just be dropped. */ - if (d->domain != xive_irq_domain || hw_irq == XIVE_IPI_HW_IRQ) + if (d->domain != xive_irq_domain) continue; /* @@ -1592,16 +1662,15 @@ static void xive_debug_show_cpu(struct seq_file *m, int cpu) seq_puts(m, "\n"); } -static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data *d) +static void xive_debug_show_irq(struct seq_file *m, struct irq_data *d) { - struct irq_chip *chip = irq_data_get_irq_chip(d); + unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); int rc; u32 target; u8 prio; u32 lirq; - - if (!is_xive_irq(chip)) - return; + struct xive_irq_data *xd; + u64 val; rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq); if (rc) { @@ -1612,17 +1681,14 @@ static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ", hw_irq, target, prio, lirq); - if (d) { - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); - u64 val = xive_esb_read(xd, XIVE_ESB_GET); - - seq_printf(m, "flags=%c%c%c PQ=%c%c", - xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ', - xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ', - xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ', - val & XIVE_ESB_VAL_P ? 'P' : '-', - val & XIVE_ESB_VAL_Q ? 'Q' : '-'); - } + xd = irq_data_get_irq_handler_data(d); + val = xive_esb_read(xd, XIVE_ESB_GET); + seq_printf(m, "flags=%c%c%c PQ=%c%c", + xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ', + xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ', + xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ', + val & XIVE_ESB_VAL_P ? 'P' : '-', + val & XIVE_ESB_VAL_Q ? 
'Q' : '-'); seq_puts(m, "\n"); } @@ -1640,16 +1706,9 @@ static int xive_core_debug_show(struct seq_file *m, void *private) for_each_irq_desc(i, desc) { struct irq_data *d = irq_desc_get_irq_data(desc); - unsigned int hw_irq; - - if (!d) - continue; - - hw_irq = (unsigned int)irqd_to_hwirq(d); - /* IPIs are special (HW number 0) */ - if (hw_irq != XIVE_IPI_HW_IRQ) - xive_debug_show_irq(m, hw_irq, d); + if (d->domain == xive_irq_domain) + xive_debug_show_irq(m, d); } return 0; } diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index 05a800a3104e..57e3f1540435 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -380,6 +380,11 @@ static void xive_native_update_pending(struct xive_cpu *xc) } } +static void xive_native_prepare_cpu(unsigned int cpu, struct xive_cpu *xc) +{ + xc->chip_id = cpu_to_chip_id(cpu); +} + static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc) { s64 rc; @@ -462,6 +467,7 @@ static const struct xive_ops xive_native_ops = { .match = xive_native_match, .shutdown = xive_native_shutdown, .update_pending = xive_native_update_pending, + .prepare_cpu = xive_native_prepare_cpu, .setup_cpu = xive_native_setup_cpu, .teardown_cpu = xive_native_teardown_cpu, .sync_source = xive_native_sync_source, diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index 01ccc0786ada..f143b6f111ac 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -549,7 +549,7 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, static bool xive_spapr_match(struct device_node *node) { /* Ignore cascaded controllers for the moment */ - return 1; + return true; } #ifdef CONFIG_SMP diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h index 9cf57c722faa..504e7edce358 100644 --- a/arch/powerpc/sysdev/xive/xive-internal.h +++ b/arch/powerpc/sysdev/xive/xive-internal.h @@ -5,8 +5,6 @@ #ifndef __XIVE_INTERNAL_H #define __XIVE_INTERNAL_H -#define XIVE_IPI_HW_IRQ 0 /* interrupt source # for IPIs */ - /* * A "disabled" interrupt should never fire, to catch problems * we set its logical number to this @@ -46,6 +44,7 @@ struct xive_ops { u32 *sw_irq); int (*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio); void (*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio); + void (*prepare_cpu)(unsigned int cpu, struct xive_cpu *xc); void (*setup_cpu)(unsigned int cpu, struct xive_cpu *xc); void (*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc); bool (*match)(struct device_node *np); diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 3fe37495f63d..c8173e92f19d 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -54,6 +54,7 @@ #include <asm/code-patching.h> #include <asm/sections.h> #include <asm/inst.h> +#include <asm/interrupt.h> #ifdef CONFIG_PPC64 #include <asm/hvcall.h> @@ -605,7 +606,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi) * debugger break (IPI). This is similar to * crash_kexec_secondary(). 
*/ - if (TRAP(regs) != 0x100 || !wait_for_other_cpus(ncpus)) + if (TRAP(regs) != INTERRUPT_SYSTEM_RESET || !wait_for_other_cpus(ncpus)) smp_send_debugger_break(); wait_for_other_cpus(ncpus); @@ -615,7 +616,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi) if (!locked_down) { /* for breakpoint or single step, print curr insn */ - if (bp || TRAP(regs) == 0xd00) + if (bp || TRAP(regs) == INTERRUPT_TRACE) ppc_inst_dump(regs->nip, 1, 0); printf("enter ? for help\n"); } @@ -684,7 +685,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi) disable_surveillance(); if (!locked_down) { /* for breakpoint or single step, print current insn */ - if (bp || TRAP(regs) == 0xd00) + if (bp || TRAP(regs) == INTERRUPT_TRACE) ppc_inst_dump(regs->nip, 1, 0); printf("enter ? for help\n"); } @@ -1769,9 +1770,12 @@ static void excprint(struct pt_regs *fp) printf(" sp: %lx\n", fp->gpr[1]); printf(" msr: %lx\n", fp->msr); - if (trap == 0x300 || trap == 0x380 || trap == 0x600 || trap == 0x200) { + if (trap == INTERRUPT_DATA_STORAGE || + trap == INTERRUPT_DATA_SEGMENT || + trap == INTERRUPT_ALIGNMENT || + trap == INTERRUPT_MACHINE_CHECK) { printf(" dar: %lx\n", fp->dar); - if (trap != 0x380) + if (trap != INTERRUPT_DATA_SEGMENT) printf(" dsisr: %lx\n", fp->dsisr); } @@ -1785,7 +1789,7 @@ static void excprint(struct pt_regs *fp) current->pid, current->comm); } - if (trap == 0x700) + if (trap == INTERRUPT_PROGRAM) print_bug_trap(fp); printf(linux_banner); @@ -1815,25 +1819,16 @@ static void prregs(struct pt_regs *fp) } #ifdef CONFIG_PPC64 - if (FULL_REGS(fp)) { - for (n = 0; n < 16; ++n) - printf("R%.2d = "REG" R%.2d = "REG"\n", - n, fp->gpr[n], n+16, fp->gpr[n+16]); - } else { - for (n = 0; n < 7; ++n) - printf("R%.2d = "REG" R%.2d = "REG"\n", - n, fp->gpr[n], n+7, fp->gpr[n+7]); - } +#define R_PER_LINE 2 #else +#define R_PER_LINE 4 +#endif + for (n = 0; n < 32; ++n) { - printf("R%.2d = %.8lx%s", n, fp->gpr[n], - (n & 3) == 3? "\n": " "); - if (n == 12 && !FULL_REGS(fp)) { - printf("\n"); - break; - } + printf("R%.2d = "REG"%s", n, fp->gpr[n], + (n % R_PER_LINE) == R_PER_LINE - 1 ? 
"\n" : " "); } -#endif + printf("pc = "); xmon_print_symbol(fp->nip, " ", "\n"); if (!trap_is_syscall(fp) && cpu_has_feature(CPU_FTR_CFAR)) { @@ -1846,7 +1841,9 @@ static void prregs(struct pt_regs *fp) printf("ctr = "REG" xer = "REG" trap = %4lx\n", fp->ctr, fp->xer, fp->trap); trap = TRAP(fp); - if (trap == 0x300 || trap == 0x380 || trap == 0x600) + if (trap == INTERRUPT_DATA_STORAGE || + trap == INTERRUPT_DATA_SEGMENT || + trap == INTERRUPT_ALIGNMENT) printf("dar = "REG" dsisr = %.8lx\n", fp->dar, fp->dsisr); } @@ -2727,30 +2724,6 @@ static void dump_all_xives(void) dump_one_xive(cpu); } -static void dump_one_xive_irq(u32 num, struct irq_data *d) -{ - xmon_xive_get_irq_config(num, d); -} - -static void dump_all_xive_irq(void) -{ - unsigned int i; - struct irq_desc *desc; - - for_each_irq_desc(i, desc) { - struct irq_data *d = irq_desc_get_irq_data(desc); - unsigned int hwirq; - - if (!d) - continue; - - hwirq = (unsigned int)irqd_to_hwirq(d); - /* IPIs are special (HW number 0) */ - if (hwirq) - dump_one_xive_irq(hwirq, d); - } -} - static void dump_xives(void) { unsigned long num; @@ -2767,9 +2740,9 @@ static void dump_xives(void) return; } else if (c == 'i') { if (scanhex(&num)) - dump_one_xive_irq(num, NULL); + xmon_xive_get_irq_config(num, NULL); else - dump_all_xive_irq(); + xmon_xive_get_irq_all(); return; } @@ -2980,7 +2953,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr, if (!ppc_inst_prefixed(inst)) dump_func(ppc_inst_val(inst), adr); else - dump_func(ppc_inst_as_u64(inst), adr); + dump_func(ppc_inst_as_ulong(inst), adr); printf("\n"); } return adr - first_adr; @@ -3001,7 +2974,7 @@ print_address(unsigned long addr) static void dump_log_buf(void) { - struct kmsg_dumper dumper = { .active = 1 }; + struct kmsg_dump_iter iter; unsigned char buf[128]; size_t len; @@ -3013,9 +2986,9 @@ dump_log_buf(void) catch_memory_errors = 1; sync(); - kmsg_dump_rewind_nolock(&dumper); + kmsg_dump_rewind(&iter); xmon_start_pagination(); - while (kmsg_dump_get_line_nolock(&dumper, false, buf, sizeof(buf), &len)) { + while (kmsg_dump_get_line(&iter, false, buf, sizeof(buf), &len)) { buf[len] = '\0'; printf("%s", buf); } @@ -4212,8 +4185,7 @@ static void dump_spu_fields(struct spu *spu) DUMP_FIELD(spu, "0x%p", pdata); } -int -spu_inst_dump(unsigned long adr, long count, int praddr) +static int spu_inst_dump(unsigned long adr, long count, int praddr) { return generic_inst_dump(adr, count, praddr, print_insn_spu); } diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 85d626b8ce5e..a8ad8eb76120 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -20,6 +20,7 @@ config RISCV select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DEBUG_VIRTUAL if MMU select ARCH_HAS_DEBUG_WX + select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_GIGANTIC_PAGE select ARCH_HAS_KCOV @@ -27,18 +28,23 @@ config RISCV select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SET_DIRECT_MAP select ARCH_HAS_SET_MEMORY - select ARCH_HAS_STRICT_KERNEL_RWX if MMU + select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL + select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL + select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT + select ARCH_SUPPORTS_HUGETLBFS if MMU select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select ARCH_WANT_FRAME_POINTERS select ARCH_WANT_HUGE_PMD_SHARE if 64BIT + select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU select CLONE_BACKWARDS select CLINT_TIMER 
if !MMU select COMMON_CLK select EDAC_SUPPORT select GENERIC_ARCH_TOPOLOGY if SMP select GENERIC_ATOMIC64 if !64BIT + select GENERIC_CLOCKEVENTS_BROADCAST if SMP select GENERIC_EARLY_IOREMAP select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO select GENERIC_IOREMAP @@ -93,7 +99,6 @@ config RISCV select PCI_MSI if PCI select RISCV_INTC select RISCV_TIMER if RISCV_SBI - select SPARSEMEM_STATIC if 32BIT select SPARSE_IRQ select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK @@ -154,7 +159,8 @@ config ARCH_FLATMEM_ENABLE config ARCH_SPARSEMEM_ENABLE def_bool y depends on MMU - select SPARSEMEM_VMEMMAP_ENABLE + select SPARSEMEM_STATIC if 32BIT && SPARSEMEM + select SPARSEMEM_VMEMMAP_ENABLE if 64BIT config ARCH_SELECT_MEMORY_MODEL def_bool ARCH_SPARSEMEM_ENABLE @@ -165,10 +171,6 @@ config ARCH_WANT_GENERAL_HUGETLB config ARCH_SUPPORTS_UPROBES def_bool y -config SYS_SUPPORTS_HUGETLBFS - depends on MMU - def_bool y - config STACKTRACE_SUPPORT def_bool y @@ -204,6 +206,7 @@ config LOCKDEP_SUPPORT def_bool y source "arch/riscv/Kconfig.socs" +source "arch/riscv/Kconfig.erratas" menu "Platform type" @@ -227,7 +230,7 @@ config ARCH_RV64I bool "RV64I" select 64BIT select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000 - select HAVE_DYNAMIC_FTRACE if MMU + select HAVE_DYNAMIC_FTRACE if MMU && $(cc-option,-fpatchable-function-entry=8) select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_GRAPH_TRACER @@ -314,7 +317,7 @@ endchoice # Common NUMA Features config NUMA bool "NUMA Memory Allocation and Scheduler Support" - depends on SMP + depends on SMP && MMU select GENERIC_ARCH_NUMA select OF_NUMA select ARCH_SUPPORTS_NUMA_BALANCING @@ -386,6 +389,31 @@ config RISCV_SBI_V01 help This config allows kernel to use SBI v0.1 APIs. This will be deprecated in future once legacy M-mode software are no longer in use. + +config KEXEC + bool "Kexec system call" + select KEXEC_CORE + select HOTPLUG_CPU if SMP + depends on MMU + help + kexec is a system call that implements the ability to shutdown your + current kernel, and to start another kernel. It is like a reboot + but it is independent of the system firmware. And like a reboot + you can start any kernel with it, not just Linux. + + The name comes from the similarity to the exec system call. + +config CRASH_DUMP + bool "Build kdump crash kernel" + help + Generate crash dump after being started by kexec. This should + be normally only set in special crash dump kernels which are + loaded in the main kernel with kexec-tools into a specially + reserved region and then later executed after a crash by + kdump/kexec. + + For more details see Documentation/admin-guide/kdump/kdump.rst + endmenu menu "Boot options" @@ -438,7 +466,7 @@ config EFI_STUB config EFI bool "UEFI runtime support" - depends on OF + depends on OF && !XIP_KERNEL select LIBFDT select UCS2_STRING select EFI_PARAMS_FROM_FDT @@ -462,11 +490,63 @@ config STACKPROTECTOR_PER_TASK def_bool y depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS +config PHYS_RAM_BASE_FIXED + bool "Explicitly specified physical RAM address" + default n + +config PHYS_RAM_BASE + hex "Platform Physical RAM address" + depends on PHYS_RAM_BASE_FIXED + default "0x80000000" + help + This is the physical address of RAM in the system. It has to be + explicitly specified to run early relocations of read-write data + from flash to RAM. 
+ +config XIP_KERNEL + bool "Kernel Execute-In-Place from ROM" + depends on MMU && SPARSEMEM + # This prevents XIP from being enabled by all{yes,mod}config, which + # fail to build since XIP doesn't support large kernels. + depends on !COMPILE_TEST + select PHYS_RAM_BASE_FIXED + help + Execute-In-Place allows the kernel to run from non-volatile storage + directly addressable by the CPU, such as NOR flash. This saves RAM + space since the text section of the kernel is not loaded from flash + to RAM. Read-write sections, such as the data section and stack, + are still copied to RAM. The XIP kernel is not compressed since + it has to run directly from flash, so it will take more space to + store it. The flash address used to link the kernel object files, + and for storing it, is configuration dependent. Therefore, if you + say Y here, you must know the proper physical address where to + store the kernel image depending on your own flash memory usage. + + Also note that the make target becomes "make xipImage" rather than + "make zImage" or "make Image". The final kernel binary to put in + ROM memory will be arch/riscv/boot/xipImage. + + SPARSEMEM is required because the kernel text and rodata that are + flash resident are not backed by memmap, so any attempt to get + a struct page on those regions will trigger a fault. + + If unsure, say N. + +config XIP_PHYS_ADDR + hex "XIP Kernel Physical Location" + depends on XIP_KERNEL + default "0x21000000" + help + This is the physical address in your flash memory the kernel will + be linked for and stored to. This address is dependent on your + own flash usage. + endmenu config BUILTIN_DTB - def_bool n + bool depends on OF + default y if XIP_KERNEL menu "Power management options" diff --git a/arch/riscv/Kconfig.erratas b/arch/riscv/Kconfig.erratas new file mode 100644 index 000000000000..b44d6ecdb46e --- /dev/null +++ b/arch/riscv/Kconfig.erratas @@ -0,0 +1,44 @@ +menu "CPU errata selection" + +config RISCV_ERRATA_ALTERNATIVE + bool "RISC-V alternative scheme" + default y + help + This Kconfig allows the kernel to automatically patch the + errata required by the execution platform at run time. The + code patching is performed once in the boot stages. It means + that the overhead from this mechanism is just taken once. + +config ERRATA_SIFIVE + bool "SiFive errata" + depends on RISCV_ERRATA_ALTERNATIVE + help + All SiFive errata Kconfig depend on this Kconfig. Disabling + this Kconfig will disable all SiFive errata. Please say "Y" + here if your platform uses SiFive CPU cores. + + Otherwise, please say "N" here to avoid unnecessary overhead. + +config ERRATA_SIFIVE_CIP_453 + bool "Apply SiFive errata CIP-453" + depends on ERRATA_SIFIVE && 64BIT + default y + help + This will apply the SiFive CIP-453 errata to add sign extension + to the $badaddr when exception type is instruction page fault + and instruction access fault. + + If you don't know what to do here, say "Y". + +config ERRATA_SIFIVE_CIP_1200 + bool "Apply SiFive errata CIP-1200" + depends on ERRATA_SIFIVE && 64BIT + default y + help + This will apply the SiFive CIP-1200 errata to replace all + "sfence.vma addr" with "sfence.vma" to ensure that the addr + has been flushed from TLB. + + If you don't know what to do here, say "Y". 
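A rough sketch of how a call site consumes this errata framework may help here (not part of the patch itself; the wrapper name below is invented for illustration, while the macro arguments are the CIP-1200 values defined elsewhere in this series — the real in-tree user is local_flush_tlb_page() via ALT_FLUSH_TLB_PAGE() further down in this diff):

#include <asm/alternative.h>
#include <asm/errata_list.h>
#include <asm/vendorid_list.h>

/* Emits "sfence.vma addr" by default; at boot, apply_boot_alternatives()
 * patches in the plain "sfence.vma" if the CPU reports SiFive's vendor ID
 * and an implementation ID affected by CIP-1200. */
static inline void sample_flush_tlb_page(unsigned long addr)
{
        asm(ALTERNATIVE("sfence.vma %0", "sfence.vma",
                        SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_1200,
                        CONFIG_ERRATA_SIFIVE_CIP_1200)
            : : "r" (addr) : "memory");
}

With CONFIG_ERRATA_SIFIVE_CIP_1200 disabled, the macro degenerates to the original instruction and no alternatives-table entry is emitted.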
+ +endmenu diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs index 7efcece8896c..ed963761fbd2 100644 --- a/arch/riscv/Kconfig.socs +++ b/arch/riscv/Kconfig.socs @@ -1,5 +1,12 @@ menu "SoC selection" +config SOC_MICROCHIP_POLARFIRE + bool "Microchip PolarFire SoCs" + select MCHP_CLK_MPFS + select SIFIVE_PLIC + help + This enables support for Microchip PolarFire SoC platforms. + config SOC_SIFIVE bool "SiFive SoCs" select SERIAL_SIFIVE if TTY @@ -7,6 +14,7 @@ config SOC_SIFIVE select CLK_SIFIVE select CLK_SIFIVE_PRCI select SIFIVE_PLIC + select ERRATA_SIFIVE help This enables support for SiFive SoC platform hardware. @@ -31,6 +39,8 @@ config SOC_CANAAN select SIFIVE_PLIC select ARCH_HAS_RESET_CONTROLLER select PINCTRL + select COMMON_CLK + select COMMON_CLK_K210 help This enables support for Canaan Kendryte K210 SoC platform hardware. diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 1368d943f1f3..3eb9590a0775 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -82,11 +82,16 @@ CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS) # Default target when executing plain make boot := arch/riscv/boot +ifeq ($(CONFIG_XIP_KERNEL),y) +KBUILD_IMAGE := $(boot)/xipImage +else KBUILD_IMAGE := $(boot)/Image.gz +endif head-y := arch/riscv/kernel/head.o core-y += arch/riscv/ +core-$(CONFIG_RISCV_ERRATA_ALTERNATIVE) += arch/riscv/errata/ libs-y += arch/riscv/lib/ libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a @@ -95,12 +100,14 @@ PHONY += vdso_install vdso_install: $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@ +ifneq ($(CONFIG_XIP_KERNEL),y) ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_CANAAN),yy) KBUILD_IMAGE := $(boot)/loader.bin else KBUILD_IMAGE := $(boot)/Image.gz endif -BOOT_TARGETS := Image Image.gz loader loader.bin +endif +BOOT_TARGETS := Image Image.gz loader loader.bin xipImage all: $(notdir $(KBUILD_IMAGE)) diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile index 03404c84f971..6bf299f70c27 100644 --- a/arch/riscv/boot/Makefile +++ b/arch/riscv/boot/Makefile @@ -17,8 +17,21 @@ KCOV_INSTRUMENT := n OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S +OBJCOPYFLAGS_xipImage :=-O binary -R .note -R .note.gnu.build-id -R .comment -S targets := Image Image.* loader loader.o loader.lds loader.bin +targets := Image Image.* loader loader.o loader.lds loader.bin xipImage + +ifeq ($(CONFIG_XIP_KERNEL),y) + +quiet_cmd_mkxip = $(quiet_cmd_objcopy) +cmd_mkxip = $(cmd_objcopy) + +$(obj)/xipImage: vmlinux FORCE + $(call if_changed,mkxip) + @$(kecho) ' Physical Address of xipImage: $(CONFIG_XIP_PHYS_ADDR)' + +endif $(obj)/Image: vmlinux FORCE $(call if_changed,objcopy) diff --git a/arch/riscv/boot/dts/Makefile b/arch/riscv/boot/dts/Makefile index 7ffd502e3e7b..fe996b88319e 100644 --- a/arch/riscv/boot/dts/Makefile +++ b/arch/riscv/boot/dts/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 subdir-y += sifive subdir-$(CONFIG_SOC_CANAAN_K210_DTB_BUILTIN) += canaan +subdir-y += microchip obj-$(CONFIG_BUILTIN_DTB) := $(addsuffix /, $(subdir-y)) diff --git a/arch/riscv/boot/dts/microchip/Makefile b/arch/riscv/boot/dts/microchip/Makefile new file mode 100644 index 000000000000..622b12771fd3 --- /dev/null +++ b/arch/riscv/boot/dts/microchip/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += microchip-mpfs-icicle-kit.dtb diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts new file mode 
100644 index 000000000000..ec79944065c9 --- /dev/null +++ b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Copyright (c) 2020 Microchip Technology Inc */ + +/dts-v1/; + +#include "microchip-mpfs.dtsi" + +/* Clock frequency (in Hz) of the rtcclk */ +#define RTCCLK_FREQ 1000000 + +/ { + #address-cells = <2>; + #size-cells = <2>; + model = "Microchip PolarFire-SoC Icicle Kit"; + compatible = "microchip,mpfs-icicle-kit"; + + chosen { + stdout-path = &serial0; + }; + + cpus { + timebase-frequency = <RTCCLK_FREQ>; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x0 0x80000000 0x0 0x40000000>; + clocks = <&clkcfg 26>; + }; + + soc { + }; +}; + +&serial0 { + status = "okay"; +}; + +&serial1 { + status = "okay"; +}; + +&serial2 { + status = "okay"; +}; + +&serial3 { + status = "okay"; +}; + +&sdcard { + status = "okay"; +}; + +&emac0 { + phy-mode = "sgmii"; + phy-handle = <&phy0>; + phy0: ethernet-phy@8 { + reg = <8>; + ti,fifo-depth = <0x01>; + }; +}; + +&emac1 { + status = "okay"; + phy-mode = "sgmii"; + phy-handle = <&phy1>; + phy1: ethernet-phy@9 { + reg = <9>; + ti,fifo-depth = <0x01>; + }; +}; diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi new file mode 100644 index 000000000000..b9819570a7d1 --- /dev/null +++ b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi @@ -0,0 +1,329 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Copyright (c) 2020 Microchip Technology Inc */ + +/dts-v1/; + +/ { + #address-cells = <2>; + #size-cells = <2>; + model = "Microchip MPFS Icicle Kit"; + compatible = "microchip,mpfs-icicle-kit"; + + chosen { + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu@0 { + clock-frequency = <0>; + compatible = "sifive,e51", "sifive,rocket0", "riscv"; + device_type = "cpu"; + i-cache-block-size = <64>; + i-cache-sets = <128>; + i-cache-size = <16384>; + reg = <0>; + riscv,isa = "rv64imac"; + status = "disabled"; + + cpu0_intc: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + + cpu@1 { + clock-frequency = <0>; + compatible = "sifive,u54-mc", "sifive,rocket0", "riscv"; + d-cache-block-size = <64>; + d-cache-sets = <64>; + d-cache-size = <32768>; + d-tlb-sets = <1>; + d-tlb-size = <32>; + device_type = "cpu"; + i-cache-block-size = <64>; + i-cache-sets = <64>; + i-cache-size = <32768>; + i-tlb-sets = <1>; + i-tlb-size = <32>; + mmu-type = "riscv,sv39"; + reg = <1>; + riscv,isa = "rv64imafdc"; + tlb-split; + status = "okay"; + + cpu1_intc: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + + cpu@2 { + clock-frequency = <0>; + compatible = "sifive,u54-mc", "sifive,rocket0", "riscv"; + d-cache-block-size = <64>; + d-cache-sets = <64>; + d-cache-size = <32768>; + d-tlb-sets = <1>; + d-tlb-size = <32>; + device_type = "cpu"; + i-cache-block-size = <64>; + i-cache-sets = <64>; + i-cache-size = <32768>; + i-tlb-sets = <1>; + i-tlb-size = <32>; + mmu-type = "riscv,sv39"; + reg = <2>; + riscv,isa = "rv64imafdc"; + tlb-split; + status = "okay"; + + cpu2_intc: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + + cpu@3 { + clock-frequency = <0>; + compatible = "sifive,u54-mc", "sifive,rocket0", "riscv"; + d-cache-block-size = <64>; + d-cache-sets = <64>; + d-cache-size = <32768>; + d-tlb-sets = <1>; + d-tlb-size = 
<32>; + device_type = "cpu"; + i-cache-block-size = <64>; + i-cache-sets = <64>; + i-cache-size = <32768>; + i-tlb-sets = <1>; + i-tlb-size = <32>; + mmu-type = "riscv,sv39"; + reg = <3>; + riscv,isa = "rv64imafdc"; + tlb-split; + status = "okay"; + + cpu3_intc: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + + cpu@4 { + clock-frequency = <0>; + compatible = "sifive,u54-mc", "sifive,rocket0", "riscv"; + d-cache-block-size = <64>; + d-cache-sets = <64>; + d-cache-size = <32768>; + d-tlb-sets = <1>; + d-tlb-size = <32>; + device_type = "cpu"; + i-cache-block-size = <64>; + i-cache-sets = <64>; + i-cache-size = <32768>; + i-tlb-sets = <1>; + i-tlb-size = <32>; + mmu-type = "riscv,sv39"; + reg = <4>; + riscv,isa = "rv64imafdc"; + tlb-split; + status = "okay"; + cpu4_intc: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + }; + + soc { + #address-cells = <2>; + #size-cells = <2>; + compatible = "simple-bus"; + ranges; + + cache-controller@2010000 { + compatible = "sifive,fu540-c000-ccache", "cache"; + cache-block-size = <64>; + cache-level = <2>; + cache-sets = <1024>; + cache-size = <2097152>; + cache-unified; + interrupt-parent = <&plic>; + interrupts = <1 2 3>; + reg = <0x0 0x2010000 0x0 0x1000>; + }; + + clint@2000000 { + compatible = "sifive,clint0"; + reg = <0x0 0x2000000 0x0 0xC000>; + interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7 + &cpu1_intc 3 &cpu1_intc 7 + &cpu2_intc 3 &cpu2_intc 7 + &cpu3_intc 3 &cpu3_intc 7 + &cpu4_intc 3 &cpu4_intc 7>; + }; + + plic: interrupt-controller@c000000 { + #interrupt-cells = <1>; + compatible = "sifive,plic-1.0.0"; + reg = <0x0 0xc000000 0x0 0x4000000>; + riscv,ndev = <186>; + interrupt-controller; + interrupts-extended = <&cpu0_intc 11 + &cpu1_intc 11 &cpu1_intc 9 + &cpu2_intc 11 &cpu2_intc 9 + &cpu3_intc 11 &cpu3_intc 9 + &cpu4_intc 11 &cpu4_intc 9>; + }; + + dma@3000000 { + compatible = "sifive,fu540-c000-pdma"; + reg = <0x0 0x3000000 0x0 0x8000>; + interrupt-parent = <&plic>; + interrupts = <23 24 25 26 27 28 29 30>; + #dma-cells = <1>; + }; + + refclk: refclk { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <600000000>; + clock-output-names = "msspllclk"; + }; + + clkcfg: clkcfg@20002000 { + compatible = "microchip,mpfs-clkcfg"; + reg = <0x0 0x20002000 0x0 0x1000>; + reg-names = "mss_sysreg"; + clocks = <&refclk>; + #clock-cells = <1>; + clock-output-names = "cpu", "axi", "ahb", "envm", /* 0-3 */ + "mac0", "mac1", "mmc", "timer", /* 4-7 */ + "mmuart0", "mmuart1", "mmuart2", "mmuart3", /* 8-11 */ + "mmuart4", "spi0", "spi1", "i2c0", /* 12-15 */ + "i2c1", "can0", "can1", "usb", /* 16-19 */ + "rsvd", "rtc", "qspi", "gpio0", /* 20-23 */ + "gpio1", "gpio2", "ddrc", "fic0", /* 24-27 */ + "fic1", "fic2", "fic3", "athena", "cfm"; /* 28-32 */ + }; + + serial0: serial@20000000 { + compatible = "ns16550a"; + reg = <0x0 0x20000000 0x0 0x400>; + reg-io-width = <4>; + reg-shift = <2>; + interrupt-parent = <&plic>; + interrupts = <90>; + current-speed = <115200>; + clocks = <&clkcfg 8>; + status = "disabled"; + }; + + serial1: serial@20100000 { + compatible = "ns16550a"; + reg = <0x0 0x20100000 0x0 0x400>; + reg-io-width = <4>; + reg-shift = <2>; + interrupt-parent = <&plic>; + interrupts = <91>; + current-speed = <115200>; + clocks = <&clkcfg 9>; + status = "disabled"; + }; + + serial2: serial@20102000 { + compatible = "ns16550a"; + reg = <0x0 0x20102000 0x0 0x400>; + reg-io-width = <4>; + reg-shift = 
<2>; + interrupt-parent = <&plic>; + interrupts = <92>; + current-speed = <115200>; + clocks = <&clkcfg 10>; + status = "disabled"; + }; + + serial3: serial@20104000 { + compatible = "ns16550a"; + reg = <0x0 0x20104000 0x0 0x400>; + reg-io-width = <4>; + reg-shift = <2>; + interrupt-parent = <&plic>; + interrupts = <93>; + current-speed = <115200>; + clocks = <&clkcfg 11>; + status = "disabled"; + }; + + emmc: mmc@20008000 { + compatible = "cdns,sd4hc"; + reg = <0x0 0x20008000 0x0 0x1000>; + interrupt-parent = <&plic>; + interrupts = <88 89>; + pinctrl-names = "default"; + clocks = <&clkcfg 6>; + bus-width = <4>; + cap-mmc-highspeed; + mmc-ddr-3_3v; + max-frequency = <200000000>; + non-removable; + no-sd; + no-sdio; + voltage-ranges = <3300 3300>; + status = "disabled"; + }; + + sdcard: sdhc@20008000 { + compatible = "cdns,sd4hc"; + reg = <0x0 0x20008000 0x0 0x1000>; + interrupt-parent = <&plic>; + interrupts = <88>; + pinctrl-names = "default"; + clocks = <&clkcfg 6>; + bus-width = <4>; + disable-wp; + cap-sd-highspeed; + card-detect-delay = <200>; + sd-uhs-sdr12; + sd-uhs-sdr25; + sd-uhs-sdr50; + sd-uhs-sdr104; + max-frequency = <200000000>; + status = "disabled"; + }; + + emac0: ethernet@20110000 { + compatible = "cdns,macb"; + reg = <0x0 0x20110000 0x0 0x2000>; + interrupt-parent = <&plic>; + interrupts = <64 65 66 67>; + local-mac-address = [00 00 00 00 00 00]; + clocks = <&clkcfg 4>, <&clkcfg 2>; + clock-names = "pclk", "hclk"; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + + emac1: ethernet@20112000 { + compatible = "cdns,macb"; + reg = <0x0 0x20112000 0x0 0x2000>; + interrupt-parent = <&plic>; + interrupts = <70 71 72 73>; + mac-address = [00 00 00 00 00 00]; + clocks = <&clkcfg 5>, <&clkcfg 2>; + status = "disabled"; + clock-names = "pclk", "hclk"; + #address-cells = <1>; + #size-cells = <0>; + }; + + }; +}; diff --git a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi index eeb4f8c3e0e7..8eef82e4199f 100644 --- a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi +++ b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi @@ -159,6 +159,7 @@ reg = <0x0 0x10000000 0x0 0x1000>; clocks = <&hfclk>, <&rtcclk>; #clock-cells = <1>; + #reset-cells = <1>; }; uart0: serial@10010000 { compatible = "sifive,fu740-c000-uart", "sifive,uart0"; @@ -289,5 +290,37 @@ clocks = <&prci PRCI_CLK_PCLK>; status = "disabled"; }; + pcie@e00000000 { + compatible = "sifive,fu740-pcie"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + reg = <0xe 0x00000000 0x0 0x80000000>, + <0xd 0xf0000000 0x0 0x10000000>, + <0x0 0x100d0000 0x0 0x1000>; + reg-names = "dbi", "config", "mgmt"; + device_type = "pci"; + dma-coherent; + bus-range = <0x0 0xff>; + ranges = <0x81000000 0x0 0x60080000 0x0 0x60080000 0x0 0x10000>, /* I/O */ + <0x82000000 0x0 0x60090000 0x0 0x60090000 0x0 0xff70000>, /* mem */ + <0x82000000 0x0 0x70000000 0x0 0x70000000 0x0 0x1000000>, /* mem */ + <0xc3000000 0x20 0x00000000 0x20 0x00000000 0x20 0x00000000>; /* mem prefetchable */ + num-lanes = <0x8>; + interrupts = <56>, <57>, <58>, <59>, <60>, <61>, <62>, <63>, <64>; + interrupt-names = "msi", "inta", "intb", "intc", "intd"; + interrupt-parent = <&plic0>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &plic0 57>, + <0x0 0x0 0x0 0x2 &plic0 58>, + <0x0 0x0 0x0 0x3 &plic0 59>, + <0x0 0x0 0x0 0x4 &plic0 60>; + clock-names = "pcie_aux"; + clocks = <&prci PRCI_CLK_PCIE_AUX>; + pwren-gpios = <&gpio 5 0>; + reset-gpios = <&gpio 8 0>; + resets = <&prci 4>; + 
status = "okay"; + }; }; }; diff --git a/arch/riscv/boot/loader.lds.S b/arch/riscv/boot/loader.lds.S index 47a5003c2e28..62d94696a19c 100644 --- a/arch/riscv/boot/loader.lds.S +++ b/arch/riscv/boot/loader.lds.S @@ -1,13 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <asm/page.h> +#include <asm/pgtable.h> OUTPUT_ARCH(riscv) ENTRY(_start) SECTIONS { - . = PAGE_OFFSET; + . = KERNEL_LINK_ADDR; .payload : { *(.payload) diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index 6c0625aa96c7..1f2be234b11c 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -16,6 +16,7 @@ CONFIG_EXPERT=y CONFIG_BPF_SYSCALL=y CONFIG_SOC_SIFIVE=y CONFIG_SOC_VIRT=y +CONFIG_SOC_MICROCHIP_POLARFIRE=y CONFIG_SMP=y CONFIG_HOTPLUG_CPU=y CONFIG_JUMP_LABEL=y @@ -82,6 +83,9 @@ CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_STORAGE=y CONFIG_USB_UAS=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_CADENCE=y CONFIG_MMC=y CONFIG_MMC_SPI=y CONFIG_RTC_CLASS=y diff --git a/arch/riscv/errata/Makefile b/arch/riscv/errata/Makefile new file mode 100644 index 000000000000..b8f8740a3e44 --- /dev/null +++ b/arch/riscv/errata/Makefile @@ -0,0 +1,2 @@ +obj-y += alternative.o +obj-$(CONFIG_ERRATA_SIFIVE) += sifive/ diff --git a/arch/riscv/errata/alternative.c b/arch/riscv/errata/alternative.c new file mode 100644 index 000000000000..3b15885db70b --- /dev/null +++ b/arch/riscv/errata/alternative.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * alternative runtime patching + * inspired by the ARM64 and x86 version + * + * Copyright (C) 2021 Sifive. + */ + +#include <linux/init.h> +#include <linux/cpu.h> +#include <linux/uaccess.h> +#include <asm/alternative.h> +#include <asm/sections.h> +#include <asm/vendorid_list.h> +#include <asm/sbi.h> +#include <asm/csr.h> + +static struct cpu_manufacturer_info_t { + unsigned long vendor_id; + unsigned long arch_id; + unsigned long imp_id; +} cpu_mfr_info; + +static void (*vendor_patch_func)(struct alt_entry *begin, struct alt_entry *end, + unsigned long archid, unsigned long impid); + +static inline void __init riscv_fill_cpu_mfr_info(void) +{ +#ifdef CONFIG_RISCV_M_MODE + cpu_mfr_info.vendor_id = csr_read(CSR_MVENDORID); + cpu_mfr_info.arch_id = csr_read(CSR_MARCHID); + cpu_mfr_info.imp_id = csr_read(CSR_MIMPID); +#else + cpu_mfr_info.vendor_id = sbi_get_mvendorid(); + cpu_mfr_info.arch_id = sbi_get_marchid(); + cpu_mfr_info.imp_id = sbi_get_mimpid(); +#endif +} + +static void __init init_alternative(void) +{ + riscv_fill_cpu_mfr_info(); + + switch (cpu_mfr_info.vendor_id) { +#ifdef CONFIG_ERRATA_SIFIVE + case SIFIVE_VENDOR_ID: + vendor_patch_func = sifive_errata_patch_func; + break; +#endif + default: + vendor_patch_func = NULL; + } +} + +/* + * This is called very early in the boot process (directly after we run + * a feature detect on the boot CPU). No need to worry about other CPUs + * here. 
+ */ +void __init apply_boot_alternatives(void) +{ + /* If called on non-boot cpu things could go wrong */ + WARN_ON(smp_processor_id() != 0); + + init_alternative(); + + if (!vendor_patch_func) + return; + + vendor_patch_func((struct alt_entry *)__alt_start, + (struct alt_entry *)__alt_end, + cpu_mfr_info.arch_id, cpu_mfr_info.imp_id); +} + diff --git a/arch/riscv/errata/sifive/Makefile b/arch/riscv/errata/sifive/Makefile new file mode 100644 index 000000000000..bdd5fc843b8e --- /dev/null +++ b/arch/riscv/errata/sifive/Makefile @@ -0,0 +1,2 @@ +obj-y += errata_cip_453.o +obj-y += errata.o diff --git a/arch/riscv/errata/sifive/errata.c b/arch/riscv/errata/sifive/errata.c new file mode 100644 index 000000000000..f5e5ae70e829 --- /dev/null +++ b/arch/riscv/errata/sifive/errata.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Sifive. + */ + +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/bug.h> +#include <asm/patch.h> +#include <asm/alternative.h> +#include <asm/vendorid_list.h> +#include <asm/errata_list.h> + +struct errata_info_t { + char name[ERRATA_STRING_LENGTH_MAX]; + bool (*check_func)(unsigned long arch_id, unsigned long impid); +}; + +static bool errata_cip_453_check_func(unsigned long arch_id, unsigned long impid) +{ + /* + * Affected cores: + * Architecture ID: 0x8000000000000007 + * Implement ID: 0x20181004 <= impid <= 0x20191105 + */ + if (arch_id != 0x8000000000000007 || + (impid < 0x20181004 || impid > 0x20191105)) + return false; + return true; +} + +static bool errata_cip_1200_check_func(unsigned long arch_id, unsigned long impid) +{ + /* + * Affected cores: + * Architecture ID: 0x8000000000000007 or 0x1 + * Implement ID: mimpid[23:0] <= 0x200630 and mimpid != 0x01200626 + */ + if (arch_id != 0x8000000000000007 && arch_id != 0x1) + return false; + if ((impid & 0xffffff) > 0x200630 || impid == 0x1200626) + return false; + return true; +} + +static struct errata_info_t errata_list[ERRATA_SIFIVE_NUMBER] = { + { + .name = "cip-453", + .check_func = errata_cip_453_check_func + }, + { + .name = "cip-1200", + .check_func = errata_cip_1200_check_func + }, +}; + +static u32 __init sifive_errata_probe(unsigned long archid, unsigned long impid) +{ + int idx; + u32 cpu_req_errata = 0; + + for (idx = 0; idx < ERRATA_SIFIVE_NUMBER; idx++) + if (errata_list[idx].check_func(archid, impid)) + cpu_req_errata |= (1U << idx); + + return cpu_req_errata; +} + +static void __init warn_miss_errata(u32 miss_errata) +{ + int i; + + pr_warn("----------------------------------------------------------------\n"); + pr_warn("WARNING: Missing the following errata may cause potential issues\n"); + for (i = 0; i < ERRATA_SIFIVE_NUMBER; i++) + if (miss_errata & 0x1 << i) + pr_warn("\tSiFive Errata[%d]:%s\n", i, errata_list[i].name); + pr_warn("Please enable the corresponding Kconfig to apply them\n"); + pr_warn("----------------------------------------------------------------\n"); +} + +void __init sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, + unsigned long archid, unsigned long impid) +{ + struct alt_entry *alt; + u32 cpu_req_errata = sifive_errata_probe(archid, impid); + u32 cpu_apply_errata = 0; + u32 tmp; + + for (alt = begin; alt < end; alt++) { + if (alt->vendor_id != SIFIVE_VENDOR_ID) + continue; + if (alt->errata_id >= ERRATA_SIFIVE_NUMBER) { + WARN(1, "This errata id:%d is not in kernel errata list", alt->errata_id); + continue; + } + + tmp = (1U << alt->errata_id); + if (cpu_req_errata & tmp) { + 
patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len); + cpu_apply_errata |= tmp; + } + } + if (cpu_apply_errata != cpu_req_errata) + warn_miss_errata(cpu_req_errata - cpu_apply_errata); +} diff --git a/arch/riscv/errata/sifive/errata_cip_453.S b/arch/riscv/errata/sifive/errata_cip_453.S new file mode 100644 index 000000000000..f1b9623fe1de --- /dev/null +++ b/arch/riscv/errata/sifive/errata_cip_453.S @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 SiFive + */ + +#include <linux/linkage.h> +#include <asm/asm.h> +#include <asm/asm-offsets.h> +#include <asm/alternative.h> + +.macro ADD_SIGN_EXT pt_reg badaddr tmp_reg + REG_L \badaddr, PT_BADADDR(\pt_reg) + li \tmp_reg,1 + slli \tmp_reg,\tmp_reg,0x26 + and \tmp_reg,\tmp_reg,\badaddr + beqz \tmp_reg, 1f + li \tmp_reg,-1 + slli \tmp_reg,\tmp_reg,0x27 + or \badaddr,\tmp_reg,\badaddr + REG_S \badaddr, PT_BADADDR(\pt_reg) +1: +.endm + +ENTRY(sifive_cip_453_page_fault_trp) + ADD_SIGN_EXT a0, t0, t1 +#ifdef CONFIG_MMU + la t0, do_page_fault +#else + la t0, do_trap_unknown +#endif + jr t0 +END(sifive_cip_453_page_fault_trp) + +ENTRY(sifive_cip_453_insn_fault_trp) + ADD_SIGN_EXT a0, t0, t1 + la t0, do_trap_insn_fault + jr t0 +END(sifive_cip_453_insn_fault_trp) diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h new file mode 100644 index 000000000000..88c08705f64a --- /dev/null +++ b/arch/riscv/include/asm/alternative-macros.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_ALTERNATIVE_MACROS_H +#define __ASM_ALTERNATIVE_MACROS_H + +#ifdef CONFIG_RISCV_ERRATA_ALTERNATIVE + +#ifdef __ASSEMBLY__ + +.macro ALT_ENTRY oldptr newptr vendor_id errata_id new_len + RISCV_PTR \oldptr + RISCV_PTR \newptr + REG_ASM \vendor_id + REG_ASM \new_len + .word \errata_id +.endm + +.macro ALT_NEW_CONTENT vendor_id, errata_id, enable = 1, new_c : vararg + .if \enable + .pushsection .alternative, "a" + ALT_ENTRY 886b, 888f, \vendor_id, \errata_id, 889f - 888f + .popsection + .subsection 1 +888 : + \new_c +889 : + .previous + .org . - (889b - 888b) + (887b - 886b) + .org . - (887b - 886b) + (889b - 888b) + .endif +.endm + +.macro __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, enable +886 : + \old_c +887 : + ALT_NEW_CONTENT \vendor_id, \errata_id, \enable, \new_c +.endm + +#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ + __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k) + +#else /* !__ASSEMBLY__ */ + +#include <asm/asm.h> +#include <linux/stringify.h> + +#define ALT_ENTRY(oldptr, newptr, vendor_id, errata_id, newlen) \ + RISCV_PTR " " oldptr "\n" \ + RISCV_PTR " " newptr "\n" \ + REG_ASM " " vendor_id "\n" \ + REG_ASM " " newlen "\n" \ + ".word " errata_id "\n" + +#define ALT_NEW_CONSTENT(vendor_id, errata_id, enable, new_c) \ + ".if " __stringify(enable) " == 1\n" \ + ".pushsection .alternative, \"a\"\n" \ + ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(errata_id), "889f - 888f") \ + ".popsection\n" \ + ".subsection 1\n" \ + "888 :\n" \ + new_c "\n" \ + "889 :\n" \ + ".previous\n" \ + ".org . - (887b - 886b) + (889b - 888b)\n" \ + ".org . 
- (889b - 888b) + (887b - 886b)\n" \ + ".endif\n" + +#define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, enable) \ + "886 :\n" \ + old_c "\n" \ + "887 :\n" \ + ALT_NEW_CONSTENT(vendor_id, errata_id, enable, new_c) + +#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ + __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k)) + +#endif /* __ASSEMBLY__ */ + +#else /* !CONFIG_RISCV_ERRATA_ALTERNATIVE*/ +#ifdef __ASSEMBLY__ + +.macro __ALTERNATIVE_CFG old_c + \old_c +.endm + +#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ + __ALTERNATIVE_CFG old_c + +#else /* !__ASSEMBLY__ */ + +#define __ALTERNATIVE_CFG(old_c) \ + old_c "\n" + +#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ + __ALTERNATIVE_CFG(old_c) + +#endif /* __ASSEMBLY__ */ +#endif /* CONFIG_RISCV_ERRATA_ALTERNATIVE */ +/* + * Usage: + * ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k) + * in the assembly code. Otherwise, + * asm(ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k)); + * + * old_content: The old content which is probably replaced with new content. + * new_content: The new content. + * vendor_id: The CPU vendor ID. + * errata_id: The errata ID. + * CONFIG_k: The Kconfig of this errata. When Kconfig is disabled, the old + * content will alwyas be executed. + */ +#define ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k) \ + _ALTERNATIVE_CFG(old_content, new_content, vendor_id, errata_id, CONFIG_k) + +/* + * A vendor wants to replace an old_content, but another vendor has used + * ALTERNATIVE() to patch its customized content at the same location. In + * this case, this vendor can create a new macro ALTERNATIVE_2() based + * on the following sample code and then replace ALTERNATIVE() with + * ALTERNATIVE_2() to append its customized content. + * + * .macro __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \ + * new_c_2, vendor_id_2, errata_id_2, enable_2 + * 886 : + * \old_c + * 887 : + * ALT_NEW_CONTENT \vendor_id_1, \errata_id_1, \enable_1, \new_c_1 + * ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2 + * .endm + * + * #define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, CONFIG_k_1, \ + * new_c_2, vendor_id_2, errata_id_2, CONFIG_k_2) \ + * __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, IS_ENABLED(CONFIG_k_1), \ + * new_c_2, vendor_id_2, errata_id_2, IS_ENABLED(CONFIG_k_2) \ + * + * #define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \ + * new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2) \ + * _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \ + * new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2) + * + */ +#endif diff --git a/arch/riscv/include/asm/alternative.h b/arch/riscv/include/asm/alternative.h new file mode 100644 index 000000000000..e625d3cafbed --- /dev/null +++ b/arch/riscv/include/asm/alternative.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Sifive. 
+ */ + +#ifndef __ASM_ALTERNATIVE_H +#define __ASM_ALTERNATIVE_H + +#define ERRATA_STRING_LENGTH_MAX 32 + +#include <asm/alternative-macros.h> + +#ifndef __ASSEMBLY__ + +#include <linux/init.h> +#include <linux/types.h> +#include <linux/stddef.h> +#include <asm/hwcap.h> + +void __init apply_boot_alternatives(void); + +struct alt_entry { + void *old_ptr; /* address of original instruciton or data */ + void *alt_ptr; /* address of replacement instruction or data */ + unsigned long vendor_id; /* cpu vendor id */ + unsigned long alt_len; /* The replacement size */ + unsigned int errata_id; /* The errata id */ +} __packed; + +struct errata_checkfunc_id { + unsigned long vendor_id; + bool (*func)(struct alt_entry *alt); +}; + +void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, + unsigned long archid, unsigned long impid); + +#endif +#endif diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h index 27e005fca584..2a652b0c987d 100644 --- a/arch/riscv/include/asm/asm-prototypes.h +++ b/arch/riscv/include/asm/asm-prototypes.h @@ -9,4 +9,20 @@ long long __lshrti3(long long a, int b); long long __ashrti3(long long a, int b); long long __ashlti3(long long a, int b); + +#define DECLARE_DO_ERROR_INFO(name) asmlinkage void name(struct pt_regs *regs) + +DECLARE_DO_ERROR_INFO(do_trap_unknown); +DECLARE_DO_ERROR_INFO(do_trap_insn_misaligned); +DECLARE_DO_ERROR_INFO(do_trap_insn_fault); +DECLARE_DO_ERROR_INFO(do_trap_insn_illegal); +DECLARE_DO_ERROR_INFO(do_trap_load_fault); +DECLARE_DO_ERROR_INFO(do_trap_load_misaligned); +DECLARE_DO_ERROR_INFO(do_trap_store_misaligned); +DECLARE_DO_ERROR_INFO(do_trap_store_fault); +DECLARE_DO_ERROR_INFO(do_trap_ecall_u); +DECLARE_DO_ERROR_INFO(do_trap_ecall_s); +DECLARE_DO_ERROR_INFO(do_trap_ecall_m); +DECLARE_DO_ERROR_INFO(do_trap_break); + #endif /* _ASM_RISCV_PROTOTYPES_H */ diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h index 9c992a88d858..618d7c5af1a2 100644 --- a/arch/riscv/include/asm/asm.h +++ b/arch/riscv/include/asm/asm.h @@ -23,6 +23,7 @@ #define REG_L __REG_SEL(ld, lw) #define REG_S __REG_SEL(sd, sw) #define REG_SC __REG_SEL(sc.d, sc.w) +#define REG_ASM __REG_SEL(.dword, .word) #define SZREG __REG_SEL(8, 4) #define LGREG __REG_SEL(3, 2) diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index caadfc1d7487..87ac65696871 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -115,6 +115,9 @@ #define CSR_MIP 0x344 #define CSR_PMPCFG0 0x3a0 #define CSR_PMPADDR0 0x3b0 +#define CSR_MVENDORID 0xf11 +#define CSR_MARCHID 0xf12 +#define CSR_MIMPID 0xf13 #define CSR_MHARTID 0xf14 #ifdef CONFIG_RISCV_M_MODE diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h index 5c725e1df58b..f4b490cd0e5d 100644 --- a/arch/riscv/include/asm/elf.h +++ b/arch/riscv/include/asm/elf.h @@ -81,4 +81,10 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp); #endif /* CONFIG_MMU */ +#define ELF_CORE_COPY_REGS(dest, regs) \ +do { \ + *(struct user_regs_struct *)&(dest) = \ + *(struct user_regs_struct *)regs; \ +} while (0); + #endif /* _ASM_RISCV_ELF_H */ diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h new file mode 100644 index 000000000000..5f1046e82d9f --- /dev/null +++ b/arch/riscv/include/asm/errata_list.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Sifive. 
+ */ +#ifndef ASM_ERRATA_LIST_H +#define ASM_ERRATA_LIST_H + +#include <asm/alternative.h> +#include <asm/vendorid_list.h> + +#ifdef CONFIG_ERRATA_SIFIVE +#define ERRATA_SIFIVE_CIP_453 0 +#define ERRATA_SIFIVE_CIP_1200 1 +#define ERRATA_SIFIVE_NUMBER 2 +#endif + +#ifdef __ASSEMBLY__ + +#define ALT_INSN_FAULT(x) \ +ALTERNATIVE(__stringify(RISCV_PTR do_trap_insn_fault), \ + __stringify(RISCV_PTR sifive_cip_453_insn_fault_trp), \ + SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_453, \ + CONFIG_ERRATA_SIFIVE_CIP_453) + +#define ALT_PAGE_FAULT(x) \ +ALTERNATIVE(__stringify(RISCV_PTR do_page_fault), \ + __stringify(RISCV_PTR sifive_cip_453_page_fault_trp), \ + SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_453, \ + CONFIG_ERRATA_SIFIVE_CIP_453) +#else /* !__ASSEMBLY__ */ + +#define ALT_FLUSH_TLB_PAGE(x) \ +asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID, \ + ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \ + : : "r" (addr) : "memory") + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h index 845002cc2e57..04dad3380041 100644 --- a/arch/riscv/include/asm/ftrace.h +++ b/arch/riscv/include/asm/ftrace.h @@ -13,9 +13,19 @@ #endif #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR +/* + * Clang prior to 13 had "mcount" instead of "_mcount": + * https://reviews.llvm.org/D98881 + */ +#if defined(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 130000 +#define MCOUNT_NAME _mcount +#else +#define MCOUNT_NAME mcount +#endif + #define ARCH_SUPPORTS_FTRACE_OPS 1 #ifndef __ASSEMBLY__ -void _mcount(void); +void MCOUNT_NAME(void); static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; @@ -36,7 +46,7 @@ struct dyn_arch_ftrace { * both auipc and jalr at the same time. */ -#define MCOUNT_ADDR ((unsigned long)_mcount) +#define MCOUNT_ADDR ((unsigned long)MCOUNT_NAME) #define JALR_SIGN_MASK (0x00000800) #define JALR_OFFSET_MASK (0x00000fff) #define AUIPC_OFFSET_MASK (0xfffff000) diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h index 9807ad164015..e4c435509983 100644 --- a/arch/riscv/include/asm/irq.h +++ b/arch/riscv/include/asm/irq.h @@ -12,4 +12,6 @@ #include <asm-generic/irq.h> +extern void __init init_IRQ(void); + #endif /* _ASM_RISCV_IRQ_H */ diff --git a/arch/riscv/include/asm/kexec.h b/arch/riscv/include/asm/kexec.h new file mode 100644 index 000000000000..1e954101906a --- /dev/null +++ b/arch/riscv/include/asm/kexec.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 FORTH-ICS/CARV + * Nick Kossifidis <mick@ics.forth.gr> + */ + +#ifndef _RISCV_KEXEC_H +#define _RISCV_KEXEC_H + +#include <asm/page.h> /* For PAGE_SIZE */ + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) + +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) + +/* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) + +/* Reserve a page for the control code buffer */ +#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE + +#define KEXEC_ARCH KEXEC_ARCH_RISCV + +extern void riscv_crash_save_regs(struct pt_regs *newregs); + +static inline void +crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ + if (oldregs) + memcpy(newregs, oldregs, sizeof(struct pt_regs)); + else + riscv_crash_save_regs(newregs); +} + + +#define ARCH_HAS_KIMAGE_ARCH + +struct kimage_arch { + unsigned long fdt_addr; +}; + +const extern unsigned char riscv_kexec_relocate[]; +const 
extern unsigned int riscv_kexec_relocate_size; + +typedef void (*riscv_kexec_method)(unsigned long first_ind_entry, + unsigned long jump_addr, + unsigned long fdt_addr, + unsigned long hartid, + unsigned long va_pa_off); + +extern riscv_kexec_method riscv_kexec_norelocate; + +#endif diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index adc9d26f3d75..6a7761c86ec2 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -90,15 +90,58 @@ typedef struct page *pgtable_t; #ifdef CONFIG_MMU extern unsigned long va_pa_offset; +#ifdef CONFIG_64BIT +extern unsigned long va_kernel_pa_offset; +#endif +#ifdef CONFIG_XIP_KERNEL +extern unsigned long va_kernel_xip_pa_offset; +#endif extern unsigned long pfn_base; #define ARCH_PFN_OFFSET (pfn_base) #else #define va_pa_offset 0 +#ifdef CONFIG_64BIT +#define va_kernel_pa_offset 0 +#endif #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) #endif /* CONFIG_MMU */ -#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset)) -#define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset) +extern unsigned long kernel_virt_addr; + +#ifdef CONFIG_64BIT +#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset)) +#ifdef CONFIG_XIP_KERNEL +#define kernel_mapping_pa_to_va(y) ({ \ + unsigned long _y = y; \ + (_y >= CONFIG_PHYS_RAM_BASE) ? \ + (void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) : \ + (void *)((unsigned long)(_y) + va_kernel_xip_pa_offset); \ + }) +#else +#define kernel_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_kernel_pa_offset)) +#endif +#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x) + +#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - va_pa_offset) +#ifdef CONFIG_XIP_KERNEL +#define kernel_mapping_va_to_pa(y) ({ \ + unsigned long _y = y; \ + (_y < kernel_virt_addr + XIP_OFFSET) ? \ + ((unsigned long)(_y) - va_kernel_xip_pa_offset) : \ + ((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET); \ + }) +#else +#define kernel_mapping_va_to_pa(x) ((unsigned long)(x) - va_kernel_pa_offset) +#endif +#define __va_to_pa_nodebug(x) ({ \ + unsigned long _x = x; \ + (_x < kernel_virt_addr) ? 
\ + linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x); \ + }) +#else +#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset)) +#define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset) +#endif #ifdef CONFIG_DEBUG_VIRTUAL extern phys_addr_t __virt_to_phys(unsigned long x); diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index ebf817c1bdf4..9469f464e71a 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -11,23 +11,38 @@ #include <asm/pgtable-bits.h> -#ifndef __ASSEMBLY__ +#ifndef CONFIG_MMU +#define KERNEL_LINK_ADDR PAGE_OFFSET +#else -/* Page Upper Directory not used in RISC-V */ -#include <asm-generic/pgtable-nopud.h> -#include <asm/page.h> -#include <asm/tlbflush.h> -#include <linux/mm_types.h> +#define ADDRESS_SPACE_END (UL(-1)) -#ifdef CONFIG_MMU +#ifdef CONFIG_64BIT +/* Leave 2GB for kernel and BPF at the end of the address space */ +#define KERNEL_LINK_ADDR (ADDRESS_SPACE_END - SZ_2G + 1) +#else +#define KERNEL_LINK_ADDR PAGE_OFFSET +#endif #define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) #define VMALLOC_END (PAGE_OFFSET - 1) #define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) #define BPF_JIT_REGION_SIZE (SZ_128M) +#ifdef CONFIG_64BIT +/* KASLR should leave at least 128MB for BPF after the kernel */ +#define BPF_JIT_REGION_START PFN_ALIGN((unsigned long)&_end) +#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE) +#else #define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE) #define BPF_JIT_REGION_END (VMALLOC_END) +#endif + +/* Modules always live before the kernel */ +#ifdef CONFIG_64BIT +#define MODULES_VADDR (PFN_ALIGN((unsigned long)&_end) - SZ_2G) +#define MODULES_END (PFN_ALIGN((unsigned long)&_start)) +#endif /* * Roughly size the vmemmap space to be large enough to fit enough @@ -60,12 +75,35 @@ #endif +#ifdef CONFIG_XIP_KERNEL +#define XIP_OFFSET SZ_8M +#endif + +#ifndef __ASSEMBLY__ + +/* Page Upper Directory not used in RISC-V */ +#include <asm-generic/pgtable-nopud.h> +#include <asm/page.h> +#include <asm/tlbflush.h> +#include <linux/mm_types.h> + #ifdef CONFIG_64BIT #include <asm/pgtable-64.h> #else #include <asm/pgtable-32.h> #endif /* CONFIG_64BIT */ +#ifdef CONFIG_XIP_KERNEL +#define XIP_FIXUP(addr) ({ \ + uintptr_t __a = (uintptr_t)(addr); \ + (__a >= CONFIG_XIP_PHYS_ADDR && __a < CONFIG_XIP_PHYS_ADDR + SZ_16M) ? 
\ + __a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\ + __a; \ + }) +#else +#define XIP_FIXUP(addr) (addr) +#endif /* CONFIG_XIP_KERNEL */ + #ifdef CONFIG_MMU /* Number of entries in the page global directory */ #define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t)) @@ -484,8 +522,17 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma, #define kern_addr_valid(addr) (1) /* FIXME */ -extern void *dtb_early_va; -extern uintptr_t dtb_early_pa; +extern char _start[]; +extern void *_dtb_early_va; +extern uintptr_t _dtb_early_pa; +#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU) +#define dtb_early_va (*(void **)XIP_FIXUP(&_dtb_early_va)) +#define dtb_early_pa (*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa)) +#else +#define dtb_early_va _dtb_early_va +#define dtb_early_pa _dtb_early_pa +#endif /* CONFIG_XIP_KERNEL */ + void setup_bootmem(void); void paging_init(void); void misc_mem_init(void); diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index 3a240037bde2..021ed64ee608 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -71,6 +71,7 @@ int riscv_of_processor_hartid(struct device_node *node); int riscv_of_parent_hartid(struct device_node *node); extern void riscv_fill_hwcap(void); +extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #endif /* __ASSEMBLY__ */ diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h index cb4abb639e8d..09ad4e923510 100644 --- a/arch/riscv/include/asm/ptrace.h +++ b/arch/riscv/include/asm/ptrace.h @@ -119,6 +119,11 @@ extern int regs_query_register_offset(const char *name); extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n); +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, + unsigned long frame_pointer); +int do_syscall_trace_enter(struct pt_regs *regs); +void do_syscall_trace_exit(struct pt_regs *regs); + /** * regs_get_register() - get register value from its offset * @regs: pt_regs from which register value is gotten diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 99895d9c3bdd..0d42693cb65e 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -51,10 +51,10 @@ enum sbi_ext_rfence_fid { SBI_EXT_RFENCE_REMOTE_FENCE_I = 0, SBI_EXT_RFENCE_REMOTE_SFENCE_VMA, SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID, - SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA, SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID, - SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA, + SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA, SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID, + SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA, }; enum sbi_ext_hsm_fid { @@ -97,6 +97,9 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, void sbi_console_putchar(int ch); int sbi_console_getchar(void); +long sbi_get_mvendorid(void); +long sbi_get_marchid(void); +long sbi_get_mimpid(void); void sbi_set_timer(uint64_t stime_value); void sbi_shutdown(void); void sbi_clear_ipi(void); diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h index 1595c5b60cfd..8a303fb1ee3b 100644 --- a/arch/riscv/include/asm/sections.h +++ b/arch/riscv/include/asm/sections.h @@ -11,5 +11,6 @@ extern char _start[]; extern char _start_kernel[]; extern char __init_data_begin[], __init_data_end[]; extern char __init_text_begin[], __init_text_end[]; +extern char __alt_start[], __alt_end[]; #endif /* __ASM_SECTIONS_H */ diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h index 
6887b3d9f371..086f757e8ba3 100644 --- a/arch/riscv/include/asm/set_memory.h +++ b/arch/riscv/include/asm/set_memory.h @@ -26,6 +26,12 @@ static inline void protect_kernel_text_data(void) {} static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; } #endif +#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX) +void protect_kernel_linear_mapping_text_rodata(void); +#else +static inline void protect_kernel_linear_mapping_text_rodata(void) {} +#endif + int set_direct_map_invalid_noflush(struct page *page); int set_direct_map_default_noflush(struct page *page); bool kernel_page_present(struct page *page); diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h index df1f7c4cd433..a7d2811f3536 100644 --- a/arch/riscv/include/asm/smp.h +++ b/arch/riscv/include/asm/smp.h @@ -46,7 +46,7 @@ int riscv_hartid_to_cpuid(int hartid); void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out); /* Set custom IPI operations */ -void riscv_set_ipi_ops(struct riscv_ipi_ops *ops); +void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops); /* Clear IPI for current CPU */ void riscv_clear_ipi(void); @@ -92,7 +92,7 @@ static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in, cpumask_set_cpu(boot_cpu_hartid, out); } -static inline void riscv_set_ipi_ops(struct riscv_ipi_ops *ops) +static inline void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops) { } diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h index 5477e7ecb6e1..909049366555 100644 --- a/arch/riscv/include/asm/string.h +++ b/arch/riscv/include/asm/string.h @@ -23,5 +23,10 @@ extern asmlinkage void *__memmove(void *, const void *, size_t); #define memcpy(dst, src, len) __memcpy(dst, src, len) #define memset(s, c, n) __memset(s, c, n) #define memmove(dst, src, len) __memmove(dst, src, len) + +#ifndef __NO_FORTIFY +#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */ +#endif + #endif #endif /* _ASM_RISCV_STRING_H */ diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h index 49350c8bd7b0..b933b1583c9f 100644 --- a/arch/riscv/include/asm/syscall.h +++ b/arch/riscv/include/asm/syscall.h @@ -15,7 +15,7 @@ #include <linux/err.h> /* The array of function pointers for syscalls. */ -extern void *sys_call_table[]; +extern void * const sys_call_table[]; /* * Only the low 32 bits of orig_r0 are meaningful, so we return int. 
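Earlier in this series the riscv Kconfig gains ARCH_HAS_FORTIFY_SOURCE, and asm/string.h above adds a __NO_FORTIFY guard where memcpy()/memset() are remapped to the raw __memcpy()/__memset(), so CONFIG_FORTIFY_SOURCE can now be enabled for this architecture. A small illustration of the class of bug the fortified memcpy() then rejects (the function and buffer are invented for illustration):

#include <linux/string.h>

/* With CONFIG_FORTIFY_SOURCE, memcpy() knows the compile-time size of the
 * destination: a constant 16-byte copy into an 8-byte buffer fails the
 * build (__write_overflow), and a non-constant size that overflows a
 * known-size object trips fortify_panic() at run time. */
static void sample_fill(const void *src)
{
        char buf[8];

        memcpy(buf, src, 16);        /* flagged by FORTIFY_SOURCE */
}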
diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h index 81de51e6aa32..507cae273bc6 100644 --- a/arch/riscv/include/asm/timex.h +++ b/arch/riscv/include/asm/timex.h @@ -88,4 +88,6 @@ static inline int read_current_timer(unsigned long *timer_val) return 0; } +extern void time_init(void); + #endif /* _ASM_RISCV_TIMEX_H */ diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 394cfbccdcd9..c84218ad7afc 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -9,6 +9,7 @@ #include <linux/mm_types.h> #include <asm/smp.h> +#include <asm/errata_list.h> #ifdef CONFIG_MMU static inline void local_flush_tlb_all(void) @@ -19,7 +20,7 @@ static inline void local_flush_tlb_all(void) /* Flush one page from local TLB */ static inline void local_flush_tlb_page(unsigned long addr) { - __asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"); + ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory")); } #else /* CONFIG_MMU */ #define local_flush_tlb_all() do { } while (0) diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 824b2c9da75b..f314ff44c48d 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -306,7 +306,9 @@ do { \ * data types like structures or arrays. * * @ptr must have pointer-to-simple-variable type, and @x must be assignable - * to the result of dereferencing @ptr. + * to the result of dereferencing @ptr. The value of @x is copied to avoid + * re-ordering where @x is evaluated inside the block that enables user-space + * access (thus bypassing user space protection if @x is a function). * * Caller must check the pointer with access_ok() before calling this * function. 
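The uaccess.h kernel-doc above motivates the __put_user() change in the next hunk: the value expression must be evaluated before user-space access is enabled, otherwise a function call passed as @x would execute inside the window in which the kernel is allowed to touch user memory. A simplified, hypothetical user-space sketch of that ordering (enable_user_access(), disable_user_access() and make_value() stand in for the real CSR manipulation and caller code):

/* Hypothetical sketch of the "evaluate @x first" pattern: the value is
 * computed into a local while the access window is still closed, and
 * only a plain store happens between enable and disable. */
#include <stdio.h>

static int window_open;			/* stands in for the SUM/status bit */
static void enable_user_access(void)  { window_open = 1; }
static void disable_user_access(void) { window_open = 0; }

#define put_user_sketch(x, ptr)						\
({									\
	__typeof__(*(ptr)) __val = (x);	/* evaluated with window closed */ \
	enable_user_access();						\
	*(ptr) = __val;		/* only the plain store runs in the window */ \
	disable_user_access();						\
	0;								\
})

static long make_value(void)
{
	/* Arbitrary caller code: runs before enable_user_access(). */
	printf("window open while computing value: %d\n", window_open);
	return 42;
}

int main(void)
{
	long dst;
	int err = put_user_sketch(make_value(), &dst);

	printf("stored %ld, err %d\n", dst, err);
	return 0;
}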
@@ -316,12 +318,13 @@ do { \ #define __put_user(x, ptr) \ ({ \ __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ + __typeof__(*__gu_ptr) __val = (x); \ long __pu_err = 0; \ \ __chk_user_ptr(__gu_ptr); \ \ __enable_user_access(); \ - __put_user_nocheck(x, __gu_ptr, __pu_err); \ + __put_user_nocheck(__val, __gu_ptr, __pu_err); \ __disable_user_access(); \ \ __pu_err; \ @@ -372,7 +375,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) extern long strncpy_from_user(char *dest, const char __user *src, long count); -extern long __must_check strlen_user(const char __user *str); extern long __must_check strnlen_user(const char __user *str, long n); extern diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h new file mode 100644 index 000000000000..9d934215b3c8 --- /dev/null +++ b/arch/riscv/include/asm/vendorid_list.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 SiFive + */ +#ifndef ASM_VENDOR_LIST_H +#define ASM_VENDOR_LIST_H + +#define SIFIVE_VENDOR_ID 0x489 + +#endif diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 3dc0abde988a..d3081e4d9600 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -8,6 +8,11 @@ CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE) endif +CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,) + +ifdef CONFIG_KEXEC +AFLAGS_kexec_relocate.o := -mcmodel=medany -mno-relax +endif extra-y += head.o extra-y += vmlinux.lds @@ -54,6 +59,8 @@ obj-$(CONFIG_SMP) += cpu_ops_sbi.o endif obj-$(CONFIG_HOTPLUG_CPU) += cpu-hotplug.o obj-$(CONFIG_KGDB) += kgdb.o +obj-$(CONFIG_KEXEC) += kexec_relocate.o crash_save_regs.o machine_kexec.o +obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o diff --git a/arch/riscv/kernel/crash_dump.c b/arch/riscv/kernel/crash_dump.c new file mode 100644 index 000000000000..86cc0ada5752 --- /dev/null +++ b/arch/riscv/kernel/crash_dump.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This code comes from arch/arm64/kernel/crash_dump.c + * Created by: AKASHI Takahiro <takahiro.akashi@linaro.org> + * Copyright (C) 2017 Linaro Limited + */ + +#include <linux/crash_dump.h> +#include <linux/io.h> + +/** + * copy_oldmem_page() - copy one page from old kernel memory + * @pfn: page frame number to be copied + * @buf: buffer where the copied page is placed + * @csize: number of bytes to copy + * @offset: offset in bytes into the page + * @userbuf: if set, @buf is in a user address space + * + * This function copies one page from old kernel memory into buffer pointed by + * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes + * copied or negative error in case of failure. 
+ */ +ssize_t copy_oldmem_page(unsigned long pfn, char *buf, + size_t csize, unsigned long offset, + int userbuf) +{ + void *vaddr; + + if (!csize) + return 0; + + vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB); + if (!vaddr) + return -ENOMEM; + + if (userbuf) { + if (copy_to_user((char __user *)buf, vaddr + offset, csize)) { + memunmap(vaddr); + return -EFAULT; + } + } else + memcpy(buf, vaddr + offset, csize); + + memunmap(vaddr); + return csize; +} diff --git a/arch/riscv/kernel/crash_save_regs.S b/arch/riscv/kernel/crash_save_regs.S new file mode 100644 index 000000000000..7832fb763aba --- /dev/null +++ b/arch/riscv/kernel/crash_save_regs.S @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 FORTH-ICS/CARV + * Nick Kossifidis <mick@ics.forth.gr> + */ + +#include <asm/asm.h> /* For RISCV_* and REG_* macros */ +#include <asm/csr.h> /* For CSR_* macros */ +#include <asm/asm-offsets.h> /* For offsets on pt_regs */ +#include <linux/linkage.h> /* For SYM_* macros */ + +.section ".text" +SYM_CODE_START(riscv_crash_save_regs) + REG_S ra, PT_RA(a0) /* x1 */ + REG_S sp, PT_SP(a0) /* x2 */ + REG_S gp, PT_GP(a0) /* x3 */ + REG_S tp, PT_TP(a0) /* x4 */ + REG_S t0, PT_T0(a0) /* x5 */ + REG_S t1, PT_T1(a0) /* x6 */ + REG_S t2, PT_T2(a0) /* x7 */ + REG_S s0, PT_S0(a0) /* x8/fp */ + REG_S s1, PT_S1(a0) /* x9 */ + REG_S a0, PT_A0(a0) /* x10 */ + REG_S a1, PT_A1(a0) /* x11 */ + REG_S a2, PT_A2(a0) /* x12 */ + REG_S a3, PT_A3(a0) /* x13 */ + REG_S a4, PT_A4(a0) /* x14 */ + REG_S a5, PT_A5(a0) /* x15 */ + REG_S a6, PT_A6(a0) /* x16 */ + REG_S a7, PT_A7(a0) /* x17 */ + REG_S s2, PT_S2(a0) /* x18 */ + REG_S s3, PT_S3(a0) /* x19 */ + REG_S s4, PT_S4(a0) /* x20 */ + REG_S s5, PT_S5(a0) /* x21 */ + REG_S s6, PT_S6(a0) /* x22 */ + REG_S s7, PT_S7(a0) /* x23 */ + REG_S s8, PT_S8(a0) /* x24 */ + REG_S s9, PT_S9(a0) /* x25 */ + REG_S s10, PT_S10(a0) /* x26 */ + REG_S s11, PT_S11(a0) /* x27 */ + REG_S t3, PT_T3(a0) /* x28 */ + REG_S t4, PT_T4(a0) /* x29 */ + REG_S t5, PT_T5(a0) /* x30 */ + REG_S t6, PT_T6(a0) /* x31 */ + + csrr t1, CSR_STATUS + csrr t2, CSR_EPC + csrr t3, CSR_TVAL + csrr t4, CSR_CAUSE + + REG_S t1, PT_STATUS(a0) + REG_S t2, PT_EPC(a0) + REG_S t3, PT_BADADDR(a0) + REG_S t4, PT_CAUSE(a0) + ret +SYM_CODE_END(riscv_crash_save_regs) diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 744f3209c48d..80d5a9e017b0 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -12,6 +12,7 @@ #include <asm/unistd.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> +#include <asm/errata_list.h> #if !IS_ENABLED(CONFIG_PREEMPTION) .set resume_kernel, restore_all @@ -130,6 +131,9 @@ skip_context_tracking: */ andi t0, s1, SR_PIE beqz t0, 1f + /* kprobes, entered via ebreak, must have interrupts disabled. 
*/ + li t0, EXC_BREAKPOINT + beq s4, t0, 1f #ifdef CONFIG_TRACE_IRQFLAGS call trace_hardirqs_on #endif @@ -447,10 +451,11 @@ ENDPROC(__switch_to) #endif .section ".rodata" + .align LGREG /* Exception vector table */ ENTRY(excp_vect_table) RISCV_PTR do_trap_insn_misaligned - RISCV_PTR do_trap_insn_fault + ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault) RISCV_PTR do_trap_insn_illegal RISCV_PTR do_trap_break RISCV_PTR do_trap_load_misaligned @@ -461,7 +466,8 @@ ENTRY(excp_vect_table) RISCV_PTR do_trap_ecall_s RISCV_PTR do_trap_unknown RISCV_PTR do_trap_ecall_m - RISCV_PTR do_page_fault /* instruction page fault */ + /* instruciton page fault */ + ALT_PAGE_FAULT(RISCV_PTR do_page_fault) RISCV_PTR do_page_fault /* load page fault */ RISCV_PTR do_trap_unknown RISCV_PTR do_page_fault /* store page fault */ diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index f5a9bad86e58..89cc58ab52b4 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -9,11 +9,23 @@ #include <linux/linkage.h> #include <asm/thread_info.h> #include <asm/page.h> +#include <asm/pgtable.h> #include <asm/csr.h> #include <asm/hwcap.h> #include <asm/image.h> #include "efi-header.S" +#ifdef CONFIG_XIP_KERNEL +.macro XIP_FIXUP_OFFSET reg + REG_L t0, _xip_fixup + add \reg, \reg, t0 +.endm +_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET +#else +.macro XIP_FIXUP_OFFSET reg +.endm +#endif /* CONFIG_XIP_KERNEL */ + __HEAD ENTRY(_start) /* @@ -69,7 +81,9 @@ pe_head_start: #ifdef CONFIG_MMU relocate: /* Relocate return address */ - li a1, PAGE_OFFSET + la a1, kernel_virt_addr + XIP_FIXUP_OFFSET a1 + REG_L a1, 0(a1) la a2, _start sub a1, a1, a2 add ra, ra, a1 @@ -91,6 +105,7 @@ relocate: * to ensure the new translations are in use. */ la a0, trampoline_pg_dir + XIP_FIXUP_OFFSET a0 srl a0, a0, PAGE_SHIFT or a0, a0, a1 sfence.vma @@ -144,7 +159,9 @@ secondary_start_sbi: slli a3, a0, LGREG la a4, __cpu_up_stack_pointer + XIP_FIXUP_OFFSET a4 la a5, __cpu_up_task_pointer + XIP_FIXUP_OFFSET a5 add a4, a3, a4 add a5, a3, a5 REG_L sp, (a4) @@ -156,6 +173,7 @@ secondary_start_common: #ifdef CONFIG_MMU /* Enable virtual memory and relocate to virtual address */ la a0, swapper_pg_dir + XIP_FIXUP_OFFSET a0 call relocate #endif call setup_trap_vector @@ -236,12 +254,33 @@ pmp_done: .Lgood_cores: #endif +#ifndef CONFIG_XIP_KERNEL /* Pick one hart to run the main boot sequence */ la a3, hart_lottery li a2, 1 amoadd.w a3, a2, (a3) bnez a3, .Lsecondary_start +#else + /* hart_lottery in flash contains a magic number */ + la a3, hart_lottery + mv a2, a3 + XIP_FIXUP_OFFSET a2 + lw t1, (a3) + amoswap.w t0, t1, (a2) + /* first time here if hart_lottery in RAM is not set */ + beq t0, t1, .Lsecondary_start + + la sp, _end + THREAD_SIZE + XIP_FIXUP_OFFSET sp + mv s0, a0 + call __copy_data + + /* Restore a0 copy */ + mv a0, s0 +#endif + +#ifndef CONFIG_XIP_KERNEL /* Clear BSS for flat non-ELF images */ la a3, __bss_start la a4, __bss_stop @@ -251,15 +290,18 @@ clear_bss: add a3, a3, RISCV_SZPTR blt a3, a4, clear_bss clear_bss_done: - +#endif /* Save hart ID and DTB physical address */ mv s0, a0 mv s1, a1 + la a2, boot_cpu_hartid + XIP_FIXUP_OFFSET a2 REG_S a0, (a2) /* Initialize page tables and relocate to virtual addresses */ la sp, init_thread_union + THREAD_SIZE + XIP_FIXUP_OFFSET sp #ifdef CONFIG_BUILTIN_DTB la a0, __dtb_start #else @@ -268,6 +310,7 @@ clear_bss_done: call setup_vm #ifdef CONFIG_MMU la a0, early_pg_dir + XIP_FIXUP_OFFSET a0 call relocate #endif /* CONFIG_MMU */ @@ -292,7 +335,9 @@ 
clear_bss_done: slli a3, a0, LGREG la a1, __cpu_up_stack_pointer + XIP_FIXUP_OFFSET a1 la a2, __cpu_up_task_pointer + XIP_FIXUP_OFFSET a2 add a1, a3, a1 add a2, a3, a2 diff --git a/arch/riscv/kernel/head.h b/arch/riscv/kernel/head.h index b48dda3d04f6..aabbc3ac3e48 100644 --- a/arch/riscv/kernel/head.h +++ b/arch/riscv/kernel/head.h @@ -12,6 +12,9 @@ extern atomic_t hart_lottery; asmlinkage void do_page_fault(struct pt_regs *regs); asmlinkage void __init setup_vm(uintptr_t dtb_pa); +#ifdef CONFIG_XIP_KERNEL +asmlinkage void __init __copy_data(void); +#endif extern void *__cpu_up_stack_pointer[]; extern void *__cpu_up_task_pointer[]; diff --git a/arch/riscv/kernel/kexec_relocate.S b/arch/riscv/kernel/kexec_relocate.S new file mode 100644 index 000000000000..88c3beabe9b4 --- /dev/null +++ b/arch/riscv/kernel/kexec_relocate.S @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 FORTH-ICS/CARV + * Nick Kossifidis <mick@ics.forth.gr> + */ + +#include <asm/asm.h> /* For RISCV_* and REG_* macros */ +#include <asm/csr.h> /* For CSR_* macros */ +#include <asm/page.h> /* For PAGE_SIZE */ +#include <linux/linkage.h> /* For SYM_* macros */ + +.section ".rodata" +SYM_CODE_START(riscv_kexec_relocate) + + /* + * s0: Pointer to the current entry + * s1: (const) Phys address to jump to after relocation + * s2: (const) Phys address of the FDT image + * s3: (const) The hartid of the current hart + * s4: Pointer to the destination address for the relocation + * s5: (const) Number of words per page + * s6: (const) 1, used for subtraction + * s7: (const) va_pa_offset, used when switching MMU off + * s8: (const) Physical address of the main loop + * s9: (debug) indirection page counter + * s10: (debug) entry counter + * s11: (debug) copied words counter + */ + mv s0, a0 + mv s1, a1 + mv s2, a2 + mv s3, a3 + mv s4, zero + li s5, (PAGE_SIZE / RISCV_SZPTR) + li s6, 1 + mv s7, a4 + mv s8, zero + mv s9, zero + mv s10, zero + mv s11, zero + + /* Disable / cleanup interrupts */ + csrw CSR_SIE, zero + csrw CSR_SIP, zero + + /* + * When we switch SATP.MODE to "Bare" we'll only + * play with physical addresses. However the first time + * we try to jump somewhere, the offset on the jump + * will be relative to pc which will still be on VA. To + * deal with this we set stvec to the physical address at + * the start of the loop below so that we jump there in + * any case. + */ + la s8, 1f + sub s8, s8, s7 + csrw CSR_STVEC, s8 + + /* Process entries in a loop */ +.align 2 +1: + addi s10, s10, 1 + REG_L t0, 0(s0) /* t0 = *image->entry */ + addi s0, s0, RISCV_SZPTR /* image->entry++ */ + + /* IND_DESTINATION entry ? -> save destination address */ + andi t1, t0, 0x1 + beqz t1, 2f + andi s4, t0, ~0x1 + j 1b + +2: + /* IND_INDIRECTION entry ? -> update next entry ptr (PA) */ + andi t1, t0, 0x2 + beqz t1, 2f + andi s0, t0, ~0x2 + addi s9, s9, 1 + csrw CSR_SATP, zero + jalr zero, s8, 0 + +2: + /* IND_DONE entry ? -> jump to done label */ + andi t1, t0, 0x4 + beqz t1, 2f + j 4f + +2: + /* + * IND_SOURCE entry ? -> copy page word by word to the + * destination address we got from IND_DESTINATION + */ + andi t1, t0, 0x8 + beqz t1, 1b /* Unknown entry type, ignore it */ + andi t0, t0, ~0x8 + mv t3, s5 /* i = num words per page */ +3: /* copy loop */ + REG_L t1, (t0) /* t1 = *src_ptr */ + REG_S t1, (s4) /* *dst_ptr = *src_ptr */ + addi t0, t0, RISCV_SZPTR /* stc_ptr++ */ + addi s4, s4, RISCV_SZPTR /* dst_ptr++ */ + sub t3, t3, s6 /* i-- */ + addi s11, s11, 1 /* c++ */ + beqz t3, 1b /* copy done ? 
*/ + j 3b + +4: + /* Pass the arguments to the next kernel / Cleanup*/ + mv a0, s3 + mv a1, s2 + mv a2, s1 + + /* Cleanup */ + mv a3, zero + mv a4, zero + mv a5, zero + mv a6, zero + mv a7, zero + + mv s0, zero + mv s1, zero + mv s2, zero + mv s3, zero + mv s4, zero + mv s5, zero + mv s6, zero + mv s7, zero + mv s8, zero + mv s9, zero + mv s10, zero + mv s11, zero + + mv t0, zero + mv t1, zero + mv t2, zero + mv t3, zero + mv t4, zero + mv t5, zero + mv t6, zero + csrw CSR_SEPC, zero + csrw CSR_SCAUSE, zero + csrw CSR_SSCRATCH, zero + + /* + * Make sure the relocated code is visible + * and jump to the new kernel + */ + fence.i + + jalr zero, a2, 0 + +SYM_CODE_END(riscv_kexec_relocate) +riscv_kexec_relocate_end: + + +/* Used for jumping to crashkernel */ +.section ".text" +SYM_CODE_START(riscv_kexec_norelocate) + /* + * s0: (const) Phys address to jump to + * s1: (const) Phys address of the FDT image + * s2: (const) The hartid of the current hart + * s3: (const) va_pa_offset, used when switching MMU off + */ + mv s0, a1 + mv s1, a2 + mv s2, a3 + mv s3, a4 + + /* Disable / cleanup interrupts */ + csrw CSR_SIE, zero + csrw CSR_SIP, zero + + /* Switch to physical addressing */ + la s4, 1f + sub s4, s4, s3 + csrw CSR_STVEC, s4 + csrw CSR_SATP, zero + +.align 2 +1: + /* Pass the arguments to the next kernel / Cleanup*/ + mv a0, s2 + mv a1, s1 + mv a2, s0 + + /* Cleanup */ + mv a3, zero + mv a4, zero + mv a5, zero + mv a6, zero + mv a7, zero + + mv s0, zero + mv s1, zero + mv s2, zero + mv s3, zero + mv s4, zero + mv s5, zero + mv s6, zero + mv s7, zero + mv s8, zero + mv s9, zero + mv s10, zero + mv s11, zero + + mv t0, zero + mv t1, zero + mv t2, zero + mv t3, zero + mv t4, zero + mv t5, zero + mv t6, zero + csrw CSR_SEPC, zero + csrw CSR_SCAUSE, zero + csrw CSR_SSCRATCH, zero + + jalr zero, a2, 0 +SYM_CODE_END(riscv_kexec_norelocate) + +.section ".rodata" +SYM_DATA(riscv_kexec_relocate_size, + .long riscv_kexec_relocate_end - riscv_kexec_relocate) + diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c new file mode 100644 index 000000000000..cc048143fba5 --- /dev/null +++ b/arch/riscv/kernel/machine_kexec.c @@ -0,0 +1,193 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 FORTH-ICS/CARV + * Nick Kossifidis <mick@ics.forth.gr> + */ + +#include <linux/kexec.h> +#include <asm/kexec.h> /* For riscv_kexec_* symbol defines */ +#include <linux/smp.h> /* For smp_send_stop () */ +#include <asm/cacheflush.h> /* For local_flush_icache_all() */ +#include <asm/barrier.h> /* For smp_wmb() */ +#include <asm/page.h> /* For PAGE_MASK */ +#include <linux/libfdt.h> /* For fdt_check_header() */ +#include <asm/set_memory.h> /* For set_memory_x() */ +#include <linux/compiler.h> /* For unreachable() */ +#include <linux/cpu.h> /* For cpu_down() */ + +/** + * kexec_image_info - Print received image details + */ +static void +kexec_image_info(const struct kimage *image) +{ + unsigned long i; + + pr_debug("Kexec image info:\n"); + pr_debug("\ttype: %d\n", image->type); + pr_debug("\tstart: %lx\n", image->start); + pr_debug("\thead: %lx\n", image->head); + pr_debug("\tnr_segments: %lu\n", image->nr_segments); + + for (i = 0; i < image->nr_segments; i++) { + pr_debug("\t segment[%lu]: %016lx - %016lx", i, + image->segment[i].mem, + image->segment[i].mem + image->segment[i].memsz); + pr_debug("\t\t0x%lx bytes, %lu pages\n", + (unsigned long) image->segment[i].memsz, + (unsigned long) image->segment[i].memsz / PAGE_SIZE); + } +} + +/** + * machine_kexec_prepare - Initialize 
kexec + * + * This function is called from do_kexec_load, when the user has + * provided us with an image to be loaded. Its goal is to validate + * the image and prepare the control code buffer as needed. + * Note that kimage_alloc_init has already been called and the + * control buffer has already been allocated. + */ +int +machine_kexec_prepare(struct kimage *image) +{ + struct kimage_arch *internal = &image->arch; + struct fdt_header fdt = {0}; + void *control_code_buffer = NULL; + unsigned int control_code_buffer_sz = 0; + int i = 0; + + kexec_image_info(image); + + /* Find the Flattened Device Tree and save its physical address */ + for (i = 0; i < image->nr_segments; i++) { + if (image->segment[i].memsz <= sizeof(fdt)) + continue; + + if (copy_from_user(&fdt, image->segment[i].buf, sizeof(fdt))) + continue; + + if (fdt_check_header(&fdt)) + continue; + + internal->fdt_addr = (unsigned long) image->segment[i].mem; + break; + } + + if (!internal->fdt_addr) { + pr_err("Device tree not included in the provided image\n"); + return -EINVAL; + } + + /* Copy the assembler code for relocation to the control page */ + if (image->type != KEXEC_TYPE_CRASH) { + control_code_buffer = page_address(image->control_code_page); + control_code_buffer_sz = page_size(image->control_code_page); + + if (unlikely(riscv_kexec_relocate_size > control_code_buffer_sz)) { + pr_err("Relocation code doesn't fit within a control page\n"); + return -EINVAL; + } + + memcpy(control_code_buffer, riscv_kexec_relocate, + riscv_kexec_relocate_size); + + /* Mark the control page executable */ + set_memory_x((unsigned long) control_code_buffer, 1); + } + + return 0; +} + + +/** + * machine_kexec_cleanup - Cleanup any leftovers from + * machine_kexec_prepare + * + * This function is called by kimage_free to handle any arch-specific + * allocations done on machine_kexec_prepare. Since we didn't do any + * allocations there, this is just an empty function. Note that the + * control buffer is freed by kimage_free. + */ +void +machine_kexec_cleanup(struct kimage *image) +{ +} + + +/* + * machine_shutdown - Prepare for a kexec reboot + * + * This function is called by kernel_kexec just before machine_kexec + * below. Its goal is to prepare the rest of the system (the other + * harts and possibly devices etc) for a kexec reboot. + */ +void machine_shutdown(void) +{ + /* + * No more interrupts on this hart + * until we are back up. + */ + local_irq_disable(); + +#if defined(CONFIG_HOTPLUG_CPU) + smp_shutdown_nonboot_cpus(smp_processor_id()); +#endif +} + +/** + * machine_crash_shutdown - Prepare to kexec after a kernel crash + * + * This function is called by crash_kexec just before machine_kexec + * below and its goal is similar to machine_shutdown, but in case of + * a kernel crash. Since we don't handle such cases yet, this function + * is empty. + */ +void +machine_crash_shutdown(struct pt_regs *regs) +{ + crash_save_cpu(regs, smp_processor_id()); + machine_shutdown(); + pr_info("Starting crashdump kernel...\n"); +} + +/** + * machine_kexec - Jump to the loaded kimage + * + * This function is called by kernel_kexec which is called by the + * reboot system call when the reboot cmd is LINUX_REBOOT_CMD_KEXEC, + * or by crash_kernel which is called by the kernel's arch-specific + * trap handler in case of a kernel panic. It's the final stage of + * the kexec process where the pre-loaded kimage is ready to be + * executed. We assume at this point that all other harts are + * suspended and this hart will be the new boot hart. 
+ */ +void __noreturn +machine_kexec(struct kimage *image) +{ + struct kimage_arch *internal = &image->arch; + unsigned long jump_addr = (unsigned long) image->start; + unsigned long first_ind_entry = (unsigned long) &image->head; + unsigned long this_hart_id = raw_smp_processor_id(); + unsigned long fdt_addr = internal->fdt_addr; + void *control_code_buffer = page_address(image->control_code_page); + riscv_kexec_method kexec_method = NULL; + + if (image->type != KEXEC_TYPE_CRASH) + kexec_method = control_code_buffer; + else + kexec_method = (riscv_kexec_method) &riscv_kexec_norelocate; + + pr_notice("Will call new kernel at %08lx from hart id %lx\n", + jump_addr, this_hart_id); + pr_notice("FDT image at %08lx\n", fdt_addr); + + /* Make sure the relocation code is visible to the hart */ + local_flush_icache_all(); + + /* Jump to the relocation code */ + pr_notice("Bye...\n"); + kexec_method(first_ind_entry, jump_addr, fdt_addr, + this_hart_id, va_pa_offset); + unreachable(); +} diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S index 8a5593ff9ff3..6d462681c9c0 100644 --- a/arch/riscv/kernel/mcount.S +++ b/arch/riscv/kernel/mcount.S @@ -47,8 +47,8 @@ ENTRY(ftrace_stub) #ifdef CONFIG_DYNAMIC_FTRACE - .global _mcount - .set _mcount, ftrace_stub + .global MCOUNT_NAME + .set MCOUNT_NAME, ftrace_stub #endif ret ENDPROC(ftrace_stub) @@ -78,7 +78,7 @@ ENDPROC(return_to_handler) #endif #ifndef CONFIG_DYNAMIC_FTRACE -ENTRY(_mcount) +ENTRY(MCOUNT_NAME) la t4, ftrace_stub #ifdef CONFIG_FUNCTION_GRAPH_TRACER la t0, ftrace_graph_return @@ -124,6 +124,6 @@ do_trace: jalr t5 RESTORE_ABI_STATE ret -ENDPROC(_mcount) +ENDPROC(MCOUNT_NAME) #endif -EXPORT_SYMBOL(_mcount) +EXPORT_SYMBOL(MCOUNT_NAME) diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 104fba889cf7..68a9e3d1fe16 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -408,13 +408,11 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, } #if defined(CONFIG_MMU) && defined(CONFIG_64BIT) -#define VMALLOC_MODULE_START \ - max(PFN_ALIGN((unsigned long)&_end - SZ_2G), VMALLOC_START) void *module_alloc(unsigned long size) { - return __vmalloc_node_range(size, 1, VMALLOC_MODULE_START, - VMALLOC_END, GFP_KERNEL, - PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, + return __vmalloc_node_range(size, 1, MODULES_VADDR, + MODULES_END, GFP_KERNEL, + PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0)); } #endif diff --git a/arch/riscv/kernel/probes/ftrace.c b/arch/riscv/kernel/probes/ftrace.c index e6372490aa0b..aab85a82f419 100644 --- a/arch/riscv/kernel/probes/ftrace.c +++ b/arch/riscv/kernel/probes/ftrace.c @@ -2,39 +2,47 @@ #include <linux/kprobes.h> -/* Ftrace callback handler for kprobes -- called under preepmt disabed */ +/* Ftrace callback handler for kprobes -- called under preepmt disabled */ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, - struct ftrace_ops *ops, struct ftrace_regs *regs) + struct ftrace_ops *ops, struct ftrace_regs *fregs) { struct kprobe *p; + struct pt_regs *regs; struct kprobe_ctlblk *kcb; + int bit; + bit = ftrace_test_recursion_trylock(ip, parent_ip); + if (bit < 0) + return; + + preempt_disable_notrace(); p = get_kprobe((kprobe_opcode_t *)ip); if (unlikely(!p) || kprobe_disabled(p)) - return; + goto out; + regs = ftrace_get_regs(fregs); kcb = get_kprobe_ctlblk(); if (kprobe_running()) { kprobes_inc_nmissed_count(p); } else { - unsigned long orig_ip = instruction_pointer(&(regs->regs)); + unsigned long orig_ip = instruction_pointer(regs); - 
instruction_pointer_set(&(regs->regs), ip); + instruction_pointer_set(regs, ip); __this_cpu_write(current_kprobe, p); kcb->kprobe_status = KPROBE_HIT_ACTIVE; - if (!p->pre_handler || !p->pre_handler(p, &(regs->regs))) { + if (!p->pre_handler || !p->pre_handler(p, regs)) { /* * Emulate singlestep (and also recover regs->pc) * as if there is a nop */ - instruction_pointer_set(&(regs->regs), + instruction_pointer_set(regs, (unsigned long)p->addr + MCOUNT_INSN_SIZE); if (unlikely(p->post_handler)) { kcb->kprobe_status = KPROBE_HIT_SSDONE; - p->post_handler(p, &(regs->regs), 0); + p->post_handler(p, regs, 0); } - instruction_pointer_set(&(regs->regs), orig_ip); + instruction_pointer_set(regs, orig_ip); } /* @@ -43,6 +51,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, */ __this_cpu_write(current_kprobe, NULL); } +out: + preempt_enable_notrace(); + ftrace_test_recursion_unlock(bit); } NOKPROBE_SYMBOL(kprobe_ftrace_handler); diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c index a2ec18662fee..10b965c34536 100644 --- a/arch/riscv/kernel/probes/kprobes.c +++ b/arch/riscv/kernel/probes/kprobes.c @@ -84,6 +84,14 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) return 0; } +void *alloc_insn_page(void) +{ + return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END, + GFP_KERNEL, PAGE_KERNEL_READ_EXEC, + VM_FLUSH_RESET_PERMS, NUMA_NO_NODE, + __builtin_return_address(0)); +} + /* install breakpoint in text */ void __kprobes arch_arm_kprobe(struct kprobe *p) { @@ -256,13 +264,14 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr) * normal page fault. */ regs->epc = (unsigned long) cur->addr; - if (!instruction_pointer(regs)) - BUG(); + BUG_ON(!instruction_pointer(regs)); if (kcb->kprobe_status == KPROBE_REENTER) restore_previous_kprobe(kcb); - else + else { + kprobes_restore_local_irqflag(kcb, regs); reset_current_kprobe(); + } break; case KPROBE_HIT_ACTIVE: diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index 6f728e731bed..f9cd57c9c67d 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -10,6 +10,7 @@ #include <linux/cpu.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/sched/task_stack.h> #include <linux/tick.h> #include <linux/ptrace.h> diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c index f4a7db3d309e..7402a417f38e 100644 --- a/arch/riscv/kernel/sbi.c +++ b/arch/riscv/kernel/sbi.c @@ -11,14 +11,14 @@ #include <asm/smp.h> /* default SBI version is 0.1 */ -unsigned long sbi_spec_version = SBI_SPEC_VERSION_DEFAULT; +unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT; EXPORT_SYMBOL(sbi_spec_version); -static void (*__sbi_set_timer)(uint64_t stime); -static int (*__sbi_send_ipi)(const unsigned long *hart_mask); +static void (*__sbi_set_timer)(uint64_t stime) __ro_after_init; +static int (*__sbi_send_ipi)(const unsigned long *hart_mask) __ro_after_init; static int (*__sbi_rfence)(int fid, const unsigned long *hart_mask, unsigned long start, unsigned long size, - unsigned long arg4, unsigned long arg5); + unsigned long arg4, unsigned long arg5) __ro_after_init; struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, unsigned long arg1, unsigned long arg2, @@ -116,7 +116,7 @@ void sbi_clear_ipi(void) EXPORT_SYMBOL(sbi_clear_ipi); /** - * sbi_set_timer_v01() - Program the timer for next timer event. 
+ * __sbi_set_timer_v01() - Program the timer for next timer event. * @stime_value: The value after which next timer event should fire. * * Return: None @@ -547,6 +547,21 @@ static inline long sbi_get_firmware_version(void) return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION); } +long sbi_get_mvendorid(void) +{ + return __sbi_base_ecall(SBI_EXT_BASE_GET_MVENDORID); +} + +long sbi_get_marchid(void) +{ + return __sbi_base_ecall(SBI_EXT_BASE_GET_MARCHID); +} + +long sbi_get_mimpid(void) +{ + return __sbi_base_ecall(SBI_EXT_BASE_GET_MIMPID); +} + static void sbi_send_cpumask_ipi(const struct cpumask *target) { struct cpumask hartid_mask; @@ -556,7 +571,7 @@ static void sbi_send_cpumask_ipi(const struct cpumask *target) sbi_send_ipi(cpumask_bits(&hartid_mask)); } -static struct riscv_ipi_ops sbi_ipi_ops = { +static const struct riscv_ipi_ops sbi_ipi_ops = { .ipi_inject = sbi_send_cpumask_ipi }; @@ -577,19 +592,19 @@ void __init sbi_init(void) sbi_get_firmware_id(), sbi_get_firmware_version()); if (sbi_probe_extension(SBI_EXT_TIME) > 0) { __sbi_set_timer = __sbi_set_timer_v02; - pr_info("SBI v0.2 TIME extension detected\n"); + pr_info("SBI TIME extension detected\n"); } else { __sbi_set_timer = __sbi_set_timer_v01; } if (sbi_probe_extension(SBI_EXT_IPI) > 0) { __sbi_send_ipi = __sbi_send_ipi_v02; - pr_info("SBI v0.2 IPI extension detected\n"); + pr_info("SBI IPI extension detected\n"); } else { __sbi_send_ipi = __sbi_send_ipi_v01; } if (sbi_probe_extension(SBI_EXT_RFENCE) > 0) { __sbi_rfence = __sbi_rfence_v02; - pr_info("SBI v0.2 RFENCE extension detected\n"); + pr_info("SBI RFENCE extension detected\n"); } else { __sbi_rfence = __sbi_rfence_v01; } diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index e85bacff1b50..03901d3a8b02 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -20,9 +20,11 @@ #include <linux/swiotlb.h> #include <linux/smp.h> #include <linux/efi.h> +#include <linux/crash_dump.h> #include <asm/cpu_ops.h> #include <asm/early_ioremap.h> +#include <asm/pgtable.h> #include <asm/setup.h> #include <asm/set_memory.h> #include <asm/sections.h> @@ -50,7 +52,11 @@ struct screen_info screen_info __section(".data") = { * This is used before the kernel initializes the BSS so it can't be in the * BSS. */ -atomic_t hart_lottery __section(".sdata"); +atomic_t hart_lottery __section(".sdata") +#ifdef CONFIG_XIP_KERNEL += ATOMIC_INIT(0xC001BEEF) +#endif +; unsigned long boot_cpu_hartid; static DEFINE_PER_CPU(struct cpu, cpu_devices); @@ -60,10 +66,14 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); * also add "System RAM" regions for compatibility with other * archs, and the rest of the known regions for completeness. 
*/ +static struct resource kimage_res = { .name = "Kernel image", }; static struct resource code_res = { .name = "Kernel code", }; static struct resource data_res = { .name = "Kernel data", }; static struct resource rodata_res = { .name = "Kernel rodata", }; static struct resource bss_res = { .name = "Kernel bss", }; +#ifdef CONFIG_CRASH_DUMP +static struct resource elfcorehdr_res = { .name = "ELF Core hdr", }; +#endif static int __init add_resource(struct resource *parent, struct resource *res) @@ -80,45 +90,54 @@ static int __init add_resource(struct resource *parent, return 1; } -static int __init add_kernel_resources(struct resource *res) +static int __init add_kernel_resources(void) { int ret = 0; /* * The memory region of the kernel image is continuous and - * was reserved on setup_bootmem, find it here and register - * it as a resource, then register the various segments of - * the image as child nodes + * was reserved on setup_bootmem, register it here as a + * resource, with the various segments of the image as + * child nodes. */ - if (!(res->start <= code_res.start && res->end >= data_res.end)) - return 0; - res->name = "Kernel image"; - res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + code_res.start = __pa_symbol(_text); + code_res.end = __pa_symbol(_etext) - 1; + code_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; - /* - * We removed a part of this region on setup_bootmem so - * we need to expand the resource for the bss to fit in. - */ - res->end = bss_res.end; + rodata_res.start = __pa_symbol(__start_rodata); + rodata_res.end = __pa_symbol(__end_rodata) - 1; + rodata_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + + data_res.start = __pa_symbol(_data); + data_res.end = __pa_symbol(_edata) - 1; + data_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; - ret = add_resource(&iomem_resource, res); + bss_res.start = __pa_symbol(__bss_start); + bss_res.end = __pa_symbol(__bss_stop) - 1; + bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + + kimage_res.start = code_res.start; + kimage_res.end = bss_res.end; + kimage_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + + ret = add_resource(&iomem_resource, &kimage_res); if (ret < 0) return ret; - ret = add_resource(res, &code_res); + ret = add_resource(&kimage_res, &code_res); if (ret < 0) return ret; - ret = add_resource(res, &rodata_res); + ret = add_resource(&kimage_res, &rodata_res); if (ret < 0) return ret; - ret = add_resource(res, &data_res); + ret = add_resource(&kimage_res, &data_res); if (ret < 0) return ret; - ret = add_resource(res, &bss_res); + ret = add_resource(&kimage_res, &bss_res); return ret; } @@ -129,53 +148,59 @@ static void __init init_resources(void) struct resource *res = NULL; struct resource *mem_res = NULL; size_t mem_res_sz = 0; - int ret = 0, i = 0; - - code_res.start = __pa_symbol(_text); - code_res.end = __pa_symbol(_etext) - 1; - code_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; - - rodata_res.start = __pa_symbol(__start_rodata); - rodata_res.end = __pa_symbol(__end_rodata) - 1; - rodata_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; - - data_res.start = __pa_symbol(_data); - data_res.end = __pa_symbol(_edata) - 1; - data_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + int num_resources = 0, res_idx = 0; + int ret = 0; - bss_res.start = __pa_symbol(__bss_start); - bss_res.end = __pa_symbol(__bss_stop) - 1; - bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + /* + 1 as memblock_alloc() might increase memblock.reserved.cnt */ + num_resources 
= memblock.memory.cnt + memblock.reserved.cnt + 1; + res_idx = num_resources - 1; - mem_res_sz = (memblock.memory.cnt + memblock.reserved.cnt) * sizeof(*mem_res); + mem_res_sz = num_resources * sizeof(*mem_res); mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES); if (!mem_res) panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz); + /* * Start by adding the reserved regions, if they overlap * with /memory regions, insert_resource later on will take * care of it. */ + ret = add_kernel_resources(); + if (ret < 0) + goto error; + +#ifdef CONFIG_KEXEC_CORE + if (crashk_res.start != crashk_res.end) { + ret = add_resource(&iomem_resource, &crashk_res); + if (ret < 0) + goto error; + } +#endif + +#ifdef CONFIG_CRASH_DUMP + if (elfcorehdr_size > 0) { + elfcorehdr_res.start = elfcorehdr_addr; + elfcorehdr_res.end = elfcorehdr_addr + elfcorehdr_size - 1; + elfcorehdr_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + add_resource(&iomem_resource, &elfcorehdr_res); + } +#endif + for_each_reserved_mem_region(region) { - res = &mem_res[i++]; + res = &mem_res[res_idx--]; res->name = "Reserved"; res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region)); res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1; - ret = add_kernel_resources(res); - if (ret < 0) - goto error; - else if (ret) - continue; - /* * Ignore any other reserved regions within * system memory. */ if (memblock_is_memory(res->start)) { - memblock_free((phys_addr_t) res, sizeof(struct resource)); + /* Re-use this pre-allocated resource */ + res_idx++; continue; } @@ -186,7 +211,7 @@ static void __init init_resources(void) /* Add /memory regions to the resource tree */ for_each_mem_region(region) { - res = &mem_res[i++]; + res = &mem_res[res_idx--]; if (unlikely(memblock_is_nomap(region))) { res->name = "Reserved"; @@ -204,6 +229,9 @@ static void __init init_resources(void) goto error; } + /* Clean-up any unused pre-allocated resources */ + mem_res_sz = (num_resources - res_idx + 1) * sizeof(*mem_res); + memblock_free((phys_addr_t) mem_res, mem_res_sz); return; error: @@ -250,21 +278,24 @@ void __init setup_arch(char **cmdline_p) efi_init(); setup_bootmem(); paging_init(); - init_resources(); #if IS_ENABLED(CONFIG_BUILTIN_DTB) unflatten_and_copy_device_tree(); #else - if (early_init_dt_verify(__va(dtb_early_pa))) + if (early_init_dt_verify(__va(XIP_FIXUP(dtb_early_pa)))) unflatten_device_tree(); else pr_err("No DTB found in kernel mappings\n"); #endif misc_mem_init(); + init_resources(); sbi_init(); - if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) + if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) { protect_kernel_text_data(); + protect_kernel_linear_mapping_text_rodata(); + } + #ifdef CONFIG_SWIOTLB swiotlb_init(1); #endif diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c index ea028d9e0d24..921d9d7df400 100644 --- a/arch/riscv/kernel/smp.c +++ b/arch/riscv/kernel/smp.c @@ -9,6 +9,7 @@ */ #include <linux/cpu.h> +#include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/profile.h> @@ -27,10 +28,11 @@ enum ipi_message_type { IPI_CALL_FUNC, IPI_CPU_STOP, IPI_IRQ_WORK, + IPI_TIMER, IPI_MAX }; -unsigned long __cpuid_to_hartid_map[NR_CPUS] = { +unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = { [0 ... 
NR_CPUS-1] = INVALID_HARTID }; @@ -54,7 +56,7 @@ int riscv_hartid_to_cpuid(int hartid) return i; pr_err("Couldn't find cpu id for hartid [%d]\n", hartid); - return i; + return -ENOENT; } void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out) @@ -85,9 +87,9 @@ static void ipi_stop(void) wait_for_interrupt(); } -static struct riscv_ipi_ops *ipi_ops; +static const struct riscv_ipi_ops *ipi_ops __ro_after_init; -void riscv_set_ipi_ops(struct riscv_ipi_ops *ops) +void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops) { ipi_ops = ops; } @@ -176,6 +178,12 @@ void handle_IPI(struct pt_regs *regs) irq_work_run(); } +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST + if (ops & (1 << IPI_TIMER)) { + stats[IPI_TIMER]++; + tick_receive_broadcast(); + } +#endif BUG_ON((ops >> IPI_MAX) != 0); /* Order data access and bit testing. */ @@ -192,6 +200,7 @@ static const char * const ipi_names[] = { [IPI_CALL_FUNC] = "Function call interrupts", [IPI_CPU_STOP] = "CPU stop interrupts", [IPI_IRQ_WORK] = "IRQ work interrupts", + [IPI_TIMER] = "Timer broadcast interrupts", }; void show_ipi_stats(struct seq_file *p, int prec) @@ -217,6 +226,13 @@ void arch_send_call_function_single_ipi(int cpu) send_ipi_single(cpu, IPI_CALL_FUNC); } +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST +void tick_broadcast(const struct cpumask *mask) +{ + send_ipi_mask(mask, IPI_TIMER); +} +#endif + void smp_send_stop(void) { unsigned long timeout; diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index 5e276c25646f..9a408e2942ac 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -32,6 +32,7 @@ #include <asm/sections.h> #include <asm/sbi.h> #include <asm/smp.h> +#include <asm/alternative.h> #include "head.h" @@ -40,6 +41,9 @@ static DECLARE_COMPLETION(cpu_running); void __init smp_prepare_boot_cpu(void) { init_cpu_topology(); +#ifdef CONFIG_RISCV_ERRATA_ALTERNATIVE + apply_boot_alternatives(); +#endif } void __init smp_prepare_cpus(unsigned int max_cpus) diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index 3f893c9d9d85..2b3e0cb90d78 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -14,7 +14,7 @@ #include <asm/stacktrace.h> -register const unsigned long sp_in_global __asm__("sp"); +register unsigned long sp_in_global __asm__("sp"); #ifdef CONFIG_FRAME_POINTER diff --git a/arch/riscv/kernel/syscall_table.c b/arch/riscv/kernel/syscall_table.c index f1ead9df96ca..a63c667c27b3 100644 --- a/arch/riscv/kernel/syscall_table.c +++ b/arch/riscv/kernel/syscall_table.c @@ -13,7 +13,7 @@ #undef __SYSCALL #define __SYSCALL(nr, call) [nr] = (call), -void *sys_call_table[__NR_syscalls] = { +void * const sys_call_table[__NR_syscalls] = { [0 ... 
__NR_syscalls - 1] = sys_ni_syscall, #include <asm/unistd.h> }; diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c index 8a5cf99c0776..8217b0f67c6c 100644 --- a/arch/riscv/kernel/time.c +++ b/arch/riscv/kernel/time.c @@ -9,8 +9,9 @@ #include <linux/delay.h> #include <asm/sbi.h> #include <asm/processor.h> +#include <asm/timex.h> -unsigned long riscv_timebase; +unsigned long riscv_timebase __ro_after_init; EXPORT_SYMBOL_GPL(riscv_timebase); void __init time_init(void) diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 3ed2c23601a0..0721b9798595 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -17,6 +17,7 @@ #include <linux/module.h> #include <linux/irq.h> +#include <asm/asm-prototypes.h> #include <asm/bug.h> #include <asm/processor.h> #include <asm/ptrace.h> @@ -24,8 +25,6 @@ int show_unhandled_signals = 1; -extern asmlinkage void handle_exception(void); - static DEFINE_SPINLOCK(die_lock); void die(struct pt_regs *regs, const char *str) @@ -177,6 +176,7 @@ asmlinkage __visible void do_trap_break(struct pt_regs *regs) else die(regs, "Kernel BUG"); } +NOKPROBE_SYMBOL(do_trap_break); #ifdef CONFIG_GENERIC_BUG int is_valid_bugaddr(unsigned long pc) @@ -195,6 +195,6 @@ int is_valid_bugaddr(unsigned long pc) #endif /* CONFIG_GENERIC_BUG */ /* stvec & scratch is already set from head.S */ -void trap_init(void) +void __init trap_init(void) { } diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c index 3f1d35e7c98a..25a3b8849599 100644 --- a/arch/riscv/kernel/vdso.c +++ b/arch/riscv/kernel/vdso.c @@ -20,8 +20,8 @@ extern char vdso_start[], vdso_end[]; -static unsigned int vdso_pages; -static struct page **vdso_pagelist; +static unsigned int vdso_pages __ro_after_init; +static struct page **vdso_pagelist __ro_after_init; /* * The vDSO data page. diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 71a315e73cbe..24d936c147cd 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -23,7 +23,7 @@ ifneq ($(c-gettimeofday-y),) endif # Build rules -targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o +targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-syms.S obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) obj-y += vdso.o vdso-syms.o @@ -41,11 +41,10 @@ KASAN_SANITIZE := n $(obj)/vdso.o: $(obj)/vdso.so # link rule for the .so file, .lds has to be first -SYSCFLAGS_vdso.so.dbg = $(c_flags) -$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE +$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE $(call if_changed,vdsold) -SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \ - -Wl,--build-id=sha1 -Wl,--hash-style=both +LDFLAGS_vdso.so.dbg = -shared -s -soname=linux-vdso.so.1 \ + --build-id=sha1 --hash-style=both --eh-frame-hdr # We also create a special relocatable object that should mirror the symbol # table and layout of the linked DSO. With ld --just-symbols we can then @@ -60,13 +59,10 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE # actual build commands # The DSO images are built using a special linker script -# Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions. # Make sure only to export the intended __vdso_xxx symbol offsets. 
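For context on why only the __vdso_* symbols are kept global by the objcopy step in the link rule that follows: user space reaches these entry points through the vDSO page mapped into every process, usually via libc rather than by symbol name. A small, hedged usage sketch (not RISC-V specific; glibc transparently routes clock_gettime() through __vdso_clock_gettime where the kernel provides it):

/* Minimal usage sketch: where the kernel exports __vdso_clock_gettime,
 * this call is served entirely in user space from the vDSO page, with
 * no syscall and no privilege-mode switch. */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (clock_gettime(CLOCK_MONOTONIC, &ts))
		return 1;
	printf("monotonic: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}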
quiet_cmd_vdsold = VDSOLD $@ - cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \ - -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \ - $(CROSS_COMPILE)objcopy \ - $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ + cmd_vdsold = $(LD) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \ + $(OBJCOPY) $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ rm $@.tmp # Extracts symbol offsets from the VDSO, converting them into an assembly file diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S new file mode 100644 index 000000000000..4b29b9917f99 --- /dev/null +++ b/arch/riscv/kernel/vmlinux-xip.lds.S @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Regents of the University of California + * Copyright (C) 2017 SiFive + * Copyright (C) 2020 Vitaly Wool, Konsulko AB + */ + +#include <asm/pgtable.h> +#define LOAD_OFFSET KERNEL_LINK_ADDR +/* No __ro_after_init data in the .rodata section - which will always be ro */ +#define RO_AFTER_INIT_DATA + +#include <asm/vmlinux.lds.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/cache.h> +#include <asm/thread_info.h> + +OUTPUT_ARCH(riscv) +ENTRY(_start) + +jiffies = jiffies_64; + +SECTIONS +{ + /* Beginning of code and text segment */ + . = LOAD_OFFSET; + _xiprom = .; + _start = .; + HEAD_TEXT_SECTION + INIT_TEXT_SECTION(PAGE_SIZE) + /* we have to discard exit text and such at runtime, not link time */ + .exit.text : + { + EXIT_TEXT + } + + .text : { + _text = .; + _stext = .; + TEXT_TEXT + SCHED_TEXT + CPUIDLE_TEXT + LOCK_TEXT + KPROBES_TEXT + ENTRY_TEXT + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT + *(.fixup) + _etext = .; + } + RO_DATA(L1_CACHE_BYTES) + .srodata : { + *(.srodata*) + } + .init.rodata : { + INIT_SETUP(16) + INIT_CALLS + CON_INITCALL + INIT_RAM_FS + } + _exiprom = .; /* End of XIP ROM area */ + + +/* + * From this point, stuff is considered writable and will be copied to RAM + */ + __data_loc = ALIGN(16); /* location in file */ + . = LOAD_OFFSET + XIP_OFFSET; /* location in memory */ + + _sdata = .; /* Start of data section */ + _data = .; + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + _edata = .; + __start_ro_after_init = .; + .data.ro_after_init : AT(ADDR(.data.ro_after_init) - LOAD_OFFSET) { + *(.data..ro_after_init) + } + __end_ro_after_init = .; + + . = ALIGN(PAGE_SIZE); + __init_begin = .; + .init.data : { + INIT_DATA + } + .exit.data : { + EXIT_DATA + } + . = ALIGN(8); + __soc_early_init_table : { + __soc_early_init_table_start = .; + KEEP(*(__soc_early_init_table)) + __soc_early_init_table_end = .; + } + __soc_builtin_dtb_table : { + __soc_builtin_dtb_table_start = .; + KEEP(*(__soc_builtin_dtb_table)) + __soc_builtin_dtb_table_end = .; + } + PERCPU_SECTION(L1_CACHE_BYTES) + + . = ALIGN(PAGE_SIZE); + __init_end = .; + + .sdata : { + __global_pointer$ = . + 0x800; + *(.sdata*) + *(.sbss*) + } + + BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0) + EXCEPTION_TABLE(0x10) + + .rel.dyn : AT(ADDR(.rel.dyn) - LOAD_OFFSET) { + *(.rel.dyn*) + } + + /* + * End of copied data. We need a dummy section to get its LMA. + * Also located before final ALIGN() as trailing padding is not stored + * in the resulting binary file and useless to copy. + */ + .data.endmark : AT(ADDR(.data.endmark) - LOAD_OFFSET) { } + _edata_loc = LOADADDR(.data.endmark); + + . 
= ALIGN(PAGE_SIZE); + _end = .; + + STABS_DEBUG + DWARF_DEBUG + + DISCARDS +} diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S index de03cb22d0e9..891742ff75a7 100644 --- a/arch/riscv/kernel/vmlinux.lds.S +++ b/arch/riscv/kernel/vmlinux.lds.S @@ -4,7 +4,13 @@ * Copyright (C) 2017 SiFive */ -#define LOAD_OFFSET PAGE_OFFSET +#ifdef CONFIG_XIP_KERNEL +#include "vmlinux-xip.lds.S" +#else + +#include <asm/pgtable.h> +#define LOAD_OFFSET KERNEL_LINK_ADDR + #include <asm/vmlinux.lds.h> #include <asm/page.h> #include <asm/cache.h> @@ -90,6 +96,13 @@ SECTIONS } __init_data_end = .; + + . = ALIGN(8); + .alternative : { + __alt_start = .; + *(.alternative) + __alt_end = .; + } __init_end = .; /* Start of data section */ @@ -132,3 +145,4 @@ SECTIONS DISCARDS } +#endif /* CONFIG_XIP_KERNEL */ diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 8f17519208c7..096463cc6fff 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -231,6 +231,19 @@ asmlinkage void do_page_fault(struct pt_regs *regs) return; } +#ifdef CONFIG_64BIT + /* + * Modules in 64bit kernels lie in their own virtual region which is not + * in the vmalloc region, but dealing with page faults in this region + * or the vmalloc region amounts to doing the same thing: checking that + * the mapping exists in init_mm.pgd and updating user page table, so + * just use vmalloc_fault. + */ + if (unlikely(addr >= MODULES_VADDR && addr < MODULES_END)) { + vmalloc_fault(regs, code, addr); + return; + } +#endif /* Enable interrupts if they were enabled in the parent context. */ if (likely(regs->status & SR_PIE)) local_irq_enable(); @@ -328,3 +341,4 @@ good_area: } return; } +NOKPROBE_SYMBOL(do_page_fault); diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 067583ab1bd7..4faf8bd157ea 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -2,6 +2,8 @@ /* * Copyright (C) 2012 Regents of the University of California * Copyright (C) 2019 Western Digital Corporation or its affiliates. 
+ * Copyright (C) 2020 FORTH-ICS/CARV + * Nick Kossifidis <mick@ics.forth.gr> */ #include <linux/init.h> @@ -11,9 +13,11 @@ #include <linux/swap.h> #include <linux/sizes.h> #include <linux/of_fdt.h> +#include <linux/of_reserved_mem.h> #include <linux/libfdt.h> #include <linux/set_memory.h> #include <linux/dma-map-ops.h> +#include <linux/crash_dump.h> #include <asm/fixmap.h> #include <asm/tlbflush.h> @@ -25,14 +29,20 @@ #include "../kernel/head.h" +unsigned long kernel_virt_addr = KERNEL_LINK_ADDR; +EXPORT_SYMBOL(kernel_virt_addr); +#ifdef CONFIG_XIP_KERNEL +#define kernel_virt_addr (*((unsigned long *)XIP_FIXUP(&kernel_virt_addr))) +#endif + unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; EXPORT_SYMBOL(empty_zero_page); extern char _start[]; #define DTB_EARLY_BASE_VA PGDIR_SIZE -void *dtb_early_va __initdata; -uintptr_t dtb_early_pa __initdata; +void *_dtb_early_va __initdata; +uintptr_t _dtb_early_pa __initdata; struct pt_alloc_ops { pte_t *(*get_pte_virt)(phys_addr_t pa); @@ -57,7 +67,7 @@ static void __init zone_sizes_init(void) free_area_init(max_zone_pfns); } -static void setup_zero_page(void) +static void __init setup_zero_page(void) { memset((void *)empty_zero_page, 0, PAGE_SIZE); } @@ -75,7 +85,7 @@ static inline void print_mlm(char *name, unsigned long b, unsigned long t) (((t) - (b)) >> 20)); } -static void print_vm_layout(void) +static void __init print_vm_layout(void) { pr_notice("Virtual kernel memory layout:\n"); print_mlk("fixmap", (unsigned long)FIXADDR_START, @@ -88,6 +98,10 @@ static void print_vm_layout(void) (unsigned long)VMALLOC_END); print_mlm("lowmem", (unsigned long)PAGE_OFFSET, (unsigned long)high_memory); +#ifdef CONFIG_64BIT + print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR, + (unsigned long)ADDRESS_SPACE_END); +#endif } #else static void print_vm_layout(void) { } @@ -102,7 +116,6 @@ void __init mem_init(void) high_memory = (void *)(__va(PFN_PHYS(max_low_pfn))); memblock_free_all(); - mem_init_print_info(NULL); print_vm_layout(); } @@ -113,10 +126,24 @@ void __init setup_bootmem(void) phys_addr_t dram_end = memblock_end_of_DRAM(); phys_addr_t max_mapped_addr = __pa(~(ulong)0); +#ifdef CONFIG_XIP_KERNEL + vmlinux_start = __pa_symbol(&_sdata); +#endif + /* The maximal physical memory size is -PAGE_OFFSET. */ memblock_enforce_memory_limit(-PAGE_OFFSET); - /* Reserve from the start of the kernel to the end of the kernel */ + /* + * Reserve from the start of the kernel to the end of the kernel + */ +#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX) + /* + * Make sure we align the reservation on PMD_SIZE since we will + * map the kernel in the linear mapping as read-only: we do not want + * any allocation to happen between _end and the next pmd aligned page. 
+ */ + vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK; +#endif memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); /* @@ -128,8 +155,9 @@ void __init setup_bootmem(void) if (max_mapped_addr == (dram_end - 1)) memblock_set_current_limit(max_mapped_addr - 4096); - max_pfn = PFN_DOWN(dram_end); - max_low_pfn = max_pfn; + min_low_pfn = PFN_UP(memblock_start_of_DRAM()); + max_low_pfn = max_pfn = PFN_DOWN(dram_end); + dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn)); set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET); @@ -148,12 +176,42 @@ void __init setup_bootmem(void) memblock_allow_resize(); } +#ifdef CONFIG_XIP_KERNEL + +extern char _xiprom[], _exiprom[]; +extern char _sdata[], _edata[]; + +#endif /* CONFIG_XIP_KERNEL */ + #ifdef CONFIG_MMU -static struct pt_alloc_ops pt_ops; +static struct pt_alloc_ops _pt_ops __ro_after_init; -unsigned long va_pa_offset; +#ifdef CONFIG_XIP_KERNEL +#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&_pt_ops)) +#else +#define pt_ops _pt_ops +#endif + +/* Offset between linear mapping virtual address and kernel load address */ +unsigned long va_pa_offset __ro_after_init; EXPORT_SYMBOL(va_pa_offset); -unsigned long pfn_base; +#ifdef CONFIG_XIP_KERNEL +#define va_pa_offset (*((unsigned long *)XIP_FIXUP(&va_pa_offset))) +#endif +/* Offset between kernel mapping virtual address and kernel load address */ +#ifdef CONFIG_64BIT +unsigned long va_kernel_pa_offset; +EXPORT_SYMBOL(va_kernel_pa_offset); +#endif +#ifdef CONFIG_XIP_KERNEL +#define va_kernel_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_pa_offset))) +#endif +unsigned long va_kernel_xip_pa_offset; +EXPORT_SYMBOL(va_kernel_xip_pa_offset); +#ifdef CONFIG_XIP_KERNEL +#define va_kernel_xip_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_xip_pa_offset))) +#endif +unsigned long pfn_base __ro_after_init; EXPORT_SYMBOL(pfn_base); pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; @@ -162,6 +220,12 @@ pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss; pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); +#ifdef CONFIG_XIP_KERNEL +#define trampoline_pg_dir ((pgd_t *)XIP_FIXUP(trampoline_pg_dir)) +#define fixmap_pte ((pte_t *)XIP_FIXUP(fixmap_pte)) +#define early_pg_dir ((pgd_t *)XIP_FIXUP(early_pg_dir)) +#endif /* CONFIG_XIP_KERNEL */ + void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) { unsigned long addr = __fix_to_virt(idx); @@ -213,8 +277,8 @@ static phys_addr_t alloc_pte_late(uintptr_t va) unsigned long vaddr; vaddr = __get_free_page(GFP_KERNEL); - if (!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr))) - BUG(); + BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr))); + return __pa(vaddr); } @@ -237,6 +301,12 @@ pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss; pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); +#ifdef CONFIG_XIP_KERNEL +#define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd)) +#define fixmap_pmd ((pmd_t *)XIP_FIXUP(fixmap_pmd)) +#define early_pmd ((pmd_t *)XIP_FIXUP(early_pmd)) +#endif /* CONFIG_XIP_KERNEL */ + static pmd_t *__init get_pmd_virt_early(phys_addr_t pa) { /* Before MMU is enabled */ @@ -256,7 +326,7 @@ static pmd_t *get_pmd_virt_late(phys_addr_t pa) static phys_addr_t __init alloc_pmd_early(uintptr_t va) { - BUG_ON((va - PAGE_OFFSET) >> PGDIR_SHIFT); + BUG_ON((va - kernel_virt_addr) >> PGDIR_SHIFT); return (uintptr_t)early_pmd; } @@ -353,6 +423,19 @@ static uintptr_t __init best_map_size(phys_addr_t 
base, phys_addr_t size) return PMD_SIZE; } +#ifdef CONFIG_XIP_KERNEL +/* called from head.S with MMU off */ +asmlinkage void __init __copy_data(void) +{ + void *from = (void *)(&_sdata); + void *end = (void *)(&_end); + void *to = (void *)CONFIG_PHYS_RAM_BASE; + size_t sz = (size_t)(end - from + 1); + + memcpy(to, from, sz); +} +#endif + /* * setup_vm() is called from head.S with MMU-off. * @@ -371,17 +454,74 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size) #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing." #endif +uintptr_t load_pa, load_sz; +#ifdef CONFIG_XIP_KERNEL +#define load_pa (*((uintptr_t *)XIP_FIXUP(&load_pa))) +#define load_sz (*((uintptr_t *)XIP_FIXUP(&load_sz))) +#endif + +#ifdef CONFIG_XIP_KERNEL +uintptr_t xiprom, xiprom_sz; +#define xiprom_sz (*((uintptr_t *)XIP_FIXUP(&xiprom_sz))) +#define xiprom (*((uintptr_t *)XIP_FIXUP(&xiprom))) + +static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size) +{ + uintptr_t va, end_va; + + /* Map the flash resident part */ + end_va = kernel_virt_addr + xiprom_sz; + for (va = kernel_virt_addr; va < end_va; va += map_size) + create_pgd_mapping(pgdir, va, + xiprom + (va - kernel_virt_addr), + map_size, PAGE_KERNEL_EXEC); + + /* Map the data in RAM */ + end_va = kernel_virt_addr + XIP_OFFSET + load_sz; + for (va = kernel_virt_addr + XIP_OFFSET; va < end_va; va += map_size) + create_pgd_mapping(pgdir, va, + load_pa + (va - (kernel_virt_addr + XIP_OFFSET)), + map_size, PAGE_KERNEL); +} +#else +static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size) +{ + uintptr_t va, end_va; + + end_va = kernel_virt_addr + load_sz; + for (va = kernel_virt_addr; va < end_va; va += map_size) + create_pgd_mapping(pgdir, va, + load_pa + (va - kernel_virt_addr), + map_size, PAGE_KERNEL_EXEC); +} +#endif + asmlinkage void __init setup_vm(uintptr_t dtb_pa) { - uintptr_t va, pa, end_va; - uintptr_t load_pa = (uintptr_t)(&_start); - uintptr_t load_sz = (uintptr_t)(&_end) - load_pa; + uintptr_t __maybe_unused pa; uintptr_t map_size; #ifndef __PAGETABLE_PMD_FOLDED pmd_t fix_bmap_spmd, fix_bmap_epmd; #endif +#ifdef CONFIG_XIP_KERNEL + xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR; + xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom); + + load_pa = (uintptr_t)CONFIG_PHYS_RAM_BASE; + load_sz = (uintptr_t)(&_end) - (uintptr_t)(&_sdata); + + va_kernel_xip_pa_offset = kernel_virt_addr - xiprom; +#else + load_pa = (uintptr_t)(&_start); + load_sz = (uintptr_t)(&_end) - load_pa; +#endif + va_pa_offset = PAGE_OFFSET - load_pa; +#ifdef CONFIG_64BIT + va_kernel_pa_offset = kernel_virt_addr - load_pa; +#endif + pfn_base = PFN_DOWN(load_pa); /* @@ -409,26 +549,27 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) create_pmd_mapping(fixmap_pmd, FIXADDR_START, (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE); /* Setup trampoline PGD and PMD */ - create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET, + create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr, (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE); - create_pmd_mapping(trampoline_pmd, PAGE_OFFSET, +#ifdef CONFIG_XIP_KERNEL + create_pmd_mapping(trampoline_pmd, kernel_virt_addr, + xiprom, PMD_SIZE, PAGE_KERNEL_EXEC); +#else + create_pmd_mapping(trampoline_pmd, kernel_virt_addr, load_pa, PMD_SIZE, PAGE_KERNEL_EXEC); +#endif #else /* Setup trampoline PGD */ - create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET, + create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr, load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC); #endif /* - * 
Setup early PGD covering entire kernel which will allows + * Setup early PGD covering entire kernel which will allow * us to reach paging_init(). We map all memory banks later * in setup_vm_final() below. */ - end_va = PAGE_OFFSET + load_sz; - for (va = PAGE_OFFSET; va < end_va; va += map_size) - create_pgd_mapping(early_pg_dir, va, - load_pa + (va - PAGE_OFFSET), - map_size, PAGE_KERNEL_EXEC); + create_kernel_page_table(early_pg_dir, map_size); #ifndef __PAGETABLE_PMD_FOLDED /* Setup early PMD for DTB */ @@ -443,7 +584,16 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL); dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1)); #else /* CONFIG_BUILTIN_DTB */ +#ifdef CONFIG_64BIT + /* + * __va can't be used since it would return a linear mapping address + * whereas dtb_early_va will be used before setup_vm_final installs + * the linear mapping. + */ + dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa)); +#else dtb_early_va = __va(dtb_pa); +#endif /* CONFIG_64BIT */ #endif /* CONFIG_BUILTIN_DTB */ #else #ifndef CONFIG_BUILTIN_DTB @@ -455,7 +605,11 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL); dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1)); #else /* CONFIG_BUILTIN_DTB */ +#ifdef CONFIG_64BIT + dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa)); +#else dtb_early_va = __va(dtb_pa); +#endif /* CONFIG_64BIT */ #endif /* CONFIG_BUILTIN_DTB */ #endif dtb_early_pa = dtb_pa; @@ -491,6 +645,22 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) #endif } +#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX) +void protect_kernel_linear_mapping_text_rodata(void) +{ + unsigned long text_start = (unsigned long)lm_alias(_start); + unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin); + unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata); + unsigned long data_start = (unsigned long)lm_alias(_data); + + set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT); + set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT); + + set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT); + set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT); +} +#endif + static void __init setup_vm_final(void) { uintptr_t va, map_size; @@ -512,7 +682,7 @@ static void __init setup_vm_final(void) __pa_symbol(fixmap_pgd_next), PGDIR_SIZE, PAGE_TABLE); - /* Map all memory banks */ + /* Map all memory banks in the linear mapping */ for_each_mem_range(i, &start, &end) { if (start >= end) break; @@ -524,10 +694,22 @@ static void __init setup_vm_final(void) for (pa = start; pa < end; pa += map_size) { va = (uintptr_t)__va(pa); create_pgd_mapping(swapper_pg_dir, va, pa, - map_size, PAGE_KERNEL_EXEC); + map_size, +#ifdef CONFIG_64BIT + PAGE_KERNEL +#else + PAGE_KERNEL_EXEC +#endif + ); + } } +#ifdef CONFIG_64BIT + /* Map the kernel */ + create_kernel_page_table(swapper_pg_dir, PMD_SIZE); +#endif + /* Clear fixmap PTE and PMD mappings */ clear_fixmap(FIX_PTE); clear_fixmap(FIX_PMD); @@ -557,7 +739,7 @@ static inline void setup_vm_final(void) #endif /* CONFIG_MMU */ #ifdef CONFIG_STRICT_KERNEL_RWX -void protect_kernel_text_data(void) +void __init protect_kernel_text_data(void) { unsigned long text_start = (unsigned long)_start; unsigned long init_text_start = (unsigned long)__init_text_begin; @@ -585,6 +767,103 @@ void mark_rodata_ro(void) } #endif +#ifdef CONFIG_KEXEC_CORE +/* + * 
reserve_crashkernel() - reserves memory for crash kernel + * + * This function reserves memory area given in "crashkernel=" kernel command + * line parameter. The memory reserved is used by dump capture kernel when + * primary kernel is crashing. + */ +static void __init reserve_crashkernel(void) +{ + unsigned long long crash_base = 0; + unsigned long long crash_size = 0; + unsigned long search_start = memblock_start_of_DRAM(); + unsigned long search_end = memblock_end_of_DRAM(); + + int ret = 0; + + /* + * Don't reserve a region for a crash kernel on a crash kernel + * since it doesn't make much sense and we have limited memory + * resources. + */ +#ifdef CONFIG_CRASH_DUMP + if (is_kdump_kernel()) { + pr_info("crashkernel: ignoring reservation request\n"); + return; + } +#endif + + ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), + &crash_size, &crash_base); + if (ret || !crash_size) + return; + + crash_size = PAGE_ALIGN(crash_size); + + if (crash_base == 0) { + /* + * Current riscv boot protocol requires 2MB alignment for + * RV64 and 4MB alignment for RV32 (hugepage size) + */ + crash_base = memblock_find_in_range(search_start, search_end, + crash_size, PMD_SIZE); + + if (crash_base == 0) { + pr_warn("crashkernel: couldn't allocate %lldKB\n", + crash_size >> 10); + return; + } + } else { + /* User specifies base address explicitly. */ + if (!memblock_is_region_memory(crash_base, crash_size)) { + pr_warn("crashkernel: requested region is not memory\n"); + return; + } + + if (memblock_is_region_reserved(crash_base, crash_size)) { + pr_warn("crashkernel: requested region is reserved\n"); + return; + } + + + if (!IS_ALIGNED(crash_base, PMD_SIZE)) { + pr_warn("crashkernel: requested region is misaligned\n"); + return; + } + } + memblock_reserve(crash_base, crash_size); + + pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n", + crash_base, crash_base + crash_size, crash_size >> 20); + + crashk_res.start = crash_base; + crashk_res.end = crash_base + crash_size - 1; +} +#endif /* CONFIG_KEXEC_CORE */ + +#ifdef CONFIG_CRASH_DUMP +/* + * We keep track of the ELF core header of the crashed + * kernel with a reserved-memory region with compatible + * string "linux,elfcorehdr". Here we register a callback + * to populate elfcorehdr_addr/size when this region is + * present. Note that this region will be marked as + * reserved once we call early_init_fdt_scan_reserved_mem() + * later on. 
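The reservation above reduces to two pieces of arithmetic: the requested size is page-aligned, and an explicitly requested base must be hugepage (PMD_SIZE) aligned and lie inside real memory, because the RV64 boot protocol wants 2 MB alignment. A minimal stand-alone C sketch of just that arithmetic follows; PAGE_SIZE/PMD_SIZE, align_up() and crash_region_ok() are illustrative stand-ins, not kernel API.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL            /* assumed 4 KiB pages */
#define PMD_SIZE  (2ULL << 20)       /* assumed 2 MiB hugepage, per the RV64 comment above */

/* Round x up to the next multiple of a (a must be a power of two). */
static uint64_t align_up(uint64_t x, uint64_t a)
{
	return (x + a - 1) & ~(a - 1);
}

/* Rough mirror of the checks applied to a user-supplied crashkernel base. */
static bool crash_region_ok(uint64_t base, uint64_t size,
			    uint64_t ram_start, uint64_t ram_end)
{
	if (base & (PMD_SIZE - 1))
		return false;                /* base not hugepage aligned */
	if (base < ram_start || base + size > ram_end)
		return false;                /* region not backed by memory */
	return true;
}

int main(void)
{
	uint64_t size = align_up(128ULL << 20, PAGE_SIZE);   /* e.g. crashkernel=128M */
	uint64_t base = 0x80200000ULL;                       /* hypothetical base address */

	printf("size=0x%" PRIx64 " ok=%d\n", size,
	       crash_region_ok(base, size, 0x80000000ULL, 0x100000000ULL));
	return 0;
}

When no base is given, the kernel instead searches DRAM for a PMD_SIZE-aligned hole of the requested size, which is what the memblock_find_in_range() call above does.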
+ */ +static int elfcore_hdr_setup(struct reserved_mem *rmem) +{ + elfcorehdr_addr = rmem->base; + elfcorehdr_size = rmem->size; + return 0; +} + +RESERVEDMEM_OF_DECLARE(elfcorehdr, "linux,elfcorehdr", elfcore_hdr_setup); +#endif + void __init paging_init(void) { setup_vm_final(); @@ -593,9 +872,13 @@ void __init paging_init(void) void __init misc_mem_init(void) { + early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT); arch_numa_init(); sparse_init(); zone_sizes_init(); +#ifdef CONFIG_KEXEC_CORE + reserve_crashkernel(); +#endif memblock_dump_all(); } diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c index 3fc18f469efb..9daacae93e33 100644 --- a/arch/riscv/mm/kasan_init.c +++ b/arch/riscv/mm/kasan_init.c @@ -11,18 +11,6 @@ #include <asm/fixmap.h> #include <asm/pgalloc.h> -static __init void *early_alloc(size_t size, int node) -{ - void *ptr = memblock_alloc_try_nid(size, size, - __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node); - - if (!ptr) - panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n", - __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS)); - - return ptr; -} - extern pgd_t early_pg_dir[PTRS_PER_PGD]; asmlinkage void __init kasan_early_init(void) { @@ -60,7 +48,7 @@ asmlinkage void __init kasan_early_init(void) local_flush_tlb_all(); } -static void kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end) +static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end) { phys_addr_t phys_addr; pte_t *ptep, *base_pte; @@ -82,7 +70,7 @@ static void kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long en set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE)); } -static void kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end) +static void __init kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end) { phys_addr_t phys_addr; pmd_t *pmdp, *base_pmd; @@ -117,7 +105,7 @@ static void kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long en set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE)); } -static void kasan_populate_pgd(unsigned long vaddr, unsigned long end) +static void __init kasan_populate_pgd(unsigned long vaddr, unsigned long end) { phys_addr_t phys_addr; pgd_t *pgdp = pgd_offset_k(vaddr); @@ -155,38 +143,28 @@ static void __init kasan_populate(void *start, void *end) memset(start, KASAN_SHADOW_INIT, end - start); } -void __init kasan_shallow_populate(void *start, void *end) +static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end) { - unsigned long vaddr = (unsigned long)start & PAGE_MASK; - unsigned long vend = PAGE_ALIGN((unsigned long)end); - unsigned long pfn; - int index; + unsigned long next; void *p; - pud_t *pud_dir, *pud_k; - pgd_t *pgd_dir, *pgd_k; - p4d_t *p4d_dir, *p4d_k; - - while (vaddr < vend) { - index = pgd_index(vaddr); - pfn = csr_read(CSR_SATP) & SATP_PPN; - pgd_dir = (pgd_t *)pfn_to_virt(pfn) + index; - pgd_k = init_mm.pgd + index; - pgd_dir = pgd_offset_k(vaddr); - set_pgd(pgd_dir, *pgd_k); - - p4d_dir = p4d_offset(pgd_dir, vaddr); - p4d_k = p4d_offset(pgd_k, vaddr); - - vaddr = (vaddr + PUD_SIZE) & PUD_MASK; - pud_dir = pud_offset(p4d_dir, vaddr); - pud_k = pud_offset(p4d_k, vaddr); - - if (pud_present(*pud_dir)) { - p = early_alloc(PAGE_SIZE, NUMA_NO_NODE); - pud_populate(&init_mm, pud_dir, p); + pgd_t *pgd_k = pgd_offset_k(vaddr); + + do { + next = pgd_addr_end(vaddr, end); + if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) 
{ + p = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE)); } - vaddr += PAGE_SIZE; - } + } while (pgd_k++, vaddr = next, vaddr != end); +} + +static void __init kasan_shallow_populate(void *start, void *end) +{ + unsigned long vaddr = (unsigned long)start & PAGE_MASK; + unsigned long vend = PAGE_ALIGN((unsigned long)end); + + kasan_shallow_populate_pgd(vaddr, vend); + local_flush_tlb_all(); } void __init kasan_init(void) @@ -194,6 +172,10 @@ void __init kasan_init(void) phys_addr_t _start, _end; u64 i; + /* + * Populate all kernel virtual address space with kasan_early_shadow_page + * except for the linear mapping and the modules/kernel/BPF mapping. + */ kasan_populate_early_shadow((void *)KASAN_SHADOW_START, (void *)kasan_mem_to_shadow((void *) VMEMMAP_END)); @@ -206,6 +188,7 @@ void __init kasan_init(void) (void *)kasan_mem_to_shadow((void *)VMALLOC_START), (void *)kasan_mem_to_shadow((void *)VMALLOC_END)); + /* Populate the linear mapping */ for_each_mem_range(i, &_start, &_end) { void *start = (void *)__va(_start); void *end = (void *)__va(_end); @@ -214,7 +197,11 @@ void __init kasan_init(void) break; kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end)); - }; + } + + /* Populate kernel, BPF, modules mapping */ + kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR), + kasan_mem_to_shadow((const void *)BPF_JIT_REGION_END)); for (i = 0; i < PTRS_PER_PTE; i++) set_pte(&kasan_early_shadow_pte[i], diff --git a/arch/riscv/mm/physaddr.c b/arch/riscv/mm/physaddr.c index e8e4dcd39fed..35703d5ef5fd 100644 --- a/arch/riscv/mm/physaddr.c +++ b/arch/riscv/mm/physaddr.c @@ -23,7 +23,7 @@ EXPORT_SYMBOL(__virt_to_phys); phys_addr_t __phys_addr_symbol(unsigned long x) { - unsigned long kernel_start = (unsigned long)PAGE_OFFSET; + unsigned long kernel_start = (unsigned long)kernel_virt_addr; unsigned long kernel_end = (unsigned long)_end; /* diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c index ace74dec7492..0536ac84b730 100644 --- a/arch/riscv/mm/ptdump.c +++ b/arch/riscv/mm/ptdump.c @@ -58,29 +58,56 @@ struct ptd_mm_info { unsigned long end; }; +enum address_markers_idx { +#ifdef CONFIG_KASAN + KASAN_SHADOW_START_NR, + KASAN_SHADOW_END_NR, +#endif + FIXMAP_START_NR, + FIXMAP_END_NR, + PCI_IO_START_NR, + PCI_IO_END_NR, +#ifdef CONFIG_SPARSEMEM_VMEMMAP + VMEMMAP_START_NR, + VMEMMAP_END_NR, +#endif + VMALLOC_START_NR, + VMALLOC_END_NR, + PAGE_OFFSET_NR, +#ifdef CONFIG_64BIT + MODULES_MAPPING_NR, + KERNEL_MAPPING_NR, +#endif + END_OF_SPACE_NR +}; + static struct addr_marker address_markers[] = { #ifdef CONFIG_KASAN - {KASAN_SHADOW_START, "Kasan shadow start"}, - {KASAN_SHADOW_END, "Kasan shadow end"}, + {0, "Kasan shadow start"}, + {0, "Kasan shadow end"}, #endif - {FIXADDR_START, "Fixmap start"}, - {FIXADDR_TOP, "Fixmap end"}, - {PCI_IO_START, "PCI I/O start"}, - {PCI_IO_END, "PCI I/O end"}, + {0, "Fixmap start"}, + {0, "Fixmap end"}, + {0, "PCI I/O start"}, + {0, "PCI I/O end"}, #ifdef CONFIG_SPARSEMEM_VMEMMAP - {VMEMMAP_START, "vmemmap start"}, - {VMEMMAP_END, "vmemmap end"}, + {0, "vmemmap start"}, + {0, "vmemmap end"}, +#endif + {0, "vmalloc() area"}, + {0, "vmalloc() end"}, + {0, "Linear mapping"}, +#ifdef CONFIG_64BIT + {0, "Modules mapping"}, + {0, "Kernel mapping (kernel, BPF)"}, #endif - {VMALLOC_START, "vmalloc() area"}, - {VMALLOC_END, "vmalloc() end"}, - {PAGE_OFFSET, "Linear mapping"}, {-1, NULL}, }; static struct ptd_mm_info kernel_ptd_info = { .mm = &init_mm, .markers = address_markers, - .base_addr = 
KERN_VIRT_START, + .base_addr = 0, .end = ULONG_MAX, }; @@ -331,10 +358,32 @@ static int ptdump_show(struct seq_file *m, void *v) DEFINE_SHOW_ATTRIBUTE(ptdump); -static int ptdump_init(void) +static int __init ptdump_init(void) { unsigned int i, j; +#ifdef CONFIG_KASAN + address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START; + address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END; +#endif + address_markers[FIXMAP_START_NR].start_address = FIXADDR_START; + address_markers[FIXMAP_END_NR].start_address = FIXADDR_TOP; + address_markers[PCI_IO_START_NR].start_address = PCI_IO_START; + address_markers[PCI_IO_END_NR].start_address = PCI_IO_END; +#ifdef CONFIG_SPARSEMEM_VMEMMAP + address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START; + address_markers[VMEMMAP_END_NR].start_address = VMEMMAP_END; +#endif + address_markers[VMALLOC_START_NR].start_address = VMALLOC_START; + address_markers[VMALLOC_END_NR].start_address = VMALLOC_END; + address_markers[PAGE_OFFSET_NR].start_address = PAGE_OFFSET; +#ifdef CONFIG_64BIT + address_markers[MODULES_MAPPING_NR].start_address = MODULES_VADDR; + address_markers[KERNEL_MAPPING_NR].start_address = kernel_virt_addr; +#endif + + kernel_ptd_info.base_addr = KERN_VIRT_START; + for (i = 0; i < ARRAY_SIZE(pg_level); i++) for (j = 0; j < ARRAY_SIZE(pte_bits); j++) pg_level[i].mask |= pte_bits[j].mask; diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index b44ff52f84a6..87e3bf5b9086 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -1148,16 +1148,3 @@ void bpf_jit_build_epilogue(struct rv_jit_context *ctx) { __build_epilogue(false, ctx); } - -void *bpf_jit_alloc_exec(unsigned long size) -{ - return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START, - BPF_JIT_REGION_END, GFP_KERNEL, - PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, - __builtin_return_address(0)); -} - -void bpf_jit_free_exec(void *addr) -{ - return vfree(addr); -} diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c index 3630d447352c..fed86f42dfbe 100644 --- a/arch/riscv/net/bpf_jit_core.c +++ b/arch/riscv/net/bpf_jit_core.c @@ -152,6 +152,7 @@ skip_init_ctx: bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns); if (!prog->is_func || extra_pass) { + bpf_jit_binary_lock_ro(jit_data->header); out_offset: kfree(ctx->offset); kfree(jit_data); @@ -164,3 +165,16 @@ out: tmp : orig_prog); return prog; } + +void *bpf_jit_alloc_exec(unsigned long size) +{ + return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START, + BPF_JIT_REGION_END, GFP_KERNEL, + PAGE_KERNEL, 0, NUMA_NO_NODE, + __builtin_return_address(0)); +} + +void bpf_jit_free_exec(void *addr) +{ + return vfree(addr); +} diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index c1ff874e6c2e..b4c7c34069f8 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -60,6 +60,9 @@ config S390 imply IMA_SECURE_AND_OR_TRUSTED_BOOT select ARCH_32BIT_USTAT_F_TINODE select ARCH_BINFMT_ELF_STATE + select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM + select ARCH_ENABLE_MEMORY_HOTREMOVE + select ARCH_ENABLE_SPLIT_PMD_PTLOCK select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DEBUG_WX select ARCH_HAS_DEVMEM_IS_ALLOWED @@ -137,6 +140,7 @@ config S390 select HAVE_ARCH_JUMP_LABEL_RELATIVE select HAVE_ARCH_KASAN select HAVE_ARCH_KASAN_VMALLOC + select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_SOFT_DIRTY select HAVE_ARCH_TRACEHOOK @@ -626,15 +630,6 @@ config ARCH_SPARSEMEM_ENABLE config 
ARCH_SPARSEMEM_DEFAULT def_bool y -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y if SPARSEMEM - -config ARCH_ENABLE_MEMORY_HOTREMOVE - def_bool y - -config ARCH_ENABLE_SPLIT_PMD_PTLOCK - def_bool y - config MAX_PHYSMEM_BITS int "Maximum size of supported physical memory in bits (42-53)" range 42 53 diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug index ef96c25fa921..9ea6e61d5858 100644 --- a/arch/s390/Kconfig.debug +++ b/arch/s390/Kconfig.debug @@ -15,3 +15,11 @@ config DEBUG_ENTRY exits or otherwise impact performance. If unsure, say N. + +config CIO_INJECT + bool "CIO Inject interfaces" + depends on DEBUG_KERNEL && DEBUG_FS + help + This option provides a debugging facility to inject certain artificial events + and instruction responses to the CIO layer of Linux kernel. The newly created + debugfs user-interfaces will be at /sys/kernel/debug/s390/cio/* diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index dc0b69058ac4..86afcc6b56bf 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -387,6 +387,7 @@ CONFIG_CGROUP_NET_PRIO=y CONFIG_BPF_JIT=y CONFIG_NET_PKTGEN=m CONFIG_PCI=y +CONFIG_PCI_IOV=y # CONFIG_PCIEASPM is not set CONFIG_PCI_DEBUG=y CONFIG_HOTPLUG_PCI=y @@ -548,7 +549,7 @@ CONFIG_INPUT_EVDEV=y # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set CONFIG_LEGACY_PTY_COUNT=0 -CONFIG_VIRTIO_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=m CONFIG_HW_RANDOM_VIRTIO=m CONFIG_RAW_DRIVER=m CONFIG_HANGCHECK_TIMER=m @@ -771,7 +772,6 @@ CONFIG_DYNAMIC_DEBUG=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_INFO_DWARF4=y CONFIG_GDB_SCRIPTS=y -CONFIG_FRAME_WARN=1024 CONFIG_HEADERS_INSTALL=y CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_MAGIC_SYSRQ=y @@ -829,6 +829,7 @@ CONFIG_HIST_TRIGGERS=y CONFIG_FTRACE_STARTUP_TEST=y # CONFIG_EVENT_TRACE_STARTUP_TEST is not set CONFIG_DEBUG_ENTRY=y +CONFIG_CIO_INJECT=y CONFIG_NOTIFIER_ERROR_INJECTION=m CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m CONFIG_FAULT_INJECTION=y diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 320379da96d9..71b49ea5b058 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -377,6 +377,7 @@ CONFIG_CGROUP_NET_PRIO=y CONFIG_BPF_JIT=y CONFIG_NET_PKTGEN=m CONFIG_PCI=y +CONFIG_PCI_IOV=y # CONFIG_PCIEASPM is not set CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_S390=y @@ -540,7 +541,7 @@ CONFIG_INPUT_EVDEV=y # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set CONFIG_LEGACY_PTY_COUNT=0 -CONFIG_VIRTIO_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=m CONFIG_HW_RANDOM_VIRTIO=m CONFIG_RAW_DRIVER=m CONFIG_HANGCHECK_TIMER=m @@ -756,7 +757,6 @@ CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_INFO_DWARF4=y CONFIG_GDB_SCRIPTS=y -CONFIG_FRAME_WARN=1024 CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_WX=y diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c index 7b947728d57e..56007c763902 100644 --- a/arch/s390/crypto/arch_random.c +++ b/arch/s390/crypto/arch_random.c @@ -54,6 +54,10 @@ static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer); bool s390_arch_random_generate(u8 *buf, unsigned int nbytes) { + /* max hunk is ARCH_RNG_BUF_SIZE */ + if (nbytes > ARCH_RNG_BUF_SIZE) + return false; + /* lock rng buffer */ if (!spin_trylock(&arch_rng_lock)) return false; diff --git a/arch/s390/crypto/crc32be-vx.S b/arch/s390/crypto/crc32be-vx.S index 0099044e2c86..6b3d1009c392 100644 --- a/arch/s390/crypto/crc32be-vx.S +++ b/arch/s390/crypto/crc32be-vx.S @@ -32,7 +32,7 @@ * process particular chunks of the input data 
stream in parallel. * * For the CRC-32 variants, the constants are precomputed according to - * these defintions: + * these definitions: * * R1 = x4*128+64 mod P(x) * R2 = x4*128 mod P(x) @@ -189,7 +189,7 @@ ENTRY(crc32_be_vgfm_16) * Note: To compensate the division by x^32, use the vector unpack * instruction to move the leftmost word into the leftmost doubleword * of the vector register. The rightmost doubleword is multiplied - * with zero to not contribute to the intermedate results. + * with zero to not contribute to the intermediate results. */ /* T1(x) = floor( R(x) / x^32 ) GF2MUL u */ diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 5860ae790f2d..7c93c6573524 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -15,48 +15,46 @@ #include <asm/barrier.h> #include <asm/cmpxchg.h> -static inline int atomic_read(const atomic_t *v) +static inline int arch_atomic_read(const atomic_t *v) { - int c; - - asm volatile( - " l %0,%1\n" - : "=d" (c) : "Q" (v->counter)); - return c; + return __atomic_read(v); } +#define arch_atomic_read arch_atomic_read -static inline void atomic_set(atomic_t *v, int i) +static inline void arch_atomic_set(atomic_t *v, int i) { - asm volatile( - " st %1,%0\n" - : "=Q" (v->counter) : "d" (i)); + __atomic_set(v, i); } +#define arch_atomic_set arch_atomic_set -static inline int atomic_add_return(int i, atomic_t *v) +static inline int arch_atomic_add_return(int i, atomic_t *v) { return __atomic_add_barrier(i, &v->counter) + i; } +#define arch_atomic_add_return arch_atomic_add_return -static inline int atomic_fetch_add(int i, atomic_t *v) +static inline int arch_atomic_fetch_add(int i, atomic_t *v) { return __atomic_add_barrier(i, &v->counter); } +#define arch_atomic_fetch_add arch_atomic_fetch_add -static inline void atomic_add(int i, atomic_t *v) +static inline void arch_atomic_add(int i, atomic_t *v) { __atomic_add(i, &v->counter); } +#define arch_atomic_add arch_atomic_add -#define atomic_sub(_i, _v) atomic_add(-(int)(_i), _v) -#define atomic_sub_return(_i, _v) atomic_add_return(-(int)(_i), _v) -#define atomic_fetch_sub(_i, _v) atomic_fetch_add(-(int)(_i), _v) +#define arch_atomic_sub(_i, _v) arch_atomic_add(-(int)(_i), _v) +#define arch_atomic_sub_return(_i, _v) arch_atomic_add_return(-(int)(_i), _v) +#define arch_atomic_fetch_sub(_i, _v) arch_atomic_fetch_add(-(int)(_i), _v) #define ATOMIC_OPS(op) \ -static inline void atomic_##op(int i, atomic_t *v) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ { \ __atomic_##op(i, &v->counter); \ } \ -static inline int atomic_fetch_##op(int i, atomic_t *v) \ +static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \ { \ return __atomic_##op##_barrier(i, &v->counter); \ } @@ -67,60 +65,67 @@ ATOMIC_OPS(xor) #undef ATOMIC_OPS -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) +#define arch_atomic_and arch_atomic_and +#define arch_atomic_or arch_atomic_or +#define arch_atomic_xor arch_atomic_xor +#define arch_atomic_fetch_and arch_atomic_fetch_and +#define arch_atomic_fetch_or arch_atomic_fetch_or +#define arch_atomic_fetch_xor arch_atomic_fetch_xor + +#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new)) -static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new) { return __atomic_cmpxchg(&v->counter, old, new); } +#define arch_atomic_cmpxchg arch_atomic_cmpxchg #define ATOMIC64_INIT(i) { (i) } -static inline s64 atomic64_read(const atomic64_t *v) 
+static inline s64 arch_atomic64_read(const atomic64_t *v) { - s64 c; - - asm volatile( - " lg %0,%1\n" - : "=d" (c) : "Q" (v->counter)); - return c; + return __atomic64_read(v); } +#define arch_atomic64_read arch_atomic64_read -static inline void atomic64_set(atomic64_t *v, s64 i) +static inline void arch_atomic64_set(atomic64_t *v, s64 i) { - asm volatile( - " stg %1,%0\n" - : "=Q" (v->counter) : "d" (i)); + __atomic64_set(v, i); } +#define arch_atomic64_set arch_atomic64_set -static inline s64 atomic64_add_return(s64 i, atomic64_t *v) +static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v) { return __atomic64_add_barrier(i, (long *)&v->counter) + i; } +#define arch_atomic64_add_return arch_atomic64_add_return -static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) +static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v) { return __atomic64_add_barrier(i, (long *)&v->counter); } +#define arch_atomic64_fetch_add arch_atomic64_fetch_add -static inline void atomic64_add(s64 i, atomic64_t *v) +static inline void arch_atomic64_add(s64 i, atomic64_t *v) { __atomic64_add(i, (long *)&v->counter); } +#define arch_atomic64_add arch_atomic64_add -#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) +#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new)) -static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) +static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) { return __atomic64_cmpxchg((long *)&v->counter, old, new); } +#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg #define ATOMIC64_OPS(op) \ -static inline void atomic64_##op(s64 i, atomic64_t *v) \ +static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \ { \ __atomic64_##op(i, (long *)&v->counter); \ } \ -static inline long atomic64_fetch_##op(s64 i, atomic64_t *v) \ +static inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \ { \ return __atomic64_##op##_barrier(i, (long *)&v->counter); \ } @@ -131,8 +136,17 @@ ATOMIC64_OPS(xor) #undef ATOMIC64_OPS -#define atomic64_sub_return(_i, _v) atomic64_add_return(-(s64)(_i), _v) -#define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(s64)(_i), _v) -#define atomic64_sub(_i, _v) atomic64_add(-(s64)(_i), _v) +#define arch_atomic64_and arch_atomic64_and +#define arch_atomic64_or arch_atomic64_or +#define arch_atomic64_xor arch_atomic64_xor +#define arch_atomic64_fetch_and arch_atomic64_fetch_and +#define arch_atomic64_fetch_or arch_atomic64_fetch_or +#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor + +#define arch_atomic64_sub_return(_i, _v) arch_atomic64_add_return(-(s64)(_i), _v) +#define arch_atomic64_fetch_sub(_i, _v) arch_atomic64_fetch_add(-(s64)(_i), _v) +#define arch_atomic64_sub(_i, _v) arch_atomic64_add(-(s64)(_i), _v) + +#define ARCH_ATOMIC #endif /* __ARCH_S390_ATOMIC__ */ diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h index 61467b9eecc7..50510e08b893 100644 --- a/arch/s390/include/asm/atomic_ops.h +++ b/arch/s390/include/asm/atomic_ops.h @@ -8,6 +8,40 @@ #ifndef __ARCH_S390_ATOMIC_OPS__ #define __ARCH_S390_ATOMIC_OPS__ +static inline int __atomic_read(const atomic_t *v) +{ + int c; + + asm volatile( + " l %0,%1\n" + : "=d" (c) : "R" (v->counter)); + return c; +} + +static inline void __atomic_set(atomic_t *v, int i) +{ + asm volatile( + " st %1,%0\n" + : "=R" (v->counter) : "d" (i)); +} + +static inline s64 __atomic64_read(const atomic64_t *v) +{ + s64 c; + + asm volatile( + " lg %0,%1\n" + : "=d" (c) : "RT" (v->counter)); + return c; +} + +static inline 
void __atomic64_set(atomic64_t *v, s64 i) +{ + asm volatile( + " stg %1,%0\n" + : "=RT" (v->counter) : "d" (i)); +} + #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES #define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \ @@ -18,7 +52,7 @@ static inline op_type op_name(op_type val, op_type *ptr) \ asm volatile( \ op_string " %[old],%[val],%[ptr]\n" \ op_barrier \ - : [old] "=d" (old), [ptr] "+Q" (*ptr) \ + : [old] "=d" (old), [ptr] "+QS" (*ptr) \ : [val] "d" (val) : "cc", "memory"); \ return old; \ } \ @@ -46,7 +80,7 @@ static __always_inline void op_name(op_type val, op_type *ptr) \ asm volatile( \ op_string " %[ptr],%[val]\n" \ op_barrier \ - : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc", "memory");\ + : [ptr] "+QS" (*ptr) : [val] "i" (val) : "cc", "memory");\ } #define __ATOMIC_CONST_OPS(op_name, op_type, op_string) \ @@ -97,7 +131,7 @@ static inline long op_name(long val, long *ptr) \ op_string " %[new],%[val]\n" \ " csg %[old],%[new],%[ptr]\n" \ " jl 0b" \ - : [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\ + : [old] "=d" (old), [new] "=&d" (new), [ptr] "+QS" (*ptr)\ : [val] "d" (val), "0" (*ptr) : "cc", "memory"); \ return old; \ } @@ -122,22 +156,46 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr") static inline int __atomic_cmpxchg(int *ptr, int old, int new) { - return __sync_val_compare_and_swap(ptr, old, new); + asm volatile( + " cs %[old],%[new],%[ptr]" + : [old] "+d" (old), [ptr] "+Q" (*ptr) + : [new] "d" (new) + : "cc", "memory"); + return old; } -static inline int __atomic_cmpxchg_bool(int *ptr, int old, int new) +static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new) { - return __sync_bool_compare_and_swap(ptr, old, new); + int old_expected = old; + + asm volatile( + " cs %[old],%[new],%[ptr]" + : [old] "+d" (old), [ptr] "+Q" (*ptr) + : [new] "d" (new) + : "cc", "memory"); + return old == old_expected; } static inline long __atomic64_cmpxchg(long *ptr, long old, long new) { - return __sync_val_compare_and_swap(ptr, old, new); + asm volatile( + " csg %[old],%[new],%[ptr]" + : [old] "+d" (old), [ptr] "+QS" (*ptr) + : [new] "d" (new) + : "cc", "memory"); + return old; } -static inline long __atomic64_cmpxchg_bool(long *ptr, long old, long new) +static inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new) { - return __sync_bool_compare_and_swap(ptr, old, new); + long old_expected = old; + + asm volatile( + " csg %[old],%[new],%[ptr]" + : [old] "+d" (old), [ptr] "+QS" (*ptr) + : [new] "d" (new) + : "cc", "memory"); + return old == old_expected; } #endif /* __ARCH_S390_ATOMIC_OPS__ */ diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 31121d32f81d..68da67d2c4c9 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h @@ -42,7 +42,7 @@ #define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG) static inline unsigned long * -__bitops_word(unsigned long nr, volatile unsigned long *ptr) +__bitops_word(unsigned long nr, const volatile unsigned long *ptr) { unsigned long addr; @@ -50,37 +50,33 @@ __bitops_word(unsigned long nr, volatile unsigned long *ptr) return (unsigned long *)addr; } -static inline unsigned char * -__bitops_byte(unsigned long nr, volatile unsigned long *ptr) +static inline unsigned long __bitops_mask(unsigned long nr) { - return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3); + return 1UL << (nr & (BITS_PER_LONG - 1)); } static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr) { unsigned long *addr = __bitops_word(nr, ptr); - 
unsigned long mask; + unsigned long mask = __bitops_mask(nr); - mask = 1UL << (nr & (BITS_PER_LONG - 1)); __atomic64_or(mask, (long *)addr); } static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr) { unsigned long *addr = __bitops_word(nr, ptr); - unsigned long mask; + unsigned long mask = __bitops_mask(nr); - mask = ~(1UL << (nr & (BITS_PER_LONG - 1))); - __atomic64_and(mask, (long *)addr); + __atomic64_and(~mask, (long *)addr); } static __always_inline void arch_change_bit(unsigned long nr, volatile unsigned long *ptr) { unsigned long *addr = __bitops_word(nr, ptr); - unsigned long mask; + unsigned long mask = __bitops_mask(nr); - mask = 1UL << (nr & (BITS_PER_LONG - 1)); __atomic64_xor(mask, (long *)addr); } @@ -88,99 +84,104 @@ static inline bool arch_test_and_set_bit(unsigned long nr, volatile unsigned long *ptr) { unsigned long *addr = __bitops_word(nr, ptr); - unsigned long old, mask; + unsigned long mask = __bitops_mask(nr); + unsigned long old; - mask = 1UL << (nr & (BITS_PER_LONG - 1)); old = __atomic64_or_barrier(mask, (long *)addr); - return (old & mask) != 0; + return old & mask; } static inline bool arch_test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr) { unsigned long *addr = __bitops_word(nr, ptr); - unsigned long old, mask; + unsigned long mask = __bitops_mask(nr); + unsigned long old; - mask = ~(1UL << (nr & (BITS_PER_LONG - 1))); - old = __atomic64_and_barrier(mask, (long *)addr); - return (old & ~mask) != 0; + old = __atomic64_and_barrier(~mask, (long *)addr); + return old & mask; } static inline bool arch_test_and_change_bit(unsigned long nr, volatile unsigned long *ptr) { unsigned long *addr = __bitops_word(nr, ptr); - unsigned long old, mask; + unsigned long mask = __bitops_mask(nr); + unsigned long old; - mask = 1UL << (nr & (BITS_PER_LONG - 1)); old = __atomic64_xor_barrier(mask, (long *)addr); - return (old & mask) != 0; + return old & mask; } static inline void arch___set_bit(unsigned long nr, volatile unsigned long *ptr) { - unsigned char *addr = __bitops_byte(nr, ptr); + unsigned long *addr = __bitops_word(nr, ptr); + unsigned long mask = __bitops_mask(nr); - *addr |= 1 << (nr & 7); + *addr |= mask; } static inline void arch___clear_bit(unsigned long nr, volatile unsigned long *ptr) { - unsigned char *addr = __bitops_byte(nr, ptr); + unsigned long *addr = __bitops_word(nr, ptr); + unsigned long mask = __bitops_mask(nr); - *addr &= ~(1 << (nr & 7)); + *addr &= ~mask; } static inline void arch___change_bit(unsigned long nr, volatile unsigned long *ptr) { - unsigned char *addr = __bitops_byte(nr, ptr); + unsigned long *addr = __bitops_word(nr, ptr); + unsigned long mask = __bitops_mask(nr); - *addr ^= 1 << (nr & 7); + *addr ^= mask; } static inline bool arch___test_and_set_bit(unsigned long nr, volatile unsigned long *ptr) { - unsigned char *addr = __bitops_byte(nr, ptr); - unsigned char ch; + unsigned long *addr = __bitops_word(nr, ptr); + unsigned long mask = __bitops_mask(nr); + unsigned long old; - ch = *addr; - *addr |= 1 << (nr & 7); - return (ch >> (nr & 7)) & 1; + old = *addr; + *addr |= mask; + return old & mask; } static inline bool arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr) { - unsigned char *addr = __bitops_byte(nr, ptr); - unsigned char ch; + unsigned long *addr = __bitops_word(nr, ptr); + unsigned long mask = __bitops_mask(nr); + unsigned long old; - ch = *addr; - *addr &= ~(1 << (nr & 7)); - return (ch >> (nr & 7)) & 1; + old = *addr; + *addr &= ~mask; + return old & 
mask; } static inline bool arch___test_and_change_bit(unsigned long nr, volatile unsigned long *ptr) { - unsigned char *addr = __bitops_byte(nr, ptr); - unsigned char ch; + unsigned long *addr = __bitops_word(nr, ptr); + unsigned long mask = __bitops_mask(nr); + unsigned long old; - ch = *addr; - *addr ^= 1 << (nr & 7); - return (ch >> (nr & 7)) & 1; + old = *addr; + *addr ^= mask; + return old & mask; } static inline bool arch_test_bit(unsigned long nr, const volatile unsigned long *ptr) { - const volatile unsigned char *addr; + const volatile unsigned long *addr = __bitops_word(nr, ptr); + unsigned long mask = __bitops_mask(nr); - addr = ((const volatile unsigned char *)ptr); - addr += (nr ^ (BITS_PER_LONG - 8)) >> 3; - return (*addr >> (nr & 7)) & 1; + return *addr & mask; } static inline bool arch_test_and_set_bit_lock(unsigned long nr, diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h index 778247bb1d61..d4e90f2ba77e 100644 --- a/arch/s390/include/asm/ccwdev.h +++ b/arch/s390/include/asm/ccwdev.h @@ -152,9 +152,6 @@ extern struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv, * when new devices for its type pop up */ extern int ccw_driver_register (struct ccw_driver *driver); extern void ccw_driver_unregister (struct ccw_driver *driver); - -struct ccw1; - extern int ccw_device_set_options_mask(struct ccw_device *, unsigned long); extern int ccw_device_set_options(struct ccw_device *, unsigned long); extern void ccw_device_clear_options(struct ccw_device *, unsigned long); diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h index af99c1f66f12..1960a7295ae5 100644 --- a/arch/s390/include/asm/cmpxchg.h +++ b/arch/s390/include/asm/cmpxchg.h @@ -12,27 +12,163 @@ #include <linux/types.h> #include <linux/bug.h> -#define cmpxchg(ptr, o, n) \ +void __xchg_called_with_bad_pointer(void); + +static __always_inline unsigned long __xchg(unsigned long x, + unsigned long address, int size) +{ + unsigned long old; + int shift; + + switch (size) { + case 1: + shift = (3 ^ (address & 3)) << 3; + address ^= address & 3; + asm volatile( + " l %0,%1\n" + "0: lr 0,%0\n" + " nr 0,%3\n" + " or 0,%2\n" + " cs %0,0,%1\n" + " jl 0b\n" + : "=&d" (old), "+Q" (*(int *) address) + : "d" ((x & 0xff) << shift), "d" (~(0xff << shift)) + : "memory", "cc", "0"); + return old >> shift; + case 2: + shift = (2 ^ (address & 2)) << 3; + address ^= address & 2; + asm volatile( + " l %0,%1\n" + "0: lr 0,%0\n" + " nr 0,%3\n" + " or 0,%2\n" + " cs %0,0,%1\n" + " jl 0b\n" + : "=&d" (old), "+Q" (*(int *) address) + : "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift)) + : "memory", "cc", "0"); + return old >> shift; + case 4: + asm volatile( + " l %0,%1\n" + "0: cs %0,%2,%1\n" + " jl 0b\n" + : "=&d" (old), "+Q" (*(int *) address) + : "d" (x) + : "memory", "cc"); + return old; + case 8: + asm volatile( + " lg %0,%1\n" + "0: csg %0,%2,%1\n" + " jl 0b\n" + : "=&d" (old), "+QS" (*(long *) address) + : "d" (x) + : "memory", "cc"); + return old; + } + __xchg_called_with_bad_pointer(); + return x; +} + +#define arch_xchg(ptr, x) \ ({ \ - __typeof__(*(ptr)) __o = (o); \ - __typeof__(*(ptr)) __n = (n); \ - (__typeof__(*(ptr))) __sync_val_compare_and_swap((ptr),__o,__n);\ + __typeof__(*(ptr)) __ret; \ + \ + __ret = (__typeof__(*(ptr))) \ + __xchg((unsigned long)(x), (unsigned long)(ptr), \ + sizeof(*(ptr))); \ + __ret; \ }) -#define cmpxchg64 cmpxchg -#define cmpxchg_local cmpxchg -#define cmpxchg64_local cmpxchg +void __cmpxchg_called_with_bad_pointer(void); + +static 
__always_inline unsigned long __cmpxchg(unsigned long address, + unsigned long old, + unsigned long new, int size) +{ + unsigned long prev, tmp; + int shift; + + switch (size) { + case 1: + shift = (3 ^ (address & 3)) << 3; + address ^= address & 3; + asm volatile( + " l %0,%2\n" + "0: nr %0,%5\n" + " lr %1,%0\n" + " or %0,%3\n" + " or %1,%4\n" + " cs %0,%1,%2\n" + " jnl 1f\n" + " xr %1,%0\n" + " nr %1,%5\n" + " jnz 0b\n" + "1:" + : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) address) + : "d" ((old & 0xff) << shift), + "d" ((new & 0xff) << shift), + "d" (~(0xff << shift)) + : "memory", "cc"); + return prev >> shift; + case 2: + shift = (2 ^ (address & 2)) << 3; + address ^= address & 2; + asm volatile( + " l %0,%2\n" + "0: nr %0,%5\n" + " lr %1,%0\n" + " or %0,%3\n" + " or %1,%4\n" + " cs %0,%1,%2\n" + " jnl 1f\n" + " xr %1,%0\n" + " nr %1,%5\n" + " jnz 0b\n" + "1:" + : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) address) + : "d" ((old & 0xffff) << shift), + "d" ((new & 0xffff) << shift), + "d" (~(0xffff << shift)) + : "memory", "cc"); + return prev >> shift; + case 4: + asm volatile( + " cs %0,%3,%1\n" + : "=&d" (prev), "+Q" (*(int *) address) + : "0" (old), "d" (new) + : "memory", "cc"); + return prev; + case 8: + asm volatile( + " csg %0,%3,%1\n" + : "=&d" (prev), "+QS" (*(long *) address) + : "0" (old), "d" (new) + : "memory", "cc"); + return prev; + } + __cmpxchg_called_with_bad_pointer(); + return old; +} -#define xchg(ptr, x) \ +#define arch_cmpxchg(ptr, o, n) \ ({ \ - __typeof__(ptr) __ptr = (ptr); \ - __typeof__(*(ptr)) __old; \ - do { \ - __old = *__ptr; \ - } while (!__sync_bool_compare_and_swap(__ptr, __old, x)); \ - __old; \ + __typeof__(*(ptr)) __ret; \ + \ + __ret = (__typeof__(*(ptr))) \ + __cmpxchg((unsigned long)(ptr), (unsigned long)(o), \ + (unsigned long)(n), sizeof(*(ptr))); \ + __ret; \ }) +#define arch_cmpxchg64 arch_cmpxchg +#define arch_cmpxchg_local arch_cmpxchg +#define arch_cmpxchg64_local arch_cmpxchg + +#define system_has_cmpxchg_double() 1 + #define __cmpxchg_double(p1, p2, o1, o2, n1, n2) \ ({ \ register __typeof__(*(p1)) __old1 asm("2") = (o1); \ @@ -51,7 +187,7 @@ !cc; \ }) -#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \ +#define arch_cmpxchg_double(p1, p2, o1, o2, n1, n2) \ ({ \ __typeof__(p1) __p1 = (p1); \ __typeof__(p2) __p2 = (p2); \ @@ -61,6 +197,4 @@ __cmpxchg_double(__p1, __p2, o1, o2, n1, n2); \ }) -#define system_has_cmpxchg_double() 1 - #endif /* __ASM_CMPXCHG_H */ diff --git a/arch/s390/include/asm/cpu_mcf.h b/arch/s390/include/asm/cpu_mcf.h index 649b9fc60685..3e4cbcb7c4cc 100644 --- a/arch/s390/include/asm/cpu_mcf.h +++ b/arch/s390/include/asm/cpu_mcf.h @@ -123,4 +123,6 @@ static inline int stccm_avail(void) return test_facility(142); } +size_t cpum_cf_ctrset_size(enum cpumf_ctr_set ctrset, + struct cpumf_ctr_info *info); #endif /* _ASM_S390_CPU_MCF_H */ diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h index 75cebc80474e..baa8005090c3 100644 --- a/arch/s390/include/asm/entry-common.h +++ b/arch/s390/include/asm/entry-common.h @@ -4,9 +4,11 @@ #include <linux/sched.h> #include <linux/audit.h> +#include <linux/randomize_kstack.h> #include <linux/tracehook.h> #include <linux/processor.h> #include <linux/uaccess.h> +#include <asm/timex.h> #include <asm/fpu/api.h> #define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_GUARDED_STORAGE | _TIF_PER_TRAP) @@ -14,10 +16,6 @@ void do_per_trap(struct pt_regs *regs); void do_syscall(struct pt_regs *regs); -typedef void (*pgm_check_func)(struct pt_regs *regs); - -extern 
pgm_check_func pgm_check_table[128]; - #ifdef CONFIG_DEBUG_ENTRY static __always_inline void arch_check_user_regs(struct pt_regs *regs) { @@ -52,6 +50,14 @@ static __always_inline void arch_exit_to_user_mode(void) #define arch_exit_to_user_mode arch_exit_to_user_mode +static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, + unsigned long ti_work) +{ + choose_random_kstack_offset(get_tod_clock_fast() & 0xff); +} + +#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare + static inline bool on_thread_stack(void) { return !(((unsigned long)(current->stack) ^ current_stack_pointer()) & ~(THREAD_SIZE - 1)); diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index 28664ee0abc1..e3882b012bfa 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -20,11 +20,6 @@ void *xlate_dev_mem_ptr(phys_addr_t phys); #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - #define IO_SPACE_LIMIT 0 void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot); diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 6bcfc5614bbc..8925f3969478 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -454,6 +454,7 @@ struct kvm_vcpu_stat { u64 diagnose_44; u64 diagnose_9c; u64 diagnose_9c_ignored; + u64 diagnose_9c_forward; u64 diagnose_258; u64 diagnose_308; u64 diagnose_500; @@ -700,6 +701,10 @@ struct kvm_hw_bp_info_arch { #define guestdbg_exit_pending(vcpu) (guestdbg_enabled(vcpu) && \ (vcpu->guest_debug & KVM_GUESTDBG_EXIT_PENDING)) +#define KVM_GUESTDBG_VALID_MASK \ + (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP |\ + KVM_GUESTDBG_USE_HW_BP | KVM_GUESTDBG_EXIT_PENDING) + struct kvm_guestdbg_info_arch { unsigned long cr0; unsigned long cr9; diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 053fe8b8dec7..10b67f8aab99 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -85,7 +85,6 @@ enum zpci_state { ZPCI_FN_STATE_STANDBY = 0, ZPCI_FN_STATE_CONFIGURED = 1, ZPCI_FN_STATE_RESERVED = 2, - ZPCI_FN_STATE_ONLINE = 3, }; struct zpci_bar_struct { @@ -131,9 +130,10 @@ struct zpci_dev { u8 port; u8 rid_available : 1; u8 has_hp_slot : 1; + u8 has_resources : 1; u8 is_physfn : 1; u8 util_str_avail : 1; - u8 reserved : 4; + u8 reserved : 3; unsigned int devfn; /* DEVFN part of the RID*/ struct mutex lock; @@ -201,10 +201,12 @@ extern unsigned int s390_pci_no_rid; Prototypes ----------------------------------------------------------------------------- */ /* Base stuff */ -int zpci_create_device(u32 fid, u32 fh, enum zpci_state state); -void zpci_remove_device(struct zpci_dev *zdev); +struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state); int zpci_enable_device(struct zpci_dev *); int zpci_disable_device(struct zpci_dev *); +int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh); +int zpci_deconfigure_device(struct zpci_dev *zdev); + int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64); int zpci_unregister_ioat(struct zpci_dev *, u8); void zpci_remove_reserved_devices(void); diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index d9215c7106f0..8fc52679543d 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -246,21 +246,8 @@ struct slsb { u8 val[QDIO_MAX_BUFFERS_PER_Q]; } __attribute__ 
((packed, aligned(256))); -/** - * struct qdio_outbuf_state - SBAL related asynchronous operation information - * (for communication with upper layer programs) - * (only required for use with completion queues) - * @user: pointer to upper layer program's state information related to SBAL - * (stored in user1 data of QAOB) - */ -struct qdio_outbuf_state { - void *user; -}; - -#define CHSC_AC1_INITIATE_INPUTQ 0x80 - - /* qdio adapter-characteristics-1 flag */ +#define CHSC_AC1_INITIATE_INPUTQ 0x80 #define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */ #define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */ #define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */ @@ -338,7 +325,6 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int, * @int_parm: interruption parameter * @input_sbal_addr_array: per-queue array, each element points to 128 SBALs * @output_sbal_addr_array: per-queue array, each element points to 128 SBALs - * @output_sbal_state_array: no_output_qs * 128 state info (for CQ or NULL) */ struct qdio_initialize { unsigned char q_format; @@ -357,7 +343,6 @@ struct qdio_initialize { unsigned long int_parm; struct qdio_buffer ***input_sbal_addr_array; struct qdio_buffer ***output_sbal_addr_array; - struct qdio_outbuf_state *output_sbal_state_array; }; #define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */ @@ -378,9 +363,10 @@ extern int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs, extern int qdio_establish(struct ccw_device *cdev, struct qdio_initialize *init_data); extern int qdio_activate(struct ccw_device *); +extern struct qaob *qdio_allocate_aob(void); extern void qdio_release_aob(struct qaob *); -extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int, - unsigned int); +extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, int q_nr, + unsigned int bufnr, unsigned int count, struct qaob *aob); extern int qdio_start_irq(struct ccw_device *cdev); extern int qdio_stop_irq(struct ccw_device *cdev); extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *); diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index 01e360004481..e317fd4866c1 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h @@ -63,5 +63,6 @@ extern void __noreturn cpu_die(void); extern void __cpu_die(unsigned int cpu); extern int __cpu_disable(void); extern void schedule_mcck_handler(void); +void notrace smp_yield_cpu(int cpu); #endif /* __ASM_SMP_H */ diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index 3a37172d5398..ef59588a3042 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -88,7 +88,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp) asm_inline volatile( ALTERNATIVE("", ".long 0xb2fa0070", 49) /* NIAI 7 */ " sth %1,%0\n" - : "=Q" (((unsigned short *) &lp->lock)[1]) + : "=R" (((unsigned short *) &lp->lock)[1]) : "d" (0) : "cc", "memory"); } diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h index cfed272e4fd5..a2bbfd7df85f 100644 --- a/arch/s390/include/asm/spinlock_types.h +++ b/arch/s390/include/asm/spinlock_types.h @@ -8,7 +8,7 @@ typedef struct { int lock; -} __attribute__ ((aligned (4))) arch_spinlock_t; +} arch_spinlock_t; #define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, } diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h index ee056f4a4fa3..2b543163d90a 100644 --- a/arch/s390/include/asm/stacktrace.h 
+++ b/arch/s390/include/asm/stacktrace.h @@ -12,6 +12,7 @@ enum stack_type { STACK_TYPE_IRQ, STACK_TYPE_NODAT, STACK_TYPE_RESTART, + STACK_TYPE_MCCK, }; struct stack_info { diff --git a/arch/s390/include/asm/vdso/data.h b/arch/s390/include/asm/vdso/data.h index 7b3cdb4a5f48..73ee89142666 100644 --- a/arch/s390/include/asm/vdso/data.h +++ b/arch/s390/include/asm/vdso/data.h @@ -6,7 +6,7 @@ #include <vdso/datapage.h> struct arch_vdso_data { - __u64 tod_steering_delta; + __s64 tod_steering_delta; __u64 tod_steering_end; }; diff --git a/arch/s390/include/asm/vdso/gettimeofday.h b/arch/s390/include/asm/vdso/gettimeofday.h index ed89ef742530..383c53c3dddd 100644 --- a/arch/s390/include/asm/vdso/gettimeofday.h +++ b/arch/s390/include/asm/vdso/gettimeofday.h @@ -68,7 +68,8 @@ long clock_getres_fallback(clockid_t clkid, struct __kernel_timespec *ts) } #ifdef CONFIG_TIME_NS -static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void) +static __always_inline +const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd) { return _timens_data; } diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index c97818a382f3..68ca1834316f 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -36,7 +36,7 @@ CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o -obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o pgm_check.o +obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c index af013b4244d3..2da027359798 100644 --- a/arch/s390/kernel/cpcmd.c +++ b/arch/s390/kernel/cpcmd.c @@ -37,10 +37,12 @@ static int diag8_noresponse(int cmdlen) static int diag8_response(int cmdlen, char *response, int *rlen) { + unsigned long _cmdlen = cmdlen | 0x40000000L; + unsigned long _rlen = *rlen; register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf; register unsigned long reg3 asm ("3") = (addr_t) response; - register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L; - register unsigned long reg5 asm ("5") = *rlen; + register unsigned long reg4 asm ("4") = _cmdlen; + register unsigned long reg5 asm ("5") = _rlen; asm volatile( " diag %2,%0,0x8\n" diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index a7eab7be4db0..5412efe328f8 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -563,7 +563,7 @@ void show_code(struct pt_regs *regs) void print_fn_code(unsigned char *code, unsigned long len) { - char buffer[64], *ptr; + char buffer[128], *ptr; int opsize, i; while (len) { diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index 0dc4b258b98d..db1bc00229ca 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c @@ -79,6 +79,15 @@ static bool in_nodat_stack(unsigned long sp, struct stack_info *info) return in_stack(sp, info, STACK_TYPE_NODAT, top - THREAD_SIZE, top); } +static bool in_mcck_stack(unsigned long sp, struct stack_info *info) +{ + unsigned long frame_size, top; + + frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); + top = S390_lowcore.mcck_stack + frame_size; + return in_stack(sp, info, STACK_TYPE_MCCK, 
top - THREAD_SIZE, top); +} + static bool in_restart_stack(unsigned long sp, struct stack_info *info) { unsigned long frame_size, top; @@ -108,7 +117,8 @@ int get_stack_info(unsigned long sp, struct task_struct *task, /* Check per-cpu stacks */ if (!in_irq_stack(sp, info) && !in_nodat_stack(sp, info) && - !in_restart_stack(sp, info)) + !in_restart_stack(sp, info) && + !in_mcck_stack(sp, info)) goto unknown; recursion_check: diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index c10b9f31eef7..12de7a9c85b3 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -401,15 +401,13 @@ ENTRY(\name) brasl %r14,.Lcleanup_sie_int #endif 0: CHECK_STACK __LC_SAVE_AREA_ASYNC - lgr %r11,%r15 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - stg %r11,__SF_BACKCHAIN(%r15) j 2f 1: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP lctlg %c1,%c1,__LC_KERNEL_ASCE lg %r15,__LC_KERNEL_STACK - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) -2: la %r11,STACK_FRAME_OVERHEAD(%r15) +2: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) + la %r11,STACK_FRAME_OVERHEAD(%r15) stmg %r0,%r7,__PT_R0(%r11) # clear user controlled registers to prevent speculative use xgr %r0,%r0 @@ -445,6 +443,7 @@ INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq * Load idle PSW. */ ENTRY(psw_idle) + stg %r14,(__SF_GPRS+8*8)(%r15) stg %r3,__SF_EMPTY(%r15) larl %r1,psw_idle_exit stg %r1,__SF_EMPTY+8(%r15) diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index 3d0c0ac5c20e..1ab33465382f 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h @@ -26,29 +26,6 @@ void do_dat_exception(struct pt_regs *regs); void do_secure_storage_access(struct pt_regs *regs); void do_non_secure_storage_access(struct pt_regs *regs); void do_secure_storage_violation(struct pt_regs *regs); - -void addressing_exception(struct pt_regs *regs); -void data_exception(struct pt_regs *regs); -void default_trap_handler(struct pt_regs *regs); -void divide_exception(struct pt_regs *regs); -void execute_exception(struct pt_regs *regs); -void hfp_divide_exception(struct pt_regs *regs); -void hfp_overflow_exception(struct pt_regs *regs); -void hfp_significance_exception(struct pt_regs *regs); -void hfp_sqrt_exception(struct pt_regs *regs); -void hfp_underflow_exception(struct pt_regs *regs); -void illegal_op(struct pt_regs *regs); -void operand_exception(struct pt_regs *regs); -void overflow_exception(struct pt_regs *regs); -void privileged_op(struct pt_regs *regs); -void space_switch_exception(struct pt_regs *regs); -void special_op_exception(struct pt_regs *regs); -void specification_exception(struct pt_regs *regs); -void transaction_exception(struct pt_regs *regs); -void translation_exception(struct pt_regs *regs); -void vector_exception(struct pt_regs *regs); -void monitor_event_exception(struct pt_regs *regs); - void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str); void kernel_stack_overflow(struct pt_regs * regs); void do_signal(struct pt_regs *regs); @@ -59,7 +36,7 @@ void do_notify_resume(struct pt_regs *regs); void __init init_IRQ(void); void do_io_irq(struct pt_regs *regs); void do_ext_irq(struct pt_regs *regs); -void do_restart(void); +void do_restart(void *arg); void __init startup_init(void); void die(struct pt_regs *regs, const char *str); int setup_profiling_timer(unsigned int multiplier); diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 7a21eca498aa..dba04fbc37a2 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -1849,12 +1849,12 @@ static void 
__do_restart(void *ignore) stop_run(&on_restart_trigger); } -void do_restart(void) +void do_restart(void *arg) { tracing_off(); debug_locks_off(); lgr_info_log(); - smp_call_online_cpu(__do_restart, NULL); + smp_call_online_cpu(__do_restart, arg); } /* on halt */ diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 601c21791338..714269e10eec 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -174,7 +174,7 @@ void noinstr do_ext_irq(struct pt_regs *regs) memcpy(®s->int_code, &S390_lowcore.ext_cpu_addr, 4); regs->int_parm = S390_lowcore.ext_params; - regs->int_parm_long = *(unsigned long *)S390_lowcore.ext_params2; + regs->int_parm_long = S390_lowcore.ext_params2; from_idle = !user_mode(regs) && regs->psw.addr == (unsigned long)psw_idle_exit; if (from_idle) diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c index 0a5e4bafb6ad..5a7420b23aa8 100644 --- a/arch/s390/kernel/os_info.c +++ b/arch/s390/kernel/os_info.c @@ -52,7 +52,7 @@ void os_info_entry_add(int nr, void *ptr, u64 size) } /* - * Initialize OS info struture and set lowcore pointer + * Initialize OS info structure and set lowcore pointer */ void __init os_info_init(void) { diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index b3beef64d3d4..31a605bcbc6e 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -230,9 +230,7 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type) /* No support for kernel space counters only */ } else if (!attr->exclude_kernel && attr->exclude_user) { return -EOPNOTSUPP; - - /* Count user and kernel space */ - } else { + } else { /* Count user and kernel space */ if (ev >= ARRAY_SIZE(cpumf_generic_events_basic)) return -EOPNOTSUPP; ev = cpumf_generic_events_basic[ev]; @@ -402,12 +400,12 @@ static void cpumf_pmu_stop(struct perf_event *event, int flags) */ if (!atomic_dec_return(&cpuhw->ctr_set[hwc->config_base])) ctr_set_stop(&cpuhw->state, hwc->config_base); - event->hw.state |= PERF_HES_STOPPED; + hwc->state |= PERF_HES_STOPPED; } if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { hw_perf_event_update(event); - event->hw.state |= PERF_HES_UPTODATE; + hwc->state |= PERF_HES_UPTODATE; } } @@ -430,8 +428,6 @@ static int cpumf_pmu_add(struct perf_event *event, int flags) if (flags & PERF_EF_START) cpumf_pmu_start(event, PERF_EF_RELOAD); - perf_event_update_userpage(event); - return 0; } @@ -451,8 +447,6 @@ static void cpumf_pmu_del(struct perf_event *event, int flags) */ if (!atomic_read(&cpuhw->ctr_set[event->hw.config_base])) ctr_set_disable(&cpuhw->state, event->hw.config_base); - - perf_event_update_userpage(event); } /* diff --git a/arch/s390/kernel/perf_cpum_cf_common.c b/arch/s390/kernel/perf_cpum_cf_common.c index 3bced89caffb..6d53215c8484 100644 --- a/arch/s390/kernel/perf_cpum_cf_common.c +++ b/arch/s390/kernel/perf_cpum_cf_common.c @@ -170,6 +170,52 @@ static int cpum_cf_offline_cpu(unsigned int cpu) return cpum_cf_setup(cpu, PMC_RELEASE); } +/* Return the maximum possible counter set size (in number of 8 byte counters) + * depending on type and model number. 
+ */ +size_t cpum_cf_ctrset_size(enum cpumf_ctr_set ctrset, + struct cpumf_ctr_info *info) +{ + size_t ctrset_size = 0; + + switch (ctrset) { + case CPUMF_CTR_SET_BASIC: + if (info->cfvn >= 1) + ctrset_size = 6; + break; + case CPUMF_CTR_SET_USER: + if (info->cfvn == 1) + ctrset_size = 6; + else if (info->cfvn >= 3) + ctrset_size = 2; + break; + case CPUMF_CTR_SET_CRYPTO: + if (info->csvn >= 1 && info->csvn <= 5) + ctrset_size = 16; + else if (info->csvn == 6) + ctrset_size = 20; + break; + case CPUMF_CTR_SET_EXT: + if (info->csvn == 1) + ctrset_size = 32; + else if (info->csvn == 2) + ctrset_size = 48; + else if (info->csvn >= 3 && info->csvn <= 5) + ctrset_size = 128; + else if (info->csvn == 6) + ctrset_size = 160; + break; + case CPUMF_CTR_SET_MT_DIAG: + if (info->csvn > 3) + ctrset_size = 48; + break; + case CPUMF_CTR_SET_MAX: + break; + } + + return ctrset_size; +} + static int __init cpum_cf_init(void) { int rc; diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c index bc302b86ce28..08c985c1097c 100644 --- a/arch/s390/kernel/perf_cpum_cf_diag.c +++ b/arch/s390/kernel/perf_cpum_cf_diag.c @@ -316,52 +316,6 @@ static void cf_diag_read(struct perf_event *event) debug_sprintf_event(cf_diag_dbg, 5, "%s event %p\n", __func__, event); } -/* Return the maximum possible counter set size (in number of 8 byte counters) - * depending on type and model number. - */ -static size_t cf_diag_ctrset_size(enum cpumf_ctr_set ctrset, - struct cpumf_ctr_info *info) -{ - size_t ctrset_size = 0; - - switch (ctrset) { - case CPUMF_CTR_SET_BASIC: - if (info->cfvn >= 1) - ctrset_size = 6; - break; - case CPUMF_CTR_SET_USER: - if (info->cfvn == 1) - ctrset_size = 6; - else if (info->cfvn >= 3) - ctrset_size = 2; - break; - case CPUMF_CTR_SET_CRYPTO: - if (info->csvn >= 1 && info->csvn <= 5) - ctrset_size = 16; - else if (info->csvn == 6) - ctrset_size = 20; - break; - case CPUMF_CTR_SET_EXT: - if (info->csvn == 1) - ctrset_size = 32; - else if (info->csvn == 2) - ctrset_size = 48; - else if (info->csvn >= 3 && info->csvn <= 5) - ctrset_size = 128; - else if (info->csvn == 6) - ctrset_size = 160; - break; - case CPUMF_CTR_SET_MT_DIAG: - if (info->csvn > 3) - ctrset_size = 48; - break; - case CPUMF_CTR_SET_MAX: - break; - } - - return ctrset_size; -} - /* Calculate memory needed to store all counter sets together with header and * trailer data. This is independend of the counter set authorization which * can vary depending on the configuration. 
@@ -372,7 +326,7 @@ static size_t cf_diag_ctrset_maxsize(struct cpumf_ctr_info *info) enum cpumf_ctr_set i; for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) { - size_t size = cf_diag_ctrset_size(i, info); + size_t size = cpum_cf_ctrset_size(i, info); if (size) max_size += size * sizeof(u64) + @@ -405,7 +359,7 @@ static size_t cf_diag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset, ctrdata->def = CF_DIAG_CTRSET_DEF; ctrdata->set = ctrset; ctrdata->res1 = 0; - ctrset_size = cf_diag_ctrset_size(ctrset, &cpuhw->info); + ctrset_size = cpum_cf_ctrset_size(ctrset, &cpuhw->info); if (ctrset_size) { /* Save data */ need = ctrset_size * sizeof(u64) + sizeof(*ctrdata); @@ -845,7 +799,7 @@ static void cf_diag_cpu_read(void *parm) if (!(p->sets & cpumf_ctr_ctl[set])) continue; /* Counter set not in list */ - set_size = cf_diag_ctrset_size(set, &cpuhw->info); + set_size = cpum_cf_ctrset_size(set, &cpuhw->info); space = sizeof(csd->data) - csd->used; space = cf_diag_cpuset_read(sp, set, set_size, space); if (space) { @@ -968,14 +922,14 @@ static int cf_diag_all_start(void) */ static size_t cf_diag_needspace(unsigned int sets) { - struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + struct cpu_cf_events *cpuhw = get_cpu_ptr(&cpu_cf_events); size_t bytes = 0; int i; for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) { if (!(sets & cpumf_ctr_ctl[i])) continue; - bytes += cf_diag_ctrset_size(i, &cpuhw->info) * sizeof(u64) + + bytes += cpum_cf_ctrset_size(i, &cpuhw->info) * sizeof(u64) + sizeof(((struct s390_ctrset_setdata *)0)->set) + sizeof(((struct s390_ctrset_setdata *)0)->no_cnts); } @@ -984,6 +938,7 @@ static size_t cf_diag_needspace(unsigned int sets) sizeof(((struct s390_ctrset_cpudata *)0)->no_sets)); debug_sprintf_event(cf_diag_dbg, 5, "%s bytes %ld\n", __func__, bytes); + put_cpu_ptr(&cpu_cf_events); return bytes; } diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c index 1e75cc983546..ea7729bebaa0 100644 --- a/arch/s390/kernel/perf_event.c +++ b/arch/s390/kernel/perf_event.c @@ -23,27 +23,6 @@ #include <asm/sysinfo.h> #include <asm/unwind.h> -const char *perf_pmu_name(void) -{ - if (cpum_cf_avail() || cpum_sf_avail()) - return "CPU-Measurement Facilities (CPU-MF)"; - return "pmu"; -} -EXPORT_SYMBOL(perf_pmu_name); - -int perf_num_counters(void) -{ - int num = 0; - - if (cpum_cf_avail()) - num += PERF_CPUM_CF_MAX_CTR; - if (cpum_sf_avail()) - num += PERF_CPUM_SF_MAX_CTR; - - return num; -} -EXPORT_SYMBOL(perf_num_counters); - static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs) { struct stack_frame *stack = (struct stack_frame *) regs->gprs[15]; diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S deleted file mode 100644 index 9a92638360ee..000000000000 --- a/arch/s390/kernel/pgm_check.S +++ /dev/null @@ -1,147 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Program check table. - * - * Copyright IBM Corp. 2012 - */ - -#include <linux/linkage.h> - -#define PGM_CHECK(handler) .quad handler -#define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler) - -/* - * The program check table contains exactly 128 (0x00-0x7f) entries. Each - * line defines the function to be called corresponding to the program check - * interruption code. 
- */ -.section .rodata, "a" -ENTRY(pgm_check_table) -PGM_CHECK_DEFAULT /* 00 */ -PGM_CHECK(illegal_op) /* 01 */ -PGM_CHECK(privileged_op) /* 02 */ -PGM_CHECK(execute_exception) /* 03 */ -PGM_CHECK(do_protection_exception) /* 04 */ -PGM_CHECK(addressing_exception) /* 05 */ -PGM_CHECK(specification_exception) /* 06 */ -PGM_CHECK(data_exception) /* 07 */ -PGM_CHECK(overflow_exception) /* 08 */ -PGM_CHECK(divide_exception) /* 09 */ -PGM_CHECK(overflow_exception) /* 0a */ -PGM_CHECK(divide_exception) /* 0b */ -PGM_CHECK(hfp_overflow_exception) /* 0c */ -PGM_CHECK(hfp_underflow_exception) /* 0d */ -PGM_CHECK(hfp_significance_exception) /* 0e */ -PGM_CHECK(hfp_divide_exception) /* 0f */ -PGM_CHECK(do_dat_exception) /* 10 */ -PGM_CHECK(do_dat_exception) /* 11 */ -PGM_CHECK(translation_exception) /* 12 */ -PGM_CHECK(special_op_exception) /* 13 */ -PGM_CHECK_DEFAULT /* 14 */ -PGM_CHECK(operand_exception) /* 15 */ -PGM_CHECK_DEFAULT /* 16 */ -PGM_CHECK_DEFAULT /* 17 */ -PGM_CHECK(transaction_exception) /* 18 */ -PGM_CHECK_DEFAULT /* 19 */ -PGM_CHECK_DEFAULT /* 1a */ -PGM_CHECK(vector_exception) /* 1b */ -PGM_CHECK(space_switch_exception) /* 1c */ -PGM_CHECK(hfp_sqrt_exception) /* 1d */ -PGM_CHECK_DEFAULT /* 1e */ -PGM_CHECK_DEFAULT /* 1f */ -PGM_CHECK_DEFAULT /* 20 */ -PGM_CHECK_DEFAULT /* 21 */ -PGM_CHECK_DEFAULT /* 22 */ -PGM_CHECK_DEFAULT /* 23 */ -PGM_CHECK_DEFAULT /* 24 */ -PGM_CHECK_DEFAULT /* 25 */ -PGM_CHECK_DEFAULT /* 26 */ -PGM_CHECK_DEFAULT /* 27 */ -PGM_CHECK_DEFAULT /* 28 */ -PGM_CHECK_DEFAULT /* 29 */ -PGM_CHECK_DEFAULT /* 2a */ -PGM_CHECK_DEFAULT /* 2b */ -PGM_CHECK_DEFAULT /* 2c */ -PGM_CHECK_DEFAULT /* 2d */ -PGM_CHECK_DEFAULT /* 2e */ -PGM_CHECK_DEFAULT /* 2f */ -PGM_CHECK_DEFAULT /* 30 */ -PGM_CHECK_DEFAULT /* 31 */ -PGM_CHECK_DEFAULT /* 32 */ -PGM_CHECK_DEFAULT /* 33 */ -PGM_CHECK_DEFAULT /* 34 */ -PGM_CHECK_DEFAULT /* 35 */ -PGM_CHECK_DEFAULT /* 36 */ -PGM_CHECK_DEFAULT /* 37 */ -PGM_CHECK(do_dat_exception) /* 38 */ -PGM_CHECK(do_dat_exception) /* 39 */ -PGM_CHECK(do_dat_exception) /* 3a */ -PGM_CHECK(do_dat_exception) /* 3b */ -PGM_CHECK_DEFAULT /* 3c */ -PGM_CHECK(do_secure_storage_access) /* 3d */ -PGM_CHECK(do_non_secure_storage_access) /* 3e */ -PGM_CHECK(do_secure_storage_violation) /* 3f */ -PGM_CHECK(monitor_event_exception) /* 40 */ -PGM_CHECK_DEFAULT /* 41 */ -PGM_CHECK_DEFAULT /* 42 */ -PGM_CHECK_DEFAULT /* 43 */ -PGM_CHECK_DEFAULT /* 44 */ -PGM_CHECK_DEFAULT /* 45 */ -PGM_CHECK_DEFAULT /* 46 */ -PGM_CHECK_DEFAULT /* 47 */ -PGM_CHECK_DEFAULT /* 48 */ -PGM_CHECK_DEFAULT /* 49 */ -PGM_CHECK_DEFAULT /* 4a */ -PGM_CHECK_DEFAULT /* 4b */ -PGM_CHECK_DEFAULT /* 4c */ -PGM_CHECK_DEFAULT /* 4d */ -PGM_CHECK_DEFAULT /* 4e */ -PGM_CHECK_DEFAULT /* 4f */ -PGM_CHECK_DEFAULT /* 50 */ -PGM_CHECK_DEFAULT /* 51 */ -PGM_CHECK_DEFAULT /* 52 */ -PGM_CHECK_DEFAULT /* 53 */ -PGM_CHECK_DEFAULT /* 54 */ -PGM_CHECK_DEFAULT /* 55 */ -PGM_CHECK_DEFAULT /* 56 */ -PGM_CHECK_DEFAULT /* 57 */ -PGM_CHECK_DEFAULT /* 58 */ -PGM_CHECK_DEFAULT /* 59 */ -PGM_CHECK_DEFAULT /* 5a */ -PGM_CHECK_DEFAULT /* 5b */ -PGM_CHECK_DEFAULT /* 5c */ -PGM_CHECK_DEFAULT /* 5d */ -PGM_CHECK_DEFAULT /* 5e */ -PGM_CHECK_DEFAULT /* 5f */ -PGM_CHECK_DEFAULT /* 60 */ -PGM_CHECK_DEFAULT /* 61 */ -PGM_CHECK_DEFAULT /* 62 */ -PGM_CHECK_DEFAULT /* 63 */ -PGM_CHECK_DEFAULT /* 64 */ -PGM_CHECK_DEFAULT /* 65 */ -PGM_CHECK_DEFAULT /* 66 */ -PGM_CHECK_DEFAULT /* 67 */ -PGM_CHECK_DEFAULT /* 68 */ -PGM_CHECK_DEFAULT /* 69 */ -PGM_CHECK_DEFAULT /* 6a */ -PGM_CHECK_DEFAULT /* 6b */ -PGM_CHECK_DEFAULT /* 6c */ -PGM_CHECK_DEFAULT /* 6d */ 
-PGM_CHECK_DEFAULT /* 6e */ -PGM_CHECK_DEFAULT /* 6f */ -PGM_CHECK_DEFAULT /* 70 */ -PGM_CHECK_DEFAULT /* 71 */ -PGM_CHECK_DEFAULT /* 72 */ -PGM_CHECK_DEFAULT /* 73 */ -PGM_CHECK_DEFAULT /* 74 */ -PGM_CHECK_DEFAULT /* 75 */ -PGM_CHECK_DEFAULT /* 76 */ -PGM_CHECK_DEFAULT /* 77 */ -PGM_CHECK_DEFAULT /* 78 */ -PGM_CHECK_DEFAULT /* 79 */ -PGM_CHECK_DEFAULT /* 7a */ -PGM_CHECK_DEFAULT /* 7b */ -PGM_CHECK_DEFAULT /* 7c */ -PGM_CHECK_DEFAULT /* 7d */ -PGM_CHECK_DEFAULT /* 7e */ -PGM_CHECK_DEFAULT /* 7f */ diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 60da976eee6f..5aab59ad5688 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -354,7 +354,7 @@ static int __init stack_realloc(void) if (!new) panic("Couldn't allocate machine check stack"); WRITE_ONCE(S390_lowcore.mcck_stack, new + STACK_INIT_OFFSET); - memblock_free(old, THREAD_SIZE); + memblock_free_late(old, THREAD_SIZE); return 0; } early_initcall(stack_realloc); @@ -937,9 +937,9 @@ static int __init setup_hwcaps(void) if (MACHINE_HAS_VX) { elf_hwcap |= HWCAP_S390_VXRS; if (test_facility(134)) - elf_hwcap |= HWCAP_S390_VXRS_EXT; - if (test_facility(135)) elf_hwcap |= HWCAP_S390_VXRS_BCD; + if (test_facility(135)) + elf_hwcap |= HWCAP_S390_VXRS_EXT; if (test_facility(148)) elf_hwcap |= HWCAP_S390_VXRS_EXT2; if (test_facility(152)) diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 58c8afa3da65..2fec2b80d35d 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -429,6 +429,7 @@ void notrace smp_yield_cpu(int cpu) asm volatile("diag %0,0,0x9c" : : "d" (pcpu_devices[cpu].address)); } +EXPORT_SYMBOL_GPL(smp_yield_cpu); /* * Send cpus emergency shutdown signal. This gives the cpus the diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c index 7f1266c24f6b..101477b3e263 100644 --- a/arch/s390/kernel/stacktrace.c +++ b/arch/s390/kernel/stacktrace.c @@ -24,12 +24,6 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, } } -/* - * This function returns an error if it detects any unreliable features of the - * stack. Otherwise it guarantees that the stack trace is reliable. - * - * If the task is not 'current', the caller *must* ensure the task is inactive. 
- */ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task) { diff --git a/arch/s390/kernel/syscall.c b/arch/s390/kernel/syscall.c index bc8e650e377d..4e5cc7d2364e 100644 --- a/arch/s390/kernel/syscall.c +++ b/arch/s390/kernel/syscall.c @@ -142,6 +142,7 @@ void do_syscall(struct pt_regs *regs) void noinstr __do_syscall(struct pt_regs *regs, int per_trap) { + add_random_kstack_offset(); enter_from_user_mode(regs); memcpy(®s->gprs[8], S390_lowcore.save_area_sync, 8 * sizeof(unsigned long)); diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index 3abef2144dac..7e4a2aba366d 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl @@ -445,3 +445,7 @@ 440 common process_madvise sys_process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr sys_mount_setattr +443 common quotactl_path sys_quotactl_path sys_quotactl_path +444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset +445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule +446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 165da961f901..326cb8f75f58 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -80,10 +80,12 @@ void __init time_early_init(void) { struct ptff_qto qto; struct ptff_qui qui; + int cs; /* Initialize TOD steering parameters */ tod_steering_end = tod_clock_base.tod; - vdso_data->arch_data.tod_steering_end = tod_steering_end; + for (cs = 0; cs < CS_BASES; cs++) + vdso_data[cs].arch_data.tod_steering_end = tod_steering_end; if (!test_facility(28)) return; @@ -366,6 +368,7 @@ static void clock_sync_global(unsigned long delta) { unsigned long now, adj; struct ptff_qto qto; + int cs; /* Fixup the monotonic sched clock. */ tod_clock_base.eitod += delta; @@ -381,7 +384,10 @@ static void clock_sync_global(unsigned long delta) panic("TOD clock sync offset %li is too large to drift\n", tod_steering_delta); tod_steering_end = now + (abs(tod_steering_delta) << 15); - vdso_data->arch_data.tod_steering_end = tod_steering_end; + for (cs = 0; cs < CS_BASES; cs++) { + vdso_data[cs].arch_data.tod_steering_end = tod_steering_end; + vdso_data[cs].arch_data.tod_steering_delta = tod_steering_delta; + } /* Update LPAR offset. 
*/ if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0) diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index db7dd59b570c..8dd23c703718 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -17,6 +17,7 @@ #include "asm/ptrace.h" #include <linux/kprobes.h> #include <linux/kdebug.h> +#include <linux/randomize_kstack.h> #include <linux/extable.h> #include <linux/ptrace.h> #include <linux/sched.h> @@ -79,7 +80,7 @@ void do_per_trap(struct pt_regs *regs) } NOKPROBE_SYMBOL(do_per_trap); -void default_trap_handler(struct pt_regs *regs) +static void default_trap_handler(struct pt_regs *regs) { if (user_mode(regs)) { report_user_fault(regs, SIGSEGV, 0); @@ -89,7 +90,7 @@ void default_trap_handler(struct pt_regs *regs) } #define DO_ERROR_INFO(name, signr, sicode, str) \ -void name(struct pt_regs *regs) \ +static void name(struct pt_regs *regs) \ { \ do_trap(regs, signr, sicode, str); \ } @@ -141,13 +142,13 @@ static inline void do_fp_trap(struct pt_regs *regs, __u32 fpc) do_trap(regs, SIGFPE, si_code, "floating point exception"); } -void translation_exception(struct pt_regs *regs) +static void translation_exception(struct pt_regs *regs) { /* May never happen. */ panic("Translation exception"); } -void illegal_op(struct pt_regs *regs) +static void illegal_op(struct pt_regs *regs) { __u8 opcode[6]; __u16 __user *location; @@ -189,7 +190,7 @@ NOKPROBE_SYMBOL(illegal_op); DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, "specification exception"); -void vector_exception(struct pt_regs *regs) +static void vector_exception(struct pt_regs *regs) { int si_code, vic; @@ -223,7 +224,7 @@ void vector_exception(struct pt_regs *regs) do_trap(regs, SIGFPE, si_code, "vector exception"); } -void data_exception(struct pt_regs *regs) +static void data_exception(struct pt_regs *regs) { save_fpu_regs(); if (current->thread.fpu.fpc & FPC_DXC_MASK) @@ -232,7 +233,7 @@ void data_exception(struct pt_regs *regs) do_trap(regs, SIGILL, ILL_ILLOPN, "data exception"); } -void space_switch_exception(struct pt_regs *regs) +static void space_switch_exception(struct pt_regs *regs) { /* Set user psw back to home space mode. */ if (user_mode(regs)) @@ -241,7 +242,7 @@ void space_switch_exception(struct pt_regs *regs) do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event"); } -void monitor_event_exception(struct pt_regs *regs) +static void monitor_event_exception(struct pt_regs *regs) { const struct exception_table_entry *fixup; @@ -293,12 +294,15 @@ void __init trap_init(void) test_monitor_call(); } +static void (*pgm_check_table[128])(struct pt_regs *regs); + void noinstr __do_pgm_check(struct pt_regs *regs) { unsigned long last_break = S390_lowcore.breaking_event_addr; unsigned int trapnr, syscall_redirect = 0; irqentry_state_t state; + add_random_kstack_offset(); regs->int_code = *(u32 *)&S390_lowcore.pgm_ilc; regs->int_parm_long = S390_lowcore.trans_exc_code; @@ -353,3 +357,61 @@ out: exit_to_user_mode(); } } + +/* + * The program check table contains exactly 128 (0x00-0x7f) entries. Each + * line defines the function to be called corresponding to the program check + * interruption code. 
+ */ +static void (*pgm_check_table[128])(struct pt_regs *regs) = { + [0x00] = default_trap_handler, + [0x01] = illegal_op, + [0x02] = privileged_op, + [0x03] = execute_exception, + [0x04] = do_protection_exception, + [0x05] = addressing_exception, + [0x06] = specification_exception, + [0x07] = data_exception, + [0x08] = overflow_exception, + [0x09] = divide_exception, + [0x0a] = overflow_exception, + [0x0b] = divide_exception, + [0x0c] = hfp_overflow_exception, + [0x0d] = hfp_underflow_exception, + [0x0e] = hfp_significance_exception, + [0x0f] = hfp_divide_exception, + [0x10] = do_dat_exception, + [0x11] = do_dat_exception, + [0x12] = translation_exception, + [0x13] = special_op_exception, + [0x14] = default_trap_handler, + [0x15] = operand_exception, + [0x16] = default_trap_handler, + [0x17] = default_trap_handler, + [0x18] = transaction_exception, + [0x19] = default_trap_handler, + [0x1a] = default_trap_handler, + [0x1b] = vector_exception, + [0x1c] = space_switch_exception, + [0x1d] = hfp_sqrt_exception, + [0x1e ... 0x37] = default_trap_handler, + [0x38] = do_dat_exception, + [0x39] = do_dat_exception, + [0x3a] = do_dat_exception, + [0x3b] = do_dat_exception, + [0x3c] = default_trap_handler, + [0x3d] = do_secure_storage_access, + [0x3e] = do_non_secure_storage_access, + [0x3f] = do_secure_storage_violation, + [0x40] = monitor_event_exception, + [0x41 ... 0x7f] = default_trap_handler, +}; + +#define COND_TRAP(x) asm( \ + ".weak " __stringify(x) "\n\t" \ + ".set " __stringify(x) "," \ + __stringify(default_trap_handler)) + +COND_TRAP(do_secure_storage_access); +COND_TRAP(do_non_secure_storage_access); +COND_TRAP(do_secure_storage_violation); diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c index b2d2ad153067..370f664580af 100644 --- a/arch/s390/kernel/uv.c +++ b/arch/s390/kernel/uv.c @@ -406,6 +406,41 @@ static struct attribute_group uv_query_attr_group = { .attrs = uv_query_attrs, }; +static ssize_t uv_is_prot_virt_guest(struct kobject *kobj, + struct kobj_attribute *attr, char *page) +{ + int val = 0; + +#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST + val = prot_virt_guest; +#endif + return scnprintf(page, PAGE_SIZE, "%d\n", val); +} + +static ssize_t uv_is_prot_virt_host(struct kobject *kobj, + struct kobj_attribute *attr, char *page) +{ + int val = 0; + +#if IS_ENABLED(CONFIG_KVM) + val = prot_virt_host; +#endif + + return scnprintf(page, PAGE_SIZE, "%d\n", val); +} + +static struct kobj_attribute uv_prot_virt_guest = + __ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL); + +static struct kobj_attribute uv_prot_virt_host = + __ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL); + +static const struct attribute *uv_prot_virt_attrs[] = { + &uv_prot_virt_guest.attr, + &uv_prot_virt_host.attr, + NULL, +}; + static struct kset *uv_query_kset; static struct kobject *uv_kobj; @@ -420,15 +455,23 @@ static int __init uv_info_init(void) if (!uv_kobj) return -ENOMEM; - uv_query_kset = kset_create_and_add("query", NULL, uv_kobj); - if (!uv_query_kset) + rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs); + if (rc) goto out_kobj; + uv_query_kset = kset_create_and_add("query", NULL, uv_kobj); + if (!uv_query_kset) { + rc = -ENOMEM; + goto out_ind_files; + } + rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group); if (!rc) return 0; kset_unregister(uv_query_kset); +out_ind_files: + sysfs_remove_files(uv_kobj, uv_prot_virt_attrs); out_kobj: kobject_del(uv_kobj); kobject_put(uv_kobj); diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 
73c7afcc0527..f216a1b2f825 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -214,7 +214,7 @@ void vtime_flush(struct task_struct *tsk) avg_steal = S390_lowcore.avg_steal_timer / 2; if ((s64) steal > 0) { S390_lowcore.steal_timer = 0; - account_steal_time(steal); + account_steal_time(cputime_to_nsecs(steal)); avg_steal += steal; } S390_lowcore.avg_steal_timer = avg_steal; diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 5b8ec1c447e1..02c146f9e5cd 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c @@ -150,6 +150,19 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu) return 0; } +static int forward_cnt; +static unsigned long cur_slice; + +static int diag9c_forwarding_overrun(void) +{ + /* Reset the count on a new slice */ + if (time_after(jiffies, cur_slice)) { + cur_slice = jiffies; + forward_cnt = diag9c_forwarding_hz / HZ; + } + return forward_cnt-- <= 0 ? 1 : 0; +} + static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu) { struct kvm_vcpu *tcpu; @@ -167,9 +180,21 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu) if (!tcpu) goto no_yield; - /* target already running */ - if (READ_ONCE(tcpu->cpu) >= 0) - goto no_yield; + /* target guest VCPU already running */ + if (READ_ONCE(tcpu->cpu) >= 0) { + if (!diag9c_forwarding_hz || diag9c_forwarding_overrun()) + goto no_yield; + + /* target host CPU already running */ + if (!vcpu_is_preempted(tcpu->cpu)) + goto no_yield; + smp_yield_cpu(tcpu->cpu); + VCPU_EVENT(vcpu, 5, + "diag time slice end directed to %d: yield forwarded", + tid); + vcpu->stat.diagnose_9c_forward++; + return 0; + } if (kvm_vcpu_yield_to(tcpu) <= 0) goto no_yield; diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 6d6b57059493..b9f85b2dc053 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -976,7 +976,9 @@ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra) * kvm_s390_shadow_tables - walk the guest page table and create shadow tables * @sg: pointer to the shadow guest address space structure * @saddr: faulting address in the shadow gmap - * @pgt: pointer to the page table address result + * @pgt: pointer to the beginning of the page table for the given address if + * successful (return value 0), or to the first invalid DAT entry in + * case of exceptions (return value > 0) * @fake: pgt references contiguous guest memory block, not a pgtable */ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr, @@ -1034,6 +1036,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr, rfte.val = ptr; goto shadow_r2t; } + *pgt = ptr + vaddr.rfx * 8; rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val); if (rc) return rc; @@ -1060,6 +1063,7 @@ shadow_r2t: rste.val = ptr; goto shadow_r3t; } + *pgt = ptr + vaddr.rsx * 8; rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val); if (rc) return rc; @@ -1087,6 +1091,7 @@ shadow_r3t: rtte.val = ptr; goto shadow_sgt; } + *pgt = ptr + vaddr.rtx * 8; rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val); if (rc) return rc; @@ -1123,6 +1128,7 @@ shadow_sgt: ste.val = ptr; goto shadow_pgt; } + *pgt = ptr + vaddr.sx * 8; rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val); if (rc) return rc; @@ -1157,6 +1163,8 @@ shadow_pgt: * @vcpu: virtual cpu * @sg: pointer to the shadow guest address space structure * @saddr: faulting address in the shadow gmap + * @datptr: will contain the address of the faulting DAT table entry, or of + * the valid leaf, plus 
some flags * * Returns: - 0 if the shadow fault was successfully resolved * - > 0 (pgm exception code) on exceptions while faulting @@ -1165,11 +1173,11 @@ shadow_pgt: * - -ENOMEM if out of memory */ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg, - unsigned long saddr) + unsigned long saddr, unsigned long *datptr) { union vaddress vaddr; union page_table_entry pte; - unsigned long pgt; + unsigned long pgt = 0; int dat_protection, fake; int rc; @@ -1191,8 +1199,20 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg, pte.val = pgt + vaddr.px * PAGE_SIZE; goto shadow_page; } - if (!rc) - rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val); + + switch (rc) { + case PGM_SEGMENT_TRANSLATION: + case PGM_REGION_THIRD_TRANS: + case PGM_REGION_SECOND_TRANS: + case PGM_REGION_FIRST_TRANS: + pgt |= PEI_NOT_PTE; + break; + case 0: + pgt += vaddr.px * 8; + rc = gmap_read_table(sg->parent, pgt, &pte.val); + } + if (datptr) + *datptr = pgt | dat_protection * PEI_DAT_PROT; if (!rc && pte.i) rc = PGM_PAGE_TRANSLATION; if (!rc && pte.z) diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index f4c51756c462..7c72a5e3449f 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h @@ -18,17 +18,14 @@ /** * kvm_s390_real_to_abs - convert guest real address to guest absolute address - * @vcpu - guest virtual cpu + * @prefix - guest prefix * @gra - guest real address * * Returns the guest absolute address that corresponds to the passed guest real - * address @gra of a virtual guest cpu by applying its prefix. + * address @gra of by applying the given prefix. */ -static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu, - unsigned long gra) +static inline unsigned long _kvm_s390_real_to_abs(u32 prefix, unsigned long gra) { - unsigned long prefix = kvm_s390_get_prefix(vcpu); - if (gra < 2 * PAGE_SIZE) gra += prefix; else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE) @@ -37,6 +34,43 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu, } /** + * kvm_s390_real_to_abs - convert guest real address to guest absolute address + * @vcpu - guest virtual cpu + * @gra - guest real address + * + * Returns the guest absolute address that corresponds to the passed guest real + * address @gra of a virtual guest cpu by applying its prefix. + */ +static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu, + unsigned long gra) +{ + return _kvm_s390_real_to_abs(kvm_s390_get_prefix(vcpu), gra); +} + +/** + * _kvm_s390_logical_to_effective - convert guest logical to effective address + * @psw: psw of the guest + * @ga: guest logical address + * + * Convert a guest logical address to an effective address by applying the + * rules of the addressing mode defined by bits 31 and 32 of the given PSW + * (extendended/basic addressing mode). + * + * Depending on the addressing mode, the upper 40 bits (24 bit addressing + * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing + * mode) of @ga will be zeroed and the remaining bits will be returned. 
+ */ +static inline unsigned long _kvm_s390_logical_to_effective(psw_t *psw, + unsigned long ga) +{ + if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT) + return ga; + if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT) + return ga & ((1UL << 31) - 1); + return ga & ((1UL << 24) - 1); +} + +/** * kvm_s390_logical_to_effective - convert guest logical to effective address * @vcpu: guest virtual cpu * @ga: guest logical address @@ -52,13 +86,7 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu, static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu, unsigned long ga) { - psw_t *psw = &vcpu->arch.sie_block->gpsw; - - if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT) - return ga; - if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT) - return ga & ((1UL << 31) - 1); - return ga & ((1UL << 24) - 1); + return _kvm_s390_logical_to_effective(&vcpu->arch.sie_block->gpsw, ga); } /* @@ -359,7 +387,11 @@ void ipte_unlock(struct kvm_vcpu *vcpu); int ipte_lock_held(struct kvm_vcpu *vcpu); int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra); +/* MVPG PEI indication bits */ +#define PEI_DAT_PROT 2 +#define PEI_NOT_PTE 4 + int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow, - unsigned long saddr); + unsigned long saddr, unsigned long *datptr); #endif /* __KVM_S390_GACCESS_H */ diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 2f09e9d7dc95..1296fc10f80c 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -158,6 +158,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { VCPU_STAT("instruction_diag_44", diagnose_44), VCPU_STAT("instruction_diag_9c", diagnose_9c), VCPU_STAT("diag_9c_ignored", diagnose_9c_ignored), + VCPU_STAT("diag_9c_forward", diagnose_9c_forward), VCPU_STAT("instruction_diag_258", diagnose_258), VCPU_STAT("instruction_diag_308", diagnose_308), VCPU_STAT("instruction_diag_500", diagnose_500), @@ -185,6 +186,11 @@ static bool use_gisa = true; module_param(use_gisa, bool, 0644); MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it."); +/* maximum diag9c forwarding per second */ +unsigned int diag9c_forwarding_hz; +module_param(diag9c_forwarding_hz, uint, 0644); +MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off"); + /* * For now we handle at most 16 double words as this is what the s390 base * kernel handles and stores in the prefix page. 
If we ever need to go beyond @@ -544,6 +550,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_S390_DIAG318: r = 1; break; + case KVM_CAP_SET_GUEST_DEBUG2: + r = KVM_GUESTDBG_VALID_MASK; + break; case KVM_CAP_S390_HPAGE_1M: r = 0; if (hpage && !kvm_is_ucontrol(kvm)) @@ -4307,16 +4316,16 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu) kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC; kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val; if (MACHINE_HAS_GS) { + preempt_disable(); __ctl_set_bit(2, 4); if (vcpu->arch.gs_enabled) save_gs_cb(current->thread.gs_cb); - preempt_disable(); current->thread.gs_cb = vcpu->arch.host_gscb; restore_gs_cb(vcpu->arch.host_gscb); - preempt_enable(); if (!vcpu->arch.host_gscb) __ctl_clear_bit(2, 4); vcpu->arch.host_gscb = NULL; + preempt_enable(); } /* SIE will save etoken directly into SDNX and therefore kvm_run */ } @@ -4542,7 +4551,7 @@ int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) /* * As we are starting a second VCPU, we have to disable * the IBS facility on all VCPUs to remove potentially - * oustanding ENABLE requests. + * outstanding ENABLE requests. */ __disable_ibs_on_all_vcpus(vcpu->kvm); } diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 79dcd647b378..9fad25109b0d 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -471,4 +471,12 @@ void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu, * @kvm: the KVM guest */ void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm); + +/** + * diag9c_forwarding_hz + * + * Set the maximum number of diag9c forwarding per second + */ +extern unsigned int diag9c_forwarding_hz; + #endif diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index bd803e091918..4002a24bc43a 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -417,11 +417,6 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) memcpy((void *)((u64)scb_o + 0xc0), (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0); break; - case ICPT_PARTEXEC: - /* MVPG only */ - memcpy((void *)((u64)scb_o + 0xc0), - (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0); - break; } if (scb_s->ihcpu != 0xffffU) @@ -620,10 +615,10 @@ static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) /* with mso/msl, the prefix lies at offset *mso* */ prefix += scb_s->mso; - rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix); + rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL); if (!rc && (scb_s->ecb & ECB_TE)) rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, - prefix + PAGE_SIZE); + prefix + PAGE_SIZE, NULL); /* * We don't have to mprotect, we will be called for all unshadows. * SIE will detect if protection applies and trigger a validity. @@ -914,7 +909,7 @@ static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) current->thread.gmap_addr, 1); rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, - current->thread.gmap_addr); + current->thread.gmap_addr, NULL); if (rc > 0) { rc = inject_fault(vcpu, rc, current->thread.gmap_addr, @@ -936,7 +931,7 @@ static void handle_last_fault(struct kvm_vcpu *vcpu, { if (vsie_page->fault_addr) kvm_s390_shadow_fault(vcpu, vsie_page->gmap, - vsie_page->fault_addr); + vsie_page->fault_addr, NULL); vsie_page->fault_addr = 0; } @@ -984,6 +979,98 @@ static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) } /* + * Get a register for a nested guest. 
+ * @vcpu the vcpu of the guest + * @vsie_page the vsie_page for the nested guest + * @reg the register number, the upper 4 bits are ignored. + * returns: the value of the register. + */ +static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg) +{ + /* no need to validate the parameter and/or perform error handling */ + reg &= 0xf; + switch (reg) { + case 15: + return vsie_page->scb_s.gg15; + case 14: + return vsie_page->scb_s.gg14; + default: + return vcpu->run->s.regs.gprs[reg]; + } +} + +static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) +{ + struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; + unsigned long pei_dest, pei_src, src, dest, mask, prefix; + u64 *pei_block = &vsie_page->scb_o->mcic; + int edat, rc_dest, rc_src; + union ctlreg0 cr0; + + cr0.val = vcpu->arch.sie_block->gcr[0]; + edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8); + mask = _kvm_s390_logical_to_effective(&scb_s->gpsw, PAGE_MASK); + prefix = scb_s->prefix << GUEST_PREFIX_SHIFT; + + dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask; + dest = _kvm_s390_real_to_abs(prefix, dest) + scb_s->mso; + src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask; + src = _kvm_s390_real_to_abs(prefix, src) + scb_s->mso; + + rc_dest = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, dest, &pei_dest); + rc_src = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, src, &pei_src); + /* + * Either everything went well, or something non-critical went wrong + * e.g. because of a race. In either case, simply retry. + */ + if (rc_dest == -EAGAIN || rc_src == -EAGAIN || (!rc_dest && !rc_src)) { + retry_vsie_icpt(vsie_page); + return -EAGAIN; + } + /* Something more serious went wrong, propagate the error */ + if (rc_dest < 0) + return rc_dest; + if (rc_src < 0) + return rc_src; + + /* The only possible suppressing exception: just deliver it */ + if (rc_dest == PGM_TRANSLATION_SPEC || rc_src == PGM_TRANSLATION_SPEC) { + clear_vsie_icpt(vsie_page); + rc_dest = kvm_s390_inject_program_int(vcpu, PGM_TRANSLATION_SPEC); + WARN_ON_ONCE(rc_dest); + return 1; + } + + /* + * Forward the PEI intercept to the guest if it was a page fault, or + * also for segment and region table faults if EDAT applies. + */ + if (edat) { + rc_dest = rc_dest == PGM_ASCE_TYPE ? rc_dest : 0; + rc_src = rc_src == PGM_ASCE_TYPE ? rc_src : 0; + } else { + rc_dest = rc_dest != PGM_PAGE_TRANSLATION ? rc_dest : 0; + rc_src = rc_src != PGM_PAGE_TRANSLATION ? rc_src : 0; + } + if (!rc_dest && !rc_src) { + pei_block[0] = pei_dest; + pei_block[1] = pei_src; + return 1; + } + + retry_vsie_icpt(vsie_page); + + /* + * The host has edat, and the guest does not, or it was an ASCE type + * exception. The host needs to inject the appropriate DAT interrupts + * into the guest. + */ + if (rc_dest) + return inject_fault(vcpu, rc_dest, dest, 1); + return inject_fault(vcpu, rc_src, src, 0); +} + +/* * Run the vsie on a shadow scb and a shadow gmap, without any further * sanity checks, handling SIE faults. 
* @@ -1071,6 +1158,10 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) if ((scb_s->ipa & 0xf000) != 0xf000) scb_s->ipa += 0x1000; break; + case ICPT_PARTEXEC: + if (scb_s->ipa == 0xb254) + rc = vsie_handle_mvpg(vcpu, vsie_page); + break; } return rc; } diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c index dcd8946255be..2f32802f79ce 100644 --- a/arch/s390/lib/test_unwind.c +++ b/arch/s390/lib/test_unwind.c @@ -64,8 +64,8 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs, break; if (state.reliable && !addr) { pr_err("unwind state reliable but addr is 0\n"); - kfree(bt); - return -EINVAL; + ret = -EINVAL; + break; } sprint_symbol(sym, addr); if (bt_pos < BT_BUF_SIZE) { @@ -296,19 +296,22 @@ static int test_unwind_flags(int flags) static int test_unwind_init(void) { - int ret = 0; + int failed = 0; + int total = 0; #define TEST(flags) \ do { \ pr_info("[ RUN ] " #flags "\n"); \ + total++; \ if (!test_unwind_flags((flags))) { \ pr_info("[ OK ] " #flags "\n"); \ } else { \ pr_err("[ FAILED ] " #flags "\n"); \ - ret = -EINVAL; \ + failed++; \ } \ } while (0) + pr_info("running stack unwinder tests"); TEST(UWM_DEFAULT); TEST(UWM_SP); TEST(UWM_REGS); @@ -335,8 +338,14 @@ do { \ TEST(UWM_PGM | UWM_SP | UWM_REGS); #endif #undef TEST + if (failed) { + pr_err("%d of %d stack unwinder tests failed", failed, total); + WARN(1, "%d of %d stack unwinder tests failed", failed, total); + } else { + pr_info("all %d stack unwinder tests passed", total); + } - return ret; + return failed ? -EINVAL : 0; } static void test_unwind_exit(void) diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index e30c7c781172..826d01777361 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -783,6 +783,7 @@ early_initcall(pfault_irq_init); #endif /* CONFIG_PFAULT */ #if IS_ENABLED(CONFIG_PGSTE) + void do_secure_storage_access(struct pt_regs *regs) { unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK; @@ -859,19 +860,4 @@ void do_secure_storage_violation(struct pt_regs *regs) send_sig(SIGSEGV, current, 0); } -#else -void do_secure_storage_access(struct pt_regs *regs) -{ - default_trap_handler(regs); -} - -void do_non_secure_storage_access(struct pt_regs *regs) -{ - default_trap_handler(regs); -} - -void do_secure_storage_violation(struct pt_regs *regs) -{ - default_trap_handler(regs); -} -#endif +#endif /* CONFIG_PGSTE */ diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 3b5a4d25ca9b..da36d13ffc16 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -189,7 +189,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, return pte; } -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgdp; diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 0e76b2127dc6..8ac710de1ab1 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -209,8 +209,6 @@ void __init mem_init(void) setup_zero_pages(); /* Setup zeroed pages. 
*/ cmma_init_nodat(); - - mem_init_print_info(NULL); } void free_initmem(void) diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c index 567c69f3069e..7f0e154a470a 100644 --- a/arch/s390/mm/page-states.c +++ b/arch/s390/mm/page-states.c @@ -112,7 +112,7 @@ static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end) next = pmd_addr_end(addr, end); if (pmd_none(*pmd) || pmd_large(*pmd)) continue; - page = virt_to_page(pmd_val(*pmd)); + page = phys_to_page(pmd_val(*pmd)); set_bit(PG_arch_1, &page->flags); } while (pmd++, addr = next, addr != end); } @@ -130,7 +130,7 @@ static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end) if (pud_none(*pud) || pud_large(*pud)) continue; if (!pud_folded(*pud)) { - page = virt_to_page(pud_val(*pud)); + page = phys_to_page(pud_val(*pud)); for (i = 0; i < 3; i++) set_bit(PG_arch_1, &page[i].flags); } @@ -151,7 +151,7 @@ static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end) if (p4d_none(*p4d)) continue; if (!p4d_folded(*p4d)) { - page = virt_to_page(p4d_val(*p4d)); + page = phys_to_page(p4d_val(*p4d)); for (i = 0; i < 3; i++) set_bit(PG_arch_1, &page[i].flags); } @@ -173,7 +173,7 @@ static void mark_kernel_pgd(void) if (pgd_none(*pgd)) continue; if (!pgd_folded(*pgd)) { - page = virt_to_page(pgd_val(*pgd)); + page = phys_to_page(pgd_val(*pgd)); for (i = 0; i < 3; i++) set_bit(PG_arch_1, &page[i].flags); } diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index f973e2ead197..63cae0476bb4 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1209,21 +1209,67 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, */ case BPF_STX | BPF_ATOMIC | BPF_DW: case BPF_STX | BPF_ATOMIC | BPF_W: - if (insn->imm != BPF_ADD) { + { + bool is32 = BPF_SIZE(insn->code) == BPF_W; + + switch (insn->imm) { +/* {op32|op64} {%w0|%src},%src,off(%dst) */ +#define EMIT_ATOMIC(op32, op64) do { \ + EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64), \ + (insn->imm & BPF_FETCH) ? src_reg : REG_W0, \ + src_reg, dst_reg, off); \ + if (is32 && (insn->imm & BPF_FETCH)) \ + EMIT_ZERO(src_reg); \ +} while (0) + case BPF_ADD: + case BPF_ADD | BPF_FETCH: + /* {laal|laalg} */ + EMIT_ATOMIC(0x00fa, 0x00ea); + break; + case BPF_AND: + case BPF_AND | BPF_FETCH: + /* {lan|lang} */ + EMIT_ATOMIC(0x00f4, 0x00e4); + break; + case BPF_OR: + case BPF_OR | BPF_FETCH: + /* {lao|laog} */ + EMIT_ATOMIC(0x00f6, 0x00e6); + break; + case BPF_XOR: + case BPF_XOR | BPF_FETCH: + /* {lax|laxg} */ + EMIT_ATOMIC(0x00f7, 0x00e7); + break; +#undef EMIT_ATOMIC + case BPF_XCHG: + /* {ly|lg} %w0,off(%dst) */ + EMIT6_DISP_LH(0xe3000000, + is32 ? 0x0058 : 0x0004, REG_W0, REG_0, + dst_reg, off); + /* 0: {csy|csg} %w0,%src,off(%dst) */ + EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030, + REG_W0, src_reg, dst_reg, off); + /* brc 4,0b */ + EMIT4_PCREL_RIC(0xa7040000, 4, jit->prg - 6); + /* {llgfr|lgr} %src,%w0 */ + EMIT4(is32 ? 0xb9160000 : 0xb9040000, src_reg, REG_W0); + if (is32 && insn_is_zext(&insn[1])) + insn_count = 2; + break; + case BPF_CMPXCHG: + /* 0: {csy|csg} %b0,%src,off(%dst) */ + EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030, + BPF_REG_0, src_reg, dst_reg, off); + break; + default: pr_err("Unknown atomic operation %02x\n", insn->imm); return -1; } - /* *(u32/u64 *)(dst + off) += src - * - * BFW_W: laal %w0,%src,off(%dst) - * BPF_DW: laalg %w0,%src,off(%dst) - */ - EMIT6_DISP_LH(0xeb000000, - BPF_SIZE(insn->code) == BPF_W ? 
0x00fa : 0x00ea, - REG_W0, src_reg, dst_reg, off); jit->seen |= SEEN_MEM; break; + } /* * BPF_LDX */ diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 600881d894dd..b0993e05affe 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -538,6 +538,7 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev, zdev->bars[i].res = res; pci_add_resource(resources, res); } + zdev->has_resources = 1; return 0; } @@ -554,6 +555,7 @@ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev) release_resource(zdev->bars[i].res); kfree(zdev->bars[i].res); } + zdev->has_resources = 0; } int pcibios_add_device(struct pci_dev *pdev) @@ -661,7 +663,6 @@ int zpci_enable_device(struct zpci_dev *zdev) if (rc) goto out_dma; - zdev->state = ZPCI_FN_STATE_ONLINE; return 0; out_dma: @@ -669,7 +670,6 @@ out_dma: out: return rc; } -EXPORT_SYMBOL_GPL(zpci_enable_device); int zpci_disable_device(struct zpci_dev *zdev) { @@ -680,20 +680,6 @@ int zpci_disable_device(struct zpci_dev *zdev) */ return clp_disable_fh(zdev); } -EXPORT_SYMBOL_GPL(zpci_disable_device); - -void zpci_remove_device(struct zpci_dev *zdev) -{ - struct zpci_bus *zbus = zdev->zbus; - struct pci_dev *pdev; - - pdev = pci_get_slot(zbus->bus, zdev->devfn); - if (pdev) { - if (pdev->is_virtfn) - return zpci_iov_remove_virtfn(pdev, zdev->vfn); - pci_stop_and_remove_bus_device_locked(pdev); - } -} /** * zpci_create_device() - Create a new zpci_dev and add it to the zbus @@ -704,9 +690,9 @@ void zpci_remove_device(struct zpci_dev *zdev) * Creates a new zpci device and adds it to its, possibly newly created, zbus * as well as zpci_list. * - * Returns: 0 on success, an error value otherwise + * Returns: the zdev on success or an error pointer otherwise */ -int zpci_create_device(u32 fid, u32 fh, enum zpci_state state) +struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state) { struct zpci_dev *zdev; int rc; @@ -714,7 +700,7 @@ int zpci_create_device(u32 fid, u32 fh, enum zpci_state state) zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, state); zdev = kzalloc(sizeof(*zdev), GFP_KERNEL); if (!zdev) - return -ENOMEM; + return ERR_PTR(-ENOMEM); /* FID and Function Handle are the static/dynamic identifiers */ zdev->fid = fid; @@ -733,44 +719,105 @@ int zpci_create_device(u32 fid, u32 fh, enum zpci_state state) if (rc) goto error; - if (zdev->state == ZPCI_FN_STATE_CONFIGURED) { - rc = zpci_enable_device(zdev); - if (rc) - goto error_destroy_iommu; - } - rc = zpci_bus_device_register(zdev, &pci_root_ops); if (rc) - goto error_disable; + goto error_destroy_iommu; spin_lock(&zpci_list_lock); list_add_tail(&zdev->entry, &zpci_list); spin_unlock(&zpci_list_lock); - return 0; + return zdev; -error_disable: - if (zdev->state == ZPCI_FN_STATE_ONLINE) - zpci_disable_device(zdev); error_destroy_iommu: zpci_destroy_iommu(zdev); error: zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc); kfree(zdev); + return ERR_PTR(rc); +} + +/** + * zpci_scan_configured_device() - Scan a freshly configured zpci_dev + * @zdev: The zpci_dev to be configured + * @fh: The general function handle supplied by the platform + * + * Given a device in the configuration state Configured, enables, scans and + * adds it to the common code PCI subsystem if possible. If the PCI device is + * parked because we can not yet create a PCI bus because we have not seen + * function 0, it is ignored but will be scanned once function 0 appears. + * If any failure occurs, the zpci_dev is left disabled. 
+ * + * Return: 0 on success, or an error code otherwise + */ +int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh) +{ + int rc; + + zdev->fh = fh; + /* the PCI function will be scanned once function 0 appears */ + if (!zdev->zbus->bus) + return 0; + + /* For function 0 on a multi-function bus scan whole bus as we might + * have to pick up existing functions waiting for it to allow creating + * the PCI bus + */ + if (zdev->devfn == 0 && zdev->zbus->multifunction) + rc = zpci_bus_scan_bus(zdev->zbus); + else + rc = zpci_bus_scan_device(zdev); + return rc; } +/** + * zpci_deconfigure_device() - Deconfigure a zpci_dev + * @zdev: The zpci_dev to configure + * + * Deconfigure a zPCI function that is currently configured and possibly known + * to the common code PCI subsystem. + * If any failure occurs the device is left as is. + * + * Return: 0 on success, or an error code otherwise + */ +int zpci_deconfigure_device(struct zpci_dev *zdev) +{ + int rc; + + if (zdev->zbus->bus) + zpci_bus_remove_device(zdev, false); + + if (zdev_enabled(zdev)) { + rc = zpci_disable_device(zdev); + if (rc) + return rc; + } + + rc = sclp_pci_deconfigure(zdev->fid); + zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, rc); + if (rc) + return rc; + zdev->state = ZPCI_FN_STATE_STANDBY; + + return 0; +} + void zpci_release_device(struct kref *kref) { struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref); + int ret; if (zdev->zbus->bus) - zpci_remove_device(zdev); + zpci_bus_remove_device(zdev, false); + + if (zdev_enabled(zdev)) + zpci_disable_device(zdev); switch (zdev->state) { - case ZPCI_FN_STATE_ONLINE: case ZPCI_FN_STATE_CONFIGURED: - zpci_disable_device(zdev); + ret = sclp_pci_deconfigure(zdev->fid); + zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret); fallthrough; case ZPCI_FN_STATE_STANDBY: if (zdev->has_hp_slot) @@ -905,6 +952,7 @@ static int __init pci_base_init(void) rc = clp_scan_pci_devices(); if (rc) goto out_find; + zpci_bus_scan_busses(); s390_pci_initialized = 1; return 0; diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c index 755b46f4c595..9629f9779c79 100644 --- a/arch/s390/pci/pci_bus.c +++ b/arch/s390/pci/pci_bus.c @@ -27,28 +27,184 @@ #include "pci_iov.h" static LIST_HEAD(zbus_list); -static DEFINE_SPINLOCK(zbus_list_lock); +static DEFINE_MUTEX(zbus_list_lock); static int zpci_nb_devices; -/* zpci_bus_scan +/* zpci_bus_prepare_device - Prepare a zPCI function for scanning + * @zdev: the zPCI function to be prepared + * + * The PCI resources for the function are set up and added to its zbus and the + * function is enabled. The function must be added to a zbus which must have + * a PCI bus created. If an error occurs the zPCI function is not enabled. + * + * Return: 0 on success, an error code otherwise + */ +static int zpci_bus_prepare_device(struct zpci_dev *zdev) +{ + struct resource_entry *window, *n; + struct resource *res; + int rc; + + if (!zdev_enabled(zdev)) { + rc = zpci_enable_device(zdev); + if (rc) + return rc; + } + + if (!zdev->has_resources) { + zpci_setup_bus_resources(zdev, &zdev->zbus->resources); + resource_list_for_each_entry_safe(window, n, &zdev->zbus->resources) { + res = window->res; + pci_bus_add_resource(zdev->zbus->bus, res, 0); + } + } + + return 0; +} + +/* zpci_bus_scan_device - Scan a single device adding it to the PCI core + * @zdev: the zdev to be scanned + * + * Scans the PCI function making it available to the common PCI code. 
+ * + * Return: 0 on success, an error value otherwise + */ +int zpci_bus_scan_device(struct zpci_dev *zdev) +{ + struct pci_dev *pdev; + int rc; + + rc = zpci_bus_prepare_device(zdev); + if (rc) + return rc; + + pdev = pci_scan_single_device(zdev->zbus->bus, zdev->devfn); + if (!pdev) + return -ENODEV; + + pci_bus_add_device(pdev); + pci_lock_rescan_remove(); + pci_bus_add_devices(zdev->zbus->bus); + pci_unlock_rescan_remove(); + + return 0; +} + +/* zpci_bus_remove_device - Removes the given zdev from the PCI core + * @zdev: the zdev to be removed from the PCI core + * @set_error: if true the device's error state is set to permanent failure + * + * Sets a zPCI device to a configured but offline state; the zPCI + * device is still accessible through its hotplug slot and the zPCI + * API but is removed from the common code PCI bus, making it + * no longer available to drivers. + */ +void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error) +{ + struct zpci_bus *zbus = zdev->zbus; + struct pci_dev *pdev; + + if (!zdev->zbus->bus) + return; + + pdev = pci_get_slot(zbus->bus, zdev->devfn); + if (pdev) { + if (set_error) + pdev->error_state = pci_channel_io_perm_failure; + if (pdev->is_virtfn) { + zpci_iov_remove_virtfn(pdev, zdev->vfn); + /* balance pci_get_slot */ + pci_dev_put(pdev); + return; + } + pci_stop_and_remove_bus_device_locked(pdev); + /* balance pci_get_slot */ + pci_dev_put(pdev); + } +} + +/* zpci_bus_scan_bus - Scan all configured zPCI functions on the bus + * @zbus: the zbus to be scanned + * + * Enables and scans all PCI functions on the bus making them available to the + * common PCI code. If there is no function 0 on the zbus nothing is scanned. If + * a function does not have a slot yet because it was added to the zbus before + * function 0 the slot is created. If a PCI function fails to be initialized + * an error will be returned but attempts will still be made for all other + * functions on the bus. + * + * Return: 0 on success, an error value otherwise + */ +int zpci_bus_scan_bus(struct zpci_bus *zbus) +{ + struct zpci_dev *zdev; + int devfn, rc, ret = 0; + + if (!zbus->function[0]) + return 0; + + for (devfn = 0; devfn < ZPCI_FUNCTIONS_PER_BUS; devfn++) { + zdev = zbus->function[devfn]; + if (zdev && zdev->state == ZPCI_FN_STATE_CONFIGURED) { + rc = zpci_bus_prepare_device(zdev); + if (rc) + ret = -EIO; + } + } + + pci_lock_rescan_remove(); + pci_scan_child_bus(zbus->bus); + pci_bus_add_devices(zbus->bus); + pci_unlock_rescan_remove(); + + return ret; +} + +/* zpci_bus_scan_busses - Scan all registered busses + * + * Scan all available zbusses + * + */ +void zpci_bus_scan_busses(void) +{ + struct zpci_bus *zbus = NULL; + + mutex_lock(&zbus_list_lock); + list_for_each_entry(zbus, &zbus_list, bus_next) { + zpci_bus_scan_bus(zbus); + cond_resched(); + } + mutex_unlock(&zbus_list_lock); +} + +/* zpci_bus_create_pci_bus - Create the PCI bus associated with this zbus * @zbus: the zbus holding the zdevices + * @f0: function 0 of the bus * @ops: the pci operations * - * The domain number must be set before pci_scan_root_bus is called. - * This function can be called once the domain is known, hence - * when the function_0 is dicovered. + * Function zero is taken as a parameter as this is used to determine the + * domain, multifunction property and maximum bus speed of the entire bus. 
+ * + * Return: 0 on success, an error code otherwise */ -static int zpci_bus_scan(struct zpci_bus *zbus, int domain, struct pci_ops *ops) +static int zpci_bus_create_pci_bus(struct zpci_bus *zbus, struct zpci_dev *f0, struct pci_ops *ops) { struct pci_bus *bus; - int rc; + int domain; - rc = zpci_alloc_domain(domain); - if (rc < 0) - return rc; - zbus->domain_nr = rc; + domain = zpci_alloc_domain((u16)f0->uid); + if (domain < 0) + return domain; + + zbus->domain_nr = domain; + zbus->multifunction = f0->rid_available; + zbus->max_bus_speed = f0->max_bus_speed; - bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, ops, zbus, &zbus->resources); + /* + * Note that the zbus->resources are taken over and zbus->resources + * is empty after a successful call + */ + bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, ops, zbus, &zbus->resources); if (!bus) { zpci_free_domain(zbus->domain_nr); return -EFAULT; @@ -56,6 +212,7 @@ static int zpci_bus_scan(struct zpci_bus *zbus, int domain, struct pci_ops *ops) zbus->bus = bus; pci_bus_add_devices(bus); + return 0; } @@ -74,9 +231,9 @@ static void zpci_bus_release(struct kref *kref) pci_unlock_rescan_remove(); } - spin_lock(&zbus_list_lock); + mutex_lock(&zbus_list_lock); list_del(&zbus->bus_next); - spin_unlock(&zbus_list_lock); + mutex_unlock(&zbus_list_lock); kfree(zbus); } @@ -89,7 +246,7 @@ static struct zpci_bus *zpci_bus_get(int pchid) { struct zpci_bus *zbus; - spin_lock(&zbus_list_lock); + mutex_lock(&zbus_list_lock); list_for_each_entry(zbus, &zbus_list, bus_next) { if (pchid == zbus->pchid) { kref_get(&zbus->kref); @@ -98,7 +255,7 @@ static struct zpci_bus *zpci_bus_get(int pchid) } zbus = NULL; out_unlock: - spin_unlock(&zbus_list_lock); + mutex_unlock(&zbus_list_lock); return zbus; } @@ -112,9 +269,9 @@ static struct zpci_bus *zpci_bus_alloc(int pchid) zbus->pchid = pchid; INIT_LIST_HEAD(&zbus->bus_next); - spin_lock(&zbus_list_lock); + mutex_lock(&zbus_list_lock); list_add_tail(&zbus->bus_next, &zbus_list); - spin_unlock(&zbus_list_lock); + mutex_unlock(&zbus_list_lock); kref_init(&zbus->kref); INIT_LIST_HEAD(&zbus->resources); @@ -141,53 +298,77 @@ void pcibios_bus_add_device(struct pci_dev *pdev) } } -static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev) +/* zpci_bus_create_hotplug_slots - Add hotplug slot(s) for device added to bus + * @zdev: the zPCI device that was newly added + * + * Add the hotplug slot(s) for the newly added PCI function. Normally this is + * simply the slot for the function itself. If however we are adding the + * function 0 on a zbus, it might be that we already registered functions on + * that zbus but could not create their hotplug slots yet so add those now too. + * + * Return: 0 on success, an error code otherwise + */ +static int zpci_bus_create_hotplug_slots(struct zpci_dev *zdev) { - struct pci_bus *bus; - struct resource_entry *window, *n; - struct resource *res; - struct pci_dev *pdev; - int rc; - - bus = zbus->bus; - if (!bus) - return -EINVAL; - - pdev = pci_get_slot(bus, zdev->devfn); - if (pdev) { - /* Device is already known. 
*/ - pci_dev_put(pdev); - return 0; - } + struct zpci_bus *zbus = zdev->zbus; + int devfn, rc = 0; rc = zpci_init_slot(zdev); if (rc) return rc; zdev->has_hp_slot = 1; - resource_list_for_each_entry_safe(window, n, &zbus->resources) { - res = window->res; - pci_bus_add_resource(bus, res, 0); - } + if (zdev->devfn == 0 && zbus->multifunction) { + /* Now that function 0 is there we can finally create the + * hotplug slots for those functions with devfn != 0 that have + * been parked in zbus->function[] waiting for us to be able to + * create the PCI bus. + */ + for (devfn = 1; devfn < ZPCI_FUNCTIONS_PER_BUS; devfn++) { + zdev = zbus->function[devfn]; + if (zdev && !zdev->has_hp_slot) { + rc = zpci_init_slot(zdev); + if (rc) + return rc; + zdev->has_hp_slot = 1; + } + } - pdev = pci_scan_single_device(bus, zdev->devfn); - if (pdev) - pci_bus_add_device(pdev); + } - return 0; + return rc; } -static void zpci_bus_add_devices(struct zpci_bus *zbus) +static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev) { - int i; + int rc = -EINVAL; - for (i = 1; i < ZPCI_FUNCTIONS_PER_BUS; i++) - if (zbus->function[i]) - zpci_bus_add_device(zbus, zbus->function[i]); + zdev->zbus = zbus; + if (zbus->function[zdev->devfn]) { + pr_err("devfn %04x is already assigned\n", zdev->devfn); + return rc; + } + zbus->function[zdev->devfn] = zdev; + zpci_nb_devices++; - pci_lock_rescan_remove(); - pci_bus_add_devices(zbus->bus); - pci_unlock_rescan_remove(); + if (zbus->bus) { + if (zbus->multifunction && !zdev->rid_available) { + WARN_ONCE(1, "rid_available not set for multifunction\n"); + goto error; + } + + zpci_bus_create_hotplug_slots(zdev); + } else { + /* Hotplug slot will be created once function 0 appears */ + zbus->multifunction = 1; + } + + return 0; + +error: + zbus->function[zdev->devfn] = NULL; + zpci_nb_devices--; + return rc; } int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops) @@ -200,7 +381,6 @@ int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops) zdev->fid, ZPCI_NR_DEVICES); return -ENOSPC; } - zpci_nb_devices++; if (zdev->devfn >= ZPCI_FUNCTIONS_PER_BUS) return -EINVAL; @@ -214,51 +394,18 @@ int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops) return -ENOMEM; } - zdev->zbus = zbus; - if (zbus->function[zdev->devfn]) { - pr_err("devfn %04x is already assigned\n", zdev->devfn); - goto error; /* rc already set */ - } - zbus->function[zdev->devfn] = zdev; - - zpci_setup_bus_resources(zdev, &zbus->resources); - - if (zbus->bus) { - if (!zbus->multifunction) { - WARN_ONCE(1, "zbus is not multifunction\n"); - goto error_bus; - } - if (!zdev->rid_available) { - WARN_ONCE(1, "rid_available not set for multifunction\n"); - goto error_bus; - } - rc = zpci_bus_add_device(zbus, zdev); - if (rc) - goto error_bus; - } else if (zdev->devfn == 0) { - if (zbus->multifunction && !zdev->rid_available) { - WARN_ONCE(1, "rid_available not set on function 0 for multifunction\n"); - goto error_bus; - } - rc = zpci_bus_scan(zbus, (u16)zdev->uid, ops); - if (rc) - goto error_bus; - zpci_bus_add_devices(zbus); - rc = zpci_init_slot(zdev); + if (zdev->devfn == 0) { + rc = zpci_bus_create_pci_bus(zbus, zdev, ops); if (rc) - goto error_bus; - zdev->has_hp_slot = 1; - zbus->multifunction = zdev->rid_available; - zbus->max_bus_speed = zdev->max_bus_speed; - } else { - zbus->multifunction = 1; + goto error; } + rc = zpci_bus_add_device(zbus, zdev); + if (rc) + goto error; + return 0; -error_bus: - zpci_nb_devices--; - zbus->function[zdev->devfn] = NULL; 
error: pr_err("Adding PCI function %08x failed\n", zdev->fid); zpci_bus_put(zbus); diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h index f8dfac0b5b71..b877a97e6745 100644 --- a/arch/s390/pci/pci_bus.h +++ b/arch/s390/pci/pci_bus.h @@ -10,6 +10,12 @@ int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops); void zpci_bus_device_unregister(struct zpci_dev *zdev); +int zpci_bus_scan_bus(struct zpci_bus *zbus); +void zpci_bus_scan_busses(void); + +int zpci_bus_scan_device(struct zpci_dev *zdev); +void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error); + void zpci_release_device(struct kref *kref); static inline void zpci_zdev_put(struct zpci_dev *zdev) { diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index b4162da4e8a2..cd447b96b4b1 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -12,6 +12,7 @@ #include <linux/kernel.h> #include <linux/pci.h> #include <asm/pci_debug.h> +#include <asm/pci_dma.h> #include <asm/sclp.h> #include "pci_bus.h" @@ -73,15 +74,24 @@ void zpci_event_error(void *data) __zpci_event_error(data); } +static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh) +{ + zdev->fh = fh; + /* Give the driver a hint that the function is + * already unusable. + */ + zpci_bus_remove_device(zdev, true); + /* Even though the device is already gone we still + * need to free zPCI resources as part of the disable. + */ + zpci_disable_device(zdev); + zdev->state = ZPCI_FN_STATE_STANDBY; +} + static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) { struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); - struct pci_dev *pdev = NULL; enum zpci_state state; - int ret; - - if (zdev && zdev->zbus->bus) - pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn); zpci_err("avail CCDF:\n"); zpci_err_hex(ccdf, sizeof(*ccdf)); @@ -89,70 +99,46 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) switch (ccdf->pec) { case 0x0301: /* Reserved|Standby -> Configured */ if (!zdev) { - zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_CONFIGURED); - break; + zdev = zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_CONFIGURED); + if (IS_ERR(zdev)) + break; + } else { + /* the configuration request may be stale */ + if (zdev->state != ZPCI_FN_STATE_STANDBY) + break; + zdev->state = ZPCI_FN_STATE_CONFIGURED; } - /* the configuration request may be stale */ - if (zdev->state != ZPCI_FN_STATE_STANDBY) - break; - zdev->fh = ccdf->fh; - zdev->state = ZPCI_FN_STATE_CONFIGURED; - ret = zpci_enable_device(zdev); - if (ret) - break; - - /* the PCI function will be scanned once function 0 appears */ - if (!zdev->zbus->bus) - break; - - pdev = pci_scan_single_device(zdev->zbus->bus, zdev->devfn); - if (!pdev) - break; - - pci_bus_add_device(pdev); - pci_lock_rescan_remove(); - pci_bus_add_devices(zdev->zbus->bus); - pci_unlock_rescan_remove(); + zpci_scan_configured_device(zdev, ccdf->fh); break; case 0x0302: /* Reserved -> Standby */ - if (!zdev) { + if (!zdev) zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_STANDBY); - break; - } - zdev->fh = ccdf->fh; + else + zdev->fh = ccdf->fh; break; case 0x0303: /* Deconfiguration requested */ - if (!zdev) - break; - if (pdev) - zpci_remove_device(zdev); - - ret = zpci_disable_device(zdev); - if (ret) - break; - - ret = sclp_pci_deconfigure(zdev->fid); - zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret); - if (!ret) - zdev->state = ZPCI_FN_STATE_STANDBY; - + if (zdev) { + /* The event may have been queued before we confirgured + * the device. 
+ */ + if (zdev->state != ZPCI_FN_STATE_CONFIGURED) + break; + zdev->fh = ccdf->fh; + zpci_deconfigure_device(zdev); + } break; case 0x0304: /* Configured -> Standby|Reserved */ - if (!zdev) - break; - if (pdev) { - /* Give the driver a hint that the function is - * already unusable. */ - pdev->error_state = pci_channel_io_perm_failure; - zpci_remove_device(zdev); - } - - zdev->fh = ccdf->fh; - zpci_disable_device(zdev); - zdev->state = ZPCI_FN_STATE_STANDBY; - if (!clp_get_state(ccdf->fid, &state) && - state == ZPCI_FN_STATE_RESERVED) { - zpci_zdev_put(zdev); + if (zdev) { + /* The event may have been queued before we configured + * the device. + */ + if (zdev->state == ZPCI_FN_STATE_CONFIGURED) + zpci_event_hard_deconfigured(zdev, ccdf->fh); + /* The 0x0304 event may immediately reserve the device */ + if (!clp_get_state(zdev->fid, &state) && + state == ZPCI_FN_STATE_RESERVED) { + zpci_zdev_put(zdev); + } } break; case 0x0306: /* 0x308 or 0x302 for multiple devices */ diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c index 5c028bee91b9..6e2450c2b9c1 100644 --- a/arch/s390/pci/pci_sysfs.c +++ b/arch/s390/pci/pci_sysfs.c @@ -131,6 +131,45 @@ static ssize_t report_error_write(struct file *filp, struct kobject *kobj, } static BIN_ATTR(report_error, S_IWUSR, NULL, report_error_write, PAGE_SIZE); +static ssize_t uid_is_unique_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%d\n", zpci_unique_uid ? 1 : 0); +} +static DEVICE_ATTR_RO(uid_is_unique); + +#ifndef CONFIG_DMI +/* analogous to smbios index */ +static ssize_t index_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); + u32 index = ~0; + + if (zpci_unique_uid) + index = zdev->uid; + + return sysfs_emit(buf, "%u\n", index); +} +static DEVICE_ATTR_RO(index); + +static umode_t zpci_index_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + return zpci_unique_uid ?
attr->mode : 0; +} + +static struct attribute *zpci_ident_attrs[] = { + &dev_attr_index.attr, + NULL, +}; + +static struct attribute_group zpci_ident_attr_group = { + .attrs = zpci_ident_attrs, + .is_visible = zpci_index_is_visible, +}; +#endif + static struct bin_attribute *zpci_bin_attrs[] = { &bin_attr_util_string, &bin_attr_report_error, @@ -148,8 +187,10 @@ static struct attribute *zpci_dev_attrs[] = { &dev_attr_uid.attr, &dev_attr_recover.attr, &dev_attr_mio_enabled.attr, + &dev_attr_uid_is_unique.attr, NULL, }; + static struct attribute_group zpci_attr_group = { .attrs = zpci_dev_attrs, .bin_attrs = zpci_bin_attrs, @@ -170,5 +211,8 @@ static struct attribute_group pfip_attr_group = { const struct attribute_group *zpci_attr_groups[] = { &zpci_attr_group, &pfip_attr_group, +#ifndef CONFIG_DMI + &zpci_ident_attr_group, +#endif NULL, }; diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index e798e55915c2..68129537e350 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -2,6 +2,8 @@ config SUPERH def_bool y select ARCH_32BIT_OFF_T + select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM && MMU + select ARCH_ENABLE_MEMORY_HOTREMOVE if SPARSEMEM && MMU select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A) select ARCH_HAS_BINFMT_FLAT if !MMU @@ -101,9 +103,6 @@ config SYS_SUPPORTS_APM_EMULATION bool select ARCH_SUSPEND_POSSIBLE -config SYS_SUPPORTS_HUGETLBFS - bool - config SYS_SUPPORTS_SMP bool @@ -175,12 +174,12 @@ config CPU_SH3 config CPU_SH4 bool + select ARCH_SUPPORTS_HUGETLBFS if MMU select CPU_HAS_INTEVT select CPU_HAS_SR_RB select CPU_HAS_FPU if !CPU_SH4AL_DSP select SH_INTC select SYS_SUPPORTS_SH_TMU - select SYS_SUPPORTS_HUGETLBFS if MMU config CPU_SH4A bool diff --git a/arch/sh/Makefile b/arch/sh/Makefile index 3bcbf52fb30e..44bcb80e791a 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -9,7 +9,7 @@ # License. See the file "COPYING" in the main directory of this archive # for more details. 
# -ifneq ($(SUBARCH),$(ARCH)) +ifdef cross_compiling ifeq ($(CROSS_COMPILE),) CROSS_COMPILE := $(call cc-cross-prefix, sh-linux- sh-linux-gnu- sh-unknown-linux-gnu-) endif diff --git a/arch/sh/configs/edosk7705_defconfig b/arch/sh/configs/edosk7705_defconfig index ef7cc31997b1..9ee35269bee2 100644 --- a/arch/sh/configs/edosk7705_defconfig +++ b/arch/sh/configs/edosk7705_defconfig @@ -23,7 +23,6 @@ CONFIG_SH_PCLK_FREQ=31250000 # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set -# CONFIG_DEVKMEM is not set # CONFIG_UNIX98_PTYS is not set # CONFIG_LEGACY_PTYS is not set # CONFIG_HW_RANDOM is not set diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig index 315b04a8dd2f..601d062250d1 100644 --- a/arch/sh/configs/se7206_defconfig +++ b/arch/sh/configs/se7206_defconfig @@ -71,7 +71,6 @@ CONFIG_SMC91X=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=4 CONFIG_SERIAL_SH_SCI_CONSOLE=y diff --git a/arch/sh/configs/sh2007_defconfig b/arch/sh/configs/sh2007_defconfig index 99975db461d8..79f02f1c0dc8 100644 --- a/arch/sh/configs/sh2007_defconfig +++ b/arch/sh/configs/sh2007_defconfig @@ -75,7 +75,6 @@ CONFIG_INPUT_FF_MEMLESS=y # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_CONSOLE=y # CONFIG_LEGACY_PTYS is not set diff --git a/arch/sh/configs/sh7724_generic_defconfig b/arch/sh/configs/sh7724_generic_defconfig index 2c46c0004780..cbc9389a89a8 100644 --- a/arch/sh/configs/sh7724_generic_defconfig +++ b/arch/sh/configs/sh7724_generic_defconfig @@ -18,7 +18,6 @@ CONFIG_CPU_IDLE=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=6 CONFIG_SERIAL_SH_SCI_CONSOLE=y diff --git a/arch/sh/configs/sh7770_generic_defconfig b/arch/sh/configs/sh7770_generic_defconfig index 88193153e51b..ee2357deba0f 100644 --- a/arch/sh/configs/sh7770_generic_defconfig +++ b/arch/sh/configs/sh7770_generic_defconfig @@ -20,7 +20,6 @@ CONFIG_CPU_IDLE=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=6 CONFIG_SERIAL_SH_SCI_CONSOLE=y diff --git a/arch/sh/configs/sh7785lcr_32bit_defconfig b/arch/sh/configs/sh7785lcr_32bit_defconfig index 9b885c14c400..5c725c75fcef 100644 --- a/arch/sh/configs/sh7785lcr_32bit_defconfig +++ b/arch/sh/configs/sh7785lcr_32bit_defconfig @@ -66,7 +66,6 @@ CONFIG_INPUT_FF_MEMLESS=m CONFIG_INPUT_EVDEV=y CONFIG_INPUT_EVBUG=m CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=6 CONFIG_SERIAL_SH_SCI_CONSOLE=y diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h index 450b5854d7c6..3b6c7b5b7ec9 100644 --- a/arch/sh/include/asm/bitops.h +++ b/arch/sh/include/asm/bitops.h @@ -58,15 +58,16 @@ static inline unsigned long __ffs(unsigned long word) return result; } -#include <asm-generic/bitops/find.h> #include <asm-generic/bitops/ffs.h> #include <asm-generic/bitops/hweight.h> #include <asm-generic/bitops/lock.h> #include <asm-generic/bitops/sched.h> -#include <asm-generic/bitops/le.h> #include <asm-generic/bitops/ext2-atomic.h> #include <asm-generic/bitops/fls.h> #include <asm-generic/bitops/__fls.h> #include <asm-generic/bitops/fls64.h> +#include 
<asm-generic/bitops/le.h> +#include <asm-generic/bitops/find.h> + #endif /* __ASM_SH_BITOPS_H */ diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 6d5c6463bc07..cf9a3ec32406 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -283,11 +283,6 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, */ #define xlate_dev_mem_ptr(p) __va(p) -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - #define ARCH_HAS_VALID_PHYS_ADDR_RANGE int valid_phys_addr_range(phys_addr_t addr, size_t size); int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h index 360f713d009b..aeb8915e9254 100644 --- a/arch/sh/include/asm/tlb.h +++ b/arch/sh/include/asm/tlb.h @@ -4,12 +4,11 @@ #ifndef __ASSEMBLY__ #include <linux/pagemap.h> +#include <asm-generic/tlb.h> #ifdef CONFIG_MMU #include <linux/swap.h> -#include <asm-generic/tlb.h> - #if defined(CONFIG_CPU_SH4) extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t); extern void tlb_unwire_entry(void); @@ -24,12 +23,7 @@ static inline void tlb_unwire_entry(void) { BUG(); } -#endif - -#else /* CONFIG_MMU */ - -#include <asm-generic/tlb.h> - +#endif /* CONFIG_CPU_SH4 */ #endif /* CONFIG_MMU */ #endif /* __ASSEMBLY__ */ #endif /* __ASM_SH_TLB_H */ diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c index 0646c5961846..295c43315bbe 100644 --- a/arch/sh/kernel/ftrace.c +++ b/arch/sh/kernel/ftrace.c @@ -67,7 +67,7 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) * Modifying code must take extra care. On an SMP machine, if * the code being modified is also being executed on another CPU * that CPU will have undefined results and possibly take a GPF. - * We use kstop_machine to stop other CPUS from exectuing code. + * We use kstop_machine to stop other CPUS from executing code. * But this does not stop NMIs from happening. We still need * to protect against that. We separate out the modification of * the code to take care of this. diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index 445e3ece4c23..1d2507f22437 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c @@ -57,24 +57,6 @@ static inline int sh_pmu_initialized(void) return !!sh_pmu; } -const char *perf_pmu_name(void) -{ - if (!sh_pmu) - return NULL; - - return sh_pmu->name; -} -EXPORT_SYMBOL_GPL(perf_pmu_name); - -int perf_num_counters(void) -{ - if (!sh_pmu) - return 0; - - return sh_pmu->num_events; -} -EXPORT_SYMBOL_GPL(perf_num_counters); - /* * Release the PMU if this is the last perf_event. 
*/ diff --git a/arch/sh/kernel/syscalls/Makefile b/arch/sh/kernel/syscalls/Makefile index 285aaba832d9..6713c65a25e1 100644 --- a/arch/sh/kernel/syscalls/Makefile +++ b/arch/sh/kernel/syscalls/Makefile @@ -6,20 +6,14 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') syscall := $(src)/syscall.tbl -syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +syshdr := $(srctree)/scripts/syscallhdr.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ - cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ - '$(syshdr_abis_$(basetarget))' \ - '$(syshdr_pfx_$(basetarget))' \ - '$(syshdr_offset_$(basetarget))' + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr $< $@ quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))' \ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ $(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl index d08eebad6b7f..f47a0dc55445 100644 --- a/arch/sh/kernel/syscalls/syscall.tbl +++ b/arch/sh/kernel/syscalls/syscall.tbl @@ -445,3 +445,7 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr +443 common quotactl_path sys_quotactl_path +444 common landlock_create_ruleset sys_landlock_create_ruleset +445 common landlock_add_rule sys_landlock_add_rule +446 common landlock_restrict_self sys_landlock_restrict_self diff --git a/arch/sh/kernel/syscalls/syscallhdr.sh b/arch/sh/kernel/syscalls/syscallhdr.sh deleted file mode 100644 index 4c0519861e97..000000000000 --- a/arch/sh/kernel/syscalls/syscallhdr.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -prefix="$4" -offset="$5" - -fileguard=_UAPI_ASM_SH_`basename "$out" | sed \ - -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ - -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - printf "#ifndef %s\n" "${fileguard}" - printf "#define %s\n" "${fileguard}" - printf "\n" - - nxt=0 - while read nr abi name entry ; do - if [ -z "$offset" ]; then - printf "#define __NR_%s%s\t%s\n" \ - "${prefix}" "${name}" "${nr}" - else - printf "#define __NR_%s%s\t(%s + %s)\n" \ - "${prefix}" "${name}" "${offset}" "${nr}" - fi - nxt=$((nr+1)) - done - - printf "\n" - printf "#ifdef __KERNEL__\n" - printf "#define __NR_syscalls\t%s\n" "${nxt}" - printf "#endif\n" - printf "\n" - printf "#endif /* %s */\n" "${fileguard}" -) > "$out" diff --git a/arch/sh/kernel/syscalls/syscalltbl.sh b/arch/sh/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index 904b8e6e625d..000000000000 --- a/arch/sh/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry ; do - emit $((nxt+offset)) 
$((nr+offset)) $entry - nxt=$((nr+1)) - done -) > "$out" diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index 77aa2f802d8d..d551a9cac41e 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig @@ -136,14 +136,6 @@ config ARCH_SPARSEMEM_DEFAULT config ARCH_SELECT_MEMORY_MODEL def_bool y -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y - depends on SPARSEMEM && MMU - -config ARCH_ENABLE_MEMORY_HOTREMOVE - def_bool y - depends on SPARSEMEM && MMU - config ARCH_MEMORY_PROBE def_bool y depends on MEMORY_HOTPLUG diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index ddfa9685f1ef..72c2e1b46c08 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c @@ -16,6 +16,7 @@ #include <linux/mutex.h> #include <linux/fs.h> #include <linux/highmem.h> +#include <linux/pagemap.h> #include <asm/mmu_context.h> #include <asm/cache_insns.h> #include <asm/cacheflush.h> diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c index 4c67b3d88775..9b63a53a5e46 100644 --- a/arch/sh/mm/cache-sh7705.c +++ b/arch/sh/mm/cache-sh7705.c @@ -13,6 +13,7 @@ #include <linux/mman.h> #include <linux/mm.h> #include <linux/fs.h> +#include <linux/pagemap.h> #include <linux/threads.h> #include <asm/addrspace.h> #include <asm/page.h> diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c index 220d7bc43d2b..999ab5916e69 100644 --- a/arch/sh/mm/hugetlbpage.c +++ b/arch/sh/mm/hugetlbpage.c @@ -21,7 +21,7 @@ #include <asm/tlbflush.h> #include <asm/cacheflush.h> -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgd; diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 0db6919af8d3..168d7d4dd735 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -359,7 +359,6 @@ void __init mem_init(void) vsyscall_init(); - mem_init_print_info(NULL); pr_info("virtual kernel memory layout:\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index 12a4fb0bd52a..18099099583e 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig @@ -122,7 +122,6 @@ CONFIG_INPUT_SPARCSPKR=y # CONFIG_SERIO_SERPORT is not set CONFIG_SERIO_PCIPS2=m CONFIG_SERIO_RAW=m -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SUNSU=y CONFIG_SERIAL_SUNSU_CONSOLE=y CONFIG_SERIAL_SUNSAB=y diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index aec20406145e..0b9d98ced34a 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 generated-y += syscall_table_32.h generated-y += syscall_table_64.h -generated-y += syscall_table_c32.h generic-y += export.h generic-y += kvm_para.h generic-y += mcs_spinlock.h diff --git a/arch/sparc/include/asm/ftrace.h b/arch/sparc/include/asm/ftrace.h index d3aa1a524431..e284394cb3aa 100644 --- a/arch/sparc/include/asm/ftrace.h +++ b/arch/sparc/include/asm/ftrace.h @@ -17,7 +17,7 @@ void _mcount(void); #endif #ifdef CONFIG_DYNAMIC_FTRACE -/* reloction of mcount call site is the same as the address */ +/* relocation of mcount call site is the same as the address */ static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h index 9bb27e5c22f1..5ffa820dcd4d 100644 --- a/arch/sparc/include/asm/io_64.h +++ b/arch/sparc/include/asm/io_64.h @@ -409,6 
+409,10 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size) #define ioremap_uc(X,Y) ioremap((X),(Y)) #define ioremap_wc(X,Y) ioremap((X),(Y)) #define ioremap_wt(X,Y) ioremap((X),(Y)) +static inline void __iomem *ioremap_np(unsigned long offset, unsigned long size) +{ + return NULL; +} static inline void iounmap(volatile void __iomem *addr) { @@ -450,11 +454,6 @@ void sbus_set_sbus64(struct device *, int); */ #define xlate_dev_mem_ptr(p) __va(p) -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - #endif #endif /* !(__SPARC64_IO_H) */ diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h index 632cdb959542..a5cf79c149fe 100644 --- a/arch/sparc/include/asm/pgtable_32.h +++ b/arch/sparc/include/asm/pgtable_32.h @@ -321,6 +321,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) pgprot_val(newprot)); } +/* only used by the huge vmap code, should never be called */ +#define pud_page(pud) NULL + struct seq_file; void mmu_info(struct seq_file *m); diff --git a/arch/sparc/kernel/syscalls/Makefile b/arch/sparc/kernel/syscalls/Makefile index 283f64407b07..0f2ea5bcb0d7 100644 --- a/arch/sparc/kernel/syscalls/Makefile +++ b/arch/sparc/kernel/syscalls/Makefile @@ -6,46 +6,34 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') syscall := $(src)/syscall.tbl -syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +syshdr := $(srctree)/scripts/syscallhdr.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ - cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ - '$(syshdr_abis_$(basetarget))' \ - '$(syshdr_pfx_$(basetarget))' \ - '$(syshdr_offset_$(basetarget))' + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr --abis $(abis) $< $@ quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))' \ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis $(abis) $< $@ -syshdr_abis_unistd_32 := common,32 +$(uapi)/unistd_32.h: abis := common,32 $(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -syshdr_abis_unistd_64 := common,64 +$(uapi)/unistd_64.h: abis := common,64 $(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -systbl_abis_syscall_table_32 := common,32 +$(kapi)/syscall_table_32.h: abis := common,32 $(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) -systbl_abis_syscall_table_64 := common,64 +$(kapi)/syscall_table_64.h: abis := common,64 $(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) -systbl_abis_syscall_table_c32 := common,32 -systbl_abi_syscall_table_c32 := c32 -$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) FORCE - $(call if_changed,systbl) - uapisyshdr-y += unistd_32.h unistd_64.h kapisyshdr-y += syscall_table_32.h \ - syscall_table_64.h \ - syscall_table_c32.h + syscall_table_64.h uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl index 84403a99039c..b9e1c0e735b7 100644 --- a/arch/sparc/kernel/syscalls/syscall.tbl +++ b/arch/sparc/kernel/syscalls/syscall.tbl @@ -488,3 +488,7 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 442 
common mount_setattr sys_mount_setattr +443 common quotactl_path sys_quotactl_path +444 common landlock_create_ruleset sys_landlock_create_ruleset +445 common landlock_add_rule sys_landlock_add_rule +446 common landlock_restrict_self sys_landlock_restrict_self diff --git a/arch/sparc/kernel/syscalls/syscallhdr.sh b/arch/sparc/kernel/syscalls/syscallhdr.sh deleted file mode 100644 index cf50a75cc0bb..000000000000 --- a/arch/sparc/kernel/syscalls/syscallhdr.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -prefix="$4" -offset="$5" - -fileguard=_UAPI_ASM_SPARC_`basename "$out" | sed \ - -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ - -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - printf "#ifndef %s\n" "${fileguard}" - printf "#define %s\n" "${fileguard}" - printf "\n" - - nxt=0 - while read nr abi name entry compat ; do - if [ -z "$offset" ]; then - printf "#define __NR_%s%s\t%s\n" \ - "${prefix}" "${name}" "${nr}" - else - printf "#define __NR_%s%s\t(%s + %s)\n" \ - "${prefix}" "${name}" "${offset}" "${nr}" - fi - nxt=$((nr+1)) - done - - printf "\n" - printf "#ifdef __KERNEL__\n" - printf "#define __NR_syscalls\t%s\n" "${nxt}" - printf "#endif\n" - printf "\n" - printf "#endif /* %s */\n" "${fileguard}" -) > "$out" diff --git a/arch/sparc/kernel/syscalls/syscalltbl.sh b/arch/sparc/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index 77cf0143ba19..000000000000 --- a/arch/sparc/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s, sys_nis_syscall, )\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry compat ; do - if [ "$my_abi" = "c32" ] && [ ! -z "$compat" ]; then - emit $((nxt+offset)) $((nr+offset)) $compat - else - emit $((nxt+offset)) $((nr+offset)) $entry - fi - nxt=$((nr+1)) - done -) > "$out" diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index ab9e4d57685a..3aaffa017706 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S @@ -9,10 +9,10 @@ * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu) */ -#define __SYSCALL(nr, entry, nargs) .long entry +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) +#define __SYSCALL(nr, entry) .long entry .data .align 4 .globl sys_call_table sys_call_table: #include <asm/syscall_table_32.h> /* 32-bit native syscalls */ -#undef __SYSCALL diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index a27394bf7d7f..398fe449dd34 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -10,18 +10,20 @@ * Copyright (C) 1995 Adrian M. 
Rodriguez (adrian@remus.rutgers.edu) */ -#define __SYSCALL(nr, entry, nargs) .word entry +#define __SYSCALL(nr, entry) .word entry .text .align 4 #ifdef CONFIG_COMPAT .globl sys_call_table32 sys_call_table32: -#include <asm/syscall_table_c32.h> /* Compat syscalls */ +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) +#include <asm/syscall_table_32.h> /* Compat syscalls */ +#undef __SYSCALL_WITH_COMPAT #endif /* CONFIG_COMPAT */ .align 4 .globl sys_call_table64, sys_call_table sys_call_table64: sys_call_table: +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) #include <asm/syscall_table_64.h> /* 64-bit native syscalls */ -#undef __SYSCALL diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index ad4b42f04988..04d8790f6c32 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -279,7 +279,7 @@ unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&p unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); } unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); } -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgd; diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index 6139c5700ccc..1e9f577f084d 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -292,8 +292,6 @@ void __init mem_init(void) map_high_region(start_pfn, end_pfn); } - - mem_init_print_info(NULL); } void sparc_flush_page_to_ram(struct page *page) diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 182bb7bdaa0a..e454f179cf5d 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2520,7 +2520,6 @@ void __init mem_init(void) } mark_page_reserved(mem_map_zero); - mem_init_print_info(NULL); if (tlb_type == cheetah || tlb_type == cheetah_plus) cheetah_ecache_flush_init(); diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 20ee14739333..9a725547578e 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -9,6 +9,7 @@ #include <linux/mm.h> #include <linux/swap.h> #include <linux/preempt.h> +#include <linux/pagemap.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> diff --git a/arch/um/Kconfig b/arch/um/Kconfig index c3030db3325f..57cfd9a1c082 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -5,6 +5,7 @@ menu "UML-specific options" config UML bool default y + select ARCH_EPHEMERAL_INODES select ARCH_HAS_KCOV select ARCH_NO_PREEMPT select HAVE_ARCH_AUDITSYSCALL diff --git a/arch/um/Kconfig.debug b/arch/um/Kconfig.debug index 315d368e63ad..1dfb2959c73b 100644 --- a/arch/um/Kconfig.debug +++ b/arch/um/Kconfig.debug @@ -17,6 +17,7 @@ config GCOV bool "Enable gcov support" depends on DEBUG_INFO depends on !KCOV + depends on !MODULES help This option allows developers to retrieve coverage data from a UML session. 
diff --git a/arch/um/drivers/cow.h b/arch/um/drivers/cow.h index 103adac691ed..9a67c017000f 100644 --- a/arch/um/drivers/cow.h +++ b/arch/um/drivers/cow.h @@ -24,10 +24,3 @@ extern void cow_sizes(int version, __u64 size, int sectorsize, int align, int *data_offset_out); #endif - -/* - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c index d35d3f305a31..5b064d360cb7 100644 --- a/arch/um/drivers/hostaudio_kern.c +++ b/arch/um/drivers/hostaudio_kern.c @@ -122,13 +122,11 @@ static ssize_t hostaudio_write(struct file *file, const char __user *buffer, static __poll_t hostaudio_poll(struct file *file, struct poll_table_struct *wait) { - __poll_t mask = 0; - #ifdef DEBUG printk(KERN_DEBUG "hostaudio: poll called (unimplemented)\n"); #endif - return mask; + return 0; } static long hostaudio_ioctl(struct file *file, diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c index 47a02e60898d..d27a2a9faf3e 100644 --- a/arch/um/drivers/vector_kern.c +++ b/arch/um/drivers/vector_kern.c @@ -8,7 +8,6 @@ * Copyright (C) 2001 by various other people who didn't put their name here. */ -#include <linux/version.h> #include <linux/memblock.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h index def376194dce..b9e20bbe2f75 100644 --- a/arch/um/include/asm/pgtable.h +++ b/arch/um/include/asm/pgtable.h @@ -302,7 +302,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) struct mm_struct; extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr); -#define update_mmu_cache(vma,address,ptep) do ; while (0) +#define update_mmu_cache(vma,address,ptep) do {} while (0) /* Encode and de-code a swap entry */ #define __swp_type(x) (((x).val >> 5) & 0x1f) diff --git a/arch/um/include/uapi/asm/Kbuild b/arch/um/include/uapi/asm/Kbuild new file mode 100644 index 000000000000..f66554cd5c45 --- /dev/null +++ b/arch/um/include/uapi/asm/Kbuild @@ -0,0 +1 @@ +# SPDX-License-Identifier: GPL-2.0 diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile index 5aa882011e04..e698e0c7dbdc 100644 --- a/arch/um/kernel/Makefile +++ b/arch/um/kernel/Makefile @@ -21,7 +21,6 @@ obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \ obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o obj-$(CONFIG_GPROF) += gprof_syms.o -obj-$(CONFIG_GCOV) += gmon_syms.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_STACKTRACE) += stacktrace.o diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S index dacbfabf66d8..2f2a8ce92f1e 100644 --- a/arch/um/kernel/dyn.lds.S +++ b/arch/um/kernel/dyn.lds.S @@ -6,6 +6,12 @@ OUTPUT_ARCH(ELF_ARCH) ENTRY(_start) jiffies = jiffies_64; +VERSION { + { + local: *; + }; +} + SECTIONS { PROVIDE (__executable_start = START); diff --git a/arch/um/kernel/gmon_syms.c b/arch/um/kernel/gmon_syms.c deleted file mode 100644 index 9361a8eb9bf1..000000000000 --- a/arch/um/kernel/gmon_syms.c +++ /dev/null @@ -1,16 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) - */ - -#include <linux/module.h> - -extern void __bb_init_func(void *) __attribute__((weak)); -EXPORT_SYMBOL(__bb_init_func); - -extern void __gcov_init(void *) __attribute__((weak)); -EXPORT_SYMBOL(__gcov_init); -extern void __gcov_merge_add(void *, unsigned int) __attribute__((weak)); 
-EXPORT_SYMBOL(__gcov_merge_add); -extern void __gcov_exit(void) __attribute__((weak)); -EXPORT_SYMBOL(__gcov_exit); diff --git a/arch/um/kernel/kmsg_dump.c b/arch/um/kernel/kmsg_dump.c index 6516ef1f8274..0224fcb36e22 100644 --- a/arch/um/kernel/kmsg_dump.c +++ b/arch/um/kernel/kmsg_dump.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/kmsg_dump.h> +#include <linux/spinlock.h> #include <linux/console.h> #include <linux/string.h> #include <shared/init.h> @@ -9,8 +10,11 @@ static void kmsg_dumper_stdout(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason) { + static struct kmsg_dump_iter iter; + static DEFINE_SPINLOCK(lock); static char line[1024]; struct console *con; + unsigned long flags; size_t len = 0; /* only dump kmsg when no console is available */ @@ -29,11 +33,18 @@ static void kmsg_dumper_stdout(struct kmsg_dumper *dumper, if (con) return; + if (!spin_trylock_irqsave(&lock, flags)) + return; + + kmsg_dump_rewind(&iter); + printf("kmsg_dump:\n"); - while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len)) { + while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len)) { line[len] = '\0'; printf("%s", line); } + + spin_unlock_irqrestore(&lock, flags); } static struct kmsg_dumper kmsg_dumper = { diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 9242dc91d751..8e636ce02949 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -54,7 +54,6 @@ void __init mem_init(void) memblock_free_all(); max_low_pfn = totalram_pages(); max_pfn = max_low_pfn; - mem_init_print_info(NULL); kmalloc_ok = 1; } @@ -73,8 +72,7 @@ static void __init one_page_table_init(pmd_t *pmd) set_pmd(pmd, __pmd(_KERNPG_TABLE + (unsigned long) __pa(pte))); - if (pte != pte_offset_kernel(pmd, 0)) - BUG(); + BUG_ON(pte != pte_offset_kernel(pmd, 0)); } } diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S index 45d957d7004c..7a8e2b123e29 100644 --- a/arch/um/kernel/uml.lds.S +++ b/arch/um/kernel/uml.lds.S @@ -7,6 +7,12 @@ OUTPUT_ARCH(ELF_ARCH) ENTRY(_start) jiffies = jiffies_64; +VERSION { + { + local: *; + }; +} + SECTIONS { /* This must contain the right address - not quite the default ELF one.*/ diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 2792879d398e..0045e1b44190 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -60,7 +60,13 @@ config X86 select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI select ARCH_32BIT_OFF_T if X86_32 select ARCH_CLOCKSOURCE_INIT + select ARCH_ENABLE_HUGEPAGE_MIGRATION if X86_64 && HUGETLB_PAGE && MIGRATION + select ARCH_ENABLE_MEMORY_HOTPLUG if X86_64 || (X86_32 && HIGHMEM) + select ARCH_ENABLE_MEMORY_HOTREMOVE if MEMORY_HOTPLUG + select ARCH_ENABLE_SPLIT_PMD_PTLOCK if X86_64 || X86_PAE + select ARCH_ENABLE_THP_MIGRATION if X86_64 && TRANSPARENT_HUGEPAGE select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI + select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE if !X86_PAE select ARCH_HAS_DEVMEM_IS_ALLOWED @@ -100,6 +106,7 @@ config X86 select ARCH_SUPPORTS_LTO_CLANG if X86_64 select ARCH_SUPPORTS_LTO_CLANG_THIN if X86_64 select ARCH_USE_BUILTIN_BSWAP + select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS select ARCH_USE_SYM_ANNOTATIONS @@ -164,7 +171,9 @@ config X86 select HAVE_ARCH_TRANSPARENT_HUGEPAGE select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64 select HAVE_ARCH_USERFAULTFD_WP if X86_64 && USERFAULTFD + select HAVE_ARCH_USERFAULTFD_MINOR if X86_64 && USERFAULTFD select HAVE_ARCH_VMAP_STACK if X86_64 + select 
HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET select HAVE_ARCH_WITHIN_STACK_FRAMES select HAVE_ASM_MODVERSIONS select HAVE_CMPXCHG_DOUBLE @@ -313,9 +322,6 @@ config GENERIC_CALIBRATE_DELAY config ARCH_HAS_CPU_RELAX def_bool y -config ARCH_HAS_CACHE_LINE_SIZE - def_bool y - config ARCH_HAS_FILTER_PGPROT def_bool y @@ -360,10 +366,6 @@ config X86_64_SMP def_bool y depends on X86_64 && SMP -config X86_32_LAZY_GS - def_bool y - depends on X86_32 && !STACKPROTECTOR - config ARCH_SUPPORTS_UPROBES def_bool y @@ -386,7 +388,8 @@ config CC_HAS_SANE_STACKPROTECTOR default $(success,$(srctree)/scripts/gcc-x86_32-has-stack-protector.sh $(CC)) help We have to make sure stack protector is unconditionally disabled if - the compiler produces broken code. + the compiler produces broken code or if it does not let us control + the segment on 32-bit kernels. menu "Processor type and features" @@ -571,6 +574,7 @@ config X86_UV depends on X86_EXTENDED_PLATFORM depends on NUMA depends on EFI + depends on KEXEC_CORE depends on X86_X2APIC depends on PCI help @@ -777,6 +781,7 @@ if HYPERVISOR_GUEST config PARAVIRT bool "Enable paravirtualization code" + depends on HAVE_STATIC_CALL help This changes the kernel so it can modify itself when it is run under a hypervisor, potentially improving performance significantly @@ -1406,7 +1411,7 @@ config HIGHMEM4G config HIGHMEM64G bool "64GB" - depends on !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6 + depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6 select X86_PAE help Select this if you have a 32-bit processor and more than 4 @@ -1518,6 +1523,7 @@ config AMD_MEM_ENCRYPT select ARCH_USE_MEMREMAP_PROT select ARCH_HAS_FORCE_DMA_UNENCRYPTED select INSTRUCTION_DECODER + select ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS help Say yes to enable support for the encryption of system memory. 
This requires an AMD processor that supports Secure Memory @@ -1931,6 +1937,7 @@ config X86_SGX depends on CRYPTO_SHA256=y select SRCU select MMU_NOTIFIER + select NUMA_KEEP_MEMINFO if NUMA help Intel(R) Software Guard eXtensions (SGX) is a set of CPU instructions that can be used by applications to set aside private regions of code @@ -2425,30 +2432,13 @@ config ARCH_HAS_ADD_PAGES def_bool y depends on X86_64 && ARCH_ENABLE_MEMORY_HOTPLUG -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y - depends on X86_64 || (X86_32 && HIGHMEM) - -config ARCH_ENABLE_MEMORY_HOTREMOVE +config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE def_bool y - depends on MEMORY_HOTPLUG config USE_PERCPU_NUMA_NODE_ID def_bool y depends on NUMA -config ARCH_ENABLE_SPLIT_PMD_PTLOCK - def_bool y - depends on X86_64 || X86_PAE - -config ARCH_ENABLE_HUGEPAGE_MIGRATION - def_bool y - depends on X86_64 && HUGETLB_PAGE && MIGRATION - -config ARCH_ENABLE_THP_MIGRATION - def_bool y - depends on X86_64 && TRANSPARENT_HUGEPAGE - menu "Power management and ACPI options" config ARCH_HIBERNATION_HEADER diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 2d6d5a28c3bf..c77c5d8a7b3e 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -27,12 +27,13 @@ endif REALMODE_CFLAGS := -m16 -g -Os -DDISABLE_BRANCH_PROFILING \ -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ - -mno-mmx -mno-sse + -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none) REALMODE_CFLAGS += -ffreestanding REALMODE_CFLAGS += -fno-stack-protector REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member) REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4)) +REALMODE_CFLAGS += $(CLANG_FLAGS) export REALMODE_CFLAGS # BITS is used as extension for files which are available in a 32 bit @@ -79,6 +80,14 @@ ifeq ($(CONFIG_X86_32),y) # temporary until string.h is fixed KBUILD_CFLAGS += -ffreestanding + + ifeq ($(CONFIG_STACKPROTECTOR),y) + ifeq ($(CONFIG_SMP),y) + KBUILD_CFLAGS += -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard + else + KBUILD_CFLAGS += -mstack-protector-guard=global + endif + endif else BITS := 64 UTS_MACHINE := x86_64 @@ -129,8 +138,8 @@ ifdef CONFIG_X86_X32 x32_ld_ok := $(call try-run,\ /bin/echo -e '1: .quad 1b' | \ $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" - && \ - $(OBJCOPY) -O elf32-x86-64 "$$TMP" "$$TMPO" && \ - $(LD) -m elf32_x86_64 "$$TMPO" -o "$$TMP",y,n) + $(OBJCOPY) -O elf32-x86-64 "$$TMP" "$$TMP.o" && \ + $(LD) -m elf32_x86_64 "$$TMP.o" -o "$$TMP",y,n) ifeq ($(x32_ld_ok),y) CONFIG_X86_X32_ABI := y KBUILD_AFLAGS += -DCONFIG_X86_X32_ABI diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index e0bc3988c3fa..6e5522aebbbd 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -46,6 +46,7 @@ KBUILD_CFLAGS += -D__DISABLE_EXPORTS # Disable relocation relaxation in case the link is not PIE. KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no) KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h +KBUILD_CFLAGS += $(CLANG_FLAGS) # sev-es.c indirectly inludes inat-table.h which is generated during # compilation and stored in $(objtree). 
Add the directory to the includes so diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S index c4bb0f9363f5..95a223b3e56a 100644 --- a/arch/x86/boot/compressed/efi_thunk_64.S +++ b/arch/x86/boot/compressed/efi_thunk_64.S @@ -5,7 +5,7 @@ * Early support for invoking 32-bit EFI services from a 64-bit kernel. * * Because this thunking occurs before ExitBootServices() we have to - * restore the firmware's 32-bit GDT before we make EFI serivce calls, + * restore the firmware's 32-bit GDT before we make EFI service calls, * since the firmware's 32-bit IDT is still currently installed and it * needs to be able to service interrupts. * diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index e94874f4bbc1..a2347ded77ea 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -34,6 +34,7 @@ #include <asm/asm-offsets.h> #include <asm/bootparam.h> #include <asm/desc_defs.h> +#include <asm/trapnr.h> #include "pgtable.h" /* @@ -107,9 +108,19 @@ SYM_FUNC_START(startup_32) movl %eax, %gs movl %eax, %ss -/* setup a stack and make sure cpu supports long mode. */ + /* Setup a stack and load CS from current GDT */ leal rva(boot_stack_end)(%ebp), %esp + pushl $__KERNEL32_CS + leal rva(1f)(%ebp), %eax + pushl %eax + lretl +1: + + /* Setup Exception handling for SEV-ES */ + call startup32_load_idt + + /* Make sure cpu supports long mode. */ call verify_cpu testl %eax, %eax jnz .Lno_longmode @@ -172,11 +183,21 @@ SYM_FUNC_START(startup_32) */ call get_sev_encryption_bit xorl %edx, %edx +#ifdef CONFIG_AMD_MEM_ENCRYPT testl %eax, %eax jz 1f subl $32, %eax /* Encryption bit is always above bit 31 */ bts %eax, %edx /* Set encryption mask for page tables */ + /* + * Mark SEV as active in sev_status so that startup32_check_sev_cbit() + * will do a check. The sev_status memory will be fully initialized + * with the contents of MSR_AMD_SEV_STATUS later in + * set_sev_encryption_mask(). For now it is sufficient to know that SEV + * is active. + */ + movl $1, rva(sev_status)(%ebp) 1: +#endif /* Initialize Page tables to 0 */ leal rva(pgtable)(%ebx), %edi @@ -231,7 +252,7 @@ SYM_FUNC_START(startup_32) /* * Setup for the jump to 64bit mode * - * When the jump is performend we will be in long mode but + * When the jump is performed we will be in long mode but * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1 * (and in turn EFER.LMA = 1). To jump into 64bit mode we use * the new gdt/idt that has __KERNEL_CS with CS.L = 1. 
@@ -261,6 +282,9 @@ SYM_FUNC_START(startup_32) movl %esi, %edx 1: #endif + /* Check if the C-bit position is correct when SEV is active */ + call startup32_check_sev_cbit + pushl $__KERNEL_CS pushl %eax @@ -694,6 +718,19 @@ SYM_DATA_START(boot_idt) .endr SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end) +#ifdef CONFIG_AMD_MEM_ENCRYPT +SYM_DATA_START(boot32_idt_desc) + .word boot32_idt_end - boot32_idt - 1 + .long 0 +SYM_DATA_END(boot32_idt_desc) + .balign 8 +SYM_DATA_START(boot32_idt) + .rept 32 + .quad 0 + .endr +SYM_DATA_END_LABEL(boot32_idt, SYM_L_GLOBAL, boot32_idt_end) +#endif + #ifdef CONFIG_EFI_STUB SYM_DATA(image_offset, .long 0) #endif @@ -786,6 +823,137 @@ SYM_DATA_START_LOCAL(loaded_image_proto) SYM_DATA_END(loaded_image_proto) #endif +#ifdef CONFIG_AMD_MEM_ENCRYPT + __HEAD + .code32 +/* + * Write an IDT entry into boot32_idt + * + * Parameters: + * + * %eax: Handler address + * %edx: Vector number + * + * Physical offset is expected in %ebp + */ +SYM_FUNC_START(startup32_set_idt_entry) + push %ebx + push %ecx + + /* IDT entry address to %ebx */ + leal rva(boot32_idt)(%ebp), %ebx + shl $3, %edx + addl %edx, %ebx + + /* Build IDT entry, lower 4 bytes */ + movl %eax, %edx + andl $0x0000ffff, %edx # Target code segment offset [15:0] + movl $__KERNEL32_CS, %ecx # Target code segment selector + shl $16, %ecx + orl %ecx, %edx + + /* Store lower 4 bytes to IDT */ + movl %edx, (%ebx) + + /* Build IDT entry, upper 4 bytes */ + movl %eax, %edx + andl $0xffff0000, %edx # Target code segment offset [31:16] + orl $0x00008e00, %edx # Present, Type 32-bit Interrupt Gate + + /* Store upper 4 bytes to IDT */ + movl %edx, 4(%ebx) + + pop %ecx + pop %ebx + ret +SYM_FUNC_END(startup32_set_idt_entry) +#endif + +SYM_FUNC_START(startup32_load_idt) +#ifdef CONFIG_AMD_MEM_ENCRYPT + /* #VC handler */ + leal rva(startup32_vc_handler)(%ebp), %eax + movl $X86_TRAP_VC, %edx + call startup32_set_idt_entry + + /* Load IDT */ + leal rva(boot32_idt)(%ebp), %eax + movl %eax, rva(boot32_idt_desc+2)(%ebp) + lidt rva(boot32_idt_desc)(%ebp) +#endif + ret +SYM_FUNC_END(startup32_load_idt) + +/* + * Check for the correct C-bit position when the startup_32 boot-path is used. + * + * The check makes use of the fact that all memory is encrypted when paging is + * disabled. The function creates 64 bits of random data using the RDRAND + * instruction. RDRAND is mandatory for SEV guests, so always available. If the + * hypervisor violates that the kernel will crash right here. + * + * The 64 bits of random data are stored to a memory location and at the same + * time kept in the %eax and %ebx registers. Since encryption is always active + * when paging is off the random data will be stored encrypted in main memory. + * + * Then paging is enabled. When the C-bit position is correct all memory is + * still mapped encrypted and comparing the register values with memory will + * succeed. An incorrect C-bit position will map all memory unencrypted, so that + * the compare will use the encrypted random data and fail. + */ +SYM_FUNC_START(startup32_check_sev_cbit) +#ifdef CONFIG_AMD_MEM_ENCRYPT + pushl %eax + pushl %ebx + pushl %ecx + pushl %edx + + /* Check for non-zero sev_status */ + movl rva(sev_status)(%ebp), %eax + testl %eax, %eax + jz 4f + + /* + * Get two 32-bit random values - Don't bail out if RDRAND fails + * because it is better to prevent forward progress if no random value + * can be gathered. 
+ */ +1: rdrand %eax + jnc 1b +2: rdrand %ebx + jnc 2b + + /* Store to memory and keep it in the registers */ + movl %eax, rva(sev_check_data)(%ebp) + movl %ebx, rva(sev_check_data+4)(%ebp) + + /* Enable paging to see if encryption is active */ + movl %cr0, %edx /* Backup %cr0 in %edx */ + movl $(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */ + movl %ecx, %cr0 + + cmpl %eax, rva(sev_check_data)(%ebp) + jne 3f + cmpl %ebx, rva(sev_check_data+4)(%ebp) + jne 3f + + movl %edx, %cr0 /* Restore previous %cr0 */ + + jmp 4f + +3: /* Check failed - hlt the machine */ + hlt + jmp 3b + +4: + popl %edx + popl %ecx + popl %ebx + popl %eax +#endif + ret +SYM_FUNC_END(startup32_check_sev_cbit) + /* * Stack and heap for uncompression */ diff --git a/arch/x86/boot/compressed/idt_64.c b/arch/x86/boot/compressed/idt_64.c index 804a502ee0d2..9b93567d663a 100644 --- a/arch/x86/boot/compressed/idt_64.c +++ b/arch/x86/boot/compressed/idt_64.c @@ -52,3 +52,17 @@ void load_stage2_idt(void) load_boot_idt(&boot_idt_desc); } + +void cleanup_exception_handling(void) +{ + /* + * Flush GHCB from cache and map it encrypted again when running as + * SEV-ES guest. + */ + sev_es_shutdown_ghcb(); + + /* Set a null-idt, disabling #PF and #VC handling */ + boot_idt_desc.size = 0; + boot_idt_desc.address = 0; + load_boot_idt(&boot_idt_desc); +} diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c index b92fffbe761f..e36690778497 100644 --- a/arch/x86/boot/compressed/kaslr.c +++ b/arch/x86/boot/compressed/kaslr.c @@ -639,9 +639,9 @@ static bool process_mem_region(struct mem_vector *region, if (slot_area_index == MAX_SLOT_AREA) { debug_putstr("Aborted e820/efi memmap scan (slot_areas full)!\n"); - return 1; + return true; } - return 0; + return false; } #if defined(CONFIG_MEMORY_HOTREMOVE) && defined(CONFIG_ACPI) diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S index aa561795efd1..c1e81a848b2a 100644 --- a/arch/x86/boot/compressed/mem_encrypt.S +++ b/arch/x86/boot/compressed/mem_encrypt.S @@ -23,12 +23,6 @@ SYM_FUNC_START(get_sev_encryption_bit) push %ecx push %edx - /* Check if running under a hypervisor */ - movl $1, %eax - cpuid - bt $31, %ecx /* Check the hypervisor bit */ - jnc .Lno_sev - movl $0x80000000, %eax /* CPUID to check the highest leaf */ cpuid cmpl $0x8000001f, %eax /* See if 0x8000001f is available */ @@ -67,10 +61,132 @@ SYM_FUNC_START(get_sev_encryption_bit) ret SYM_FUNC_END(get_sev_encryption_bit) +/** + * sev_es_req_cpuid - Request a CPUID value from the Hypervisor using + * the GHCB MSR protocol + * + * @%eax: Register to request (0=EAX, 1=EBX, 2=ECX, 3=EDX) + * @%edx: CPUID Function + * + * Returns 0 in %eax on success, non-zero on failure + * %edx returns CPUID value on success + */ +SYM_CODE_START_LOCAL(sev_es_req_cpuid) + shll $30, %eax + orl $0x00000004, %eax + movl $MSR_AMD64_SEV_ES_GHCB, %ecx + wrmsr + rep; vmmcall # VMGEXIT + rdmsr + + /* Check response */ + movl %eax, %ecx + andl $0x3ffff000, %ecx # Bits [12-29] MBZ + jnz 2f + + /* Check return code */ + andl $0xfff, %eax + cmpl $5, %eax + jne 2f + + /* All good - return success */ + xorl %eax, %eax +1: + ret +2: + movl $-1, %eax + jmp 1b +SYM_CODE_END(sev_es_req_cpuid) + +SYM_CODE_START(startup32_vc_handler) + pushl %eax + pushl %ebx + pushl %ecx + pushl %edx + + /* Keep CPUID function in %ebx */ + movl %eax, %ebx + + /* Check if error-code == SVM_EXIT_CPUID */ + cmpl $0x72, 16(%esp) + jne .Lfail + + movl $0, %eax # Request CPUID[fn].EAX + movl %ebx, %edx 
# CPUID fn + call sev_es_req_cpuid # Call helper + testl %eax, %eax # Check return code + jnz .Lfail + movl %edx, 12(%esp) # Store result + + movl $1, %eax # Request CPUID[fn].EBX + movl %ebx, %edx # CPUID fn + call sev_es_req_cpuid # Call helper + testl %eax, %eax # Check return code + jnz .Lfail + movl %edx, 8(%esp) # Store result + + movl $2, %eax # Request CPUID[fn].ECX + movl %ebx, %edx # CPUID fn + call sev_es_req_cpuid # Call helper + testl %eax, %eax # Check return code + jnz .Lfail + movl %edx, 4(%esp) # Store result + + movl $3, %eax # Request CPUID[fn].EDX + movl %ebx, %edx # CPUID fn + call sev_es_req_cpuid # Call helper + testl %eax, %eax # Check return code + jnz .Lfail + movl %edx, 0(%esp) # Store result + + /* + * Sanity check CPUID results from the Hypervisor. See comment in + * do_vc_no_ghcb() for more details on why this is necessary. + */ + + /* Fail if SEV leaf not available in CPUID[0x80000000].EAX */ + cmpl $0x80000000, %ebx + jne .Lcheck_sev + cmpl $0x8000001f, 12(%esp) + jb .Lfail + jmp .Ldone + +.Lcheck_sev: + /* Fail if SEV bit not set in CPUID[0x8000001f].EAX[1] */ + cmpl $0x8000001f, %ebx + jne .Ldone + btl $1, 12(%esp) + jnc .Lfail + +.Ldone: + popl %edx + popl %ecx + popl %ebx + popl %eax + + /* Remove error code */ + addl $4, %esp + + /* Jump over CPUID instruction */ + addl $2, (%esp) + + iret +.Lfail: + /* Send terminate request to Hypervisor */ + movl $0x100, %eax + xorl %edx, %edx + movl $MSR_AMD64_SEV_ES_GHCB, %ecx + wrmsr + rep; vmmcall + + /* If request fails, go to hlt loop */ + hlt + jmp .Lfail +SYM_CODE_END(startup32_vc_handler) + .code64 #include "../../kernel/sev_verify_cbit.S" - SYM_FUNC_START(set_sev_encryption_mask) #ifdef CONFIG_AMD_MEM_ENCRYPT push %rbp diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 267e7f93050e..dde042f64cca 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -430,8 +430,6 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap, error("Destination address too large"); #endif #ifndef CONFIG_RELOCATABLE - if ((unsigned long)output != LOAD_PHYSICAL_ADDR) - error("Destination address does not match LOAD_PHYSICAL_ADDR"); if (virt_addr != LOAD_PHYSICAL_ADDR) error("Destination virtual address changed when not relocatable"); #endif @@ -443,11 +441,8 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap, handle_relocations(output, output_len, virt_addr); debug_putstr("done.\nBooting the kernel.\n"); - /* - * Flush GHCB from cache and map it encrypted again when running as - * SEV-ES guest. 
- */ - sev_es_shutdown_ghcb(); + /* Disable exception handling before booting the kernel */ + cleanup_exception_handling(); return output; } diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 901ea5ebec22..e5612f035498 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -155,6 +155,12 @@ extern pteval_t __default_kernel_pte_mask; extern gate_desc boot_idt[BOOT_IDT_ENTRIES]; extern struct desc_ptr boot_idt_desc; +#ifdef CONFIG_X86_64 +void cleanup_exception_handling(void); +#else +static inline void cleanup_exception_handling(void) { } +#endif + /* IDT Entry Points */ void boot_page_fault(void); void boot_stage1_vc(void); diff --git a/arch/x86/boot/compressed/sev-es.c b/arch/x86/boot/compressed/sev-es.c index 27826c265aab..82041bd380e5 100644 --- a/arch/x86/boot/compressed/sev-es.c +++ b/arch/x86/boot/compressed/sev-es.c @@ -78,16 +78,15 @@ static inline void sev_es_wr_ghcb_msr(u64 val) static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt) { char buffer[MAX_INSN_SIZE]; - enum es_result ret; + int ret; memcpy(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE); - insn_init(&ctxt->insn, buffer, MAX_INSN_SIZE, 1); - insn_get_length(&ctxt->insn); + ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64); + if (ret < 0) + return ES_DECODE_FAILED; - ret = ctxt->insn.immediate.got ? ES_OK : ES_DECODE_FAILED; - - return ret; + return ES_OK; } static enum es_result vc_write_mem(struct es_em_ctxt *ctxt, @@ -200,14 +199,8 @@ void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code) } finish: - if (result == ES_OK) { + if (result == ES_OK) vc_finish_insn(&ctxt); - } else if (result != ES_RETRY) { - /* - * For now, just halt the machine. That makes debugging easier, - * later we just call sev_es_terminate() here. 
- */ - while (true) - asm volatile("hlt\n"); - } + else if (result != ES_RETRY) + sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST); } diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index b28e36b7c96b..d0959e7b809f 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -2,8 +2,6 @@ # # x86 crypto algorithms -OBJECT_FILES_NON_STANDARD := y - obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S index 2cf8e94d986a..98e3552b6e03 100644 --- a/arch/x86/crypto/aesni-intel_avx-x86_64.S +++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S @@ -212,10 +212,6 @@ HashKey_8_k = 16*21 # store XOR of HashKey^8 <<1 mod poly here (for Karatsu #define arg4 %rcx #define arg5 %r8 #define arg6 %r9 -#define arg7 STACK_OFFSET+8*1(%r14) -#define arg8 STACK_OFFSET+8*2(%r14) -#define arg9 STACK_OFFSET+8*3(%r14) -#define arg10 STACK_OFFSET+8*4(%r14) #define keysize 2*15*16(arg1) i = 0 @@ -237,9 +233,6 @@ define_reg j %j .noaltmacro .endm -# need to push 4 registers into stack to maintain -STACK_OFFSET = 8*4 - TMP1 = 16*0 # Temporary storage for AAD TMP2 = 16*1 # Temporary storage for AES State 2 (State 1 is stored in an XMM register) TMP3 = 16*2 # Temporary storage for AES State 3 @@ -256,25 +249,22 @@ VARIABLE_OFFSET = 16*8 ################################ .macro FUNC_SAVE - #the number of pushes must equal STACK_OFFSET push %r12 push %r13 - push %r14 push %r15 - mov %rsp, %r14 - - + push %rbp + mov %rsp, %rbp sub $VARIABLE_OFFSET, %rsp and $~63, %rsp # align rsp to 64 bytes .endm .macro FUNC_RESTORE - mov %r14, %rsp + mov %rbp, %rsp + pop %rbp pop %r15 - pop %r14 pop %r13 pop %r12 .endm @@ -294,7 +284,7 @@ VARIABLE_OFFSET = 16*8 # combined for GCM encrypt and decrypt functions # clobbering all xmm registers -# clobbering r10, r11, r12, r13, r14, r15 +# clobbering r10, r11, r12, r13, r15, rax .macro GCM_ENC_DEC INITIAL_BLOCKS GHASH_8_ENCRYPT_8_PARALLEL GHASH_LAST_8 GHASH_MUL ENC_DEC REP vmovdqu AadHash(arg2), %xmm8 vmovdqu HashKey(arg2), %xmm13 # xmm13 = HashKey @@ -996,7 +986,7 @@ _partial_block_done_\@: ## num_initial_blocks = b mod 4# ## encrypt the initial num_initial_blocks blocks and apply ghash on the ciphertext ## r10, r11, r12, rax are clobbered -## arg1, arg3, arg4, r14 are used as a pointer only, not modified +## arg1, arg2, arg3, arg4 are used as pointers only, not modified .macro INITIAL_BLOCKS_AVX REP num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC i = (8-\num_initial_blocks) @@ -1231,7 +1221,7 @@ _initial_blocks_done\@: # encrypt 8 blocks at a time # ghash the 8 previously encrypted ciphertext blocks -# arg1, arg3, arg4 are used as pointers only, not modified +# arg1, arg2, arg3, arg4 are used as pointers only, not modified # r11 is the data offset value .macro GHASH_8_ENCRYPT_8_PARALLEL_AVX REP T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC @@ -1944,7 +1934,7 @@ SYM_FUNC_END(aesni_gcm_finalize_avx_gen2) ## num_initial_blocks = b mod 4# ## encrypt the initial num_initial_blocks blocks and apply ghash on the ciphertext ## r10, r11, r12, rax are clobbered -## arg1, arg3, arg4, r14 are used as a pointer only, not modified +## arg1, arg2, arg3, arg4 are used as pointers only, not modified .macro INITIAL_BLOCKS_AVX2 REP num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 
XMM8 T6 T_key ENC_DEC VER i = (8-\num_initial_blocks) @@ -2186,7 +2176,7 @@ _initial_blocks_done\@: # encrypt 8 blocks at a time # ghash the 8 previously encrypted ciphertext blocks -# arg1, arg3, arg4 are used as pointers only, not modified +# arg1, arg2, arg3, arg4 are used as pointers only, not modified # r11 is the data offset value .macro GHASH_8_ENCRYPT_8_PARALLEL_AVX2 REP T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S index 782e9712a1ec..706f70829a07 100644 --- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S @@ -990,6 +990,7 @@ SYM_FUNC_START(camellia_cbc_dec_32way) * %rdx: src (32 blocks) */ FRAME_BEGIN + subq $(16 * 32), %rsp; vzeroupper; @@ -1002,7 +1003,6 @@ SYM_FUNC_START(camellia_cbc_dec_32way) %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rdx, (key_table)(CTX, %r8, 8)); - movq %rsp, %r10; cmpq %rsi, %rdx; je .Lcbc_dec_use_stack; @@ -1015,7 +1015,6 @@ SYM_FUNC_START(camellia_cbc_dec_32way) * dst still in-use (because dst == src), so use stack for temporary * storage. */ - subq $(16 * 32), %rsp; movq %rsp, %rax; .Lcbc_dec_continue: @@ -1025,7 +1024,6 @@ SYM_FUNC_START(camellia_cbc_dec_32way) vpxor %ymm7, %ymm7, %ymm7; vinserti128 $1, (%rdx), %ymm7, %ymm7; vpxor (%rax), %ymm7, %ymm7; - movq %r10, %rsp; vpxor (0 * 32 + 16)(%rdx), %ymm6, %ymm6; vpxor (1 * 32 + 16)(%rdx), %ymm5, %ymm5; vpxor (2 * 32 + 16)(%rdx), %ymm4, %ymm4; @@ -1047,6 +1045,7 @@ SYM_FUNC_START(camellia_cbc_dec_32way) vzeroupper; + addq $(16 * 32), %rsp; FRAME_END ret; SYM_FUNC_END(camellia_cbc_dec_32way) diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c index 7c4c7b2fbf05..98cf3b4e4c9f 100644 --- a/arch/x86/crypto/crc32-pclmul_glue.c +++ b/arch/x86/crypto/crc32-pclmul_glue.c @@ -24,7 +24,7 @@ /* * Copyright 2012 Xyratex Technology Limited * - * Wrappers for kernel crypto shash api to pclmulqdq crc32 imlementation. + * Wrappers for kernel crypto shash api to pclmulqdq crc32 implementation. 
*/ #include <linux/init.h> #include <linux/module.h> diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S index 884dc767b051..ac1f303eed0f 100644 --- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S @@ -53,7 +53,7 @@ .endm .macro JMPTBL_ENTRY i -.word crc_\i - crc_array +.quad crc_\i .endm .macro JNC_LESS_THAN j @@ -168,10 +168,7 @@ continue_block: xor crc2, crc2 ## branch into array - lea jump_table(%rip), %bufp - movzwq (%bufp, %rax, 2), len - lea crc_array(%rip), %bufp - lea (%bufp, len, 1), %bufp + mov jump_table(,%rax,8), %bufp JMP_NOSPEC bufp ################################################################ diff --git a/arch/x86/crypto/curve25519-x86_64.c b/arch/x86/crypto/curve25519-x86_64.c index 5af8021b98ce..6706b6cb1d0f 100644 --- a/arch/x86/crypto/curve25519-x86_64.c +++ b/arch/x86/crypto/curve25519-x86_64.c @@ -114,11 +114,11 @@ static inline void fadd(u64 *out, const u64 *f1, const u64 *f2) ); } -/* Computes the field substraction of two field elements */ +/* Computes the field subtraction of two field elements */ static inline void fsub(u64 *out, const u64 *f1, const u64 *f2) { asm volatile( - /* Compute the raw substraction of f1-f2 */ + /* Compute the raw subtraction of f1-f2 */ " movq 0(%1), %%r8;" " subq 0(%2), %%r8;" " movq 8(%1), %%r9;" @@ -135,7 +135,7 @@ static inline void fsub(u64 *out, const u64 *f1, const u64 *f2) " mov $38, %%rcx;" " cmovc %%rcx, %%rax;" - /* Step 2: Substract carry*38 from the original difference */ + /* Step 2: Subtract carry*38 from the original difference */ " sub %%rax, %%r8;" " sbb $0, %%r9;" " sbb $0, %%r10;" diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c index 646da46e8d10..1dfb8af48a3c 100644 --- a/arch/x86/crypto/poly1305_glue.c +++ b/arch/x86/crypto/poly1305_glue.c @@ -16,7 +16,7 @@ #include <asm/simd.h> asmlinkage void poly1305_init_x86_64(void *ctx, - const u8 key[POLY1305_KEY_SIZE]); + const u8 key[POLY1305_BLOCK_SIZE]); asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp, const size_t len, const u32 padbit); asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE], @@ -81,7 +81,7 @@ static void convert_to_base2_64(void *ctx) state->is_base2_26 = 0; } -static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_KEY_SIZE]) +static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_BLOCK_SIZE]) { poly1305_init_x86_64(ctx, key); } @@ -129,7 +129,7 @@ static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE], poly1305_emit_avx(ctx, mac, nonce); } -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key) +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) { poly1305_simd_init(&dctx->h, key); dctx->s[0] = get_unaligned_le32(&key[16]); diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S index 1e594d60afa5..5eed620f4676 100644 --- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S +++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S @@ -645,9 +645,9 @@ _loop3: RESERVE_STACK = (W_SIZE*4 + 8+24) /* Align stack */ - mov %rsp, %rbx + push %rbp + mov %rsp, %rbp and $~(0x20-1), %rsp - push %rbx sub $RESERVE_STACK, %rsp avx2_zeroupper @@ -665,8 +665,8 @@ _loop3: avx2_zeroupper - add $RESERVE_STACK, %rsp - pop %rsp + mov %rbp, %rsp + pop %rbp pop %r15 pop %r14 diff --git a/arch/x86/crypto/sha1_ni_asm.S b/arch/x86/crypto/sha1_ni_asm.S index 11efe3a45a1f..5d8415f482bd 100644 --- 
a/arch/x86/crypto/sha1_ni_asm.S +++ b/arch/x86/crypto/sha1_ni_asm.S @@ -59,8 +59,6 @@ #define DATA_PTR %rsi /* 2nd arg */ #define NUM_BLKS %rdx /* 3rd arg */ -#define RSPSAVE %rax - /* gcc conversion */ #define FRAME_SIZE 32 /* space for 2x16 bytes */ @@ -96,7 +94,8 @@ .text .align 32 SYM_FUNC_START(sha1_ni_transform) - mov %rsp, RSPSAVE + push %rbp + mov %rsp, %rbp sub $FRAME_SIZE, %rsp and $~0xF, %rsp @@ -288,7 +287,8 @@ SYM_FUNC_START(sha1_ni_transform) pextrd $3, E0, 1*16(DIGEST_PTR) .Ldone_hash: - mov RSPSAVE, %rsp + mov %rbp, %rsp + pop %rbp ret SYM_FUNC_END(sha1_ni_transform) diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S index 11ff60c29c8b..4087f7432a7e 100644 --- a/arch/x86/crypto/sha256-avx2-asm.S +++ b/arch/x86/crypto/sha256-avx2-asm.S @@ -117,15 +117,13 @@ _XMM_SAVE_SIZE = 0 _INP_END_SIZE = 8 _INP_SIZE = 8 _CTX_SIZE = 8 -_RSP_SIZE = 8 _XFER = 0 _XMM_SAVE = _XFER + _XFER_SIZE _INP_END = _XMM_SAVE + _XMM_SAVE_SIZE _INP = _INP_END + _INP_END_SIZE _CTX = _INP + _INP_SIZE -_RSP = _CTX + _CTX_SIZE -STACK_SIZE = _RSP + _RSP_SIZE +STACK_SIZE = _CTX + _CTX_SIZE # rotate_Xs # Rotate values of symbols X0...X3 @@ -533,11 +531,11 @@ SYM_FUNC_START(sha256_transform_rorx) pushq %r14 pushq %r15 - mov %rsp, %rax + push %rbp + mov %rsp, %rbp + subq $STACK_SIZE, %rsp and $-32, %rsp # align rsp to 32 byte boundary - mov %rax, _RSP(%rsp) - shl $6, NUM_BLKS # convert to bytes jz done_hash @@ -704,7 +702,8 @@ only_one_block: done_hash: - mov _RSP(%rsp), %rsp + mov %rbp, %rsp + pop %rbp popq %r15 popq %r14 diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S index 684d58c8bc4f..3d8f0fd4eea8 100644 --- a/arch/x86/crypto/sha512-avx-asm.S +++ b/arch/x86/crypto/sha512-avx-asm.S @@ -76,14 +76,10 @@ tmp0 = %rax W_SIZE = 80*8 # W[t] + K[t] | W[t+1] + K[t+1] WK_SIZE = 2*8 -RSPSAVE_SIZE = 1*8 -GPRSAVE_SIZE = 5*8 frame_W = 0 frame_WK = frame_W + W_SIZE -frame_RSPSAVE = frame_WK + WK_SIZE -frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE -frame_size = frame_GPRSAVE + GPRSAVE_SIZE +frame_size = frame_WK + WK_SIZE # Useful QWORD "arrays" for simpler memory references # MSG, DIGEST, K_t, W_t are arrays @@ -281,18 +277,18 @@ SYM_FUNC_START(sha512_transform_avx) test msglen, msglen je nowork + # Save GPRs + push %rbx + push %r12 + push %r13 + push %r14 + push %r15 + # Allocate Stack Space - mov %rsp, %rax + push %rbp + mov %rsp, %rbp sub $frame_size, %rsp and $~(0x20 - 1), %rsp - mov %rax, frame_RSPSAVE(%rsp) - - # Save GPRs - mov %rbx, frame_GPRSAVE(%rsp) - mov %r12, frame_GPRSAVE +8*1(%rsp) - mov %r13, frame_GPRSAVE +8*2(%rsp) - mov %r14, frame_GPRSAVE +8*3(%rsp) - mov %r15, frame_GPRSAVE +8*4(%rsp) updateblock: @@ -353,15 +349,16 @@ updateblock: dec msglen jnz updateblock - # Restore GPRs - mov frame_GPRSAVE(%rsp), %rbx - mov frame_GPRSAVE +8*1(%rsp), %r12 - mov frame_GPRSAVE +8*2(%rsp), %r13 - mov frame_GPRSAVE +8*3(%rsp), %r14 - mov frame_GPRSAVE +8*4(%rsp), %r15 - # Restore Stack Pointer - mov frame_RSPSAVE(%rsp), %rsp + mov %rbp, %rsp + pop %rbp + + # Restore GPRs + pop %r15 + pop %r14 + pop %r13 + pop %r12 + pop %rbx nowork: ret diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S index 3a44bdcfd583..072cb0f0deae 100644 --- a/arch/x86/crypto/sha512-avx2-asm.S +++ b/arch/x86/crypto/sha512-avx2-asm.S @@ -102,17 +102,13 @@ SRND_SIZE = 1*8 INP_SIZE = 1*8 INPEND_SIZE = 1*8 CTX_SIZE = 1*8 -RSPSAVE_SIZE = 1*8 -GPRSAVE_SIZE = 5*8 frame_XFER = 0 frame_SRND = frame_XFER + XFER_SIZE frame_INP = frame_SRND + SRND_SIZE frame_INPEND = 
frame_INP + INP_SIZE frame_CTX = frame_INPEND + INPEND_SIZE -frame_RSPSAVE = frame_CTX + CTX_SIZE -frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE -frame_size = frame_GPRSAVE + GPRSAVE_SIZE +frame_size = frame_CTX + CTX_SIZE ## assume buffers not aligned #define VMOVDQ vmovdqu @@ -570,18 +566,18 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE # "blocks" is the message length in SHA512 blocks ######################################################################## SYM_FUNC_START(sha512_transform_rorx) + # Save GPRs + push %rbx + push %r12 + push %r13 + push %r14 + push %r15 + # Allocate Stack Space - mov %rsp, %rax + push %rbp + mov %rsp, %rbp sub $frame_size, %rsp and $~(0x20 - 1), %rsp - mov %rax, frame_RSPSAVE(%rsp) - - # Save GPRs - mov %rbx, 8*0+frame_GPRSAVE(%rsp) - mov %r12, 8*1+frame_GPRSAVE(%rsp) - mov %r13, 8*2+frame_GPRSAVE(%rsp) - mov %r14, 8*3+frame_GPRSAVE(%rsp) - mov %r15, 8*4+frame_GPRSAVE(%rsp) shl $7, NUM_BLKS # convert to bytes jz done_hash @@ -672,15 +668,17 @@ loop2: done_hash: -# Restore GPRs - mov 8*0+frame_GPRSAVE(%rsp), %rbx - mov 8*1+frame_GPRSAVE(%rsp), %r12 - mov 8*2+frame_GPRSAVE(%rsp), %r13 - mov 8*3+frame_GPRSAVE(%rsp), %r14 - mov 8*4+frame_GPRSAVE(%rsp), %r15 - # Restore Stack Pointer - mov frame_RSPSAVE(%rsp), %rsp + mov %rbp, %rsp + pop %rbp + + # Restore GPRs + pop %r15 + pop %r14 + pop %r13 + pop %r12 + pop %rbx + ret SYM_FUNC_END(sha512_transform_rorx) diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S index 50812af0b083..bd51c9070bed 100644 --- a/arch/x86/crypto/sha512-ssse3-asm.S +++ b/arch/x86/crypto/sha512-ssse3-asm.S @@ -74,14 +74,10 @@ tmp0 = %rax W_SIZE = 80*8 WK_SIZE = 2*8 -RSPSAVE_SIZE = 1*8 -GPRSAVE_SIZE = 5*8 frame_W = 0 frame_WK = frame_W + W_SIZE -frame_RSPSAVE = frame_WK + WK_SIZE -frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE -frame_size = frame_GPRSAVE + GPRSAVE_SIZE +frame_size = frame_WK + WK_SIZE # Useful QWORD "arrays" for simpler memory references # MSG, DIGEST, K_t, W_t are arrays @@ -283,18 +279,18 @@ SYM_FUNC_START(sha512_transform_ssse3) test msglen, msglen je nowork + # Save GPRs + push %rbx + push %r12 + push %r13 + push %r14 + push %r15 + # Allocate Stack Space - mov %rsp, %rax + push %rbp + mov %rsp, %rbp sub $frame_size, %rsp and $~(0x20 - 1), %rsp - mov %rax, frame_RSPSAVE(%rsp) - - # Save GPRs - mov %rbx, frame_GPRSAVE(%rsp) - mov %r12, frame_GPRSAVE +8*1(%rsp) - mov %r13, frame_GPRSAVE +8*2(%rsp) - mov %r14, frame_GPRSAVE +8*3(%rsp) - mov %r15, frame_GPRSAVE +8*4(%rsp) updateblock: @@ -355,15 +351,16 @@ updateblock: dec msglen jnz updateblock - # Restore GPRs - mov frame_GPRSAVE(%rsp), %rbx - mov frame_GPRSAVE +8*1(%rsp), %r12 - mov frame_GPRSAVE +8*2(%rsp), %r13 - mov frame_GPRSAVE +8*3(%rsp), %r14 - mov frame_GPRSAVE +8*4(%rsp), %r15 - # Restore Stack Pointer - mov frame_RSPSAVE(%rsp), %rsp + mov %rbp, %rsp + pop %rbp + + # Restore GPRs + pop %r15 + pop %r14 + pop %r13 + pop %r12 + pop %rbx nowork: ret diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S index fc23552afe37..bca4cea757ce 100644 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S @@ -88,7 +88,7 @@ /* * Combined G1 & G2 function. Reordered with help of rotates to have moves - * at begining. + * at beginning. 
*/ #define g1g2_3(ab, cd, Tx0, Tx1, Tx2, Tx3, Ty0, Ty1, Ty2, Ty3, x, y) \ /* G1,1 && G2,1 */ \ diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c index 03725696397c..3507cf2064f1 100644 --- a/arch/x86/crypto/twofish_glue_3way.c +++ b/arch/x86/crypto/twofish_glue_3way.c @@ -117,7 +117,7 @@ static bool is_blacklisted_cpu(void) * storing blocks in 64bit registers to allow three blocks to * be processed parallel. Parallel operation then allows gaining * more performance than was trade off, on out-of-order CPUs. - * However Atom does not benefit from this parallellism and + * However Atom does not benefit from this parallelism and * should be blacklisted. */ return true; diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 4efd39aacb9f..7b2542b13ebd 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -38,6 +38,7 @@ #ifdef CONFIG_X86_64 __visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs) { + add_random_kstack_offset(); nr = syscall_enter_from_user_mode(regs, nr); instrumentation_begin(); @@ -83,6 +84,7 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs) { unsigned int nr = syscall_32_enter(regs); + add_random_kstack_offset(); /* * Subtlety here: if ptrace pokes something larger than 2^32-1 into * orig_ax, the unsigned int return value truncates it. This may @@ -102,6 +104,7 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs) unsigned int nr = syscall_32_enter(regs); int res; + add_random_kstack_offset(); /* * This cannot use syscall_enter_from_user_mode() as it has to * fetch EBP before invoking any of the syscall entry work diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index df8c017e6161..ccb9d32768f3 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -20,7 +20,7 @@ * 1C(%esp) - %ds * 20(%esp) - %es * 24(%esp) - %fs - * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS + * 28(%esp) - unused -- was %gs on old stackprotector kernels * 2C(%esp) - orig_eax * 30(%esp) - %eip * 34(%esp) - %cs @@ -40,7 +40,7 @@ #include <asm/processor-flags.h> #include <asm/irq_vectors.h> #include <asm/cpufeatures.h> -#include <asm/alternative-asm.h> +#include <asm/alternative.h> #include <asm/asm.h> #include <asm/smap.h> #include <asm/frame.h> @@ -53,83 +53,6 @@ #define PTI_SWITCH_MASK (1 << PAGE_SHIFT) -/* - * User gs save/restore - * - * %gs is used for userland TLS and kernel only uses it for stack - * canary which is required to be at %gs:20 by gcc. Read the comment - * at the top of stackprotector.h for more info. - * - * Local labels 98 and 99 are used. 
- */ -#ifdef CONFIG_X86_32_LAZY_GS - - /* unfortunately push/pop can't be no-op */ -.macro PUSH_GS - pushl $0 -.endm -.macro POP_GS pop=0 - addl $(4 + \pop), %esp -.endm -.macro POP_GS_EX -.endm - - /* all the rest are no-op */ -.macro PTGS_TO_GS -.endm -.macro PTGS_TO_GS_EX -.endm -.macro GS_TO_REG reg -.endm -.macro REG_TO_PTGS reg -.endm -.macro SET_KERNEL_GS reg -.endm - -#else /* CONFIG_X86_32_LAZY_GS */ - -.macro PUSH_GS - pushl %gs -.endm - -.macro POP_GS pop=0 -98: popl %gs - .if \pop <> 0 - add $\pop, %esp - .endif -.endm -.macro POP_GS_EX -.pushsection .fixup, "ax" -99: movl $0, (%esp) - jmp 98b -.popsection - _ASM_EXTABLE(98b, 99b) -.endm - -.macro PTGS_TO_GS -98: mov PT_GS(%esp), %gs -.endm -.macro PTGS_TO_GS_EX -.pushsection .fixup, "ax" -99: movl $0, PT_GS(%esp) - jmp 98b -.popsection - _ASM_EXTABLE(98b, 99b) -.endm - -.macro GS_TO_REG reg - movl %gs, \reg -.endm -.macro REG_TO_PTGS reg - movl \reg, PT_GS(%esp) -.endm -.macro SET_KERNEL_GS reg - movl $(__KERNEL_STACK_CANARY), \reg - movl \reg, %gs -.endm - -#endif /* CONFIG_X86_32_LAZY_GS */ - /* Unconditionally switch to user cr3 */ .macro SWITCH_TO_USER_CR3 scratch_reg:req ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI @@ -209,7 +132,7 @@ * * Lets build a 5 entry IRET frame after that, such that struct pt_regs * is complete and in particular regs->sp is correct. This gives us - * the original 6 enties as gap: + * the original 6 entries as gap: * * 14*4(%esp) - <previous context> * 13*4(%esp) - gap / flags @@ -282,7 +205,7 @@ .macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0 cld .if \skip_gs == 0 - PUSH_GS + pushl $0 .endif pushl %fs @@ -307,9 +230,6 @@ movl $(__USER_DS), %edx movl %edx, %ds movl %edx, %es -.if \skip_gs == 0 - SET_KERNEL_GS %edx -.endif /* Switch to kernel stack if necessary */ .if \switch_stacks > 0 SWITCH_TO_KERNEL_STACK @@ -348,7 +268,7 @@ 1: popl %ds 2: popl %es 3: popl %fs - POP_GS \pop + addl $(4 + \pop), %esp /* pop the unused "gs" slot */ IRET_FRAME .pushsection .fixup, "ax" 4: movl $0, (%esp) @@ -361,7 +281,6 @@ _ASM_EXTABLE(1b, 4b) _ASM_EXTABLE(2b, 5b) _ASM_EXTABLE(3b, 6b) - POP_GS_EX .endm .macro RESTORE_ALL_NMI cr3_reg:req pop=0 @@ -430,7 +349,7 @@ * will soon execute iret and the tracer was already set to * the irqstate after the IRET: */ - DISABLE_INTERRUPTS(CLBR_ANY) + cli lss (%esp), %esp /* switch to espfix segment */ .Lend_\@: #endif /* CONFIG_X86_ESPFIX32 */ @@ -779,7 +698,7 @@ SYM_CODE_START(__switch_to_asm) #ifdef CONFIG_STACKPROTECTOR movl TASK_stack_canary(%edx), %ebx - movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset + movl %ebx, PER_CPU_VAR(__stack_chk_guard) #endif #ifdef CONFIG_RETPOLINE @@ -976,7 +895,6 @@ SYM_FUNC_START(entry_SYSENTER_32) movl PT_EIP(%esp), %edx /* pt_regs->ip */ movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */ 1: mov PT_FS(%esp), %fs - PTGS_TO_GS popl %ebx /* pt_regs->bx */ addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */ @@ -1012,7 +930,6 @@ SYM_FUNC_START(entry_SYSENTER_32) jmp 1b .popsection _ASM_EXTABLE(1b, 2b) - PTGS_TO_GS_EX .Lsysenter_fix_flags: pushl $X86_EFLAGS_FIXED @@ -1077,7 +994,7 @@ restore_all_switch_stack: * when returning from IPI handler and when returning from * scheduler to user-space. 
*/ - INTERRUPT_RETURN + iret .section .fixup, "ax" SYM_CODE_START(asm_iret_error) @@ -1154,11 +1071,7 @@ SYM_CODE_START_LOCAL_NOALIGN(handle_exception) SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1 ENCODE_FRAME_POINTER - /* fixup %gs */ - GS_TO_REG %ecx movl PT_GS(%esp), %edi # get the function address - REG_TO_PTGS %ecx - SET_KERNEL_GS %ecx /* fixup orig %eax */ movl PT_ORIG_EAX(%esp), %edx # get the error code diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 400908dff42e..a16a5294d55f 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -305,7 +305,7 @@ SYM_CODE_END(ret_from_fork) .macro DEBUG_ENTRY_ASSERT_IRQS_OFF #ifdef CONFIG_DEBUG_ENTRY pushq %rax - SAVE_FLAGS(CLBR_RAX) + SAVE_FLAGS testl $X86_EFLAGS_IF, %eax jz .Lokay_\@ ud2 @@ -511,7 +511,7 @@ SYM_CODE_START(\asmsym) /* * No need to switch back to the IST stack. The current stack is either * identical to the stack in the IRET frame or the VC fall-back stack, - * so it is definitly mapped even with PTI enabled. + * so it is definitely mapped even with PTI enabled. */ jmp paranoid_exit diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index a1c9f496fca6..28a1423ce32e 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -447,3 +447,7 @@ 440 i386 process_madvise sys_process_madvise 441 i386 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 442 i386 mount_setattr sys_mount_setattr +443 i386 quotactl_path sys_quotactl_path +444 i386 landlock_create_ruleset sys_landlock_create_ruleset +445 i386 landlock_add_rule sys_landlock_add_rule +446 i386 landlock_restrict_self sys_landlock_restrict_self diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index 7bf01cbe582f..ecd551b08d05 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -364,6 +364,10 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr +443 common quotactl_path sys_quotactl_path +444 common landlock_create_ruleset sys_landlock_create_ruleset +445 common landlock_add_rule sys_landlock_add_rule +446 common landlock_restrict_self sys_landlock_restrict_self # # Due to a historical design error, certain syscalls are numbered differently diff --git a/arch/x86/entry/vdso/vdso2c.c b/arch/x86/entry/vdso/vdso2c.c index 2d0f3d8bcc25..edfe9780f6d1 100644 --- a/arch/x86/entry/vdso/vdso2c.c +++ b/arch/x86/entry/vdso/vdso2c.c @@ -218,7 +218,7 @@ int main(int argc, char **argv) /* * Figure out the struct name. If we're writing to a .so file, - * generate raw output insted. + * generate raw output instead. 
*/ name = strdup(argv[3]); namelen = strlen(name); diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h index 1c7cfac7e64a..5264daa8859f 100644 --- a/arch/x86/entry/vdso/vdso2c.h +++ b/arch/x86/entry/vdso/vdso2c.h @@ -35,7 +35,7 @@ static void BITSFUNC(extract)(const unsigned char *data, size_t data_len, if (offset + len > data_len) fail("section to extract overruns input data"); - fprintf(outfile, "static const unsigned char %s[%lu] = {", name, len); + fprintf(outfile, "static const unsigned char %s[%zu] = {", name, len); BITSFUNC(copy)(outfile, data + offset, len); fprintf(outfile, "\n};\n\n"); } diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S index de1fff7188aa..6ddd7a937b3e 100644 --- a/arch/x86/entry/vdso/vdso32/system_call.S +++ b/arch/x86/entry/vdso/vdso32/system_call.S @@ -6,7 +6,7 @@ #include <linux/linkage.h> #include <asm/dwarf2.h> #include <asm/cpufeatures.h> -#include <asm/alternative-asm.h> +#include <asm/alternative.h> .text .globl __kernel_vsyscall @@ -29,7 +29,7 @@ __kernel_vsyscall: * anyone with an AMD CPU, for example). Nonetheless, we try to keep * it working approximately as well as it ever worked. * - * This link may eludicate some of the history: + * This link may elucidate some of the history: * https://android-review.googlesource.com/#/q/Iac3295376d61ef83e713ac9b528f3b50aa780cd7 * personally, I find it hard to understand what's going on there. * diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 825e829ffff1..235a5794296a 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -358,7 +358,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr) mmap_write_lock(mm); /* * Check if we have already mapped vdso blob - fail to prevent - * abusing from userspace install_speciall_mapping, which may + * abusing from userspace install_special_mapping, which may * not do accounting and rlimit right. * We could search vma near context.vdso, but it's a slowpath, * so let's explicitly check all VMAs to be completely sure. diff --git a/arch/x86/entry/vdso/vsgx.S b/arch/x86/entry/vdso/vsgx.S index 86a0e94f68df..99dafac992e2 100644 --- a/arch/x86/entry/vdso/vsgx.S +++ b/arch/x86/entry/vdso/vsgx.S @@ -137,7 +137,7 @@ SYM_FUNC_START(__vdso_sgx_enter_enclave) /* * If the return from callback is zero or negative, return immediately, - * else re-execute ENCLU with the postive return value interpreted as + * else re-execute ENCLU with the positive return value interpreted as * the requested ENCLU function. */ cmp $0, %eax diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index 2c1791c4a518..9687a8aef01c 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -623,7 +623,7 @@ static void amd_pmu_disable_all(void) /* * Check each counter for overflow and wait for it to be reset by the * NMI if it has overflowed. This relies on the fact that all active - * counters are always enabled when this function is caled and + * counters are always enabled when this function is called and * ARCH_PERFMON_EVENTSEL_INT is always set. 
*/ for (idx = 0; idx < x86_pmu.num_counters; idx++) { diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c index be50ef8572cc..913745f1419b 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c @@ -14,12 +14,11 @@ #include <linux/init.h> #include <linux/cpumask.h> #include <linux/slab.h> +#include <linux/amd-iommu.h> #include "../perf_event.h" #include "iommu.h" -#define COUNTER_SHIFT 16 - /* iommu pmu conf masks */ #define GET_CSOURCE(x) ((x)->conf & 0xFFULL) #define GET_DEVID(x) (((x)->conf >> 8) & 0xFFFFULL) @@ -81,12 +80,12 @@ static struct attribute_group amd_iommu_events_group = { }; struct amd_iommu_event_desc { - struct kobj_attribute attr; + struct device_attribute attr; const char *event; }; -static ssize_t _iommu_event_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t _iommu_event_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct amd_iommu_event_desc *event = container_of(attr, struct amd_iommu_event_desc, attr); @@ -285,22 +284,31 @@ static void perf_iommu_start(struct perf_event *event, int flags) WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); hwc->state = 0; + /* + * To account for power-gating, which prevents write to + * the counter, we need to enable the counter + * before setting up counter register. + */ + perf_iommu_enable_event(event); + if (flags & PERF_EF_RELOAD) { - u64 prev_raw_count = local64_read(&hwc->prev_count); + u64 count = 0; struct amd_iommu *iommu = perf_event_2_iommu(event); + /* + * Since the IOMMU PMU only support counting mode, + * the counter always start with value zero. + */ amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr, - IOMMU_PC_COUNTER_REG, &prev_raw_count); + IOMMU_PC_COUNTER_REG, &count); } - perf_iommu_enable_event(event); perf_event_update_userpage(event); - } static void perf_iommu_read(struct perf_event *event) { - u64 count, prev, delta; + u64 count; struct hw_perf_event *hwc = &event->hw; struct amd_iommu *iommu = perf_event_2_iommu(event); @@ -311,14 +319,11 @@ static void perf_iommu_read(struct perf_event *event) /* IOMMU pc counter register is only 48 bits */ count &= GENMASK_ULL(47, 0); - prev = local64_read(&hwc->prev_count); - if (local64_cmpxchg(&hwc->prev_count, prev, count) != prev) - return; - - /* Handle 48-bit counter overflow */ - delta = (count << COUNTER_SHIFT) - (prev << COUNTER_SHIFT); - delta >>= COUNTER_SHIFT; - local64_add(delta, &event->count); + /* + * Since the counter always start with value zero, + * simply just accumulate the count for the event. + */ + local64_add(count, &event->count); } static void perf_iommu_stop(struct perf_event *event, int flags) @@ -328,15 +333,16 @@ static void perf_iommu_stop(struct perf_event *event, int flags) if (hwc->state & PERF_HES_UPTODATE) return; + /* + * To account for power-gating, in which reading the counter would + * return zero, we need to read the register before disabling. 
+ */ + perf_iommu_read(event); + hwc->state |= PERF_HES_UPTODATE; + perf_iommu_disable_event(event); WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); hwc->state |= PERF_HES_STOPPED; - - if (hwc->state & PERF_HES_UPTODATE) - return; - - perf_iommu_read(event); - hwc->state |= PERF_HES_UPTODATE; } static int perf_iommu_add(struct perf_event *event, int flags) diff --git a/arch/x86/events/amd/iommu.h b/arch/x86/events/amd/iommu.h index 0e5c036fd7be..e6310c635c8b 100644 --- a/arch/x86/events/amd/iommu.h +++ b/arch/x86/events/amd/iommu.h @@ -17,27 +17,8 @@ #define IOMMU_PC_DEVID_MATCH_REG 0x20 #define IOMMU_PC_COUNTER_REPORT_REG 0x28 -/* maximun specified bank/counters */ +/* maximum specified bank/counters */ #define PC_MAX_SPEC_BNKS 64 #define PC_MAX_SPEC_CNTRS 16 -struct amd_iommu; - -/* amd_iommu_init.c external support functions */ -extern int amd_iommu_get_num_iommus(void); - -extern bool amd_iommu_pc_supported(void); - -extern u8 amd_iommu_pc_get_max_banks(unsigned int idx); - -extern u8 amd_iommu_pc_get_max_counters(unsigned int idx); - -extern int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, - u8 fxn, u64 *value); - -extern int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, - u8 fxn, u64 *value); - -extern struct amd_iommu *get_amd_iommu(int idx); - #endif /*_PERF_EVENT_AMD_IOMMU_H_*/ diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 7f014d450bc2..582c0ffb5e98 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -275,14 +275,14 @@ static struct attribute_group amd_uncore_attr_group = { }; #define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \ -static ssize_t __uncore_##_var##_show(struct kobject *kobj, \ - struct kobj_attribute *attr, \ +static ssize_t __uncore_##_var##_show(struct device *dev, \ + struct device_attribute *attr, \ char *page) \ { \ BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ return sprintf(page, _format "\n"); \ } \ -static struct kobj_attribute format_attr_##_var = \ +static struct device_attribute format_attr_##_var = \ __ATTR(_name, 0444, __uncore_##_var##_show, NULL) DEFINE_UNCORE_FORMAT_ATTR(event12, event, "config:0-7,32-35"); diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 18df17129695..8e509325c2c3 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -45,13 +45,16 @@ #include "perf_event.h" struct x86_pmu x86_pmu __read_mostly; +static struct pmu pmu; DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, + .pmu = &pmu, }; DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key); DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key); +DEFINE_STATIC_KEY_FALSE(perf_is_hybrid); /* * This here uses DEFINE_STATIC_CALL_NULL() to get a static_call defined @@ -151,15 +154,16 @@ again: */ static int x86_pmu_extra_regs(u64 config, struct perf_event *event) { + struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs); struct hw_perf_event_extra *reg; struct extra_reg *er; reg = &event->hw.extra_reg; - if (!x86_pmu.extra_regs) + if (!extra_regs) return 0; - for (er = x86_pmu.extra_regs; er->msr; er++) { + for (er = extra_regs; er->msr; er++) { if (er->event != (config & er->config_mask)) continue; if (event->attr.config1 & ~er->valid_mask) @@ -182,16 +186,29 @@ static DEFINE_MUTEX(pmc_reserve_mutex); #ifdef CONFIG_X86_LOCAL_APIC +static inline int get_possible_num_counters(void) +{ + int i, num_counters = x86_pmu.num_counters; + + if (!is_hybrid()) + return num_counters; + + for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) + num_counters = 
max_t(int, num_counters, x86_pmu.hybrid_pmu[i].num_counters); + + return num_counters; +} + static bool reserve_pmc_hardware(void) { - int i; + int i, num_counters = get_possible_num_counters(); - for (i = 0; i < x86_pmu.num_counters; i++) { + for (i = 0; i < num_counters; i++) { if (!reserve_perfctr_nmi(x86_pmu_event_addr(i))) goto perfctr_fail; } - for (i = 0; i < x86_pmu.num_counters; i++) { + for (i = 0; i < num_counters; i++) { if (!reserve_evntsel_nmi(x86_pmu_config_addr(i))) goto eventsel_fail; } @@ -202,7 +219,7 @@ eventsel_fail: for (i--; i >= 0; i--) release_evntsel_nmi(x86_pmu_config_addr(i)); - i = x86_pmu.num_counters; + i = num_counters; perfctr_fail: for (i--; i >= 0; i--) @@ -213,9 +230,9 @@ perfctr_fail: static void release_pmc_hardware(void) { - int i; + int i, num_counters = get_possible_num_counters(); - for (i = 0; i < x86_pmu.num_counters; i++) { + for (i = 0; i < num_counters; i++) { release_perfctr_nmi(x86_pmu_event_addr(i)); release_evntsel_nmi(x86_pmu_config_addr(i)); } @@ -228,7 +245,7 @@ static void release_pmc_hardware(void) {} #endif -static bool check_hw_exists(void) +bool check_hw_exists(struct pmu *pmu, int num_counters, int num_counters_fixed) { u64 val, val_fail = -1, val_new= ~0; int i, reg, reg_fail = -1, ret = 0; @@ -239,7 +256,7 @@ static bool check_hw_exists(void) * Check to see if the BIOS enabled any of the counters, if so * complain and bail. */ - for (i = 0; i < x86_pmu.num_counters; i++) { + for (i = 0; i < num_counters; i++) { reg = x86_pmu_config_addr(i); ret = rdmsrl_safe(reg, &val); if (ret) @@ -253,15 +270,15 @@ static bool check_hw_exists(void) } } - if (x86_pmu.num_counters_fixed) { + if (num_counters_fixed) { reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; ret = rdmsrl_safe(reg, &val); if (ret) goto msr_fail; - for (i = 0; i < x86_pmu.num_counters_fixed; i++) { - if (fixed_counter_disabled(i)) + for (i = 0; i < num_counters_fixed; i++) { + if (fixed_counter_disabled(i, pmu)) continue; - if (val & (0x03 << i*4)) { + if (val & (0x03ULL << i*4)) { bios_fail = 1; val_fail = val; reg_fail = reg; @@ -360,8 +377,7 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event) return -EINVAL; cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX); - val = hw_cache_event_ids[cache_type][cache_op][cache_result]; - + val = hybrid_var(event->pmu, hw_cache_event_ids)[cache_type][cache_op][cache_result]; if (val == 0) return -ENOENT; @@ -369,7 +385,7 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event) return -EINVAL; hwc->config |= val; - attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result]; + attr->config1 = hybrid_var(event->pmu, hw_cache_extra_regs)[cache_type][cache_op][cache_result]; return x86_pmu_extra_regs(val, event); } @@ -462,7 +478,7 @@ int x86_setup_perfctr(struct perf_event *event) local64_set(&hwc->period_left, hwc->sample_period); } - if (attr->type == PERF_TYPE_RAW) + if (attr->type == event->pmu->type) return x86_pmu_extra_regs(event->attr.config, event); if (attr->type == PERF_TYPE_HW_CACHE) @@ -597,7 +613,7 @@ int x86_pmu_hw_config(struct perf_event *event) if (!event->attr.exclude_kernel) event->hw.config |= ARCH_PERFMON_EVENTSEL_OS; - if (event->attr.type == PERF_TYPE_RAW) + if (event->attr.type == event->pmu->type) event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK; if (event->attr.sample_period && x86_pmu.limit_period) { @@ -724,16 +740,33 @@ void x86_pmu_enable_all(int added) } } -static struct pmu pmu; - static inline int is_x86_event(struct perf_event 
*event) { - return event->pmu == &pmu; + int i; + + if (!is_hybrid()) + return event->pmu == &pmu; + + for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { + if (event->pmu == &x86_pmu.hybrid_pmu[i].pmu) + return true; + } + + return false; } -struct pmu *x86_get_pmu(void) +struct pmu *x86_get_pmu(unsigned int cpu) { - return &pmu; + struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + + /* + * All CPUs of the hybrid type have been offline. + * The x86_get_pmu() should not be invoked. + */ + if (WARN_ON_ONCE(!cpuc->pmu)) + return &pmu; + + return cpuc->pmu; } /* * Event scheduler state: @@ -765,7 +798,7 @@ struct perf_sched { }; /* - * Initialize interator that runs through all events and counters. + * Initialize iterator that runs through all events and counters. */ static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints, int num, int wmin, int wmax, int gpmax) @@ -936,6 +969,7 @@ EXPORT_SYMBOL_GPL(perf_assign_events); int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) { + int num_counters = hybrid(cpuc->pmu, num_counters); struct event_constraint *c; struct perf_event *e; int n0, i, wmin, wmax, unsched = 0; @@ -1011,7 +1045,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) /* slow path */ if (i != n) { - int gpmax = x86_pmu.num_counters; + int gpmax = num_counters; /* * Do not allow scheduling of more than half the available @@ -1032,7 +1066,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) * the extra Merge events needed by large increment events. */ if (x86_pmu.flags & PMU_FL_PAIR) { - gpmax = x86_pmu.num_counters - cpuc->n_pair; + gpmax = num_counters - cpuc->n_pair; WARN_ON(gpmax <= 0); } @@ -1096,8 +1130,9 @@ static void del_nr_metric_event(struct cpu_hw_events *cpuc, static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event, int max_count, int n) { + union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap); - if (x86_pmu.intel_cap.perf_metrics && add_nr_metric_event(cpuc, event)) + if (intel_cap.perf_metrics && add_nr_metric_event(cpuc, event)) return -EINVAL; if (n >= max_count + cpuc->n_metric) @@ -1118,10 +1153,12 @@ static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event, */ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp) { + int num_counters = hybrid(cpuc->pmu, num_counters); + int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed); struct perf_event *event; int n, max_count; - max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed; + max_count = num_counters + num_counters_fixed; /* current number of events already accepted */ n = cpuc->n_events; @@ -1480,7 +1517,6 @@ static void x86_pmu_start(struct perf_event *event, int flags) cpuc->events[idx] = event; __set_bit(idx, cpuc->active_mask); - __set_bit(idx, cpuc->running); static_call(x86_pmu_enable)(event); perf_event_update_userpage(event); } @@ -1489,18 +1525,19 @@ void perf_event_print_debug(void) { u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed; u64 pebs, debugctl; - struct cpu_hw_events *cpuc; + int cpu = smp_processor_id(); + struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + int num_counters = hybrid(cpuc->pmu, num_counters); + int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed); + struct event_constraint *pebs_constraints = hybrid(cpuc->pmu, pebs_constraints); unsigned long flags; - int cpu, idx; + int idx; - if (!x86_pmu.num_counters) + if (!num_counters) 
return; local_irq_save(flags); - cpu = smp_processor_id(); - cpuc = &per_cpu(cpu_hw_events, cpu); - if (x86_pmu.version >= 2) { rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); @@ -1512,7 +1549,7 @@ void perf_event_print_debug(void) pr_info("CPU#%d: status: %016llx\n", cpu, status); pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow); pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed); - if (x86_pmu.pebs_constraints) { + if (pebs_constraints) { rdmsrl(MSR_IA32_PEBS_ENABLE, pebs); pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs); } @@ -1523,7 +1560,7 @@ void perf_event_print_debug(void) } pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask); - for (idx = 0; idx < x86_pmu.num_counters; idx++) { + for (idx = 0; idx < num_counters; idx++) { rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl); rdmsrl(x86_pmu_event_addr(idx), pmc_count); @@ -1536,8 +1573,8 @@ void perf_event_print_debug(void) pr_info("CPU#%d: gen-PMC%d left: %016llx\n", cpu, idx, prev_left); } - for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { - if (fixed_counter_disabled(idx)) + for (idx = 0; idx < num_counters_fixed; idx++) { + if (fixed_counter_disabled(idx, cpuc->pmu)) continue; rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count); @@ -1573,6 +1610,7 @@ void x86_pmu_stop(struct perf_event *event, int flags) static void x86_pmu_del(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap); int i; /* @@ -1612,7 +1650,7 @@ static void x86_pmu_del(struct perf_event *event, int flags) } cpuc->event_constraint[i-1] = NULL; --cpuc->n_events; - if (x86_pmu.intel_cap.perf_metrics) + if (intel_cap.perf_metrics) del_nr_metric_event(cpuc, event); perf_event_update_userpage(event); @@ -1822,6 +1860,49 @@ ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr, pmu_attr->event_str_noht); } +ssize_t events_hybrid_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *page) +{ + struct perf_pmu_events_hybrid_attr *pmu_attr = + container_of(attr, struct perf_pmu_events_hybrid_attr, attr); + struct x86_hybrid_pmu *pmu; + const char *str, *next_str; + int i; + + if (hweight64(pmu_attr->pmu_type) == 1) + return sprintf(page, "%s", pmu_attr->event_str); + + /* + * Hybrid PMUs may support the same event name, but with different + * event encoding, e.g., the mem-loads event on an Atom PMU has + * different event encoding from a Core PMU. + * + * The event_str includes all event encodings. Each event encoding + * is divided by ";". The order of the event encodings must follow + * the order of the hybrid PMU index. 
+ */ + pmu = container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); + + str = pmu_attr->event_str; + for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { + if (!(x86_pmu.hybrid_pmu[i].cpu_type & pmu_attr->pmu_type)) + continue; + if (x86_pmu.hybrid_pmu[i].cpu_type & pmu->cpu_type) { + next_str = strchr(str, ';'); + if (next_str) + return snprintf(page, next_str - str + 1, "%s", str); + else + return sprintf(page, "%s", str); + } + str = strchr(str, ';'); + str++; + } + + return 0; +} +EXPORT_SYMBOL_GPL(events_hybrid_sysfs_show); + EVENT_ATTR(cpu-cycles, CPU_CYCLES ); EVENT_ATTR(instructions, INSTRUCTIONS ); EVENT_ATTR(cache-references, CACHE_REFERENCES ); @@ -1948,6 +2029,37 @@ static void _x86_pmu_read(struct perf_event *event) x86_perf_event_update(event); } +void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed, + u64 intel_ctrl) +{ + pr_info("... version: %d\n", x86_pmu.version); + pr_info("... bit width: %d\n", x86_pmu.cntval_bits); + pr_info("... generic registers: %d\n", num_counters); + pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask); + pr_info("... max period: %016Lx\n", x86_pmu.max_period); + pr_info("... fixed-purpose events: %lu\n", + hweight64((((1ULL << num_counters_fixed) - 1) + << INTEL_PMC_IDX_FIXED) & intel_ctrl)); + pr_info("... event mask: %016Lx\n", intel_ctrl); +} + +/* + * The generic code is not hybrid friendly. The hybrid_pmu->pmu + * of the first registered PMU is unconditionally assigned to + * each possible cpuctx->ctx.pmu. + * Update the correct hybrid PMU to the cpuctx->ctx.pmu. + */ +void x86_pmu_update_cpu_context(struct pmu *pmu, int cpu) +{ + struct perf_cpu_context *cpuctx; + + if (!pmu->pmu_cpu_context) + return; + + cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); + cpuctx->ctx.pmu = pmu; +} + static int __init init_hw_perf_events(void) { struct x86_pmu_quirk *quirk; @@ -1981,7 +2093,7 @@ static int __init init_hw_perf_events(void) pmu_check_apic(); /* sanity check that the hardware exists or is emulated */ - if (!check_hw_exists()) + if (!check_hw_exists(&pmu, x86_pmu.num_counters, x86_pmu.num_counters_fixed)) return 0; pr_cont("%s PMU driver.\n", x86_pmu.name); @@ -2008,15 +2120,11 @@ static int __init init_hw_perf_events(void) pmu.attr_update = x86_pmu.attr_update; - pr_info("... version: %d\n", x86_pmu.version); - pr_info("... bit width: %d\n", x86_pmu.cntval_bits); - pr_info("... generic registers: %d\n", x86_pmu.num_counters); - pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask); - pr_info("... max period: %016Lx\n", x86_pmu.max_period); - pr_info("... fixed-purpose events: %lu\n", - hweight64((((1ULL << x86_pmu.num_counters_fixed) - 1) - << INTEL_PMC_IDX_FIXED) & x86_pmu.intel_ctrl)); - pr_info("... 
event mask: %016Lx\n", x86_pmu.intel_ctrl); + if (!is_hybrid()) { + x86_pmu_show_pmu_cap(x86_pmu.num_counters, + x86_pmu.num_counters_fixed, + x86_pmu.intel_ctrl); + } if (!x86_pmu.read) x86_pmu.read = _x86_pmu_read; @@ -2046,9 +2154,46 @@ static int __init init_hw_perf_events(void) if (err) goto out1; - err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); - if (err) - goto out2; + if (!is_hybrid()) { + err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); + if (err) + goto out2; + } else { + u8 cpu_type = get_this_hybrid_cpu_type(); + struct x86_hybrid_pmu *hybrid_pmu; + int i, j; + + if (!cpu_type && x86_pmu.get_hybrid_cpu_type) + cpu_type = x86_pmu.get_hybrid_cpu_type(); + + for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { + hybrid_pmu = &x86_pmu.hybrid_pmu[i]; + + hybrid_pmu->pmu = pmu; + hybrid_pmu->pmu.type = -1; + hybrid_pmu->pmu.attr_update = x86_pmu.attr_update; + hybrid_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS; + hybrid_pmu->pmu.capabilities |= PERF_PMU_CAP_EXTENDED_HW_TYPE; + + err = perf_pmu_register(&hybrid_pmu->pmu, hybrid_pmu->name, + (hybrid_pmu->cpu_type == hybrid_big) ? PERF_TYPE_RAW : -1); + if (err) + break; + + if (cpu_type == hybrid_pmu->cpu_type) + x86_pmu_update_cpu_context(&hybrid_pmu->pmu, raw_smp_processor_id()); + } + + if (i < x86_pmu.num_hybrid_pmus) { + for (j = 0; j < i; j++) + perf_pmu_unregister(&x86_pmu.hybrid_pmu[j].pmu); + pr_warn("Failed to register hybrid PMUs\n"); + kfree(x86_pmu.hybrid_pmu); + x86_pmu.hybrid_pmu = NULL; + x86_pmu.num_hybrid_pmus = 0; + goto out2; + } + } return 0; @@ -2173,16 +2318,27 @@ static void free_fake_cpuc(struct cpu_hw_events *cpuc) kfree(cpuc); } -static struct cpu_hw_events *allocate_fake_cpuc(void) +static struct cpu_hw_events *allocate_fake_cpuc(struct pmu *event_pmu) { struct cpu_hw_events *cpuc; - int cpu = raw_smp_processor_id(); + int cpu; cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL); if (!cpuc) return ERR_PTR(-ENOMEM); cpuc->is_fake = 1; + if (is_hybrid()) { + struct x86_hybrid_pmu *h_pmu; + + h_pmu = hybrid_pmu(event_pmu); + if (cpumask_empty(&h_pmu->supported_cpus)) + goto error; + cpu = cpumask_first(&h_pmu->supported_cpus); + } else + cpu = raw_smp_processor_id(); + cpuc->pmu = event_pmu; + if (intel_cpuc_prepare(cpuc, cpu)) goto error; @@ -2201,7 +2357,7 @@ static int validate_event(struct perf_event *event) struct event_constraint *c; int ret = 0; - fake_cpuc = allocate_fake_cpuc(); + fake_cpuc = allocate_fake_cpuc(event->pmu); if (IS_ERR(fake_cpuc)) return PTR_ERR(fake_cpuc); @@ -2235,7 +2391,27 @@ static int validate_group(struct perf_event *event) struct cpu_hw_events *fake_cpuc; int ret = -EINVAL, n; - fake_cpuc = allocate_fake_cpuc(); + /* + * Reject events from different hybrid PMUs. 
+ */ + if (is_hybrid()) { + struct perf_event *sibling; + struct pmu *pmu = NULL; + + if (is_x86_event(leader)) + pmu = leader->pmu; + + for_each_sibling_event(sibling, leader) { + if (!is_x86_event(sibling)) + continue; + if (!pmu) + pmu = sibling->pmu; + else if (pmu != sibling->pmu) + return ret; + } + } + + fake_cpuc = allocate_fake_cpuc(event->pmu); if (IS_ERR(fake_cpuc)) return PTR_ERR(fake_cpuc); /* @@ -2263,35 +2439,26 @@ out: static int x86_pmu_event_init(struct perf_event *event) { - struct pmu *tmp; + struct x86_hybrid_pmu *pmu = NULL; int err; - switch (event->attr.type) { - case PERF_TYPE_RAW: - case PERF_TYPE_HARDWARE: - case PERF_TYPE_HW_CACHE: - break; - - default: + if ((event->attr.type != event->pmu->type) && + (event->attr.type != PERF_TYPE_HARDWARE) && + (event->attr.type != PERF_TYPE_HW_CACHE)) return -ENOENT; + + if (is_hybrid() && (event->cpu != -1)) { + pmu = hybrid_pmu(event->pmu); + if (!cpumask_test_cpu(event->cpu, &pmu->supported_cpus)) + return -ENOENT; } err = __x86_pmu_event_init(event); if (!err) { - /* - * we temporarily connect event to its pmu - * such that validate_group() can classify - * it as an x86 event using is_x86_event() - */ - tmp = event->pmu; - event->pmu = &pmu; - if (event->group_leader != event) err = validate_group(event); else err = validate_event(event); - - event->pmu = tmp; } if (err) { if (event->destroy) @@ -2475,6 +2642,14 @@ static int x86_pmu_aux_output_match(struct perf_event *event) return 0; } +static int x86_pmu_filter_match(struct perf_event *event) +{ + if (x86_pmu.filter_match) + return x86_pmu.filter_match(event); + + return 1; +} + static struct pmu pmu = { .pmu_enable = x86_pmu_enable, .pmu_disable = x86_pmu_disable, @@ -2502,6 +2677,8 @@ static struct pmu pmu = { .check_period = x86_pmu_check_period, .aux_output_match = x86_pmu_aux_output_match, + + .filter_match = x86_pmu_filter_match, }; void arch_perf_update_userpage(struct perf_event *event, @@ -2770,6 +2947,11 @@ unsigned long perf_misc_flags(struct pt_regs *regs) void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap) { cap->version = x86_pmu.version; + /* + * KVM doesn't support the hybrid PMU yet. + * Return the common value in global x86_pmu, + * which available for all cores. + */ cap->num_counters_gp = x86_pmu.num_counters; cap->num_counters_fixed = x86_pmu.num_counters_fixed; cap->bit_width_gp = x86_pmu.cntval_bits; diff --git a/arch/x86/events/intel/Makefile b/arch/x86/events/intel/Makefile index e67a5886336c..10bde6c5abb2 100644 --- a/arch/x86/events/intel/Makefile +++ b/arch/x86/events/intel/Makefile @@ -3,6 +3,6 @@ obj-$(CONFIG_CPU_SUP_INTEL) += core.o bts.o obj-$(CONFIG_CPU_SUP_INTEL) += ds.o knc.o obj-$(CONFIG_CPU_SUP_INTEL) += lbr.o p4.o p6.o pt.o obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel-uncore.o -intel-uncore-objs := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o +intel-uncore-objs := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o uncore_discovery.o obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE) += intel-cstate.o intel-cstate-objs := cstate.o diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index 731dd8d0dbb1..6320d2cfd9d3 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c @@ -594,7 +594,7 @@ static __init int bts_init(void) * we cannot use the user mapping since it will not be available * if we're not running the owning process. 
* - * With PTI we can't use the kernal map either, because its not + * With PTI we can't use the kernel map either, because its not * there when we run userspace. * * For now, disable this driver when using PTI. diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 7bbb5bb98d8c..2521d03de5e0 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -137,7 +137,7 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly = FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */ - INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMTPY */ + INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */ INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */ INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */ INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */ @@ -2076,6 +2076,14 @@ static struct extra_reg intel_tnt_extra_regs[] __read_mostly = { EVENT_EXTRA_END }; +static struct extra_reg intel_grt_extra_regs[] __read_mostly = { + /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ + INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), + INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), + INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0), + EVENT_EXTRA_END +}; + #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */ #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */ #define KNL_MCDRAM_LOCAL BIT_ULL(21) @@ -2153,10 +2161,11 @@ static void intel_pmu_disable_all(void) static void __intel_pmu_enable_all(int added, bool pmi) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); intel_pmu_lbr_enable_all(pmi); wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, - x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask); + intel_ctrl & ~cpuc->intel_ctrl_guest_mask); if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { struct perf_event *event = @@ -2186,7 +2195,7 @@ static void intel_pmu_enable_all(int added) * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either * in sequence on the same PMC or on different PMCs. * - * In practise it appears some of these events do in fact count, and + * In practice it appears some of these events do in fact count, and * we need to program all 4 events. */ static void intel_pmu_nhm_workaround(void) @@ -2429,13 +2438,23 @@ static int icl_set_topdown_event_period(struct perf_event *event) return 0; } +static int adl_set_topdown_event_period(struct perf_event *event) +{ + struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); + + if (pmu->cpu_type != hybrid_big) + return 0; + + return icl_set_topdown_event_period(event); +} + static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx) { u32 val; /* * The metric is reported as an 8bit integer fraction - * suming up to 0xff. + * summing up to 0xff. 
* slots-in-metric = (Metric / 0xff) * slots */ val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff; @@ -2569,6 +2588,17 @@ static u64 icl_update_topdown_event(struct perf_event *event) x86_pmu.num_topdown_events - 1); } +static u64 adl_update_topdown_event(struct perf_event *event) +{ + struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); + + if (pmu->cpu_type != hybrid_big) + return 0; + + return icl_update_topdown_event(event); +} + + static void intel_pmu_read_topdown_event(struct perf_event *event) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); @@ -2709,22 +2739,25 @@ int intel_pmu_save_and_restart(struct perf_event *event) static void intel_pmu_reset(void) { struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed); + int num_counters = hybrid(cpuc->pmu, num_counters); unsigned long flags; int idx; - if (!x86_pmu.num_counters) + if (!num_counters) return; local_irq_save(flags); pr_info("clearing PMU state on CPU#%d\n", smp_processor_id()); - for (idx = 0; idx < x86_pmu.num_counters; idx++) { + for (idx = 0; idx < num_counters; idx++) { wrmsrl_safe(x86_pmu_config_addr(idx), 0ull); wrmsrl_safe(x86_pmu_event_addr(idx), 0ull); } - for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { - if (fixed_counter_disabled(idx)) + for (idx = 0; idx < num_counters_fixed; idx++) { + if (fixed_counter_disabled(idx, cpuc->pmu)) continue; wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); } @@ -2753,6 +2786,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int bit; int handled = 0; + u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); inc_irq_stat(apic_perf_irqs); @@ -2776,7 +2810,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) * processing loop coming after that the function, otherwise * phony regular samples may be generated in the sampling buffer * not marked with the EXACT tag. Another possibility is to have - * one PEBS event and at least one non-PEBS event whic hoverflows + * one PEBS event and at least one non-PEBS event which overflows * while PEBS has armed. In this case, bit 62 of GLOBAL_STATUS will * not be set, yet the overflow status bit for the PEBS counter will * be on Skylake. @@ -2798,7 +2832,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) handled++; x86_pmu.drain_pebs(regs, &data); - status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI; + status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI; /* * PMI throttle may be triggered, which stops the PEBS event. 
@@ -2824,7 +2858,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) } /* - * Intel Perf mertrics + * Intel Perf metrics */ if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) { handled++; @@ -2961,8 +2995,10 @@ intel_vlbr_constraints(struct perf_event *event) return NULL; } -static int intel_alt_er(int idx, u64 config) +static int intel_alt_er(struct cpu_hw_events *cpuc, + int idx, u64 config) { + struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs); int alt_idx = idx; if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1)) @@ -2974,7 +3010,7 @@ static int intel_alt_er(int idx, u64 config) if (idx == EXTRA_REG_RSP_1) alt_idx = EXTRA_REG_RSP_0; - if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask) + if (config & ~extra_regs[alt_idx].valid_mask) return idx; return alt_idx; @@ -2982,15 +3018,16 @@ static int intel_alt_er(int idx, u64 config) static void intel_fixup_er(struct perf_event *event, int idx) { + struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs); event->hw.extra_reg.idx = idx; if (idx == EXTRA_REG_RSP_0) { event->hw.config &= ~INTEL_ARCH_EVENT_MASK; - event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event; + event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event; event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; } else if (idx == EXTRA_REG_RSP_1) { event->hw.config &= ~INTEL_ARCH_EVENT_MASK; - event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event; + event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event; event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; } } @@ -3066,7 +3103,7 @@ again: */ c = NULL; } else { - idx = intel_alt_er(idx, reg->config); + idx = intel_alt_er(cpuc, idx, reg->config); if (idx != reg->idx) { raw_spin_unlock_irqrestore(&era->lock, flags); goto again; @@ -3131,10 +3168,11 @@ struct event_constraint * x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) { + struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints); struct event_constraint *c; - if (x86_pmu.event_constraints) { - for_each_event_constraint(c, x86_pmu.event_constraints) { + if (event_constraints) { + for_each_event_constraint(c, event_constraints) { if (constraint_match(c, event->hw.config)) { event->hw.flags |= c->flags; return c; @@ -3142,7 +3180,7 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, } } - return &unconstrained; + return &hybrid_var(cpuc->pmu, unconstrained); } static struct event_constraint * @@ -3646,6 +3684,23 @@ static inline bool is_mem_loads_aux_event(struct perf_event *event) return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82); } +static inline bool require_mem_loads_aux_event(struct perf_event *event) +{ + if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX)) + return false; + + if (is_hybrid()) + return hybrid_pmu(event->pmu)->cpu_type == hybrid_big; + + return true; +} + +static inline bool intel_pmu_has_cap(struct perf_event *event, int idx) +{ + union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap); + + return test_bit(idx, (unsigned long *)&intel_cap->capabilities); +} static int intel_pmu_hw_config(struct perf_event *event) { @@ -3659,6 +3714,9 @@ static int intel_pmu_hw_config(struct perf_event *event) return ret; if (event->attr.precise_ip) { + if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT) + return -EINVAL; + if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) { event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; if 
(!(event->attr.sample_type & @@ -3699,7 +3757,8 @@ static int intel_pmu_hw_config(struct perf_event *event) event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT; } - if (event->attr.type != PERF_TYPE_RAW) + if ((event->attr.type == PERF_TYPE_HARDWARE) || + (event->attr.type == PERF_TYPE_HW_CACHE)) return 0; /* @@ -3712,7 +3771,7 @@ static int intel_pmu_hw_config(struct perf_event *event) * with a slots event as group leader. When the slots event * is used in a metrics group, it too cannot support sampling. */ - if (x86_pmu.intel_cap.perf_metrics && is_topdown_event(event)) { + if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) { if (event->attr.config1 || event->attr.config2) return -EINVAL; @@ -3763,7 +3822,7 @@ static int intel_pmu_hw_config(struct perf_event *event) * event. The rule is to simplify the implementation of the check. * That's because perf cannot have a complete group at the moment. */ - if (x86_pmu.flags & PMU_FL_MEM_LOADS_AUX && + if (require_mem_loads_aux_event(event) && (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) && is_mem_loads_event(event)) { struct perf_event *leader = event->group_leader; @@ -3798,10 +3857,11 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; + u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL; - arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask; - arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask; + arr[0].host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask; + arr[0].guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask; if (x86_pmu.flags & PMU_FL_PEBS_ALL) arr[0].guest &= ~cpuc->pebs_enabled; else @@ -4039,6 +4099,39 @@ tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx, return c; } +static struct event_constraint * +adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, + struct perf_event *event) +{ + struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); + + if (pmu->cpu_type == hybrid_big) + return spr_get_event_constraints(cpuc, idx, event); + else if (pmu->cpu_type == hybrid_small) + return tnt_get_event_constraints(cpuc, idx, event); + + WARN_ON(1); + return &emptyconstraint; +} + +static int adl_hw_config(struct perf_event *event) +{ + struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); + + if (pmu->cpu_type == hybrid_big) + return hsw_hw_config(event); + else if (pmu->cpu_type == hybrid_small) + return intel_pmu_hw_config(event); + + WARN_ON(1); + return -EOPNOTSUPP; +} + +static u8 adl_get_hybrid_cpu_type(void) +{ + return hybrid_big; +} + /* * Broadwell: * @@ -4142,7 +4235,7 @@ int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) { cpuc->pebs_record_size = x86_pmu.pebs_record_size; - if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) { + if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) { cpuc->shared_regs = allocate_shared_regs(cpu); if (!cpuc->shared_regs) goto err; @@ -4196,12 +4289,62 @@ static void flip_smm_bit(void *data) } } +static bool init_hybrid_pmu(int cpu) +{ + struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + u8 cpu_type = get_this_hybrid_cpu_type(); + struct x86_hybrid_pmu *pmu = NULL; + int i; + + if (!cpu_type && x86_pmu.get_hybrid_cpu_type) + cpu_type = x86_pmu.get_hybrid_cpu_type(); + + for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { + if (x86_pmu.hybrid_pmu[i].cpu_type == cpu_type) { + pmu = &x86_pmu.hybrid_pmu[i]; + break; + } + } + if 
(WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) { + cpuc->pmu = NULL; + return false; + } + + /* Only check and dump the PMU information for the first CPU */ + if (!cpumask_empty(&pmu->supported_cpus)) + goto end; + + if (!check_hw_exists(&pmu->pmu, pmu->num_counters, pmu->num_counters_fixed)) + return false; + + pr_info("%s PMU driver: ", pmu->name); + + if (pmu->intel_cap.pebs_output_pt_available) + pr_cont("PEBS-via-PT "); + + pr_cont("\n"); + + x86_pmu_show_pmu_cap(pmu->num_counters, pmu->num_counters_fixed, + pmu->intel_ctrl); + +end: + cpumask_set_cpu(cpu, &pmu->supported_cpus); + cpuc->pmu = &pmu->pmu; + + x86_pmu_update_cpu_context(&pmu->pmu, cpu); + + return true; +} + static void intel_pmu_cpu_starting(int cpu) { struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); int core_id = topology_core_id(cpu); int i; + if (is_hybrid() && !init_hybrid_pmu(cpu)) + return; + init_debug_store_on_cpu(cpu); /* * Deal with CPUs that don't clear their LBRs on power-up. @@ -4219,8 +4362,16 @@ static void intel_pmu_cpu_starting(int cpu) if (x86_pmu.version > 1) flip_smm_bit(&x86_pmu.attr_freeze_on_smi); - /* Disable perf metrics if any added CPU doesn't support it. */ - if (x86_pmu.intel_cap.perf_metrics) { + /* + * Disable perf metrics if any added CPU doesn't support it. + * + * Turn off the check for a hybrid architecture, because the + * architecture MSR, MSR_IA32_PERF_CAPABILITIES, only indicate + * the architecture features. The perf metrics is a model-specific + * feature for now. The corresponding bit should always be 0 on + * a hybrid platform, e.g., Alder Lake. + */ + if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) { union perf_capabilities perf_cap; rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities); @@ -4307,7 +4458,12 @@ void intel_cpuc_finish(struct cpu_hw_events *cpuc) static void intel_pmu_cpu_dead(int cpu) { - intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu)); + struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + + intel_cpuc_finish(cpuc); + + if (is_hybrid() && cpuc->pmu) + cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus); } static void intel_pmu_sched_task(struct perf_event_context *ctx, @@ -4336,6 +4492,14 @@ static int intel_pmu_aux_output_match(struct perf_event *event) return is_intel_pt_event(event); } +static int intel_pmu_filter_match(struct perf_event *event) +{ + struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); + unsigned int cpu = smp_processor_id(); + + return cpumask_test_cpu(cpu, &pmu->supported_cpus); +} + PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); PMU_FORMAT_ATTR(ldlat, "config1:0-15"); @@ -4513,7 +4677,7 @@ static const struct x86_cpu_desc isolation_ucodes[] = { INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009), INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009), INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002), - INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014), + INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 1, 0x0b000014), INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021), INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000), INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000), @@ -4591,7 +4755,7 @@ static bool check_msr(unsigned long msr, u64 mask) /* * Disable the check for real HW, so we don't - * mess with potentionaly enabled registers: + * mess with potentially enabled registers: */ if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) return true; @@ -4656,7 +4820,7 @@ static __init void intel_arch_events_quirk(void) { int bit; - /* disable event that reported as not presend by cpuid */ + /* disable event that 
reported as not present by cpuid */ for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) { intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0; pr_warn("CPUID marked event: \'%s\' unavailable\n", @@ -4876,7 +5040,7 @@ static void update_tfa_sched(void *ignored) * and if so force schedule out for all event types all contexts */ if (test_bit(3, cpuc->active_mask)) - perf_pmu_resched(x86_get_pmu()); + perf_pmu_resched(x86_get_pmu(smp_processor_id())); } static ssize_t show_sysctl_tfa(struct device *cdev, @@ -5038,8 +5202,299 @@ static const struct attribute_group *attr_update[] = { NULL, }; +EVENT_ATTR_STR_HYBRID(slots, slots_adl, "event=0x00,umask=0x4", hybrid_big); +EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_adl, "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small); +EVENT_ATTR_STR_HYBRID(topdown-bad-spec, td_bad_spec_adl, "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small); +EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_adl, "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small); +EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_adl, "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small); +EVENT_ATTR_STR_HYBRID(topdown-heavy-ops, td_heavy_ops_adl, "event=0x00,umask=0x84", hybrid_big); +EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl, "event=0x00,umask=0x85", hybrid_big); +EVENT_ATTR_STR_HYBRID(topdown-fetch-lat, td_fetch_lat_adl, "event=0x00,umask=0x86", hybrid_big); +EVENT_ATTR_STR_HYBRID(topdown-mem-bound, td_mem_bound_adl, "event=0x00,umask=0x87", hybrid_big); + +static struct attribute *adl_hybrid_events_attrs[] = { + EVENT_PTR(slots_adl), + EVENT_PTR(td_retiring_adl), + EVENT_PTR(td_bad_spec_adl), + EVENT_PTR(td_fe_bound_adl), + EVENT_PTR(td_be_bound_adl), + EVENT_PTR(td_heavy_ops_adl), + EVENT_PTR(td_br_mis_adl), + EVENT_PTR(td_fetch_lat_adl), + EVENT_PTR(td_mem_bound_adl), + NULL, +}; + +/* Must be in IDX order */ +EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small); +EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", hybrid_big_small); +EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82", hybrid_big); + +static struct attribute *adl_hybrid_mem_attrs[] = { + EVENT_PTR(mem_ld_adl), + EVENT_PTR(mem_st_adl), + EVENT_PTR(mem_ld_aux_adl), + NULL, +}; + +EVENT_ATTR_STR_HYBRID(tx-start, tx_start_adl, "event=0xc9,umask=0x1", hybrid_big); +EVENT_ATTR_STR_HYBRID(tx-commit, tx_commit_adl, "event=0xc9,umask=0x2", hybrid_big); +EVENT_ATTR_STR_HYBRID(tx-abort, tx_abort_adl, "event=0xc9,umask=0x4", hybrid_big); +EVENT_ATTR_STR_HYBRID(tx-conflict, tx_conflict_adl, "event=0x54,umask=0x1", hybrid_big); +EVENT_ATTR_STR_HYBRID(cycles-t, cycles_t_adl, "event=0x3c,in_tx=1", hybrid_big); +EVENT_ATTR_STR_HYBRID(cycles-ct, cycles_ct_adl, "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big); +EVENT_ATTR_STR_HYBRID(tx-capacity-read, tx_capacity_read_adl, "event=0x54,umask=0x80", hybrid_big); +EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2", hybrid_big); + +static struct attribute *adl_hybrid_tsx_attrs[] = { + EVENT_PTR(tx_start_adl), + EVENT_PTR(tx_abort_adl), + EVENT_PTR(tx_commit_adl), + EVENT_PTR(tx_capacity_read_adl), + EVENT_PTR(tx_capacity_write_adl), + EVENT_PTR(tx_conflict_adl), + EVENT_PTR(cycles_t_adl), + EVENT_PTR(cycles_ct_adl), + NULL, +}; + +FORMAT_ATTR_HYBRID(in_tx, hybrid_big); +FORMAT_ATTR_HYBRID(in_tx_cp, hybrid_big); 
+FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small); +FORMAT_ATTR_HYBRID(ldlat, hybrid_big_small); +FORMAT_ATTR_HYBRID(frontend, hybrid_big); + +static struct attribute *adl_hybrid_extra_attr_rtm[] = { + FORMAT_HYBRID_PTR(in_tx), + FORMAT_HYBRID_PTR(in_tx_cp), + FORMAT_HYBRID_PTR(offcore_rsp), + FORMAT_HYBRID_PTR(ldlat), + FORMAT_HYBRID_PTR(frontend), + NULL, +}; + +static struct attribute *adl_hybrid_extra_attr[] = { + FORMAT_HYBRID_PTR(offcore_rsp), + FORMAT_HYBRID_PTR(ldlat), + FORMAT_HYBRID_PTR(frontend), + NULL, +}; + +static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr) +{ + struct device *dev = kobj_to_dev(kobj); + struct x86_hybrid_pmu *pmu = + container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); + struct perf_pmu_events_hybrid_attr *pmu_attr = + container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr); + + return pmu->cpu_type & pmu_attr->pmu_type; +} + +static umode_t hybrid_events_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + return is_attr_for_this_pmu(kobj, attr) ? attr->mode : 0; +} + +static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu) +{ + int cpu = cpumask_first(&pmu->supported_cpus); + + return (cpu >= nr_cpu_ids) ? -1 : cpu; +} + +static umode_t hybrid_tsx_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct device *dev = kobj_to_dev(kobj); + struct x86_hybrid_pmu *pmu = + container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); + int cpu = hybrid_find_supported_cpu(pmu); + + return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? attr->mode : 0; +} + +static umode_t hybrid_format_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct device *dev = kobj_to_dev(kobj); + struct x86_hybrid_pmu *pmu = + container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); + struct perf_pmu_format_hybrid_attr *pmu_attr = + container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr); + int cpu = hybrid_find_supported_cpu(pmu); + + return (cpu >= 0) && (pmu->cpu_type & pmu_attr->pmu_type) ? 
attr->mode : 0; +} + +static struct attribute_group hybrid_group_events_td = { + .name = "events", + .is_visible = hybrid_events_is_visible, +}; + +static struct attribute_group hybrid_group_events_mem = { + .name = "events", + .is_visible = hybrid_events_is_visible, +}; + +static struct attribute_group hybrid_group_events_tsx = { + .name = "events", + .is_visible = hybrid_tsx_is_visible, +}; + +static struct attribute_group hybrid_group_format_extra = { + .name = "format", + .is_visible = hybrid_format_is_visible, +}; + +static ssize_t intel_hybrid_get_attr_cpus(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct x86_hybrid_pmu *pmu = + container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); + + return cpumap_print_to_pagebuf(true, buf, &pmu->supported_cpus); +} + +static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL); +static struct attribute *intel_hybrid_cpus_attrs[] = { + &dev_attr_cpus.attr, + NULL, +}; + +static struct attribute_group hybrid_group_cpus = { + .attrs = intel_hybrid_cpus_attrs, +}; + +static const struct attribute_group *hybrid_attr_update[] = { + &hybrid_group_events_td, + &hybrid_group_events_mem, + &hybrid_group_events_tsx, + &group_caps_gen, + &group_caps_lbr, + &hybrid_group_format_extra, + &group_default, + &hybrid_group_cpus, + NULL, +}; + static struct attribute *empty_attrs; +static void intel_pmu_check_num_counters(int *num_counters, + int *num_counters_fixed, + u64 *intel_ctrl, u64 fixed_mask) +{ + if (*num_counters > INTEL_PMC_MAX_GENERIC) { + WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", + *num_counters, INTEL_PMC_MAX_GENERIC); + *num_counters = INTEL_PMC_MAX_GENERIC; + } + *intel_ctrl = (1ULL << *num_counters) - 1; + + if (*num_counters_fixed > INTEL_PMC_MAX_FIXED) { + WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", + *num_counters_fixed, INTEL_PMC_MAX_FIXED); + *num_counters_fixed = INTEL_PMC_MAX_FIXED; + } + + *intel_ctrl |= fixed_mask << INTEL_PMC_IDX_FIXED; +} + +static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints, + int num_counters, + int num_counters_fixed, + u64 intel_ctrl) +{ + struct event_constraint *c; + + if (!event_constraints) + return; + + /* + * event on fixed counter2 (REF_CYCLES) only works on this + * counter, so do not extend mask to generic counters + */ + for_each_event_constraint(c, event_constraints) { + /* + * Don't extend the topdown slots and metrics + * events to the generic counters. + */ + if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) { + /* + * Disable topdown slots and metrics events, + * if slots event is not in CPUID. + */ + if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl)) + c->idxmsk64 = 0; + c->weight = hweight64(c->idxmsk64); + continue; + } + + if (c->cmask == FIXED_EVENT_FLAGS) { + /* Disabled fixed counters which are not in CPUID */ + c->idxmsk64 &= intel_ctrl; + + if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) + c->idxmsk64 |= (1ULL << num_counters) - 1; + } + c->idxmsk64 &= + ~(~0ULL << (INTEL_PMC_IDX_FIXED + num_counters_fixed)); + c->weight = hweight64(c->idxmsk64); + } +} + +static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs) +{ + struct extra_reg *er; + + /* + * Access extra MSR may cause #GP under certain circumstances. + * E.g. KVM doesn't support offcore event + * Check all extra_regs here. 
+ */ + if (!extra_regs) + return; + + for (er = extra_regs; er->msr; er++) { + er->extra_msr_access = check_msr(er->msr, 0x11UL); + /* Disable LBR select mapping */ + if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access) + x86_pmu.lbr_sel_map = NULL; + } +} + +static void intel_pmu_check_hybrid_pmus(u64 fixed_mask) +{ + struct x86_hybrid_pmu *pmu; + int i; + + for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { + pmu = &x86_pmu.hybrid_pmu[i]; + + intel_pmu_check_num_counters(&pmu->num_counters, + &pmu->num_counters_fixed, + &pmu->intel_ctrl, + fixed_mask); + + if (pmu->intel_cap.perf_metrics) { + pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; + pmu->intel_ctrl |= INTEL_PMC_MSK_FIXED_SLOTS; + } + + if (pmu->intel_cap.pebs_output_pt_available) + pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT; + + intel_pmu_check_event_constraints(pmu->event_constraints, + pmu->num_counters, + pmu->num_counters_fixed, + pmu->intel_ctrl); + + intel_pmu_check_extra_regs(pmu->extra_regs); + } +} + __init int intel_pmu_init(void) { struct attribute **extra_skl_attr = &empty_attrs; @@ -5050,12 +5505,11 @@ __init int intel_pmu_init(void) union cpuid10_edx edx; union cpuid10_eax eax; union cpuid10_ebx ebx; - struct event_constraint *c; unsigned int fixed_mask; - struct extra_reg *er; bool pmem = false; int version, i; char *name; + struct x86_hybrid_pmu *pmu; if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { switch (boot_cpu_data.x86) { @@ -5650,6 +6104,99 @@ __init int intel_pmu_init(void) name = "sapphire_rapids"; break; + case INTEL_FAM6_ALDERLAKE: + case INTEL_FAM6_ALDERLAKE_L: + /* + * Alder Lake has 2 types of CPU, core and atom. + * + * Initialize the common PerfMon capabilities here. + */ + x86_pmu.hybrid_pmu = kcalloc(X86_HYBRID_NUM_PMUS, + sizeof(struct x86_hybrid_pmu), + GFP_KERNEL); + if (!x86_pmu.hybrid_pmu) + return -ENOMEM; + static_branch_enable(&perf_is_hybrid); + x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS; + + x86_pmu.late_ack = true; + x86_pmu.pebs_aliases = NULL; + x86_pmu.pebs_prec_dist = true; + x86_pmu.pebs_block = true; + x86_pmu.flags |= PMU_FL_HAS_RSP_1; + x86_pmu.flags |= PMU_FL_NO_HT_SHARING; + x86_pmu.flags |= PMU_FL_PEBS_ALL; + x86_pmu.flags |= PMU_FL_INSTR_LATENCY; + x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; + x86_pmu.lbr_pt_coexist = true; + intel_pmu_pebs_data_source_skl(false); + x86_pmu.num_topdown_events = 8; + x86_pmu.update_topdown_event = adl_update_topdown_event; + x86_pmu.set_topdown_event_period = adl_set_topdown_event_period; + + x86_pmu.filter_match = intel_pmu_filter_match; + x86_pmu.get_event_constraints = adl_get_event_constraints; + x86_pmu.hw_config = adl_hw_config; + x86_pmu.limit_period = spr_limit_period; + x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type; + /* + * The rtm_abort_event is used to check whether to enable GPRs + * for the RTM abort event. Atom doesn't have the RTM abort + * event. There is no harmful to set it in the common + * x86_pmu.rtm_abort_event. + */ + x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); + + td_attr = adl_hybrid_events_attrs; + mem_attr = adl_hybrid_mem_attrs; + tsx_attr = adl_hybrid_tsx_attrs; + extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
+ adl_hybrid_extra_attr_rtm : adl_hybrid_extra_attr; + + /* Initialize big core specific PerfMon capabilities.*/ + pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; + pmu->name = "cpu_core"; + pmu->cpu_type = hybrid_big; + pmu->num_counters = x86_pmu.num_counters + 2; + pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1; + pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters); + pmu->unconstrained = (struct event_constraint) + __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, + 0, pmu->num_counters, 0, 0); + pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities; + pmu->intel_cap.perf_metrics = 1; + pmu->intel_cap.pebs_output_pt_available = 0; + + memcpy(pmu->hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids)); + memcpy(pmu->hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs)); + pmu->event_constraints = intel_spr_event_constraints; + pmu->pebs_constraints = intel_spr_pebs_event_constraints; + pmu->extra_regs = intel_spr_extra_regs; + + /* Initialize Atom core specific PerfMon capabilities.*/ + pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; + pmu->name = "cpu_atom"; + pmu->cpu_type = hybrid_small; + pmu->num_counters = x86_pmu.num_counters; + pmu->num_counters_fixed = x86_pmu.num_counters_fixed; + pmu->max_pebs_events = x86_pmu.max_pebs_events; + pmu->unconstrained = (struct event_constraint) + __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, + 0, pmu->num_counters, 0, 0); + pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities; + pmu->intel_cap.perf_metrics = 0; + pmu->intel_cap.pebs_output_pt_available = 1; + + memcpy(pmu->hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids)); + memcpy(pmu->hw_cache_extra_regs, tnt_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs)); + pmu->hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; + pmu->event_constraints = intel_slm_event_constraints; + pmu->pebs_constraints = intel_grt_pebs_event_constraints; + pmu->extra_regs = intel_grt_extra_regs; + pr_cont("Alderlake Hybrid events, "); + name = "alderlake_hybrid"; + break; + default: switch (x86_pmu.version) { case 1: @@ -5670,68 +6217,36 @@ __init int intel_pmu_init(void) snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name); + if (!is_hybrid()) { + group_events_td.attrs = td_attr; + group_events_mem.attrs = mem_attr; + group_events_tsx.attrs = tsx_attr; + group_format_extra.attrs = extra_attr; + group_format_extra_skl.attrs = extra_skl_attr; - group_events_td.attrs = td_attr; - group_events_mem.attrs = mem_attr; - group_events_tsx.attrs = tsx_attr; - group_format_extra.attrs = extra_attr; - group_format_extra_skl.attrs = extra_skl_attr; - - x86_pmu.attr_update = attr_update; - - if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) { - WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", - x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC); - x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC; - } - x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1; + x86_pmu.attr_update = attr_update; + } else { + hybrid_group_events_td.attrs = td_attr; + hybrid_group_events_mem.attrs = mem_attr; + hybrid_group_events_tsx.attrs = tsx_attr; + hybrid_group_format_extra.attrs = extra_attr; - if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) { - WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", - x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED); - x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED; + x86_pmu.attr_update = hybrid_attr_update; } - 
x86_pmu.intel_ctrl |= (u64)fixed_mask << INTEL_PMC_IDX_FIXED; + intel_pmu_check_num_counters(&x86_pmu.num_counters, + &x86_pmu.num_counters_fixed, + &x86_pmu.intel_ctrl, + (u64)fixed_mask); /* AnyThread may be deprecated on arch perfmon v5 or later */ if (x86_pmu.intel_cap.anythread_deprecated) x86_pmu.format_attrs = intel_arch_formats_attr; - if (x86_pmu.event_constraints) { - /* - * event on fixed counter2 (REF_CYCLES) only works on this - * counter, so do not extend mask to generic counters - */ - for_each_event_constraint(c, x86_pmu.event_constraints) { - /* - * Don't extend the topdown slots and metrics - * events to the generic counters. - */ - if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) { - /* - * Disable topdown slots and metrics events, - * if slots event is not in CPUID. - */ - if (!(INTEL_PMC_MSK_FIXED_SLOTS & x86_pmu.intel_ctrl)) - c->idxmsk64 = 0; - c->weight = hweight64(c->idxmsk64); - continue; - } - - if (c->cmask == FIXED_EVENT_FLAGS) { - /* Disabled fixed counters which are not in CPUID */ - c->idxmsk64 &= x86_pmu.intel_ctrl; - - if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) - c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1; - } - c->idxmsk64 &= - ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed)); - c->weight = hweight64(c->idxmsk64); - } - } - + intel_pmu_check_event_constraints(x86_pmu.event_constraints, + x86_pmu.num_counters, + x86_pmu.num_counters_fixed, + x86_pmu.intel_ctrl); /* * Access LBR MSR may cause #GP under certain circumstances. * E.g. KVM doesn't support LBR MSR @@ -5749,19 +6264,7 @@ __init int intel_pmu_init(void) if (x86_pmu.lbr_nr) pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr); - /* - * Access extra MSR may cause #GP under certain circumstances. - * E.g. KVM doesn't support offcore event - * Check all extra_regs here. - */ - if (x86_pmu.extra_regs) { - for (er = x86_pmu.extra_regs; er->msr; er++) { - er->extra_msr_access = check_msr(er->msr, 0x11UL); - /* Disable LBR select mapping */ - if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access) - x86_pmu.lbr_sel_map = NULL; - } - } + intel_pmu_check_extra_regs(x86_pmu.extra_regs); /* Support full width counters using alternative MSR range */ if (x86_pmu.intel_cap.full_width_write) { @@ -5770,9 +6273,12 @@ __init int intel_pmu_init(void) pr_cont("full-width counters, "); } - if (x86_pmu.intel_cap.perf_metrics) + if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; + if (is_hybrid()) + intel_pmu_check_hybrid_pmus((u64)fixed_mask); + return 0; } diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 407eee5f6f95..433399069e27 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -40,7 +40,7 @@ * Model specific counters: * MSR_CORE_C1_RES: CORE C1 Residency Counter * perf code: 0x00 - * Available model: SLM,AMT,GLM,CNL,TNT + * Available model: SLM,AMT,GLM,CNL,TNT,ADL * Scope: Core (each processor core has a MSR) * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter * perf code: 0x01 @@ -51,46 +51,49 @@ * perf code: 0x02 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW, * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL, - * TNT,RKL + * TNT,RKL,ADL * Scope: Core * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter * perf code: 0x03 * Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML, - * ICL,TGL,RKL + * ICL,TGL,RKL,ADL * Scope: Core * MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter. 
* perf code: 0x00 * Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL, - * KBL,CML,ICL,TGL,TNT,RKL + * KBL,CML,ICL,TGL,TNT,RKL,ADL * Scope: Package (physical package) * MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter. * perf code: 0x01 * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL, - * GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL + * GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL, + * ADL * Scope: Package (physical package) * MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter. * perf code: 0x02 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW, * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL, - * TNT,RKL + * TNT,RKL,ADL * Scope: Package (physical package) * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter. * perf code: 0x03 * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL, - * KBL,CML,ICL,TGL,RKL + * KBL,CML,ICL,TGL,RKL,ADL * Scope: Package (physical package) * MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter. * perf code: 0x04 - * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL + * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL, + * ADL * Scope: Package (physical package) * MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter. * perf code: 0x05 - * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL + * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL, + * ADL * Scope: Package (physical package) * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter. * perf code: 0x06 * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL, - * TNT,RKL + * TNT,RKL,ADL * Scope: Package (physical package) * */ @@ -563,6 +566,20 @@ static const struct cstate_model icl_cstates __initconst = { BIT(PERF_CSTATE_PKG_C10_RES), }; +static const struct cstate_model adl_cstates __initconst = { + .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | + BIT(PERF_CSTATE_CORE_C6_RES) | + BIT(PERF_CSTATE_CORE_C7_RES), + + .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) | + BIT(PERF_CSTATE_PKG_C3_RES) | + BIT(PERF_CSTATE_PKG_C6_RES) | + BIT(PERF_CSTATE_PKG_C7_RES) | + BIT(PERF_CSTATE_PKG_C8_RES) | + BIT(PERF_CSTATE_PKG_C9_RES) | + BIT(PERF_CSTATE_PKG_C10_RES), +}; + static const struct cstate_model slm_cstates __initconst = { .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | BIT(PERF_CSTATE_CORE_C6_RES), @@ -650,6 +667,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &icl_cstates), X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &icl_cstates), X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &icl_cstates), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_cstates), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &adl_cstates), { }, }; MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match); diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 7ebae1826403..1ec8fd311f38 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -779,6 +779,13 @@ struct event_constraint intel_glm_pebs_event_constraints[] = { EVENT_CONSTRAINT_END }; +struct event_constraint intel_grt_pebs_event_constraints[] = { + /* Allow all events as PEBS with no flags */ + INTEL_PLD_CONSTRAINT(0x5d0, 0xf), + INTEL_PSD_CONSTRAINT(0x6d0, 0xf), + EVENT_CONSTRAINT_END +}; + struct event_constraint intel_nehalem_pebs_event_constraints[] = { INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */ INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ @@ -959,13 +966,14 @@ struct event_constraint intel_spr_pebs_event_constraints[] = { struct event_constraint *intel_pebs_constraints(struct perf_event *event) { + struct event_constraint *pebs_constraints = hybrid(event->pmu, pebs_constraints); struct event_constraint *c; if 
(!event->attr.precise_ip) return NULL; - if (x86_pmu.pebs_constraints) { - for_each_event_constraint(c, x86_pmu.pebs_constraints) { + if (pebs_constraints) { + for_each_event_constraint(c, pebs_constraints) { if (constraint_match(c, event->hw.config)) { event->hw.flags |= c->flags; return c; @@ -1007,6 +1015,8 @@ void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in) static inline void pebs_update_threshold(struct cpu_hw_events *cpuc) { struct debug_store *ds = cpuc->ds; + int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events); + int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed); u64 threshold; int reserved; @@ -1014,9 +1024,9 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc) return; if (x86_pmu.flags & PMU_FL_PEBS_ALL) - reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed; + reserved = max_pebs_events + num_counters_fixed; else - reserved = x86_pmu.max_pebs_events; + reserved = max_pebs_events; if (cpuc->n_pebs == cpuc->n_large_pebs) { threshold = ds->pebs_absolute_maximum - @@ -1353,14 +1363,13 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) is_64bit = kernel_ip(to) || any_64bit_mode(regs); #endif insn_init(&insn, kaddr, size, is_64bit); - insn_get_length(&insn); + /* - * Make sure there was not a problem decoding the - * instruction and getting the length. This is - * doubly important because we have an infinite - * loop if insn.length=0. + * Make sure there was not a problem decoding the instruction. + * This is doubly important because we have an infinite loop if + * insn.length=0. */ - if (!insn.length) + if (insn_get_length(&insn)) break; to += insn.length; @@ -1805,7 +1814,7 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count) * * [-period, 0] * - * the difference between two consequtive reads is: + * the difference between two consecutive reads is: * * A) value2 - value1; * when no overflows have happened in between, @@ -2010,7 +2019,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d */ if (!pebs_status && cpuc->pebs_enabled && !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1))) - pebs_status = cpuc->pebs_enabled; + pebs_status = p->status = cpuc->pebs_enabled; bit = find_first_bit((unsigned long *)&pebs_status, x86_pmu.max_pebs_events); @@ -2072,6 +2081,8 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d { short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events); + int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed); struct debug_store *ds = cpuc->ds; struct perf_event *event; void *base, *at, *top; @@ -2086,9 +2097,9 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d ds->pebs_index = ds->pebs_buffer_base; - mask = ((1ULL << x86_pmu.max_pebs_events) - 1) | - (((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED); - size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed; + mask = ((1ULL << max_pebs_events) - 1) | + (((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED); + size = INTEL_PMC_IDX_FIXED + num_counters_fixed; if (unlikely(base >= top)) { intel_pmu_pebs_event_update_no_drain(cpuc, size); @@ -2192,7 +2203,7 @@ void __init intel_ds_init(void) PERF_SAMPLE_TIME; x86_pmu.flags |= PMU_FL_PEBS_ALL; pebs_qual = "-baseline"; - x86_get_pmu()->capabilities |= PERF_PMU_CAP_EXTENDED_REGS; + 
x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_EXTENDED_REGS; } else { /* Only basic record supported */ x86_pmu.large_pebs_flags &= @@ -2205,9 +2216,9 @@ void __init intel_ds_init(void) } pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual); - if (x86_pmu.intel_cap.pebs_output_pt_available) { + if (!is_hybrid() && x86_pmu.intel_cap.pebs_output_pt_available) { pr_cont("PEBS-via-PT, "); - x86_get_pmu()->capabilities |= PERF_PMU_CAP_AUX_OUTPUT; + x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_AUX_OUTPUT; } break; diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index 21890dacfcfe..76dbab6ac9fb 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -705,7 +705,7 @@ void intel_pmu_lbr_add(struct perf_event *event) void release_lbr_buffers(void) { - struct kmem_cache *kmem_cache = x86_get_pmu()->task_ctx_cache; + struct kmem_cache *kmem_cache; struct cpu_hw_events *cpuc; int cpu; @@ -714,6 +714,7 @@ void release_lbr_buffers(void) for_each_possible_cpu(cpu) { cpuc = per_cpu_ptr(&cpu_hw_events, cpu); + kmem_cache = x86_get_pmu(cpu)->task_ctx_cache; if (kmem_cache && cpuc->lbr_xsave) { kmem_cache_free(kmem_cache, cpuc->lbr_xsave); cpuc->lbr_xsave = NULL; @@ -1198,7 +1199,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort) /* * The LBR logs any address in the IP, even if the IP just * faulted. This means userspace can control the from address. - * Ensure we don't blindy read any address by validating it is + * Ensure we don't blindly read any address by validating it is * a known text address. */ if (kernel_text_address(from)) { @@ -1224,8 +1225,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort) is64 = kernel_ip((unsigned long)addr) || any_64bit_mode(current_pt_regs()); #endif insn_init(&insn, addr, bytes_read, is64); - insn_get_opcode(&insn); - if (!insn.opcode.got) + if (insn_get_opcode(&insn)) return X86_BR_ABORT; switch (insn.opcode.bytes[0]) { @@ -1262,8 +1262,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort) ret = X86_BR_INT; break; case 0xe8: /* call near rel */ - insn_get_immediate(&insn); - if (insn.immediate1.value == 0) { + if (insn_get_immediate(&insn) || insn.immediate1.value == 0) { /* zero length call */ ret = X86_BR_ZERO_CALL; break; @@ -1279,7 +1278,9 @@ static int branch_type(unsigned long from, unsigned long to, int abort) ret = X86_BR_JMP; break; case 0xff: /* call near absolute, call far absolute ind */ - insn_get_modrm(&insn); + if (insn_get_modrm(&insn)) + return X86_BR_ABORT; + ext = (insn.modrm.bytes[0] >> 3) & 0x7; switch (ext) { case 2: /* near ind call */ @@ -1609,7 +1610,7 @@ void intel_pmu_lbr_init_hsw(void) x86_pmu.lbr_sel_mask = LBR_SEL_MASK; x86_pmu.lbr_sel_map = hsw_lbr_sel_map; - x86_get_pmu()->task_ctx_cache = create_lbr_kmem_cache(size, 0); + x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0); if (lbr_from_signext_quirk_needed()) static_branch_enable(&lbr_from_quirk_key); @@ -1629,7 +1630,7 @@ __init void intel_pmu_lbr_init_skl(void) x86_pmu.lbr_sel_mask = LBR_SEL_MASK; x86_pmu.lbr_sel_map = hsw_lbr_sel_map; - x86_get_pmu()->task_ctx_cache = create_lbr_kmem_cache(size, 0); + x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0); /* * SW branch filter usage: @@ -1726,7 +1727,7 @@ static bool is_arch_lbr_xsave_available(void) void __init intel_pmu_arch_lbr_init(void) { - struct pmu *pmu = x86_get_pmu(); + struct pmu *pmu = x86_get_pmu(smp_processor_id()); union 
cpuid28_eax eax; union cpuid28_ebx ebx; union cpuid28_ecx ecx; diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c index a4cc66005ce8..7951a5dc73b6 100644 --- a/arch/x86/events/intel/p4.c +++ b/arch/x86/events/intel/p4.c @@ -24,7 +24,7 @@ struct p4_event_bind { unsigned int escr_msr[2]; /* ESCR MSR for this event */ unsigned int escr_emask; /* valid ESCR EventMask bits */ unsigned int shared; /* event is shared across threads */ - char cntr[2][P4_CNTR_LIMIT]; /* counter index (offset), -1 on abscence */ + char cntr[2][P4_CNTR_LIMIT]; /* counter index (offset), -1 on absence */ }; struct p4_pebs_bind { @@ -45,7 +45,7 @@ struct p4_pebs_bind { * it's needed for mapping P4_PEBS_CONFIG_METRIC_MASK bits of * event configuration to find out which values are to be * written into MSR_IA32_PEBS_ENABLE and MSR_P4_PEBS_MATRIX_VERT - * resgisters + * registers */ static struct p4_pebs_bind p4_pebs_bind_map[] = { P4_GEN_PEBS_BIND(1stl_cache_load_miss_retired, 0x0000001, 0x0000001), @@ -947,7 +947,7 @@ static void p4_pmu_enable_pebs(u64 config) (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert); } -static void p4_pmu_enable_event(struct perf_event *event) +static void __p4_pmu_enable_event(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; int thread = p4_ht_config_thread(hwc->config); @@ -983,6 +983,16 @@ static void p4_pmu_enable_event(struct perf_event *event) (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE); } +static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(X86_PMC_IDX_MAX)], p4_running); + +static void p4_pmu_enable_event(struct perf_event *event) +{ + int idx = event->hw.idx; + + __set_bit(idx, per_cpu(p4_running, smp_processor_id())); + __p4_pmu_enable_event(event); +} + static void p4_pmu_enable_all(int added) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); @@ -992,7 +1002,7 @@ static void p4_pmu_enable_all(int added) struct perf_event *event = cpuc->events[idx]; if (!test_bit(idx, cpuc->active_mask)) continue; - p4_pmu_enable_event(event); + __p4_pmu_enable_event(event); } } @@ -1012,7 +1022,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) if (!test_bit(idx, cpuc->active_mask)) { /* catch in-flight IRQs */ - if (__test_and_clear_bit(idx, cpuc->running)) + if (__test_and_clear_bit(idx, per_cpu(p4_running, smp_processor_id()))) handled++; continue; } @@ -1313,7 +1323,7 @@ static __initconst const struct x86_pmu p4_pmu = { .get_event_constraints = x86_get_event_constraints, /* * IF HT disabled we may need to use all - * ARCH_P4_MAX_CCCR counters simulaneously + * ARCH_P4_MAX_CCCR counters simultaneously * though leave it restricted at moment assuming * HT is on */ diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index e94af4a54d0d..915847655c06 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -362,7 +362,7 @@ static bool pt_event_valid(struct perf_event *event) /* * Setting bit 0 (TraceEn in RTIT_CTL MSR) in the attr.config - * clears the assomption that BranchEn must always be enabled, + * clears the assumption that BranchEn must always be enabled, * as was the case with the first implementation of PT. * If this bit is not set, the legacy behavior is preserved * for compatibility with the older userspace. 
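
Note on the hybrid changes above: the core.c, ds.c and lbr.c hunks repeatedly replace direct reads of global x86_pmu fields (num_counters, intel_ctrl, extra_regs, pebs_constraints, ...) with per-PMU lookups, because on a hybrid part such as Alder Lake the big and small cores expose different counter counts and capabilities. The following is a minimal, self-contained sketch of that dispatch pattern only; the names used here (hybrid_caps, caps_for_core, init_ctrl_masks) are illustrative and are not the kernel's hybrid()/hybrid_pmu() helpers, whose definitions live outside this diff.

/*
 * Simplified model of per-core-type capability dispatch on a hybrid CPU.
 * Illustrative sketch, not kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum core_type { CORE_BIG, CORE_SMALL };

struct hybrid_caps {
	enum core_type type;
	int num_counters;        /* general-purpose counters */
	int num_counters_fixed;  /* fixed-function counters  */
	uint64_t ctrl_mask;      /* enable mask derived from the two counts */
};

/* One capability record per core type, filled in at init time
 * (counter counts here are made up for the example). */
static struct hybrid_caps caps[] = {
	{ CORE_BIG,   8, 4, 0 },
	{ CORE_SMALL, 6, 3, 0 },
};

/* Pick the record matching the core the event is scheduled on,
 * instead of consulting a single global structure. */
static struct hybrid_caps *caps_for_core(enum core_type type)
{
	for (size_t i = 0; i < sizeof(caps) / sizeof(caps[0]); i++) {
		if (caps[i].type == type)
			return &caps[i];
	}
	return NULL;
}

static void init_ctrl_masks(void)
{
	for (size_t i = 0; i < sizeof(caps) / sizeof(caps[0]); i++) {
		struct hybrid_caps *c = &caps[i];

		/* low bits enable GP counters, bits from position 32 up
		 * enable the fixed counters, mirroring how the patch
		 * builds intel_ctrl from fixed_mask */
		c->ctrl_mask  = (1ULL << c->num_counters) - 1;
		c->ctrl_mask |= ((1ULL << c->num_counters_fixed) - 1) << 32;
	}
}

int main(void)
{
	init_ctrl_masks();

	/* A PMI handler would consult the record for the local core,
	 * not one global, which is what the hybrid() lookups above do. */
	struct hybrid_caps *c = caps_for_core(CORE_SMALL);

	printf("small core ctrl mask: %#llx\n",
	       (unsigned long long)c->ctrl_mask);
	return 0;
}

In the patch itself, which record applies is decided per CPU at hotplug time (init_hybrid_pmu() using get_this_hybrid_cpu_type()), and events are kept off the wrong core type by intel_pmu_filter_match() checking the PMU's supported_cpus mask.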
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 33c8180d5a87..df7b07d7fdcb 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -4,8 +4,13 @@ #include <asm/cpu_device_id.h> #include <asm/intel-family.h> #include "uncore.h" +#include "uncore_discovery.h" -static struct intel_uncore_type *empty_uncore[] = { NULL, }; +static bool uncore_no_discover; +module_param(uncore_no_discover, bool, 0); +MODULE_PARM_DESC(uncore_no_discover, "Don't enable the Intel uncore PerfMon discovery mechanism " + "(default: enable the discovery mechanism)."); +struct intel_uncore_type *empty_uncore[] = { NULL, }; struct intel_uncore_type **uncore_msr_uncores = empty_uncore; struct intel_uncore_type **uncore_pci_uncores = empty_uncore; struct intel_uncore_type **uncore_mmio_uncores = empty_uncore; @@ -48,6 +53,18 @@ int uncore_pcibus_to_dieid(struct pci_bus *bus) return die_id; } +int uncore_die_to_segment(int die) +{ + struct pci_bus *bus = NULL; + + /* Find first pci bus which attributes to specified die. */ + while ((bus = pci_find_next_bus(bus)) && + (die != uncore_pcibus_to_dieid(bus))) + ; + + return bus ? pci_domain_nr(bus) : -EINVAL; +} + static void uncore_free_pcibus_map(void) { struct pci2phy_map *map, *tmp; @@ -829,6 +846,34 @@ static const struct attribute_group uncore_pmu_attr_group = { .attrs = uncore_pmu_attrs, }; +static void uncore_get_pmu_name(struct intel_uncore_pmu *pmu) +{ + struct intel_uncore_type *type = pmu->type; + + /* + * No uncore block name in discovery table. + * Use uncore_type_&typeid_&boxid as name. + */ + if (!type->name) { + if (type->num_boxes == 1) + sprintf(pmu->name, "uncore_type_%u", type->type_id); + else { + sprintf(pmu->name, "uncore_type_%u_%d", + type->type_id, type->box_ids[pmu->pmu_idx]); + } + return; + } + + if (type->num_boxes == 1) { + if (strlen(type->name) > 0) + sprintf(pmu->name, "uncore_%s", type->name); + else + sprintf(pmu->name, "uncore"); + } else + sprintf(pmu->name, "uncore_%s_%d", type->name, pmu->pmu_idx); + +} + static int uncore_pmu_register(struct intel_uncore_pmu *pmu) { int ret; @@ -855,15 +900,7 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu) pmu->pmu.attr_update = pmu->type->attr_update; } - if (pmu->type->num_boxes == 1) { - if (strlen(pmu->type->name) > 0) - sprintf(pmu->name, "uncore_%s", pmu->type->name); - else - sprintf(pmu->name, "uncore"); - } else { - sprintf(pmu->name, "uncore_%s_%d", pmu->type->name, - pmu->pmu_idx); - } + uncore_get_pmu_name(pmu); ret = perf_pmu_register(&pmu->pmu, pmu->name, -1); if (!ret) @@ -904,6 +941,10 @@ static void uncore_type_exit(struct intel_uncore_type *type) kfree(type->pmus); type->pmus = NULL; } + if (type->box_ids) { + kfree(type->box_ids); + type->box_ids = NULL; + } kfree(type->events_group); type->events_group = NULL; } @@ -1003,10 +1044,37 @@ static int uncore_pci_get_dev_die_info(struct pci_dev *pdev, int *die) return 0; } +static struct intel_uncore_pmu * +uncore_pci_find_dev_pmu_from_types(struct pci_dev *pdev) +{ + struct intel_uncore_type **types = uncore_pci_uncores; + struct intel_uncore_type *type; + u64 box_ctl; + int i, die; + + for (; *types; types++) { + type = *types; + for (die = 0; die < __uncore_max_dies; die++) { + for (i = 0; i < type->num_boxes; i++) { + if (!type->box_ctls[die]) + continue; + box_ctl = type->box_ctls[die] + type->pci_offsets[i]; + if (pdev->devfn == UNCORE_DISCOVERY_PCI_DEVFN(box_ctl) && + pdev->bus->number == UNCORE_DISCOVERY_PCI_BUS(box_ctl) && + pci_domain_nr(pdev->bus) == 
UNCORE_DISCOVERY_PCI_DOMAIN(box_ctl)) + return &type->pmus[i]; + } + } + } + + return NULL; +} + /* * Find the PMU of a PCI device. * @pdev: The PCI device. * @ids: The ID table of the available PCI devices with a PMU. + * If NULL, search the whole uncore_pci_uncores. */ static struct intel_uncore_pmu * uncore_pci_find_dev_pmu(struct pci_dev *pdev, const struct pci_device_id *ids) @@ -1016,6 +1084,9 @@ uncore_pci_find_dev_pmu(struct pci_dev *pdev, const struct pci_device_id *ids) kernel_ulong_t data; unsigned int devfn; + if (!ids) + return uncore_pci_find_dev_pmu_from_types(pdev); + while (ids && ids->vendor) { if ((ids->vendor == pdev->vendor) && (ids->device == pdev->device)) { @@ -1174,7 +1245,8 @@ static void uncore_pci_remove(struct pci_dev *pdev) } static int uncore_bus_notify(struct notifier_block *nb, - unsigned long action, void *data) + unsigned long action, void *data, + const struct pci_device_id *ids) { struct device *dev = data; struct pci_dev *pdev = to_pci_dev(dev); @@ -1185,7 +1257,7 @@ static int uncore_bus_notify(struct notifier_block *nb, if (action != BUS_NOTIFY_DEL_DEVICE) return NOTIFY_DONE; - pmu = uncore_pci_find_dev_pmu(pdev, uncore_pci_sub_driver->id_table); + pmu = uncore_pci_find_dev_pmu(pdev, ids); if (!pmu) return NOTIFY_DONE; @@ -1197,8 +1269,15 @@ static int uncore_bus_notify(struct notifier_block *nb, return NOTIFY_OK; } -static struct notifier_block uncore_notifier = { - .notifier_call = uncore_bus_notify, +static int uncore_pci_sub_bus_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + return uncore_bus_notify(nb, action, data, + uncore_pci_sub_driver->id_table); +} + +static struct notifier_block uncore_pci_sub_notifier = { + .notifier_call = uncore_pci_sub_bus_notify, }; static void uncore_pci_sub_driver_init(void) @@ -1239,13 +1318,55 @@ static void uncore_pci_sub_driver_init(void) ids++; } - if (notify && bus_register_notifier(&pci_bus_type, &uncore_notifier)) + if (notify && bus_register_notifier(&pci_bus_type, &uncore_pci_sub_notifier)) notify = false; if (!notify) uncore_pci_sub_driver = NULL; } +static int uncore_pci_bus_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + return uncore_bus_notify(nb, action, data, NULL); +} + +static struct notifier_block uncore_pci_notifier = { + .notifier_call = uncore_pci_bus_notify, +}; + + +static void uncore_pci_pmus_register(void) +{ + struct intel_uncore_type **types = uncore_pci_uncores; + struct intel_uncore_type *type; + struct intel_uncore_pmu *pmu; + struct pci_dev *pdev; + u64 box_ctl; + int i, die; + + for (; *types; types++) { + type = *types; + for (die = 0; die < __uncore_max_dies; die++) { + for (i = 0; i < type->num_boxes; i++) { + if (!type->box_ctls[die]) + continue; + box_ctl = type->box_ctls[die] + type->pci_offsets[i]; + pdev = pci_get_domain_bus_and_slot(UNCORE_DISCOVERY_PCI_DOMAIN(box_ctl), + UNCORE_DISCOVERY_PCI_BUS(box_ctl), + UNCORE_DISCOVERY_PCI_DEVFN(box_ctl)); + if (!pdev) + continue; + pmu = &type->pmus[i]; + + uncore_pci_pmu_register(pdev, type, pmu, die); + } + } + } + + bus_register_notifier(&pci_bus_type, &uncore_pci_notifier); +} + static int __init uncore_pci_init(void) { size_t size; @@ -1262,12 +1383,15 @@ static int __init uncore_pci_init(void) if (ret) goto errtype; - uncore_pci_driver->probe = uncore_pci_probe; - uncore_pci_driver->remove = uncore_pci_remove; + if (uncore_pci_driver) { + uncore_pci_driver->probe = uncore_pci_probe; + uncore_pci_driver->remove = uncore_pci_remove; - ret = 
pci_register_driver(uncore_pci_driver); - if (ret) - goto errtype; + ret = pci_register_driver(uncore_pci_driver); + if (ret) + goto errtype; + } else + uncore_pci_pmus_register(); if (uncore_pci_sub_driver) uncore_pci_sub_driver_init(); @@ -1290,8 +1414,11 @@ static void uncore_pci_exit(void) if (pcidrv_registered) { pcidrv_registered = false; if (uncore_pci_sub_driver) - bus_unregister_notifier(&pci_bus_type, &uncore_notifier); - pci_unregister_driver(uncore_pci_driver); + bus_unregister_notifier(&pci_bus_type, &uncore_pci_sub_notifier); + if (uncore_pci_driver) + pci_unregister_driver(uncore_pci_driver); + else + bus_unregister_notifier(&pci_bus_type, &uncore_pci_notifier); uncore_types_exit(uncore_pci_uncores); kfree(uncore_extra_pci_dev); uncore_free_pcibus_map(); @@ -1625,6 +1752,11 @@ static const struct intel_uncore_init_fun rkl_uncore_init __initconst = { .pci_init = skl_uncore_pci_init, }; +static const struct intel_uncore_init_fun adl_uncore_init __initconst = { + .cpu_init = adl_uncore_cpu_init, + .mmio_init = tgl_uncore_mmio_init, +}; + static const struct intel_uncore_init_fun icx_uncore_init __initconst = { .cpu_init = icx_uncore_cpu_init, .pci_init = icx_uncore_pci_init, @@ -1637,6 +1769,12 @@ static const struct intel_uncore_init_fun snr_uncore_init __initconst = { .mmio_init = snr_uncore_mmio_init, }; +static const struct intel_uncore_init_fun generic_uncore_init __initconst = { + .cpu_init = intel_uncore_generic_uncore_cpu_init, + .pci_init = intel_uncore_generic_uncore_pci_init, + .mmio_init = intel_uncore_generic_uncore_mmio_init, +}; + static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &nhm_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &nhm_uncore_init), @@ -1673,6 +1811,8 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &tgl_l_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &tgl_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &rkl_uncore_init), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_uncore_init), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &adl_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init), {}, }; @@ -1684,17 +1824,21 @@ static int __init intel_uncore_init(void) struct intel_uncore_init_fun *uncore_init; int pret = 0, cret = 0, mret = 0, ret; - id = x86_match_cpu(intel_uncore_match); - if (!id) - return -ENODEV; - if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) return -ENODEV; __uncore_max_dies = topology_max_packages() * topology_max_die_per_package(); - uncore_init = (struct intel_uncore_init_fun *)id->driver_data; + id = x86_match_cpu(intel_uncore_match); + if (!id) { + if (!uncore_no_discover && intel_uncore_has_discovery_tables()) + uncore_init = (struct intel_uncore_init_fun *)&generic_uncore_init; + else + return -ENODEV; + } else + uncore_init = (struct intel_uncore_init_fun *)id->driver_data; + if (uncore_init->pci_init) { pret = uncore_init->pci_init(); if (!pret) @@ -1711,8 +1855,10 @@ static int __init intel_uncore_init(void) mret = uncore_mmio_init(); } - if (cret && pret && mret) - return -ENODEV; + if (cret && pret && mret) { + ret = -ENODEV; + goto free_discovery; + } /* Install hotplug callbacks to setup the targets for each package */ ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE, @@ -1727,6 +1873,8 @@ err: uncore_types_exit(uncore_msr_uncores); uncore_types_exit(uncore_mmio_uncores); uncore_pci_exit(); +free_discovery: + intel_uncore_clear_discovery_tables(); return ret; } 
module_init(intel_uncore_init); @@ -1737,5 +1885,6 @@ static void __exit intel_uncore_exit(void) uncore_types_exit(uncore_msr_uncores); uncore_types_exit(uncore_mmio_uncores); uncore_pci_exit(); + intel_uncore_clear_discovery_tables(); } module_exit(intel_uncore_exit); diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index a3c6e1643ad2..291791002997 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -42,6 +42,7 @@ struct intel_uncore_pmu; struct intel_uncore_box; struct uncore_event_desc; struct freerunning_counters; +struct intel_uncore_topology; struct intel_uncore_type { const char *name; @@ -50,6 +51,7 @@ struct intel_uncore_type { int perf_ctr_bits; int fixed_ctr_bits; int num_freerunning_types; + int type_id; unsigned perf_ctr; unsigned event_ctl; unsigned event_mask; @@ -57,6 +59,7 @@ struct intel_uncore_type { unsigned fixed_ctr; unsigned fixed_ctl; unsigned box_ctl; + u64 *box_ctls; /* Unit ctrl addr of the first box of each die */ union { unsigned msr_offset; unsigned mmio_offset; @@ -65,7 +68,12 @@ struct intel_uncore_type { unsigned num_shared_regs:8; unsigned single_fixed:1; unsigned pair_ctr_ctl:1; - unsigned *msr_offsets; + union { + unsigned *msr_offsets; + unsigned *pci_offsets; + unsigned *mmio_offsets; + }; + unsigned *box_ids; struct event_constraint unconstrainted; struct event_constraint *constraints; struct intel_uncore_pmu *pmus; @@ -80,7 +88,7 @@ struct intel_uncore_type { * to identify which platform component each PMON block of that type is * supposed to monitor. */ - u64 *topology; + struct intel_uncore_topology *topology; /* * Optional callbacks for managing mapping of Uncore units to PMONs */ @@ -169,6 +177,11 @@ struct freerunning_counters { unsigned *box_offsets; }; +struct intel_uncore_topology { + u64 configuration; + int segment; +}; + struct pci2phy_map { struct list_head list; int segment; @@ -177,6 +190,7 @@ struct pci2phy_map { struct pci2phy_map *__find_pci2phy_map(int segment); int uncore_pcibus_to_dieid(struct pci_bus *bus); +int uncore_die_to_segment(int die); ssize_t uncore_event_show(struct device *dev, struct device_attribute *attr, char *buf); @@ -547,6 +561,7 @@ uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event); void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event); u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx); +extern struct intel_uncore_type *empty_uncore[]; extern struct intel_uncore_type **uncore_msr_uncores; extern struct intel_uncore_type **uncore_pci_uncores; extern struct intel_uncore_type **uncore_mmio_uncores; @@ -567,6 +582,7 @@ void snb_uncore_cpu_init(void); void nhm_uncore_cpu_init(void); void skl_uncore_cpu_init(void); void icl_uncore_cpu_init(void); +void adl_uncore_cpu_init(void); void tgl_uncore_cpu_init(void); void tgl_uncore_mmio_init(void); void tgl_l_uncore_mmio_init(void); diff --git a/arch/x86/events/intel/uncore_discovery.c b/arch/x86/events/intel/uncore_discovery.c new file mode 100644 index 000000000000..aba9bff95413 --- /dev/null +++ b/arch/x86/events/intel/uncore_discovery.c @@ -0,0 +1,622 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Support Intel uncore PerfMon discovery mechanism. + * Copyright(c) 2021 Intel Corporation. 
+ */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "uncore.h" +#include "uncore_discovery.h" + +static struct rb_root discovery_tables = RB_ROOT; +static int num_discovered_types[UNCORE_ACCESS_MAX]; + +static bool has_generic_discovery_table(void) +{ + struct pci_dev *dev; + int dvsec; + + dev = pci_get_device(PCI_VENDOR_ID_INTEL, UNCORE_DISCOVERY_TABLE_DEVICE, NULL); + if (!dev) + return false; + + /* A discovery table device has the unique capability ID. */ + dvsec = pci_find_next_ext_capability(dev, 0, UNCORE_EXT_CAP_ID_DISCOVERY); + pci_dev_put(dev); + if (dvsec) + return true; + + return false; +} + +static int logical_die_id; + +static int get_device_die_id(struct pci_dev *dev) +{ + int cpu, node = pcibus_to_node(dev->bus); + + /* + * If the NUMA info is not available, assume that the logical die id is + * continuous in the order in which the discovery table devices are + * detected. + */ + if (node < 0) + return logical_die_id++; + + for_each_cpu(cpu, cpumask_of_node(node)) { + struct cpuinfo_x86 *c = &cpu_data(cpu); + + if (c->initialized && cpu_to_node(cpu) == node) + return c->logical_die_id; + } + + /* + * All CPUs of a node may be offlined. For this case, + * the PCI and MMIO type of uncore blocks which are + * enumerated by the device will be unavailable. + */ + return -1; +} + +#define __node_2_type(cur) \ + rb_entry((cur), struct intel_uncore_discovery_type, node) + +static inline int __type_cmp(const void *key, const struct rb_node *b) +{ + struct intel_uncore_discovery_type *type_b = __node_2_type(b); + const u16 *type_id = key; + + if (type_b->type > *type_id) + return -1; + else if (type_b->type < *type_id) + return 1; + + return 0; +} + +static inline struct intel_uncore_discovery_type * +search_uncore_discovery_type(u16 type_id) +{ + struct rb_node *node = rb_find(&type_id, &discovery_tables, __type_cmp); + + return (node) ? 
__node_2_type(node) : NULL; +} + +static inline bool __type_less(struct rb_node *a, const struct rb_node *b) +{ + return (__node_2_type(a)->type < __node_2_type(b)->type); +} + +static struct intel_uncore_discovery_type * +add_uncore_discovery_type(struct uncore_unit_discovery *unit) +{ + struct intel_uncore_discovery_type *type; + + if (unit->access_type >= UNCORE_ACCESS_MAX) { + pr_warn("Unsupported access type %d\n", unit->access_type); + return NULL; + } + + type = kzalloc(sizeof(struct intel_uncore_discovery_type), GFP_KERNEL); + if (!type) + return NULL; + + type->box_ctrl_die = kcalloc(__uncore_max_dies, sizeof(u64), GFP_KERNEL); + if (!type->box_ctrl_die) + goto free_type; + + type->access_type = unit->access_type; + num_discovered_types[type->access_type]++; + type->type = unit->box_type; + + rb_add(&type->node, &discovery_tables, __type_less); + + return type; + +free_type: + kfree(type); + + return NULL; + +} + +static struct intel_uncore_discovery_type * +get_uncore_discovery_type(struct uncore_unit_discovery *unit) +{ + struct intel_uncore_discovery_type *type; + + type = search_uncore_discovery_type(unit->box_type); + if (type) + return type; + + return add_uncore_discovery_type(unit); +} + +static void +uncore_insert_box_info(struct uncore_unit_discovery *unit, + int die, bool parsed) +{ + struct intel_uncore_discovery_type *type; + unsigned int *box_offset, *ids; + int i; + + if (WARN_ON_ONCE(!unit->ctl || !unit->ctl_offset || !unit->ctr_offset)) + return; + + if (parsed) { + type = search_uncore_discovery_type(unit->box_type); + if (WARN_ON_ONCE(!type)) + return; + /* Store the first box of each die */ + if (!type->box_ctrl_die[die]) + type->box_ctrl_die[die] = unit->ctl; + return; + } + + type = get_uncore_discovery_type(unit); + if (!type) + return; + + box_offset = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL); + if (!box_offset) + return; + + ids = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL); + if (!ids) + goto free_box_offset; + + /* Store generic information for the first box */ + if (!type->num_boxes) { + type->box_ctrl = unit->ctl; + type->box_ctrl_die[die] = unit->ctl; + type->num_counters = unit->num_regs; + type->counter_width = unit->bit_width; + type->ctl_offset = unit->ctl_offset; + type->ctr_offset = unit->ctr_offset; + *ids = unit->box_id; + goto end; + } + + for (i = 0; i < type->num_boxes; i++) { + ids[i] = type->ids[i]; + box_offset[i] = type->box_offset[i]; + + if (WARN_ON_ONCE(unit->box_id == ids[i])) + goto free_ids; + } + ids[i] = unit->box_id; + box_offset[i] = unit->ctl - type->box_ctrl; + kfree(type->ids); + kfree(type->box_offset); +end: + type->ids = ids; + type->box_offset = box_offset; + type->num_boxes++; + return; + +free_ids: + kfree(ids); + +free_box_offset: + kfree(box_offset); + +} + +static int parse_discovery_table(struct pci_dev *dev, int die, + u32 bar_offset, bool *parsed) +{ + struct uncore_global_discovery global; + struct uncore_unit_discovery unit; + void __iomem *io_addr; + resource_size_t addr; + unsigned long size; + u32 val; + int i; + + pci_read_config_dword(dev, bar_offset, &val); + + if (val & UNCORE_DISCOVERY_MASK) + return -EINVAL; + + addr = (resource_size_t)(val & ~UNCORE_DISCOVERY_MASK); + size = UNCORE_DISCOVERY_GLOBAL_MAP_SIZE; + io_addr = ioremap(addr, size); + if (!io_addr) + return -ENOMEM; + + /* Read Global Discovery State */ + memcpy_fromio(&global, io_addr, sizeof(struct uncore_global_discovery)); + if (uncore_discovery_invalid_unit(global)) { + pr_info("Invalid Global 
Discovery State: 0x%llx 0x%llx 0x%llx\n", + global.table1, global.ctl, global.table3); + iounmap(io_addr); + return -EINVAL; + } + iounmap(io_addr); + + size = (1 + global.max_units) * global.stride * 8; + io_addr = ioremap(addr, size); + if (!io_addr) + return -ENOMEM; + + /* Parsing Unit Discovery State */ + for (i = 0; i < global.max_units; i++) { + memcpy_fromio(&unit, io_addr + (i + 1) * (global.stride * 8), + sizeof(struct uncore_unit_discovery)); + + if (uncore_discovery_invalid_unit(unit)) + continue; + + if (unit.access_type >= UNCORE_ACCESS_MAX) + continue; + + uncore_insert_box_info(&unit, die, *parsed); + } + + *parsed = true; + iounmap(io_addr); + return 0; +} + +bool intel_uncore_has_discovery_tables(void) +{ + u32 device, val, entry_id, bar_offset; + int die, dvsec = 0, ret = true; + struct pci_dev *dev = NULL; + bool parsed = false; + + if (has_generic_discovery_table()) + device = UNCORE_DISCOVERY_TABLE_DEVICE; + else + device = PCI_ANY_ID; + + /* + * Start a new search and iterates through the list of + * the discovery table devices. + */ + while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) { + while ((dvsec = pci_find_next_ext_capability(dev, dvsec, UNCORE_EXT_CAP_ID_DISCOVERY))) { + pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC_OFFSET, &val); + entry_id = val & UNCORE_DISCOVERY_DVSEC_ID_MASK; + if (entry_id != UNCORE_DISCOVERY_DVSEC_ID_PMON) + continue; + + pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC2_OFFSET, &val); + + if (val & ~UNCORE_DISCOVERY_DVSEC2_BIR_MASK) { + ret = false; + goto err; + } + bar_offset = UNCORE_DISCOVERY_BIR_BASE + + (val & UNCORE_DISCOVERY_DVSEC2_BIR_MASK) * UNCORE_DISCOVERY_BIR_STEP; + + die = get_device_die_id(dev); + if (die < 0) + continue; + + parse_discovery_table(dev, die, bar_offset, &parsed); + } + } + + /* None of the discovery tables are available */ + if (!parsed) + ret = false; +err: + pci_dev_put(dev); + + return ret; +} + +void intel_uncore_clear_discovery_tables(void) +{ + struct intel_uncore_discovery_type *type, *next; + + rbtree_postorder_for_each_entry_safe(type, next, &discovery_tables, node) { + kfree(type->box_ctrl_die); + kfree(type); + } +} + +DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); +DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); +DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); +DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); +DEFINE_UNCORE_FORMAT_ATTR(thresh, thresh, "config:24-31"); + +static struct attribute *generic_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh.attr, + NULL, +}; + +static const struct attribute_group generic_uncore_format_group = { + .name = "format", + .attrs = generic_uncore_formats_attr, +}; + +static void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box) +{ + wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_INT); +} + +static void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box) +{ + wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ); +} + +static void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box) +{ + wrmsrl(uncore_msr_box_ctl(box), 0); +} + +static void intel_generic_uncore_msr_enable_event(struct intel_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + wrmsrl(hwc->config_base, hwc->config); +} + +static void intel_generic_uncore_msr_disable_event(struct intel_uncore_box *box, + struct perf_event *event) +{ + 
struct hw_perf_event *hwc = &event->hw; + + wrmsrl(hwc->config_base, 0); +} + +static struct intel_uncore_ops generic_uncore_msr_ops = { + .init_box = intel_generic_uncore_msr_init_box, + .disable_box = intel_generic_uncore_msr_disable_box, + .enable_box = intel_generic_uncore_msr_enable_box, + .disable_event = intel_generic_uncore_msr_disable_event, + .enable_event = intel_generic_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, +}; + +static void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int box_ctl = uncore_pci_box_ctl(box); + + __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags); + pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_INT); +} + +static void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int box_ctl = uncore_pci_box_ctl(box); + + pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_FRZ); +} + +static void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int box_ctl = uncore_pci_box_ctl(box); + + pci_write_config_dword(pdev, box_ctl, 0); +} + +static void intel_generic_uncore_pci_enable_event(struct intel_uncore_box *box, + struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + + pci_write_config_dword(pdev, hwc->config_base, hwc->config); +} + +static void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box, + struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + + pci_write_config_dword(pdev, hwc->config_base, 0); +} + +static u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box, + struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + u64 count = 0; + + pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count); + pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1); + + return count; +} + +static struct intel_uncore_ops generic_uncore_pci_ops = { + .init_box = intel_generic_uncore_pci_init_box, + .disable_box = intel_generic_uncore_pci_disable_box, + .enable_box = intel_generic_uncore_pci_enable_box, + .disable_event = intel_generic_uncore_pci_disable_event, + .enable_event = intel_generic_uncore_pci_enable_event, + .read_counter = intel_generic_uncore_pci_read_counter, +}; + +#define UNCORE_GENERIC_MMIO_SIZE 0x4000 + +static unsigned int generic_uncore_mmio_box_ctl(struct intel_uncore_box *box) +{ + struct intel_uncore_type *type = box->pmu->type; + + if (!type->box_ctls || !type->box_ctls[box->dieid] || !type->mmio_offsets) + return 0; + + return type->box_ctls[box->dieid] + type->mmio_offsets[box->pmu->pmu_idx]; +} + +static void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box) +{ + unsigned int box_ctl = generic_uncore_mmio_box_ctl(box); + struct intel_uncore_type *type = box->pmu->type; + resource_size_t addr; + + if (!box_ctl) { + pr_warn("Uncore type %d box %d: Invalid box control address.\n", + type->type_id, type->box_ids[box->pmu->pmu_idx]); + return; + } + + addr = box_ctl; + box->io_addr = ioremap(addr, UNCORE_GENERIC_MMIO_SIZE); + if (!box->io_addr) { + pr_warn("Uncore type %d box %d: ioremap error for 0x%llx.\n", + type->type_id, type->box_ids[box->pmu->pmu_idx], + (unsigned long long)addr); + return; + } + + writel(GENERIC_PMON_BOX_CTL_INT, box->io_addr); +} + +static void 
intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box) +{ + if (!box->io_addr) + return; + + writel(GENERIC_PMON_BOX_CTL_FRZ, box->io_addr); +} + +static void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box) +{ + if (!box->io_addr) + return; + + writel(0, box->io_addr); +} + +static void intel_generic_uncore_mmio_enable_event(struct intel_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!box->io_addr) + return; + + writel(hwc->config, box->io_addr + hwc->config_base); +} + +static void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!box->io_addr) + return; + + writel(0, box->io_addr + hwc->config_base); +} + +static struct intel_uncore_ops generic_uncore_mmio_ops = { + .init_box = intel_generic_uncore_mmio_init_box, + .exit_box = uncore_mmio_exit_box, + .disable_box = intel_generic_uncore_mmio_disable_box, + .enable_box = intel_generic_uncore_mmio_enable_box, + .disable_event = intel_generic_uncore_mmio_disable_event, + .enable_event = intel_generic_uncore_mmio_enable_event, + .read_counter = uncore_mmio_read_counter, +}; + +static bool uncore_update_uncore_type(enum uncore_access_type type_id, + struct intel_uncore_type *uncore, + struct intel_uncore_discovery_type *type) +{ + uncore->type_id = type->type; + uncore->num_boxes = type->num_boxes; + uncore->num_counters = type->num_counters; + uncore->perf_ctr_bits = type->counter_width; + uncore->box_ids = type->ids; + + switch (type_id) { + case UNCORE_ACCESS_MSR: + uncore->ops = &generic_uncore_msr_ops; + uncore->perf_ctr = (unsigned int)type->box_ctrl + type->ctr_offset; + uncore->event_ctl = (unsigned int)type->box_ctrl + type->ctl_offset; + uncore->box_ctl = (unsigned int)type->box_ctrl; + uncore->msr_offsets = type->box_offset; + break; + case UNCORE_ACCESS_PCI: + uncore->ops = &generic_uncore_pci_ops; + uncore->perf_ctr = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctr_offset; + uncore->event_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctl_offset; + uncore->box_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl); + uncore->box_ctls = type->box_ctrl_die; + uncore->pci_offsets = type->box_offset; + break; + case UNCORE_ACCESS_MMIO: + uncore->ops = &generic_uncore_mmio_ops; + uncore->perf_ctr = (unsigned int)type->ctr_offset; + uncore->event_ctl = (unsigned int)type->ctl_offset; + uncore->box_ctl = (unsigned int)type->box_ctrl; + uncore->box_ctls = type->box_ctrl_die; + uncore->mmio_offsets = type->box_offset; + uncore->mmio_map_size = UNCORE_GENERIC_MMIO_SIZE; + break; + default: + return false; + } + + return true; +} + +static struct intel_uncore_type ** +intel_uncore_generic_init_uncores(enum uncore_access_type type_id) +{ + struct intel_uncore_discovery_type *type; + struct intel_uncore_type **uncores; + struct intel_uncore_type *uncore; + struct rb_node *node; + int i = 0; + + uncores = kcalloc(num_discovered_types[type_id] + 1, + sizeof(struct intel_uncore_type *), GFP_KERNEL); + if (!uncores) + return empty_uncore; + + for (node = rb_first(&discovery_tables); node; node = rb_next(node)) { + type = rb_entry(node, struct intel_uncore_discovery_type, node); + if (type->access_type != type_id) + continue; + + uncore = kzalloc(sizeof(struct intel_uncore_type), GFP_KERNEL); + if (!uncore) + break; + + uncore->event_mask = GENERIC_PMON_RAW_EVENT_MASK; + uncore->format_group = 
&generic_uncore_format_group; + + if (!uncore_update_uncore_type(type_id, uncore, type)) { + kfree(uncore); + continue; + } + uncores[i++] = uncore; + } + + return uncores; +} + +void intel_uncore_generic_uncore_cpu_init(void) +{ + uncore_msr_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MSR); +} + +int intel_uncore_generic_uncore_pci_init(void) +{ + uncore_pci_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_PCI); + + return 0; +} + +void intel_uncore_generic_uncore_mmio_init(void) +{ + uncore_mmio_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MMIO); +} diff --git a/arch/x86/events/intel/uncore_discovery.h b/arch/x86/events/intel/uncore_discovery.h new file mode 100644 index 000000000000..1d652939a01c --- /dev/null +++ b/arch/x86/events/intel/uncore_discovery.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/* Generic device ID of a discovery table device */ +#define UNCORE_DISCOVERY_TABLE_DEVICE 0x09a7 +/* Capability ID for a discovery table device */ +#define UNCORE_EXT_CAP_ID_DISCOVERY 0x23 +/* First DVSEC offset */ +#define UNCORE_DISCOVERY_DVSEC_OFFSET 0x8 +/* Mask of the supported discovery entry type */ +#define UNCORE_DISCOVERY_DVSEC_ID_MASK 0xffff +/* PMON discovery entry type ID */ +#define UNCORE_DISCOVERY_DVSEC_ID_PMON 0x1 +/* Second DVSEC offset */ +#define UNCORE_DISCOVERY_DVSEC2_OFFSET 0xc +/* Mask of the discovery table BAR offset */ +#define UNCORE_DISCOVERY_DVSEC2_BIR_MASK 0x7 +/* Discovery table BAR base offset */ +#define UNCORE_DISCOVERY_BIR_BASE 0x10 +/* Discovery table BAR step */ +#define UNCORE_DISCOVERY_BIR_STEP 0x4 +/* Mask of the discovery table offset */ +#define UNCORE_DISCOVERY_MASK 0xf +/* Global discovery table size */ +#define UNCORE_DISCOVERY_GLOBAL_MAP_SIZE 0x20 + +#define UNCORE_DISCOVERY_PCI_DOMAIN(data) ((data >> 28) & 0x7) +#define UNCORE_DISCOVERY_PCI_BUS(data) ((data >> 20) & 0xff) +#define UNCORE_DISCOVERY_PCI_DEVFN(data) ((data >> 12) & 0xff) +#define UNCORE_DISCOVERY_PCI_BOX_CTRL(data) (data & 0xfff) + + +#define uncore_discovery_invalid_unit(unit) \ + (!unit.table1 || !unit.ctl || !unit.table3 || \ + unit.table1 == -1ULL || unit.ctl == -1ULL || \ + unit.table3 == -1ULL) + +#define GENERIC_PMON_CTL_EV_SEL_MASK 0x000000ff +#define GENERIC_PMON_CTL_UMASK_MASK 0x0000ff00 +#define GENERIC_PMON_CTL_EDGE_DET (1 << 18) +#define GENERIC_PMON_CTL_INVERT (1 << 23) +#define GENERIC_PMON_CTL_TRESH_MASK 0xff000000 +#define GENERIC_PMON_RAW_EVENT_MASK (GENERIC_PMON_CTL_EV_SEL_MASK | \ + GENERIC_PMON_CTL_UMASK_MASK | \ + GENERIC_PMON_CTL_EDGE_DET | \ + GENERIC_PMON_CTL_INVERT | \ + GENERIC_PMON_CTL_TRESH_MASK) + +#define GENERIC_PMON_BOX_CTL_FRZ (1 << 0) +#define GENERIC_PMON_BOX_CTL_RST_CTRL (1 << 8) +#define GENERIC_PMON_BOX_CTL_RST_CTRS (1 << 9) +#define GENERIC_PMON_BOX_CTL_INT (GENERIC_PMON_BOX_CTL_RST_CTRL | \ + GENERIC_PMON_BOX_CTL_RST_CTRS) + +enum uncore_access_type { + UNCORE_ACCESS_MSR = 0, + UNCORE_ACCESS_MMIO, + UNCORE_ACCESS_PCI, + + UNCORE_ACCESS_MAX, +}; + +struct uncore_global_discovery { + union { + u64 table1; + struct { + u64 type : 8, + stride : 8, + max_units : 10, + __reserved_1 : 36, + access_type : 2; + }; + }; + + u64 ctl; /* Global Control Address */ + + union { + u64 table3; + struct { + u64 status_offset : 8, + num_status : 16, + __reserved_2 : 40; + }; + }; +}; + +struct uncore_unit_discovery { + union { + u64 table1; + struct { + u64 num_regs : 8, + ctl_offset : 8, + bit_width : 8, + ctr_offset : 8, + status_offset : 8, + __reserved_1 : 22, + access_type : 2; + }; + }; + + u64 
ctl; /* Unit Control Address */ + + union { + u64 table3; + struct { + u64 box_type : 16, + box_id : 16, + __reserved_2 : 32; + }; + }; +}; + +struct intel_uncore_discovery_type { + struct rb_node node; + enum uncore_access_type access_type; + u64 box_ctrl; /* Unit ctrl addr of the first box */ + u64 *box_ctrl_die; /* Unit ctrl addr of the first box of each die */ + u16 type; /* Type ID of the uncore block */ + u8 num_counters; + u8 counter_width; + u8 ctl_offset; /* Counter Control 0 offset */ + u8 ctr_offset; /* Counter 0 offset */ + u16 num_boxes; /* number of boxes for the uncore block */ + unsigned int *ids; /* Box IDs */ + unsigned int *box_offset; /* Box offset */ +}; + +bool intel_uncore_has_discovery_tables(void); +void intel_uncore_clear_discovery_tables(void); +void intel_uncore_generic_uncore_cpu_init(void); +int intel_uncore_generic_uncore_pci_init(void); +void intel_uncore_generic_uncore_mmio_init(void); diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index 51271288499e..0f63706cdadf 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -62,6 +62,8 @@ #define PCI_DEVICE_ID_INTEL_TGL_H_IMC 0x9a36 #define PCI_DEVICE_ID_INTEL_RKL_1_IMC 0x4c43 #define PCI_DEVICE_ID_INTEL_RKL_2_IMC 0x4c53 +#define PCI_DEVICE_ID_INTEL_ADL_1_IMC 0x4660 +#define PCI_DEVICE_ID_INTEL_ADL_2_IMC 0x4641 /* SNB event control */ #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff @@ -131,12 +133,33 @@ #define ICL_UNC_ARB_PER_CTR 0x3b1 #define ICL_UNC_ARB_PERFEVTSEL 0x3b3 +/* ADL uncore global control */ +#define ADL_UNC_PERF_GLOBAL_CTL 0x2ff0 +#define ADL_UNC_FIXED_CTR_CTRL 0x2fde +#define ADL_UNC_FIXED_CTR 0x2fdf + +/* ADL Cbo register */ +#define ADL_UNC_CBO_0_PER_CTR0 0x2002 +#define ADL_UNC_CBO_0_PERFEVTSEL0 0x2000 +#define ADL_UNC_CTL_THRESHOLD 0x3f000000 +#define ADL_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \ + SNB_UNC_CTL_UMASK_MASK | \ + SNB_UNC_CTL_EDGE_DET | \ + SNB_UNC_CTL_INVERT | \ + ADL_UNC_CTL_THRESHOLD) + +/* ADL ARB register */ +#define ADL_UNC_ARB_PER_CTR0 0x2FD2 +#define ADL_UNC_ARB_PERFEVTSEL0 0x2FD0 +#define ADL_UNC_ARB_MSR_OFFSET 0x8 + DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28"); DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31"); +DEFINE_UNCORE_FORMAT_ATTR(threshold, threshold, "config:24-29"); /* Sandy Bridge uncore support */ static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) @@ -422,6 +445,106 @@ void tgl_uncore_cpu_init(void) skl_uncore_msr_ops.init_box = rkl_uncore_msr_init_box; } +static void adl_uncore_msr_init_box(struct intel_uncore_box *box) +{ + if (box->pmu->pmu_idx == 0) + wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN); +} + +static void adl_uncore_msr_enable_box(struct intel_uncore_box *box) +{ + wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN); +} + +static void adl_uncore_msr_disable_box(struct intel_uncore_box *box) +{ + if (box->pmu->pmu_idx == 0) + wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0); +} + +static void adl_uncore_msr_exit_box(struct intel_uncore_box *box) +{ + if (box->pmu->pmu_idx == 0) + wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0); +} + +static struct intel_uncore_ops adl_uncore_msr_ops = { + .init_box = adl_uncore_msr_init_box, + .enable_box = adl_uncore_msr_enable_box, + .disable_box = adl_uncore_msr_disable_box, + 
.exit_box = adl_uncore_msr_exit_box, + .disable_event = snb_uncore_msr_disable_event, + .enable_event = snb_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, +}; + +static struct attribute *adl_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_threshold.attr, + NULL, +}; + +static const struct attribute_group adl_uncore_format_group = { + .name = "format", + .attrs = adl_uncore_formats_attr, +}; + +static struct intel_uncore_type adl_uncore_cbox = { + .name = "cbox", + .num_counters = 2, + .perf_ctr_bits = 44, + .perf_ctr = ADL_UNC_CBO_0_PER_CTR0, + .event_ctl = ADL_UNC_CBO_0_PERFEVTSEL0, + .event_mask = ADL_UNC_RAW_EVENT_MASK, + .msr_offset = ICL_UNC_CBO_MSR_OFFSET, + .ops = &adl_uncore_msr_ops, + .format_group = &adl_uncore_format_group, +}; + +static struct intel_uncore_type adl_uncore_arb = { + .name = "arb", + .num_counters = 2, + .num_boxes = 2, + .perf_ctr_bits = 44, + .perf_ctr = ADL_UNC_ARB_PER_CTR0, + .event_ctl = ADL_UNC_ARB_PERFEVTSEL0, + .event_mask = SNB_UNC_RAW_EVENT_MASK, + .msr_offset = ADL_UNC_ARB_MSR_OFFSET, + .constraints = snb_uncore_arb_constraints, + .ops = &adl_uncore_msr_ops, + .format_group = &snb_uncore_format_group, +}; + +static struct intel_uncore_type adl_uncore_clockbox = { + .name = "clock", + .num_counters = 1, + .num_boxes = 1, + .fixed_ctr_bits = 48, + .fixed_ctr = ADL_UNC_FIXED_CTR, + .fixed_ctl = ADL_UNC_FIXED_CTR_CTRL, + .single_fixed = 1, + .event_mask = SNB_UNC_CTL_EV_SEL_MASK, + .format_group = &icl_uncore_clock_format_group, + .ops = &adl_uncore_msr_ops, + .event_descs = icl_uncore_events, +}; + +static struct intel_uncore_type *adl_msr_uncores[] = { + &adl_uncore_cbox, + &adl_uncore_arb, + &adl_uncore_clockbox, + NULL, +}; + +void adl_uncore_cpu_init(void) +{ + adl_uncore_cbox.num_boxes = icl_get_cbox_num(); + uncore_msr_uncores = adl_msr_uncores; +} + enum { SNB_PCI_UNCORE_IMC, }; @@ -1203,6 +1326,14 @@ static const struct pci_device_id tgl_uncore_pci_ids[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_H_IMC), .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_1_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_2_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, { /* end: all zeroes */ } }; diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index b79951d0707c..63f097289a84 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -280,17 +280,17 @@ * | [63] | 00h | VALID - When set, indicates the CPU bus * numbers have been initialized. (RO) * |[62:48]| --- | Reserved - * |[47:40]| 00h | BUS_NUM_5 — Return the bus number BIOS assigned + * |[47:40]| 00h | BUS_NUM_5 - Return the bus number BIOS assigned * CPUBUSNO(5). (RO) - * |[39:32]| 00h | BUS_NUM_4 — Return the bus number BIOS assigned + * |[39:32]| 00h | BUS_NUM_4 - Return the bus number BIOS assigned * CPUBUSNO(4). (RO) - * |[31:24]| 00h | BUS_NUM_3 — Return the bus number BIOS assigned + * |[31:24]| 00h | BUS_NUM_3 - Return the bus number BIOS assigned * CPUBUSNO(3). (RO) - * |[23:16]| 00h | BUS_NUM_2 — Return the bus number BIOS assigned + * |[23:16]| 00h | BUS_NUM_2 - Return the bus number BIOS assigned * CPUBUSNO(2). 
(RO) - * |[15:8] | 00h | BUS_NUM_1 — Return the bus number BIOS assigned + * |[15:8] | 00h | BUS_NUM_1 - Return the bus number BIOS assigned * CPUBUSNO(1). (RO) - * | [7:0] | 00h | BUS_NUM_0 — Return the bus number BIOS assigned + * | [7:0] | 00h | BUS_NUM_0 - Return the bus number BIOS assigned * CPUBUSNO(0). (RO) */ #define SKX_MSR_CPU_BUS_NUMBER 0x300 @@ -1159,7 +1159,6 @@ enum { SNBEP_PCI_QPI_PORT0_FILTER, SNBEP_PCI_QPI_PORT1_FILTER, BDX_PCI_QPI_PORT2_FILTER, - HSWEP_PCI_PCU_3, }; static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event) @@ -2857,22 +2856,33 @@ static struct intel_uncore_type *hswep_msr_uncores[] = { NULL, }; -void hswep_uncore_cpu_init(void) +#define HSWEP_PCU_DID 0x2fc0 +#define HSWEP_PCU_CAPID4_OFFET 0x94 +#define hswep_get_chop(_cap) (((_cap) >> 6) & 0x3) + +static bool hswep_has_limit_sbox(unsigned int device) { - int pkg = boot_cpu_data.logical_proc_id; + struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL); + u32 capid4; + + if (!dev) + return false; + + pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4); + if (!hswep_get_chop(capid4)) + return true; + return false; +} + +void hswep_uncore_cpu_init(void) +{ if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; /* Detect 6-8 core systems with only two SBOXes */ - if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) { - u32 capid4; - - pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3], - 0x94, &capid4); - if (((capid4 >> 6) & 0x3) == 0) - hswep_uncore_sbox.num_boxes = 2; - } + if (hswep_has_limit_sbox(HSWEP_PCU_DID)) + hswep_uncore_sbox.num_boxes = 2; uncore_msr_uncores = hswep_msr_uncores; } @@ -3135,11 +3145,6 @@ static const struct pci_device_id hswep_uncore_pci_ids[] = { .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, SNBEP_PCI_QPI_PORT1_FILTER), }, - { /* PCU.3 (for Capability registers) */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0), - .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, - HSWEP_PCI_PCU_3), - }, { /* end: all zeroes */ } }; @@ -3231,27 +3236,18 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = { EVENT_CONSTRAINT_END }; +#define BDX_PCU_DID 0x6fc0 + void bdx_uncore_cpu_init(void) { - int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id); - if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; uncore_msr_uncores = bdx_msr_uncores; - /* BDX-DE doesn't have SBOX */ - if (boot_cpu_data.x86_model == 86) { - uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; /* Detect systems with no SBOXes */ - } else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) { - struct pci_dev *pdev; - u32 capid4; - - pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]; - pci_read_config_dword(pdev, 0x94, &capid4); - if (((capid4 >> 6) & 0x3) == 0) - bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; - } + if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID)) + uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; + hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints; } @@ -3472,11 +3468,6 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = { .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, BDX_PCI_QPI_PORT2_FILTER), }, - { /* PCU.3 (for Capability registers) */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0), - .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, - HSWEP_PCI_PCU_3), - }, { /* end: all zeroes */ } }; @@ -3684,7 +3675,8 @@ static struct 
intel_uncore_ops skx_uncore_iio_ops = { static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die) { - return pmu->type->topology[die] >> (pmu->pmu_idx * BUS_NUM_STRIDE); + return pmu->type->topology[die].configuration >> + (pmu->pmu_idx * BUS_NUM_STRIDE); } static umode_t @@ -3697,19 +3689,14 @@ skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die) } static ssize_t skx_iio_mapping_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { - struct pci_bus *bus = pci_find_next_bus(NULL); - struct intel_uncore_pmu *uncore_pmu = dev_to_uncore_pmu(dev); + struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev); struct dev_ext_attribute *ea = to_dev_ext_attribute(attr); long die = (long)ea->var; - /* - * Current implementation is for single segment configuration hence it's - * safe to take the segment value from the first available root bus. - */ - return sprintf(buf, "%04x:%02x\n", pci_domain_nr(bus), - skx_iio_stack(uncore_pmu, die)); + return sprintf(buf, "%04x:%02x\n", pmu->type->topology[die].segment, + skx_iio_stack(pmu, die)); } static int skx_msr_cpu_bus_read(int cpu, u64 *topology) @@ -3746,34 +3733,32 @@ static int die_to_cpu(int die) static int skx_iio_get_topology(struct intel_uncore_type *type) { - int i, ret; - struct pci_bus *bus = NULL; + int die, ret = -EPERM; - /* - * Verified single-segment environments only; disabled for multiple - * segment topologies for now except VMD domains. - * VMD domains start at 0x10000 to not clash with ACPI _SEG domains. - */ - while ((bus = pci_find_next_bus(bus)) - && (!pci_domain_nr(bus) || pci_domain_nr(bus) > 0xffff)) - ; - if (bus) - return -EPERM; - - type->topology = kcalloc(uncore_max_dies(), sizeof(u64), GFP_KERNEL); + type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology), + GFP_KERNEL); if (!type->topology) return -ENOMEM; - for (i = 0; i < uncore_max_dies(); i++) { - ret = skx_msr_cpu_bus_read(die_to_cpu(i), &type->topology[i]); - if (ret) { - kfree(type->topology); - type->topology = NULL; - return ret; - } + for (die = 0; die < uncore_max_dies(); die++) { + ret = skx_msr_cpu_bus_read(die_to_cpu(die), + &type->topology[die].configuration); + if (ret) + break; + + ret = uncore_die_to_segment(die); + if (ret < 0) + break; + + type->topology[die].segment = ret; } - return 0; + if (ret < 0) { + kfree(type->topology); + type->topology = NULL; + } + + return ret; } static struct attribute_group skx_iio_mapping_group = { @@ -3794,7 +3779,7 @@ static int skx_iio_set_mapping(struct intel_uncore_type *type) struct dev_ext_attribute *eas = NULL; ret = skx_iio_get_topology(type); - if (ret) + if (ret < 0) goto clear_attr_update; ret = -ENOMEM; diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c index 680404c58cb1..c853b28efa33 100644 --- a/arch/x86/events/msr.c +++ b/arch/x86/events/msr.c @@ -100,6 +100,8 @@ static bool test_intel(int idx, void *data) case INTEL_FAM6_TIGERLAKE_L: case INTEL_FAM6_TIGERLAKE: case INTEL_FAM6_ROCKETLAKE: + case INTEL_FAM6_ALDERLAKE: + case INTEL_FAM6_ALDERLAKE_L: if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF) return true; break; diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 53b2b5fc23bc..27fa85e7d4fd 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -15,6 +15,7 @@ #include <linux/perf_event.h> #include <asm/intel_ds.h> +#include <asm/cpu.h> /* To enable MSR tracing please use the generic trace points. 
*/ @@ -228,7 +229,6 @@ struct cpu_hw_events { */ struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */ unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; - unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; int enabled; int n_events; /* the # of events in the below arrays */ @@ -327,6 +327,8 @@ struct cpu_hw_events { int n_pair; /* Large increment events */ void *kfree_on_online[X86_PERF_KFREE_MAX]; + + struct pmu *pmu; }; #define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) { \ @@ -630,6 +632,71 @@ enum { x86_lbr_exclusive_max, }; +struct x86_hybrid_pmu { + struct pmu pmu; + const char *name; + u8 cpu_type; + cpumask_t supported_cpus; + union perf_capabilities intel_cap; + u64 intel_ctrl; + int max_pebs_events; + int num_counters; + int num_counters_fixed; + struct event_constraint unconstrained; + + u64 hw_cache_event_ids + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; + u64 hw_cache_extra_regs + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; + struct event_constraint *event_constraints; + struct event_constraint *pebs_constraints; + struct extra_reg *extra_regs; +}; + +static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu) +{ + return container_of(pmu, struct x86_hybrid_pmu, pmu); +} + +extern struct static_key_false perf_is_hybrid; +#define is_hybrid() static_branch_unlikely(&perf_is_hybrid) + +#define hybrid(_pmu, _field) \ +(*({ \ + typeof(&x86_pmu._field) __Fp = &x86_pmu._field; \ + \ + if (is_hybrid() && (_pmu)) \ + __Fp = &hybrid_pmu(_pmu)->_field; \ + \ + __Fp; \ +})) + +#define hybrid_var(_pmu, _var) \ +(*({ \ + typeof(&_var) __Fp = &_var; \ + \ + if (is_hybrid() && (_pmu)) \ + __Fp = &hybrid_pmu(_pmu)->_var; \ + \ + __Fp; \ +})) + +enum hybrid_pmu_type { + hybrid_big = 0x40, + hybrid_small = 0x20, + + hybrid_big_small = hybrid_big | hybrid_small, +}; + +#define X86_HYBRID_PMU_ATOM_IDX 0 +#define X86_HYBRID_PMU_CORE_IDX 1 + +#define X86_HYBRID_NUM_PMUS 2 + /* * struct x86_pmu - generic x86 pmu */ @@ -816,6 +883,19 @@ struct x86_pmu { int (*check_period) (struct perf_event *event, u64 period); int (*aux_output_match) (struct perf_event *event); + + int (*filter_match)(struct perf_event *event); + /* + * Hybrid support + * + * Most PMU capabilities are the same among different hybrid PMUs. + * The global x86_pmu saves the architecture capabilities, which + * are available for all PMUs. The hybrid_pmu only includes the + * unique capabilities. 
+ */ + int num_hybrid_pmus; + struct x86_hybrid_pmu *hybrid_pmu; + u8 (*get_hybrid_cpu_type) (void); }; struct x86_perf_task_context_opt { @@ -905,7 +985,23 @@ static struct perf_pmu_events_ht_attr event_attr_##v = { \ .event_str_ht = ht, \ } -struct pmu *x86_get_pmu(void); +#define EVENT_ATTR_STR_HYBRID(_name, v, str, _pmu) \ +static struct perf_pmu_events_hybrid_attr event_attr_##v = { \ + .attr = __ATTR(_name, 0444, events_hybrid_sysfs_show, NULL),\ + .id = 0, \ + .event_str = str, \ + .pmu_type = _pmu, \ +} + +#define FORMAT_HYBRID_PTR(_id) (&format_attr_hybrid_##_id.attr.attr) + +#define FORMAT_ATTR_HYBRID(_name, _pmu) \ +static struct perf_pmu_format_hybrid_attr format_attr_hybrid_##_name = {\ + .attr = __ATTR_RO(_name), \ + .pmu_type = _pmu, \ +} + +struct pmu *x86_get_pmu(unsigned int cpu); extern struct x86_pmu x86_pmu __read_mostly; static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx) @@ -964,6 +1060,9 @@ static inline int x86_pmu_rdpmc_index(int index) return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index; } +bool check_hw_exists(struct pmu *pmu, int num_counters, + int num_counters_fixed); + int x86_add_exclusive(unsigned int what); void x86_del_exclusive(unsigned int what); @@ -1027,6 +1126,11 @@ void x86_pmu_enable_event(struct perf_event *event); int x86_pmu_handle_irq(struct pt_regs *regs); +void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed, + u64 intel_ctrl); + +void x86_pmu_update_cpu_context(struct pmu *pmu, int cpu); + extern struct event_constraint emptyconstraint; extern struct event_constraint unconstrained; @@ -1067,10 +1171,15 @@ ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page); ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr, char *page); +ssize_t events_hybrid_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *page); -static inline bool fixed_counter_disabled(int i) +static inline bool fixed_counter_disabled(int i, struct pmu *pmu) { - return !(x86_pmu.intel_ctrl >> (i + INTEL_PMC_IDX_FIXED)); + u64 intel_ctrl = hybrid(pmu, intel_ctrl); + + return !(intel_ctrl >> (i + INTEL_PMC_IDX_FIXED)); } #ifdef CONFIG_CPU_SUP_AMD @@ -1154,6 +1263,8 @@ extern struct event_constraint intel_glm_pebs_event_constraints[]; extern struct event_constraint intel_glp_pebs_event_constraints[]; +extern struct event_constraint intel_grt_pebs_event_constraints[]; + extern struct event_constraint intel_nehalem_pebs_event_constraints[]; extern struct event_constraint intel_westmere_pebs_event_constraints[]; diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c index f42a70496a24..84a1042c3b01 100644 --- a/arch/x86/events/rapl.c +++ b/arch/x86/events/rapl.c @@ -800,6 +800,8 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &model_hsx), X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &model_skl), X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &model_skl), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &model_skl), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &model_skl), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &model_spr), X86_MATCH_VENDOR_FAM(AMD, 0x17, &model_amd_fam17h), X86_MATCH_VENDOR_FAM(HYGON, 0x18, &model_amd_fam17h), diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c index e68827e604ad..949d845c922b 100644 --- a/arch/x86/events/zhaoxin/core.c +++ b/arch/x86/events/zhaoxin/core.c @@ -494,7 +494,7 @@ static __init void zhaoxin_arch_events_quirk(void) { int bit; - /* 
disable event that reported as not presend by cpuid */ + /* disable event that reported as not present by cpuid */ for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(zx_arch_events_map)) { zx_pmon_event_map[zx_arch_events_map[bit].id] = 0; pr_warn("CPUID marked event: \'%s\' unavailable\n", diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c index 284e73661a18..90e682a92820 100644 --- a/arch/x86/hyperv/hv_apic.c +++ b/arch/x86/hyperv/hv_apic.c @@ -60,9 +60,11 @@ static u32 hv_apic_read(u32 reg) switch (reg) { case APIC_EOI: rdmsr(HV_X64_MSR_EOI, reg_val, hi); + (void)hi; return reg_val; case APIC_TASKPRI: rdmsr(HV_X64_MSR_TPR, reg_val, hi); + (void)hi; return reg_val; default: @@ -103,7 +105,7 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) struct hv_send_ipi_ex *ipi_arg; unsigned long flags; int nr_bank = 0; - int ret = 1; + u64 status = HV_STATUS_INVALID_PARAMETER; if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) return false; @@ -128,19 +130,19 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) if (!nr_bank) ipi_arg->vp_set.format = HV_GENERIC_SET_ALL; - ret = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank, + status = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank, ipi_arg, NULL); ipi_mask_ex_done: local_irq_restore(flags); - return ((ret == 0) ? true : false); + return hv_result_success(status); } static bool __send_ipi_mask(const struct cpumask *mask, int vector) { int cur_cpu, vcpu; struct hv_send_ipi ipi_arg; - int ret = 1; + u64 status; trace_hyperv_send_ipi_mask(mask, vector); @@ -184,9 +186,9 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector) __set_bit(vcpu, (unsigned long *)&ipi_arg.cpu_mask); } - ret = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector, + status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector, ipi_arg.cpu_mask); - return ((ret == 0) ? 
true : false); + return hv_result_success(status); do_ex_hypercall: return __send_ipi_mask_ex(mask, vector); @@ -195,6 +197,7 @@ do_ex_hypercall: static bool __send_ipi_one(int cpu, int vector) { int vp = hv_cpu_number_to_vp_number(cpu); + u64 status; trace_hyperv_send_ipi_one(cpu, vector); @@ -207,7 +210,8 @@ static bool __send_ipi_one(int cpu, int vector) if (vp >= 64) return __send_ipi_mask_ex(cpumask_of(cpu), vector); - return !hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp)); + status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp)); + return hv_result_success(status); } static void hv_send_ipi(int cpu, int vector) diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index b81047dec1da..bb0ae4b5c00f 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -54,28 +54,6 @@ EXPORT_SYMBOL_GPL(hyperv_pcpu_output_arg); u32 hv_max_vp_index; EXPORT_SYMBOL_GPL(hv_max_vp_index); -void *hv_alloc_hyperv_page(void) -{ - BUILD_BUG_ON(PAGE_SIZE != HV_HYP_PAGE_SIZE); - - return (void *)__get_free_page(GFP_KERNEL); -} -EXPORT_SYMBOL_GPL(hv_alloc_hyperv_page); - -void *hv_alloc_hyperv_zeroed_page(void) -{ - BUILD_BUG_ON(PAGE_SIZE != HV_HYP_PAGE_SIZE); - - return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO); -} -EXPORT_SYMBOL_GPL(hv_alloc_hyperv_zeroed_page); - -void hv_free_hyperv_page(unsigned long addr) -{ - free_page(addr); -} -EXPORT_SYMBOL_GPL(hv_free_hyperv_page); - static int hv_cpu_init(unsigned int cpu) { u64 msr_vp_index; @@ -97,7 +75,7 @@ static int hv_cpu_init(unsigned int cpu) *output_arg = page_address(pg + 1); } - hv_get_vp_index(msr_vp_index); + msr_vp_index = hv_get_register(HV_REGISTER_VP_INDEX); hv_vp_index[smp_processor_id()] = msr_vp_index; @@ -162,7 +140,7 @@ EXPORT_SYMBOL_GPL(hyperv_stop_tsc_emulation); static inline bool hv_reenlightenment_available(void) { /* - * Check for required features and priviliges to make TSC frequency + * Check for required features and privileges to make TSC frequency * change notifications work. */ return ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS && @@ -292,7 +270,7 @@ static int hv_suspend(void) /* * Reset the hypercall page as it is going to be invalidated - * accross hibernation. Setting hv_hypercall_pg to NULL ensures + * across hibernation. Setting hv_hypercall_pg to NULL ensures * that any subsequent hypercall operation fails safely instead of * crashing due to an access of an invalid page. The hypercall page * pointer is restored on resume. @@ -349,7 +327,7 @@ static void __init hv_stimer_setup_percpu_clockev(void) * Ignore any errors in setting up stimer clockevents * as we can run with the LAPIC timer as a fallback. */ - (void)hv_stimer_alloc(); + (void)hv_stimer_alloc(false); /* * Still register the LAPIC timer, because the direct-mode STIMER is @@ -369,7 +347,7 @@ static void __init hv_get_partition_id(void) local_irq_save(flags); output_page = *this_cpu_ptr(hyperv_pcpu_output_arg); status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output_page); - if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) { + if (!hv_result_success(status)) { /* No point in proceeding if this failed */ pr_err("Failed to get partition ID: %lld\n", status); BUG(); @@ -520,6 +498,8 @@ void __init hyperv_init(void) x86_init.irqs.create_pci_msi_domain = hv_create_pci_msi_domain; #endif + /* Query the VMs extended capability once, so that it can be cached. 
*/ + hv_query_ext_cap(0); return; remove_cpuhp_state: @@ -593,33 +573,6 @@ void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die) } EXPORT_SYMBOL_GPL(hyperv_report_panic); -/** - * hyperv_report_panic_msg - report panic message to Hyper-V - * @pa: physical address of the panic page containing the message - * @size: size of the message in the page - */ -void hyperv_report_panic_msg(phys_addr_t pa, size_t size) -{ - /* - * P3 to contain the physical address of the panic page & P4 to - * contain the size of the panic data in that page. Rest of the - * registers are no-op when the NOTIFY_MSG flag is set. - */ - wrmsrl(HV_X64_MSR_CRASH_P0, 0); - wrmsrl(HV_X64_MSR_CRASH_P1, 0); - wrmsrl(HV_X64_MSR_CRASH_P2, 0); - wrmsrl(HV_X64_MSR_CRASH_P3, pa); - wrmsrl(HV_X64_MSR_CRASH_P4, size); - - /* - * Let Hyper-V know there is crash data available along with - * the panic message. - */ - wrmsrl(HV_X64_MSR_CRASH_CTL, - (HV_CRASH_CTL_CRASH_NOTIFY | HV_CRASH_CTL_CRASH_NOTIFY_MSG)); -} -EXPORT_SYMBOL_GPL(hyperv_report_panic_msg); - bool hv_is_hyperv_initialized(void) { union hv_x64_msr_hypercall_contents hypercall_msr; @@ -650,7 +603,7 @@ EXPORT_SYMBOL_GPL(hv_is_hibernation_supported); enum hv_isolation_type hv_get_isolation_type(void) { - if (!(ms_hyperv.features_b & HV_ISOLATION)) + if (!(ms_hyperv.priv_high & HV_ISOLATION)) return HV_ISOLATION_TYPE_NONE; return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b); } @@ -661,3 +614,50 @@ bool hv_is_isolation_supported(void) return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE; } EXPORT_SYMBOL_GPL(hv_is_isolation_supported); + +/* Bit mask of the extended capability to query: see HV_EXT_CAPABILITY_xxx */ +bool hv_query_ext_cap(u64 cap_query) +{ + /* + * The address of the 'hv_extended_cap' variable will be used as an + * output parameter to the hypercall below and so it should be + * compatible with 'virt_to_phys'. Which means, it's address should be + * directly mapped. Use 'static' to keep it compatible; stack variables + * can be virtually mapped, making them imcompatible with + * 'virt_to_phys'. + * Hypercall input/output addresses should also be 8-byte aligned. + */ + static u64 hv_extended_cap __aligned(8); + static bool hv_extended_cap_queried; + u64 status; + + /* + * Querying extended capabilities is an extended hypercall. Check if the + * partition supports extended hypercall, first. + */ + if (!(ms_hyperv.priv_high & HV_ENABLE_EXTENDED_HYPERCALLS)) + return false; + + /* Extended capabilities do not change at runtime. */ + if (hv_extended_cap_queried) + return hv_extended_cap & cap_query; + + status = hv_do_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, NULL, + &hv_extended_cap); + + /* + * The query extended capabilities hypercall should not fail under + * any normal circumstances. Avoid repeatedly making the hypercall, on + * error. 
+ */ + hv_extended_cap_queried = true; + status &= HV_HYPERCALL_RESULT_MASK; + if (status != HV_STATUS_SUCCESS) { + pr_err("Hyper-V: Extended query capabilities hypercall failed 0x%llx\n", + status); + return false; + } + + return hv_extended_cap & cap_query; +} +EXPORT_SYMBOL_GPL(hv_query_ext_cap); diff --git a/arch/x86/hyperv/hv_proc.c b/arch/x86/hyperv/hv_proc.c index 60461e598239..68a0843d4750 100644 --- a/arch/x86/hyperv/hv_proc.c +++ b/arch/x86/hyperv/hv_proc.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> -#include <linux/version.h> #include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/clockchips.h> @@ -93,10 +92,9 @@ int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages) status = hv_do_rep_hypercall(HVCALL_DEPOSIT_MEMORY, page_count, 0, input_page, NULL); local_irq_restore(flags); - - if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) { + if (!hv_result_success(status)) { pr_err("Failed to deposit pages: %lld\n", status); - ret = status; + ret = hv_result(status); goto err_free_allocations; } @@ -122,7 +120,7 @@ int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id) struct hv_add_logical_processor_out *output; u64 status; unsigned long flags; - int ret = 0; + int ret = HV_STATUS_SUCCESS; int pxm = node_to_pxm(node); /* @@ -148,13 +146,11 @@ int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id) input, output); local_irq_restore(flags); - status &= HV_HYPERCALL_RESULT_MASK; - - if (status != HV_STATUS_INSUFFICIENT_MEMORY) { - if (status != HV_STATUS_SUCCESS) { + if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) { + if (!hv_result_success(status)) { pr_err("%s: cpu %u apic ID %u, %lld\n", __func__, lp_index, apic_id, status); - ret = status; + ret = hv_result(status); } break; } @@ -169,7 +165,7 @@ int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags) struct hv_create_vp *input; u64 status; unsigned long irq_flags; - int ret = 0; + int ret = HV_STATUS_SUCCESS; int pxm = node_to_pxm(node); /* Root VPs don't seem to need pages deposited */ @@ -200,13 +196,11 @@ int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags) status = hv_do_hypercall(HVCALL_CREATE_VP, input, NULL); local_irq_restore(irq_flags); - status &= HV_HYPERCALL_RESULT_MASK; - - if (status != HV_STATUS_INSUFFICIENT_MEMORY) { - if (status != HV_STATUS_SUCCESS) { + if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) { + if (!hv_result_success(status)) { pr_err("%s: vcpu %u, lp %u, %lld\n", __func__, vp_index, flags, status); - ret = status; + ret = hv_result(status); } break; } diff --git a/arch/x86/hyperv/hv_spinlock.c b/arch/x86/hyperv/hv_spinlock.c index f3270c1fc48c..91cfe698bde0 100644 --- a/arch/x86/hyperv/hv_spinlock.c +++ b/arch/x86/hyperv/hv_spinlock.c @@ -25,7 +25,6 @@ static void hv_qlock_kick(int cpu) static void hv_qlock_wait(u8 *byte, u8 val) { - unsigned long msr_val; unsigned long flags; if (in_nmi()) @@ -48,8 +47,13 @@ static void hv_qlock_wait(u8 *byte, u8 val) /* * Only issue the rdmsrl() when the lock state has not changed. 
*/ - if (READ_ONCE(*byte) == val) + if (READ_ONCE(*byte) == val) { + unsigned long msr_val; + rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val); + + (void)msr_val; + } local_irq_restore(flags); } diff --git a/arch/x86/hyperv/irqdomain.c b/arch/x86/hyperv/irqdomain.c index 4421a8d92e23..514fc64e23d5 100644 --- a/arch/x86/hyperv/irqdomain.c +++ b/arch/x86/hyperv/irqdomain.c @@ -63,10 +63,10 @@ static int hv_map_interrupt(union hv_device_id device_id, bool level, local_irq_restore(flags); - if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) + if (!hv_result_success(status)) pr_err("%s: hypercall failed, status %lld\n", __func__, status); - return status & HV_HYPERCALL_RESULT_MASK; + return hv_result(status); } static int hv_unmap_interrupt(u64 id, struct hv_interrupt_entry *old_entry) @@ -88,7 +88,7 @@ static int hv_unmap_interrupt(u64 id, struct hv_interrupt_entry *old_entry) status = hv_do_hypercall(HVCALL_UNMAP_DEVICE_INTERRUPT, input, NULL); local_irq_restore(flags); - return status & HV_HYPERCALL_RESULT_MASK; + return hv_result(status); } #ifdef CONFIG_PCI_MSI diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c index 2c87350c1fb0..bd13736d0c05 100644 --- a/arch/x86/hyperv/mmu.c +++ b/arch/x86/hyperv/mmu.c @@ -52,16 +52,16 @@ static inline int fill_gva_list(u64 gva_list[], int offset, return gva_n - offset; } -static void hyperv_flush_tlb_others(const struct cpumask *cpus, - const struct flush_tlb_info *info) +static void hyperv_flush_tlb_multi(const struct cpumask *cpus, + const struct flush_tlb_info *info) { int cpu, vcpu, gva_n, max_gvas; struct hv_tlb_flush **flush_pcpu; struct hv_tlb_flush *flush; - u64 status = U64_MAX; + u64 status; unsigned long flags; - trace_hyperv_mmu_flush_tlb_others(cpus, info); + trace_hyperv_mmu_flush_tlb_multi(cpus, info); if (!hv_hypercall_pg) goto do_native; @@ -161,10 +161,10 @@ do_ex_hypercall: check_status: local_irq_restore(flags); - if (!(status & HV_HYPERCALL_RESULT_MASK)) + if (hv_result_success(status)) return; do_native: - native_flush_tlb_others(cpus, info); + native_flush_tlb_multi(cpus, info); } static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus, @@ -176,7 +176,7 @@ static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus, u64 status; if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) - return U64_MAX; + return HV_STATUS_INVALID_PARAMETER; flush_pcpu = (struct hv_tlb_flush_ex **) this_cpu_ptr(hyperv_pcpu_input_arg); @@ -201,7 +201,7 @@ static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus, flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K; nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus); if (nr_bank < 0) - return U64_MAX; + return HV_STATUS_INVALID_PARAMETER; /* * We can flush not more than max_gvas with one hypercall. 
Flush the @@ -239,6 +239,6 @@ void hyperv_setup_mmu_ops(void) return; pr_info("Using hypercall for remote TLB flush\n"); - pv_ops.mmu.flush_tlb_others = hyperv_flush_tlb_others; + pv_ops.mmu.flush_tlb_multi = hyperv_flush_tlb_multi; pv_ops.mmu.tlb_remove_table = tlb_remove_table; } diff --git a/arch/x86/hyperv/nested.c b/arch/x86/hyperv/nested.c index dd0a843f766d..5d70968c8538 100644 --- a/arch/x86/hyperv/nested.c +++ b/arch/x86/hyperv/nested.c @@ -47,7 +47,7 @@ int hyperv_flush_guest_mapping(u64 as) flush, NULL); local_irq_restore(flags); - if (!(status & HV_HYPERCALL_RESULT_MASK)) + if (hv_result_success(status)) ret = 0; fault: @@ -92,7 +92,7 @@ int hyperv_flush_guest_mapping_range(u64 as, { struct hv_guest_mapping_flush_list **flush_pcpu; struct hv_guest_mapping_flush_list *flush; - u64 status = 0; + u64 status; unsigned long flags; int ret = -ENOTSUPP; int gpa_n = 0; @@ -125,10 +125,10 @@ int hyperv_flush_guest_mapping_range(u64 as, local_irq_restore(flags); - if (!(status & HV_HYPERCALL_RESULT_MASK)) + if (hv_result_success(status)) ret = 0; else - ret = status; + ret = hv_result(status); fault: trace_hyperv_nested_flush_guest_mapping_range(as, ret); return ret; diff --git a/arch/x86/include/asm/agp.h b/arch/x86/include/asm/agp.h index 62da760d6d5a..cd7b14322035 100644 --- a/arch/x86/include/asm/agp.h +++ b/arch/x86/include/asm/agp.h @@ -9,7 +9,7 @@ * Functions to keep the agpgart mappings coherent with the MMU. The * GART gives the CPU a physical alias of pages in memory. The alias * region is mapped uncacheable. Make sure there are no conflicting - * mappings with different cachability attributes for the same + * mappings with different cacheability attributes for the same * page. This avoids data corruption on some CPUs. */ diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h deleted file mode 100644 index 464034db299f..000000000000 --- a/arch/x86/include/asm/alternative-asm.h +++ /dev/null @@ -1,114 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_X86_ALTERNATIVE_ASM_H -#define _ASM_X86_ALTERNATIVE_ASM_H - -#ifdef __ASSEMBLY__ - -#include <asm/asm.h> - -#ifdef CONFIG_SMP - .macro LOCK_PREFIX -672: lock - .pushsection .smp_locks,"a" - .balign 4 - .long 672b - . - .popsection - .endm -#else - .macro LOCK_PREFIX - .endm -#endif - -/* - * objtool annotation to ignore the alternatives and only consider the original - * instruction(s). - */ -.macro ANNOTATE_IGNORE_ALTERNATIVE - .Lannotate_\@: - .pushsection .discard.ignore_alts - .long .Lannotate_\@ - . - .popsection -.endm - -/* - * Issue one struct alt_instr descriptor entry (need to put it into - * the section .altinstructions, see below). This entry contains - * enough information for the alternatives patching code to patch an - * instruction. See apply_alternatives(). - */ -.macro altinstruction_entry orig alt feature orig_len alt_len pad_len - .long \orig - . - .long \alt - . - .word \feature - .byte \orig_len - .byte \alt_len - .byte \pad_len -.endm - -/* - * Define an alternative between two instructions. If @feature is - * present, early code in apply_alternatives() replaces @oldinstr with - * @newinstr. ".skip" directive takes care of proper instruction padding - * in case @newinstr is longer than @oldinstr. 
- */ -.macro ALTERNATIVE oldinstr, newinstr, feature -140: - \oldinstr -141: - .skip -(((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)),0x90 -142: - - .pushsection .altinstructions,"a" - altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f,142b-141b - .popsection - - .pushsection .altinstr_replacement,"ax" -143: - \newinstr -144: - .popsection -.endm - -#define old_len 141b-140b -#define new_len1 144f-143f -#define new_len2 145f-144f - -/* - * gas compatible max based on the idea from: - * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax - * - * The additional "-" is needed because gas uses a "true" value of -1. - */ -#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b))))) - - -/* - * Same as ALTERNATIVE macro above but for two alternatives. If CPU - * has @feature1, it replaces @oldinstr with @newinstr1. If CPU has - * @feature2, it replaces @oldinstr with @feature2. - */ -.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2 -140: - \oldinstr -141: - .skip -((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \ - (alt_max_short(new_len1, new_len2) - (old_len)),0x90 -142: - - .pushsection .altinstructions,"a" - altinstruction_entry 140b,143f,\feature1,142b-140b,144f-143f,142b-141b - altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f,142b-141b - .popsection - - .pushsection .altinstr_replacement,"ax" -143: - \newinstr1 -144: - \newinstr2 -145: - .popsection -.endm - -#endif /* __ASSEMBLY__ */ - -#endif /* _ASM_X86_ALTERNATIVE_ASM_H */ diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 13adca37c99a..a3c2315aca12 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -2,13 +2,17 @@ #ifndef _ASM_X86_ALTERNATIVE_H #define _ASM_X86_ALTERNATIVE_H -#ifndef __ASSEMBLY__ - #include <linux/types.h> -#include <linux/stddef.h> #include <linux/stringify.h> #include <asm/asm.h> +#define ALTINSTR_FLAG_INV (1 << 15) +#define ALT_NOT(feat) ((feat) | ALTINSTR_FLAG_INV) + +#ifndef __ASSEMBLY__ + +#include <linux/stddef.h> + /* * Alternative inline assembly for SMP. 
* @@ -61,7 +65,6 @@ struct alt_instr { u16 cpuid; /* cpuid bit set for replacement */ u8 instrlen; /* length of original instruction */ u8 replacementlen; /* length of new instruction */ - u8 padlen; /* length of build-time padding */ } __packed; /* @@ -100,7 +103,6 @@ static inline int alternatives_text_reserved(void *start, void *end) #define alt_end_marker "663" #define alt_slen "662b-661b" -#define alt_pad_len alt_end_marker"b-662b" #define alt_total_slen alt_end_marker"b-661b" #define alt_rlen(num) e_replacement(num)"f-"b_replacement(num)"f" @@ -147,10 +149,9 @@ static inline int alternatives_text_reserved(void *start, void *end) " .long " b_replacement(num)"f - .\n" /* new instruction */ \ " .word " __stringify(feature) "\n" /* feature bit */ \ " .byte " alt_total_slen "\n" /* source len */ \ - " .byte " alt_rlen(num) "\n" /* replacement len */ \ - " .byte " alt_pad_len "\n" /* pad len */ + " .byte " alt_rlen(num) "\n" /* replacement len */ -#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \ +#define ALTINSTR_REPLACEMENT(newinstr, num) /* replacement */ \ "# ALT: replacement " #num "\n" \ b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n" @@ -161,7 +162,7 @@ static inline int alternatives_text_reserved(void *start, void *end) ALTINSTR_ENTRY(feature, 1) \ ".popsection\n" \ ".pushsection .altinstr_replacement, \"ax\"\n" \ - ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ + ALTINSTR_REPLACEMENT(newinstr, 1) \ ".popsection\n" #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\ @@ -171,10 +172,15 @@ static inline int alternatives_text_reserved(void *start, void *end) ALTINSTR_ENTRY(feature2, 2) \ ".popsection\n" \ ".pushsection .altinstr_replacement, \"ax\"\n" \ - ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ - ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ + ALTINSTR_REPLACEMENT(newinstr1, 1) \ + ALTINSTR_REPLACEMENT(newinstr2, 2) \ ".popsection\n" +/* If @feature is set, patch in @newinstr_yes, otherwise @newinstr_no. */ +#define ALTERNATIVE_TERNARY(oldinstr, feature, newinstr_yes, newinstr_no) \ + ALTERNATIVE_2(oldinstr, newinstr_no, X86_FEATURE_ALWAYS, \ + newinstr_yes, feature) + #define ALTERNATIVE_3(oldinsn, newinsn1, feat1, newinsn2, feat2, newinsn3, feat3) \ OLDINSTR_3(oldinsn, 1, 2, 3) \ ".pushsection .altinstructions,\"a\"\n" \ @@ -183,9 +189,9 @@ static inline int alternatives_text_reserved(void *start, void *end) ALTINSTR_ENTRY(feat3, 3) \ ".popsection\n" \ ".pushsection .altinstr_replacement, \"ax\"\n" \ - ALTINSTR_REPLACEMENT(newinsn1, feat1, 1) \ - ALTINSTR_REPLACEMENT(newinsn2, feat2, 2) \ - ALTINSTR_REPLACEMENT(newinsn3, feat3, 3) \ + ALTINSTR_REPLACEMENT(newinsn1, 1) \ + ALTINSTR_REPLACEMENT(newinsn2, 2) \ + ALTINSTR_REPLACEMENT(newinsn3, 3) \ ".popsection\n" /* @@ -206,15 +212,15 @@ static inline int alternatives_text_reserved(void *start, void *end) #define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \ asm_inline volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory") +#define alternative_ternary(oldinstr, feature, newinstr_yes, newinstr_no) \ + asm_inline volatile(ALTERNATIVE_TERNARY(oldinstr, feature, newinstr_yes, newinstr_no) ::: "memory") + /* * Alternative inline assembly with input. * * Peculiarities: * No memory clobber here. * Argument numbers start with 1. - * Best is to use constraints that are fixed size (like (%1) ... "r") - * If you use variable sized constraints like "m" or "g" in the - * replacement make sure to pad to the worst case length. 
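ALTERNATIVE_TERNARY above is plain ALTERNATIVE_2 with X86_FEATURE_ALWAYS carrying the "no" side: every CPU has the ALWAYS bit, so newinstr_no is always patched over oldinstr, and a CPU that also has @feature then gets newinstr_yes patched in on top; oldinstr only ever runs before alternatives are applied. A hedged usage sketch of the C-level wrapper; the feature bit and instructions are arbitrary and chosen only to show the plumbing:

	/* Illustration only: serialize with MFENCE where SSE2 exists, else do nothing. */
	alternative_ternary("nop",		/* placeholder, runs only before patching  */
			    X86_FEATURE_XMM2,	/* stand-in for whatever bit a caller uses */
			    "mfence",		/* patched in when the bit is set          */
			    "nop");		/* patched in when it is not               */

The _static_cpu_has() rewrite later in this series is the same idea, with an empty "yes" replacement and a jmp as the "no" replacement.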
* Leaving an unused argument 0 to keep API compatibility. */ #define alternative_input(oldinstr, newinstr, feature, input...) \ @@ -271,6 +277,115 @@ static inline int alternatives_text_reserved(void *start, void *end) */ #define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr +#else /* __ASSEMBLY__ */ + +#ifdef CONFIG_SMP + .macro LOCK_PREFIX +672: lock + .pushsection .smp_locks,"a" + .balign 4 + .long 672b - . + .popsection + .endm +#else + .macro LOCK_PREFIX + .endm +#endif + +/* + * objtool annotation to ignore the alternatives and only consider the original + * instruction(s). + */ +.macro ANNOTATE_IGNORE_ALTERNATIVE + .Lannotate_\@: + .pushsection .discard.ignore_alts + .long .Lannotate_\@ - . + .popsection +.endm + +/* + * Issue one struct alt_instr descriptor entry (need to put it into + * the section .altinstructions, see below). This entry contains + * enough information for the alternatives patching code to patch an + * instruction. See apply_alternatives(). + */ +.macro altinstruction_entry orig alt feature orig_len alt_len + .long \orig - . + .long \alt - . + .word \feature + .byte \orig_len + .byte \alt_len +.endm + +/* + * Define an alternative between two instructions. If @feature is + * present, early code in apply_alternatives() replaces @oldinstr with + * @newinstr. ".skip" directive takes care of proper instruction padding + * in case @newinstr is longer than @oldinstr. + */ +.macro ALTERNATIVE oldinstr, newinstr, feature +140: + \oldinstr +141: + .skip -(((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)),0x90 +142: + + .pushsection .altinstructions,"a" + altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f + .popsection + + .pushsection .altinstr_replacement,"ax" +143: + \newinstr +144: + .popsection +.endm + +#define old_len 141b-140b +#define new_len1 144f-143f +#define new_len2 145f-144f + +/* + * gas compatible max based on the idea from: + * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax + * + * The additional "-" is needed because gas uses a "true" value of -1. + */ +#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b))))) + + +/* + * Same as ALTERNATIVE macro above but for two alternatives. If CPU + * has @feature1, it replaces @oldinstr with @newinstr1. If CPU has + * @feature2, it replaces @oldinstr with @feature2. + */ +.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2 +140: + \oldinstr +141: + .skip -((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \ + (alt_max_short(new_len1, new_len2) - (old_len)),0x90 +142: + + .pushsection .altinstructions,"a" + altinstruction_entry 140b,143f,\feature1,142b-140b,144f-143f + altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f + .popsection + + .pushsection .altinstr_replacement,"ax" +143: + \newinstr1 +144: + \newinstr2 +145: + .popsection +.endm + +/* If @feature is set, patch in @newinstr_yes, otherwise @newinstr_no. 
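The alt_max_short() expression re-added above is the standard branchless max, with one twist for gas, where a true comparison evaluates to -1 rather than 1. Worked through in C terms (illustration only, not kernel code):

/*
 * max(a, b) == a ^ ((a ^ b) & mask), where mask is all-ones when a < b
 * and zero otherwise:
 *   a <  b:  a ^ ((a ^ b) & ~0UL) == a ^ a ^ b == b
 *   a >= b:  a ^ ((a ^ b) &  0  ) == a
 *
 * In C, (a < b) is 0 or 1, so -(a < b) gives the mask directly.  In gas,
 * (a < b) is 0 or -1, hence the extra negation -(-((a) < (b))) in the
 * macro to get back to the same zero/all-ones mask.
 */
static inline unsigned long alt_max_short_c(unsigned long a, unsigned long b)
{
	return a ^ ((a ^ b) & -(unsigned long)(a < b));
}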
*/ +#define ALTERNATIVE_TERNARY(oldinstr, feature, newinstr_yes, newinstr_no) \ + ALTERNATIVE_2 oldinstr, newinstr_no, X86_FEATURE_ALWAYS, \ + newinstr_yes, feature + #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_ALTERNATIVE_H */ diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h index 51e2bf27cc9b..4cb726c71ed8 100644 --- a/arch/x86/include/asm/asm-prototypes.h +++ b/arch/x86/include/asm/asm-prototypes.h @@ -19,18 +19,19 @@ extern void cmpxchg8b_emu(void); #ifdef CONFIG_RETPOLINE -#define DECL_INDIRECT_THUNK(reg) \ +#undef GEN +#define GEN(reg) \ extern asmlinkage void __x86_indirect_thunk_ ## reg (void); - -#define DECL_RETPOLINE(reg) \ - extern asmlinkage void __x86_retpoline_ ## reg (void); +#include <asm/GEN-for-each-reg.h> #undef GEN -#define GEN(reg) DECL_INDIRECT_THUNK(reg) +#define GEN(reg) \ + extern asmlinkage void __x86_indirect_alt_call_ ## reg (void); #include <asm/GEN-for-each-reg.h> #undef GEN -#define GEN(reg) DECL_RETPOLINE(reg) +#define GEN(reg) \ + extern asmlinkage void __x86_indirect_alt_jmp_ ## reg (void); #include <asm/GEN-for-each-reg.h> #endif /* CONFIG_RETPOLINE */ diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index 297fa12e7e27..84b87538a15d 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h @@ -7,18 +7,9 @@ /* * Despite that some emulators terminate on UD2, we use it for WARN(). - * - * Since various instruction decoders/specs disagree on the encoding of - * UD0/UD1. */ - -#define ASM_UD0 ".byte 0x0f, 0xff" /* + ModRM (for Intel) */ -#define ASM_UD1 ".byte 0x0f, 0xb9" /* + ModRM */ #define ASM_UD2 ".byte 0x0f, 0x0b" - -#define INSN_UD0 0xff0f #define INSN_UD2 0x0b0f - #define LEN_UD2 2 #ifdef CONFIG_GENERIC_BUG diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h index 4d4ec5cbdc51..94fbe6ae7431 100644 --- a/arch/x86/include/asm/cmpxchg.h +++ b/arch/x86/include/asm/cmpxchg.h @@ -22,7 +22,7 @@ extern void __add_wrong_size(void) /* * Constants for operation sizes. On 32-bit, the 64-bit size it set to * -1 because sizeof will never return -1, thereby making those switch - * case statements guaranteeed dead code which the compiler will + * case statements guaranteed dead code which the compiler will * eliminate, and allowing the "missing symbol in the default case" to * indicate a usage error. 
*/ diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index da78ccbd493b..33d41e350c79 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -41,12 +41,14 @@ unsigned int x86_family(unsigned int sig); unsigned int x86_model(unsigned int sig); unsigned int x86_stepping(unsigned int sig); #ifdef CONFIG_CPU_SUP_INTEL -extern void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c); +extern void __init sld_setup(struct cpuinfo_x86 *c); extern void switch_to_sld(unsigned long tifn); extern bool handle_user_split_lock(struct pt_regs *regs, long error_code); extern bool handle_guest_split_lock(unsigned long ip); +extern void handle_bus_lock(struct pt_regs *regs); +u8 get_this_hybrid_cpu_type(void); #else -static inline void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) {} +static inline void __init sld_setup(struct cpuinfo_x86 *c) {} static inline void switch_to_sld(unsigned long tifn) {} static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code) { @@ -57,6 +59,13 @@ static inline bool handle_guest_split_lock(unsigned long ip) { return false; } + +static inline void handle_bus_lock(struct pt_regs *regs) {} + +static inline u8 get_this_hybrid_cpu_type(void) +{ + return 0; +} #endif #ifdef CONFIG_IA32_FEAT_CTL void init_ia32_feat_ctl(struct cpuinfo_x86 *c); diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 1728d4ce5730..16a51e7288d5 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -8,6 +8,7 @@ #include <asm/asm.h> #include <linux/bitops.h> +#include <asm/alternative.h> enum cpuid_leafs { @@ -175,39 +176,15 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); */ static __always_inline bool _static_cpu_has(u16 bit) { - asm_volatile_goto("1: jmp 6f\n" - "2:\n" - ".skip -(((5f-4f) - (2b-1b)) > 0) * " - "((5f-4f) - (2b-1b)),0x90\n" - "3:\n" - ".section .altinstructions,\"a\"\n" - " .long 1b - .\n" /* src offset */ - " .long 4f - .\n" /* repl offset */ - " .word %P[always]\n" /* always replace */ - " .byte 3b - 1b\n" /* src len */ - " .byte 5f - 4f\n" /* repl len */ - " .byte 3b - 2b\n" /* pad len */ - ".previous\n" - ".section .altinstr_replacement,\"ax\"\n" - "4: jmp %l[t_no]\n" - "5:\n" - ".previous\n" - ".section .altinstructions,\"a\"\n" - " .long 1b - .\n" /* src offset */ - " .long 0\n" /* no replacement */ - " .word %P[feature]\n" /* feature bit */ - " .byte 3b - 1b\n" /* src len */ - " .byte 0\n" /* repl len */ - " .byte 0\n" /* pad len */ - ".previous\n" - ".section .altinstr_aux,\"ax\"\n" - "6:\n" - " testb %[bitnum],%[cap_byte]\n" - " jnz %l[t_yes]\n" - " jmp %l[t_no]\n" - ".previous\n" + asm_volatile_goto( + ALTERNATIVE_TERNARY("jmp 6f", %P[feature], "", "jmp %l[t_no]") + ".section .altinstr_aux,\"ax\"\n" + "6:\n" + " testb %[bitnum],%[cap_byte]\n" + " jnz %l[t_yes]\n" + " jmp %l[t_no]\n" + ".previous\n" : : [feature] "i" (bit), - [always] "i" (X86_FEATURE_ALWAYS), [bitnum] "i" (1 << (bit & 7)), [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3]) : : t_yes, t_no); diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index cc96e26d69f7..ac37830ae941 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -84,7 +84,7 @@ /* CPU types for specific tunings: */ #define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ -#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */ +/* FREE, was #define X86_FEATURE_K7 ( 3*32+ 5) "" Athlon */ #define 
X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ #define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ #define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ @@ -236,6 +236,8 @@ #define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */ #define X86_FEATURE_VMCALL ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */ #define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */ +#define X86_FEATURE_PVUNLOCK ( 8*32+20) /* "" PV unlock function */ +#define X86_FEATURE_VCPUPREEMPT ( 8*32+21) /* "" PV vcpu_is_preempted function */ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */ #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/ @@ -290,6 +292,8 @@ #define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */ #define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* #AC for split lock */ #define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */ +#define X86_FEATURE_SGX1 (11*32+ 8) /* "" Basic SGX */ +#define X86_FEATURE_SGX2 (11*32+ 9) /* "" SGX Enclave Dynamic Memory Management (EDMM) */ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ @@ -336,6 +340,7 @@ #define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ #define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ #define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ +#define X86_FEATURE_V_SPEC_CTRL (15*32+20) /* Virtual SPEC_CTRL */ #define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* "" SVME addr check */ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */ @@ -354,6 +359,7 @@ #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ #define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ +#define X86_FEATURE_BUS_LOCK_DETECT (16*32+24) /* Bus Lock detect */ #define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */ #define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */ #define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */ @@ -374,6 +380,7 @@ #define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */ #define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ #define X86_FEATURE_SERIALIZE (18*32+14) /* SERIALIZE instruction */ +#define X86_FEATURE_HYBRID_CPU (18*32+15) /* "" This part has CPUs of more than one type */ #define X86_FEATURE_TSXLDTRK (18*32+16) /* TSX Suspend Load Address Tracking */ #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ #define X86_FEATURE_ARCH_LBR (18*32+19) /* Intel ARCH LBR */ diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index 9224d40cdefe..7d7500806af8 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -283,12 +283,12 @@ extern u32 elf_hwcap2; * * The decision process for determining the results are: * - * CPU: | lacks NX* | has NX, ia32 | has NX, x86_64 | - * ELF: | | | | + * CPU: | lacks NX* | has NX, ia32 | has NX, x86_64 | + * ELF: | | | | * ---------------------|------------|------------------|----------------| - * missing PT_GNU_STACK | exec-all | exec-all | exec-none | - * PT_GNU_STACK == RWX | exec-stack | exec-stack | exec-stack | - * PT_GNU_STACK == RW | exec-none | exec-none | exec-none | + * missing PT_GNU_STACK | exec-all | exec-all | exec-none | + * 
PT_GNU_STACK == RWX | exec-stack | exec-stack | exec-stack | + * PT_GNU_STACK == RW | exec-none | exec-none | exec-none | * * exec-all : all PROT_READ user mappings are executable, except when * backed by files on a noexec-filesystem. diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h index 2b87b191b3b8..14ebd2196569 100644 --- a/arch/x86/include/asm/entry-common.h +++ b/arch/x86/include/asm/entry-common.h @@ -2,6 +2,7 @@ #ifndef _ASM_X86_ENTRY_COMMON_H #define _ASM_X86_ENTRY_COMMON_H +#include <linux/randomize_kstack.h> #include <linux/user-return-notifier.h> #include <asm/nospec-branch.h> @@ -70,6 +71,21 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, */ current_thread_info()->status &= ~(TS_COMPAT | TS_I386_REGS_POKED); #endif + + /* + * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(), + * but not enough for x86 stack utilization comfort. To keep + * reasonable stack head room, reduce the maximum offset to 8 bits. + * + * The actual entropy will be further reduced by the compiler when + * applying stack alignment constraints (see cc_stack_align4/8 in + * arch/x86/Makefile), which will remove the 3 (x86_64) or 2 (ia32) + * low bits from any entropy chosen here. + * + * Therefore, final stack offset entropy will be 5 (x86_64) or + * 6 (ia32) bits. + */ + choose_random_kstack_offset(rdtsc() & 0xFF); } #define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h index d43717b423cb..6ec3fc969ad5 100644 --- a/arch/x86/include/asm/floppy.h +++ b/arch/x86/include/asm/floppy.h @@ -74,7 +74,6 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id) int lcount; char *lptr; - st = 1; for (lcount = virtual_dma_count, lptr = virtual_dma_addr; lcount; lcount--, lptr++) { st = inb(virtual_dma_port + FD_STATUS); diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h index e6cd3fee562b..606f5cc579b2 100644 --- a/arch/x86/include/asm/hyperv-tlfs.h +++ b/arch/x86/include/asm/hyperv-tlfs.h @@ -156,7 +156,7 @@ enum hv_isolation_type { #define HV_X64_MSR_HYPERCALL 0x40000001 /* MSR used to provide vcpu index */ -#define HV_X64_MSR_VP_INDEX 0x40000002 +#define HV_REGISTER_VP_INDEX 0x40000002 /* MSR used to reset the guest OS. */ #define HV_X64_MSR_RESET 0x40000003 @@ -165,10 +165,10 @@ enum hv_isolation_type { #define HV_X64_MSR_VP_RUNTIME 0x40000010 /* MSR used to read the per-partition time reference counter */ -#define HV_X64_MSR_TIME_REF_COUNT 0x40000020 +#define HV_REGISTER_TIME_REF_COUNT 0x40000020 /* A partition's reference time stamp counter (TSC) page */ -#define HV_X64_MSR_REFERENCE_TSC 0x40000021 +#define HV_REGISTER_REFERENCE_TSC 0x40000021 /* MSR used to retrieve the TSC frequency */ #define HV_X64_MSR_TSC_FREQUENCY 0x40000022 @@ -183,50 +183,50 @@ enum hv_isolation_type { #define HV_X64_MSR_VP_ASSIST_PAGE 0x40000073 /* Define synthetic interrupt controller model specific registers. 
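The entropy bookkeeping in the entry-common.h hunk can be restated with concrete numbers; nothing below is new behaviour, it only spells out the arithmetic from that comment:

	/* Illustration of the comment's reasoning, not kernel code. */
	unsigned long bits  = rdtsc() & 0xFF;	/* 8 low TSC bits: 256 raw values          */
	unsigned long off64 = bits & ~7UL;	/* x86_64 stack alignment drops 3 low bits:
						   32 distinct offsets, i.e. 5 bits left   */
	unsigned long off32 = bits & ~3UL;	/* ia32 drops 2 low bits: 64 offsets, 6 bits */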
*/ -#define HV_X64_MSR_SCONTROL 0x40000080 -#define HV_X64_MSR_SVERSION 0x40000081 -#define HV_X64_MSR_SIEFP 0x40000082 -#define HV_X64_MSR_SIMP 0x40000083 -#define HV_X64_MSR_EOM 0x40000084 -#define HV_X64_MSR_SINT0 0x40000090 -#define HV_X64_MSR_SINT1 0x40000091 -#define HV_X64_MSR_SINT2 0x40000092 -#define HV_X64_MSR_SINT3 0x40000093 -#define HV_X64_MSR_SINT4 0x40000094 -#define HV_X64_MSR_SINT5 0x40000095 -#define HV_X64_MSR_SINT6 0x40000096 -#define HV_X64_MSR_SINT7 0x40000097 -#define HV_X64_MSR_SINT8 0x40000098 -#define HV_X64_MSR_SINT9 0x40000099 -#define HV_X64_MSR_SINT10 0x4000009A -#define HV_X64_MSR_SINT11 0x4000009B -#define HV_X64_MSR_SINT12 0x4000009C -#define HV_X64_MSR_SINT13 0x4000009D -#define HV_X64_MSR_SINT14 0x4000009E -#define HV_X64_MSR_SINT15 0x4000009F +#define HV_REGISTER_SCONTROL 0x40000080 +#define HV_REGISTER_SVERSION 0x40000081 +#define HV_REGISTER_SIEFP 0x40000082 +#define HV_REGISTER_SIMP 0x40000083 +#define HV_REGISTER_EOM 0x40000084 +#define HV_REGISTER_SINT0 0x40000090 +#define HV_REGISTER_SINT1 0x40000091 +#define HV_REGISTER_SINT2 0x40000092 +#define HV_REGISTER_SINT3 0x40000093 +#define HV_REGISTER_SINT4 0x40000094 +#define HV_REGISTER_SINT5 0x40000095 +#define HV_REGISTER_SINT6 0x40000096 +#define HV_REGISTER_SINT7 0x40000097 +#define HV_REGISTER_SINT8 0x40000098 +#define HV_REGISTER_SINT9 0x40000099 +#define HV_REGISTER_SINT10 0x4000009A +#define HV_REGISTER_SINT11 0x4000009B +#define HV_REGISTER_SINT12 0x4000009C +#define HV_REGISTER_SINT13 0x4000009D +#define HV_REGISTER_SINT14 0x4000009E +#define HV_REGISTER_SINT15 0x4000009F /* * Synthetic Timer MSRs. Four timers per vcpu. */ -#define HV_X64_MSR_STIMER0_CONFIG 0x400000B0 -#define HV_X64_MSR_STIMER0_COUNT 0x400000B1 -#define HV_X64_MSR_STIMER1_CONFIG 0x400000B2 -#define HV_X64_MSR_STIMER1_COUNT 0x400000B3 -#define HV_X64_MSR_STIMER2_CONFIG 0x400000B4 -#define HV_X64_MSR_STIMER2_COUNT 0x400000B5 -#define HV_X64_MSR_STIMER3_CONFIG 0x400000B6 -#define HV_X64_MSR_STIMER3_COUNT 0x400000B7 +#define HV_REGISTER_STIMER0_CONFIG 0x400000B0 +#define HV_REGISTER_STIMER0_COUNT 0x400000B1 +#define HV_REGISTER_STIMER1_CONFIG 0x400000B2 +#define HV_REGISTER_STIMER1_COUNT 0x400000B3 +#define HV_REGISTER_STIMER2_CONFIG 0x400000B4 +#define HV_REGISTER_STIMER2_COUNT 0x400000B5 +#define HV_REGISTER_STIMER3_CONFIG 0x400000B6 +#define HV_REGISTER_STIMER3_COUNT 0x400000B7 /* Hyper-V guest idle MSR */ #define HV_X64_MSR_GUEST_IDLE 0x400000F0 /* Hyper-V guest crash notification MSR's */ -#define HV_X64_MSR_CRASH_P0 0x40000100 -#define HV_X64_MSR_CRASH_P1 0x40000101 -#define HV_X64_MSR_CRASH_P2 0x40000102 -#define HV_X64_MSR_CRASH_P3 0x40000103 -#define HV_X64_MSR_CRASH_P4 0x40000104 -#define HV_X64_MSR_CRASH_CTL 0x40000105 +#define HV_REGISTER_CRASH_P0 0x40000100 +#define HV_REGISTER_CRASH_P1 0x40000101 +#define HV_REGISTER_CRASH_P2 0x40000102 +#define HV_REGISTER_CRASH_P3 0x40000103 +#define HV_REGISTER_CRASH_P4 0x40000104 +#define HV_REGISTER_CRASH_CTL 0x40000105 /* TSC emulation after migration */ #define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106 @@ -236,6 +236,32 @@ enum hv_isolation_type { /* TSC invariant control */ #define HV_X64_MSR_TSC_INVARIANT_CONTROL 0x40000118 +/* Register name aliases for temporary compatibility */ +#define HV_X64_MSR_STIMER0_COUNT HV_REGISTER_STIMER0_COUNT +#define HV_X64_MSR_STIMER0_CONFIG HV_REGISTER_STIMER0_CONFIG +#define HV_X64_MSR_STIMER1_COUNT HV_REGISTER_STIMER1_COUNT +#define HV_X64_MSR_STIMER1_CONFIG HV_REGISTER_STIMER1_CONFIG +#define HV_X64_MSR_STIMER2_COUNT 
HV_REGISTER_STIMER2_COUNT +#define HV_X64_MSR_STIMER2_CONFIG HV_REGISTER_STIMER2_CONFIG +#define HV_X64_MSR_STIMER3_COUNT HV_REGISTER_STIMER3_COUNT +#define HV_X64_MSR_STIMER3_CONFIG HV_REGISTER_STIMER3_CONFIG +#define HV_X64_MSR_SCONTROL HV_REGISTER_SCONTROL +#define HV_X64_MSR_SVERSION HV_REGISTER_SVERSION +#define HV_X64_MSR_SIMP HV_REGISTER_SIMP +#define HV_X64_MSR_SIEFP HV_REGISTER_SIEFP +#define HV_X64_MSR_VP_INDEX HV_REGISTER_VP_INDEX +#define HV_X64_MSR_EOM HV_REGISTER_EOM +#define HV_X64_MSR_SINT0 HV_REGISTER_SINT0 +#define HV_X64_MSR_SINT15 HV_REGISTER_SINT15 +#define HV_X64_MSR_CRASH_P0 HV_REGISTER_CRASH_P0 +#define HV_X64_MSR_CRASH_P1 HV_REGISTER_CRASH_P1 +#define HV_X64_MSR_CRASH_P2 HV_REGISTER_CRASH_P2 +#define HV_X64_MSR_CRASH_P3 HV_REGISTER_CRASH_P3 +#define HV_X64_MSR_CRASH_P4 HV_REGISTER_CRASH_P4 +#define HV_X64_MSR_CRASH_CTL HV_REGISTER_CRASH_CTL +#define HV_X64_MSR_TIME_REF_COUNT HV_REGISTER_TIME_REF_COUNT +#define HV_X64_MSR_REFERENCE_TSC HV_REGISTER_REFERENCE_TSC + /* * Declare the MSR used to setup pages used to communicate with the hypervisor. */ @@ -288,35 +314,6 @@ struct hv_tsc_emulation_status { #define HV_X64_MSR_TSC_REFERENCE_ENABLE 0x00000001 #define HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT 12 - -/* Define hypervisor message types. */ -enum hv_message_type { - HVMSG_NONE = 0x00000000, - - /* Memory access messages. */ - HVMSG_UNMAPPED_GPA = 0x80000000, - HVMSG_GPA_INTERCEPT = 0x80000001, - - /* Timer notification messages. */ - HVMSG_TIMER_EXPIRED = 0x80000010, - - /* Error messages. */ - HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020, - HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021, - HVMSG_UNSUPPORTED_FEATURE = 0x80000022, - - /* Trace buffer complete messages. */ - HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040, - - /* Platform-specific processor intercept messages. */ - HVMSG_X64_IOPORT_INTERCEPT = 0x80010000, - HVMSG_X64_MSR_INTERCEPT = 0x80010001, - HVMSG_X64_CPUID_INTERCEPT = 0x80010002, - HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003, - HVMSG_X64_APIC_EOI = 0x80010004, - HVMSG_X64_LEGACY_FP_ERROR = 0x80010005 -}; - struct hv_nested_enlightenments_control { struct { __u32 directhypercall:1; diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h index 5eb3bdf36a41..73d45b0dfff2 100644 --- a/arch/x86/include/asm/idtentry.h +++ b/arch/x86/include/asm/idtentry.h @@ -547,7 +547,7 @@ SYM_CODE_END(spurious_entries_start) /* * Dummy trap number so the low level ASM macro vector number checks do not * match which results in emitting plain IDTENTRY stubs without bells and - * whistels. + * whistles. */ #define X86_TRAP_OTHER 0xFFFF @@ -588,6 +588,21 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_MC, xenpv_exc_machine_check); #endif /* NMI */ + +#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL) +/* + * Special NOIST entry point for VMX which invokes this on the kernel + * stack. asm_exc_nmi() requires an IST to work correctly vs. the NMI + * 'executing' marker. + * + * On 32bit this just uses the regular NMI entry point because 32-bit does + * not have ISTs. 
+ */ +DECLARE_IDTENTRY(X86_TRAP_NMI, exc_nmi_noist); +#else +#define asm_exc_nmi_noist asm_exc_nmi +#endif + DECLARE_IDTENTRY_NMI(X86_TRAP_NMI, exc_nmi); #ifdef CONFIG_XEN_PV DECLARE_IDTENTRY_RAW(X86_TRAP_NMI, xenpv_exc_nmi); diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h index 4cf2ad521f65..b56c5741581a 100644 --- a/arch/x86/include/asm/inat.h +++ b/arch/x86/include/asm/inat.h @@ -6,7 +6,7 @@ * * Written by Masami Hiramatsu <mhiramat@redhat.com> */ -#include <asm/inat_types.h> +#include <asm/inat_types.h> /* __ignore_sync_check__ */ /* * Internal bits. Don't use bitmasks directly, because these bits are diff --git a/arch/x86/include/asm/insn-eval.h b/arch/x86/include/asm/insn-eval.h index 98b4dae5e8bc..91d7182ad2d6 100644 --- a/arch/x86/include/asm/insn-eval.h +++ b/arch/x86/include/asm/insn-eval.h @@ -25,7 +25,7 @@ int insn_fetch_from_user(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE]); int insn_fetch_from_user_inatomic(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE]); -bool insn_decode(struct insn *insn, struct pt_regs *regs, - unsigned char buf[MAX_INSN_SIZE], int buf_size); +bool insn_decode_from_regs(struct insn *insn, struct pt_regs *regs, + unsigned char buf[MAX_INSN_SIZE], int buf_size); #endif /* _ASM_X86_INSN_EVAL_H */ diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h index 95a448fbb44c..05a6ab940f45 100644 --- a/arch/x86/include/asm/insn.h +++ b/arch/x86/include/asm/insn.h @@ -9,7 +9,7 @@ #include <asm/byteorder.h> /* insn_attr_t is defined in inat.h */ -#include <asm/inat.h> +#include <asm/inat.h> /* __ignore_sync_check__ */ #if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN) @@ -132,13 +132,25 @@ struct insn { #define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */ extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64); -extern void insn_get_prefixes(struct insn *insn); -extern void insn_get_opcode(struct insn *insn); -extern void insn_get_modrm(struct insn *insn); -extern void insn_get_sib(struct insn *insn); -extern void insn_get_displacement(struct insn *insn); -extern void insn_get_immediate(struct insn *insn); -extern void insn_get_length(struct insn *insn); +extern int insn_get_prefixes(struct insn *insn); +extern int insn_get_opcode(struct insn *insn); +extern int insn_get_modrm(struct insn *insn); +extern int insn_get_sib(struct insn *insn); +extern int insn_get_displacement(struct insn *insn); +extern int insn_get_immediate(struct insn *insn); +extern int insn_get_length(struct insn *insn); + +enum insn_mode { + INSN_MODE_32, + INSN_MODE_64, + /* Mode is determined by the current kernel build. 
*/ + INSN_MODE_KERN, + INSN_NUM_MODES, +}; + +extern int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m); + +#define insn_decode_kernel(_insn, _ptr) insn_decode((_insn), (_ptr), MAX_INSN_SIZE, INSN_MODE_KERN) /* Attribute will be determined after getting ModRM (for opcode groups) */ static inline void insn_get_attribute(struct insn *insn) @@ -149,17 +161,6 @@ static inline void insn_get_attribute(struct insn *insn) /* Instruction uses RIP-relative addressing */ extern int insn_rip_relative(struct insn *insn); -/* Init insn for kernel text */ -static inline void kernel_insn_init(struct insn *insn, - const void *kaddr, int buf_len) -{ -#ifdef CONFIG_X86_64 - insn_init(insn, kaddr, buf_len, 1); -#else /* CONFIG_X86_32 */ - insn_init(insn, kaddr, buf_len, 0); -#endif -} - static inline int insn_is_avx(struct insn *insn) { if (!insn->prefixes.got) @@ -179,13 +180,6 @@ static inline int insn_has_emulate_prefix(struct insn *insn) return !!insn->emulate_prefix_size; } -/* Ensure this instruction is decoded completely */ -static inline int insn_complete(struct insn *insn) -{ - return insn->opcode.got && insn->modrm.got && insn->sib.got && - insn->displacement.got && insn->immediate.got; -} - static inline insn_byte_t insn_vex_m_bits(struct insn *insn) { if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */ diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index 9abe842dbd84..955b06d6325a 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -32,7 +32,9 @@ * _EP - 2 socket server parts * _EX - 4+ socket server parts * - * The #define line may optionally include a comment including platform names. + * The #define line may optionally include a comment including platform or core + * names. 
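The insn.h changes above make the insn_get_*() helpers return an error code and add insn_decode() as the single entry point, with insn_decode_kernel() as a convenience wrapper for kernel text. A minimal usage sketch of the new interface, assuming the usual 0-on-success, negative-on-failure convention:

#include <linux/printk.h>
#include <asm/insn.h>

/* Sketch only: report the length of one instruction in kernel text. */
static int report_insn_length(const void *kaddr)
{
	struct insn insn;
	int ret;

	/* Expands to insn_decode(&insn, kaddr, MAX_INSN_SIZE, INSN_MODE_KERN). */
	ret = insn_decode_kernel(&insn, kaddr);
	if (ret < 0)
		return ret;	/* not a decodable instruction */

	pr_info("insn at %px is %u bytes\n", kaddr, insn.length);
	return 0;
}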
An exception is made for skylake/kabylake where steppings seem to have gotten + * their own names :-( */ /* Wildcard match for FAM6 so X86_MATCH_INTEL_FAM6_MODEL(ANY) works */ @@ -69,35 +71,41 @@ #define INTEL_FAM6_BROADWELL_X 0x4F #define INTEL_FAM6_BROADWELL_D 0x56 -#define INTEL_FAM6_SKYLAKE_L 0x4E -#define INTEL_FAM6_SKYLAKE 0x5E -#define INTEL_FAM6_SKYLAKE_X 0x55 -#define INTEL_FAM6_KABYLAKE_L 0x8E -#define INTEL_FAM6_KABYLAKE 0x9E +#define INTEL_FAM6_SKYLAKE_L 0x4E /* Sky Lake */ +#define INTEL_FAM6_SKYLAKE 0x5E /* Sky Lake */ +#define INTEL_FAM6_SKYLAKE_X 0x55 /* Sky Lake */ +/* CASCADELAKE_X 0x55 Sky Lake -- s: 7 */ +/* COOPERLAKE_X 0x55 Sky Lake -- s: 11 */ -#define INTEL_FAM6_CANNONLAKE_L 0x66 +#define INTEL_FAM6_KABYLAKE_L 0x8E /* Sky Lake */ +/* AMBERLAKE_L 0x8E Sky Lake -- s: 9 */ +/* COFFEELAKE_L 0x8E Sky Lake -- s: 10 */ +/* WHISKEYLAKE_L 0x8E Sky Lake -- s: 11,12 */ -#define INTEL_FAM6_ICELAKE_X 0x6A -#define INTEL_FAM6_ICELAKE_D 0x6C -#define INTEL_FAM6_ICELAKE 0x7D -#define INTEL_FAM6_ICELAKE_L 0x7E -#define INTEL_FAM6_ICELAKE_NNPI 0x9D +#define INTEL_FAM6_KABYLAKE 0x9E /* Sky Lake */ +/* COFFEELAKE 0x9E Sky Lake -- s: 10-13 */ -#define INTEL_FAM6_TIGERLAKE_L 0x8C -#define INTEL_FAM6_TIGERLAKE 0x8D +#define INTEL_FAM6_COMETLAKE 0xA5 /* Sky Lake */ +#define INTEL_FAM6_COMETLAKE_L 0xA6 /* Sky Lake */ -#define INTEL_FAM6_COMETLAKE 0xA5 -#define INTEL_FAM6_COMETLAKE_L 0xA6 +#define INTEL_FAM6_CANNONLAKE_L 0x66 /* Palm Cove */ -#define INTEL_FAM6_ROCKETLAKE 0xA7 +#define INTEL_FAM6_ICELAKE_X 0x6A /* Sunny Cove */ +#define INTEL_FAM6_ICELAKE_D 0x6C /* Sunny Cove */ +#define INTEL_FAM6_ICELAKE 0x7D /* Sunny Cove */ +#define INTEL_FAM6_ICELAKE_L 0x7E /* Sunny Cove */ +#define INTEL_FAM6_ICELAKE_NNPI 0x9D /* Sunny Cove */ -#define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F +#define INTEL_FAM6_LAKEFIELD 0x8A /* Sunny Cove / Tremont */ -/* Hybrid Core/Atom Processors */ +#define INTEL_FAM6_ROCKETLAKE 0xA7 /* Cypress Cove */ -#define INTEL_FAM6_LAKEFIELD 0x8A -#define INTEL_FAM6_ALDERLAKE 0x97 -#define INTEL_FAM6_ALDERLAKE_L 0x9A +#define INTEL_FAM6_TIGERLAKE_L 0x8C /* Willow Cove */ +#define INTEL_FAM6_TIGERLAKE 0x8D /* Willow Cove */ +#define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F /* Willow Cove */ + +#define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */ +#define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */ /* "Small Core" Processors (Atom) */ diff --git a/arch/x86/include/asm/intel_pconfig.h b/arch/x86/include/asm/intel_pconfig.h index 3cb002b1d0f9..994638ef171b 100644 --- a/arch/x86/include/asm/intel_pconfig.h +++ b/arch/x86/include/asm/intel_pconfig.h @@ -38,7 +38,7 @@ enum pconfig_leaf { #define MKTME_INVALID_ENC_ALG 4 #define MKTME_DEVICE_BUSY 5 -/* Hardware requires the structure to be 256 byte alinged. Otherwise #GP(0). */ +/* Hardware requires the structure to be 256 byte aligned. Otherwise #GP(0). 
*/ struct mktme_key_program { u16 keyid; u32 keyid_ctrl; diff --git a/arch/x86/include/asm/intel_pt.h b/arch/x86/include/asm/intel_pt.h index 423b788f495e..ebe8d2ea44fe 100644 --- a/arch/x86/include/asm/intel_pt.h +++ b/arch/x86/include/asm/intel_pt.h @@ -3,7 +3,7 @@ #define _ASM_X86_INTEL_PT_H #define PT_CPUID_LEAVES 2 -#define PT_CPUID_REGS_NUM 4 /* number of regsters (eax, ebx, ecx, edx) */ +#define PT_CPUID_REGS_NUM 4 /* number of registers (eax, ebx, ecx, edx) */ enum pt_capabilities { PT_CAP_max_subleaf = 0, diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index d726459d08e5..841a5d104afa 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -159,7 +159,7 @@ static inline void *phys_to_virt(phys_addr_t address) /* * ISA I/O bus memory addresses are 1:1 with the physical address. * However, we truncate the address to unsigned int to avoid undesirable - * promitions in legacy drivers. + * promotions in legacy drivers. */ static inline unsigned int isa_virt_to_bus(volatile void *address) { diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h index 9b2a0ff76c73..562854c60808 100644 --- a/arch/x86/include/asm/irq_stack.h +++ b/arch/x86/include/asm/irq_stack.h @@ -190,7 +190,7 @@ /* * Macro to invoke __do_softirq on the irq stack. This is only called from - * task context when bottom halfs are about to be reenabled and soft + * task context when bottom halves are about to be reenabled and soft * interrupts are pending to be processed. The interrupt stack cannot be in * use here. */ diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index 144d70ea4393..c5ce9845c999 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -109,18 +109,13 @@ static __always_inline unsigned long arch_local_irq_save(void) } #else -#define ENABLE_INTERRUPTS(x) sti -#define DISABLE_INTERRUPTS(x) cli - #ifdef CONFIG_X86_64 #ifdef CONFIG_DEBUG_ENTRY -#define SAVE_FLAGS(x) pushfq; popq %rax +#define SAVE_FLAGS pushfq; popq %rax #endif #define INTERRUPT_RETURN jmp native_iret -#else -#define INTERRUPT_RETURN iret #endif #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h index 06c3cc22a058..610a05374c02 100644 --- a/arch/x86/include/asm/jump_label.h +++ b/arch/x86/include/asm/jump_label.h @@ -6,12 +6,6 @@ #define JUMP_LABEL_NOP_SIZE 5 -#ifdef CONFIG_X86_64 -# define STATIC_KEY_INIT_NOP P6_NOP5_ATOMIC -#else -# define STATIC_KEY_INIT_NOP GENERIC_NOP5_ATOMIC -#endif - #include <asm/asm.h> #include <asm/nops.h> @@ -20,10 +14,10 @@ #include <linux/stringify.h> #include <linux/types.h> -static __always_inline bool arch_static_branch(struct static_key *key, bool branch) +static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch) { asm_volatile_goto("1:" - ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t" + ".byte " __stringify(BYTES_NOP5) "\n\t" ".pushsection __jump_table, \"aw\" \n\t" _ASM_ALIGN "\n\t" ".long 1b - ., %l[l_yes] - . 
\n\t" @@ -36,7 +30,7 @@ l_yes: return true; } -static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) +static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch) { asm_volatile_goto("1:" ".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t" @@ -63,7 +57,7 @@ l_yes: .long \target - .Lstatic_jump_after_\@ .Lstatic_jump_after_\@: .else - .byte STATIC_KEY_INIT_NOP + .byte BYTES_NOP5 .endif .pushsection __jump_table, "aw" _ASM_ALIGN @@ -75,7 +69,7 @@ l_yes: .macro STATIC_JUMP_IF_FALSE target, key, def .Lstatic_jump_\@: .if \def - .byte STATIC_KEY_INIT_NOP + .byte BYTES_NOP5 .else /* Equivalent to "jmp.d32 \target" */ .byte 0xe9 diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h index 6802c59e8252..0a6e34b07017 100644 --- a/arch/x86/include/asm/kexec.h +++ b/arch/x86/include/asm/kexec.h @@ -150,11 +150,6 @@ struct kimage_arch { pud_t *pud; pmd_t *pmd; pte_t *pte; - - /* Core ELF header buffer */ - void *elf_headers; - unsigned long elf_headers_sz; - unsigned long elf_load_addr; }; #endif /* CONFIG_X86_32 */ diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h index 97bbb4a9083a..05b48b33baf0 100644 --- a/arch/x86/include/asm/kfence.h +++ b/arch/x86/include/asm/kfence.h @@ -56,8 +56,13 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect) else set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); - /* Flush this CPU's TLB. */ + /* + * Flush this CPU's TLB, assuming whoever did the allocation/free is + * likely to continue running on this CPU. + */ + preempt_disable(); flush_tlb_one_kernel(addr); + preempt_enable(); return true; } diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index d20a3d6be36e..bd7f5886a789 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h @@ -65,10 +65,22 @@ struct arch_specific_insn { * a post_handler). 
*/ unsigned boostable:1; - unsigned if_modifier:1; - unsigned is_call:1; - unsigned is_pushf:1; - unsigned is_abs_ip:1; + unsigned char size; /* The size of insn */ + union { + unsigned char opcode; + struct { + unsigned char type; + } jcc; + struct { + unsigned char type; + unsigned char asize; + } loop; + struct { + unsigned char reg; + } indirect; + }; + s32 rel32; /* relative offset must be s32, s16, or s8 */ + void (*emulate_op)(struct kprobe *p, struct pt_regs *regs); /* Number of bytes of text poked */ int tp_len; }; @@ -107,7 +119,6 @@ extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); extern int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data); extern int kprobe_int3_handler(struct pt_regs *regs); -extern int kprobe_debug_handler(struct pt_regs *regs); #else diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 9bc091ecaaeb..cbbcee0a84f9 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -221,12 +221,22 @@ enum x86_intercept_stage; #define DR7_FIXED_1 0x00000400 #define DR7_VOLATILE 0xffff2bff +#define KVM_GUESTDBG_VALID_MASK \ + (KVM_GUESTDBG_ENABLE | \ + KVM_GUESTDBG_SINGLESTEP | \ + KVM_GUESTDBG_USE_HW_BP | \ + KVM_GUESTDBG_USE_SW_BP | \ + KVM_GUESTDBG_INJECT_BP | \ + KVM_GUESTDBG_INJECT_DB) + + #define PFERR_PRESENT_BIT 0 #define PFERR_WRITE_BIT 1 #define PFERR_USER_BIT 2 #define PFERR_RSVD_BIT 3 #define PFERR_FETCH_BIT 4 #define PFERR_PK_BIT 5 +#define PFERR_SGX_BIT 15 #define PFERR_GUEST_FINAL_BIT 32 #define PFERR_GUEST_PAGE_BIT 33 @@ -236,6 +246,7 @@ enum x86_intercept_stage; #define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT) #define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT) #define PFERR_PK_MASK (1U << PFERR_PK_BIT) +#define PFERR_SGX_MASK (1U << PFERR_SGX_BIT) #define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT) #define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT) @@ -884,12 +895,29 @@ struct kvm_hv_syndbg { u64 options; }; +/* Current state of Hyper-V TSC page clocksource */ +enum hv_tsc_page_status { + /* TSC page was not set up or disabled */ + HV_TSC_PAGE_UNSET = 0, + /* TSC page MSR was written by the guest, update pending */ + HV_TSC_PAGE_GUEST_CHANGED, + /* TSC page MSR was written by KVM userspace, update pending */ + HV_TSC_PAGE_HOST_CHANGED, + /* TSC page was properly set up and is currently active */ + HV_TSC_PAGE_SET, + /* TSC page is currently being updated and therefore is inactive */ + HV_TSC_PAGE_UPDATING, + /* TSC page was set up with an inaccessible GPA */ + HV_TSC_PAGE_BROKEN, +}; + /* Hyper-V emulation context */ struct kvm_hv { struct mutex hv_lock; u64 hv_guest_os_id; u64 hv_hypercall; u64 hv_tsc_page; + enum hv_tsc_page_status hv_tsc_page_status; /* Hyper-v based guest crash (NT kernel bugcheck) parameters */ u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS]; @@ -931,6 +959,12 @@ enum kvm_irqchip_mode { KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */ }; +struct kvm_x86_msr_filter { + u8 count; + bool default_allow:1; + struct msr_bitmap_range ranges[16]; +}; + #define APICV_INHIBIT_REASON_DISABLE 0 #define APICV_INHIBIT_REASON_HYPERV 1 #define APICV_INHIBIT_REASON_NESTED 2 @@ -1025,16 +1059,14 @@ struct kvm_arch { bool guest_can_read_msr_platform_info; bool exception_payload_enabled; + bool bus_lock_detection_enabled; + /* Deflect RDMSR and WRMSR to user space when they trigger a #GP */ u32 user_space_msr_mask; + struct kvm_x86_msr_filter __rcu *msr_filter; - struct { - u8 count; - bool default_allow:1; - struct 
msr_bitmap_range ranges[16]; - } msr_filter; - - bool bus_lock_detection_enabled; + /* Guest can access the SGX PROVISIONKEY. */ + bool sgx_provisioning_allowed; struct kvm_pmu_event_filter __rcu *pmu_event_filter; struct task_struct *nx_lpage_recovery_thread; @@ -1050,25 +1082,36 @@ struct kvm_arch { bool tdp_mmu_enabled; /* - * List of struct kvmp_mmu_pages being used as roots. + * List of struct kvm_mmu_pages being used as roots. * All struct kvm_mmu_pages in the list should have * tdp_mmu_page set. - * All struct kvm_mmu_pages in the list should have a positive - * root_count except when a thread holds the MMU lock and is removing - * an entry from the list. + * + * For reads, this list is protected by: + * the MMU lock in read mode + RCU or + * the MMU lock in write mode + * + * For writes, this list is protected by: + * the MMU lock in read mode + the tdp_mmu_pages_lock or + * the MMU lock in write mode + * + * Roots will remain in the list until their tdp_mmu_root_count + * drops to zero, at which point the thread that decremented the + * count to zero should removed the root from the list and clean + * it up, freeing the root after an RCU grace period. */ struct list_head tdp_mmu_roots; /* * List of struct kvmp_mmu_pages not being used as roots. * All struct kvm_mmu_pages in the list should have - * tdp_mmu_page set and a root_count of 0. + * tdp_mmu_page set and a tdp_mmu_root_count of 0. */ struct list_head tdp_mmu_pages; /* * Protects accesses to the following fields when the MMU lock * is held in read mode: + * - tdp_mmu_roots (above) * - tdp_mmu_pages (above) * - the link field of struct kvm_mmu_pages used by the TDP MMU * - lpage_disallowed_mmu_pages @@ -1125,6 +1168,9 @@ struct kvm_vcpu_stat { u64 req_event; u64 halt_poll_success_ns; u64 halt_poll_fail_ns; + u64 nested_run; + u64 directed_yield_attempted; + u64 directed_yield_successful; }; struct x86_instruction_info; @@ -1251,8 +1297,8 @@ struct kvm_x86_ops { int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr); u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); - void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, unsigned long pgd, - int pgd_level); + void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa, + int root_level); bool (*has_wbinvd_exit)(void); @@ -1321,6 +1367,7 @@ struct kvm_x86_ops { int (*mem_enc_op)(struct kvm *kvm, void __user *argp); int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp); int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp); + int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd); int (*get_msr_feature)(struct kvm_msr_entry *entry); @@ -1339,6 +1386,7 @@ struct kvm_x86_ops { struct kvm_x86_nested_ops { int (*check_events)(struct kvm_vcpu *vcpu); bool (*hv_timer_pending)(struct kvm_vcpu *vcpu); + void (*triple_fault)(struct kvm_vcpu *vcpu); int (*get_state)(struct kvm_vcpu *vcpu, struct kvm_nested_state __user *user_kvm_nested_state, unsigned user_data_size); @@ -1410,9 +1458,6 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu); int kvm_mmu_create(struct kvm_vcpu *vcpu); void kvm_mmu_init_vm(struct kvm *kvm); void kvm_mmu_uninit_vm(struct kvm *kvm); -void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, - u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask, - u64 acc_track_mask, u64 me_mask); void kvm_mmu_reset_context(struct kvm_vcpu *vcpu); void kvm_mmu_slot_remove_write_access(struct kvm *kvm, @@ -1422,8 +1467,6 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, const struct kvm_memory_slot 
*memslot); void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, struct kvm_memory_slot *memslot); -void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm, - struct kvm_memory_slot *memslot); void kvm_mmu_zap_all(struct kvm *kvm); void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen); unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm); @@ -1470,7 +1513,7 @@ extern u64 kvm_mce_cap_supported; /* * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing * userspace I/O) to indicate that the emulation context - * should be resued as is, i.e. skip initialization of + * should be reused as is, i.e. skip initialization of * emulation context, instruction fetch and decode. * * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware. @@ -1495,7 +1538,7 @@ extern u64 kvm_mce_cap_supported; * * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware * backdoor emulation, which is opt in via module param. - * VMware backoor emulation handles select instructions + * VMware backdoor emulation handles select instructions * and reinjects the #GP for all other cases. * * EMULTYPE_PF - Set when emulating MMIO by way of an intercepted #PF, in which @@ -1520,6 +1563,11 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data); int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data); int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu); int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu); +int kvm_emulate_as_nop(struct kvm_vcpu *vcpu); +int kvm_emulate_invd(struct kvm_vcpu *vcpu); +int kvm_emulate_mwait(struct kvm_vcpu *vcpu); +int kvm_handle_invalid_op(struct kvm_vcpu *vcpu); +int kvm_emulate_monitor(struct kvm_vcpu *vcpu); int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in); int kvm_emulate_cpuid(struct kvm_vcpu *vcpu); @@ -1548,14 +1596,14 @@ void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val); unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu); void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw); void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); -int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr); +int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu); int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr); int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr); unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu); void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); -bool kvm_rdpmc(struct kvm_vcpu *vcpu); +int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu); void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr); void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); @@ -1596,9 +1644,6 @@ void kvm_update_dr7(struct kvm_vcpu *vcpu); int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn); void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); -int kvm_mmu_load(struct kvm_vcpu *vcpu); -void kvm_mmu_unload(struct kvm_vcpu *vcpu); -void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, ulong roots_to_free); gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, @@ -1717,11 +1762,7 @@ asmlinkage void kvm_spurious_fault(void); _ASM_EXTABLE(666b, 667b) #define KVM_ARCH_WANT_MMU_NOTIFIER -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, - unsigned flags); -int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); -int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); -int 
kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); + int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v); int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); int kvm_cpu_has_extint(struct kvm_vcpu *v); diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 31c4df123aa0..9c80c68d75b5 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -20,7 +20,6 @@ extern u64 sme_me_mask; extern u64 sev_status; -extern bool sev_enabled; void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr, unsigned long decrypted_kernel_vaddr, diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index ccf60a809a17..67ff0d637e55 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -9,70 +9,29 @@ #include <asm/hyperv-tlfs.h> #include <asm/nospec-branch.h> #include <asm/paravirt.h> +#include <asm/mshyperv.h> typedef int (*hyperv_fill_flush_list_func)( struct hv_guest_mapping_flush_list *flush, void *data); -#define hv_init_timer(timer, tick) \ - wrmsrl(HV_X64_MSR_STIMER0_COUNT + (2*timer), tick) -#define hv_init_timer_config(timer, val) \ - wrmsrl(HV_X64_MSR_STIMER0_CONFIG + (2*timer), val) - -#define hv_get_simp(val) rdmsrl(HV_X64_MSR_SIMP, val) -#define hv_set_simp(val) wrmsrl(HV_X64_MSR_SIMP, val) - -#define hv_get_siefp(val) rdmsrl(HV_X64_MSR_SIEFP, val) -#define hv_set_siefp(val) wrmsrl(HV_X64_MSR_SIEFP, val) - -#define hv_get_synic_state(val) rdmsrl(HV_X64_MSR_SCONTROL, val) -#define hv_set_synic_state(val) wrmsrl(HV_X64_MSR_SCONTROL, val) - -#define hv_get_vp_index(index) rdmsrl(HV_X64_MSR_VP_INDEX, index) - -#define hv_signal_eom() wrmsrl(HV_X64_MSR_EOM, 0) - -#define hv_get_synint_state(int_num, val) \ - rdmsrl(HV_X64_MSR_SINT0 + int_num, val) -#define hv_set_synint_state(int_num, val) \ - wrmsrl(HV_X64_MSR_SINT0 + int_num, val) -#define hv_recommend_using_aeoi() \ - (!(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)) +static inline void hv_set_register(unsigned int reg, u64 value) +{ + wrmsrl(reg, value); +} -#define hv_get_crash_ctl(val) \ - rdmsrl(HV_X64_MSR_CRASH_CTL, val) +static inline u64 hv_get_register(unsigned int reg) +{ + u64 value; -#define hv_get_time_ref_count(val) \ - rdmsrl(HV_X64_MSR_TIME_REF_COUNT, val) + rdmsrl(reg, value); + return value; +} -#define hv_get_reference_tsc(val) \ - rdmsrl(HV_X64_MSR_REFERENCE_TSC, val) -#define hv_set_reference_tsc(val) \ - wrmsrl(HV_X64_MSR_REFERENCE_TSC, val) -#define hv_set_clocksource_vdso(val) \ - ((val).vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK) -#define hv_enable_vdso_clocksource() \ - vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK); #define hv_get_raw_timer() rdtsc_ordered() -#define hv_get_vector() HYPERVISOR_CALLBACK_VECTOR - -/* - * Reference to pv_ops must be inline so objtool - * detection of noinstr violations can work correctly. - */ -static __always_inline void hv_setup_sched_clock(void *sched_clock) -{ -#ifdef CONFIG_PARAVIRT - pv_ops.time.sched_clock = sched_clock; -#endif -} void hyperv_vector_handler(struct pt_regs *regs); -static inline void hv_enable_stimer0_percpu_irq(int irq) {} -static inline void hv_disable_stimer0_percpu_irq(int irq) {} - - #if IS_ENABLED(CONFIG_HYPERV) extern int hyperv_init_cpuhp; @@ -189,38 +148,6 @@ static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2) return hv_status; } -/* - * Rep hypercalls. Callers of this functions are supposed to ensure that - * rep_count and varhead_size comply with Hyper-V hypercall definition. 
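With the mshyperv.h rework above, the per-MSR accessor macros disappear in favour of hv_get_register()/hv_set_register(), which on x86 are just rdmsrl()/wrmsrl() keyed by the new HV_REGISTER_* names. The conversion for callers is mechanical; the call sites below are illustrative, not taken from this series:

	u64 simp, scontrol = 0;			/* illustrative values only */

	/* Before: one macro per MSR, hard-wired to an HV_X64_MSR_* name. */
	hv_get_simp(simp);			/* was rdmsrl(HV_X64_MSR_SIMP, simp)    */
	hv_set_synic_state(scontrol);		/* was wrmsrl(HV_X64_MSR_SCONTROL, ...) */

	/* After: a single generic pair keyed by the HV_REGISTER_* names. */
	simp = hv_get_register(HV_REGISTER_SIMP);
	hv_set_register(HV_REGISTER_SCONTROL, scontrol);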
- */ -static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size, - void *input, void *output) -{ - u64 control = code; - u64 status; - u16 rep_comp; - - control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET; - control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET; - - do { - status = hv_do_hypercall(control, input, output); - if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) - return status; - - /* Bits 32-43 of status have 'Reps completed' data. */ - rep_comp = (status & HV_HYPERCALL_REP_COMP_MASK) >> - HV_HYPERCALL_REP_COMP_OFFSET; - - control &= ~HV_HYPERCALL_REP_START_MASK; - control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET; - - touch_nmi_watchdog(); - } while (rep_comp < rep_count); - - return status; -} - extern struct hv_vp_assist_page **hv_vp_assist_page; static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu) @@ -233,9 +160,6 @@ static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu) void __init hyperv_init(void); void hyperv_setup_mmu_ops(void); -void *hv_alloc_hyperv_page(void); -void *hv_alloc_hyperv_zeroed_page(void); -void hv_free_hyperv_page(unsigned long addr); void set_hv_tscchange_cb(void (*cb)(void)); void clear_hv_tscchange_cb(void); void hyperv_stop_tsc_emulation(void); @@ -272,8 +196,6 @@ int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry); #else /* CONFIG_HYPERV */ static inline void hyperv_init(void) {} static inline void hyperv_setup_mmu_ops(void) {} -static inline void *hv_alloc_hyperv_page(void) { return NULL; } -static inline void hv_free_hyperv_page(unsigned long addr) {} static inline void set_hv_tscchange_cb(void (*cb)(void)) {} static inline void clear_hv_tscchange_cb(void) {} static inline void hyperv_stop_tsc_emulation(void) {}; diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 546d6ecf0a35..742d89a00721 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -185,6 +185,9 @@ #define MSR_PEBS_DATA_CFG 0x000003f2 #define MSR_IA32_DS_AREA 0x00000600 #define MSR_IA32_PERF_CAPABILITIES 0x00000345 +#define PERF_CAP_METRICS_IDX 15 +#define PERF_CAP_PT_IDX 16 + #define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6 #define MSR_IA32_RTIT_CTL 0x00000570 @@ -265,6 +268,7 @@ #define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */ #define DEBUGCTLMSR_BTF_SHIFT 1 #define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */ +#define DEBUGCTLMSR_BUS_LOCK_DETECT (1UL << 2) #define DEBUGCTLMSR_TR (1UL << 6) #define DEBUGCTLMSR_BTS (1UL << 7) #define DEBUGCTLMSR_BTINT (1UL << 8) @@ -628,8 +632,6 @@ #define MSR_IA32_APICBASE_ENABLE (1<<11) #define MSR_IA32_APICBASE_BASE (0xfffff<<12) -#define MSR_IA32_TSCDEADLINE 0x000006e0 - #define MSR_IA32_UCODE_WRITE 0x00000079 #define MSR_IA32_UCODE_REV 0x0000008b diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index e16cccdd0420..a3f87f1015d3 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -324,10 +324,6 @@ static inline int wrmsrl_safe(u32 msr, u64 val) return wrmsr_safe(msr, (u32)val, (u32)(val >> 32)); } -#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high)) - -#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0) - struct msr *msrs_alloc(void); void msrs_free(struct msr *msrs); int msr_set_bit(u32 msr, u8 bit); diff --git a/arch/x86/include/asm/nops.h b/arch/x86/include/asm/nops.h index 12f12b5cf2ca..c1e5e818ba16 100644 --- a/arch/x86/include/asm/nops.h +++ 
b/arch/x86/include/asm/nops.h @@ -4,89 +4,58 @@ /* * Define nops for use with alternative() and for tracing. - * - * *_NOP5_ATOMIC must be a single instruction. */ -#define NOP_DS_PREFIX 0x3e +#ifndef CONFIG_64BIT -/* generic versions from gas - 1: nop - the following instructions are NOT nops in 64-bit mode, - for 64-bit mode use K8 or P6 nops instead - 2: movl %esi,%esi - 3: leal 0x00(%esi),%esi - 4: leal 0x00(,%esi,1),%esi - 6: leal 0x00000000(%esi),%esi - 7: leal 0x00000000(,%esi,1),%esi -*/ -#define GENERIC_NOP1 0x90 -#define GENERIC_NOP2 0x89,0xf6 -#define GENERIC_NOP3 0x8d,0x76,0x00 -#define GENERIC_NOP4 0x8d,0x74,0x26,0x00 -#define GENERIC_NOP5 GENERIC_NOP1,GENERIC_NOP4 -#define GENERIC_NOP6 0x8d,0xb6,0x00,0x00,0x00,0x00 -#define GENERIC_NOP7 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00 -#define GENERIC_NOP8 GENERIC_NOP1,GENERIC_NOP7 -#define GENERIC_NOP5_ATOMIC NOP_DS_PREFIX,GENERIC_NOP4 +/* + * Generic 32bit nops from GAS: + * + * 1: nop + * 2: movl %esi,%esi + * 3: leal 0x0(%esi),%esi + * 4: leal 0x0(%esi,%eiz,1),%esi + * 5: leal %ds:0x0(%esi,%eiz,1),%esi + * 6: leal 0x0(%esi),%esi + * 7: leal 0x0(%esi,%eiz,1),%esi + * 8: leal %ds:0x0(%esi,%eiz,1),%esi + * + * Except 5 and 8, which are DS prefixed 4 and 7 resp, where GAS would emit 2 + * nop instructions. + */ +#define BYTES_NOP1 0x90 +#define BYTES_NOP2 0x89,0xf6 +#define BYTES_NOP3 0x8d,0x76,0x00 +#define BYTES_NOP4 0x8d,0x74,0x26,0x00 +#define BYTES_NOP5 0x3e,BYTES_NOP4 +#define BYTES_NOP6 0x8d,0xb6,0x00,0x00,0x00,0x00 +#define BYTES_NOP7 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00 +#define BYTES_NOP8 0x3e,BYTES_NOP7 -/* Opteron 64bit nops - 1: nop - 2: osp nop - 3: osp osp nop - 4: osp osp osp nop -*/ -#define K8_NOP1 GENERIC_NOP1 -#define K8_NOP2 0x66,K8_NOP1 -#define K8_NOP3 0x66,K8_NOP2 -#define K8_NOP4 0x66,K8_NOP3 -#define K8_NOP5 K8_NOP3,K8_NOP2 -#define K8_NOP6 K8_NOP3,K8_NOP3 -#define K8_NOP7 K8_NOP4,K8_NOP3 -#define K8_NOP8 K8_NOP4,K8_NOP4 -#define K8_NOP5_ATOMIC 0x66,K8_NOP4 +#else -/* K7 nops - uses eax dependencies (arbitrary choice) - 1: nop - 2: movl %eax,%eax - 3: leal (,%eax,1),%eax - 4: leal 0x00(,%eax,1),%eax - 6: leal 0x00000000(%eax),%eax - 7: leal 0x00000000(,%eax,1),%eax -*/ -#define K7_NOP1 GENERIC_NOP1 -#define K7_NOP2 0x8b,0xc0 -#define K7_NOP3 0x8d,0x04,0x20 -#define K7_NOP4 0x8d,0x44,0x20,0x00 -#define K7_NOP5 K7_NOP4,K7_NOP1 -#define K7_NOP6 0x8d,0x80,0,0,0,0 -#define K7_NOP7 0x8D,0x04,0x05,0,0,0,0 -#define K7_NOP8 K7_NOP7,K7_NOP1 -#define K7_NOP5_ATOMIC NOP_DS_PREFIX,K7_NOP4 +/* + * Generic 64bit nops from GAS: + * + * 1: nop + * 2: osp nop + * 3: nopl (%eax) + * 4: nopl 0x00(%eax) + * 5: nopl 0x00(%eax,%eax,1) + * 6: osp nopl 0x00(%eax,%eax,1) + * 7: nopl 0x00000000(%eax) + * 8: nopl 0x00000000(%eax,%eax,1) + */ +#define BYTES_NOP1 0x90 +#define BYTES_NOP2 0x66,BYTES_NOP1 +#define BYTES_NOP3 0x0f,0x1f,0x00 +#define BYTES_NOP4 0x0f,0x1f,0x40,0x00 +#define BYTES_NOP5 0x0f,0x1f,0x44,0x00,0x00 +#define BYTES_NOP6 0x66,BYTES_NOP5 +#define BYTES_NOP7 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00 +#define BYTES_NOP8 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00 -/* P6 nops - uses eax dependencies (Intel-recommended choice) - 1: nop - 2: osp nop - 3: nopl (%eax) - 4: nopl 0x00(%eax) - 5: nopl 0x00(%eax,%eax,1) - 6: osp nopl 0x00(%eax,%eax,1) - 7: nopl 0x00000000(%eax) - 8: nopl 0x00000000(%eax,%eax,1) - Note: All the above are assumed to be a single instruction. - There is kernel code that depends on this. 
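/*
 * Minimal userspace sketch (not kernel code) of how a per-length NOP table in
 * the style of x86_nops[] can be consumed: a padding loop fills an arbitrary
 * gap with the largest NOPs that fit, mirroring what the alternatives code
 * does when it NOP-pads patch sites.  Byte values are copied from the 64-bit
 * BYTES_NOP* definitions in this hunk; the helper names are illustrative.
 */
#include <stddef.h>
#include <stdio.h>

#define NOP_MAX 8

static const unsigned char nop1[] = { 0x90 };
static const unsigned char nop2[] = { 0x66, 0x90 };
static const unsigned char nop3[] = { 0x0f, 0x1f, 0x00 };
static const unsigned char nop4[] = { 0x0f, 0x1f, 0x40, 0x00 };
static const unsigned char nop5[] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
static const unsigned char nop6[] = { 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 };
static const unsigned char nop7[] = { 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00 };
static const unsigned char nop8[] = { 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 };

static const unsigned char * const nops[NOP_MAX + 1] = {
	NULL, nop1, nop2, nop3, nop4, nop5, nop6, nop7, nop8
};

/* Fill 'len' bytes at 'buf' with the fewest possible NOP instructions. */
static void add_nops(unsigned char *buf, size_t len)
{
	while (len) {
		size_t n = len > NOP_MAX ? NOP_MAX : len;

		for (size_t i = 0; i < n; i++)
			buf[i] = nops[n][i];
		buf += n;
		len -= n;
	}
}

int main(void)
{
	unsigned char pad[13];

	add_nops(pad, sizeof(pad));	/* expect one 8-byte and one 5-byte NOP */
	for (size_t i = 0; i < sizeof(pad); i++)
		printf("%02x ", pad[i]);
	printf("\n");
	return 0;
}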
-*/ -#define P6_NOP1 GENERIC_NOP1 -#define P6_NOP2 0x66,0x90 -#define P6_NOP3 0x0f,0x1f,0x00 -#define P6_NOP4 0x0f,0x1f,0x40,0 -#define P6_NOP5 0x0f,0x1f,0x44,0x00,0 -#define P6_NOP6 0x66,0x0f,0x1f,0x44,0x00,0 -#define P6_NOP7 0x0f,0x1f,0x80,0,0,0,0 -#define P6_NOP8 0x0f,0x1f,0x84,0x00,0,0,0,0 -#define P6_NOP5_ATOMIC P6_NOP5 +#endif /* CONFIG_64BIT */ #ifdef __ASSEMBLY__ #define _ASM_MK_NOP(x) .byte x @@ -94,54 +63,19 @@ #define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n" #endif -#if defined(CONFIG_MK7) -#define ASM_NOP1 _ASM_MK_NOP(K7_NOP1) -#define ASM_NOP2 _ASM_MK_NOP(K7_NOP2) -#define ASM_NOP3 _ASM_MK_NOP(K7_NOP3) -#define ASM_NOP4 _ASM_MK_NOP(K7_NOP4) -#define ASM_NOP5 _ASM_MK_NOP(K7_NOP5) -#define ASM_NOP6 _ASM_MK_NOP(K7_NOP6) -#define ASM_NOP7 _ASM_MK_NOP(K7_NOP7) -#define ASM_NOP8 _ASM_MK_NOP(K7_NOP8) -#define ASM_NOP5_ATOMIC _ASM_MK_NOP(K7_NOP5_ATOMIC) -#elif defined(CONFIG_X86_P6_NOP) -#define ASM_NOP1 _ASM_MK_NOP(P6_NOP1) -#define ASM_NOP2 _ASM_MK_NOP(P6_NOP2) -#define ASM_NOP3 _ASM_MK_NOP(P6_NOP3) -#define ASM_NOP4 _ASM_MK_NOP(P6_NOP4) -#define ASM_NOP5 _ASM_MK_NOP(P6_NOP5) -#define ASM_NOP6 _ASM_MK_NOP(P6_NOP6) -#define ASM_NOP7 _ASM_MK_NOP(P6_NOP7) -#define ASM_NOP8 _ASM_MK_NOP(P6_NOP8) -#define ASM_NOP5_ATOMIC _ASM_MK_NOP(P6_NOP5_ATOMIC) -#elif defined(CONFIG_X86_64) -#define ASM_NOP1 _ASM_MK_NOP(K8_NOP1) -#define ASM_NOP2 _ASM_MK_NOP(K8_NOP2) -#define ASM_NOP3 _ASM_MK_NOP(K8_NOP3) -#define ASM_NOP4 _ASM_MK_NOP(K8_NOP4) -#define ASM_NOP5 _ASM_MK_NOP(K8_NOP5) -#define ASM_NOP6 _ASM_MK_NOP(K8_NOP6) -#define ASM_NOP7 _ASM_MK_NOP(K8_NOP7) -#define ASM_NOP8 _ASM_MK_NOP(K8_NOP8) -#define ASM_NOP5_ATOMIC _ASM_MK_NOP(K8_NOP5_ATOMIC) -#else -#define ASM_NOP1 _ASM_MK_NOP(GENERIC_NOP1) -#define ASM_NOP2 _ASM_MK_NOP(GENERIC_NOP2) -#define ASM_NOP3 _ASM_MK_NOP(GENERIC_NOP3) -#define ASM_NOP4 _ASM_MK_NOP(GENERIC_NOP4) -#define ASM_NOP5 _ASM_MK_NOP(GENERIC_NOP5) -#define ASM_NOP6 _ASM_MK_NOP(GENERIC_NOP6) -#define ASM_NOP7 _ASM_MK_NOP(GENERIC_NOP7) -#define ASM_NOP8 _ASM_MK_NOP(GENERIC_NOP8) -#define ASM_NOP5_ATOMIC _ASM_MK_NOP(GENERIC_NOP5_ATOMIC) -#endif +#define ASM_NOP1 _ASM_MK_NOP(BYTES_NOP1) +#define ASM_NOP2 _ASM_MK_NOP(BYTES_NOP2) +#define ASM_NOP3 _ASM_MK_NOP(BYTES_NOP3) +#define ASM_NOP4 _ASM_MK_NOP(BYTES_NOP4) +#define ASM_NOP5 _ASM_MK_NOP(BYTES_NOP5) +#define ASM_NOP6 _ASM_MK_NOP(BYTES_NOP6) +#define ASM_NOP7 _ASM_MK_NOP(BYTES_NOP7) +#define ASM_NOP8 _ASM_MK_NOP(BYTES_NOP8) #define ASM_NOP_MAX 8 -#define NOP_ATOMIC5 (ASM_NOP_MAX+1) /* Entry for the 5-byte atomic NOP */ #ifndef __ASSEMBLY__ -extern const unsigned char * const *ideal_nops; -extern void arch_init_ideal_nops(void); +extern const unsigned char * const x86_nops[]; #endif #endif /* _ASM_X86_NOPS_H */ diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index cb9ad6b73973..3ad8c6d3cbb3 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -7,7 +7,6 @@ #include <linux/objtool.h> #include <asm/alternative.h> -#include <asm/alternative-asm.h> #include <asm/cpufeatures.h> #include <asm/msr-index.h> #include <asm/unwind_hints.h> @@ -33,7 +32,7 @@ /* * Google experimented with loop-unrolling and this turned out to be - * the optimal version — two calls, each with their own speculation + * the optimal version - two calls, each with their own speculation * trap should their return address end up getting used, in a loop. 
*/ #define __FILL_RETURN_BUFFER(reg, nr, sp) \ @@ -81,7 +80,7 @@ .macro JMP_NOSPEC reg:req #ifdef CONFIG_RETPOLINE ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \ - __stringify(jmp __x86_retpoline_\reg), X86_FEATURE_RETPOLINE, \ + __stringify(jmp __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_AMD #else jmp *%\reg @@ -91,7 +90,7 @@ .macro CALL_NOSPEC reg:req #ifdef CONFIG_RETPOLINE ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \ - __stringify(call __x86_retpoline_\reg), X86_FEATURE_RETPOLINE, \ + __stringify(call __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_AMD #else call *%\reg @@ -129,7 +128,7 @@ ALTERNATIVE_2( \ ANNOTATE_RETPOLINE_SAFE \ "call *%[thunk_target]\n", \ - "call __x86_retpoline_%V[thunk_target]\n", \ + "call __x86_indirect_thunk_%V[thunk_target]\n", \ X86_FEATURE_RETPOLINE, \ "lfence;\n" \ ANNOTATE_RETPOLINE_SAFE \ diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h index 939b1cff4a7b..ca840fec7776 100644 --- a/arch/x86/include/asm/page_64.h +++ b/arch/x86/include/asm/page_64.h @@ -56,6 +56,39 @@ static inline void clear_page(void *page) void copy_page(void *to, void *from); +#ifdef CONFIG_X86_5LEVEL +/* + * User space process size. This is the first address outside the user range. + * There are a few constraints that determine this: + * + * On Intel CPUs, if a SYSCALL instruction is at the highest canonical + * address, then that syscall will enter the kernel with a + * non-canonical return address, and SYSRET will explode dangerously. + * We avoid this particular problem by preventing anything + * from being mapped at the maximum canonical address. + * + * On AMD CPUs in the Ryzen family, there's a nasty bug in which the + * CPUs malfunction if they execute code from the highest canonical page. + * They'll speculate right off the end of the canonical space, and + * bad things happen. This is worked around in the same way as the + * Intel problem. + * + * With page table isolation enabled, we map the LDT in ... [stay tuned] + */ +static inline unsigned long task_size_max(void) +{ + unsigned long ret; + + alternative_io("movq %[small],%0","movq %[large],%0", + X86_FEATURE_LA57, + "=r" (ret), + [small] "i" ((1ul << 47)-PAGE_SIZE), + [large] "i" ((1ul << 56)-PAGE_SIZE)); + + return ret; +} +#endif /* CONFIG_X86_5LEVEL */ + #endif /* !__ASSEMBLY__ */ #ifdef CONFIG_X86_VSYSCALL_EMULATION diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 64297eabad63..a8d4ad856568 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -55,30 +55,13 @@ #ifdef CONFIG_X86_5LEVEL #define __VIRTUAL_MASK_SHIFT (pgtable_l5_enabled() ? 56 : 47) +/* See task_size_max() in <asm/page_64.h> */ #else #define __VIRTUAL_MASK_SHIFT 47 +#define task_size_max() ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE) #endif -/* - * User space process size. This is the first address outside the user range. - * There are a few constraints that determine this: - * - * On Intel CPUs, if a SYSCALL instruction is at the highest canonical - * address, then that syscall will enter the kernel with a - * non-canonical return address, and SYSRET will explode dangerously. - * We avoid this particular problem by preventing anything - * from being mapped at the maximum canonical address. 
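/*
 * Userspace sketch of the arithmetic behind task_size_max() above: with
 * 4-level paging the user range ends one page below the 47-bit canonical
 * boundary, and with 5-level paging (LA57) one page below the 56-bit
 * boundary.  The 'la57' flag is a stand-in for the X86_FEATURE_LA57 test
 * that the alternative_io() performs at runtime.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long task_size_max(int la57)
{
	unsigned long shift = la57 ? 56 : 47;

	return (1UL << shift) - PAGE_SIZE;
}

int main(void)
{
	printf("4-level TASK_SIZE_MAX: %#lx\n", task_size_max(0));
	printf("5-level TASK_SIZE_MAX: %#lx\n", task_size_max(1));
	return 0;
}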
- * - * On AMD CPUs in the Ryzen family, there's a nasty bug in which the - * CPUs malfunction if they execute code from the highest canonical page. - * They'll speculate right off the end of the canonical space, and - * bad things happen. This is worked around in the same way as the - * Intel problem. - * - * With page table isolation enabled, we map the LDT in ... [stay tuned] - */ -#define TASK_SIZE_MAX ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE) - +#define TASK_SIZE_MAX task_size_max() #define DEFAULT_MAP_WINDOW ((1UL << 47) - PAGE_SIZE) /* This decides where the kernel will search for a free chunk of vm diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 4abf110e2243..da3a1ac82be5 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -15,11 +15,20 @@ #include <linux/bug.h> #include <linux/types.h> #include <linux/cpumask.h> +#include <linux/static_call_types.h> #include <asm/frame.h> -static inline unsigned long long paravirt_sched_clock(void) +u64 dummy_steal_clock(int cpu); +u64 dummy_sched_clock(void); + +DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock); +DECLARE_STATIC_CALL(pv_sched_clock, dummy_sched_clock); + +void paravirt_set_sched_clock(u64 (*func)(void)); + +static inline u64 paravirt_sched_clock(void) { - return PVOP_CALL0(unsigned long long, time.sched_clock); + return static_call(pv_sched_clock)(); } struct static_key; @@ -33,9 +42,13 @@ bool pv_is_native_vcpu_is_preempted(void); static inline u64 paravirt_steal_clock(int cpu) { - return PVOP_CALL1(u64, time.steal_clock, cpu); + return static_call(pv_steal_clock)(cpu); } +#ifdef CONFIG_PARAVIRT_SPINLOCKS +void __init paravirt_set_cap(void); +#endif + /* The paravirtualized I/O functions */ static inline void slow_down_io(void) { @@ -50,7 +63,7 @@ static inline void slow_down_io(void) void native_flush_tlb_local(void); void native_flush_tlb_global(void); void native_flush_tlb_one_user(unsigned long addr); -void native_flush_tlb_others(const struct cpumask *cpumask, +void native_flush_tlb_multi(const struct cpumask *cpumask, const struct flush_tlb_info *info); static inline void __flush_tlb_local(void) @@ -68,10 +81,10 @@ static inline void __flush_tlb_one_user(unsigned long addr) PVOP_VCALL1(mmu.flush_tlb_one_user, addr); } -static inline void __flush_tlb_others(const struct cpumask *cpumask, +static inline void __flush_tlb_multi(const struct cpumask *cpumask, const struct flush_tlb_info *info) { - PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info); + PVOP_VCALL2(mmu.flush_tlb_multi, cpumask, info); } static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table) @@ -122,7 +135,9 @@ static inline void write_cr0(unsigned long x) static inline unsigned long read_cr2(void) { - return PVOP_CALLEE0(unsigned long, mmu.read_cr2); + return PVOP_ALT_CALLEE0(unsigned long, mmu.read_cr2, + "mov %%cr2, %%rax;", + ALT_NOT(X86_FEATURE_XENPV)); } static inline void write_cr2(unsigned long x) @@ -132,12 +147,14 @@ static inline void write_cr2(unsigned long x) static inline unsigned long __read_cr3(void) { - return PVOP_CALL0(unsigned long, mmu.read_cr3); + return PVOP_ALT_CALL0(unsigned long, mmu.read_cr3, + "mov %%cr3, %%rax;", ALT_NOT(X86_FEATURE_XENPV)); } static inline void write_cr3(unsigned long x) { - PVOP_VCALL1(mmu.write_cr3, x); + PVOP_ALT_VCALL1(mmu.write_cr3, x, + "mov %%rdi, %%cr3", ALT_NOT(X86_FEATURE_XENPV)); } static inline void __write_cr4(unsigned long x) @@ -157,7 +174,7 @@ static inline void halt(void) static inline void 
wbinvd(void) { - PVOP_VCALL0(cpu.wbinvd); + PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT(X86_FEATURE_XENPV)); } static inline u64 paravirt_read_msr(unsigned msr) @@ -371,22 +388,28 @@ static inline void paravirt_release_p4d(unsigned long pfn) static inline pte_t __pte(pteval_t val) { - return (pte_t) { PVOP_CALLEE1(pteval_t, mmu.make_pte, val) }; + return (pte_t) { PVOP_ALT_CALLEE1(pteval_t, mmu.make_pte, val, + "mov %%rdi, %%rax", + ALT_NOT(X86_FEATURE_XENPV)) }; } static inline pteval_t pte_val(pte_t pte) { - return PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte); + return PVOP_ALT_CALLEE1(pteval_t, mmu.pte_val, pte.pte, + "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV)); } static inline pgd_t __pgd(pgdval_t val) { - return (pgd_t) { PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val) }; + return (pgd_t) { PVOP_ALT_CALLEE1(pgdval_t, mmu.make_pgd, val, + "mov %%rdi, %%rax", + ALT_NOT(X86_FEATURE_XENPV)) }; } static inline pgdval_t pgd_val(pgd_t pgd) { - return PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd); + return PVOP_ALT_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd, + "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV)); } #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION @@ -419,12 +442,15 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) static inline pmd_t __pmd(pmdval_t val) { - return (pmd_t) { PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val) }; + return (pmd_t) { PVOP_ALT_CALLEE1(pmdval_t, mmu.make_pmd, val, + "mov %%rdi, %%rax", + ALT_NOT(X86_FEATURE_XENPV)) }; } static inline pmdval_t pmd_val(pmd_t pmd) { - return PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd); + return PVOP_ALT_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd, + "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV)); } static inline void set_pud(pud_t *pudp, pud_t pud) @@ -436,14 +462,16 @@ static inline pud_t __pud(pudval_t val) { pudval_t ret; - ret = PVOP_CALLEE1(pudval_t, mmu.make_pud, val); + ret = PVOP_ALT_CALLEE1(pudval_t, mmu.make_pud, val, + "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV)); return (pud_t) { ret }; } static inline pudval_t pud_val(pud_t pud) { - return PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud); + return PVOP_ALT_CALLEE1(pudval_t, mmu.pud_val, pud.pud, + "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV)); } static inline void pud_clear(pud_t *pudp) @@ -462,14 +490,17 @@ static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) static inline p4d_t __p4d(p4dval_t val) { - p4dval_t ret = PVOP_CALLEE1(p4dval_t, mmu.make_p4d, val); + p4dval_t ret = PVOP_ALT_CALLEE1(p4dval_t, mmu.make_p4d, val, + "mov %%rdi, %%rax", + ALT_NOT(X86_FEATURE_XENPV)); return (p4d_t) { ret }; } static inline p4dval_t p4d_val(p4d_t p4d) { - return PVOP_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d); + return PVOP_ALT_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d, + "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV)); } static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd) @@ -556,7 +587,9 @@ static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock, static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock) { - PVOP_VCALLEE1(lock.queued_spin_unlock, lock); + PVOP_ALT_VCALLEE1(lock.queued_spin_unlock, lock, + "movb $0, (%%" _ASM_ARG1 ");", + ALT_NOT(X86_FEATURE_PVUNLOCK)); } static __always_inline void pv_wait(u8 *ptr, u8 val) @@ -571,7 +604,9 @@ static __always_inline void pv_kick(int cpu) static __always_inline bool pv_vcpu_is_preempted(long cpu) { - return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu); + return PVOP_ALT_CALLEE1(bool, lock.vcpu_is_preempted, cpu, + "xor %%" _ASM_AX ", %%" _ASM_AX ";", + ALT_NOT(X86_FEATURE_VCPUPREEMPT)); } void 
__raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock); @@ -645,17 +680,18 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu); #ifdef CONFIG_PARAVIRT_XXL static inline notrace unsigned long arch_local_save_flags(void) { - return PVOP_CALLEE0(unsigned long, irq.save_fl); + return PVOP_ALT_CALLEE0(unsigned long, irq.save_fl, "pushf; pop %%rax;", + ALT_NOT(X86_FEATURE_XENPV)); } static inline notrace void arch_local_irq_disable(void) { - PVOP_VCALLEE0(irq.irq_disable); + PVOP_ALT_VCALLEE0(irq.irq_disable, "cli;", ALT_NOT(X86_FEATURE_XENPV)); } static inline notrace void arch_local_irq_enable(void) { - PVOP_VCALLEE0(irq.irq_enable); + PVOP_ALT_VCALLEE0(irq.irq_enable, "sti;", ALT_NOT(X86_FEATURE_XENPV)); } static inline notrace unsigned long arch_local_irq_save(void) @@ -700,84 +736,27 @@ extern void default_banner(void); .popsection -#define COND_PUSH(set, mask, reg) \ - .if ((~(set)) & mask); push %reg; .endif -#define COND_POP(set, mask, reg) \ - .if ((~(set)) & mask); pop %reg; .endif - #ifdef CONFIG_X86_64 - -#define PV_SAVE_REGS(set) \ - COND_PUSH(set, CLBR_RAX, rax); \ - COND_PUSH(set, CLBR_RCX, rcx); \ - COND_PUSH(set, CLBR_RDX, rdx); \ - COND_PUSH(set, CLBR_RSI, rsi); \ - COND_PUSH(set, CLBR_RDI, rdi); \ - COND_PUSH(set, CLBR_R8, r8); \ - COND_PUSH(set, CLBR_R9, r9); \ - COND_PUSH(set, CLBR_R10, r10); \ - COND_PUSH(set, CLBR_R11, r11) -#define PV_RESTORE_REGS(set) \ - COND_POP(set, CLBR_R11, r11); \ - COND_POP(set, CLBR_R10, r10); \ - COND_POP(set, CLBR_R9, r9); \ - COND_POP(set, CLBR_R8, r8); \ - COND_POP(set, CLBR_RDI, rdi); \ - COND_POP(set, CLBR_RSI, rsi); \ - COND_POP(set, CLBR_RDX, rdx); \ - COND_POP(set, CLBR_RCX, rcx); \ - COND_POP(set, CLBR_RAX, rax) +#ifdef CONFIG_PARAVIRT_XXL #define PARA_PATCH(off) ((off) / 8) #define PARA_SITE(ptype, ops) _PVSITE(ptype, ops, .quad, 8) #define PARA_INDIRECT(addr) *addr(%rip) -#else -#define PV_SAVE_REGS(set) \ - COND_PUSH(set, CLBR_EAX, eax); \ - COND_PUSH(set, CLBR_EDI, edi); \ - COND_PUSH(set, CLBR_ECX, ecx); \ - COND_PUSH(set, CLBR_EDX, edx) -#define PV_RESTORE_REGS(set) \ - COND_POP(set, CLBR_EDX, edx); \ - COND_POP(set, CLBR_ECX, ecx); \ - COND_POP(set, CLBR_EDI, edi); \ - COND_POP(set, CLBR_EAX, eax) - -#define PARA_PATCH(off) ((off) / 4) -#define PARA_SITE(ptype, ops) _PVSITE(ptype, ops, .long, 4) -#define PARA_INDIRECT(addr) *%cs:addr -#endif -#ifdef CONFIG_PARAVIRT_XXL #define INTERRUPT_RETURN \ - PARA_SITE(PARA_PATCH(PV_CPU_iret), \ - ANNOTATE_RETPOLINE_SAFE; \ - jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);) - -#define DISABLE_INTERRUPTS(clobbers) \ - PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable), \ - PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ - ANNOTATE_RETPOLINE_SAFE; \ - call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable); \ - PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) - -#define ENABLE_INTERRUPTS(clobbers) \ - PARA_SITE(PARA_PATCH(PV_IRQ_irq_enable), \ - PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ - ANNOTATE_RETPOLINE_SAFE; \ - call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable); \ - PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) -#endif + ANNOTATE_RETPOLINE_SAFE; \ + ALTERNATIVE_TERNARY("jmp *paravirt_iret(%rip);", \ + X86_FEATURE_XENPV, "jmp xen_iret;", "jmp native_iret;") -#ifdef CONFIG_X86_64 -#ifdef CONFIG_PARAVIRT_XXL #ifdef CONFIG_DEBUG_ENTRY -#define SAVE_FLAGS(clobbers) \ - PARA_SITE(PARA_PATCH(PV_IRQ_save_fl), \ - PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ - ANNOTATE_RETPOLINE_SAFE; \ - call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl); \ - PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) +.macro PARA_IRQ_save_fl 
+ PARA_SITE(PARA_PATCH(PV_IRQ_save_fl), + ANNOTATE_RETPOLINE_SAFE; + call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);) +.endm + +#define SAVE_FLAGS ALTERNATIVE "PARA_IRQ_save_fl;", "pushf; pop %rax;", \ + ALT_NOT(X86_FEATURE_XENPV) #endif #endif /* CONFIG_PARAVIRT_XXL */ #endif /* CONFIG_X86_64 */ @@ -800,5 +779,11 @@ static inline void paravirt_arch_exit_mmap(struct mm_struct *mm) { } #endif + +#ifndef CONFIG_PARAVIRT_SPINLOCKS +static inline void paravirt_set_cap(void) +{ +} +#endif #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_PARAVIRT_H */ diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index de87087d3bde..d9d6b0203ec4 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -3,7 +3,6 @@ #define _ASM_X86_PARAVIRT_TYPES_H /* Bitmask of what can be clobbered: usually at least eax. */ -#define CLBR_NONE 0 #define CLBR_EAX (1 << 0) #define CLBR_ECX (1 << 1) #define CLBR_EDX (1 << 2) @@ -15,7 +14,6 @@ #define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX) #define CLBR_RET_REG (CLBR_EAX | CLBR_EDX) -#define CLBR_SCRATCH (0) #else #define CLBR_RAX CLBR_EAX #define CLBR_RCX CLBR_ECX @@ -32,12 +30,9 @@ #define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \ CLBR_RCX | CLBR_R8 | CLBR_R9) #define CLBR_RET_REG (CLBR_RAX) -#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11) #endif /* X86_64 */ -#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG) - #ifndef __ASSEMBLY__ #include <asm/desc_defs.h> @@ -73,19 +68,6 @@ struct pv_info { const char *name; }; -struct pv_init_ops { - /* - * Patch may replace one of the defined code sequences with - * arbitrary code, subject to the same register constraints. - * This generally means the code is not free to clobber any - * registers other than EAX. The patch function should return - * the number of bytes of code generated, as we nop pad the - * rest in generic code. - */ - unsigned (*patch)(u8 type, void *insn_buff, - unsigned long addr, unsigned len); -} __no_randomize_layout; - #ifdef CONFIG_PARAVIRT_XXL struct pv_lazy_ops { /* Set deferred update mode, used for batching operations. */ @@ -95,11 +77,6 @@ struct pv_lazy_ops { } __no_randomize_layout; #endif -struct pv_time_ops { - unsigned long long (*sched_clock)(void); - unsigned long long (*steal_clock)(int cpu); -} __no_randomize_layout; - struct pv_cpu_ops { /* hooks for various privileged instructions */ void (*io_delay)(void); @@ -156,10 +133,6 @@ struct pv_cpu_ops { u64 (*read_pmc)(int counter); - /* Normal iret. Jump to this with the standard iret stack - frame set up. */ - void (*iret)(void); - void (*start_context_switch)(struct task_struct *prev); void (*end_context_switch)(struct task_struct *next); #endif @@ -188,8 +161,8 @@ struct pv_mmu_ops { void (*flush_tlb_user)(void); void (*flush_tlb_kernel)(void); void (*flush_tlb_one_user)(unsigned long addr); - void (*flush_tlb_others)(const struct cpumask *cpus, - const struct flush_tlb_info *info); + void (*flush_tlb_multi)(const struct cpumask *cpus, + const struct flush_tlb_info *info); void (*tlb_remove_table)(struct mmu_gather *tlb, void *table); @@ -290,8 +263,6 @@ struct pv_lock_ops { * number for each function using the offset which we use to indicate * what to patch. 
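/*
 * Sketch of the offset-derived patch-site numbering described in the comment
 * above: each member of the ops template gets a number computed from its byte
 * offset divided by the pointer size, which is what PARAVIRT_PATCH() does for
 * struct paravirt_patch_template.  The three-member structure here is a toy
 * stand-in, not the real template.
 */
#include <stddef.h>
#include <stdio.h>

struct toy_patch_template {
	void (*cpu_io_delay)(void);
	unsigned long (*mmu_read_cr2)(void);
	void (*irq_irq_disable)(void);
};

#define TOY_PATCH(field) \
	(offsetof(struct toy_patch_template, field) / sizeof(void *))

int main(void)
{
	printf("cpu_io_delay    -> slot %zu\n", TOY_PATCH(cpu_io_delay));
	printf("mmu_read_cr2    -> slot %zu\n", TOY_PATCH(mmu_read_cr2));
	printf("irq_irq_disable -> slot %zu\n", TOY_PATCH(irq_irq_disable));
	return 0;
}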
*/ struct paravirt_patch_template { - struct pv_init_ops init; - struct pv_time_ops time; struct pv_cpu_ops cpu; struct pv_irq_ops irq; struct pv_mmu_ops mmu; @@ -300,6 +271,7 @@ struct paravirt_patch_template { extern struct pv_info pv_info; extern struct paravirt_patch_template pv_ops; +extern void (*paravirt_iret)(void); #define PARAVIRT_PATCH(x) \ (offsetof(struct paravirt_patch_template, x) / sizeof(void *)) @@ -331,11 +303,7 @@ extern struct paravirt_patch_template pv_ops; /* Simple instruction patching code. */ #define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t" -unsigned paravirt_patch_ident_64(void *insn_buff, unsigned len); -unsigned paravirt_patch_default(u8 type, void *insn_buff, unsigned long addr, unsigned len); -unsigned paravirt_patch_insns(void *insn_buff, unsigned len, const char *start, const char *end); - -unsigned native_patch(u8 type, void *insn_buff, unsigned long addr, unsigned len); +unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr, unsigned int len); int paravirt_disable_iospace(void); @@ -371,7 +339,7 @@ int paravirt_disable_iospace(void); * on the stack. All caller-save registers (eax,edx,ecx) are expected * to be modified (either clobbered or used for return values). * X86_64, on the other hand, already specifies a register-based calling - * conventions, returning at %rax, with parameteres going on %rdi, %rsi, + * conventions, returning at %rax, with parameters going on %rdi, %rsi, * %rdx, and %rcx. Note that for this reason, x86_64 does not need any * special handling for dealing with 4 arguments, unlike i386. * However, x86_64 also have to clobber all caller saved registers, which @@ -414,11 +382,9 @@ int paravirt_disable_iospace(void); * makes sure the incoming and outgoing types are always correct. */ #ifdef CONFIG_X86_32 -#define PVOP_VCALL_ARGS \ +#define PVOP_CALL_ARGS \ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; -#define PVOP_CALL_ARGS PVOP_VCALL_ARGS - #define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x)) #define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x)) #define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x)) @@ -434,12 +400,10 @@ int paravirt_disable_iospace(void); #define VEXTRA_CLOBBERS #else /* CONFIG_X86_64 */ /* [re]ax isn't an arg, but the return val */ -#define PVOP_VCALL_ARGS \ +#define PVOP_CALL_ARGS \ unsigned long __edi = __edi, __esi = __esi, \ __edx = __edx, __ecx = __ecx, __eax = __eax; -#define PVOP_CALL_ARGS PVOP_VCALL_ARGS - #define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x)) #define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x)) #define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x)) @@ -464,152 +428,138 @@ int paravirt_disable_iospace(void); #define PVOP_TEST_NULL(op) ((void)pv_ops.op) #endif -#define PVOP_RETMASK(rettype) \ +#define PVOP_RETVAL(rettype) \ ({ unsigned long __mask = ~0UL; \ + BUILD_BUG_ON(sizeof(rettype) > sizeof(unsigned long)); \ switch (sizeof(rettype)) { \ case 1: __mask = 0xffUL; break; \ case 2: __mask = 0xffffUL; break; \ case 4: __mask = 0xffffffffUL; break; \ default: break; \ } \ - __mask; \ + __mask & __eax; \ }) -#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \ - pre, post, ...) \ +#define ____PVOP_CALL(ret, op, clbr, call_clbr, extra_clbr, ...) 
\ ({ \ - rettype __ret; \ PVOP_CALL_ARGS; \ PVOP_TEST_NULL(op); \ - /* This is 32-bit specific, but is okay in 64-bit */ \ - /* since this condition will never hold */ \ - if (sizeof(rettype) > sizeof(unsigned long)) { \ - asm volatile(pre \ - paravirt_alt(PARAVIRT_CALL) \ - post \ - : call_clbr, ASM_CALL_CONSTRAINT \ - : paravirt_type(op), \ - paravirt_clobber(clbr), \ - ##__VA_ARGS__ \ - : "memory", "cc" extra_clbr); \ - __ret = (rettype)((((u64)__edx) << 32) | __eax); \ - } else { \ - asm volatile(pre \ - paravirt_alt(PARAVIRT_CALL) \ - post \ - : call_clbr, ASM_CALL_CONSTRAINT \ - : paravirt_type(op), \ - paravirt_clobber(clbr), \ - ##__VA_ARGS__ \ - : "memory", "cc" extra_clbr); \ - __ret = (rettype)(__eax & PVOP_RETMASK(rettype)); \ - } \ - __ret; \ + asm volatile(paravirt_alt(PARAVIRT_CALL) \ + : call_clbr, ASM_CALL_CONSTRAINT \ + : paravirt_type(op), \ + paravirt_clobber(clbr), \ + ##__VA_ARGS__ \ + : "memory", "cc" extra_clbr); \ + ret; \ }) -#define __PVOP_CALL(rettype, op, pre, post, ...) \ - ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \ - EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__) - -#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \ - ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \ - PVOP_CALLEE_CLOBBERS, , \ - pre, post, ##__VA_ARGS__) - - -#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \ +#define ____PVOP_ALT_CALL(ret, op, alt, cond, clbr, call_clbr, \ + extra_clbr, ...) \ ({ \ - PVOP_VCALL_ARGS; \ + PVOP_CALL_ARGS; \ PVOP_TEST_NULL(op); \ - asm volatile(pre \ - paravirt_alt(PARAVIRT_CALL) \ - post \ + asm volatile(ALTERNATIVE(paravirt_alt(PARAVIRT_CALL), \ + alt, cond) \ : call_clbr, ASM_CALL_CONSTRAINT \ : paravirt_type(op), \ paravirt_clobber(clbr), \ ##__VA_ARGS__ \ : "memory", "cc" extra_clbr); \ + ret; \ }) -#define __PVOP_VCALL(op, pre, post, ...) \ - ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \ - VEXTRA_CLOBBERS, \ - pre, post, ##__VA_ARGS__) +#define __PVOP_CALL(rettype, op, ...) \ + ____PVOP_CALL(PVOP_RETVAL(rettype), op, CLBR_ANY, \ + PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS, ##__VA_ARGS__) + +#define __PVOP_ALT_CALL(rettype, op, alt, cond, ...) \ + ____PVOP_ALT_CALL(PVOP_RETVAL(rettype), op, alt, cond, CLBR_ANY,\ + PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS, \ + ##__VA_ARGS__) + +#define __PVOP_CALLEESAVE(rettype, op, ...) \ + ____PVOP_CALL(PVOP_RETVAL(rettype), op.func, CLBR_RET_REG, \ + PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__) + +#define __PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, ...) \ + ____PVOP_ALT_CALL(PVOP_RETVAL(rettype), op.func, alt, cond, \ + CLBR_RET_REG, PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__) + + +#define __PVOP_VCALL(op, ...) \ + (void)____PVOP_CALL(, op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \ + VEXTRA_CLOBBERS, ##__VA_ARGS__) + +#define __PVOP_ALT_VCALL(op, alt, cond, ...) \ + (void)____PVOP_ALT_CALL(, op, alt, cond, CLBR_ANY, \ + PVOP_VCALL_CLOBBERS, VEXTRA_CLOBBERS, \ + ##__VA_ARGS__) -#define __PVOP_VCALLEESAVE(op, pre, post, ...) \ - ____PVOP_VCALL(op.func, CLBR_RET_REG, \ - PVOP_VCALLEE_CLOBBERS, , \ - pre, post, ##__VA_ARGS__) +#define __PVOP_VCALLEESAVE(op, ...) \ + (void)____PVOP_CALL(, op.func, CLBR_RET_REG, \ + PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__) +#define __PVOP_ALT_VCALLEESAVE(op, alt, cond, ...) 
\ + (void)____PVOP_ALT_CALL(, op.func, alt, cond, CLBR_RET_REG, \ + PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__) #define PVOP_CALL0(rettype, op) \ - __PVOP_CALL(rettype, op, "", "") + __PVOP_CALL(rettype, op) #define PVOP_VCALL0(op) \ - __PVOP_VCALL(op, "", "") + __PVOP_VCALL(op) +#define PVOP_ALT_CALL0(rettype, op, alt, cond) \ + __PVOP_ALT_CALL(rettype, op, alt, cond) +#define PVOP_ALT_VCALL0(op, alt, cond) \ + __PVOP_ALT_VCALL(op, alt, cond) #define PVOP_CALLEE0(rettype, op) \ - __PVOP_CALLEESAVE(rettype, op, "", "") + __PVOP_CALLEESAVE(rettype, op) #define PVOP_VCALLEE0(op) \ - __PVOP_VCALLEESAVE(op, "", "") + __PVOP_VCALLEESAVE(op) +#define PVOP_ALT_CALLEE0(rettype, op, alt, cond) \ + __PVOP_ALT_CALLEESAVE(rettype, op, alt, cond) +#define PVOP_ALT_VCALLEE0(op, alt, cond) \ + __PVOP_ALT_VCALLEESAVE(op, alt, cond) #define PVOP_CALL1(rettype, op, arg1) \ - __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1)) + __PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1)) #define PVOP_VCALL1(op, arg1) \ - __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1)) + __PVOP_VCALL(op, PVOP_CALL_ARG1(arg1)) +#define PVOP_ALT_VCALL1(op, arg1, alt, cond) \ + __PVOP_ALT_VCALL(op, alt, cond, PVOP_CALL_ARG1(arg1)) #define PVOP_CALLEE1(rettype, op, arg1) \ - __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1)) + __PVOP_CALLEESAVE(rettype, op, PVOP_CALL_ARG1(arg1)) #define PVOP_VCALLEE1(op, arg1) \ - __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1)) + __PVOP_VCALLEESAVE(op, PVOP_CALL_ARG1(arg1)) +#define PVOP_ALT_CALLEE1(rettype, op, arg1, alt, cond) \ + __PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, PVOP_CALL_ARG1(arg1)) +#define PVOP_ALT_VCALLEE1(op, arg1, alt, cond) \ + __PVOP_ALT_VCALLEESAVE(op, alt, cond, PVOP_CALL_ARG1(arg1)) #define PVOP_CALL2(rettype, op, arg1, arg2) \ - __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \ - PVOP_CALL_ARG2(arg2)) + __PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2)) #define PVOP_VCALL2(op, arg1, arg2) \ - __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \ - PVOP_CALL_ARG2(arg2)) - -#define PVOP_CALLEE2(rettype, op, arg1, arg2) \ - __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \ - PVOP_CALL_ARG2(arg2)) -#define PVOP_VCALLEE2(op, arg1, arg2) \ - __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \ - PVOP_CALL_ARG2(arg2)) - + __PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2)) #define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \ - __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \ + __PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1), \ PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3)) #define PVOP_VCALL3(op, arg1, arg2, arg3) \ - __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \ + __PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), \ PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3)) -/* This is the only difference in x86_64. 
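/*
 * Userspace sketch of the PVOP_RETVAL() masking above: the callee leaves its
 * result in a full-width register (modelled by 'rax' here) and the macro
 * masks it down to the declared return type before handing it back.  The
 * RETVAL() helper is illustrative only.
 */
#include <stdio.h>

#define RETVAL(type, rax)					\
	((type)((rax) & (sizeof(type) == 1 ? 0xffUL :		\
			 sizeof(type) == 2 ? 0xffffUL :		\
			 sizeof(type) == 4 ? 0xffffffffUL : ~0UL)))

int main(void)
{
	unsigned long rax = 0xdeadbeefcafef00dUL;	/* junk in the upper bits */

	printf("u8  -> %#x\n",  RETVAL(unsigned char, rax));
	printf("u16 -> %#x\n",  RETVAL(unsigned short, rax));
	printf("u32 -> %#x\n",  RETVAL(unsigned int, rax));
	printf("u64 -> %#lx\n", RETVAL(unsigned long, rax));
	return 0;
}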
We can make it much simpler */ -#ifdef CONFIG_X86_32 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ __PVOP_CALL(rettype, op, \ - "push %[_arg4];", "lea 4(%%esp),%%esp;", \ - PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ - PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4))) -#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ - __PVOP_VCALL(op, \ - "push %[_arg4];", "lea 4(%%esp),%%esp;", \ - "0" ((u32)(arg1)), "1" ((u32)(arg2)), \ - "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) -#else -#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ - __PVOP_CALL(rettype, op, "", "", \ PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4)) #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ - __PVOP_VCALL(op, "", "", \ - PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ + __PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4)) -#endif /* Lazy mode for batching updates / context switch */ enum paravirt_lazy_mode { diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index a02c67291cfc..b1099f2d9800 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -1244,7 +1244,7 @@ static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp) /* * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); * - * dst - pointer to pgd range anwhere on a pgd page + * dst - pointer to pgd range anywhere on a pgd page * src - "" * count - the number of pgds to copy. * diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index dc6d149bf851..154321d29050 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -314,11 +314,6 @@ struct x86_hw_tss { struct x86_hw_tss { u32 reserved1; u64 sp0; - - /* - * We store cpu_current_top_of_stack in sp1 so it's always accessible. - * Linux does not use ring 1, so sp1 is not otherwise needed. - */ u64 sp1; /* @@ -426,12 +421,7 @@ struct irq_stack { char stack[IRQ_STACK_SIZE]; } __aligned(IRQ_STACK_SIZE); -#ifdef CONFIG_X86_32 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack); -#else -/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */ -#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1 -#endif #ifdef CONFIG_X86_64 struct fixed_percpu_data { @@ -439,6 +429,9 @@ struct fixed_percpu_data { * GCC hardcodes the stack canary as %gs:40. Since the * irq_stack is the object at %gs:0, we reserve the bottom * 48 bytes of the irq stack for the canary. + * + * Once we are willing to require -mstack-protector-guard-symbol= + * support for x86_64 stackprotector, we can get rid of this. */ char gs_base[40]; unsigned long stack_canary; @@ -460,17 +453,7 @@ extern asmlinkage void ignore_sysret(void); void current_save_fsgs(void); #else /* X86_64 */ #ifdef CONFIG_STACKPROTECTOR -/* - * Make sure stack canary segment base is cached-aligned: - * "For Intel Atom processors, avoid non zero segment base address - * that is not aligned to cache line boundary at all cost." - * (Optim Ref Manual Assembly/Compiler Coding Rule 15.) - */ -struct stack_canary { - char __pad[20]; /* canary at %gs:20 */ - unsigned long canary; -}; -DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); +DECLARE_PER_CPU(unsigned long, __stack_chk_guard); #endif DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr); DECLARE_PER_CPU(struct irq_stack *, softirq_stack_ptr); @@ -527,7 +510,7 @@ struct thread_struct { struct io_bitmap *io_bitmap; /* - * IOPL. 
Priviledge level dependent I/O permission which is + * IOPL. Privilege level dependent I/O permission which is * emulated via the I/O bitmap to prevent user space from disabling * interrupts. */ @@ -551,15 +534,6 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset, *size = fpu_kernel_xstate_size; } -/* - * Thread-synchronous status. - * - * This is different from the flags in that nobody else - * ever touches our thread-synchronous status, so we don't - * have to worry about atomic accesses. - */ -#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ - static inline void native_load_sp0(unsigned long sp0) { diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h index b6a9d51d1d79..8c5d1910a848 100644 --- a/arch/x86/include/asm/proto.h +++ b/arch/x86/include/asm/proto.h @@ -4,6 +4,8 @@ #include <asm/ldt.h> +struct task_struct; + /* misc architecture specific prototypes */ void syscall_init(void); diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 409f661481e1..b94f615600d5 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -37,7 +37,10 @@ struct pt_regs { unsigned short __esh; unsigned short fs; unsigned short __fsh; - /* On interrupt, gs and __gsh store the vector number. */ + /* + * On interrupt, gs and __gsh store the vector number. They never + * store gs any more. + */ unsigned short gs; unsigned short __gsh; /* On interrupt, this is the error code. */ diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index 7fdd4facfce7..72044026eb3c 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -95,7 +95,7 @@ * * 26 - ESPFIX small SS * 27 - per-cpu [ offset to per-cpu data area ] - * 28 - stack_canary-20 [ for stack protector ] <=== cacheline #8 + * 28 - unused * 29 - unused * 30 - unused * 31 - TSS for double fault handler @@ -118,7 +118,6 @@ #define GDT_ENTRY_ESPFIX_SS 26 #define GDT_ENTRY_PERCPU 27 -#define GDT_ENTRY_STACK_CANARY 28 #define GDT_ENTRY_DOUBLEFAULT_TSS 31 @@ -158,12 +157,6 @@ # define __KERNEL_PERCPU 0 #endif -#ifdef CONFIG_STACKPROTECTOR -# define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY*8) -#else -# define __KERNEL_STACK_CANARY 0 -#endif - #else /* 64-bit: */ #include <asm/cache.h> @@ -364,22 +357,15 @@ static inline void __loadsegment_fs(unsigned short value) asm("mov %%" #seg ",%0":"=r" (value) : : "memory") /* - * x86-32 user GS accessors: + * x86-32 user GS accessors. This is ugly and could do with some cleaning up. 
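/*
 * Small sketch of the selector arithmetic behind GDT_ENTRY_*-based selector
 * definitions in this hunk (e.g. the removed __KERNEL_STACK_CANARY, which was
 * GDT_ENTRY_STACK_CANARY*8): a GDT selector is the descriptor index shifted
 * left by three, OR'ed with the requested privilege level.  The helper is
 * illustrative, not a kernel interface.
 */
#include <stdio.h>

#define GDT_ENTRY_ESPFIX_SS		26
#define GDT_ENTRY_PERCPU		27
#define GDT_ENTRY_DOUBLEFAULT_TSS	31

static unsigned int selector(unsigned int gdt_index, unsigned int rpl)
{
	return (gdt_index << 3) | rpl;
}

int main(void)
{
	printf("ESPFIX SS selector:       %#x\n", selector(GDT_ENTRY_ESPFIX_SS, 0));
	printf("PERCPU selector:          %#x\n", selector(GDT_ENTRY_PERCPU, 0));
	printf("doublefault TSS selector: %#x\n", selector(GDT_ENTRY_DOUBLEFAULT_TSS, 0));
	return 0;
}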
*/ #ifdef CONFIG_X86_32 -# ifdef CONFIG_X86_32_LAZY_GS -# define get_user_gs(regs) (u16)({ unsigned long v; savesegment(gs, v); v; }) -# define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v)) -# define task_user_gs(tsk) ((tsk)->thread.gs) -# define lazy_save_gs(v) savesegment(gs, (v)) -# define lazy_load_gs(v) loadsegment(gs, (v)) -# else /* X86_32_LAZY_GS */ -# define get_user_gs(regs) (u16)((regs)->gs) -# define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0) -# define task_user_gs(tsk) (task_pt_regs(tsk)->gs) -# define lazy_save_gs(v) do { } while (0) -# define lazy_load_gs(v) do { } while (0) -# endif /* X86_32_LAZY_GS */ +# define get_user_gs(regs) (u16)({ unsigned long v; savesegment(gs, v); v; }) +# define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v)) +# define task_user_gs(tsk) ((tsk)->thread.gs) +# define lazy_save_gs(v) savesegment(gs, (v)) +# define lazy_load_gs(v) loadsegment(gs, (v)) +# define load_gs_index(v) loadsegment(gs, (v)) #endif /* X86_32 */ #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h index 4352f08bfbb5..43fa081a1adb 100644 --- a/arch/x86/include/asm/set_memory.h +++ b/arch/x86/include/asm/set_memory.h @@ -8,8 +8,8 @@ /* * The set_memory_* API can be used to change various attributes of a virtual * address range. The attributes include: - * Cachability : UnCached, WriteCombining, WriteThrough, WriteBack - * Executability : eXeutable, NoteXecutable + * Cacheability : UnCached, WriteCombining, WriteThrough, WriteBack + * Executability : eXecutable, NoteXecutable * Read/Write : ReadOnly, ReadWrite * Presence : NotPresent * Encryption : Encrypted, Decrypted diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 389d851a02c4..a12458a7a8d4 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -130,11 +130,6 @@ void *extend_brk(size_t size, size_t align); : : "i" (sz)); \ } -/* Helper for reserving space for arrays of things */ -#define RESERVE_BRK_ARRAY(type, name, entries) \ - type *name; \ - RESERVE_BRK(name, sizeof(type) * entries) - extern void probe_roms(void); #ifdef __i386__ diff --git a/arch/x86/kernel/cpu/sgx/arch.h b/arch/x86/include/asm/sgx.h index dd7602c44c72..9c31e0ebc55b 100644 --- a/arch/x86/kernel/cpu/sgx/arch.h +++ b/arch/x86/include/asm/sgx.h @@ -2,15 +2,20 @@ /** * Copyright(c) 2016-20 Intel Corporation. * - * Contains data structures defined by the SGX architecture. Data structures - * defined by the Linux software stack should not be placed here. + * Intel Software Guard Extensions (SGX) support. */ -#ifndef _ASM_X86_SGX_ARCH_H -#define _ASM_X86_SGX_ARCH_H +#ifndef _ASM_X86_SGX_H +#define _ASM_X86_SGX_H #include <linux/bits.h> #include <linux/types.h> +/* + * This file contains both data structures defined by SGX architecture and Linux + * defined software data structures and functions. The two should not be mixed + * together for better readibility. The architectural definitions come first. + */ + /* The SGX specific CPUID function. */ #define SGX_CPUID 0x12 /* EPC enumeration. */ @@ -22,16 +27,36 @@ /* The bitmask for the EPC section type. 
*/ #define SGX_CPUID_EPC_MASK GENMASK(3, 0) +enum sgx_encls_function { + ECREATE = 0x00, + EADD = 0x01, + EINIT = 0x02, + EREMOVE = 0x03, + EDGBRD = 0x04, + EDGBWR = 0x05, + EEXTEND = 0x06, + ELDU = 0x08, + EBLOCK = 0x09, + EPA = 0x0A, + EWB = 0x0B, + ETRACK = 0x0C, + EAUG = 0x0D, + EMODPR = 0x0E, + EMODT = 0x0F, +}; + /** * enum sgx_return_code - The return code type for ENCLS, ENCLU and ENCLV * %SGX_NOT_TRACKED: Previous ETRACK's shootdown sequence has not * been completed yet. + * %SGX_CHILD_PRESENT SECS has child pages present in the EPC. * %SGX_INVALID_EINITTOKEN: EINITTOKEN is invalid and enclave signer's * public key does not match IA32_SGXLEPUBKEYHASH. * %SGX_UNMASKED_EVENT: An unmasked event, e.g. INTR, was received */ enum sgx_return_code { SGX_NOT_TRACKED = 11, + SGX_CHILD_PRESENT = 13, SGX_INVALID_EINITTOKEN = 16, SGX_UNMASKED_EVENT = 128, }; @@ -271,7 +296,7 @@ struct sgx_pcmd { * @header1: constant byte string * @vendor: must be either 0x0000 or 0x8086 * @date: YYYYMMDD in BCD - * @header2: costant byte string + * @header2: constant byte string * @swdefined: software defined value */ struct sgx_sigstruct_header { @@ -335,4 +360,19 @@ struct sgx_sigstruct { #define SGX_LAUNCH_TOKEN_SIZE 304 -#endif /* _ASM_X86_SGX_ARCH_H */ +/* + * Do not put any hardware-defined SGX structure representations below this + * comment! + */ + +#ifdef CONFIG_X86_SGX_KVM +int sgx_virt_ecreate(struct sgx_pageinfo *pageinfo, void __user *secs, + int *trapnr); +int sgx_virt_einit(void __user *sigstruct, void __user *token, + void __user *secs, u64 *lepubkeyhash, int *trapnr); +#endif + +int sgx_set_attribute(unsigned long *allowed_attributes, + unsigned int attribute_fd); + +#endif /* _ASM_X86_SGX_H */ diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h index 0bc9b0895f33..d17b39893b79 100644 --- a/arch/x86/include/asm/smap.h +++ b/arch/x86/include/asm/smap.h @@ -11,6 +11,7 @@ #include <asm/nops.h> #include <asm/cpufeatures.h> +#include <asm/alternative.h> /* "Raw" instruction opcodes */ #define __ASM_CLAC ".byte 0x0f,0x01,0xca" @@ -18,8 +19,6 @@ #ifdef __ASSEMBLY__ -#include <asm/alternative-asm.h> - #ifdef CONFIG_X86_SMAP #define ASM_CLAC \ @@ -37,8 +36,6 @@ #else /* __ASSEMBLY__ */ -#include <asm/alternative.h> - #ifdef CONFIG_X86_SMAP static __always_inline void clac(void) diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index c0538f82c9a2..630ff08532be 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -132,6 +132,7 @@ void native_play_dead(void); void play_dead_common(void); void wbinvd_on_cpu(int cpu); int wbinvd_on_all_cpus(void); +void cond_wakeup_cpu0(void); void native_smp_send_reschedule(int cpu); void native_send_call_func_ipi(const struct cpumask *mask); diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h index 1d3cbaef4bb7..2acd6cb62328 100644 --- a/arch/x86/include/asm/special_insns.h +++ b/arch/x86/include/asm/special_insns.h @@ -214,7 +214,7 @@ static inline void clflush(volatile void *__p) static inline void clflushopt(volatile void *__p) { - alternative_io(".byte " __stringify(NOP_DS_PREFIX) "; clflush %P0", + alternative_io(".byte 0x3e; clflush %P0", ".byte 0x66; clflush %P0", X86_FEATURE_CLFLUSHOPT, "+m" (*(volatile char __force *)__p)); @@ -225,7 +225,7 @@ static inline void clwb(volatile void *__p) volatile struct { char x[64]; } *p = __p; asm volatile(ALTERNATIVE_2( - ".byte " __stringify(NOP_DS_PREFIX) "; clflush (%[pax])", + ".byte 0x3e; clflush (%[pax])", ".byte 0x66; 
clflush (%[pax])", /* clflushopt (%%rax) */ X86_FEATURE_CLFLUSHOPT, ".byte 0x66, 0x0f, 0xae, 0x30", /* clwb (%%rax) */ diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index 7fb482f0f25b..b6ffe58c70fa 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -5,30 +5,23 @@ * Stack protector works by putting predefined pattern at the start of * the stack frame and verifying that it hasn't been overwritten when * returning from the function. The pattern is called stack canary - * and unfortunately gcc requires it to be at a fixed offset from %gs. - * On x86_64, the offset is 40 bytes and on x86_32 20 bytes. x86_64 - * and x86_32 use segment registers differently and thus handles this - * requirement differently. + * and unfortunately gcc historically required it to be at a fixed offset + * from the percpu segment base. On x86_64, the offset is 40 bytes. * - * On x86_64, %gs is shared by percpu area and stack canary. All - * percpu symbols are zero based and %gs points to the base of percpu - * area. The first occupant of the percpu area is always - * fixed_percpu_data which contains stack_canary at offset 40. Userland - * %gs is always saved and restored on kernel entry and exit using - * swapgs, so stack protector doesn't add any complexity there. + * The same segment is shared by percpu area and stack canary. On + * x86_64, percpu symbols are zero based and %gs (64-bit) points to the + * base of percpu area. The first occupant of the percpu area is always + * fixed_percpu_data which contains stack_canary at the approproate + * offset. On x86_32, the stack canary is just a regular percpu + * variable. * - * On x86_32, it's slightly more complicated. As in x86_64, %gs is - * used for userland TLS. Unfortunately, some processors are much - * slower at loading segment registers with different value when - * entering and leaving the kernel, so the kernel uses %fs for percpu - * area and manages %gs lazily so that %gs is switched only when - * necessary, usually during task switch. + * Putting percpu data in %fs on 32-bit is a minor optimization compared to + * using %gs. Since 32-bit userspace normally has %fs == 0, we are likely + * to load 0 into %fs on exit to usermode, whereas with percpu data in + * %gs, we are likely to load a non-null %gs on return to user mode. * - * As gcc requires the stack canary at %gs:20, %gs can't be managed - * lazily if stack protector is enabled, so the kernel saves and - * restores userland %gs on kernel entry and exit. This behavior is - * controlled by CONFIG_X86_32_LAZY_GS and accessors are defined in - * system.h to hide the details. + * Once we are willing to require GCC 8.1 or better for 64-bit stackprotector + * support, we can remove some of this complexity. */ #ifndef _ASM_STACKPROTECTOR_H @@ -45,14 +38,6 @@ #include <linux/sched.h> /* - * 24 byte read-only segment initializer for stack canary. Linker - * can't handle the address bit shifting. Address will be set in - * head_32 for boot CPU and setup_per_cpu_areas() for others. - */ -#define GDT_STACK_CANARY_INIT \ - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18), - -/* * Initialize the stackprotector canary value. 
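/*
 * Quick layout check for the fixed offset the comment above relies on: with
 * a 40-byte gs_base[] placed first, stack_canary lands at offset 40 from the
 * percpu segment base, which is where gcc's %gs:40 accesses expect it.  The
 * structure mirrors fixed_percpu_data from processor.h; userspace sketch only.
 */
#include <stddef.h>
#include <stdio.h>

struct fixed_percpu_data {
	char gs_base[40];
	unsigned long stack_canary;
};

int main(void)
{
	printf("stack_canary offset: %zu\n",
	       offsetof(struct fixed_percpu_data, stack_canary));	/* prints 40 */
	return 0;
}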
* * NOTE: this must only be called from functions that never return @@ -86,7 +71,7 @@ static __always_inline void boot_init_stack_canary(void) #ifdef CONFIG_X86_64 this_cpu_write(fixed_percpu_data.stack_canary, canary); #else - this_cpu_write(stack_canary.canary, canary); + this_cpu_write(__stack_chk_guard, canary); #endif } @@ -95,48 +80,16 @@ static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle) #ifdef CONFIG_X86_64 per_cpu(fixed_percpu_data.stack_canary, cpu) = idle->stack_canary; #else - per_cpu(stack_canary.canary, cpu) = idle->stack_canary; -#endif -} - -static inline void setup_stack_canary_segment(int cpu) -{ -#ifdef CONFIG_X86_32 - unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu); - struct desc_struct *gdt_table = get_cpu_gdt_rw(cpu); - struct desc_struct desc; - - desc = gdt_table[GDT_ENTRY_STACK_CANARY]; - set_desc_base(&desc, canary); - write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S); -#endif -} - -static inline void load_stack_canary_segment(void) -{ -#ifdef CONFIG_X86_32 - asm("mov %0, %%gs" : : "r" (__KERNEL_STACK_CANARY) : "memory"); + per_cpu(__stack_chk_guard, cpu) = idle->stack_canary; #endif } #else /* STACKPROTECTOR */ -#define GDT_STACK_CANARY_INIT - /* dummy boot_init_stack_canary() is defined in linux/stackprotector.h */ -static inline void setup_stack_canary_segment(int cpu) -{ } - static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle) { } -static inline void load_stack_canary_segment(void) -{ -#ifdef CONFIG_X86_32 - asm volatile ("mov %0, %%gs" : : "r" (0)); -#endif -} - #endif /* STACKPROTECTOR */ #endif /* _ASM_STACKPROTECTOR_H */ diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h index fdbd9d7b7bca..7b132d0312eb 100644 --- a/arch/x86/include/asm/suspend_32.h +++ b/arch/x86/include/asm/suspend_32.h @@ -13,12 +13,10 @@ /* image of the saved processor state */ struct saved_context { /* - * On x86_32, all segment registers, with the possible exception of - * gs, are saved at kernel entry in pt_regs. + * On x86_32, all segment registers except gs are saved at kernel + * entry in pt_regs. */ -#ifdef CONFIG_X86_32_LAZY_GS u16 gs; -#endif unsigned long cr0, cr2, cr3, cr4; u64 misc_enable; bool misc_enable_saved; diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 1c561945b426..772e60efe243 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -269,7 +269,9 @@ struct vmcb_save_area { * SEV-ES guests when referenced through the GHCB or for * saving to the host save area. */ - u8 reserved_7[80]; + u8 reserved_7[72]; + u32 spec_ctrl; /* Guest version of SPEC_CTRL at 0x2E0 */ + u8 reserved_7b[4]; u32 pkru; u8 reserved_7a[20]; u64 reserved_8; /* rax already available at 0x01f8 */ diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h index 9f69cc497f4b..b5f0d2ff47e4 100644 --- a/arch/x86/include/asm/switch_to.h +++ b/arch/x86/include/asm/switch_to.h @@ -71,12 +71,7 @@ static inline void update_task_stack(struct task_struct *task) else this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0); #else - /* - * x86-64 updates x86_tss.sp1 via cpu_current_top_of_stack. That - * doesn't work on x86-32 because sp1 and - * cpu_current_top_of_stack have different values (because of - * the non-zero stack-padding on 32bit). - */ + /* Xen PV enters the kernel on the thread stack. 
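/*
 * Layout sanity check for the vmcb_save_area change above: the 80-byte
 * reserved block is split into 72 reserved bytes, a 32-bit spec_ctrl field
 * and 4 trailing reserved bytes, so the fields that follow (pkru, ...) keep
 * their old offsets.  Toy structures, packed like the hardware-defined layout.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct old_tail {
	uint8_t  reserved_7[80];
	uint32_t pkru;
} __attribute__((packed));

struct new_tail {
	uint8_t  reserved_7[72];
	uint32_t spec_ctrl;
	uint8_t  reserved_7b[4];
	uint32_t pkru;
} __attribute__((packed));

int main(void)
{
	printf("old pkru offset: %zu\n", offsetof(struct old_tail, pkru));	/* 80 */
	printf("new pkru offset: %zu\n", offsetof(struct new_tail, pkru));	/* 80 */
	return 0;
}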
*/ if (static_cpu_has(X86_FEATURE_XENPV)) load_sp0(task_top_of_stack(task)); #endif diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h index a84333adeef2..80c08c7d5e72 100644 --- a/arch/x86/include/asm/syscall_wrapper.h +++ b/arch/x86/include/asm/syscall_wrapper.h @@ -80,6 +80,7 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs); } #define __COND_SYSCALL(abi, name) \ + __weak long __##abi##_##name(const struct pt_regs *__unused); \ __weak long __##abi##_##name(const struct pt_regs *__unused) \ { \ return sys_ni_syscall(); \ diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 0d751d5da702..de406d93b515 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -197,18 +197,25 @@ static inline int arch_within_stack_frames(const void * const stack, #endif } -#else /* !__ASSEMBLY__ */ - -#ifdef CONFIG_X86_64 -# define cpu_current_top_of_stack (cpu_tss_rw + TSS_sp1) -#endif +#endif /* !__ASSEMBLY__ */ -#endif +/* + * Thread-synchronous status. + * + * This is different from the flags in that nobody else + * ever touches our thread-synchronous status, so we don't + * have to worry about atomic accesses. + */ +#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ +#ifndef __ASSEMBLY__ #ifdef CONFIG_COMPAT #define TS_I386_REGS_POKED 0x0004 /* regs poked by 32-bit ptracer */ + +#define arch_set_restart_data(restart) \ + do { restart->arch_data = current_thread_info()->status; } while (0) + #endif -#ifndef __ASSEMBLY__ #ifdef CONFIG_X86_32 #define in_ia32_syscall() true diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 8c87a2e0b660..fa952eadbc2e 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -90,23 +90,6 @@ struct tlb_state { u16 next_asid; /* - * We can be in one of several states: - * - * - Actively using an mm. Our CPU's bit will be set in - * mm_cpumask(loaded_mm) and is_lazy == false; - * - * - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit - * will not be set in mm_cpumask(&init_mm) and is_lazy == false. - * - * - Lazily using a real mm. loaded_mm != &init_mm, our bit - * is set in mm_cpumask(loaded_mm), but is_lazy == true. - * We're heuristically guessing that the CR3 load we - * skipped more than makes up for the overhead added by - * lazy mode. - */ - bool is_lazy; - - /* * If set we changed the page tables in such a way that we * needed an invalidation of all contexts (aka. PCIDs / ASIDs). * This tells us to go invalidate all the non-loaded ctxs[] @@ -151,7 +134,27 @@ struct tlb_state { */ struct tlb_context ctxs[TLB_NR_DYN_ASIDS]; }; -DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate); +DECLARE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate); + +struct tlb_state_shared { + /* + * We can be in one of several states: + * + * - Actively using an mm. Our CPU's bit will be set in + * mm_cpumask(loaded_mm) and is_lazy == false; + * + * - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit + * will not be set in mm_cpumask(&init_mm) and is_lazy == false. + * + * - Lazily using a real mm. loaded_mm != &init_mm, our bit + * is set in mm_cpumask(loaded_mm), but is_lazy == true. + * We're heuristically guessing that the CR3 load we + * skipped more than makes up for the overhead added by + * lazy mode. 
+ */ + bool is_lazy; +}; +DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared); bool nmi_uaccess_okay(void); #define nmi_uaccess_okay nmi_uaccess_okay @@ -175,7 +178,7 @@ extern void initialize_tlbstate_and_flush(void); * - flush_tlb_page(vma, vmaddr) flushes one page * - flush_tlb_range(vma, start, end) flushes a range of pages * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages - * - flush_tlb_others(cpumask, info) flushes TLBs on other cpus + * - flush_tlb_multi(cpumask, info) flushes TLBs on multiple cpus * * ..but the i386 has somewhat limited tlb flushing capabilities, * and page-granular flushes are available only on i486 and up. @@ -201,14 +204,15 @@ struct flush_tlb_info { unsigned long start; unsigned long end; u64 new_tlb_gen; - unsigned int stride_shift; - bool freed_tables; + unsigned int initiating_cpu; + u8 stride_shift; + u8 freed_tables; }; void flush_tlb_local(void); void flush_tlb_one_user(unsigned long addr); void flush_tlb_one_kernel(unsigned long addr); -void flush_tlb_others(const struct cpumask *cpumask, +void flush_tlb_multi(const struct cpumask *cpumask, const struct flush_tlb_info *info); #ifdef CONFIG_PARAVIRT diff --git a/arch/x86/include/asm/trace/hyperv.h b/arch/x86/include/asm/trace/hyperv.h index 4d705cb4d63b..a8e5a7a2b460 100644 --- a/arch/x86/include/asm/trace/hyperv.h +++ b/arch/x86/include/asm/trace/hyperv.h @@ -8,7 +8,7 @@ #if IS_ENABLED(CONFIG_HYPERV) -TRACE_EVENT(hyperv_mmu_flush_tlb_others, +TRACE_EVENT(hyperv_mmu_flush_tlb_multi, TP_PROTO(const struct cpumask *cpus, const struct flush_tlb_info *info), TP_ARGS(cpus, info), diff --git a/arch/x86/include/asm/uv/uv_geo.h b/arch/x86/include/asm/uv/uv_geo.h index f241451035fb..027a9258dbca 100644 --- a/arch/x86/include/asm/uv/uv_geo.h +++ b/arch/x86/include/asm/uv/uv_geo.h @@ -10,7 +10,7 @@ #ifndef _ASM_UV_GEO_H #define _ASM_UV_GEO_H -/* Type declaractions */ +/* Type declarations */ /* Size of a geoid_s structure (must be before decl. of geoid_u) */ #define GEOID_SIZE 8 diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index 5002f52be332..d3e3197917be 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h @@ -353,7 +353,7 @@ union uvh_apicid { * * Note there are NO leds on a UV system. This register is only * used by the system controller to monitor system-wide operation. - * There are 64 regs per node. With Nahelem cpus (2 cores per node, + * There are 64 regs per node. With Nehalem cpus (2 cores per node, * 8 cpus per core, 2 threads per cpu) there are 32 cpu threads on * a node. 
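/*
 * Conceptual userspace sketch of the flush_tlb_others() -> flush_tlb_multi()
 * rename above: the interface still takes a cpumask plus a flush_tlb_info,
 * but the _multi naming (and the new initiating_cpu field) goes with letting
 * the mask cover the initiating CPU as well.  The bitmask and loop below are
 * stand-ins for the kernel's cpumask and IPI machinery.
 */
#include <stdio.h>

struct flush_tlb_info {
	unsigned long start, end;
	unsigned int initiating_cpu;
};

static void flush_one_cpu(int cpu, const struct flush_tlb_info *info)
{
	printf("cpu%d: flush [%#lx, %#lx)%s\n", cpu, info->start, info->end,
	       cpu == (int)info->initiating_cpu ? " (initiator)" : "");
}

static void flush_tlb_multi(unsigned long cpumask,
			    const struct flush_tlb_info *info)
{
	for (int cpu = 0; cpu < (int)(8 * sizeof(cpumask)); cpu++)
		if (cpumask & (1UL << cpu))
			flush_one_cpu(cpu, info);
}

int main(void)
{
	struct flush_tlb_info info = {
		.start = 0x400000, .end = 0x401000, .initiating_cpu = 0,
	};

	flush_tlb_multi(0x5UL /* CPUs 0 and 2 */, &info);
	return 0;
}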
* diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h index df01d7349d79..1936f21ed8cd 100644 --- a/arch/x86/include/asm/vdso/gettimeofday.h +++ b/arch/x86/include/asm/vdso/gettimeofday.h @@ -58,7 +58,8 @@ extern struct ms_hyperv_tsc_page hvclock_page #endif #ifdef CONFIG_TIME_NS -static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void) +static __always_inline +const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd) { return __timens_vdso_data; } diff --git a/arch/x86/include/asm/vmalloc.h b/arch/x86/include/asm/vmalloc.h index 29837740b520..49ce331f3ac6 100644 --- a/arch/x86/include/asm/vmalloc.h +++ b/arch/x86/include/asm/vmalloc.h @@ -1,6 +1,26 @@ #ifndef _ASM_X86_VMALLOC_H #define _ASM_X86_VMALLOC_H +#include <asm/cpufeature.h> +#include <asm/page.h> #include <asm/pgtable_areas.h> +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP + +#ifdef CONFIG_X86_64 +#define arch_vmap_pud_supported arch_vmap_pud_supported +static inline bool arch_vmap_pud_supported(pgprot_t prot) +{ + return boot_cpu_has(X86_FEATURE_GBPAGES); +} +#endif + +#define arch_vmap_pmd_supported arch_vmap_pmd_supported +static inline bool arch_vmap_pmd_supported(pgprot_t prot) +{ + return boot_cpu_has(X86_FEATURE_PSE); +} + +#endif + #endif /* _ASM_X86_VMALLOC_H */ diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 358707f60d99..0ffaa3156a4e 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -373,6 +373,7 @@ enum vmcs_field { #define GUEST_INTR_STATE_MOV_SS 0x00000002 #define GUEST_INTR_STATE_SMI 0x00000004 #define GUEST_INTR_STATE_NMI 0x00000008 +#define GUEST_INTR_STATE_ENCLAVE_INTR 0x00000010 /* GUEST_ACTIVITY_STATE flags */ #define GUEST_ACTIVITY_ACTIVE 0 diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index 7068e4bb057d..1a162e559753 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h @@ -87,18 +87,6 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, #endif /* - * The maximum amount of extra memory compared to the base size. The - * main scaling factor is the size of struct page. At extreme ratios - * of base:extra, all the base memory can be filled with page - * structures for the extra memory, leaving no space for anything - * else. - * - * 10x seems like a reasonable balance between scaling flexibility and - * leaving a practically usable system. - */ -#define XEN_EXTRA_MEM_RATIO (10) - -/* * Helper functions to write or read unsigned long values to/from * memory, when the access may fault. */ diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index 600a141c8805..b25d3f82c2f3 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -234,7 +234,7 @@ struct boot_params { * handling of page tables. * * These enums should only ever be used by x86 code, and the code that uses - * it should be well contained and compartamentalized. + * it should be well contained and compartmentalized. * * KVM and Xen HVM do not have a subarch as these are expected to follow * standard x86 boot entries. If there is a genuine need for "hypervisor" type @@ -252,7 +252,7 @@ struct boot_params { * @X86_SUBARCH_XEN: Used for Xen guest types which follow the PV boot path, * which start at asm startup_xen() entry point and later jump to the C * xen_start_kernel() entry point. 
Both domU and dom0 type of guests are - * currently supportd through this PV boot path. + * currently supported through this PV boot path. * @X86_SUBARCH_INTEL_MID: Used for Intel MID (Mobile Internet Device) platform * systems which do not have the PCI legacy interfaces. * @X86_SUBARCH_CE4100: Used for Intel CE media processor (CE4100) SoC diff --git a/arch/x86/include/uapi/asm/debugreg.h b/arch/x86/include/uapi/asm/debugreg.h index d95d080b30e3..0007ba077c0c 100644 --- a/arch/x86/include/uapi/asm/debugreg.h +++ b/arch/x86/include/uapi/asm/debugreg.h @@ -24,6 +24,7 @@ #define DR_TRAP3 (0x8) /* db3 */ #define DR_TRAP_BITS (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3) +#define DR_BUS_LOCK (0x800) /* bus_lock */ #define DR_STEP (0x4000) /* single-step */ #define DR_SWITCH (0x8000) /* task switch */ diff --git a/arch/x86/include/uapi/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h index b3d0664fadc9..ac83e25bbf37 100644 --- a/arch/x86/include/uapi/asm/msgbuf.h +++ b/arch/x86/include/uapi/asm/msgbuf.h @@ -12,7 +12,7 @@ * The msqid64_ds structure for x86 architecture with x32 ABI. * * On x86-32 and x86-64 we can just use the generic definition, but - * x32 uses the same binary layout as x86_64, which is differnet + * x32 uses the same binary layout as x86_64, which is different * from other 32-bit architectures. */ diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h index 9034f3007c4e..9690d6899ad9 100644 --- a/arch/x86/include/uapi/asm/sgx.h +++ b/arch/x86/include/uapi/asm/sgx.h @@ -152,7 +152,7 @@ struct sgx_enclave_run { * Most exceptions reported on ENCLU, including those that occur within the * enclave, are fixed up and reported synchronously instead of being delivered * via a standard signal. Debug Exceptions (#DB) and Breakpoints (#BP) are - * never fixed up and are always delivered via standard signals. On synchrously + * never fixed up and are always delivered via standard signals. On synchronously * reported exceptions, -EFAULT is returned and details about the exception are * recorded in @run.exception, the optional sgx_enclave_exception struct. * diff --git a/arch/x86/include/uapi/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h index f0305dc660c9..fce18eaa070c 100644 --- a/arch/x86/include/uapi/asm/shmbuf.h +++ b/arch/x86/include/uapi/asm/shmbuf.h @@ -9,7 +9,7 @@ * The shmid64_ds structure for x86 architecture with x32 ABI. * * On x86-32 and x86-64 we can just use the generic definition, but - * x32 uses the same binary layout as x86_64, which is differnet + * x32 uses the same binary layout as x86_64, which is different * from other 32-bit architectures. */ diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h index 844d60eb1882..d0d9b331d3a1 100644 --- a/arch/x86/include/uapi/asm/sigcontext.h +++ b/arch/x86/include/uapi/asm/sigcontext.h @@ -139,7 +139,7 @@ struct _fpstate_32 { * The 64-bit FPU frame. (FXSAVE format and later) * * Note1: If sw_reserved.magic1 == FP_XSTATE_MAGIC1 then the structure is - * larger: 'struct _xstate'. Note that 'struct _xstate' embedds + * larger: 'struct _xstate'. Note that 'struct _xstate' embeds * 'struct _fpstate' so that you can always assume the _fpstate portion * exists so that you can check the magic value. 
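The asm/debugreg.h change above adds a DR_BUS_LOCK bit to the DR6 layout. A hedged sketch of how a debug-exception path might test it; example_debug_trap() is a made-up function, and handle_bus_lock() is the helper added to arch/x86/kernel/cpu/intel.c further down in this merge:

static void example_debug_trap(struct pt_regs *regs, unsigned long dr6)
{
	if (dr6 & DR_BUS_LOCK)		/* bit 11: a bus lock was detected */
		handle_bus_lock(regs);

	if (dr6 & DR_STEP)
		;			/* single-step handled elsewhere */
}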
* diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h index b8e650a985e3..946d761adbd3 100644 --- a/arch/x86/include/uapi/asm/vmx.h +++ b/arch/x86/include/uapi/asm/vmx.h @@ -27,6 +27,7 @@ #define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000 +#define VMX_EXIT_REASONS_SGX_ENCLAVE_MODE 0x08000000 #define EXIT_REASON_EXCEPTION_NMI 0 #define EXIT_REASON_EXTERNAL_INTERRUPT 1 diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 2ddf08351f0b..0704c2a94272 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -35,7 +35,6 @@ KASAN_SANITIZE_sev-es.o := n KCSAN_SANITIZE := n OBJECT_FILES_NON_STANDARD_test_nx.o := y -OBJECT_FILES_NON_STANDARD_paravirt_patch.o := y ifdef CONFIG_FRAME_POINTER OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y @@ -121,7 +120,7 @@ obj-$(CONFIG_AMD_NB) += amd_nb.o obj-$(CONFIG_DEBUG_NMI_SELFTEST) += nmi_selftest.o obj-$(CONFIG_KVM_GUEST) += kvm.o kvmclock.o -obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch.o +obj-$(CONFIG_PARAVIRT) += paravirt.o obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o obj-$(CONFIG_X86_PMEM_LEGACY_DEVICE) += pmem.o diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 7bdc0239a943..e90310cbe73a 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -830,7 +830,7 @@ int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base) EXPORT_SYMBOL(acpi_unregister_ioapic); /** - * acpi_ioapic_registered - Check whether IOAPIC assoicatied with @gsi_base + * acpi_ioapic_registered - Check whether IOAPIC associated with @gsi_base * has been registered * @handle: ACPI handle of the IOAPIC device * @gsi_base: GSI base associated with the IOAPIC @@ -1554,10 +1554,18 @@ void __init acpi_boot_table_init(void) /* * Initialize the ACPI boot-time table parser. */ - if (acpi_table_init()) { + if (acpi_locate_initial_tables()) disable_acpi(); - return; - } + else + acpi_reserve_initial_tables(); +} + +int __init early_acpi_boot_init(void) +{ + if (acpi_disabled) + return 1; + + acpi_table_init_complete(); acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf); @@ -1570,18 +1578,9 @@ void __init acpi_boot_table_init(void) } else { printk(KERN_WARNING PREFIX "Disabling ACPI support\n"); disable_acpi(); - return; + return 1; } } -} - -int __init early_acpi_boot_init(void) -{ - /* - * If acpi_disabled, bail out - */ - if (acpi_disabled) - return 1; /* * Process the Multiple APIC Description Table (MADT), if present @@ -1657,7 +1656,7 @@ static int __init parse_acpi(char *arg) else if (strcmp(arg, "noirq") == 0) { acpi_noirq_set(); } - /* "acpi=copy_dsdt" copys DSDT */ + /* "acpi=copy_dsdt" copies DSDT */ else if (strcmp(arg, "copy_dsdt") == 0) { acpi_gbl_copy_dsdt_locally = 1; } diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index cc1fea76aab0..3f85fcae450c 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -41,7 +41,7 @@ unsigned long acpi_get_wakeup_address(void) * x86_acpi_enter_sleep_state - enter sleep state * @state: Sleep state to enter. * - * Wrapper around acpi_enter_sleep_state() to be called by assmebly. + * Wrapper around acpi_enter_sleep_state() to be called by assembly. 
*/ asmlinkage acpi_status __visible x86_acpi_enter_sleep_state(u8 state) { diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S index 56b6865afb2a..d5d8a352eafa 100644 --- a/arch/x86/kernel/acpi/wakeup_64.S +++ b/arch/x86/kernel/acpi/wakeup_64.S @@ -115,7 +115,7 @@ SYM_FUNC_START(do_suspend_lowlevel) movq pt_regs_r14(%rax), %r14 movq pt_regs_r15(%rax), %r15 -#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK +#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK) /* * The suspend path may have poisoned some areas deeper in the stack, * which we now need to unpoison. diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 8d778e46725d..6974b5174495 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -28,6 +28,7 @@ #include <asm/insn.h> #include <asm/io.h> #include <asm/fixmap.h> +#include <asm/paravirt.h> int __read_mostly alternatives_patched; @@ -74,186 +75,30 @@ do { \ } \ } while (0) -/* - * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes - * that correspond to that nop. Getting from one nop to the next, we - * add to the array the offset that is equal to the sum of all sizes of - * nops preceding the one we are after. - * - * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the - * nice symmetry of sizes of the previous nops. - */ -#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64) -static const unsigned char intelnops[] = -{ - GENERIC_NOP1, - GENERIC_NOP2, - GENERIC_NOP3, - GENERIC_NOP4, - GENERIC_NOP5, - GENERIC_NOP6, - GENERIC_NOP7, - GENERIC_NOP8, - GENERIC_NOP5_ATOMIC -}; -static const unsigned char * const intel_nops[ASM_NOP_MAX+2] = +const unsigned char x86nops[] = { - NULL, - intelnops, - intelnops + 1, - intelnops + 1 + 2, - intelnops + 1 + 2 + 3, - intelnops + 1 + 2 + 3 + 4, - intelnops + 1 + 2 + 3 + 4 + 5, - intelnops + 1 + 2 + 3 + 4 + 5 + 6, - intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7, - intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, + BYTES_NOP1, + BYTES_NOP2, + BYTES_NOP3, + BYTES_NOP4, + BYTES_NOP5, + BYTES_NOP6, + BYTES_NOP7, + BYTES_NOP8, }; -#endif -#ifdef K8_NOP1 -static const unsigned char k8nops[] = -{ - K8_NOP1, - K8_NOP2, - K8_NOP3, - K8_NOP4, - K8_NOP5, - K8_NOP6, - K8_NOP7, - K8_NOP8, - K8_NOP5_ATOMIC -}; -static const unsigned char * const k8_nops[ASM_NOP_MAX+2] = +const unsigned char * const x86_nops[ASM_NOP_MAX+1] = { NULL, - k8nops, - k8nops + 1, - k8nops + 1 + 2, - k8nops + 1 + 2 + 3, - k8nops + 1 + 2 + 3 + 4, - k8nops + 1 + 2 + 3 + 4 + 5, - k8nops + 1 + 2 + 3 + 4 + 5 + 6, - k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, - k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, + x86nops, + x86nops + 1, + x86nops + 1 + 2, + x86nops + 1 + 2 + 3, + x86nops + 1 + 2 + 3 + 4, + x86nops + 1 + 2 + 3 + 4 + 5, + x86nops + 1 + 2 + 3 + 4 + 5 + 6, + x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, }; -#endif - -#if defined(K7_NOP1) && !defined(CONFIG_X86_64) -static const unsigned char k7nops[] = -{ - K7_NOP1, - K7_NOP2, - K7_NOP3, - K7_NOP4, - K7_NOP5, - K7_NOP6, - K7_NOP7, - K7_NOP8, - K7_NOP5_ATOMIC -}; -static const unsigned char * const k7_nops[ASM_NOP_MAX+2] = -{ - NULL, - k7nops, - k7nops + 1, - k7nops + 1 + 2, - k7nops + 1 + 2 + 3, - k7nops + 1 + 2 + 3 + 4, - k7nops + 1 + 2 + 3 + 4 + 5, - k7nops + 1 + 2 + 3 + 4 + 5 + 6, - k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, - k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, -}; -#endif - -#ifdef P6_NOP1 -static const unsigned char p6nops[] = -{ - P6_NOP1, - P6_NOP2, - P6_NOP3, - P6_NOP4, - P6_NOP5, - P6_NOP6, - P6_NOP7, - P6_NOP8, - P6_NOP5_ATOMIC -}; -static const 
unsigned char * const p6_nops[ASM_NOP_MAX+2] = -{ - NULL, - p6nops, - p6nops + 1, - p6nops + 1 + 2, - p6nops + 1 + 2 + 3, - p6nops + 1 + 2 + 3 + 4, - p6nops + 1 + 2 + 3 + 4 + 5, - p6nops + 1 + 2 + 3 + 4 + 5 + 6, - p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, - p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, -}; -#endif - -/* Initialize these to a safe default */ -#ifdef CONFIG_X86_64 -const unsigned char * const *ideal_nops = p6_nops; -#else -const unsigned char * const *ideal_nops = intel_nops; -#endif - -void __init arch_init_ideal_nops(void) -{ - switch (boot_cpu_data.x86_vendor) { - case X86_VENDOR_INTEL: - /* - * Due to a decoder implementation quirk, some - * specific Intel CPUs actually perform better with - * the "k8_nops" than with the SDM-recommended NOPs. - */ - if (boot_cpu_data.x86 == 6 && - boot_cpu_data.x86_model >= 0x0f && - boot_cpu_data.x86_model != 0x1c && - boot_cpu_data.x86_model != 0x26 && - boot_cpu_data.x86_model != 0x27 && - boot_cpu_data.x86_model < 0x30) { - ideal_nops = k8_nops; - } else if (boot_cpu_has(X86_FEATURE_NOPL)) { - ideal_nops = p6_nops; - } else { -#ifdef CONFIG_X86_64 - ideal_nops = k8_nops; -#else - ideal_nops = intel_nops; -#endif - } - break; - - case X86_VENDOR_HYGON: - ideal_nops = p6_nops; - return; - - case X86_VENDOR_AMD: - if (boot_cpu_data.x86 > 0xf) { - ideal_nops = p6_nops; - return; - } - - fallthrough; - - default: -#ifdef CONFIG_X86_64 - ideal_nops = k8_nops; -#else - if (boot_cpu_has(X86_FEATURE_K8)) - ideal_nops = k8_nops; - else if (boot_cpu_has(X86_FEATURE_K7)) - ideal_nops = k7_nops; - else - ideal_nops = intel_nops; -#endif - } -} /* Use this to add nops to a buffer, then text_poke the whole buffer. */ static void __init_or_module add_nops(void *insns, unsigned int len) @@ -262,7 +107,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len) unsigned int noplen = len; if (noplen > ASM_NOP_MAX) noplen = ASM_NOP_MAX; - memcpy(insns, ideal_nops[noplen], noplen); + memcpy(insns, x86_nops[noplen], noplen); insns += noplen; len -= noplen; } @@ -344,19 +189,35 @@ done: static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr) { unsigned long flags; - int i; + struct insn insn; + int nop, i = 0; + + /* + * Jump over the non-NOP insns, the remaining bytes must be single-byte + * NOPs, optimize them. + */ + for (;;) { + if (insn_decode_kernel(&insn, &instr[i])) + return; + + if (insn.length == 1 && insn.opcode.bytes[0] == 0x90) + break; + + if ((i += insn.length) >= a->instrlen) + return; + } - for (i = 0; i < a->padlen; i++) { - if (instr[i] != 0x90) + for (nop = i; i < a->instrlen; i++) { + if (WARN_ONCE(instr[i] != 0x90, "Not a NOP at 0x%px\n", &instr[i])) return; } local_irq_save(flags); - add_nops(instr + (a->instrlen - a->padlen), a->padlen); + add_nops(instr + nop, i - nop); local_irq_restore(flags); DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ", - instr, a->instrlen - a->padlen, a->padlen); + instr, nop, a->instrlen); } /* @@ -388,23 +249,29 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, */ for (a = start; a < end; a++) { int insn_buff_sz = 0; + /* Mask away "NOT" flag bit for feature to test. 
*/ + u16 feature = a->cpuid & ~ALTINSTR_FLAG_INV; instr = (u8 *)&a->instr_offset + a->instr_offset; replacement = (u8 *)&a->repl_offset + a->repl_offset; BUG_ON(a->instrlen > sizeof(insn_buff)); - BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32); - if (!boot_cpu_has(a->cpuid)) { - if (a->padlen > 1) - optimize_nops(a, instr); + BUG_ON(feature >= (NCAPINTS + NBUGINTS) * 32); - continue; - } + /* + * Patch if either: + * - feature is present + * - feature not present but ALTINSTR_FLAG_INV is set to mean, + * patch if feature is *NOT* present. + */ + if (!boot_cpu_has(feature) == !(a->cpuid & ALTINSTR_FLAG_INV)) + goto next; - DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d", - a->cpuid >> 5, - a->cpuid & 0x1f, + DPRINTK("feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d)", + (a->cpuid & ALTINSTR_FLAG_INV) ? "!" : "", + feature >> 5, + feature & 0x1f, instr, instr, a->instrlen, - replacement, a->replacementlen, a->padlen); + replacement, a->replacementlen); DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr); DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement); @@ -428,14 +295,15 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, if (a->replacementlen && is_jmp(replacement[0])) recompute_jump(a, instr, replacement, insn_buff); - if (a->instrlen > a->replacementlen) { - add_nops(insn_buff + a->replacementlen, - a->instrlen - a->replacementlen); - insn_buff_sz += a->instrlen - a->replacementlen; - } + for (; insn_buff_sz < a->instrlen; insn_buff_sz++) + insn_buff[insn_buff_sz] = 0x90; + DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr); text_poke_early(instr, insn_buff, insn_buff_sz); + +next: + optimize_nops(a, instr); } } @@ -605,7 +473,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start, BUG_ON(p->len > MAX_PATCH_LEN); /* prep the buffer with the original instructions */ memcpy(insn_buff, p->instr, p->len); - used = pv_ops.init.patch(p->type, insn_buff, (unsigned long)p->instr, p->len); + used = paravirt_patch(p->type, insn_buff, (unsigned long)p->instr, p->len); BUG_ON(used > p->len); @@ -723,6 +591,33 @@ void __init alternative_instructions(void) * patching. */ + /* + * Paravirt patching and alternative patching can be combined to + * replace a function call with a short direct code sequence (e.g. + * by setting a constant return value instead of doing that in an + * external function). + * In order to make this work the following sequence is required: + * 1. set (artificial) features depending on used paravirt + * functions which can later influence alternative patching + * 2. apply paravirt patching (generally replacing an indirect + * function call with a direct one) + * 3. apply alternative patching (e.g. replacing a direct function + * call with a custom code sequence) + * Doing paravirt patching after alternative patching would clobber + * the optimization of the custom code with a function call again. + */ + paravirt_set_cap(); + + /* + * First patch paravirt functions, such that we overwrite the indirect + * call with the direct call. + */ + apply_paravirt(__parainstructions, __parainstructions_end); + + /* + * Then patch alternatives, such that those paravirt calls that are in + * alternatives can be overwritten by their immediate fragments. 
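The apply_alternatives() change above adds an ALTINSTR_FLAG_INV ("not") bit to the feature test. Restated as a sketch, equivalent to the goto-next comparison in the hunk rather than a literal copy of it:

	/*
	 *   feature present | ALTINSTR_FLAG_INV | action
	 *   ----------------+-------------------+-------------------------
	 *        yes        |       clear       | patch in the replacement
	 *        yes        |        set        | skip, only optimize NOPs
	 *        no         |       clear       | skip, only optimize NOPs
	 *        no         |        set        | patch in the replacement
	 */
	bool patch = !!boot_cpu_has(feature) ^ !!(a->cpuid & ALTINSTR_FLAG_INV);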
+ */ apply_alternatives(__alt_instructions, __alt_instructions_end); #ifdef CONFIG_SMP @@ -741,8 +636,6 @@ void __init alternative_instructions(void) } #endif - apply_paravirt(__parainstructions, __parainstructions_end); - restart_nmi(); alternatives_patched = 1; } @@ -813,7 +706,7 @@ static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm) * with a stale address space WITHOUT being in lazy mode after * restoring the previous mm. */ - if (this_cpu_read(cpu_tlbstate.is_lazy)) + if (this_cpu_read(cpu_tlbstate_shared.is_lazy)) leave_mm(smp_processor_id()); temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm); @@ -1274,15 +1167,15 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr, const void *opcode, size_t len, const void *emulate) { struct insn insn; + int ret; memcpy((void *)tp->text, opcode, len); if (!emulate) emulate = opcode; - kernel_insn_init(&insn, emulate, MAX_INSN_SIZE); - insn_get_length(&insn); + ret = insn_decode_kernel(&insn, emulate); - BUG_ON(!insn_complete(&insn)); + BUG_ON(ret < 0); BUG_ON(len != insn.length); tp->rel_addr = addr - (void *)_stext; @@ -1302,13 +1195,13 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr, default: /* assume NOP */ switch (len) { case 2: /* NOP2 -- emulate as JMP8+0 */ - BUG_ON(memcmp(emulate, ideal_nops[len], len)); + BUG_ON(memcmp(emulate, x86_nops[len], len)); tp->opcode = JMP8_INSN_OPCODE; tp->rel32 = 0; break; case 5: /* NOP5 -- emulate as JMP32+0 */ - BUG_ON(memcmp(emulate, ideal_nops[NOP_ATOMIC5], len)); + BUG_ON(memcmp(emulate, x86_nops[len], len)); tp->opcode = JMP32_INSN_OPCODE; tp->rel32 = 0; break; diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index b4396952c9a6..09083094eb57 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Shared support code for AMD K8 northbridges and derivates. + * Shared support code for AMD K8 northbridges and derivatives. * Copyright 2006 Andi Kleen, SUSE Labs. */ diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index bda4f2a36868..4a39fb429f15 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -619,7 +619,7 @@ static void setup_APIC_timer(void) if (this_cpu_has(X86_FEATURE_ARAT)) { lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP; - /* Make LAPIC timer preferrable over percpu HPET */ + /* Make LAPIC timer preferable over percpu HPET */ lapic_clockevent.rating = 150; } @@ -666,7 +666,7 @@ void lapic_update_tsc_freq(void) * In this functions we calibrate APIC bus clocks to the external timer. * * We want to do the calibration only once since we want to have local timer - * irqs syncron. CPUs connected by the same APIC bus have the very same bus + * irqs synchronous. CPUs connected by the same APIC bus have the very same bus * frequency. * * This was previously done by reading the PIT/HPET and waiting for a wrap @@ -1532,7 +1532,7 @@ static bool apic_check_and_ack(union apic_ir *irr, union apic_ir *isr) * Most probably by now the CPU has serviced that pending interrupt and it * might not have done the ack_APIC_irq() because it thought, interrupt * came from i8259 as ExtInt. LAPIC did not get EOI so it does not clear - * the ISR bit and cpu thinks it has already serivced the interrupt. Hence + * the ISR bit and cpu thinks it has already serviced the interrupt. Hence * a vector might get locked. It was noticed for timer irq (vector * 0x31). Issue an extra EOI to clear ISR. 
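Both the text_poke_loc_init() change above and the mce/severity.c change later in this merge switch to insn_decode_kernel(), which replaces the kernel_insn_init() plus insn_get_length() pair and signals failure through its return value. A minimal sketch of the new calling convention, with addr standing in for some kernel code address:

	struct insn insn;
	int ret = insn_decode_kernel(&insn, addr);

	if (ret < 0)
		return;		/* decode failed, nothing to inspect */

	/* insn.length, insn.opcode etc. are valid from here on */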
* @@ -1657,7 +1657,7 @@ static void setup_local_APIC(void) */ /* * Actually disabling the focus CPU check just makes the hang less - * frequent as it makes the interrupt distributon model be more + * frequent as it makes the interrupt distribution model be more * like LRU than MRU (the short-term load is more even across CPUs). */ @@ -1875,7 +1875,7 @@ static __init void try_to_enable_x2apic(int remap_mode) /* * Without IR, all CPUs can be addressed by IOAPIC/MSI only - * in physical mode, and CPUs with an APIC ID that cannnot + * in physical mode, and CPUs with an APIC ID that cannot * be addressed must not be brought online. */ x2apic_set_max_apicid(apic_limit); @@ -2342,6 +2342,11 @@ static int cpuid_to_apicid[] = { [0 ... NR_CPUS - 1] = -1, }; +bool arch_match_cpu_phys_id(int cpu, u64 phys_id) +{ + return phys_id == cpuid_to_apicid[cpu]; +} + #ifdef CONFIG_SMP /** * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index c3b60c37c728..d5c691a3208b 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -928,7 +928,7 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info) /* * setup_IO_APIC_irqs() programs all legacy IRQs with default trigger - * and polarity attirbutes. So allow the first user to reprogram the + * and polarity attributes. So allow the first user to reprogram the * pin with real trigger and polarity attributes. */ if (irq < nr_legacy_irqs() && data->count == 1) { @@ -994,7 +994,7 @@ static int alloc_isa_irq_from_domain(struct irq_domain *domain, /* * Legacy ISA IRQ has already been allocated, just add pin to - * the pin list assoicated with this IRQ and program the IOAPIC + * the pin list associated with this IRQ and program the IOAPIC * entry. The IOAPIC entry */ if (irq_data && irq_data->parent_data) { @@ -1032,6 +1032,16 @@ static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin, if (idx >= 0 && test_bit(mp_irqs[idx].srcbus, mp_bus_not_pci)) { irq = mp_irqs[idx].srcbusirq; legacy = mp_is_legacy_irq(irq); + /* + * IRQ2 is unusable for historical reasons on systems which + * have a legacy PIC. See the comment vs. IRQ2 further down. + * + * If this gets removed at some point then the related code + * in lapic_assign_system_vectors() needs to be adjusted as + * well. + */ + if (legacy && irq == PIC_CASCADE_IR) + return -EINVAL; } mutex_lock(&ioapic_mutex); @@ -1742,7 +1752,7 @@ static inline void ioapic_finish_move(struct irq_data *data, bool moveit) * with masking the ioapic entry and then polling until * Remote IRR was clear before reprogramming the * ioapic I don't trust the Remote IRR bit to be - * completey accurate. + * completely accurate. * * However there appears to be no other way to plug * this race, so if the Remote IRR bit is not @@ -1820,7 +1830,7 @@ static void ioapic_ack_level(struct irq_data *irq_data) /* * Tail end of clearing remote IRR bit (either by delivering the EOI * message via io-apic EOI register write or simulating it using - * mask+edge followed by unnask+level logic) manually when the + * mask+edge followed by unmask+level logic) manually when the * level triggered interrupt is seen as the edge triggered interrupt * at the cpu. 
*/ diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 3c9c7492252f..6dbdc7c22bb7 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -543,6 +543,14 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1) return -ENOSYS; + /* + * Catch any attempt to touch the cascade interrupt on a PIC + * equipped system. + */ + if (WARN_ON_ONCE(info->flags & X86_IRQ_ALLOC_LEGACY && + virq == PIC_CASCADE_IR)) + return -EINVAL; + for (i = 0; i < nr_irqs; i++) { irqd = irq_domain_get_irq_data(domain, virq + i); BUG_ON(!irqd); @@ -745,6 +753,11 @@ void __init lapic_assign_system_vectors(void) /* Mark the preallocated legacy interrupts */ for (i = 0; i < nr_legacy_irqs(); i++) { + /* + * Don't touch the cascade interrupt. It's unusable + * on PIC equipped machines. See the large comment + * in the IO/APIC code. + */ if (i != PIC_CASCADE_IR) irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i)); } @@ -1045,7 +1058,7 @@ void irq_force_complete_move(struct irq_desc *desc) * * But in case of cpu hotplug this should be a non issue * because if the affinity update happens right before all - * cpus rendevouz in stop machine, there is no way that the + * cpus rendezvous in stop machine, there is no way that the * interrupt can be blocked on the target cpu because all cpus * loops first with interrupts enabled in stop machine, so the * old vector is not yet cleaned up when the interrupt fires. @@ -1054,7 +1067,7 @@ void irq_force_complete_move(struct irq_desc *desc) * of the interrupt on the apic/system bus would be delayed * beyond the point where the target cpu disables interrupts * in stop machine. I doubt that it can happen, but at least - * there is a theroretical chance. Virtualization might be + * there is a theoretical chance. Virtualization might be * able to expose this, but AFAICT the IOAPIC emulation is not * as stupid as the real hardware. 
* diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 52bc217ca8c3..f5a48e66e4f5 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -369,6 +369,15 @@ static int __init early_get_arch_type(void) return ret; } +/* UV system found, check which APIC MODE BIOS already selected */ +static void __init early_set_apic_mode(void) +{ + if (x2apic_enabled()) + uv_system_type = UV_X2APIC; + else + uv_system_type = UV_LEGACY_APIC; +} + static int __init uv_set_system_type(char *_oem_id, char *_oem_table_id) { /* Save OEM_ID passed from ACPI MADT */ @@ -404,11 +413,12 @@ static int __init uv_set_system_type(char *_oem_id, char *_oem_table_id) else uv_hubless_system |= 0x8; - /* Copy APIC type */ + /* Copy OEM Table ID */ uv_stringify(sizeof(oem_table_id), oem_table_id, _oem_table_id); pr_info("UV: OEM IDs %s/%s, SystemType %d, HUBLESS ID %x\n", oem_id, oem_table_id, uv_system_type, uv_hubless_system); + return 0; } @@ -453,6 +463,7 @@ static int __init uv_set_system_type(char *_oem_id, char *_oem_table_id) early_set_hub_type(); /* Other UV setup functions */ + early_set_apic_mode(); early_get_pnodeid(); early_get_apic_socketid_shift(); x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range; @@ -472,29 +483,14 @@ static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id) if (uv_set_system_type(_oem_id, _oem_table_id) == 0) return 0; - /* Save and Decode OEM Table ID */ + /* Save for display of the OEM Table ID */ uv_stringify(sizeof(oem_table_id), oem_table_id, _oem_table_id); - /* This is the most common hardware variant, x2apic mode */ - if (!strcmp(oem_table_id, "UVX")) - uv_system_type = UV_X2APIC; - - /* Only used for very small systems, usually 1 chassis, legacy mode */ - else if (!strcmp(oem_table_id, "UVL")) - uv_system_type = UV_LEGACY_APIC; - - else - goto badbios; - pr_info("UV: OEM IDs %s/%s, System/UVType %d/0x%x, HUB RevID %d\n", oem_id, oem_table_id, uv_system_type, is_uv(UV_ANY), uv_min_hub_revision_id); return 0; - -badbios: - pr_err("UV: UVarchtype:%s not supported\n", uv_archtype); - BUG(); } enum uv_system_type get_uv_system_type(void) @@ -1671,6 +1667,9 @@ static __init int uv_system_init_hubless(void) if (rc < 0) return rc; + /* Set section block size for current node memory */ + set_block_size(); + /* Create user access node */ if (rc >= 0) uv_setup_proc_files(1); diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 660270359d39..241dda687eb9 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -94,7 +94,7 @@ * Remove APM dependencies in arch/i386/kernel/process.c * Remove APM dependencies in drivers/char/sysrq.c * Reset time across standby. - * Allow more inititialisation on SMP. + * Allow more initialisation on SMP. * Remove CONFIG_APM_POWER_OFF and make it boot time * configurable (default on). * Make debug only a boot time parameter (remove APM_DEBUG). @@ -766,7 +766,7 @@ static int apm_driver_version(u_short *val) * not cleared until it is acknowledged. * * Additional information is returned in the info pointer, providing - * that APM 1.2 is in use. If no messges are pending the value 0x80 + * that APM 1.2 is in use. If no messages are pending the value 0x80 * is returned (No power management events pending). */ static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info) @@ -1025,7 +1025,7 @@ static int apm_enable_power_management(int enable) * status which gives the rough battery status, and current power * source. 
The bat value returned give an estimate as a percentage * of life and a status value for the battery. The estimated life - * if reported is a lifetime in secodnds/minutes at current powwer + * if reported is a lifetime in seconds/minutes at current power * consumption. */ diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 60b9f42ce3c1..ecd3fd6993d1 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -61,13 +61,6 @@ static void __used common(void) OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext); #endif -#ifdef CONFIG_PARAVIRT_XXL - BLANK(); - OFFSET(PV_IRQ_irq_disable, paravirt_patch_template, irq.irq_disable); - OFFSET(PV_IRQ_irq_enable, paravirt_patch_template, irq.irq_enable); - OFFSET(PV_CPU_iret, paravirt_patch_template, cpu.iret); -#endif - #ifdef CONFIG_XEN BLANK(); OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask); diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index 6e043f295a60..2b411cd00a4e 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -53,11 +53,6 @@ void foo(void) offsetof(struct cpu_entry_area, tss.x86_tss.sp1) - offsetofend(struct cpu_entry_area, entry_stack_page.stack)); -#ifdef CONFIG_STACKPROTECTOR - BLANK(); - OFFSET(stack_canary_offset, stack_canary, canary); -#endif - BLANK(); DEFINE(EFI_svam, offsetof(efi_runtime_services_t, set_virtual_address_map)); } diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 347a956f71ca..2d11384dc9ab 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -628,11 +628,6 @@ static void early_init_amd(struct cpuinfo_x86 *c) early_init_amd_mc(c); -#ifdef CONFIG_X86_32 - if (c->x86 == 6) - set_cpu_cap(c, X86_FEATURE_K7); -#endif - if (c->x86 >= 0xf) set_cpu_cap(c, X86_FEATURE_K8); diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index 3ca9be482a9e..d66af2950e06 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -877,7 +877,7 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c) static int __cache_amd_cpumap_setup(unsigned int cpu, int index, struct _cpuid4_info_regs *base) { - struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + struct cpu_cacheinfo *this_cpu_ci; struct cacheinfo *this_leaf; int i, sibling; diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index ab640abe26b6..a1b756c49a93 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -161,7 +161,6 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), - GDT_STACK_CANARY_INIT #endif } }; EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); @@ -482,7 +481,7 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c) if (pk) pk->pkru = init_pkru_value; /* - * Seting X86_CR4_PKE will cause the X86_FEATURE_OSPKE + * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE * cpuid bit to be set. We need to ensure that we * update that bit in this CPU's "cpu_info". 
*/ @@ -599,7 +598,6 @@ void load_percpu_segment(int cpu) __loadsegment_simple(gs, 0); wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu)); #endif - load_stack_canary_segment(); } #ifdef CONFIG_X86_32 @@ -1330,7 +1328,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) cpu_set_bug_bits(c); - cpu_set_core_cap_bits(c); + sld_setup(c); fpu__init_system(c); @@ -1404,7 +1402,7 @@ static void detect_null_seg_behavior(struct cpuinfo_x86 *c) * where GS is unused by the prev and next threads. * * Since neither vendor documents this anywhere that I can see, - * detect it directly instead of hardcoding the choice by + * detect it directly instead of hard-coding the choice by * vendor. * * I've designated AMD's behavior as the "bug" because it's @@ -1748,6 +1746,8 @@ DEFINE_PER_CPU(bool, hardirq_stack_inuse); DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; EXPORT_PER_CPU_SYMBOL(__preempt_count); +DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = TOP_OF_INIT_STACK; + /* May not be marked __init: used by software suspend */ void syscall_init(void) { @@ -1796,7 +1796,8 @@ DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack); #ifdef CONFIG_STACKPROTECTOR -DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); +DEFINE_PER_CPU(unsigned long, __stack_chk_guard); +EXPORT_PER_CPU_SYMBOL(__stack_chk_guard); #endif #endif /* CONFIG_X86_64 */ @@ -1850,8 +1851,8 @@ static inline void setup_getcpu(int cpu) unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu)); struct desc_struct d = { }; - if (boot_cpu_has(X86_FEATURE_RDTSCP)) - write_rdtscp_aux(cpudata); + if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID)) + wrmsr(MSR_TSC_AUX, cpudata, 0); /* Store CPU and node number in limit. 
*/ d.limit0 = cpudata; diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c index 42af31b64c2c..defda61f372d 100644 --- a/arch/x86/kernel/cpu/cpuid-deps.c +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -72,6 +72,9 @@ static const struct cpuid_dep cpuid_deps[] = { { X86_FEATURE_AVX512_FP16, X86_FEATURE_AVX512BW }, { X86_FEATURE_ENQCMD, X86_FEATURE_XSAVES }, { X86_FEATURE_PER_THREAD_MBA, X86_FEATURE_MBA }, + { X86_FEATURE_SGX_LC, X86_FEATURE_SGX }, + { X86_FEATURE_SGX1, X86_FEATURE_SGX }, + { X86_FEATURE_SGX2, X86_FEATURE_SGX1 }, {} }; diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 1d9b8aaea06c..7227c15299d0 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -291,7 +291,7 @@ static void init_cyrix(struct cpuinfo_x86 *c) mark_tsc_unstable("cyrix 5510/5520 detected"); } #endif - c->x86_cache_size = 16; /* Yep 16K integrated cache thats it */ + c->x86_cache_size = 16; /* Yep 16K integrated cache that's it */ /* GXm supports extended cpuid levels 'ala' AMD */ if (c->cpuid_level == 2) { diff --git a/arch/x86/kernel/cpu/feat_ctl.c b/arch/x86/kernel/cpu/feat_ctl.c index 3b1b01f2b248..da696eb4821a 100644 --- a/arch/x86/kernel/cpu/feat_ctl.c +++ b/arch/x86/kernel/cpu/feat_ctl.c @@ -93,15 +93,9 @@ static void init_vmx_capabilities(struct cpuinfo_x86 *c) } #endif /* CONFIG_X86_VMX_FEATURE_NAMES */ -static void clear_sgx_caps(void) -{ - setup_clear_cpu_cap(X86_FEATURE_SGX); - setup_clear_cpu_cap(X86_FEATURE_SGX_LC); -} - static int __init nosgx(char *str) { - clear_sgx_caps(); + setup_clear_cpu_cap(X86_FEATURE_SGX); return 0; } @@ -110,23 +104,30 @@ early_param("nosgx", nosgx); void init_ia32_feat_ctl(struct cpuinfo_x86 *c) { + bool enable_sgx_kvm = false, enable_sgx_driver = false; bool tboot = tboot_enabled(); - bool enable_sgx; + bool enable_vmx; u64 msr; if (rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr)) { clear_cpu_cap(c, X86_FEATURE_VMX); - clear_sgx_caps(); + clear_cpu_cap(c, X86_FEATURE_SGX); return; } - /* - * Enable SGX if and only if the kernel supports SGX and Launch Control - * is supported, i.e. disable SGX if the LE hash MSRs can't be written. - */ - enable_sgx = cpu_has(c, X86_FEATURE_SGX) && - cpu_has(c, X86_FEATURE_SGX_LC) && - IS_ENABLED(CONFIG_X86_SGX); + enable_vmx = cpu_has(c, X86_FEATURE_VMX) && + IS_ENABLED(CONFIG_KVM_INTEL); + + if (cpu_has(c, X86_FEATURE_SGX) && IS_ENABLED(CONFIG_X86_SGX)) { + /* + * Separate out SGX driver enabling from KVM. This allows KVM + * guests to use SGX even if the kernel SGX driver refuses to + * use it. This happens if flexible Launch Control is not + * available. + */ + enable_sgx_driver = cpu_has(c, X86_FEATURE_SGX_LC); + enable_sgx_kvm = enable_vmx && IS_ENABLED(CONFIG_X86_SGX_KVM); + } if (msr & FEAT_CTL_LOCKED) goto update_caps; @@ -142,15 +143,18 @@ void init_ia32_feat_ctl(struct cpuinfo_x86 *c) * i.e. KVM is enabled, to avoid unnecessarily adding an attack vector * for the kernel, e.g. using VMX to hide malicious code. 
*/ - if (cpu_has(c, X86_FEATURE_VMX) && IS_ENABLED(CONFIG_KVM_INTEL)) { + if (enable_vmx) { msr |= FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; if (tboot) msr |= FEAT_CTL_VMX_ENABLED_INSIDE_SMX; } - if (enable_sgx) - msr |= FEAT_CTL_SGX_ENABLED | FEAT_CTL_SGX_LC_ENABLED; + if (enable_sgx_kvm || enable_sgx_driver) { + msr |= FEAT_CTL_SGX_ENABLED; + if (enable_sgx_driver) + msr |= FEAT_CTL_SGX_LC_ENABLED; + } wrmsrl(MSR_IA32_FEAT_CTL, msr); @@ -173,10 +177,29 @@ update_caps: } update_sgx: - if (!(msr & FEAT_CTL_SGX_ENABLED) || - !(msr & FEAT_CTL_SGX_LC_ENABLED) || !enable_sgx) { - if (enable_sgx) - pr_err_once("SGX disabled by BIOS\n"); - clear_sgx_caps(); + if (!(msr & FEAT_CTL_SGX_ENABLED)) { + if (enable_sgx_kvm || enable_sgx_driver) + pr_err_once("SGX disabled by BIOS.\n"); + clear_cpu_cap(c, X86_FEATURE_SGX); + return; + } + + /* + * VMX feature bit may be cleared due to being disabled in BIOS, + * in which case SGX virtualization cannot be supported either. + */ + if (!cpu_has(c, X86_FEATURE_VMX) && enable_sgx_kvm) { + pr_err_once("SGX virtualization disabled due to lack of VMX.\n"); + enable_sgx_kvm = 0; + } + + if (!(msr & FEAT_CTL_SGX_LC_ENABLED) && enable_sgx_driver) { + if (!enable_sgx_kvm) { + pr_err_once("SGX Launch Control is locked. Disable SGX.\n"); + clear_cpu_cap(c, X86_FEATURE_SGX); + } else { + pr_err_once("SGX Launch Control is locked. Support SGX virtualization only.\n"); + clear_cpu_cap(c, X86_FEATURE_SGX_LC); + } } } diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index ae59115d18f9..0bd6c74e3ba1 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -215,12 +215,12 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c) u32 ecx; ecx = cpuid_ecx(0x8000001e); - nodes_per_socket = ((ecx >> 8) & 7) + 1; + __max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1; } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) { u64 value; rdmsrl(MSR_FAM10H_NODE_ID, value); - nodes_per_socket = ((value >> 3) & 7) + 1; + __max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1; } if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) && diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 0e422a544835..8adffc17fa8b 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -44,9 +44,9 @@ enum split_lock_detect_state { }; /* - * Default to sld_off because most systems do not support split lock detection - * split_lock_setup() will switch this to sld_warn on systems that support - * split lock detect, unless there is a command line override. + * Default to sld_off because most systems do not support split lock detection. + * sld_state_setup() will switch this to sld_warn on systems that support + * split lock/bus lock detect, unless there is a command line override. */ static enum split_lock_detect_state sld_state __ro_after_init = sld_off; static u64 msr_test_ctrl_cache __ro_after_init; @@ -301,7 +301,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) * The operating system must reload CR3 to cause the TLB to be flushed" * * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h - * should be false so that __flush_tlb_all() causes CR3 insted of CR4.PGE + * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE * to be modified. 
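The arch/x86/kernel/cpu/feat_ctl.c changes above split SGX enabling for the host driver from SGX enabling for KVM guests. A condensed restatement of what init_ia32_feat_ctl() now computes (a sketch, not the literal code, with c being the function's struct cpuinfo_x86 argument):

	bool enable_vmx        = cpu_has(c, X86_FEATURE_VMX) &&
				 IS_ENABLED(CONFIG_KVM_INTEL);
	bool enable_sgx_driver = cpu_has(c, X86_FEATURE_SGX) &&
				 IS_ENABLED(CONFIG_X86_SGX) &&
				 cpu_has(c, X86_FEATURE_SGX_LC);
	bool enable_sgx_kvm    = cpu_has(c, X86_FEATURE_SGX) &&
				 IS_ENABLED(CONFIG_X86_SGX) &&
				 enable_vmx &&
				 IS_ENABLED(CONFIG_X86_SGX_KVM);

A locked Launch Control therefore clears only X86_FEATURE_SGX_LC when KVM can still use SGX, rather than disabling SGX outright.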
*/ if (c->x86 == 5 && c->x86_model == 9) { @@ -603,6 +603,7 @@ static void init_intel_misc_features(struct cpuinfo_x86 *c) } static void split_lock_init(void); +static void bus_lock_init(void); static void init_intel(struct cpuinfo_x86 *c) { @@ -720,6 +721,7 @@ static void init_intel(struct cpuinfo_x86 *c) tsx_disable(); split_lock_init(); + bus_lock_init(); intel_init_thermal(c); } @@ -1020,16 +1022,15 @@ static bool split_lock_verify_msr(bool on) return ctrl == tmp; } -static void __init split_lock_setup(void) +static void __init sld_state_setup(void) { enum split_lock_detect_state state = sld_warn; char arg[20]; int i, ret; - if (!split_lock_verify_msr(false)) { - pr_info("MSR access failed: Disabled\n"); + if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) && + !boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) return; - } ret = cmdline_find_option(boot_command_line, "split_lock_detect", arg, sizeof(arg)); @@ -1041,17 +1042,14 @@ static void __init split_lock_setup(void) } } } + sld_state = state; +} - switch (state) { - case sld_off: - pr_info("disabled\n"); +static void __init __split_lock_setup(void) +{ + if (!split_lock_verify_msr(false)) { + pr_info("MSR access failed: Disabled\n"); return; - case sld_warn: - pr_info("warning about user-space split_locks\n"); - break; - case sld_fatal: - pr_info("sending SIGBUS on user-space split_locks\n"); - break; } rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache); @@ -1061,7 +1059,9 @@ static void __init split_lock_setup(void) return; } - sld_state = state; + /* Restore the MSR to its cached value. */ + wrmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache); + setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT); } @@ -1118,6 +1118,29 @@ bool handle_guest_split_lock(unsigned long ip) } EXPORT_SYMBOL_GPL(handle_guest_split_lock); +static void bus_lock_init(void) +{ + u64 val; + + /* + * Warn and fatal are handled by #AC for split lock if #AC for + * split lock is supported. + */ + if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) || + (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) && + (sld_state == sld_warn || sld_state == sld_fatal)) || + sld_state == sld_off) + return; + + /* + * Enable #DB for bus lock. All bus locks are handled in #DB except + * split locks are handled in #AC in the fatal case. + */ + rdmsrl(MSR_IA32_DEBUGCTLMSR, val); + val |= DEBUGCTLMSR_BUS_LOCK_DETECT; + wrmsrl(MSR_IA32_DEBUGCTLMSR, val); +} + bool handle_user_split_lock(struct pt_regs *regs, long error_code) { if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal) @@ -1126,6 +1149,21 @@ bool handle_user_split_lock(struct pt_regs *regs, long error_code) return true; } +void handle_bus_lock(struct pt_regs *regs) +{ + switch (sld_state) { + case sld_off: + break; + case sld_warn: + pr_warn_ratelimited("#DB: %s/%d took a bus_lock trap at address: 0x%lx\n", + current->comm, current->pid, regs->ip); + break; + case sld_fatal: + force_sig_fault(SIGBUS, BUS_ADRALN, NULL); + break; + } +} + /* * This function is called only when switching between tasks with * different split-lock detection modes. 
It sets the MSR for the @@ -1166,7 +1204,7 @@ static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = { {} }; -void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) +static void __init split_lock_setup(struct cpuinfo_x86 *c) { const struct x86_cpu_id *m; u64 ia32_core_caps; @@ -1193,5 +1231,56 @@ void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) } cpu_model_supports_sld = true; - split_lock_setup(); + __split_lock_setup(); +} + +static void sld_state_show(void) +{ + if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) && + !boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) + return; + + switch (sld_state) { + case sld_off: + pr_info("disabled\n"); + break; + case sld_warn: + if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) + pr_info("#AC: crashing the kernel on kernel split_locks and warning on user-space split_locks\n"); + else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) + pr_info("#DB: warning on user-space bus_locks\n"); + break; + case sld_fatal: + if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) { + pr_info("#AC: crashing the kernel on kernel split_locks and sending SIGBUS on user-space split_locks\n"); + } else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) { + pr_info("#DB: sending SIGBUS on user-space bus_locks%s\n", + boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) ? + " from non-WB" : ""); + } + break; + } +} + +void __init sld_setup(struct cpuinfo_x86 *c) +{ + split_lock_setup(c); + sld_state_setup(); + sld_state_show(); +} + +#define X86_HYBRID_CPU_TYPE_ID_SHIFT 24 + +/** + * get_this_hybrid_cpu_type() - Get the type of this hybrid CPU + * + * Returns the CPU type [31:24] (i.e., Atom or Core) of a CPU in + * a hybrid processor. If the processor is not hybrid, returns 0. + */ +u8 get_this_hybrid_cpu_type(void) +{ + if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) + return 0; + + return cpuid_eax(0x0000001a) >> X86_HYBRID_CPU_TYPE_ID_SHIFT; } diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 7962355436da..bf7fe87a7e88 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -529,7 +529,7 @@ static void mce_irq_work_cb(struct irq_work *entry) * Check if the address reported by the CPU is in a format we can parse. * It would be possible to add code for most other cases, but all would * be somewhat complicated (e.g. segment offset would require an instruction - * parser). So only support physical addresses up to page granuality for now. + * parser). So only support physical addresses up to page granularity for now. 
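get_this_hybrid_cpu_type() above reads CPUID leaf 0x1a to report the core type of a hybrid processor. A usage sketch from a hypothetical caller; the 0x20/0x40 Atom/Core encodings are an assumption here, not something defined in this hunk:

	u8 type = get_this_hybrid_cpu_type();

	if (!type)
		pr_info("not a hybrid CPU\n");
	else if (type == 0x20)		/* assumed encoding: Atom core */
		pr_info("running on an Atom core\n");
	else if (type == 0x40)		/* assumed encoding: big Core core */
		pr_info("running on a Core core\n");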
*/ int mce_usable_address(struct mce *m) { diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c index 7b360731fc2d..4e86d97f9653 100644 --- a/arch/x86/kernel/cpu/mce/inject.c +++ b/arch/x86/kernel/cpu/mce/inject.c @@ -74,6 +74,7 @@ MCE_INJECT_SET(status); MCE_INJECT_SET(misc); MCE_INJECT_SET(addr); MCE_INJECT_SET(synd); +MCE_INJECT_SET(ipid); #define MCE_INJECT_GET(reg) \ static int inj_##reg##_get(void *data, u64 *val) \ @@ -88,11 +89,13 @@ MCE_INJECT_GET(status); MCE_INJECT_GET(misc); MCE_INJECT_GET(addr); MCE_INJECT_GET(synd); +MCE_INJECT_GET(ipid); DEFINE_SIMPLE_ATTRIBUTE(status_fops, inj_status_get, inj_status_set, "%llx\n"); DEFINE_SIMPLE_ATTRIBUTE(misc_fops, inj_misc_get, inj_misc_set, "%llx\n"); DEFINE_SIMPLE_ATTRIBUTE(addr_fops, inj_addr_get, inj_addr_set, "%llx\n"); DEFINE_SIMPLE_ATTRIBUTE(synd_fops, inj_synd_get, inj_synd_set, "%llx\n"); +DEFINE_SIMPLE_ATTRIBUTE(ipid_fops, inj_ipid_get, inj_ipid_set, "%llx\n"); static void setup_inj_struct(struct mce *m) { @@ -629,6 +632,8 @@ static const char readme_msg[] = "\t is present in hardware. \n" "\t - \"th\": Trigger APIC interrupt for Threshold errors. Causes threshold \n" "\t APIC interrupt handler to handle the error. \n" +"\n" +"ipid:\t IPID (AMD-specific)\n" "\n"; static ssize_t @@ -652,6 +657,7 @@ static struct dfs_node { { .name = "misc", .fops = &misc_fops, .perm = S_IRUSR | S_IWUSR }, { .name = "addr", .fops = &addr_fops, .perm = S_IRUSR | S_IWUSR }, { .name = "synd", .fops = &synd_fops, .perm = S_IRUSR | S_IWUSR }, + { .name = "ipid", .fops = &ipid_fops, .perm = S_IRUSR | S_IWUSR }, { .name = "bank", .fops = &bank_fops, .perm = S_IRUSR | S_IWUSR }, { .name = "flags", .fops = &flags_fops, .perm = S_IRUSR | S_IWUSR }, { .name = "cpu", .fops = &extcpu_fops, .perm = S_IRUSR | S_IWUSR }, diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c index e309476743b7..acfd5d9f93c6 100644 --- a/arch/x86/kernel/cpu/mce/intel.c +++ b/arch/x86/kernel/cpu/mce/intel.c @@ -486,6 +486,7 @@ static void intel_ppin_init(struct cpuinfo_x86 *c) case INTEL_FAM6_BROADWELL_X: case INTEL_FAM6_SKYLAKE_X: case INTEL_FAM6_ICELAKE_X: + case INTEL_FAM6_SAPPHIRERAPIDS_X: case INTEL_FAM6_XEON_PHI_KNL: case INTEL_FAM6_XEON_PHI_KNM: diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c index 83df991314c5..17e631443116 100644 --- a/arch/x86/kernel/cpu/mce/severity.c +++ b/arch/x86/kernel/cpu/mce/severity.c @@ -142,7 +142,7 @@ static struct severity { MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR) ), MCESEV( - KEEP, "Non signalled machine check", + KEEP, "Non signaled machine check", SER, BITCLR(MCI_STATUS_S) ), @@ -218,15 +218,15 @@ static struct severity { static bool is_copy_from_user(struct pt_regs *regs) { u8 insn_buf[MAX_INSN_SIZE]; - struct insn insn; unsigned long addr; + struct insn insn; + int ret; if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip, MAX_INSN_SIZE)) return false; - kernel_insn_init(&insn, insn_buf, MAX_INSN_SIZE); - insn_get_opcode(&insn); - if (!insn.opcode.got) + ret = insn_decode_kernel(&insn, insn_buf); + if (ret < 0) return false; switch (insn.opcode.value) { @@ -234,10 +234,6 @@ static bool is_copy_from_user(struct pt_regs *regs) case 0x8A: case 0x8B: /* MOVZ mem,reg */ case 0xB60F: case 0xB70F: - insn_get_modrm(&insn); - insn_get_sib(&insn); - if (!insn.modrm.got || !insn.sib.got) - return false; addr = (unsigned long)insn_get_addr_ref(&insn, regs); break; /* REP MOVS */ diff --git a/arch/x86/kernel/cpu/microcode/core.c 
b/arch/x86/kernel/cpu/microcode/core.c index b935e1b5f115..6a6318e9590c 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -629,16 +629,16 @@ static ssize_t reload_store(struct device *dev, if (val != 1) return size; - tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true); - if (tmp_ret != UCODE_NEW) - return size; - get_online_cpus(); ret = check_online_cpus(); if (ret) goto put; + tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true); + if (tmp_ret != UCODE_NEW) + goto put; + mutex_lock(&microcode_mutex); ret = microcode_reload_late(); mutex_unlock(&microcode_mutex); diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index e88bc296afca..22f13343b5da 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -60,23 +60,18 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_callback) set_irq_regs(old_regs); } -int hv_setup_vmbus_irq(int irq, void (*handler)(void)) +void hv_setup_vmbus_handler(void (*handler)(void)) { - /* - * The 'irq' argument is ignored on x86/x64 because a hard-coded - * interrupt vector is used for Hyper-V interrupts. - */ vmbus_handler = handler; - return 0; } +EXPORT_SYMBOL_GPL(hv_setup_vmbus_handler); -void hv_remove_vmbus_irq(void) +void hv_remove_vmbus_handler(void) { /* We have no way to deallocate the interrupt gate */ vmbus_handler = NULL; } -EXPORT_SYMBOL_GPL(hv_setup_vmbus_irq); -EXPORT_SYMBOL_GPL(hv_remove_vmbus_irq); +EXPORT_SYMBOL_GPL(hv_remove_vmbus_handler); /* * Routines to do per-architecture handling of stimer0 @@ -95,21 +90,17 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0) set_irq_regs(old_regs); } -int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void)) +/* For x86/x64, override weak placeholders in hyperv_timer.c */ +void hv_setup_stimer0_handler(void (*handler)(void)) { - *vector = HYPERV_STIMER0_VECTOR; - *irq = -1; /* Unused on x86/x64 */ hv_stimer0_handler = handler; - return 0; } -EXPORT_SYMBOL_GPL(hv_setup_stimer0_irq); -void hv_remove_stimer0_irq(int irq) +void hv_remove_stimer0_handler(void) { /* We have no way to deallocate the interrupt gate */ hv_stimer0_handler = NULL; } -EXPORT_SYMBOL_GPL(hv_remove_stimer0_irq); void hv_setup_kexec_handler(void (*handler)(void)) { @@ -197,7 +188,7 @@ static unsigned char hv_get_nmi_reason(void) #ifdef CONFIG_X86_LOCAL_APIC /* * Prior to WS2016 Debug-VM sends NMIs to all CPUs which makes - * it dificult to process CHANNELMSG_UNLOAD in case of crash. Handle + * it difficult to process CHANNELMSG_UNLOAD in case of crash. Handle * unknown NMI on the first CPU which gets it.
*/ static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs) @@ -274,12 +265,13 @@ static void __init ms_hyperv_init_platform(void) * Extract the features and hints */ ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES); - ms_hyperv.features_b = cpuid_ebx(HYPERV_CPUID_FEATURES); + ms_hyperv.priv_high = cpuid_ebx(HYPERV_CPUID_FEATURES); ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES); ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO); - pr_info("Hyper-V: features 0x%x, hints 0x%x, misc 0x%x\n", - ms_hyperv.features, ms_hyperv.hints, ms_hyperv.misc_features); + pr_info("Hyper-V: privilege flags low 0x%x, high 0x%x, hints 0x%x, misc 0x%x\n", + ms_hyperv.features, ms_hyperv.priv_high, ms_hyperv.hints, + ms_hyperv.misc_features); ms_hyperv.max_vp_index = cpuid_eax(HYPERV_CPUID_IMPLEMENT_LIMITS); ms_hyperv.max_lp_index = cpuid_ebx(HYPERV_CPUID_IMPLEMENT_LIMITS); @@ -325,7 +317,7 @@ static void __init ms_hyperv_init_platform(void) x86_platform.calibrate_cpu = hv_get_tsc_khz; } - if (ms_hyperv.features_b & HV_ISOLATION) { + if (ms_hyperv.priv_high & HV_ISOLATION) { ms_hyperv.isolation_config_a = cpuid_eax(HYPERV_CPUID_ISOLATION_CONFIG); ms_hyperv.isolation_config_b = cpuid_ebx(HYPERV_CPUID_ISOLATION_CONFIG); @@ -428,7 +420,7 @@ static void __init ms_hyperv_init_platform(void) /* * Hyper-V doesn't provide irq remapping for IO-APIC. To enable x2apic, - * set x2apic destination mode to physcial mode when x2apic is available + * set x2apic destination mode to physical mode when x2apic is available * and Hyper-V IOMMU driver makes sure cpus assigned with IO-APIC irqs * have 8-bit APIC id. */ diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c index 9231640782fa..0c3b372318b7 100644 --- a/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/arch/x86/kernel/cpu/mtrr/cleanup.c @@ -434,7 +434,7 @@ set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn, state->range_sizek = sizek - second_sizek; } -/* Mininum size of mtrr block that can take hole: */ +/* Minimum size of mtrr block that can take hole: */ static u64 mtrr_chunk_size __initdata = (256ULL<<20); static int __init parse_mtrr_chunk_size_opt(char *p) diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c index 28c8a23aa42e..a76694bffe86 100644 --- a/arch/x86/kernel/cpu/mtrr/mtrr.c +++ b/arch/x86/kernel/cpu/mtrr/mtrr.c @@ -799,7 +799,7 @@ void mtrr_ap_init(void) * * This routine is called in two cases: * - * 1. very earily time of software resume, when there absolutely + * 1. very early time of software resume, when there absolutely * isn't mtrr entry changes; * * 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 698bb26aeb6e..23001ae03e82 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -192,7 +192,7 @@ static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid) * Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz * Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz * - * Probe by trying to write the first of the L3 cach mask registers + * Probe by trying to write the first of the L3 cache mask registers * and checking that the bits stick. Max CLOSids is always 4 and max cbm length * is always 20 on hsw server parts. The minimum cache bitmask length * allowed for HSW server is always 2 bits. Hardcode all of them. 
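The resctrl comment above describes probing Haswell-server CAT by writing the first L3 cache mask register and checking that the bits stick. A sketch of that probe under stated assumptions; example_hsw_cat_probe() is a made-up name and MSR_IA32_L3_CBM_BASE is the usual resctrl MSR constant, not something defined in this hunk:

static bool example_hsw_cat_probe(void)
{
	u64 max_cbm = BIT(20) - 1;	/* 20-bit capacity bitmask, all ones */
	u64 val;

	if (wrmsrl_safe(MSR_IA32_L3_CBM_BASE, max_cbm))
		return false;		/* MSR write faulted: no CAT */

	rdmsrl(MSR_IA32_L3_CBM_BASE, val);
	return val == max_cbm;		/* bits stuck: CAT is usable */
}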
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 7ac31210e452..f07c10b87a87 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -84,7 +84,7 @@ unsigned int resctrl_cqm_threshold; static const struct mbm_correction_factor_table { u32 rmidthreshold; u64 cf; -} mbm_cf_table[] __initdata = { +} mbm_cf_table[] __initconst = { {7, CF(1.000000)}, {15, CF(1.000000)}, {15, CF(0.969650)}, @@ -387,7 +387,7 @@ void mon_event_count(void *info) * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so * that: * - * current bandwdith(cur_bw) < user specified bandwidth(user_bw) + * current bandwidth(cur_bw) < user specified bandwidth(user_bw) * * This uses the MBM counters to measure the bandwidth and MBA throttle * MSRs to control the bandwidth for a particular rdtgrp. It builds on the @@ -397,7 +397,7 @@ void mon_event_count(void *info) * timer. Having 1s interval makes the calculation of bandwidth simpler. * * Although MBA's goal is to restrict the bandwidth to a maximum, there may - * be a need to increase the bandwidth to avoid uncecessarily restricting + * be a need to increase the bandwidth to avoid unnecessarily restricting * the L2 <-> L3 traffic. * * Since MBA controls the L2 external bandwidth where as MBM measures the @@ -480,7 +480,7 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) /* * Delta values are updated dynamically package wise for each - * rdtgrp everytime the throttle MSR changes value. + * rdtgrp every time the throttle MSR changes value. * * This is because (1)the increase in bandwidth is not perfectly * linear and only "approximately" linear even when the hardware diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index e916646adc69..05a89e33fde2 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -1307,7 +1307,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) * If the thread does not get on the CPU for whatever * reason and the process which sets up the region is * interrupted then this will leave the thread in runnable - * state and once it gets on the CPU it will derefence + * state and once it gets on the CPU it will dereference * the cleared, but not freed, plr struct resulting in an * empty pseudo-locking loop. */ @@ -1391,7 +1391,7 @@ out: * group is removed from user space via a "rmdir" from userspace or the * unmount of the resctrl filesystem. On removal the resource group does * not go back to pseudo-locksetup mode before it is removed, instead it is - * removed directly. There is thus assymmetry with the creation where the + * removed directly. There is thus asymmetry with the creation where the * &struct pseudo_lock_region is removed here while it was not created in * rdtgroup_pseudo_lock_create(). 
* @@ -1458,7 +1458,7 @@ static int pseudo_lock_dev_release(struct inode *inode, struct file *filp) return 0; } -static int pseudo_lock_dev_mremap(struct vm_area_struct *area, unsigned long flags) +static int pseudo_lock_dev_mremap(struct vm_area_struct *area) { /* Not supported */ return -EINVAL; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index f9190adc52cb..01fd30e7829d 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * User interface for Resource Alloction in Resource Director Technology(RDT) + * User interface for Resource Allocation in Resource Director Technology(RDT) * * Copyright (C) 2016 Intel Corporation * @@ -294,7 +294,7 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of, /* * This is safe against resctrl_sched_in() called from __switch_to() * because __switch_to() is executed with interrupts disabled. A local call - * from update_closid_rmid() is proteced against __switch_to() because + * from update_closid_rmid() is protected against __switch_to() because * preemption is disabled. */ static void update_cpu_closid_rmid(void *info) @@ -2555,7 +2555,7 @@ static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn, /* * This creates a directory mon_data which contains the monitored data. * - * mon_data has one directory for each domain whic are named + * mon_data has one directory for each domain which are named * in the format mon_<domain_name>_<domain_id>. For ex: A mon_data * with L3 domain looks as below: * ./mon_data: diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 972ec3bfa9c0..21d1f062895a 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -36,6 +36,8 @@ static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_CDP_L2, CPUID_ECX, 2, 0x00000010, 2 }, { X86_FEATURE_MBA, CPUID_EBX, 3, 0x00000010, 0 }, { X86_FEATURE_PER_THREAD_MBA, CPUID_ECX, 0, 0x00000010, 3 }, + { X86_FEATURE_SGX1, CPUID_EAX, 0, 0x00000012, 0 }, + { X86_FEATURE_SGX2, CPUID_EAX, 1, 0x00000012, 0 }, { X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 }, { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 }, { X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 }, diff --git a/arch/x86/kernel/cpu/sgx/Makefile b/arch/x86/kernel/cpu/sgx/Makefile index 91d3dc784a29..9c1656779b2a 100644 --- a/arch/x86/kernel/cpu/sgx/Makefile +++ b/arch/x86/kernel/cpu/sgx/Makefile @@ -3,3 +3,4 @@ obj-y += \ encl.o \ ioctl.o \ main.o +obj-$(CONFIG_X86_SGX_KVM) += virt.o diff --git a/arch/x86/kernel/cpu/sgx/driver.c b/arch/x86/kernel/cpu/sgx/driver.c index 8ce6d8371cfb..aa9b8b868867 100644 --- a/arch/x86/kernel/cpu/sgx/driver.c +++ b/arch/x86/kernel/cpu/sgx/driver.c @@ -136,10 +136,6 @@ static const struct file_operations sgx_encl_fops = { .get_unmapped_area = sgx_get_unmapped_area, }; -const struct file_operations sgx_provision_fops = { - .owner = THIS_MODULE, -}; - static struct miscdevice sgx_dev_enclave = { .minor = MISC_DYNAMIC_MINOR, .name = "sgx_enclave", @@ -147,13 +143,6 @@ static struct miscdevice sgx_dev_enclave = { .fops = &sgx_encl_fops, }; -static struct miscdevice sgx_dev_provision = { - .minor = MISC_DYNAMIC_MINOR, - .name = "sgx_provision", - .nodename = "sgx_provision", - .fops = &sgx_provision_fops, -}; - int __init sgx_drv_init(void) { unsigned int eax, ebx, ecx, edx; @@ -187,11 +176,5 @@ int __init sgx_drv_init(void) if (ret) return ret; - ret = 
misc_register(&sgx_dev_provision); - if (ret) { - misc_deregister(&sgx_dev_enclave); - return ret; - } - return 0; } diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c index 7449ef33f081..3be203297988 100644 --- a/arch/x86/kernel/cpu/sgx/encl.c +++ b/arch/x86/kernel/cpu/sgx/encl.c @@ -7,7 +7,7 @@ #include <linux/shmem_fs.h> #include <linux/suspend.h> #include <linux/sched/mm.h> -#include "arch.h" +#include <asm/sgx.h> #include "encl.h" #include "encls.h" #include "sgx.h" @@ -78,7 +78,7 @@ static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page, ret = __sgx_encl_eldu(encl_page, epc_page, secs_page); if (ret) { - sgx_free_epc_page(epc_page); + sgx_encl_free_epc_page(epc_page); return ERR_PTR(ret); } @@ -404,7 +404,7 @@ void sgx_encl_release(struct kref *ref) if (sgx_unmark_page_reclaimable(entry->epc_page)) continue; - sgx_free_epc_page(entry->epc_page); + sgx_encl_free_epc_page(entry->epc_page); encl->secs_child_cnt--; entry->epc_page = NULL; } @@ -415,7 +415,7 @@ void sgx_encl_release(struct kref *ref) xa_destroy(&encl->page_array); if (!encl->secs_child_cnt && encl->secs.epc_page) { - sgx_free_epc_page(encl->secs.epc_page); + sgx_encl_free_epc_page(encl->secs.epc_page); encl->secs.epc_page = NULL; } @@ -423,7 +423,7 @@ void sgx_encl_release(struct kref *ref) va_page = list_first_entry(&encl->va_pages, struct sgx_va_page, list); list_del(&va_page->list); - sgx_free_epc_page(va_page->epc_page); + sgx_encl_free_epc_page(va_page->epc_page); kfree(va_page); } @@ -686,7 +686,7 @@ struct sgx_epc_page *sgx_alloc_va_page(void) ret = __epa(sgx_get_epc_virt_addr(epc_page)); if (ret) { WARN_ONCE(1, "EPA returned %d (0x%x)", ret, ret); - sgx_free_epc_page(epc_page); + sgx_encl_free_epc_page(epc_page); return ERR_PTR(-EFAULT); } @@ -735,3 +735,24 @@ bool sgx_va_page_full(struct sgx_va_page *va_page) return slot == SGX_VA_SLOT_COUNT; } + +/** + * sgx_encl_free_epc_page - free an EPC page assigned to an enclave + * @page: EPC page to be freed + * + * Free an EPC page assigned to an enclave. It does EREMOVE for the page, and + * only upon success, it puts the page back to free page list. Otherwise, it + * gives a WARNING to indicate page is leaked. 
+ */ +void sgx_encl_free_epc_page(struct sgx_epc_page *page) +{ + int ret; + + WARN_ON_ONCE(page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED); + + ret = __eremove(sgx_get_epc_virt_addr(page)); + if (WARN_ONCE(ret, EREMOVE_ERROR_MESSAGE, ret, ret)) + return; + + sgx_free_epc_page(page); +} diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h index d8d30ccbef4c..6e74f85b6264 100644 --- a/arch/x86/kernel/cpu/sgx/encl.h +++ b/arch/x86/kernel/cpu/sgx/encl.h @@ -115,5 +115,6 @@ struct sgx_epc_page *sgx_alloc_va_page(void); unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page); void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset); bool sgx_va_page_full(struct sgx_va_page *va_page); +void sgx_encl_free_epc_page(struct sgx_epc_page *page); #endif /* _X86_ENCL_H */ diff --git a/arch/x86/kernel/cpu/sgx/encls.h b/arch/x86/kernel/cpu/sgx/encls.h index 443188fe7e70..9b204843b78d 100644 --- a/arch/x86/kernel/cpu/sgx/encls.h +++ b/arch/x86/kernel/cpu/sgx/encls.h @@ -11,21 +11,6 @@ #include <asm/traps.h> #include "sgx.h" -enum sgx_encls_function { - ECREATE = 0x00, - EADD = 0x01, - EINIT = 0x02, - EREMOVE = 0x03, - EDGBRD = 0x04, - EDGBWR = 0x05, - EEXTEND = 0x06, - ELDU = 0x08, - EBLOCK = 0x09, - EPA = 0x0A, - EWB = 0x0B, - ETRACK = 0x0C, -}; - /** * ENCLS_FAULT_FLAG - flag signifying an ENCLS return code is a trapnr * @@ -55,6 +40,19 @@ enum sgx_encls_function { } while (0); \ } +/* + * encls_faulted() - Check if an ENCLS leaf faulted given an error code + * @ret: the return value of an ENCLS leaf function call + * + * Return: + * - true: ENCLS leaf faulted. + * - false: Otherwise. + */ +static inline bool encls_faulted(int ret) +{ + return ret & ENCLS_FAULT_FLAG; +} + /** * encls_failed() - Check if an ENCLS function failed * @ret: the return value of an ENCLS function call @@ -65,7 +63,7 @@ enum sgx_encls_function { */ static inline bool encls_failed(int ret) { - if (ret & ENCLS_FAULT_FLAG) + if (encls_faulted(ret)) return ENCLS_TRAPNR(ret) != X86_TRAP_PF; return !!ret; diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c index 90a5caf76939..83df20e3e633 100644 --- a/arch/x86/kernel/cpu/sgx/ioctl.c +++ b/arch/x86/kernel/cpu/sgx/ioctl.c @@ -2,6 +2,7 @@ /* Copyright(c) 2016-20 Intel Corporation. 
*/ #include <asm/mman.h> +#include <asm/sgx.h> #include <linux/mman.h> #include <linux/delay.h> #include <linux/file.h> @@ -47,7 +48,7 @@ static void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page) encl->page_cnt--; if (va_page) { - sgx_free_epc_page(va_page->epc_page); + sgx_encl_free_epc_page(va_page->epc_page); list_del(&va_page->list); kfree(va_page); } @@ -117,7 +118,7 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs) return 0; err_out: - sgx_free_epc_page(encl->secs.epc_page); + sgx_encl_free_epc_page(encl->secs.epc_page); encl->secs.epc_page = NULL; err_out_backing: @@ -365,7 +366,7 @@ err_out_unlock: mmap_read_unlock(current->mm); err_out_free: - sgx_free_epc_page(epc_page); + sgx_encl_free_epc_page(epc_page); kfree(encl_page); return ret; @@ -495,7 +496,7 @@ static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct, void *token) { u64 mrsigner[4]; - int i, j, k; + int i, j; void *addr; int ret; @@ -544,8 +545,7 @@ static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct, preempt_disable(); - for (k = 0; k < 4; k++) - wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + k, mrsigner[k]); + sgx_update_lepubkeyhash(mrsigner); ret = __einit(sigstruct, token, addr); @@ -568,7 +568,7 @@ static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct, } } - if (ret & ENCLS_FAULT_FLAG) { + if (encls_faulted(ret)) { if (encls_failed(ret)) ENCLS_WARN(ret, "EINIT"); @@ -604,7 +604,6 @@ static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg) { struct sgx_sigstruct *sigstruct; struct sgx_enclave_init init_arg; - struct page *initp_page; void *token; int ret; @@ -615,11 +614,15 @@ static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg) if (copy_from_user(&init_arg, arg, sizeof(init_arg))) return -EFAULT; - initp_page = alloc_page(GFP_KERNEL); - if (!initp_page) + /* + * 'sigstruct' must be on a page boundary and 'token' on a 512 byte + * boundary. kmalloc() will give this alignment when allocating + * PAGE_SIZE bytes. + */ + sigstruct = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!sigstruct) return -ENOMEM; - sigstruct = kmap(initp_page); token = (void *)((unsigned long)sigstruct + PAGE_SIZE / 2); memset(token, 0, SGX_LAUNCH_TOKEN_SIZE); @@ -645,8 +648,7 @@ static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg) ret = sgx_encl_init(encl, sigstruct, token); out: - kunmap(initp_page); - __free_page(initp_page); + kfree(sigstruct); return ret; } @@ -665,24 +667,11 @@ out: static long sgx_ioc_enclave_provision(struct sgx_encl *encl, void __user *arg) { struct sgx_enclave_provision params; - struct file *file; if (copy_from_user(&params, arg, sizeof(params))) return -EFAULT; - file = fget(params.fd); - if (!file) - return -EINVAL; - - if (file->f_op != &sgx_provision_fops) { - fput(file); - return -EINVAL; - } - - encl->attributes_mask |= SGX_ATTR_PROVISIONKEY; - - fput(file); - return 0; + return sgx_set_attribute(&encl->attributes_mask, params.fd); } long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c index 8df81a3ed945..63d3de02bbcc 100644 --- a/arch/x86/kernel/cpu/sgx/main.c +++ b/arch/x86/kernel/cpu/sgx/main.c @@ -1,14 +1,17 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2016-20 Intel Corporation.
*/ +#include <linux/file.h> #include <linux/freezer.h> #include <linux/highmem.h> #include <linux/kthread.h> +#include <linux/miscdevice.h> #include <linux/pagemap.h> #include <linux/ratelimit.h> #include <linux/sched/mm.h> #include <linux/sched/signal.h> #include <linux/slab.h> +#include <asm/sgx.h> #include "driver.h" #include "encl.h" #include "encls.h" @@ -23,42 +26,58 @@ static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq); * with sgx_reclaimer_lock acquired. */ static LIST_HEAD(sgx_active_page_list); - static DEFINE_SPINLOCK(sgx_reclaimer_lock); +/* The free page list lock protected variables prepend the lock. */ +static unsigned long sgx_nr_free_pages; + +/* Nodes with one or more EPC sections. */ +static nodemask_t sgx_numa_mask; + +/* + * Array with one list_head for each possible NUMA node. Each + * list contains all the sgx_epc_section's which are on that + * node. + */ +static struct sgx_numa_node *sgx_numa_nodes; + +static LIST_HEAD(sgx_dirty_page_list); + /* - * Reset dirty EPC pages to uninitialized state. Laundry can be left with SECS - * pages whose child pages blocked EREMOVE. + * Reset post-kexec EPC pages to the uninitialized state. The pages are removed + * from the input list, and made available for the page allocator. SECS pages + * prepending their children in the input list are left intact. */ -static void sgx_sanitize_section(struct sgx_epc_section *section) +static void __sgx_sanitize_pages(struct list_head *dirty_page_list) { struct sgx_epc_page *page; LIST_HEAD(dirty); int ret; - /* init_laundry_list is thread-local, no need for a lock: */ - while (!list_empty(&section->init_laundry_list)) { + /* dirty_page_list is thread-local, no need for a lock: */ + while (!list_empty(dirty_page_list)) { if (kthread_should_stop()) return; - /* needed for access to ->page_list: */ - spin_lock(&section->lock); - - page = list_first_entry(&section->init_laundry_list, - struct sgx_epc_page, list); + page = list_first_entry(dirty_page_list, struct sgx_epc_page, list); ret = __eremove(sgx_get_epc_virt_addr(page)); - if (!ret) - list_move(&page->list, &section->page_list); - else + if (!ret) { + /* + * page is now sanitized. Make it available via the SGX + * page allocator: + */ + list_del(&page->list); + sgx_free_epc_page(page); + } else { + /* The page is not yet clean - move to the dirty list. */ list_move_tail(&page->list, &dirty); - - spin_unlock(&section->lock); + } cond_resched(); } - list_splice(&dirty, &section->init_laundry_list); + list_splice(&dirty, dirty_page_list); } static bool sgx_reclaimer_age(struct sgx_epc_page *epc_page) @@ -195,10 +214,10 @@ static const cpumask_t *sgx_encl_ewb_cpumask(struct sgx_encl *encl) /* * Swap page to the regular memory transformed to the blocked state by using - * EBLOCK, which means that it can no loger be referenced (no new TLB entries). + * EBLOCK, which means that it can no longer be referenced (no new TLB entries). * * The first trial just tries to write the page assuming that some other thread - * has reset the count for threads inside the enlave by using ETRACK, and + * has reset the count for threads inside the enclave by using ETRACK, and * previous thread count has been zeroed out. The second trial calls ETRACK * before EWB. If that fails we kick all the HW threads out, and then do EWB, * which should be guaranteed the succeed.
@@ -278,7 +297,7 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page, sgx_encl_ewb(encl->secs.epc_page, &secs_backing); - sgx_free_epc_page(encl->secs.epc_page); + sgx_encl_free_epc_page(encl->secs.epc_page); encl->secs.epc_page = NULL; sgx_encl_put_backing(&secs_backing, true); @@ -308,6 +327,7 @@ static void sgx_reclaim_pages(void) struct sgx_epc_section *section; struct sgx_encl_page *encl_page; struct sgx_epc_page *epc_page; + struct sgx_numa_node *node; pgoff_t page_index; int cnt = 0; int ret; @@ -379,50 +399,33 @@ skip: epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED; section = &sgx_epc_sections[epc_page->section]; - spin_lock(&section->lock); - list_add_tail(&epc_page->list, &section->page_list); - section->free_cnt++; - spin_unlock(&section->lock); - } -} - -static unsigned long sgx_nr_free_pages(void) -{ - unsigned long cnt = 0; - int i; - - for (i = 0; i < sgx_nr_epc_sections; i++) - cnt += sgx_epc_sections[i].free_cnt; + node = section->node; - return cnt; + spin_lock(&node->lock); + list_add_tail(&epc_page->list, &node->free_page_list); + sgx_nr_free_pages++; + spin_unlock(&node->lock); + } } static bool sgx_should_reclaim(unsigned long watermark) { - return sgx_nr_free_pages() < watermark && - !list_empty(&sgx_active_page_list); + return sgx_nr_free_pages < watermark && !list_empty(&sgx_active_page_list); } static int ksgxd(void *p) { - int i; - set_freezable(); /* * Sanitize pages in order to recover from kexec(). The 2nd pass is * required for SECS pages, whose child pages blocked EREMOVE. */ - for (i = 0; i < sgx_nr_epc_sections; i++) - sgx_sanitize_section(&sgx_epc_sections[i]); - - for (i = 0; i < sgx_nr_epc_sections; i++) { - sgx_sanitize_section(&sgx_epc_sections[i]); + __sgx_sanitize_pages(&sgx_dirty_page_list); + __sgx_sanitize_pages(&sgx_dirty_page_list); - /* Should never happen. */ - if (!list_empty(&sgx_epc_sections[i].init_laundry_list)) - WARN(1, "EPC section %d has unsanitized pages.\n", i); - } + /* sanity check: */ + WARN_ON(!list_empty(&sgx_dirty_page_list)); while (!kthread_should_stop()) { if (try_to_freeze()) @@ -454,45 +457,56 @@ static bool __init sgx_page_reclaimer_init(void) return true; } -static struct sgx_epc_page *__sgx_alloc_epc_page_from_section(struct sgx_epc_section *section) +static struct sgx_epc_page *__sgx_alloc_epc_page_from_node(int nid) { - struct sgx_epc_page *page; + struct sgx_numa_node *node = &sgx_numa_nodes[nid]; + struct sgx_epc_page *page = NULL; - spin_lock(&section->lock); + spin_lock(&node->lock); - if (list_empty(&section->page_list)) { - spin_unlock(&section->lock); + if (list_empty(&node->free_page_list)) { + spin_unlock(&node->lock); return NULL; } - page = list_first_entry(&section->page_list, struct sgx_epc_page, list); + page = list_first_entry(&node->free_page_list, struct sgx_epc_page, list); list_del_init(&page->list); - section->free_cnt--; + sgx_nr_free_pages--; + + spin_unlock(&node->lock); - spin_unlock(&section->lock); return page; } /** * __sgx_alloc_epc_page() - Allocate an EPC page * - * Iterate through EPC sections and borrow a free EPC page to the caller. When a - * page is no longer needed it must be released with sgx_free_epc_page(). + * Iterate through NUMA nodes and reserve ia free EPC page to the caller. Start + * from the NUMA node, where the caller is executing. * * Return: - * an EPC page, - * -errno on error + * - an EPC page: A borrowed EPC pages were available. + * - NULL: Out of EPC pages.
*/ struct sgx_epc_page *__sgx_alloc_epc_page(void) { - struct sgx_epc_section *section; struct sgx_epc_page *page; - int i; + int nid_of_current = numa_node_id(); + int nid = nid_of_current; - for (i = 0; i < sgx_nr_epc_sections; i++) { - section = &sgx_epc_sections[i]; + if (node_isset(nid_of_current, sgx_numa_mask)) { + page = __sgx_alloc_epc_page_from_node(nid_of_current); + if (page) + return page; + } + + /* Fall back to the non-local NUMA nodes: */ + while (true) { + nid = next_node_in(nid, sgx_numa_mask); + if (nid == nid_of_current) + break; - page = __sgx_alloc_epc_page_from_section(section); + page = __sgx_alloc_epc_page_from_node(nid); if (page) return page; } @@ -598,23 +612,22 @@ struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim) * sgx_free_epc_page() - Free an EPC page * @page: an EPC page * - * Call EREMOVE for an EPC page and insert it back to the list of free pages. + * Put the EPC page back to the list of free pages. It's the caller's + * responsibility to make sure that the page is in uninitialized state. In other + * words, do EREMOVE, EWB or whatever operation is necessary before calling + * this function. */ void sgx_free_epc_page(struct sgx_epc_page *page) { struct sgx_epc_section *section = &sgx_epc_sections[page->section]; - int ret; + struct sgx_numa_node *node = section->node; - WARN_ON_ONCE(page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED); + spin_lock(&node->lock); - ret = __eremove(sgx_get_epc_virt_addr(page)); - if (WARN_ONCE(ret, "EREMOVE returned %d (0x%x)", ret, ret)) - return; + list_add_tail(&page->list, &node->free_page_list); + sgx_nr_free_pages++; - spin_lock(&section->lock); - list_add_tail(&page->list, &section->page_list); - section->free_cnt++; - spin_unlock(&section->lock); + spin_unlock(&node->lock); } static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size, @@ -635,18 +648,14 @@ static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size, } section->phys_addr = phys_addr; - spin_lock_init(&section->lock); - INIT_LIST_HEAD(&section->page_list); - INIT_LIST_HEAD(&section->init_laundry_list); for (i = 0; i < nr_pages; i++) { section->pages[i].section = index; section->pages[i].flags = 0; section->pages[i].owner = NULL; - list_add_tail(&section->pages[i].list, &section->init_laundry_list); + list_add_tail(&section->pages[i].list, &sgx_dirty_page_list); } - section->free_cnt = nr_pages; return true; } @@ -665,8 +674,13 @@ static bool __init sgx_page_cache_init(void) { u32 eax, ebx, ecx, edx, type; u64 pa, size; + int nid; int i; + sgx_numa_nodes = kmalloc_array(num_possible_nodes(), sizeof(*sgx_numa_nodes), GFP_KERNEL); + if (!sgx_numa_nodes) + return false; + for (i = 0; i < ARRAY_SIZE(sgx_epc_sections); i++) { cpuid_count(SGX_CPUID, i + SGX_CPUID_EPC, &eax, &ebx, &ecx, &edx); @@ -689,6 +703,21 @@ static bool __init sgx_page_cache_init(void) break; } + nid = numa_map_to_online_node(phys_to_target_node(pa)); + if (nid == NUMA_NO_NODE) { + /* The physical address is already printed above. */ + pr_warn(FW_BUG "Unable to map EPC section to online node. Fallback to the NUMA node 0.\n"); + nid = 0; + } + + if (!node_isset(nid, sgx_numa_mask)) { + spin_lock_init(&sgx_numa_nodes[nid].lock); + INIT_LIST_HEAD(&sgx_numa_nodes[nid].free_page_list); + node_set(nid, sgx_numa_mask); + } + + sgx_epc_sections[i].node = &sgx_numa_nodes[nid]; + sgx_nr_epc_sections++; } @@ -700,6 +729,67 @@ static bool __init sgx_page_cache_init(void) return true; } +/* + * Update the SGX_LEPUBKEYHASH MSRs to the values specified by caller.
+ * Bare-metal driver requires to update them to hash of enclave's signer + * before EINIT. KVM needs to update them to guest's virtual MSR values + * before doing EINIT from guest. + */ +void sgx_update_lepubkeyhash(u64 *lepubkeyhash) +{ + int i; + + WARN_ON_ONCE(preemptible()); + + for (i = 0; i < 4; i++) + wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]); +} + +const struct file_operations sgx_provision_fops = { + .owner = THIS_MODULE, +}; + +static struct miscdevice sgx_dev_provision = { + .minor = MISC_DYNAMIC_MINOR, + .name = "sgx_provision", + .nodename = "sgx_provision", + .fops = &sgx_provision_fops, +}; + +/** + * sgx_set_attribute() - Update allowed attributes given file descriptor + * @allowed_attributes: Pointer to allowed enclave attributes + * @attribute_fd: File descriptor for specific attribute + * + * Append enclave attribute indicated by file descriptor to allowed + * attributes. Currently only SGX_ATTR_PROVISIONKEY indicated by + * /dev/sgx_provision is supported. + * + * Return: + * -0: SGX_ATTR_PROVISIONKEY is appended to allowed_attributes + * -EINVAL: Invalid, or not supported file descriptor + */ +int sgx_set_attribute(unsigned long *allowed_attributes, + unsigned int attribute_fd) +{ + struct file *file; + + file = fget(attribute_fd); + if (!file) + return -EINVAL; + + if (file->f_op != &sgx_provision_fops) { + fput(file); + return -EINVAL; + } + + *allowed_attributes |= SGX_ATTR_PROVISIONKEY; + + fput(file); + return 0; +} +EXPORT_SYMBOL_GPL(sgx_set_attribute); + static int __init sgx_init(void) { int ret; @@ -716,12 +806,28 @@ static int __init sgx_init(void) goto err_page_cache; } - ret = sgx_drv_init(); + ret = misc_register(&sgx_dev_provision); if (ret) goto err_kthread; + /* + * Always try to initialize the native *and* KVM drivers. + * The KVM driver is less picky than the native one and + * can function if the native one is not supported on the + * current system or fails to initialize. + * + * Error out only if both fail to initialize. + */ + ret = sgx_drv_init(); + + if (sgx_vepc_init() && ret) + goto err_provision; + return 0; +err_provision: + misc_deregister(&sgx_dev_provision); + err_kthread: kthread_stop(ksgxd_tsk); diff --git a/arch/x86/kernel/cpu/sgx/sgx.h b/arch/x86/kernel/cpu/sgx/sgx.h index 5fa42d143feb..4628acec0009 100644 --- a/arch/x86/kernel/cpu/sgx/sgx.h +++ b/arch/x86/kernel/cpu/sgx/sgx.h @@ -8,11 +8,15 @@ #include <linux/rwsem.h> #include <linux/types.h> #include <asm/asm.h> -#include "arch.h" +#include <asm/sgx.h> #undef pr_fmt #define pr_fmt(fmt) "sgx: " fmt +#define EREMOVE_ERROR_MESSAGE \ + "EREMOVE returned %d (0x%x) and an EPC page was leaked. SGX may become unusable. " \ + "Refer to Documentation/x86/sgx.rst for more information." + #define SGX_MAX_EPC_SECTIONS 8 #define SGX_EEXTEND_BLOCK_SIZE 256 #define SGX_NR_TO_SCAN 16 @@ -30,28 +34,25 @@ struct sgx_epc_page { }; /* + * Contains the tracking data for NUMA nodes having EPC pages. Most importantly, + * the free page list local to the node is stored here. + */ +struct sgx_numa_node { + struct list_head free_page_list; + spinlock_t lock; +}; + +/* * The firmware can define multiple chunks of EPC to the different areas of the * physical memory e.g. for memory areas of the each node. This structure is * used to store EPC pages for one EPC section and virtual memory area where * the pages have been mapped. - * - * 'lock' must be held before accessing 'page_list' or 'free_cnt'. 
*/ struct sgx_epc_section { unsigned long phys_addr; void *virt_addr; struct sgx_epc_page *pages; - - spinlock_t lock; - struct list_head page_list; - unsigned long free_cnt; - - /* - * Pages which need EREMOVE run on them before they can be - * used. Only safe to be accessed in ksgxd and init code. - * Not protected by locks. - */ - struct list_head init_laundry_list; + struct sgx_numa_node *node; }; extern struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS]; @@ -83,4 +84,15 @@ void sgx_mark_page_reclaimable(struct sgx_epc_page *page); int sgx_unmark_page_reclaimable(struct sgx_epc_page *page); struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim); +#ifdef CONFIG_X86_SGX_KVM +int __init sgx_vepc_init(void); +#else +static inline int __init sgx_vepc_init(void) +{ + return -ENODEV; +} +#endif + +void sgx_update_lepubkeyhash(u64 *lepubkeyhash); + #endif /* _X86_SGX_H */ diff --git a/arch/x86/kernel/cpu/sgx/virt.c b/arch/x86/kernel/cpu/sgx/virt.c new file mode 100644 index 000000000000..6ad165a5c0cc --- /dev/null +++ b/arch/x86/kernel/cpu/sgx/virt.c @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device driver to expose SGX enclave memory to KVM guests. + * + * Copyright(c) 2021 Intel Corporation. + */ + +#include <linux/miscdevice.h> +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/sched/mm.h> +#include <linux/sched/signal.h> +#include <linux/slab.h> +#include <linux/xarray.h> +#include <asm/sgx.h> +#include <uapi/asm/sgx.h> + +#include "encls.h" +#include "sgx.h" + +struct sgx_vepc { + struct xarray page_array; + struct mutex lock; +}; + +/* + * Temporary SECS pages that cannot be EREMOVE'd due to having child in other + * virtual EPC instances, and the lock to protect it. + */ +static struct mutex zombie_secs_pages_lock; +static struct list_head zombie_secs_pages; + +static int __sgx_vepc_fault(struct sgx_vepc *vepc, + struct vm_area_struct *vma, unsigned long addr) +{ + struct sgx_epc_page *epc_page; + unsigned long index, pfn; + int ret; + + WARN_ON(!mutex_is_locked(&vepc->lock)); + + /* Calculate index of EPC page in virtual EPC's page_array */ + index = vma->vm_pgoff + PFN_DOWN(addr - vma->vm_start); + + epc_page = xa_load(&vepc->page_array, index); + if (epc_page) + return 0; + + epc_page = sgx_alloc_epc_page(vepc, false); + if (IS_ERR(epc_page)) + return PTR_ERR(epc_page); + + ret = xa_err(xa_store(&vepc->page_array, index, epc_page, GFP_KERNEL)); + if (ret) + goto err_free; + + pfn = PFN_DOWN(sgx_get_epc_phys_addr(epc_page)); + + ret = vmf_insert_pfn(vma, addr, pfn); + if (ret != VM_FAULT_NOPAGE) { + ret = -EFAULT; + goto err_delete; + } + + return 0; + +err_delete: + xa_erase(&vepc->page_array, index); +err_free: + sgx_free_epc_page(epc_page); + return ret; +} + +static vm_fault_t sgx_vepc_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct sgx_vepc *vepc = vma->vm_private_data; + int ret; + + mutex_lock(&vepc->lock); + ret = __sgx_vepc_fault(vepc, vma, vmf->address); + mutex_unlock(&vepc->lock); + + if (!ret) + return VM_FAULT_NOPAGE; + + if (ret == -EBUSY && (vmf->flags & FAULT_FLAG_ALLOW_RETRY)) { + mmap_read_unlock(vma->vm_mm); + return VM_FAULT_RETRY; + } + + return VM_FAULT_SIGBUS; +} + +static const struct vm_operations_struct sgx_vepc_vm_ops = { + .fault = sgx_vepc_fault, +}; + +static int sgx_vepc_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct sgx_vepc *vepc = file->private_data; + + if (!(vma->vm_flags & VM_SHARED)) + return -EINVAL; + + vma->vm_ops = &sgx_vepc_vm_ops; + /* 
Don't copy VMA in fork() */ + vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY; + vma->vm_private_data = vepc; + + return 0; +} + +static int sgx_vepc_free_page(struct sgx_epc_page *epc_page) +{ + int ret; + + /* + * Take a previously guest-owned EPC page and return it to the + * general EPC page pool. + * + * Guests can not be trusted to have left this page in a good + * state, so run EREMOVE on the page unconditionally. In the + * case that a guest properly EREMOVE'd this page, a superfluous + * EREMOVE is harmless. + */ + ret = __eremove(sgx_get_epc_virt_addr(epc_page)); + if (ret) { + /* + * Only SGX_CHILD_PRESENT is expected, which is because of + * EREMOVE'ing an SECS still with child, in which case it can + * be handled by EREMOVE'ing the SECS again after all pages in + * virtual EPC have been EREMOVE'd. See comments in below in + * sgx_vepc_release(). + * + * The user of virtual EPC (KVM) needs to guarantee there's no + * logical processor is still running in the enclave in guest, + * otherwise EREMOVE will get SGX_ENCLAVE_ACT which cannot be + * handled here. + */ + WARN_ONCE(ret != SGX_CHILD_PRESENT, EREMOVE_ERROR_MESSAGE, + ret, ret); + return ret; + } + + sgx_free_epc_page(epc_page); + + return 0; +} + +static int sgx_vepc_release(struct inode *inode, struct file *file) +{ + struct sgx_vepc *vepc = file->private_data; + struct sgx_epc_page *epc_page, *tmp, *entry; + unsigned long index; + + LIST_HEAD(secs_pages); + + xa_for_each(&vepc->page_array, index, entry) { + /* + * Remove all normal, child pages. sgx_vepc_free_page() + * will fail if EREMOVE fails, but this is OK and expected on + * SECS pages. Those can only be EREMOVE'd *after* all their + * child pages. Retries below will clean them up. + */ + if (sgx_vepc_free_page(entry)) + continue; + + xa_erase(&vepc->page_array, index); + } + + /* + * Retry EREMOVE'ing pages. This will clean up any SECS pages that + * only had children in this 'epc' area. + */ + xa_for_each(&vepc->page_array, index, entry) { + epc_page = entry; + /* + * An EREMOVE failure here means that the SECS page still + * has children. But, since all children in this 'sgx_vepc' + * have been removed, the SECS page must have a child on + * another instance. + */ + if (sgx_vepc_free_page(epc_page)) + list_add_tail(&epc_page->list, &secs_pages); + + xa_erase(&vepc->page_array, index); + } + + /* + * SECS pages are "pinned" by child pages, and "unpinned" once all + * children have been EREMOVE'd. A child page in this instance + * may have pinned an SECS page encountered in an earlier release(), + * creating a zombie. Since some children were EREMOVE'd above, + * try to EREMOVE all zombies in the hopes that one was unpinned. + */ + mutex_lock(&zombie_secs_pages_lock); + list_for_each_entry_safe(epc_page, tmp, &zombie_secs_pages, list) { + /* + * Speculatively remove the page from the list of zombies, + * if the page is successfully EREMOVE'd it will be added to + * the list of free pages. If EREMOVE fails, throw the page + * on the local list, which will be spliced on at the end. 
+ */ + list_del(&epc_page->list); + + if (sgx_vepc_free_page(epc_page)) + list_add_tail(&epc_page->list, &secs_pages); + } + + if (!list_empty(&secs_pages)) + list_splice_tail(&secs_pages, &zombie_secs_pages); + mutex_unlock(&zombie_secs_pages_lock); + + kfree(vepc); + + return 0; +} + +static int sgx_vepc_open(struct inode *inode, struct file *file) +{ + struct sgx_vepc *vepc; + + vepc = kzalloc(sizeof(struct sgx_vepc), GFP_KERNEL); + if (!vepc) + return -ENOMEM; + mutex_init(&vepc->lock); + xa_init(&vepc->page_array); + + file->private_data = vepc; + + return 0; +} + +static const struct file_operations sgx_vepc_fops = { + .owner = THIS_MODULE, + .open = sgx_vepc_open, + .release = sgx_vepc_release, + .mmap = sgx_vepc_mmap, +}; + +static struct miscdevice sgx_vepc_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "sgx_vepc", + .nodename = "sgx_vepc", + .fops = &sgx_vepc_fops, +}; + +int __init sgx_vepc_init(void) +{ + /* SGX virtualization requires KVM to work */ + if (!cpu_feature_enabled(X86_FEATURE_VMX)) + return -ENODEV; + + INIT_LIST_HEAD(&zombie_secs_pages); + mutex_init(&zombie_secs_pages_lock); + + return misc_register(&sgx_vepc_dev); +} + +/** + * sgx_virt_ecreate() - Run ECREATE on behalf of guest + * @pageinfo: Pointer to PAGEINFO structure + * @secs: Userspace pointer to SECS page + * @trapnr: trap number injected to guest in case of ECREATE error + * + * Run ECREATE on behalf of guest after KVM traps ECREATE for the purpose + * of enforcing policies of guest's enclaves, and return the trap number + * which should be injected to guest in case of any ECREATE error. + * + * Return: + * - 0: ECREATE was successful. + * - <0: on error. + */ +int sgx_virt_ecreate(struct sgx_pageinfo *pageinfo, void __user *secs, + int *trapnr) +{ + int ret; + + /* + * @secs is an untrusted, userspace-provided address. It comes from + * KVM and is assumed to be a valid pointer which points somewhere in + * userspace. This can fault and call SGX or other fault handlers when + * userspace mapping @secs doesn't exist. + * + * Add a WARN() to make sure @secs is already valid userspace pointer + * from caller (KVM), who should already have handled invalid pointer + * case (for instance, made by malicious guest). All other checks, + * such as alignment of @secs, are deferred to ENCLS itself. + */ + if (WARN_ON_ONCE(!access_ok(secs, PAGE_SIZE))) + return -EINVAL; + + __uaccess_begin(); + ret = __ecreate(pageinfo, (void *)secs); + __uaccess_end(); + + if (encls_faulted(ret)) { + *trapnr = ENCLS_TRAPNR(ret); + return -EFAULT; + } + + /* ECREATE doesn't return an error code, it faults or succeeds. */ + WARN_ON_ONCE(ret); + return 0; +} +EXPORT_SYMBOL_GPL(sgx_virt_ecreate); + +static int __sgx_virt_einit(void __user *sigstruct, void __user *token, + void __user *secs) +{ + int ret; + + /* + * Make sure all userspace pointers from caller (KVM) are valid. + * All other checks deferred to ENCLS itself. Also see comment + * for @secs in sgx_virt_ecreate(). 
+ */ +#define SGX_EINITTOKEN_SIZE 304 + if (WARN_ON_ONCE(!access_ok(sigstruct, sizeof(struct sgx_sigstruct)) || + !access_ok(token, SGX_EINITTOKEN_SIZE) || + !access_ok(secs, PAGE_SIZE))) + return -EINVAL; + + __uaccess_begin(); + ret = __einit((void *)sigstruct, (void *)token, (void *)secs); + __uaccess_end(); + + return ret; +} + +/** + * sgx_virt_einit() - Run EINIT on behalf of guest + * @sigstruct: Userspace pointer to SIGSTRUCT structure + * @token: Userspace pointer to EINITTOKEN structure + * @secs: Userspace pointer to SECS page + * @lepubkeyhash: Pointer to guest's *virtual* SGX_LEPUBKEYHASH MSR values + * @trapnr: trap number injected to guest in case of EINIT error + * + * Run EINIT on behalf of guest after KVM traps EINIT. If SGX_LC is available + * in host, SGX driver may rewrite the hardware values at wish, therefore KVM + * needs to update hardware values to guest's virtual MSR values in order to + * ensure EINIT is executed with expected hardware values. + * + * Return: + * - 0: EINIT was successful. + * - <0: on error. + */ +int sgx_virt_einit(void __user *sigstruct, void __user *token, + void __user *secs, u64 *lepubkeyhash, int *trapnr) +{ + int ret; + + if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) { + ret = __sgx_virt_einit(sigstruct, token, secs); + } else { + preempt_disable(); + + sgx_update_lepubkeyhash(lepubkeyhash); + + ret = __sgx_virt_einit(sigstruct, token, secs); + preempt_enable(); + } + + /* Propagate up the error from the WARN_ON_ONCE in __sgx_virt_einit() */ + if (ret == -EINVAL) + return ret; + + if (encls_faulted(ret)) { + *trapnr = ENCLS_TRAPNR(ret); + return -EFAULT; + } + + return ret; +} +EXPORT_SYMBOL_GPL(sgx_virt_einit); diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c index 8678864ce712..132a2de44d2f 100644 --- a/arch/x86/kernel/cpu/topology.c +++ b/arch/x86/kernel/cpu/topology.c @@ -30,7 +30,7 @@ EXPORT_SYMBOL(__max_die_per_package); #ifdef CONFIG_SMP /* - * Check if given CPUID extended toplogy "leaf" is implemented + * Check if given CPUID extended topology "leaf" is implemented */ static int check_extended_topology_leaf(int leaf) { @@ -44,7 +44,7 @@ static int check_extended_topology_leaf(int leaf) return 0; } /* - * Return best CPUID Extended Toplogy Leaf supported + * Return best CPUID Extended Topology Leaf supported */ static int detect_extended_topology_leaf(struct cpuinfo_x86 *c) { diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index c6ede3b3d302..c04b933f48d3 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c @@ -27,6 +27,7 @@ #include <linux/clocksource.h> #include <linux/cpu.h> #include <linux/reboot.h> +#include <linux/static_call.h> #include <asm/div64.h> #include <asm/x86_init.h> #include <asm/hypervisor.h> @@ -336,11 +337,11 @@ static void __init vmware_paravirt_ops_setup(void) vmware_cyc2ns_setup(); if (vmw_sched_clock) - pv_ops.time.sched_clock = vmware_sched_clock; + paravirt_set_sched_clock(vmware_sched_clock); if (vmware_is_stealclock_available()) { has_steal_clock = true; - pv_ops.time.steal_clock = vmware_steal_clock; + static_call_update(pv_steal_clock, vmware_steal_clock); /* We use reboot notifier only to disable steal clock */ register_reboot_notifier(&vmware_pv_reboot_nb); @@ -378,6 +379,8 @@ static void __init vmware_set_capabilities(void) { setup_force_cpu_cap(X86_FEATURE_CONSTANT_TSC); setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE); + if (vmware_tsc_khz) + setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ); if (vmware_hypercall_mode == 
CPUID_VMWARE_FEATURES_ECX_VMCALL) setup_force_cpu_cap(X86_FEATURE_VMCALL); else if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMMCALL) diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index a8f3af257e26..54ce999ed321 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -323,8 +323,8 @@ static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem, cmem->nr_ranges = 1; /* Exclude elf header region */ - start = image->arch.elf_load_addr; - end = start + image->arch.elf_headers_sz - 1; + start = image->elf_load_addr; + end = start + image->elf_headers_sz - 1; return crash_exclude_mem_range(cmem, start, end); } @@ -337,7 +337,7 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params) struct crash_memmap_data cmd; struct crash_mem *cmem; - cmem = vzalloc(sizeof(struct crash_mem)); + cmem = vzalloc(struct_size(cmem, ranges, 1)); if (!cmem) return -ENOMEM; @@ -407,20 +407,20 @@ int crash_load_segments(struct kimage *image) if (ret) return ret; - image->arch.elf_headers = kbuf.buffer; - image->arch.elf_headers_sz = kbuf.bufsz; + image->elf_headers = kbuf.buffer; + image->elf_headers_sz = kbuf.bufsz; kbuf.memsz = kbuf.bufsz; kbuf.buf_align = ELF_CORE_HEADER_ALIGN; kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; ret = kexec_add_buffer(&kbuf); if (ret) { - vfree((void *)image->arch.elf_headers); + vfree((void *)image->elf_headers); return ret; } - image->arch.elf_load_addr = kbuf.mem; + image->elf_load_addr = kbuf.mem; pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n", - image->arch.elf_load_addr, kbuf.bufsz, kbuf.bufsz); + image->elf_load_addr, kbuf.bufsz, kbuf.bufsz); return ret; } diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c index 759d392cbe9f..d1d49e3d536b 100644 --- a/arch/x86/kernel/doublefault_32.c +++ b/arch/x86/kernel/doublefault_32.c @@ -100,9 +100,7 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack) = { .ss = __KERNEL_DS, .ds = __USER_DS, .fs = __KERNEL_PERCPU, -#ifndef CONFIG_X86_32_LAZY_GS - .gs = __KERNEL_STACK_CANARY, -#endif + .gs = 0, .__cr3 = __pa_nodebug(swapper_pg_dir), }, diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 22aad412f965..bc0657f0deed 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -31,8 +31,8 @@ * - inform the user about the firmware's notion of memory layout * via /sys/firmware/memmap * - * - the hibernation code uses it to generate a kernel-independent MD5 - * fingerprint of the physical memory layout of a system. + * - the hibernation code uses it to generate a kernel-independent CRC32 + * checksum of the physical memory layout of a system. * * - 'e820_table_kexec': a slightly modified (by the kernel) firmware version * passed to us by the bootloader - the major difference between @@ -793,7 +793,7 @@ core_initcall(e820__register_nvs_regions); #endif /* - * Allocate the requested number of bytes with the requsted alignment + * Allocate the requested number of bytes with the requested alignment * and return (the physical address) to the caller. Also register this * range in the 'kexec' E820 table as a reserved range. 
* diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 683749b80ae2..a85c64000218 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -253,7 +253,7 @@ static bool xfeature_enabled(enum xfeature xfeature) static void __init setup_xstate_features(void) { u32 eax, ebx, ecx, edx, i; - /* start at the beginnning of the "extended state" */ + /* start at the beginning of the "extended state" */ unsigned int last_good_offset = offsetof(struct xregs_state, extended_state_area); /* diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 7edbd5ee5ed4..1b3ce3b4a2a2 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -66,7 +66,7 @@ int ftrace_arch_code_modify_post_process(void) static const char *ftrace_nop_replace(void) { - return ideal_nops[NOP_ATOMIC5]; + return x86_nops[5]; } static const char *ftrace_call_replace(unsigned long ip, unsigned long addr) @@ -377,7 +377,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) ip = trampoline + (jmp_offset - start_offset); if (WARN_ON(*(char *)ip != 0x75)) goto fail; - ret = copy_from_kernel_nofault(ip, ideal_nops[2], 2); + ret = copy_from_kernel_nofault(ip, x86_nops[2], 2); if (ret < 0) goto fail; } diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 5e9beb77cafd..18be44163a50 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -104,7 +104,7 @@ static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr) static bool __head check_la57_support(unsigned long physaddr) { /* - * 5-level paging is detected and enabled at kernel decomression + * 5-level paging is detected and enabled at kernel decompression * stage. Only check if it has been enabled there. */ if (!(native_read_cr4() & X86_CR4_LA57)) diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 7ed84c282233..67f590425d90 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -318,8 +318,8 @@ SYM_FUNC_START(startup_32_smp) movl $(__KERNEL_PERCPU), %eax movl %eax,%fs # set this cpu's percpu - movl $(__KERNEL_STACK_CANARY),%eax - movl %eax,%gs + xorl %eax,%eax + movl %eax,%gs # clear possible garbage in %gs xorl %eax,%eax # Clear LDT lldt %ax @@ -339,20 +339,6 @@ SYM_FUNC_END(startup_32_smp) */ __INIT setup_once: -#ifdef CONFIG_STACKPROTECTOR - /* - * Configure the stack canary. The linker can't handle this by - * relocation. Manually set base address in stack canary - * segment descriptor. - */ - movl $gdt_page,%eax - movl $stack_canary,%ecx - movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) - shrl $16, %ecx - movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) - movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax) -#endif - andl $0,setup_once_ref /* Once is enough, thanks */ ret diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c index ee1a283f8e96..d552f177eca0 100644 --- a/arch/x86/kernel/idt.c +++ b/arch/x86/kernel/idt.c @@ -245,7 +245,7 @@ static const __initconst struct idt_data ist_idts[] = { * after that. * * Note, that X86_64 cannot install the real #PF handler in - * idt_setup_early_traps() because the memory intialization needs the #PF + * idt_setup_early_traps() because the memory initialization needs the #PF * handler from the early_idt_handler_array to initialize the early page * tables. 
*/ diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 58aa712973ac..e28f6a5d14f1 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -338,7 +338,7 @@ void fixup_irqs(void) irq_migrate_all_off_this_cpu(); /* - * We can remove mdelay() and then send spuriuous interrupts to + * We can remove mdelay() and then send spurious interrupts to * new cpu targets for all the irqs that were handled previously by * this cpu. While it works, I have seen spurious interrupt messages * (nothing wrong but still...). diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c index 5ba8477c2cb7..6a2eb62c85e6 100644 --- a/arch/x86/kernel/jump_label.c +++ b/arch/x86/kernel/jump_label.c @@ -28,10 +28,8 @@ static void bug_at(const void *ip, int line) } static const void * -__jump_label_set_jump_code(struct jump_entry *entry, enum jump_label_type type, int init) +__jump_label_set_jump_code(struct jump_entry *entry, enum jump_label_type type) { - const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP }; - const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5]; const void *expect, *code; const void *addr, *dest; int line; @@ -41,10 +39,8 @@ __jump_label_set_jump_code(struct jump_entry *entry, enum jump_label_type type, code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest); - if (init) { - expect = default_nop; line = __LINE__; - } else if (type == JUMP_LABEL_JMP) { - expect = ideal_nop; line = __LINE__; + if (type == JUMP_LABEL_JMP) { + expect = x86_nops[5]; line = __LINE__; } else { expect = code; line = __LINE__; } @@ -53,7 +49,7 @@ __jump_label_set_jump_code(struct jump_entry *entry, enum jump_label_type type, bug_at(addr, line); if (type == JUMP_LABEL_NOP) - code = ideal_nop; + code = x86_nops[5]; return code; } @@ -62,7 +58,7 @@ static inline void __jump_label_transform(struct jump_entry *entry, enum jump_label_type type, int init) { - const void *opcode = __jump_label_set_jump_code(entry, type, init); + const void *opcode = __jump_label_set_jump_code(entry, type); /* * As long as only a single processor is running and the code is still @@ -113,7 +109,7 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry, } mutex_lock(&text_mutex); - opcode = __jump_label_set_jump_code(entry, type, 0); + opcode = __jump_label_set_jump_code(entry, type); text_poke_queue((void *)jump_entry_code(entry), opcode, JUMP_LABEL_NOP_SIZE, NULL); mutex_unlock(&text_mutex); @@ -136,22 +132,6 @@ static enum { __init_or_module void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type) { - /* - * This function is called at boot up and when modules are - * first loaded. Check if the default nop, the one that is - * inserted at compile time, is the ideal nop. If it is, then - * we do not need to update the nop, and we can leave it as is. - * If it is not, then we need to update the nop to the ideal nop. 
- */ - if (jlstate == JL_STATE_START) { - const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP }; - const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5]; - - if (memcmp(ideal_nop, default_nop, 5) != 0) - jlstate = JL_STATE_UPDATE; - else - jlstate = JL_STATE_NO_UPDATE; - } if (jlstate == JL_STATE_UPDATE) jump_label_transform(entry, type, 1); } diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index ce831f9448e7..170d0fd68b1f 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c @@ -75,7 +75,7 @@ static int setup_cmdline(struct kimage *image, struct boot_params *params, if (image->type == KEXEC_TYPE_CRASH) { len = sprintf(cmdline_ptr, - "elfcorehdr=0x%lx ", image->arch.elf_load_addr); + "elfcorehdr=0x%lx ", image->elf_load_addr); } memcpy(cmdline_ptr + len, cmdline, cmdline_len); cmdline_len += len; diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index ff7878df96b4..3a43a2dee658 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -17,7 +17,7 @@ * Updated by: Tom Rini <trini@kernel.crashing.org> * Updated by: Jason Wessel <jason.wessel@windriver.com> * Modified for 386 by Jim Kingdon, Cygnus Support. - * Origianl kgdb, compatibility with 2.1.xx kernel by + * Original kgdb, compatibility with 2.1.xx kernel by * David Grothe <dave@gcom.com> * Integrated into 2.2.5 kernel by Tigran Aivazian <tigran@sco.com> * X86_64 changes from Andi Kleen's patch merged by Jim Houston @@ -642,7 +642,7 @@ void kgdb_arch_late(void) struct perf_event **pevent; /* - * Pre-allocate the hw breakpoint structions in the non-atomic + * Pre-allocate the hw breakpoint instructions in the non-atomic * portion of kgdb because this operation requires mutexs to * complete. */ diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index df776cdca327..d3d65545cb8b 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -139,6 +139,8 @@ NOKPROBE_SYMBOL(synthesize_relcall); int can_boost(struct insn *insn, void *addr) { kprobe_opcode_t opcode; + insn_byte_t prefix; + int i; if (search_exception_tables((unsigned long)addr)) return 0; /* Page fault may occur on this address. */ @@ -151,35 +153,39 @@ int can_boost(struct insn *insn, void *addr) if (insn->opcode.nbytes != 1) return 0; - /* Can't boost Address-size override prefix */ - if (unlikely(inat_is_address_size_prefix(insn->attr))) - return 0; + for_each_insn_prefix(insn, i, prefix) { + insn_attr_t attr; + + attr = inat_get_opcode_attribute(prefix); + /* Can't boost Address-size override prefix and CS override prefix */ + if (prefix == 0x2e || inat_is_address_size_prefix(attr)) + return 0; + } opcode = insn->opcode.bytes[0]; - switch (opcode & 0xf0) { - case 0x60: - /* can't boost "bound" */ - return (opcode != 0x62); - case 0x70: - return 0; /* can't boost conditional jump */ - case 0x90: - return opcode != 0x9a; /* can't boost call far */ - case 0xc0: - /* can't boost software-interruptions */ - return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf; - case 0xd0: - /* can boost AA* and XLAT */ - return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7); - case 0xe0: - /* can boost in/out and absolute jmps */ - return ((opcode & 0x04) || opcode == 0xea); - case 0xf0: - /* clear and set flags are boostable */ - return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe)); + switch (opcode) { + case 0x62: /* bound */ + case 0x70 ... 0x7f: /* Conditional jumps */ + case 0x9a: /* Call far */ + case 0xc0 ... 
0xc1: /* Grp2 */ + case 0xcc ... 0xce: /* software exceptions */ + case 0xd0 ... 0xd3: /* Grp2 */ + case 0xd6: /* (UD) */ + case 0xd8 ... 0xdf: /* ESC */ + case 0xe0 ... 0xe3: /* LOOP*, JCXZ */ + case 0xe8 ... 0xe9: /* near Call, JMP */ + case 0xeb: /* Short JMP */ + case 0xf0 ... 0xf4: /* LOCK/REP, HLT */ + case 0xf6 ... 0xf7: /* Grp3 */ + case 0xfe: /* Grp4 */ + /* ... are not boostable */ + return 0; + case 0xff: /* Grp5 */ + /* Only indirect jmp is boostable */ + return X86_MODRM_REG(insn->modrm.bytes[0]) == 4; default: - /* CS override prefix and call are not boostable */ - return (opcode != 0x2e && opcode != 0x9a); + return 1; } } @@ -229,7 +235,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) return 0UL; if (faddr) - memcpy(buf, ideal_nops[NOP_ATOMIC5], 5); + memcpy(buf, x86_nops[5], 5); else buf[0] = kp->opcode; return (unsigned long)buf; @@ -265,6 +271,8 @@ static int can_probe(unsigned long paddr) /* Decode instructions */ addr = paddr - offset; while (addr < paddr) { + int ret; + /* * Check if the instruction has been modified by another * kprobe, in which case we replace the breakpoint by the @@ -276,8 +284,10 @@ static int can_probe(unsigned long paddr) __addr = recover_probed_instruction(buf, addr); if (!__addr) return 0; - kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE); - insn_get_length(&insn); + + ret = insn_decode_kernel(&insn, (void *)__addr); + if (ret < 0) + return 0; /* * Another debugging subsystem might insert this breakpoint. @@ -301,8 +311,8 @@ static int can_probe(unsigned long paddr) int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn) { kprobe_opcode_t buf[MAX_INSN_SIZE]; - unsigned long recovered_insn = - recover_probed_instruction(buf, (unsigned long)src); + unsigned long recovered_insn = recover_probed_instruction(buf, (unsigned long)src); + int ret; if (!recovered_insn || !insn) return 0; @@ -312,8 +322,9 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn) MAX_INSN_SIZE)) return 0; - kernel_insn_init(insn, dest, MAX_INSN_SIZE); - insn_get_length(insn); + ret = insn_decode_kernel(insn, dest); + if (ret < 0) + return 0; /* We can not probe force emulate prefixed instruction */ if (insn_has_emulate_prefix(insn)) @@ -357,13 +368,14 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn) return insn->length; } -/* Prepare reljump right after instruction to boost */ -static int prepare_boost(kprobe_opcode_t *buf, struct kprobe *p, - struct insn *insn) +/* Prepare reljump or int3 right after instruction */ +static int prepare_singlestep(kprobe_opcode_t *buf, struct kprobe *p, + struct insn *insn) { int len = insn->length; - if (can_boost(insn, p->addr) && + if (!IS_ENABLED(CONFIG_PREEMPTION) && + !p->post_handler && can_boost(insn, p->addr) && MAX_INSN_SIZE - len >= JMP32_INSN_SIZE) { /* * These instructions can be executed directly if it @@ -374,7 +386,12 @@ static int prepare_boost(kprobe_opcode_t *buf, struct kprobe *p, len += JMP32_INSN_SIZE; p->ainsn.boostable = 1; } else { - p->ainsn.boostable = 0; + /* Otherwise, put an int3 for trapping singlestep */ + if (MAX_INSN_SIZE - len < INT3_INSN_SIZE) + return -ENOSPC; + + buf[len] = INT3_INSN_OPCODE; + len += INT3_INSN_SIZE; } return len; @@ -411,86 +428,290 @@ void free_insn_page(void *page) module_memfree(page); } -static void set_resume_flags(struct kprobe *p, struct insn *insn) +/* Kprobe x86 instruction emulation - only regs->ip or IF flag modifiers */ + +static void kprobe_emulate_ifmodifiers(struct kprobe *p, struct 
pt_regs *regs) +{ + switch (p->ainsn.opcode) { + case 0xfa: /* cli */ + regs->flags &= ~(X86_EFLAGS_IF); + break; + case 0xfb: /* sti */ + regs->flags |= X86_EFLAGS_IF; + break; + case 0x9c: /* pushf */ + int3_emulate_push(regs, regs->flags); + break; + case 0x9d: /* popf */ + regs->flags = int3_emulate_pop(regs); + break; + } + regs->ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size; +} +NOKPROBE_SYMBOL(kprobe_emulate_ifmodifiers); + +static void kprobe_emulate_ret(struct kprobe *p, struct pt_regs *regs) +{ + int3_emulate_ret(regs); +} +NOKPROBE_SYMBOL(kprobe_emulate_ret); + +static void kprobe_emulate_call(struct kprobe *p, struct pt_regs *regs) +{ + unsigned long func = regs->ip - INT3_INSN_SIZE + p->ainsn.size; + + func += p->ainsn.rel32; + int3_emulate_call(regs, func); +} +NOKPROBE_SYMBOL(kprobe_emulate_call); + +static nokprobe_inline +void __kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs, bool cond) +{ + unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size; + + if (cond) + ip += p->ainsn.rel32; + int3_emulate_jmp(regs, ip); +} + +static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs) +{ + __kprobe_emulate_jmp(p, regs, true); +} +NOKPROBE_SYMBOL(kprobe_emulate_jmp); + +static const unsigned long jcc_mask[6] = { + [0] = X86_EFLAGS_OF, + [1] = X86_EFLAGS_CF, + [2] = X86_EFLAGS_ZF, + [3] = X86_EFLAGS_CF | X86_EFLAGS_ZF, + [4] = X86_EFLAGS_SF, + [5] = X86_EFLAGS_PF, +}; + +static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs) +{ + bool invert = p->ainsn.jcc.type & 1; + bool match; + + if (p->ainsn.jcc.type < 0xc) { + match = regs->flags & jcc_mask[p->ainsn.jcc.type >> 1]; + } else { + match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^ + ((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT); + if (p->ainsn.jcc.type >= 0xe) + match = match && (regs->flags & X86_EFLAGS_ZF); + } + __kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert)); +} +NOKPROBE_SYMBOL(kprobe_emulate_jcc); + +static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs) +{ + bool match; + + if (p->ainsn.loop.type != 3) { /* LOOP* */ + if (p->ainsn.loop.asize == 32) + match = ((*(u32 *)&regs->cx)--) != 0; +#ifdef CONFIG_X86_64 + else if (p->ainsn.loop.asize == 64) + match = ((*(u64 *)&regs->cx)--) != 0; +#endif + else + match = ((*(u16 *)&regs->cx)--) != 0; + } else { /* JCXZ */ + if (p->ainsn.loop.asize == 32) + match = *(u32 *)(&regs->cx) == 0; +#ifdef CONFIG_X86_64 + else if (p->ainsn.loop.asize == 64) + match = *(u64 *)(&regs->cx) == 0; +#endif + else + match = *(u16 *)(&regs->cx) == 0; + } + + if (p->ainsn.loop.type == 0) /* LOOPNE */ + match = match && !(regs->flags & X86_EFLAGS_ZF); + else if (p->ainsn.loop.type == 1) /* LOOPE */ + match = match && (regs->flags & X86_EFLAGS_ZF); + + __kprobe_emulate_jmp(p, regs, match); +} +NOKPROBE_SYMBOL(kprobe_emulate_loop); + +static const int addrmode_regoffs[] = { + offsetof(struct pt_regs, ax), + offsetof(struct pt_regs, cx), + offsetof(struct pt_regs, dx), + offsetof(struct pt_regs, bx), + offsetof(struct pt_regs, sp), + offsetof(struct pt_regs, bp), + offsetof(struct pt_regs, si), + offsetof(struct pt_regs, di), +#ifdef CONFIG_X86_64 + offsetof(struct pt_regs, r8), + offsetof(struct pt_regs, r9), + offsetof(struct pt_regs, r10), + offsetof(struct pt_regs, r11), + offsetof(struct pt_regs, r12), + offsetof(struct pt_regs, r13), + offsetof(struct pt_regs, r14), + offsetof(struct pt_regs, r15), +#endif +}; + +static void kprobe_emulate_call_indirect(struct kprobe *p, struct pt_regs *regs) +{ + unsigned long 
offs = addrmode_regoffs[p->ainsn.indirect.reg]; + + int3_emulate_call(regs, regs_get_register(regs, offs)); +} +NOKPROBE_SYMBOL(kprobe_emulate_call_indirect); + +static void kprobe_emulate_jmp_indirect(struct kprobe *p, struct pt_regs *regs) +{ + unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg]; + + int3_emulate_jmp(regs, regs_get_register(regs, offs)); +} +NOKPROBE_SYMBOL(kprobe_emulate_jmp_indirect); + +static int prepare_emulation(struct kprobe *p, struct insn *insn) { insn_byte_t opcode = insn->opcode.bytes[0]; switch (opcode) { case 0xfa: /* cli */ case 0xfb: /* sti */ + case 0x9c: /* pushfl */ case 0x9d: /* popf/popfd */ - /* Check whether the instruction modifies Interrupt Flag or not */ - p->ainsn.if_modifier = 1; - break; - case 0x9c: /* pushfl */ - p->ainsn.is_pushf = 1; + /* + * IF modifiers must be emulated since it will enable interrupt while + * int3 single stepping. + */ + p->ainsn.emulate_op = kprobe_emulate_ifmodifiers; + p->ainsn.opcode = opcode; break; - case 0xcf: /* iret */ - p->ainsn.if_modifier = 1; - fallthrough; case 0xc2: /* ret/lret */ case 0xc3: case 0xca: case 0xcb: - case 0xea: /* jmp absolute -- ip is correct */ - /* ip is already adjusted, no more changes required */ - p->ainsn.is_abs_ip = 1; - /* Without resume jump, this is boostable */ - p->ainsn.boostable = 1; + p->ainsn.emulate_op = kprobe_emulate_ret; break; - case 0xe8: /* call relative - Fix return addr */ - p->ainsn.is_call = 1; + case 0x9a: /* far call absolute -- segment is not supported */ + case 0xea: /* far jmp absolute -- segment is not supported */ + case 0xcc: /* int3 */ + case 0xcf: /* iret -- in-kernel IRET is not supported */ + return -EOPNOTSUPP; break; -#ifdef CONFIG_X86_32 - case 0x9a: /* call absolute -- same as call absolute, indirect */ - p->ainsn.is_call = 1; - p->ainsn.is_abs_ip = 1; + case 0xe8: /* near call relative */ + p->ainsn.emulate_op = kprobe_emulate_call; + if (insn->immediate.nbytes == 2) + p->ainsn.rel32 = *(s16 *)&insn->immediate.value; + else + p->ainsn.rel32 = *(s32 *)&insn->immediate.value; break; -#endif - case 0xff: + case 0xeb: /* short jump relative */ + case 0xe9: /* near jump relative */ + p->ainsn.emulate_op = kprobe_emulate_jmp; + if (insn->immediate.nbytes == 1) + p->ainsn.rel32 = *(s8 *)&insn->immediate.value; + else if (insn->immediate.nbytes == 2) + p->ainsn.rel32 = *(s16 *)&insn->immediate.value; + else + p->ainsn.rel32 = *(s32 *)&insn->immediate.value; + break; + case 0x70 ... 
0x7f: + /* 1 byte conditional jump */ + p->ainsn.emulate_op = kprobe_emulate_jcc; + p->ainsn.jcc.type = opcode & 0xf; + p->ainsn.rel32 = *(char *)insn->immediate.bytes; + break; + case 0x0f: opcode = insn->opcode.bytes[1]; + if ((opcode & 0xf0) == 0x80) { + /* 2 bytes Conditional Jump */ + p->ainsn.emulate_op = kprobe_emulate_jcc; + p->ainsn.jcc.type = opcode & 0xf; + if (insn->immediate.nbytes == 2) + p->ainsn.rel32 = *(s16 *)&insn->immediate.value; + else + p->ainsn.rel32 = *(s32 *)&insn->immediate.value; + } else if (opcode == 0x01 && + X86_MODRM_REG(insn->modrm.bytes[0]) == 0 && + X86_MODRM_MOD(insn->modrm.bytes[0]) == 3) { + /* VM extensions - not supported */ + return -EOPNOTSUPP; + } + break; + case 0xe0: /* Loop NZ */ + case 0xe1: /* Loop */ + case 0xe2: /* Loop */ + case 0xe3: /* J*CXZ */ + p->ainsn.emulate_op = kprobe_emulate_loop; + p->ainsn.loop.type = opcode & 0x3; + p->ainsn.loop.asize = insn->addr_bytes * 8; + p->ainsn.rel32 = *(s8 *)&insn->immediate.value; + break; + case 0xff: + /* + * Since the 0xff is an extended group opcode, the instruction + * is determined by the MOD/RM byte. + */ + opcode = insn->modrm.bytes[0]; if ((opcode & 0x30) == 0x10) { - /* - * call absolute, indirect - * Fix return addr; ip is correct. - * But this is not boostable - */ - p->ainsn.is_call = 1; - p->ainsn.is_abs_ip = 1; + if ((opcode & 0x8) == 0x8) + return -EOPNOTSUPP; /* far call */ + /* call absolute, indirect */ + p->ainsn.emulate_op = kprobe_emulate_call_indirect; + } else if ((opcode & 0x30) == 0x20) { + if ((opcode & 0x8) == 0x8) + return -EOPNOTSUPP; /* far jmp */ + /* jmp near absolute indirect */ + p->ainsn.emulate_op = kprobe_emulate_jmp_indirect; + } else break; - } else if (((opcode & 0x31) == 0x20) || - ((opcode & 0x31) == 0x21)) { - /* - * jmp near and far, absolute indirect - * ip is correct. - */ - p->ainsn.is_abs_ip = 1; - /* Without resume jump, this is boostable */ - p->ainsn.boostable = 1; - } + + if (insn->addr_bytes != sizeof(unsigned long)) + return -EOPNOTSUPP; /* Don't support differnt size */ + if (X86_MODRM_MOD(opcode) != 3) + return -EOPNOTSUPP; /* TODO: support memory addressing */ + + p->ainsn.indirect.reg = X86_MODRM_RM(opcode); +#ifdef CONFIG_X86_64 + if (X86_REX_B(insn->rex_prefix.value)) + p->ainsn.indirect.reg += 8; +#endif + break; + default: break; } + p->ainsn.size = insn->length; + + return 0; } static int arch_copy_kprobe(struct kprobe *p) { struct insn insn; kprobe_opcode_t buf[MAX_INSN_SIZE]; - int len; + int ret, len; /* Copy an instruction with recovering if other optprobe modifies it.*/ len = __copy_instruction(buf, p->addr, p->ainsn.insn, &insn); if (!len) return -EINVAL; - /* - * __copy_instruction can modify the displacement of the instruction, - * but it doesn't affect boostable check. 
- */ - len = prepare_boost(buf, p, &insn); + /* Analyze the opcode and setup emulate functions */ + ret = prepare_emulation(p, &insn); + if (ret < 0) + return ret; - /* Analyze the opcode and set resume flags */ - set_resume_flags(p, &insn); + /* Add int3 for single-step or booster jmp */ + len = prepare_singlestep(buf, p, &insn); + if (len < 0) + return len; /* Also, displacement change doesn't affect the first byte */ p->opcode = buf[0]; @@ -583,29 +804,7 @@ set_current_kprobe(struct kprobe *p, struct pt_regs *regs, { __this_cpu_write(current_kprobe, p); kcb->kprobe_saved_flags = kcb->kprobe_old_flags - = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); - if (p->ainsn.if_modifier) - kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF; -} - -static nokprobe_inline void clear_btf(void) -{ - if (test_thread_flag(TIF_BLOCKSTEP)) { - unsigned long debugctl = get_debugctlmsr(); - - debugctl &= ~DEBUGCTLMSR_BTF; - update_debugctlmsr(debugctl); - } -} - -static nokprobe_inline void restore_btf(void) -{ - if (test_thread_flag(TIF_BLOCKSTEP)) { - unsigned long debugctl = get_debugctlmsr(); - - debugctl |= DEBUGCTLMSR_BTF; - update_debugctlmsr(debugctl); - } + = (regs->flags & X86_EFLAGS_IF); } void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) @@ -620,6 +819,22 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) } NOKPROBE_SYMBOL(arch_prepare_kretprobe); +static void kprobe_post_process(struct kprobe *cur, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + cur->post_handler(cur, regs, 0); + } + + /* Restore back the original saved kprobes variables and continue. */ + if (kcb->kprobe_status == KPROBE_REENTER) + restore_previous_kprobe(kcb); + else + reset_current_kprobe(); +} +NOKPROBE_SYMBOL(kprobe_post_process); + static void setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter) { @@ -627,7 +842,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs, return; #if !defined(CONFIG_PREEMPTION) - if (p->ainsn.boostable && !p->post_handler) { + if (p->ainsn.boostable) { /* Boost up -- we can execute copied instructions directly */ if (!reenter) reset_current_kprobe(); @@ -646,19 +861,51 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs, kcb->kprobe_status = KPROBE_REENTER; } else kcb->kprobe_status = KPROBE_HIT_SS; - /* Prepare real single stepping */ - clear_btf(); - regs->flags |= X86_EFLAGS_TF; + + if (p->ainsn.emulate_op) { + p->ainsn.emulate_op(p, regs); + kprobe_post_process(p, regs, kcb); + return; + } + + /* Disable interrupt, and set ip register on trampoline */ regs->flags &= ~X86_EFLAGS_IF; - /* single step inline if the instruction is an int3 */ - if (p->opcode == INT3_INSN_OPCODE) - regs->ip = (unsigned long)p->addr; - else - regs->ip = (unsigned long)p->ainsn.insn; + regs->ip = (unsigned long)p->ainsn.insn; } NOKPROBE_SYMBOL(setup_singlestep); /* + * Called after single-stepping. p->addr is the address of the + * instruction whose first byte has been replaced by the "int3" + * instruction. To avoid the SMP problems that can occur when we + * temporarily put back the original opcode to single-step, we + * single-stepped a copy of the instruction. The address of this + * copy is p->ainsn.insn. We also doesn't use trap, but "int3" again + * right after the copied instruction. 
+ * Different from the trap single-step, "int3" single-step can not + * handle the instruction which changes the ip register, e.g. jmp, + * call, conditional jmp, and the instructions which changes the IF + * flags because interrupt must be disabled around the single-stepping. + * Such instructions are software emulated, but others are single-stepped + * using "int3". + * + * When the 2nd "int3" handled, the regs->ip and regs->flags needs to + * be adjusted, so that we can resume execution on correct code. + */ +static void resume_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + unsigned long copy_ip = (unsigned long)p->ainsn.insn; + unsigned long orig_ip = (unsigned long)p->addr; + + /* Restore saved interrupt flag and ip register */ + regs->flags |= kcb->kprobe_saved_flags; + /* Note that regs->ip is executed int3 so must be a step back */ + regs->ip += (orig_ip - copy_ip) - INT3_INSN_SIZE; +} +NOKPROBE_SYMBOL(resume_singlestep); + +/* * We have reentered the kprobe_handler(), since another probe was hit while * within the handler. We save the original kprobes variables and just single * step on the instruction of the new probe without calling any user handlers. @@ -693,6 +940,12 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs, } NOKPROBE_SYMBOL(reenter_kprobe); +static nokprobe_inline int kprobe_is_ss(struct kprobe_ctlblk *kcb) +{ + return (kcb->kprobe_status == KPROBE_HIT_SS || + kcb->kprobe_status == KPROBE_REENTER); +} + /* * Interrupts are disabled on entry as trap3 is an interrupt gate and they * remain disabled throughout this function. @@ -737,7 +990,18 @@ int kprobe_int3_handler(struct pt_regs *regs) reset_current_kprobe(); return 1; } - } else if (*addr != INT3_INSN_OPCODE) { + } else if (kprobe_is_ss(kcb)) { + p = kprobe_running(); + if ((unsigned long)p->ainsn.insn < regs->ip && + (unsigned long)p->ainsn.insn + MAX_INSN_SIZE > regs->ip) { + /* Most provably this is the second int3 for singlestep */ + resume_singlestep(p, regs, kcb); + kprobe_post_process(p, regs, kcb); + return 1; + } + } + + if (*addr != INT3_INSN_OPCODE) { /* * The breakpoint instruction was removed right * after we hit it. Another cpu has removed @@ -810,91 +1074,6 @@ __used __visible void *trampoline_handler(struct pt_regs *regs) } NOKPROBE_SYMBOL(trampoline_handler); -/* - * Called after single-stepping. p->addr is the address of the - * instruction whose first byte has been replaced by the "int 3" - * instruction. To avoid the SMP problems that can occur when we - * temporarily put back the original opcode to single-step, we - * single-stepped a copy of the instruction. The address of this - * copy is p->ainsn.insn. - * - * This function prepares to return from the post-single-step - * interrupt. We have to fix up the stack as follows: - * - * 0) Except in the case of absolute or indirect jump or call instructions, - * the new ip is relative to the copied instruction. We need to make - * it relative to the original instruction. - * - * 1) If the single-stepped instruction was pushfl, then the TF and IF - * flags are set in the just-pushed flags, and may need to be cleared. - * - * 2) If the single-stepped instruction was a call, the return address - * that is atop the stack is the address following the copied instruction. - * We need to make it the address following the original instruction. 
- */ -static void resume_execution(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) -{ - unsigned long *tos = stack_addr(regs); - unsigned long copy_ip = (unsigned long)p->ainsn.insn; - unsigned long orig_ip = (unsigned long)p->addr; - - regs->flags &= ~X86_EFLAGS_TF; - - /* Fixup the contents of top of stack */ - if (p->ainsn.is_pushf) { - *tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF); - *tos |= kcb->kprobe_old_flags; - } else if (p->ainsn.is_call) { - *tos = orig_ip + (*tos - copy_ip); - } - - if (!p->ainsn.is_abs_ip) - regs->ip += orig_ip - copy_ip; - - restore_btf(); -} -NOKPROBE_SYMBOL(resume_execution); - -/* - * Interrupts are disabled on entry as trap1 is an interrupt gate and they - * remain disabled throughout this function. - */ -int kprobe_debug_handler(struct pt_regs *regs) -{ - struct kprobe *cur = kprobe_running(); - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - if (!cur) - return 0; - - resume_execution(cur, regs, kcb); - regs->flags |= kcb->kprobe_saved_flags; - - if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { - kcb->kprobe_status = KPROBE_HIT_SSDONE; - cur->post_handler(cur, regs, 0); - } - - /* Restore back the original saved kprobes variables and continue. */ - if (kcb->kprobe_status == KPROBE_REENTER) { - restore_previous_kprobe(kcb); - goto out; - } - reset_current_kprobe(); -out: - /* - * if somebody else is singlestepping across a probe point, flags - * will have TF set, in which case, continue the remaining processing - * of do_debug, as if this is not a probe hit. - */ - if (regs->flags & X86_EFLAGS_TF) - return 0; - - return 1; -} -NOKPROBE_SYMBOL(kprobe_debug_handler); - int kprobe_fault_handler(struct pt_regs *regs, int trapnr) { struct kprobe *cur = kprobe_running(); @@ -912,20 +1091,9 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr) * normal page fault. */ regs->ip = (unsigned long)cur->addr; - /* - * Trap flag (TF) has been set here because this fault - * happened where the single stepping will be done. - * So clear it by resetting the current kprobe: - */ - regs->flags &= ~X86_EFLAGS_TF; - /* - * Since the single step (trap) has been cancelled, - * we need to restore BTF here. 
- */ - restore_btf(); /* - * If the TF flag was set before the kprobe hit, + * If the IF flag was set before the kprobe hit, * don't touch it: */ regs->flags |= kcb->kprobe_old_flags; diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c index 373e5fa3ce1f..596de2f6d3a5 100644 --- a/arch/x86/kernel/kprobes/ftrace.c +++ b/arch/x86/kernel/kprobes/ftrace.c @@ -12,7 +12,7 @@ #include "common.h" -/* Ftrace callback handler for kprobes -- called under preepmt disabed */ +/* Ftrace callback handler for kprobes -- called under preempt disabled */ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *ops, struct ftrace_regs *fregs) { diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 08eb23074f92..71425ebba98a 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -312,6 +312,8 @@ static int can_optimize(unsigned long paddr) addr = paddr - offset; while (addr < paddr - offset + size) { /* Decode until function end */ unsigned long recovered_insn; + int ret; + if (search_exception_tables(addr)) /* * Since some fixup code will jumps into this function, @@ -321,8 +323,11 @@ static int can_optimize(unsigned long paddr) recovered_insn = recover_probed_instruction(buf, addr); if (!recovered_insn) return 0; - kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); - insn_get_length(&insn); + + ret = insn_decode_kernel(&insn, (void *)recovered_insn); + if (ret < 0) + return 0; + /* * In the case of detecting unknown breakpoint, this could be * a padding INT3 between functions. Let's check that all the diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 5e78e01ca3b4..d307c22e5c18 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -451,6 +451,10 @@ static void __init sev_map_percpu_data(void) } } +#ifdef CONFIG_SMP + +static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask); + static bool pv_tlb_flush_supported(void) { return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && @@ -458,10 +462,6 @@ static bool pv_tlb_flush_supported(void) kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)); } -static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask); - -#ifdef CONFIG_SMP - static bool pv_ipi_supported(void) { return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI); @@ -574,6 +574,54 @@ static void kvm_smp_send_call_func_ipi(const struct cpumask *mask) } } +static void kvm_flush_tlb_multi(const struct cpumask *cpumask, + const struct flush_tlb_info *info) +{ + u8 state; + int cpu; + struct kvm_steal_time *src; + struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask); + + cpumask_copy(flushmask, cpumask); + /* + * We have to call flush only on online vCPUs. And + * queue flush_on_enter for pre-empted vCPUs + */ + for_each_cpu(cpu, flushmask) { + /* + * The local vCPU is never preempted, so we do not explicitly + * skip check for local vCPU - it will never be cleared from + * flushmask. 
+ */ + src = &per_cpu(steal_time, cpu); + state = READ_ONCE(src->preempted); + if ((state & KVM_VCPU_PREEMPTED)) { + if (try_cmpxchg(&src->preempted, &state, + state | KVM_VCPU_FLUSH_TLB)) + __cpumask_clear_cpu(cpu, flushmask); + } + } + + native_flush_tlb_multi(flushmask, info); +} + +static __init int kvm_alloc_cpumask(void) +{ + int cpu; + + if (!kvm_para_available() || nopv) + return 0; + + if (pv_tlb_flush_supported() || pv_ipi_supported()) + for_each_possible_cpu(cpu) { + zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu), + GFP_KERNEL, cpu_to_node(cpu)); + } + + return 0; +} +arch_initcall(kvm_alloc_cpumask); + static void __init kvm_smp_prepare_boot_cpu(void) { /* @@ -611,33 +659,8 @@ static int kvm_cpu_down_prepare(unsigned int cpu) local_irq_enable(); return 0; } -#endif -static void kvm_flush_tlb_others(const struct cpumask *cpumask, - const struct flush_tlb_info *info) -{ - u8 state; - int cpu; - struct kvm_steal_time *src; - struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask); - - cpumask_copy(flushmask, cpumask); - /* - * We have to call flush only on online vCPUs. And - * queue flush_on_enter for pre-empted vCPUs - */ - for_each_cpu(cpu, flushmask) { - src = &per_cpu(steal_time, cpu); - state = READ_ONCE(src->preempted); - if ((state & KVM_VCPU_PREEMPTED)) { - if (try_cmpxchg(&src->preempted, &state, - state | KVM_VCPU_FLUSH_TLB)) - __cpumask_clear_cpu(cpu, flushmask); - } - } - - native_flush_tlb_others(flushmask, info); -} +#endif static void __init kvm_guest_init(void) { @@ -650,13 +673,7 @@ static void __init kvm_guest_init(void) if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { has_steal_clock = 1; - pv_ops.time.steal_clock = kvm_steal_clock; - } - - if (pv_tlb_flush_supported()) { - pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others; - pv_ops.mmu.tlb_remove_table = tlb_remove_table; - pr_info("KVM setup pv remote TLB flush\n"); + static_call_update(pv_steal_clock, kvm_steal_clock); } if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) @@ -668,6 +685,12 @@ static void __init kvm_guest_init(void) } #ifdef CONFIG_SMP + if (pv_tlb_flush_supported()) { + pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi; + pv_ops.mmu.tlb_remove_table = tlb_remove_table; + pr_info("KVM setup pv remote TLB flush\n"); + } + smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu; if (pv_sched_yield_supported()) { smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi; @@ -734,7 +757,7 @@ static uint32_t __init kvm_detect(void) static void __init kvm_apic_init(void) { -#if defined(CONFIG_SMP) +#ifdef CONFIG_SMP if (pv_ipi_supported()) kvm_setup_pv_ipi(); #endif @@ -794,32 +817,6 @@ static __init int activate_jump_labels(void) } arch_initcall(activate_jump_labels); -static __init int kvm_alloc_cpumask(void) -{ - int cpu; - bool alloc = false; - - if (!kvm_para_available() || nopv) - return 0; - - if (pv_tlb_flush_supported()) - alloc = true; - -#if defined(CONFIG_SMP) - if (pv_ipi_supported()) - alloc = true; -#endif - - if (alloc) - for_each_possible_cpu(cpu) { - zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu), - GFP_KERNEL, cpu_to_node(cpu)); - } - - return 0; -} -arch_initcall(kvm_alloc_cpumask); - #ifdef CONFIG_PARAVIRT_SPINLOCKS /* Kick a cpu by its apicid. Used to wake up a halted vcpu */ @@ -836,28 +833,25 @@ static void kvm_kick_cpu(int cpu) static void kvm_wait(u8 *ptr, u8 val) { - unsigned long flags; - if (in_nmi()) return; - local_irq_save(flags); - - if (READ_ONCE(*ptr) != val) - goto out; - /* * halt until it's our turn and kicked. 
Note that we do safe halt * for irq enabled case to avoid hang when lock info is overwritten * in irq spinlock slowpath and no spurious interrupt occur to save us. */ - if (arch_irqs_disabled_flags(flags)) - halt(); - else - safe_halt(); + if (irqs_disabled()) { + if (READ_ONCE(*ptr) == val) + halt(); + } else { + local_irq_disable(); -out: - local_irq_restore(flags); + if (READ_ONCE(*ptr) == val) + safe_halt(); + + local_irq_enable(); + } } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 1fc0962c89c0..d37ed4e1d033 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -106,7 +106,7 @@ static inline void kvm_sched_clock_init(bool stable) if (!stable) clear_sched_clock_stable(); kvm_sched_clock_offset = kvm_clock_read(); - pv_ops.time.sched_clock = kvm_sched_clock_read; + paravirt_set_sched_clock(kvm_sched_clock_read); pr_info("kvm-clock: using sched offset of %llu cycles", kvm_sched_clock_offset); diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index a29a44a98e5b..c078b0d3ab0e 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -260,7 +260,7 @@ static void set_idt(void *newidt, u16 limit) { struct desc_ptr curidt; - /* x86-64 supports unaliged loads & stores */ + /* x86-64 supports unaligned loads & stores */ curidt.size = limit; curidt.address = (unsigned long)newidt; @@ -402,8 +402,8 @@ void machine_kexec(struct kimage *image) #ifdef CONFIG_KEXEC_FILE void *arch_kexec_kernel_image_load(struct kimage *image) { - vfree(image->arch.elf_headers); - image->arch.elf_headers = NULL; + vfree(image->elf_headers); + image->elf_headers = NULL; if (!image->fops || !image->fops->load) return ERR_PTR(-ENOEXEC); diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index bf250a339655..2ef961cf4cfc 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -524,6 +524,16 @@ nmi_restart: mds_user_clear_cpu_buffers(); } +#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL) +DEFINE_IDTENTRY_RAW(exc_nmi_noist) +{ + exc_nmi(regs); +} +#endif +#if IS_MODULE(CONFIG_KVM_INTEL) +EXPORT_SYMBOL_GPL(asm_exc_nmi_noist); +#endif + void stop_nmi(void) { ignore_nmis++; diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c index 4f75d0cf6305..9e1ea99ad9df 100644 --- a/arch/x86/kernel/paravirt-spinlocks.c +++ b/arch/x86/kernel/paravirt-spinlocks.c @@ -32,3 +32,12 @@ bool pv_is_native_vcpu_is_preempted(void) return pv_ops.lock.vcpu_is_preempted.func == __raw_callee_save___native_vcpu_is_preempted; } + +void __init paravirt_set_cap(void) +{ + if (!pv_is_native_spin_unlock()) + setup_force_cpu_cap(X86_FEATURE_PVUNLOCK); + + if (!pv_is_native_vcpu_is_preempted()) + setup_force_cpu_cap(X86_FEATURE_VCPUPREEMPT); +} diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index c60222ab8ab9..04cafc057bed 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -14,6 +14,7 @@ #include <linux/highmem.h> #include <linux/kprobes.h> #include <linux/pgtable.h> +#include <linux/static_call.h> #include <asm/bug.h> #include <asm/paravirt.h> @@ -52,7 +53,10 @@ void __init default_banner(void) } /* Undefined instruction for dealing with missing ops pointers. 
*/ -static const unsigned char ud2a[] = { 0x0f, 0x0b }; +static void paravirt_BUG(void) +{ + BUG(); +} struct branch { unsigned char opcode; @@ -85,25 +89,6 @@ u64 notrace _paravirt_ident_64(u64 x) { return x; } - -static unsigned paravirt_patch_jmp(void *insn_buff, const void *target, - unsigned long addr, unsigned len) -{ - struct branch *b = insn_buff; - unsigned long delta = (unsigned long)target - (addr+5); - - if (len < 5) { -#ifdef CONFIG_RETPOLINE - WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr); -#endif - return len; /* call too long for patch site */ - } - - b->opcode = 0xe9; /* jmp */ - b->delta = delta; - - return 5; -} #endif DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key); @@ -114,8 +99,8 @@ void __init native_pv_lock_init(void) static_branch_disable(&virt_spin_lock_key); } -unsigned paravirt_patch_default(u8 type, void *insn_buff, - unsigned long addr, unsigned len) +unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr, + unsigned int len) { /* * Neat trick to map patch type back to the call within the @@ -125,20 +110,10 @@ unsigned paravirt_patch_default(u8 type, void *insn_buff, unsigned ret; if (opfunc == NULL) - /* If there's no function, patch it with a ud2a (BUG) */ - ret = paravirt_patch_insns(insn_buff, len, ud2a, ud2a+sizeof(ud2a)); + /* If there's no function, patch it with paravirt_BUG() */ + ret = paravirt_patch_call(insn_buff, paravirt_BUG, addr, len); else if (opfunc == _paravirt_nop) ret = 0; - -#ifdef CONFIG_PARAVIRT_XXL - /* identity functions just return their single argument */ - else if (opfunc == _paravirt_ident_64) - ret = paravirt_patch_ident_64(insn_buff, len); - - else if (type == PARAVIRT_PATCH(cpu.iret)) - /* If operation requires a jmp, then jmp */ - ret = paravirt_patch_jmp(insn_buff, opfunc, addr, len); -#endif else /* Otherwise call the function. */ ret = paravirt_patch_call(insn_buff, opfunc, addr, len); @@ -146,19 +121,6 @@ unsigned paravirt_patch_default(u8 type, void *insn_buff, return ret; } -unsigned paravirt_patch_insns(void *insn_buff, unsigned len, - const char *start, const char *end) -{ - unsigned insn_len = end - start; - - /* Alternative instruction is too large for the patch site and we cannot continue: */ - BUG_ON(insn_len > len || start == NULL); - - memcpy(insn_buff, start, insn_len); - - return insn_len; -} - struct static_key paravirt_steal_enabled; struct static_key paravirt_steal_rq_enabled; @@ -167,6 +129,14 @@ static u64 native_steal_clock(int cpu) return 0; } +DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); +DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock); + +void paravirt_set_sched_clock(u64 (*func)(void)) +{ + static_call_update(pv_sched_clock, func); +} + /* These are in entry.S */ extern void native_iret(void); @@ -269,13 +239,6 @@ struct pv_info pv_info = { #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64) struct paravirt_patch_template pv_ops = { - /* Init ops. */ - .init.patch = native_patch, - - /* Time ops. */ - .time.sched_clock = native_sched_clock, - .time.steal_clock = native_steal_clock, - /* Cpu ops. 
*/ .cpu.io_delay = native_io_delay, @@ -308,8 +271,6 @@ struct paravirt_patch_template pv_ops = { .cpu.load_sp0 = native_load_sp0, - .cpu.iret = native_iret, - #ifdef CONFIG_X86_IOPL_IOPERM .cpu.invalidate_io_bitmap = native_tss_invalidate_io_bitmap, .cpu.update_io_bitmap = native_tss_update_io_bitmap, @@ -330,7 +291,7 @@ struct paravirt_patch_template pv_ops = { .mmu.flush_tlb_user = native_flush_tlb_local, .mmu.flush_tlb_kernel = native_flush_tlb_global, .mmu.flush_tlb_one_user = native_flush_tlb_one_user, - .mmu.flush_tlb_others = native_flush_tlb_others, + .mmu.flush_tlb_multi = native_flush_tlb_multi, .mmu.tlb_remove_table = (void (*)(struct mmu_gather *, void *))tlb_remove_page, @@ -414,6 +375,8 @@ struct paravirt_patch_template pv_ops = { NOKPROBE_SYMBOL(native_get_debugreg); NOKPROBE_SYMBOL(native_set_debugreg); NOKPROBE_SYMBOL(native_load_idt); + +void (*paravirt_iret)(void) = native_iret; #endif EXPORT_SYMBOL(pv_ops); diff --git a/arch/x86/kernel/paravirt_patch.c b/arch/x86/kernel/paravirt_patch.c deleted file mode 100644 index abd27ec67397..000000000000 --- a/arch/x86/kernel/paravirt_patch.c +++ /dev/null @@ -1,99 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <linux/stringify.h> - -#include <asm/paravirt.h> -#include <asm/asm-offsets.h> - -#define PSTART(d, m) \ - patch_data_##d.m - -#define PEND(d, m) \ - (PSTART(d, m) + sizeof(patch_data_##d.m)) - -#define PATCH(d, m, insn_buff, len) \ - paravirt_patch_insns(insn_buff, len, PSTART(d, m), PEND(d, m)) - -#define PATCH_CASE(ops, m, data, insn_buff, len) \ - case PARAVIRT_PATCH(ops.m): \ - return PATCH(data, ops##_##m, insn_buff, len) - -#ifdef CONFIG_PARAVIRT_XXL -struct patch_xxl { - const unsigned char irq_irq_disable[1]; - const unsigned char irq_irq_enable[1]; - const unsigned char irq_save_fl[2]; - const unsigned char mmu_read_cr2[3]; - const unsigned char mmu_read_cr3[3]; - const unsigned char mmu_write_cr3[3]; - const unsigned char cpu_wbinvd[2]; - const unsigned char mov64[3]; -}; - -static const struct patch_xxl patch_data_xxl = { - .irq_irq_disable = { 0xfa }, // cli - .irq_irq_enable = { 0xfb }, // sti - .irq_save_fl = { 0x9c, 0x58 }, // pushf; pop %[re]ax - .mmu_read_cr2 = { 0x0f, 0x20, 0xd0 }, // mov %cr2, %[re]ax - .mmu_read_cr3 = { 0x0f, 0x20, 0xd8 }, // mov %cr3, %[re]ax - .mmu_write_cr3 = { 0x0f, 0x22, 0xdf }, // mov %rdi, %cr3 - .cpu_wbinvd = { 0x0f, 0x09 }, // wbinvd - .mov64 = { 0x48, 0x89, 0xf8 }, // mov %rdi, %rax -}; - -unsigned int paravirt_patch_ident_64(void *insn_buff, unsigned int len) -{ - return PATCH(xxl, mov64, insn_buff, len); -} -# endif /* CONFIG_PARAVIRT_XXL */ - -#ifdef CONFIG_PARAVIRT_SPINLOCKS -struct patch_lock { - unsigned char queued_spin_unlock[3]; - unsigned char vcpu_is_preempted[2]; -}; - -static const struct patch_lock patch_data_lock = { - .vcpu_is_preempted = { 0x31, 0xc0 }, // xor %eax, %eax - -# ifdef CONFIG_X86_64 - .queued_spin_unlock = { 0xc6, 0x07, 0x00 }, // movb $0, (%rdi) -# else - .queued_spin_unlock = { 0xc6, 0x00, 0x00 }, // movb $0, (%eax) -# endif -}; -#endif /* CONFIG_PARAVIRT_SPINLOCKS */ - -unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr, - unsigned int len) -{ - switch (type) { - -#ifdef CONFIG_PARAVIRT_XXL - PATCH_CASE(irq, save_fl, xxl, insn_buff, len); - PATCH_CASE(irq, irq_enable, xxl, insn_buff, len); - PATCH_CASE(irq, irq_disable, xxl, insn_buff, len); - - PATCH_CASE(mmu, read_cr2, xxl, insn_buff, len); - PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len); - PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len); - - PATCH_CASE(cpu, 
wbinvd, xxl, insn_buff, len); -#endif - -#ifdef CONFIG_PARAVIRT_SPINLOCKS - case PARAVIRT_PATCH(lock.queued_spin_unlock): - if (pv_is_native_spin_unlock()) - return PATCH(lock, queued_spin_unlock, insn_buff, len); - break; - - case PARAVIRT_PATCH(lock.vcpu_is_preempted): - if (pv_is_native_vcpu_is_preempted()) - return PATCH(lock, vcpu_is_preempted, insn_buff, len); - break; -#endif - default: - break; - } - - return paravirt_patch_default(type, insn_buff, addr, len); -} diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 9c214d7085a4..5e1f38179f49 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -63,14 +63,9 @@ __visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = { */ .sp0 = (1UL << (BITS_PER_LONG-1)) + 1, - /* - * .sp1 is cpu_current_top_of_stack. The init task never - * runs user code, but cpu_current_top_of_stack should still - * be well defined before the first context switch. - */ +#ifdef CONFIG_X86_32 .sp1 = TOP_OF_INIT_STACK, -#ifdef CONFIG_X86_32 .ss0 = __KERNEL_DS, .ss1 = __KERNEL_CS, #endif @@ -161,7 +156,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, #endif /* Kernel thread ? */ - if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { + if (unlikely(p->flags & PF_KTHREAD)) { memset(childregs, 0, sizeof(struct pt_regs)); kthread_frame_init(frame, sp, arg); return 0; @@ -177,6 +172,23 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, task_user_gs(p) = get_user_gs(current_pt_regs()); #endif + if (unlikely(p->flags & PF_IO_WORKER)) { + /* + * An IO thread is a user space thread, but it doesn't + * return to ret_after_fork(). + * + * In order to indicate that to tools like gdb, + * we reset the stack and instruction pointers. + * + * It does the same kernel frame setup to return to a kernel + * function that a kernel thread does. + */ + childregs->sp = 0; + childregs->ip = 0; + kthread_frame_init(frame, sp, arg); + return 0; + } + /* Set a new TLS for the child thread? */ if (clone_flags & CLONE_SETTLS) ret = set_new_tls(p, tls); @@ -451,7 +463,7 @@ void speculative_store_bypass_ht_init(void) * First HT sibling to come up on the core. Link shared state of * the first HT sibling to itself. The siblings on the same core * which come up later will see the shared state pointer and link - * themself to the state of this CPU. + * themselves to the state of this CPU. */ st->shared_state = st; } diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index 11065dc03f5b..eda37df016f0 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c @@ -89,7 +89,7 @@ u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) /* * Assumption here is that last_value, a global accumulator, always goes * forward. If we are less than that, we should not be much smaller. - * We assume there is an error marging we're inside, and then the correction + * We assume there is an error margin we're inside, and then the correction * does not sacrifice accuracy. * * For reads: global may have changed between test and return, diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S index 94b33885f8d2..f469153eca8a 100644 --- a/arch/x86/kernel/relocate_kernel_32.S +++ b/arch/x86/kernel/relocate_kernel_32.S @@ -107,7 +107,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) * - Write protect disabled * - No task switch * - Don't do FP software emulation. 
- * - Proctected mode enabled + * - Protected mode enabled */ movl %cr0, %eax andl $~(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %eax diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index a4d9a261425b..c53271aebb64 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S @@ -121,7 +121,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) * - Write protect disabled * - No task switch * - Don't do FP software emulation. - * - Proctected mode enabled + * - Protected mode enabled */ movq %cr0, %rax andq $~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index d883176ef2ce..72920af0b3c0 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -65,7 +65,7 @@ RESERVE_BRK(dmi_alloc, 65536); /* * Range of the BSS area. The size of the BSS area is determined - * at link time, with RESERVE_BRK*() facility reserving additional + * at link time, with RESERVE_BRK() facility reserving additional * chunks. */ unsigned long _brk_start = (unsigned long)__brk_base; @@ -633,11 +633,16 @@ static void __init trim_snb_memory(void) printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n"); /* - * Reserve all memory below the 1 MB mark that has not - * already been reserved. + * SandyBridge integrated graphics devices have a bug that prevents + * them from accessing certain memory ranges, namely anything below + * 1M and in the pages listed in bad_pages[] above. + * + * To avoid these pages being ever accessed by SNB gfx devices + * reserve all memory below the 1 MB mark and bad_pages that have + * not already been reserved at boot time. */ memblock_reserve(0, 1<<20); - + for (i = 0; i < ARRAY_SIZE(bad_pages); i++) { if (memblock_reserve(bad_pages[i], PAGE_SIZE)) printk(KERN_WARNING "failed to reserve 0x%08lx\n", @@ -645,18 +650,6 @@ static void __init trim_snb_memory(void) } } -/* - * Here we put platform-specific memory range workarounds, i.e. - * memory known to be corrupt or otherwise in need to be reserved on - * specific platforms. - * - * If this gets used more widely it could use a real dispatch mechanism. - */ -static void __init trim_platform_memory_ranges(void) -{ - trim_snb_memory(); -} - static void __init trim_bios_range(void) { /* @@ -725,11 +718,41 @@ static int __init parse_reservelow(char *p) early_param("reservelow", parse_reservelow); -static void __init trim_low_memory_range(void) +static void __init early_reserve_memory(void) { + /* + * Reserve the memory occupied by the kernel between _text and + * __end_of_kernel_reserve symbols. Any kernel sections after the + * __end_of_kernel_reserve symbol must be explicitly reserved with a + * separate memblock_reserve() or they will be discarded. + */ + memblock_reserve(__pa_symbol(_text), + (unsigned long)__end_of_kernel_reserve - (unsigned long)_text); + + /* + * The first 4Kb of memory is a BIOS owned area, but generally it is + * not listed as such in the E820 table. + * + * Reserve the first memory page and typically some additional + * memory (64KiB by default) since some BIOSes are known to corrupt + * low memory. See the Kconfig help text for X86_RESERVE_LOW. + * + * In addition, make sure page 0 is always reserved because on + * systems with L1TF its contents can be leaked to user processes. 
+ */ memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE)); + + early_reserve_initrd(); + + if (efi_enabled(EFI_BOOT)) + efi_memblock_x86_reserve_range(); + + memblock_x86_reserve_range_setup_data(); + + reserve_ibft_region(); + reserve_bios_regions(); } - + /* * Dump out kernel offset information on panic. */ @@ -764,29 +787,6 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) void __init setup_arch(char **cmdline_p) { - /* - * Reserve the memory occupied by the kernel between _text and - * __end_of_kernel_reserve symbols. Any kernel sections after the - * __end_of_kernel_reserve symbol must be explicitly reserved with a - * separate memblock_reserve() or they will be discarded. - */ - memblock_reserve(__pa_symbol(_text), - (unsigned long)__end_of_kernel_reserve - (unsigned long)_text); - - /* - * Make sure page 0 is always reserved because on systems with - * L1TF its contents can be leaked to user processes. - */ - memblock_reserve(0, PAGE_SIZE); - - early_reserve_initrd(); - - /* - * At this point everything still needed from the boot loader - * or BIOS or kernel text should be early reserved or marked not - * RAM in e820. All other memory is free game. - */ - #ifdef CONFIG_X86_32 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); @@ -822,7 +822,6 @@ void __init setup_arch(char **cmdline_p) idt_setup_early_traps(); early_cpu_init(); - arch_init_ideal_nops(); jump_label_init(); static_call_init(); early_ioremap_init(); @@ -910,8 +909,18 @@ void __init setup_arch(char **cmdline_p) parse_early_param(); - if (efi_enabled(EFI_BOOT)) - efi_memblock_x86_reserve_range(); + /* + * Do some memory reservations *before* memory is added to + * memblock, so memblock allocations won't overwrite it. + * Do it after early param, so we could get (unlikely) panic from + * serial. + * + * After this point everything still needed from the boot loader or + * firmware or kernel text should be early reserved or marked not + * RAM in e820. All other memory is free game. + */ + early_reserve_memory(); + #ifdef CONFIG_MEMORY_HOTPLUG /* * Memory used by the kernel cannot be hot-removed because Linux @@ -938,9 +947,6 @@ void __init setup_arch(char **cmdline_p) x86_report_nx(); - /* after early param, so could get panic from serial */ - memblock_x86_reserve_range_setup_data(); - if (acpi_mps_check()) { #ifdef CONFIG_X86_LOCAL_APIC disable_apic = 1; @@ -1032,14 +1038,12 @@ void __init setup_arch(char **cmdline_p) */ find_smp_config(); - reserve_ibft_region(); - early_alloc_pgt_buf(); /* * Need to conclude brk, before e820__memblock_setup() - * it could use memblock_find_in_range, could overlap with - * brk area. + * it could use memblock_find_in_range, could overlap with + * brk area. */ reserve_brk(); @@ -1054,8 +1058,6 @@ void __init setup_arch(char **cmdline_p) */ sev_setup_arch(); - reserve_bios_regions(); - efi_fake_memmap(); efi_find_mirror(); efi_esrt_init(); @@ -1081,8 +1083,12 @@ void __init setup_arch(char **cmdline_p) reserve_real_mode(); - trim_platform_memory_ranges(); - trim_low_memory_range(); + /* + * Reserving memory causing GPU hangs on Sandy Bridge integrated + * graphics devices should be done after we allocated memory under + * 1M for the real mode trampoline. + */ + trim_snb_memory(); init_mem_mapping(); @@ -1129,6 +1135,8 @@ void __init setup_arch(char **cmdline_p) reserve_initrd(); acpi_table_upgrade(); + /* Look for ACPI tables and reserve memory occupied by them. 
*/ + acpi_boot_table_init(); vsmp_init(); @@ -1136,11 +1144,6 @@ void __init setup_arch(char **cmdline_p) early_platform_quirks(); - /* - * Parse the ACPI tables for possible boot-time SMP configuration. - */ - acpi_boot_table_init(); - early_acpi_boot_init(); initmem_init(); diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index fd945ce78554..0941d2f44f2a 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -224,7 +224,6 @@ void __init setup_per_cpu_areas(void) per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); per_cpu(cpu_number, cpu) = cpu; setup_percpu_segment(cpu); - setup_stack_canary_segment(cpu); /* * Copy data used in early init routines from the * initial arrays to the per cpu data areas. These diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-es-shared.c index cdc04d091242..0aa9f13efd57 100644 --- a/arch/x86/kernel/sev-es-shared.c +++ b/arch/x86/kernel/sev-es-shared.c @@ -24,7 +24,7 @@ static bool __init sev_es_check_cpu_features(void) return true; } -static void sev_es_terminate(unsigned int reason) +static void __noreturn sev_es_terminate(unsigned int reason) { u64 val = GHCB_SEV_TERMINATE; @@ -186,7 +186,6 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code) * make it accessible to the hypervisor. * * In particular, check for: - * - Hypervisor CPUID bit * - Availability of CPUID leaf 0x8000001f * - SEV CPUID bit. * @@ -194,10 +193,7 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code) * can't be checked here. */ - if ((fn == 1 && !(regs->cx & BIT(31)))) - /* Hypervisor bit */ - goto fail; - else if (fn == 0x80000000 && (regs->ax < 0x8000001f)) + if (fn == 0x80000000 && (regs->ax < 0x8000001f)) /* SEV leaf check */ goto fail; else if ((fn == 0x8000001f && !(regs->ax & BIT(1)))) @@ -210,12 +206,8 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code) return; fail: - sev_es_wr_ghcb_msr(GHCB_SEV_TERMINATE); - VMGEXIT(); - - /* Shouldn't get here - if we do halt the machine */ - while (true) - asm volatile("hlt\n"); + /* Terminate the guest */ + sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST); } static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt, diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c index 04a780abb512..73873b007838 100644 --- a/arch/x86/kernel/sev-es.c +++ b/arch/x86/kernel/sev-es.c @@ -137,29 +137,41 @@ static __always_inline bool on_vc_stack(struct pt_regs *regs) } /* - * This function handles the case when an NMI is raised in the #VC exception - * handler entry code. In this case, the IST entry for #VC must be adjusted, so - * that any subsequent #VC exception will not overwrite the stack contents of the - * interrupted #VC handler. + * This function handles the case when an NMI is raised in the #VC + * exception handler entry code, before the #VC handler has switched off + * its IST stack. In this case, the IST entry for #VC must be adjusted, + * so that any nested #VC exception will not overwrite the stack + * contents of the interrupted #VC handler. * * The IST entry is adjusted unconditionally so that it can be also be - * unconditionally adjusted back in sev_es_ist_exit(). Otherwise a nested - * sev_es_ist_exit() call may adjust back the IST entry too early. + * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a + * nested sev_es_ist_exit() call may adjust back the IST entry too + * early. 
+ * + * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run + * on the NMI IST stack, as they are only called from NMI handling code + * right now. */ void noinstr __sev_es_ist_enter(struct pt_regs *regs) { unsigned long old_ist, new_ist; /* Read old IST entry */ - old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]); + new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]); - /* Make room on the IST stack */ + /* + * If NMI happened while on the #VC IST stack, set the new IST + * value below regs->sp, so that the interrupted stack frame is + * not overwritten by subsequent #VC exceptions. + */ if (on_vc_stack(regs)) - new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist); - else - new_ist = old_ist - sizeof(old_ist); + new_ist = regs->sp; - /* Store old IST entry */ + /* + * Reserve additional 8 bytes and store old IST value so this + * adjustment can be unrolled in __sev_es_ist_exit(). + */ + new_ist -= sizeof(old_ist); *(unsigned long *)new_ist = old_ist; /* Set new IST entry */ @@ -251,39 +263,54 @@ static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt, return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE); } -static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt) +static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt) { char buffer[MAX_INSN_SIZE]; - enum es_result ret; int res; - if (user_mode(ctxt->regs)) { - res = insn_fetch_from_user_inatomic(ctxt->regs, buffer); - if (!res) { - ctxt->fi.vector = X86_TRAP_PF; - ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER; - ctxt->fi.cr2 = ctxt->regs->ip; - return ES_EXCEPTION; - } + res = insn_fetch_from_user_inatomic(ctxt->regs, buffer); + if (!res) { + ctxt->fi.vector = X86_TRAP_PF; + ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER; + ctxt->fi.cr2 = ctxt->regs->ip; + return ES_EXCEPTION; + } - if (!insn_decode(&ctxt->insn, ctxt->regs, buffer, res)) - return ES_DECODE_FAILED; - } else { - res = vc_fetch_insn_kernel(ctxt, buffer); - if (res) { - ctxt->fi.vector = X86_TRAP_PF; - ctxt->fi.error_code = X86_PF_INSTR; - ctxt->fi.cr2 = ctxt->regs->ip; - return ES_EXCEPTION; - } + if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, res)) + return ES_DECODE_FAILED; - insn_init(&ctxt->insn, buffer, MAX_INSN_SIZE - res, 1); - insn_get_length(&ctxt->insn); + if (ctxt->insn.immediate.got) + return ES_OK; + else + return ES_DECODE_FAILED; +} + +static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt) +{ + char buffer[MAX_INSN_SIZE]; + int res, ret; + + res = vc_fetch_insn_kernel(ctxt, buffer); + if (res) { + ctxt->fi.vector = X86_TRAP_PF; + ctxt->fi.error_code = X86_PF_INSTR; + ctxt->fi.cr2 = ctxt->regs->ip; + return ES_EXCEPTION; } - ret = ctxt->insn.immediate.got ? ES_OK : ES_DECODE_FAILED; + ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64); + if (ret < 0) + return ES_DECODE_FAILED; + else + return ES_OK; +} - return ret; +static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt) +{ + if (user_mode(ctxt->regs)) + return __vc_decode_user_insn(ctxt); + else + return __vc_decode_kern_insn(ctxt); } static enum es_result vc_write_mem(struct es_em_ctxt *ctxt, diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index ea794a083c44..a06cb107c0e8 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -492,7 +492,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig, * SS descriptor, but we do need SS to be valid. 
It's possible * that the old SS is entirely bogus -- this can happen if the * signal we're trying to deliver is #GP or #SS caused by a bad - * SS value. We also have a compatbility issue here: DOSEMU + * SS value. We also have a compatibility issue here: DOSEMU * relies on the contents of the SS register indicating the * SS value at the time of the signal, even though that code in * DOSEMU predates sigreturn's ability to restore SS. (DOSEMU @@ -766,30 +766,8 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs) { - /* - * This function is fundamentally broken as currently - * implemented. - * - * The idea is that we want to trigger a call to the - * restart_block() syscall and that we want in_ia32_syscall(), - * in_x32_syscall(), etc. to match whatever they were in the - * syscall being restarted. We assume that the syscall - * instruction at (regs->ip - 2) matches whatever syscall - * instruction we used to enter in the first place. - * - * The problem is that we can get here when ptrace pokes - * syscall-like values into regs even if we're not in a syscall - * at all. - * - * For now, we maintain historical behavior and guess based on - * stored state. We could do better by saving the actual - * syscall arch in restart_block or (with caveats on x32) by - * checking if regs->ip points to 'int $0x80'. The current - * behavior is incorrect if a tracer has a different bitness - * than the tracee. - */ #ifdef CONFIG_IA32_EMULATION - if (current_thread_info()->status & (TS_COMPAT|TS_I386_REGS_POKED)) + if (current->restart_block.arch_data & TS_COMPAT) return __NR_ia32_restart_syscall; #endif #ifdef CONFIG_X86_X32_ABI diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c index a5330ff498f0..0e5d0a7e203b 100644 --- a/arch/x86/kernel/signal_compat.c +++ b/arch/x86/kernel/signal_compat.c @@ -29,7 +29,7 @@ static inline void signal_compat_build_tests(void) BUILD_BUG_ON(NSIGFPE != 15); BUILD_BUG_ON(NSIGSEGV != 9); BUILD_BUG_ON(NSIGBUS != 5); - BUILD_BUG_ON(NSIGTRAP != 5); + BUILD_BUG_ON(NSIGTRAP != 6); BUILD_BUG_ON(NSIGCHLD != 6); BUILD_BUG_ON(NSIGSYS != 2); @@ -138,6 +138,9 @@ static inline void signal_compat_build_tests(void) BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x20); BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pkey) != 0x14); + BUILD_BUG_ON(offsetof(siginfo_t, si_perf) != 0x18); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf) != 0x10); + CHECK_CSI_OFFSET(_sigpoll); CHECK_CSI_SIZE (_sigpoll, 2*sizeof(int)); CHECK_SI_SIZE (_sigpoll, 4*sizeof(int)); diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index eff4ce3b10da..06db901fabe8 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -67,7 +67,7 @@ * 5AP. symmetric IO mode (normal Linux operation) not affected. * 'noapic' mode has vector 0xf filled out properly. * 6AP. 'noapic' mode might be affected - fixed in later steppings - * 7AP. We do not assume writes to the LVT deassering IRQs + * 7AP. We do not assume writes to the LVT deasserting IRQs * 8AP. We do not enable low power mode (deep sleep) during MP bootup * 9AP. We do not use mixed mode * @@ -204,7 +204,7 @@ static void native_stop_other_cpus(int wait) } /* * Don't wait longer than 10 ms if the caller didn't - * reqeust it. If wait is true, the machine hangs here if + * request it. If wait is true, the machine hangs here if * one or more CPUs do not reach shutdown state. 
*/ timeout = USEC_PER_MSEC * 10; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 02813a7f3a7c..0ad5214f598a 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -458,29 +458,52 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) return false; } +static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) +{ + if (c->phys_proc_id == o->phys_proc_id && + c->cpu_die_id == o->cpu_die_id) + return true; + return false; +} + +/* + * Unlike the other levels, we do not enforce keeping a + * multicore group inside a NUMA node. If this happens, we will + * discard the MC level of the topology later. + */ +static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) +{ + if (c->phys_proc_id == o->phys_proc_id) + return true; + return false; +} + /* - * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs. + * Define intel_cod_cpu[] for Intel COD (Cluster-on-Die) CPUs. * - * These are Intel CPUs that enumerate an LLC that is shared by - * multiple NUMA nodes. The LLC on these systems is shared for - * off-package data access but private to the NUMA node (half - * of the package) for on-package access. + * Any Intel CPU that has multiple nodes per package and does not + * match intel_cod_cpu[] has the SNC (Sub-NUMA Cluster) topology. * - * CPUID (the source of the information about the LLC) can only - * enumerate the cache as being shared *or* unshared, but not - * this particular configuration. The CPU in this case enumerates - * the cache to be shared across the entire package (spanning both - * NUMA nodes). + * When in SNC mode, these CPUs enumerate an LLC that is shared + * by multiple NUMA nodes. The LLC is shared for off-package data + * access but private to the NUMA node (half of the package) for + * on-package access. CPUID (the source of the information about + * the LLC) can only enumerate the cache as shared or unshared, + * but not this particular configuration. */ -static const struct x86_cpu_id snc_cpu[] = { - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL), +static const struct x86_cpu_id intel_cod_cpu[] = { + X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, 0), /* COD */ + X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, 0), /* COD */ + X86_MATCH_INTEL_FAM6_MODEL(ANY, 1), /* SNC */ {} }; static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) { + const struct x86_cpu_id *id = x86_match_cpu(intel_cod_cpu); int cpu1 = c->cpu_index, cpu2 = o->cpu_index; + bool intel_snc = id && id->driver_data; /* Do not match if we do not have a valid APICID for cpu: */ if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID) @@ -495,32 +518,12 @@ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) * means 'c' does not share the LLC of 'o'. This will be * reflected to userspace. */ - if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu)) + if (match_pkg(c, o) && !topology_same_node(c, o) && intel_snc) return false; return topology_sane(c, o, "llc"); } -/* - * Unlike the other levels, we do not enforce keeping a - * multicore group inside a NUMA node. If this happens, we will - * discard the MC level of the topology later. 
- */ -static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) -{ - if (c->phys_proc_id == o->phys_proc_id) - return true; - return false; -} - -static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) -{ - if ((c->phys_proc_id == o->phys_proc_id) && - (c->cpu_die_id == o->cpu_die_id)) - return true; - return false; -} - #if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC) static inline int x86_sched_itmt_flags(void) @@ -592,14 +595,23 @@ void set_cpu_sibling_map(int cpu) for_each_cpu(i, cpu_sibling_setup_mask) { o = &cpu_data(i); + if (match_pkg(c, o) && !topology_same_node(c, o)) + x86_has_numa_in_package = true; + if ((i == cpu) || (has_smt && match_smt(c, o))) link_mask(topology_sibling_cpumask, cpu, i); if ((i == cpu) || (has_mp && match_llc(c, o))) link_mask(cpu_llc_shared_mask, cpu, i); + if ((i == cpu) || (has_mp && match_die(c, o))) + link_mask(topology_die_cpumask, cpu, i); } + threads = cpumask_weight(topology_sibling_cpumask(cpu)); + if (threads > __max_smt_threads) + __max_smt_threads = threads; + /* * This needs a separate iteration over the cpus because we rely on all * topology_sibling_cpumask links to be set-up. @@ -613,8 +625,7 @@ void set_cpu_sibling_map(int cpu) /* * Does this new cpu bringup a new core? */ - if (cpumask_weight( - topology_sibling_cpumask(cpu)) == 1) { + if (threads == 1) { /* * for each core in package, increment * the booted_cores for this new cpu @@ -631,16 +642,7 @@ void set_cpu_sibling_map(int cpu) } else if (i != cpu && !c->booted_cores) c->booted_cores = cpu_data(i).booted_cores; } - if (match_pkg(c, o) && !topology_same_node(c, o)) - x86_has_numa_in_package = true; - - if ((i == cpu) || (has_mp && match_die(c, o))) - link_mask(topology_die_cpumask, cpu, i); } - - threads = cpumask_weight(topology_sibling_cpumask(cpu)); - if (threads > __max_smt_threads) - __max_smt_threads = threads; } /* maps the cpu to the sched domain representing multi-core */ @@ -1407,7 +1409,7 @@ void __init calculate_max_logical_packages(void) int ncpus; /* - * Today neither Intel nor AMD support heterogenous systems so + * Today neither Intel nor AMD support heterogeneous systems so * extrapolate the boot cpu's data to all packages. */ ncpus = cpu_data(0).booted_cores * topology_max_smt_threads(); @@ -1659,13 +1661,17 @@ void play_dead_common(void) local_irq_disable(); } -static bool wakeup_cpu0(void) +/** + * cond_wakeup_cpu0 - Wake up CPU0 if needed. + * + * If NMI wants to wake up CPU0, start CPU0. + */ +void cond_wakeup_cpu0(void) { if (smp_processor_id() == 0 && enable_start_cpu0) - return true; - - return false; + start_cpu0(); } +EXPORT_SYMBOL_GPL(cond_wakeup_cpu0); /* * We need to flush the caches before going to sleep, lest we have @@ -1734,11 +1740,8 @@ static inline void mwait_play_dead(void) __monitor(mwait_ptr, 0, 0); mb(); __mwait(eax, 0); - /* - * If NMI wants to wake up CPU0, start CPU0. - */ - if (wakeup_cpu0()) - start_cpu0(); + + cond_wakeup_cpu0(); } } @@ -1749,11 +1752,8 @@ void hlt_play_dead(void) while (1) { native_halt(); - /* - * If NMI wants to wake up CPU0, start CPU0. 
- */ - if (wakeup_cpu0()) - start_cpu0(); + + cond_wakeup_cpu0(); } } @@ -1865,9 +1865,6 @@ static bool slv_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq) return true; } -#include <asm/cpu_device_id.h> -#include <asm/intel-family.h> - #define X86_MATCH(model) \ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, \ INTEL_FAM6_##model, X86_FEATURE_APERFMPERF, NULL) diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 8627fda8d993..15b058eefc4e 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -29,12 +29,6 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, } } -/* - * This function returns an error if it detects any unreliable features of the - * stack. Otherwise it guarantees that the stack trace is reliable. - * - * If the task is not 'current', the caller *must* ensure the task is inactive. - */ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task) { diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c index 9442c4136c38..ea028e736831 100644 --- a/arch/x86/kernel/static_call.c +++ b/arch/x86/kernel/static_call.c @@ -34,7 +34,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, void break; case NOP: - code = ideal_nops[NOP_ATOMIC5]; + code = x86_nops[5]; break; case JMP: @@ -66,7 +66,7 @@ static void __static_call_validate(void *insn, bool tail) return; } else { if (opcode == CALL_INSN_OPCODE || - !memcmp(insn, ideal_nops[NOP_ATOMIC5], 5) || + !memcmp(insn, x86_nops[5], 5) || !memcmp(insn, xor5rax, 5)) return; } diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c index 653b7f617b61..8a56a6d80098 100644 --- a/arch/x86/kernel/sysfb_efi.c +++ b/arch/x86/kernel/sysfb_efi.c @@ -10,7 +10,7 @@ * EFI Quirks * Several EFI systems do not correctly advertise their boot framebuffers. * Hence, we use this static table of known broken machines and fix up the - * information so framebuffer drivers can load corectly. + * information so framebuffer drivers can load correctly. */ #include <linux/dmi.h> diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index 4c09ba110204..f9af561c3cd4 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c @@ -49,6 +49,30 @@ bool tboot_enabled(void) return tboot != NULL; } +/* noinline to prevent gcc from warning about dereferencing constant fixaddr */ +static noinline __init bool check_tboot_version(void) +{ + if (memcmp(&tboot_uuid, &tboot->uuid, sizeof(tboot->uuid))) { + pr_warn("tboot at 0x%llx is invalid\n", boot_params.tboot_addr); + return false; + } + + if (tboot->version < 5) { + pr_warn("tboot version is invalid: %u\n", tboot->version); + return false; + } + + pr_info("found shared page at phys addr 0x%llx:\n", + boot_params.tboot_addr); + pr_debug("version: %d\n", tboot->version); + pr_debug("log_addr: 0x%08x\n", tboot->log_addr); + pr_debug("shutdown_entry: 0x%x\n", tboot->shutdown_entry); + pr_debug("tboot_base: 0x%08x\n", tboot->tboot_base); + pr_debug("tboot_size: 0x%x\n", tboot->tboot_size); + + return true; +} + void __init tboot_probe(void) { /* Look for valid page-aligned address for shared page. */ @@ -66,25 +90,9 @@ void __init tboot_probe(void) /* Map and check for tboot UUID. 
*/ set_fixmap(FIX_TBOOT_BASE, boot_params.tboot_addr); - tboot = (struct tboot *)fix_to_virt(FIX_TBOOT_BASE); - if (memcmp(&tboot_uuid, &tboot->uuid, sizeof(tboot->uuid))) { - pr_warn("tboot at 0x%llx is invalid\n", boot_params.tboot_addr); + tboot = (void *)fix_to_virt(FIX_TBOOT_BASE); + if (!check_tboot_version()) tboot = NULL; - return; - } - if (tboot->version < 5) { - pr_warn("tboot version is invalid: %u\n", tboot->version); - tboot = NULL; - return; - } - - pr_info("found shared page at phys addr 0x%llx:\n", - boot_params.tboot_addr); - pr_debug("version: %d\n", tboot->version); - pr_debug("log_addr: 0x%08x\n", tboot->log_addr); - pr_debug("shutdown_entry: 0x%x\n", tboot->shutdown_entry); - pr_debug("tboot_base: 0x%08x\n", tboot->tboot_base); - pr_debug("tboot_size: 0x%x\n", tboot->tboot_size); } static pgd_t *tboot_pg_dir; diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c index 64a496a0687f..3c883e064242 100644 --- a/arch/x86/kernel/tls.c +++ b/arch/x86/kernel/tls.c @@ -164,17 +164,11 @@ int do_set_thread_area(struct task_struct *p, int idx, savesegment(fs, sel); if (sel == modified_sel) loadsegment(fs, sel); - - savesegment(gs, sel); - if (sel == modified_sel) - load_gs_index(sel); #endif -#ifdef CONFIG_X86_32_LAZY_GS savesegment(gs, sel); if (sel == modified_sel) - loadsegment(gs, sel); -#endif + load_gs_index(sel); } else { #ifdef CONFIG_X86_64 if (p->thread.fsindex == modified_sel) diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c index f5477eab5692..bd83748e2bde 100644 --- a/arch/x86/kernel/topology.c +++ b/arch/x86/kernel/topology.c @@ -113,7 +113,7 @@ int arch_register_cpu(int num) * Two known BSP/CPU0 dependencies: Resume from suspend/hibernate * depends on BSP. PIC interrupts depend on BSP. * - * If the BSP depencies are under control, one can tell kernel to + * If the BSP dependencies are under control, one can tell kernel to * enable BSP hotplug. This basically adds a control file and * one can attempt to offline BSP. */ diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index ac1874a2a70e..853ea7a80806 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -395,7 +395,7 @@ DEFINE_IDTENTRY_DF(exc_double_fault) /* * Adjust our frame so that we return straight to the #GP * vector with the expected RSP value. This is safe because - * we won't enable interupts or schedule before we invoke + * we won't enable interrupts or schedule before we invoke * general_protection, so nothing will clobber the stack * frame we just set up. 
* @@ -498,14 +498,15 @@ static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs, { u8 insn_buf[MAX_INSN_SIZE]; struct insn insn; + int ret; if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip, MAX_INSN_SIZE)) return GP_NO_HINT; - kernel_insn_init(&insn, insn_buf, MAX_INSN_SIZE); - insn_get_modrm(&insn); - insn_get_sib(&insn); + ret = insn_decode_kernel(&insn, insn_buf); + if (ret < 0) + return GP_NO_HINT; *addr = (unsigned long)insn_get_addr_ref(&insn, regs); if (*addr == -1UL) @@ -556,7 +557,7 @@ DEFINE_IDTENTRY_ERRORCODE(exc_general_protection) tsk->thread.trap_nr = X86_TRAP_GP; if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0)) - return; + goto exit; show_signal(tsk, SIGSEGV, "", desc, regs, error_code); force_sig(SIGSEGV); @@ -889,9 +890,6 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs, if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs)) dr6 &= ~DR_STEP; - if (kprobe_debug_handler(regs)) - goto out; - /* * The kernel doesn't use INT1 */ @@ -978,6 +976,10 @@ static __always_inline void exc_debug_user(struct pt_regs *regs, goto out_irq; } + /* #DB for bus lock can only be triggered from userspace. */ + if (dr6 & DR_BUS_LOCK) + handle_bus_lock(regs); + /* Add the virtual_dr6 bits for signals. */ dr6 |= current->thread.virtual_dr6; if (dr6 & (DR_STEP | DR_TRAP_BITS) || icebp) @@ -1057,7 +1059,7 @@ static void math_error(struct pt_regs *regs, int trapnr) goto exit; if (fixup_vdso_exception(regs, trapnr, 0, 0)) - return; + goto exit; force_sig_fault(SIGFPE, si_code, (void __user *)uprobe_get_trap_addr(regs)); diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index f70dffc2771f..57ec01192180 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -14,6 +14,7 @@ #include <linux/percpu.h> #include <linux/timex.h> #include <linux/static_key.h> +#include <linux/static_call.h> #include <asm/hpet.h> #include <asm/timer.h> @@ -254,7 +255,7 @@ unsigned long long sched_clock(void) bool using_native_sched_clock(void) { - return pv_ops.time.sched_clock == native_sched_clock; + return static_call_query(pv_sched_clock) == native_sched_clock; } #else unsigned long long @@ -739,7 +740,7 @@ static unsigned long pit_hpet_ptimer_calibrate_cpu(void) * 2) Reference counter. If available we use the HPET or the * PMTIMER as a reference to check the sanity of that value. * We use separate TSC readouts and check inside of the - * reference read for any possible disturbance. We dicard + * reference read for any possible disturbance. We discard * disturbed values here as well. We do that around the PIT * calibration delay loop as we have to wait for a certain * amount of time anyway. @@ -1079,7 +1080,7 @@ static void tsc_resume(struct clocksource *cs) * very small window right after one CPU updated cycle_last under * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which * is smaller than the cycle_last reference value due to a TSC which - * is slighty behind. This delta is nowhere else observable, but in + * is slightly behind. This delta is nowhere else observable, but in * that case it results in a forward time jump in the range of hours * due to the unsigned delta calculation of the time keeping core * code, which is necessary to support wrapping clocksources like pm @@ -1264,7 +1265,7 @@ EXPORT_SYMBOL(convert_art_to_tsc); * corresponding clocksource * @cycles: System counter value * @cs: Clocksource corresponding to system counter value. 
Used - * by timekeeping code to verify comparibility of two cycle + * by timekeeping code to verify comparability of two cycle * values. */ diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index 3d3c761eb74a..50a4515fe0ad 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -472,7 +472,7 @@ retry: /* * Add the result to the previous adjustment value. * - * The adjustement value is slightly off by the overhead of the + * The adjustment value is slightly off by the overhead of the * sync mechanism (observed values are ~200 TSC cycles), but this * really depends on CPU, node distance and frequency. So * compensating for this is hard to get right. Experiments show diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c index f6225bf22c02..8daa70b0d2da 100644 --- a/arch/x86/kernel/umip.c +++ b/arch/x86/kernel/umip.c @@ -272,7 +272,7 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst, * by whether the operand is a register or a memory location. * If operand is a register, return as many bytes as the operand * size. If operand is memory, return only the two least - * siginificant bytes. + * significant bytes. */ if (X86_MODRM_MOD(insn->modrm.value) == 3) *data_size = insn->opnd_bytes; @@ -356,7 +356,7 @@ bool fixup_umip_exception(struct pt_regs *regs) if (!nr_copied) return false; - if (!insn_decode(&insn, regs, buf, nr_copied)) + if (!insn_decode_from_regs(&insn, regs, buf, nr_copied)) return false; umip_inst = identify_insn(&insn); diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index a2b413394917..b63cf8f7745e 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -276,12 +276,12 @@ static bool is_prefix_bad(struct insn *insn) static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64) { + enum insn_mode m = x86_64 ? INSN_MODE_64 : INSN_MODE_32; u32 volatile *good_insns; + int ret; - insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64); - /* has the side-effect of processing the entire instruction */ - insn_get_length(insn); - if (!insn_complete(insn)) + ret = insn_decode(insn, auprobe->insn, sizeof(auprobe->insn), m); + if (ret < 0) return -ENOEXEC; if (is_prefix_bad(insn)) diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index a788d5120d4d..f6b93a35ce14 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -84,6 +84,18 @@ config KVM_INTEL To compile this as a module, choose M here: the module will be called kvm-intel. +config X86_SGX_KVM + bool "Software Guard eXtensions (SGX) Virtualization" + depends on X86_SGX && KVM_INTEL + help + + Enables KVM guests to create SGX enclaves. + + This includes support to expose "raw" unreclaimable enclave memory to + guests via a device node, e.g. /dev/sgx_vepc. + + If unsure, say N. 
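The umip.c and uprobes.c hunks above drop the open-coded insn_init() + insn_get_length() + insn_complete() sequence in favor of the consolidated insn_decode()/insn_decode_from_regs() entry points, which validate and fully decode in one call and report failure with a negative return value. A minimal kernel-context sketch of the new calling convention, modeled on those hunks (the wrapper name and the -ENOEXEC choice are illustrative, not part of the patch):

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/insn.h>

/*
 * Sketch only: one insn_decode() call replaces the old multi-step decode
 * plus insn_complete() check used by uprobes/UMIP before this series.
 */
static int decode_probe_insn(struct insn *insn, const void *buf, int buf_len,
			     bool x86_64)
{
	enum insn_mode mode = x86_64 ? INSN_MODE_64 : INSN_MODE_32;
	int ret;

	ret = insn_decode(insn, buf, buf_len, mode);
	if (ret < 0)
		return -ENOEXEC;	/* previously inferred from !insn_complete() */

	return 0;
}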
+ config KVM_AMD tristate "KVM for AMD processors support" depends on KVM diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 1b4766fe1de2..c589db5d91b3 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-y += -Iarch/x86/kvm +ccflags-y += -I $(srctree)/arch/x86/kvm ccflags-$(CONFIG_KVM_WERROR) += -Werror ifeq ($(CONFIG_FRAME_POINTER),y) @@ -23,6 +23,8 @@ kvm-$(CONFIG_KVM_XEN) += xen.o kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \ vmx/evmcs.o vmx/nested.o vmx/posted_intr.o +kvm-intel-$(CONFIG_X86_SGX_KVM) += vmx/sgx.o + kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o obj-$(CONFIG_KVM) += kvm.o diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 6bd2f8b830e4..19606a341888 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -18,6 +18,7 @@ #include <asm/processor.h> #include <asm/user.h> #include <asm/fpu/xstate.h> +#include <asm/sgx.h> #include "cpuid.h" #include "lapic.h" #include "mmu.h" @@ -28,7 +29,7 @@ * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be * aligned to sizeof(unsigned long) because it's not accessed via bitops. */ -u32 kvm_cpu_caps[NCAPINTS] __read_mostly; +u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly; EXPORT_SYMBOL_GPL(kvm_cpu_caps); static u32 xstate_required_size(u64 xstate_bv, bool compacted) @@ -53,6 +54,7 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted) } #define F feature_bit +#define SF(name) (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0) static inline struct kvm_cpuid_entry2 *cpuid_entry2_find( struct kvm_cpuid_entry2 *entries, int nent, u32 function, u32 index) @@ -170,6 +172,21 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) vcpu->arch.guest_supported_xcr0 = (best->eax | ((u64)best->edx << 32)) & supported_xcr0; + /* + * Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate + * the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's + * requested XCR0 value. The enclave's XFRM must be a subset of XCRO + * at the time of EENTER, thus adjust the allowed XFRM by the guest's + * supported XCR0. Similar to XCR0 handling, FP and SSE are forced to + * '1' even on CPUs that don't support XSAVE. + */ + best = kvm_find_cpuid_entry(vcpu, 0x12, 0x1); + if (best) { + best->ecx &= vcpu->arch.guest_supported_xcr0 & 0xffffffff; + best->edx &= vcpu->arch.guest_supported_xcr0 >> 32; + best->ecx |= XFEATURE_MASK_FPSSE; + } + kvm_update_pv_runtime(vcpu); vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); @@ -347,13 +364,13 @@ out: return r; } -static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask) +/* Mask kvm_cpu_caps for @leaf with the raw CPUID capabilities of this CPU. */ +static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf) { const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32); struct kvm_cpuid_entry2 entry; reverse_cpuid_check(leaf); - kvm_cpu_caps[leaf] &= mask; cpuid_count(cpuid.function, cpuid.index, &entry.eax, &entry.ebx, &entry.ecx, &entry.edx); @@ -361,6 +378,27 @@ static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask) kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg); } +static __always_inline +void kvm_cpu_cap_init_scattered(enum kvm_only_cpuid_leafs leaf, u32 mask) +{ + /* Use kvm_cpu_cap_mask for non-scattered leafs. 
*/ + BUILD_BUG_ON(leaf < NCAPINTS); + + kvm_cpu_caps[leaf] = mask; + + __kvm_cpu_cap_mask(leaf); +} + +static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask) +{ + /* Use kvm_cpu_cap_init_scattered for scattered leafs. */ + BUILD_BUG_ON(leaf >= NCAPINTS); + + kvm_cpu_caps[leaf] &= mask; + + __kvm_cpu_cap_mask(leaf); +} + void kvm_set_cpu_caps(void) { unsigned int f_nx = is_efer_nx() ? F(NX) : 0; @@ -371,12 +409,13 @@ void kvm_set_cpu_caps(void) unsigned int f_gbpages = 0; unsigned int f_lm = 0; #endif + memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps)); - BUILD_BUG_ON(sizeof(kvm_cpu_caps) > + BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) > sizeof(boot_cpu_data.x86_capability)); memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability, - sizeof(kvm_cpu_caps)); + sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps))); kvm_cpu_cap_mask(CPUID_1_ECX, /* @@ -407,7 +446,7 @@ void kvm_set_cpu_caps(void) ); kvm_cpu_cap_mask(CPUID_7_0_EBX, - F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) | + F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) | F(RTM) | 0 /*MPX*/ | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) | F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) | @@ -418,7 +457,8 @@ void kvm_set_cpu_caps(void) F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) | F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) | F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) | - F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/ + F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/ | + F(SGX_LC) ); /* Set LA57 based on hardware capability. */ if (cpuid_ecx(7) & F(LA57)) @@ -457,6 +497,10 @@ void kvm_set_cpu_caps(void) F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) ); + kvm_cpu_cap_init_scattered(CPUID_12_EAX, + SF(SGX1) | SF(SGX2) + ); + kvm_cpu_cap_mask(CPUID_8000_0001_ECX, F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ | F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) | @@ -514,6 +558,10 @@ void kvm_set_cpu_caps(void) */ kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0); + kvm_cpu_cap_mask(CPUID_8000_001F_EAX, + 0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) | + F(SME_COHERENT)); + kvm_cpu_cap_mask(CPUID_C000_0001_EDX, F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) | F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) | @@ -778,6 +826,38 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->edx = 0; } break; + case 0x12: + /* Intel SGX */ + if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) { + entry->eax = entry->ebx = entry->ecx = entry->edx = 0; + break; + } + + /* + * Index 0: Sub-features, MISCSELECT (a.k.a extended features) + * and max enclave sizes. The SGX sub-features and MISCSELECT + * are restricted by kernel and KVM capabilities (like most + * feature flags), while enclave size is unrestricted. + */ + cpuid_entry_override(entry, CPUID_12_EAX); + entry->ebx &= SGX_MISC_EXINFO; + + entry = do_host_cpuid(array, function, 1); + if (!entry) + goto out; + + /* + * Index 1: SECS.ATTRIBUTES. ATTRIBUTES are restricted a la + * feature flags. Advertise all supported flags, including + * privileged attributes that require explicit opt-in from + * userspace. ATTRIBUTES.XFRM is not adjusted as userspace is + * expected to derive it from supported XCR0. 
+ */ + entry->eax &= SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT | + SGX_ATTR_PROVISIONKEY | SGX_ATTR_EINITTOKENKEY | + SGX_ATTR_KSS; + entry->ebx &= 0; + break; /* Intel PT */ case 0x14: if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) { @@ -869,8 +949,10 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) break; /* Support memory encryption cpuid if host supports it */ case 0x8000001F: - if (!boot_cpu_has(X86_FEATURE_SEV)) + if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) entry->eax = entry->ebx = entry->ecx = entry->edx = 0; + else + cpuid_entry_override(entry, CPUID_8000_001F_EAX); break; /*Add support for Centaur's CPUID instruction*/ case 0xC0000000: @@ -1033,7 +1115,7 @@ EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry); * - Centaur: 0xc0000000 - 0xcfffffff * * The Hypervisor class is further subdivided into sub-classes that each act as - * their own indepdent class associated with a 0x100 byte range. E.g. if Qemu + * their own independent class associated with a 0x100 byte range. E.g. if Qemu * is advertising support for both HyperV and KVM, the resulting Hypervisor * CPUID sub-classes are: * diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index 2a0c5064497f..c99edfff7f82 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -3,11 +3,12 @@ #define ARCH_X86_KVM_CPUID_H #include "x86.h" +#include "reverse_cpuid.h" #include <asm/cpu.h> #include <asm/processor.h> #include <uapi/asm/kvm_para.h> -extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly; +extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly; void kvm_set_cpu_caps(void); void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu); @@ -58,144 +59,8 @@ static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa) return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE); } -struct cpuid_reg { - u32 function; - u32 index; - int reg; -}; - -static const struct cpuid_reg reverse_cpuid[] = { - [CPUID_1_EDX] = { 1, 0, CPUID_EDX}, - [CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX}, - [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX}, - [CPUID_1_ECX] = { 1, 0, CPUID_ECX}, - [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX}, - [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX}, - [CPUID_7_0_EBX] = { 7, 0, CPUID_EBX}, - [CPUID_D_1_EAX] = { 0xd, 1, CPUID_EAX}, - [CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX}, - [CPUID_6_EAX] = { 6, 0, CPUID_EAX}, - [CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX}, - [CPUID_7_ECX] = { 7, 0, CPUID_ECX}, - [CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX}, - [CPUID_7_EDX] = { 7, 0, CPUID_EDX}, - [CPUID_7_1_EAX] = { 7, 1, CPUID_EAX}, -}; - -/* - * Reverse CPUID and its derivatives can only be used for hardware-defined - * feature words, i.e. words whose bits directly correspond to a CPUID leaf. - * Retrieving a feature bit or masking guest CPUID from a Linux-defined word - * is nonsensical as the bit number/mask is an arbitrary software-defined value - * and can't be used by KVM to query/control guest capabilities. And obviously - * the leaf being queried must have an entry in the lookup table. - */ -static __always_inline void reverse_cpuid_check(unsigned int x86_leaf) -{ - BUILD_BUG_ON(x86_leaf == CPUID_LNX_1); - BUILD_BUG_ON(x86_leaf == CPUID_LNX_2); - BUILD_BUG_ON(x86_leaf == CPUID_LNX_3); - BUILD_BUG_ON(x86_leaf == CPUID_LNX_4); - BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid)); - BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0); -} - -/* - * Retrieve the bit mask from an X86_FEATURE_* definition. 
Features contain - * the hardware defined bit number (stored in bits 4:0) and a software defined - * "word" (stored in bits 31:5). The word is used to index into arrays of - * bit masks that hold the per-cpu feature capabilities, e.g. this_cpu_has(). - */ -static __always_inline u32 __feature_bit(int x86_feature) -{ - reverse_cpuid_check(x86_feature / 32); - return 1 << (x86_feature & 31); -} - -#define feature_bit(name) __feature_bit(X86_FEATURE_##name) - -static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature) -{ - unsigned int x86_leaf = x86_feature / 32; - - reverse_cpuid_check(x86_leaf); - return reverse_cpuid[x86_leaf]; -} - -static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, - u32 reg) -{ - switch (reg) { - case CPUID_EAX: - return &entry->eax; - case CPUID_EBX: - return &entry->ebx; - case CPUID_ECX: - return &entry->ecx; - case CPUID_EDX: - return &entry->edx; - default: - BUILD_BUG(); - return NULL; - } -} - -static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, - unsigned int x86_feature) -{ - const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature); - - return __cpuid_entry_get_reg(entry, cpuid.reg); -} - -static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry, - unsigned int x86_feature) -{ - u32 *reg = cpuid_entry_get_reg(entry, x86_feature); - - return *reg & __feature_bit(x86_feature); -} - -static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry, - unsigned int x86_feature) -{ - return cpuid_entry_get(entry, x86_feature); -} - -static __always_inline void cpuid_entry_clear(struct kvm_cpuid_entry2 *entry, - unsigned int x86_feature) -{ - u32 *reg = cpuid_entry_get_reg(entry, x86_feature); - - *reg &= ~__feature_bit(x86_feature); -} - -static __always_inline void cpuid_entry_set(struct kvm_cpuid_entry2 *entry, - unsigned int x86_feature) -{ - u32 *reg = cpuid_entry_get_reg(entry, x86_feature); - - *reg |= __feature_bit(x86_feature); -} - -static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry, - unsigned int x86_feature, - bool set) -{ - u32 *reg = cpuid_entry_get_reg(entry, x86_feature); - - /* - * Open coded instead of using cpuid_entry_{clear,set}() to coerce the - * compiler into using CMOV instead of Jcc when possible. 
- */ - if (set) - *reg |= __feature_bit(x86_feature); - else - *reg &= ~__feature_bit(x86_feature); -} - static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry, - enum cpuid_leafs leaf) + unsigned int leaf) { u32 *reg = cpuid_entry_get_reg(entry, leaf * 32); @@ -248,6 +113,14 @@ static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu) is_guest_vendor_hygon(best->ebx, best->ecx, best->edx)); } +static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu) +{ + struct kvm_cpuid_entry2 *best; + + best = kvm_find_cpuid_entry(vcpu, 0, 0); + return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx); +} + static inline int guest_cpuid_family(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; @@ -308,7 +181,7 @@ static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu) static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature) { - unsigned int x86_leaf = x86_feature / 32; + unsigned int x86_leaf = __feature_leaf(x86_feature); reverse_cpuid_check(x86_leaf); kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature); @@ -316,7 +189,7 @@ static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature) static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature) { - unsigned int x86_leaf = x86_feature / 32; + unsigned int x86_leaf = __feature_leaf(x86_feature); reverse_cpuid_check(x86_leaf); kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature); @@ -324,7 +197,7 @@ static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature) static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature) { - unsigned int x86_leaf = x86_feature / 32; + unsigned int x86_leaf = __feature_leaf(x86_feature); reverse_cpuid_check(x86_leaf); return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature); diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index f7970ba6219f..77e1c89a95a7 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -3222,7 +3222,7 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, } /* - * Now load segment descriptors. If fault happenes at this stage + * Now load segment descriptors. 
If fault happens at this stage * it is handled in a context of new task */ ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, @@ -4220,7 +4220,7 @@ static bool valid_cr(int nr) } } -static int check_cr_read(struct x86_emulate_ctxt *ctxt) +static int check_cr_access(struct x86_emulate_ctxt *ctxt) { if (!valid_cr(ctxt->modrm_reg)) return emulate_ud(ctxt); @@ -4228,80 +4228,6 @@ static int check_cr_read(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } -static int check_cr_write(struct x86_emulate_ctxt *ctxt) -{ - u64 new_val = ctxt->src.val64; - int cr = ctxt->modrm_reg; - u64 efer = 0; - - static u64 cr_reserved_bits[] = { - 0xffffffff00000000ULL, - 0, 0, 0, /* CR3 checked later */ - CR4_RESERVED_BITS, - 0, 0, 0, - CR8_RESERVED_BITS, - }; - - if (!valid_cr(cr)) - return emulate_ud(ctxt); - - if (new_val & cr_reserved_bits[cr]) - return emulate_gp(ctxt, 0); - - switch (cr) { - case 0: { - u64 cr4; - if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) || - ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD))) - return emulate_gp(ctxt, 0); - - cr4 = ctxt->ops->get_cr(ctxt, 4); - ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); - - if ((new_val & X86_CR0_PG) && (efer & EFER_LME) && - !(cr4 & X86_CR4_PAE)) - return emulate_gp(ctxt, 0); - - break; - } - case 3: { - u64 rsvd = 0; - - ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); - if (efer & EFER_LMA) { - u64 maxphyaddr; - u32 eax, ebx, ecx, edx; - - eax = 0x80000008; - ecx = 0; - if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, - &edx, true)) - maxphyaddr = eax & 0xff; - else - maxphyaddr = 36; - rsvd = rsvd_bits(maxphyaddr, 63); - if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE) - rsvd &= ~X86_CR3_PCID_NOFLUSH; - } - - if (new_val & rsvd) - return emulate_gp(ctxt, 0); - - break; - } - case 4: { - ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); - - if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE)) - return emulate_gp(ctxt, 0); - - break; - } - } - - return X86EMUL_CONTINUE; -} - static int check_dr7_gd(struct x86_emulate_ctxt *ctxt) { unsigned long dr7; @@ -4841,10 +4767,10 @@ static const struct opcode twobyte_table[256] = { D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */ D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */ /* 0x20 - 0x2F */ - DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read), + DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access), DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read), IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write, - check_cr_write), + check_cr_access), IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write, check_dr_write), N, N, N, N, diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 58fa8c029867..f98370a39936 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -520,10 +520,10 @@ static u64 get_time_ref_counter(struct kvm *kvm) u64 tsc; /* - * The guest has not set up the TSC page or the clock isn't - * stable, fall back to get_kvmclock_ns. + * Fall back to get_kvmclock_ns() when TSC page hasn't been set up, + * is broken, disabled or being updated. */ - if (!hv->tsc_ref.tsc_sequence) + if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET) return div_u64(get_kvmclock_ns(kvm), 100); vcpu = kvm_get_vcpu(kvm, 0); @@ -1077,6 +1077,21 @@ static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock, return true; } +/* + * Don't touch TSC page values if the guest has opted for TSC emulation after + * migration. 
KVM doesn't fully support reenlightenment notifications and TSC + * access emulation and Hyper-V is known to expect the values in TSC page to + * stay constant before TSC access emulation is disabled from guest side + * (HV_X64_MSR_TSC_EMULATION_STATUS). KVM userspace is expected to preserve TSC + * frequency and guest visible TSC value across migration (and prevent it when + * TSC scaling is unsupported). + */ +static inline bool tsc_page_update_unsafe(struct kvm_hv *hv) +{ + return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) && + hv->hv_tsc_emulation_control; +} + void kvm_hv_setup_tsc_page(struct kvm *kvm, struct pvclock_vcpu_time_info *hv_clock) { @@ -1087,7 +1102,8 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm, BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence)); BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0); - if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)) + if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN || + hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET) return; mutex_lock(&hv->hv_lock); @@ -1101,7 +1117,15 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm, */ if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn), &tsc_seq, sizeof(tsc_seq)))) + goto out_err; + + if (tsc_seq && tsc_page_update_unsafe(hv)) { + if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref))) + goto out_err; + + hv->hv_tsc_page_status = HV_TSC_PAGE_SET; goto out_unlock; + } /* * While we're computing and writing the parameters, force the @@ -1110,15 +1134,15 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm, hv->tsc_ref.tsc_sequence = 0; if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence))) - goto out_unlock; + goto out_err; if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref)) - goto out_unlock; + goto out_err; /* Ensure sequence is zero before writing the rest of the struct. */ smp_wmb(); if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref))) - goto out_unlock; + goto out_err; /* * Now switch to the TSC page mechanism by writing the sequence. 
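The hyperv.c hunks here replace the single "tsc_ref.tsc_sequence != 0" check with an explicit hv_tsc_page_status state machine (UNSET, GUEST_CHANGED, HOST_CHANGED, SET, UPDATING, BROKEN). Below is a small self-contained user-space model of the transitions visible in these hunks; the state names mirror the patch, while the helper functions and numeric values are purely illustrative:

#include <stdbool.h>
#include <stdio.h>

/* State names mirror the patch; ordering and values here are illustrative. */
enum hv_tsc_page_status {
	HV_TSC_PAGE_UNSET,
	HV_TSC_PAGE_GUEST_CHANGED,
	HV_TSC_PAGE_HOST_CHANGED,
	HV_TSC_PAGE_SET,
	HV_TSC_PAGE_UPDATING,
	HV_TSC_PAGE_BROKEN,
};

/* Guest or host writes HV_X64_MSR_REFERENCE_TSC, enabling or disabling the page. */
static enum hv_tsc_page_status msr_write(bool enable, bool host)
{
	if (!enable)
		return HV_TSC_PAGE_UNSET;
	return host ? HV_TSC_PAGE_HOST_CHANGED : HV_TSC_PAGE_GUEST_CHANGED;
}

/* Setup ends in SET on success or BROKEN on a failed guest-memory write. */
static enum hv_tsc_page_status setup(enum hv_tsc_page_status s, bool guest_write_ok)
{
	if (s == HV_TSC_PAGE_UNSET || s == HV_TSC_PAGE_BROKEN)
		return s;
	return guest_write_ok ? HV_TSC_PAGE_SET : HV_TSC_PAGE_BROKEN;
}

/* Invalidation moves only a fully SET page to UPDATING, preserving *_CHANGED. */
static enum hv_tsc_page_status invalidate(enum hv_tsc_page_status s)
{
	return (s == HV_TSC_PAGE_SET) ? HV_TSC_PAGE_UPDATING : s;
}

int main(void)
{
	enum hv_tsc_page_status s = msr_write(true, false);	/* guest enables page */

	s = setup(s, true);
	s = invalidate(s);
	printf("final state: %d (HV_TSC_PAGE_UPDATING == %d)\n", s, HV_TSC_PAGE_UPDATING);
	return 0;
}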
@@ -1131,8 +1155,45 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm, smp_wmb(); hv->tsc_ref.tsc_sequence = tsc_seq; - kvm_write_guest(kvm, gfn_to_gpa(gfn), - &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)); + if (kvm_write_guest(kvm, gfn_to_gpa(gfn), + &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence))) + goto out_err; + + hv->hv_tsc_page_status = HV_TSC_PAGE_SET; + goto out_unlock; + +out_err: + hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN; +out_unlock: + mutex_unlock(&hv->hv_lock); +} + +void kvm_hv_invalidate_tsc_page(struct kvm *kvm) +{ + struct kvm_hv *hv = to_kvm_hv(kvm); + u64 gfn; + + if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN || + hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET || + tsc_page_update_unsafe(hv)) + return; + + mutex_lock(&hv->hv_lock); + + if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)) + goto out_unlock; + + /* Preserve HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states */ + if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET) + hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING; + + gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; + + hv->tsc_ref.tsc_sequence = 0; + if (kvm_write_guest(kvm, gfn_to_gpa(gfn), + &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence))) + hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN; + out_unlock: mutex_unlock(&hv->hv_lock); } @@ -1193,8 +1254,15 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, } case HV_X64_MSR_REFERENCE_TSC: hv->hv_tsc_page = data; - if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) + if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) { + if (!host) + hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED; + else + hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED; kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); + } else { + hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET; + } break; case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: return kvm_hv_msr_set_crash_data(kvm, @@ -1229,6 +1297,9 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, hv->hv_tsc_emulation_control = data; break; case HV_X64_MSR_TSC_EMULATION_STATUS: + if (data && !host) + return 1; + hv->hv_tsc_emulation_status = data; break; case HV_X64_MSR_TIME_REF_COUNT: diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h index e951af1fcb2c..60547d5cb6d7 100644 --- a/arch/x86/kvm/hyperv.h +++ b/arch/x86/kvm/hyperv.h @@ -133,6 +133,7 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu); void kvm_hv_setup_tsc_page(struct kvm *kvm, struct pvclock_vcpu_time_info *hv_clock); +void kvm_hv_invalidate_tsc_page(struct kvm *kvm); void kvm_hv_init_vm(struct kvm *kvm); void kvm_hv_destroy_vm(struct kvm *kvm); diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c index 8a4de3f12820..d5b72a08e566 100644 --- a/arch/x86/kvm/irq_comm.c +++ b/arch/x86/kvm/irq_comm.c @@ -269,7 +269,7 @@ int kvm_set_routing_entry(struct kvm *kvm, const struct kvm_irq_routing_entry *ue) { /* We can't check irqchip_in_kernel() here as some callers are - * currently inititalizing the irqchip. Other callers should therefore + * currently initializing the irqchip. Other callers should therefore * check kvm_arch_can_set_irq_routing() before calling this function. 
*/ switch (ue->type) { diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 2e11da2f5621..3db5c42c9ecd 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -62,7 +62,12 @@ static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu, __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty); } -static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg) +/* + * The "raw" register helpers are only for cases where the full 64 bits of a + * register are read/written irrespective of current vCPU mode. In other words, + * odds are good you shouldn't be using the raw variants. + */ +static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg) { if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS)) return 0; @@ -73,8 +78,8 @@ static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg) return vcpu->arch.regs[reg]; } -static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg, - unsigned long val) +static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg, + unsigned long val) { if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS)) return; @@ -85,22 +90,22 @@ static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg, static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu) { - return kvm_register_read(vcpu, VCPU_REGS_RIP); + return kvm_register_read_raw(vcpu, VCPU_REGS_RIP); } static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val) { - kvm_register_write(vcpu, VCPU_REGS_RIP, val); + kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val); } static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu) { - return kvm_register_read(vcpu, VCPU_REGS_RSP); + return kvm_register_read_raw(vcpu, VCPU_REGS_RSP); } static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val) { - kvm_register_write(vcpu, VCPU_REGS_RSP, val); + kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val); } static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index cc369b9ad8f1..152591f9243a 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -296,6 +296,10 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val) atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); } + + /* Check if there are APF page ready requests pending */ + if (enabled) + kvm_make_request(KVM_REQ_APF_READY, apic->vcpu); } static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id) @@ -2261,6 +2265,8 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) if (value & MSR_IA32_APICBASE_ENABLE) { kvm_apic_set_xapic_id(apic, vcpu->vcpu_id); static_branch_slow_dec_deferred(&apic_hw_disabled); + /* Check if there are APF page ready requests pending */ + kvm_make_request(KVM_REQ_APF_READY, vcpu); } else { static_branch_inc(&apic_hw_disabled.key); atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); @@ -2869,7 +2875,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) return; if (is_guest_mode(vcpu)) { - r = kvm_x86_ops.nested_ops->check_events(vcpu); + r = kvm_check_nested_events(vcpu); if (r < 0) return; /* diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index c68bfc3e2402..88d0ed5225a4 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -59,7 +59,8 @@ static __always_inline u64 rsvd_bits(int s, int e) return ((2ULL << (e - s)) - 1) << s; } -void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask); +void 
kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask); +void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only); void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context); @@ -73,6 +74,10 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu); int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, u64 fault_address, char *insn, int insn_len); +int kvm_mmu_load(struct kvm_vcpu *vcpu); +void kvm_mmu_unload(struct kvm_vcpu *vcpu); +void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); + static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu) { if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE)) @@ -102,8 +107,8 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu) if (!VALID_PAGE(root_hpa)) return; - static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa | kvm_get_active_pcid(vcpu), - vcpu->arch.mmu->shadow_root_level); + static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa, + vcpu->arch.mmu->shadow_root_level); } int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, @@ -124,7 +129,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, * write-protects guest page to sync the guest modification, b) another one is * used to sync dirty bitmap when we do KVM_GET_DIRTY_LOG. The differences * between these two sorts are: - * 1) the first case clears SPTE_MMU_WRITEABLE bit. + * 1) the first case clears MMU-writable bit. * 2) the first case requires flushing tlb immediately avoiding corrupting * shadow page table between all vcpus so it should be in the protection of * mmu-lock. And the another case does not need to flush tlb until returning @@ -135,17 +140,17 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, * So, there is the problem: the first case can meet the corrupted tlb caused * by another case which write-protects pages but without flush tlb * immediately. In order to making the first case be aware this problem we let - * it flush tlb if we try to write-protect a spte whose SPTE_MMU_WRITEABLE bit - * is set, it works since another case never touches SPTE_MMU_WRITEABLE bit. + * it flush tlb if we try to write-protect a spte whose MMU-writable bit + * is set, it works since another case never touches MMU-writable bit. * * Anyway, whenever a spte is updated (only permission and status bits are - * changed) we need to check whether the spte with SPTE_MMU_WRITEABLE becomes + * changed) we need to check whether the spte with MMU-writable becomes * readonly, if that happens, we need to flush tlb. Fortunately, * mmu_spte_update() has already handled it perfectly. * - * The rules to use SPTE_MMU_WRITEABLE and PT_WRITABLE_MASK: + * The rules to use MMU-writable and PT_WRITABLE_MASK: * - if we want to see if it has writable tlb entry or if the spte can be - * writable on the mmu mapping, check SPTE_MMU_WRITEABLE, this is the most + * writable on the mmu mapping, check MMU-writable, this is the most * case, otherwise * - if we fix page fault on the spte or do write-protection by dirty logging, * check PT_WRITABLE_MASK. 
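The mmu.h comment above distinguishes two write-protection paths by whether the MMU-writable bit is touched. The following standalone toy model restates that rule, shaped after spte_write_protect() in the mmu.c hunk that follows; the bit positions and names are illustrative, not the real shadow_mmu_writable_mask value:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions; the real masks are configured elsewhere in KVM. */
#define PT_WRITABLE_BIT		(1ULL << 1)
#define MMU_WRITABLE_BIT	(1ULL << 58)

/*
 * Modeled after spte_write_protect(): dirty logging clears only the hardware
 * writable bit, while full protection (guest page-table sync) clears both.
 */
static uint64_t write_protect(uint64_t spte, bool pt_protect)
{
	if (pt_protect)
		spte &= ~MMU_WRITABLE_BIT;
	return spte & ~PT_WRITABLE_BIT;
}

/* "Could a writable TLB entry still exist for this SPTE?" -> check MMU-writable. */
static bool may_have_writable_tlb_entry(uint64_t spte)
{
	return spte & MMU_WRITABLE_BIT;
}

int main(void)
{
	uint64_t spte = PT_WRITABLE_BIT | MMU_WRITABLE_BIT;

	spte = write_protect(spte, false);	/* dirty-logging path */
	printf("after dirty-log protect: tlb maybe writable = %d\n",
	       may_have_writable_tlb_entry(spte));

	spte = write_protect(spte, true);	/* sync / full-protect path */
	printf("after full protect:      tlb maybe writable = %d\n",
	       may_have_writable_tlb_entry(spte));
	return 0;
}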
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index d75524bc8423..4b3ee244ebe0 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -48,6 +48,7 @@ #include <asm/memtype.h> #include <asm/cmpxchg.h> #include <asm/io.h> +#include <asm/set_memory.h> #include <asm/vmx.h> #include <asm/kvm_page_track.h> #include "trace.h" @@ -215,10 +216,10 @@ bool is_nx_huge_page_enabled(void) static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, unsigned int access) { - u64 mask = make_mmio_spte(vcpu, gfn, access); + u64 spte = make_mmio_spte(vcpu, gfn, access); - trace_mark_mmio_spte(sptep, gfn, mask); - mmu_spte_set(sptep, mask); + trace_mark_mmio_spte(sptep, gfn, spte); + mmu_spte_set(sptep, spte); } static gfn_t get_mmio_spte_gfn(u64 spte) @@ -236,17 +237,6 @@ static unsigned get_mmio_spte_access(u64 spte) return spte & shadow_mmio_access_mask; } -static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, - kvm_pfn_t pfn, unsigned int access) -{ - if (unlikely(is_noslot_pfn(pfn))) { - mark_mmio_spte(vcpu, sptep, gfn, access); - return true; - } - - return false; -} - static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte) { u64 kvm_gen, spte_gen, gen; @@ -725,8 +715,7 @@ static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) * handling slots that are not large page aligned. */ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, - struct kvm_memory_slot *slot, - int level) + const struct kvm_memory_slot *slot, int level) { unsigned long idx; @@ -1118,7 +1107,7 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect) rmap_printk("spte %p %llx\n", sptep, *sptep); if (pt_protect) - spte &= ~SPTE_MMU_WRITEABLE; + spte &= ~shadow_mmu_writable_mask; spte = spte & ~PT_WRITABLE_MASK; return mmu_spte_update(sptep, spte); @@ -1308,26 +1297,25 @@ static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, return flush; } -static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level, - unsigned long data) +static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot, gfn_t gfn, int level, + pte_t unused) { return kvm_zap_rmapp(kvm, rmap_head, slot); } -static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level, - unsigned long data) +static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot, gfn_t gfn, int level, + pte_t pte) { u64 *sptep; struct rmap_iterator iter; int need_flush = 0; u64 new_spte; - pte_t *ptep = (pte_t *)data; kvm_pfn_t new_pfn; - WARN_ON(pte_huge(*ptep)); - new_pfn = pte_pfn(*ptep); + WARN_ON(pte_huge(pte)); + new_pfn = pte_pfn(pte); restart: for_each_rmap_spte(rmap_head, &iter, sptep) { @@ -1336,7 +1324,7 @@ restart: need_flush = 1; - if (pte_write(*ptep)) { + if (pte_write(pte)) { pte_list_remove(rmap_head, sptep); goto restart; } else { @@ -1424,93 +1412,52 @@ static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator) slot_rmap_walk_okay(_iter_); \ slot_rmap_walk_next(_iter_)) -static __always_inline int -kvm_handle_hva_range(struct kvm *kvm, - unsigned long start, - unsigned long end, - unsigned long data, - int (*handler)(struct kvm *kvm, - struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, - gfn_t gfn, - int level, - unsigned long data)) +typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head 
*rmap_head, + struct kvm_memory_slot *slot, gfn_t gfn, + int level, pte_t pte); + +static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm, + struct kvm_gfn_range *range, + rmap_handler_t handler) { - struct kvm_memslots *slots; - struct kvm_memory_slot *memslot; struct slot_rmap_walk_iterator iterator; - int ret = 0; - int i; - - for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { - slots = __kvm_memslots(kvm, i); - kvm_for_each_memslot(memslot, slots) { - unsigned long hva_start, hva_end; - gfn_t gfn_start, gfn_end; + bool ret = false; - hva_start = max(start, memslot->userspace_addr); - hva_end = min(end, memslot->userspace_addr + - (memslot->npages << PAGE_SHIFT)); - if (hva_start >= hva_end) - continue; - /* - * {gfn(page) | page intersects with [hva_start, hva_end)} = - * {gfn_start, gfn_start+1, ..., gfn_end-1}. - */ - gfn_start = hva_to_gfn_memslot(hva_start, memslot); - gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); - - for_each_slot_rmap_range(memslot, PG_LEVEL_4K, - KVM_MAX_HUGEPAGE_LEVEL, - gfn_start, gfn_end - 1, - &iterator) - ret |= handler(kvm, iterator.rmap, memslot, - iterator.gfn, iterator.level, data); - } - } + for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL, + range->start, range->end - 1, &iterator) + ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn, + iterator.level, range->pte); return ret; } -static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, - unsigned long data, - int (*handler)(struct kvm *kvm, - struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, - gfn_t gfn, int level, - unsigned long data)) -{ - return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); -} - -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, - unsigned flags) +bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) { - int r; + bool flush; - r = kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); + flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp); if (is_tdp_mmu_enabled(kvm)) - r |= kvm_tdp_mmu_zap_hva_range(kvm, start, end); + flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush); - return r; + return flush; } -int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - int r; + bool flush; - r = kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp); + flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp); if (is_tdp_mmu_enabled(kvm)) - r |= kvm_tdp_mmu_set_spte_hva(kvm, hva, &pte); + flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range); - return r; + return flush; } -static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level, - unsigned long data) +static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot, gfn_t gfn, int level, + pte_t unused) { u64 *sptep; struct rmap_iterator iter; @@ -1519,13 +1466,12 @@ static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, for_each_rmap_spte(rmap_head, &iter, sptep) young |= mmu_spte_age(sptep); - trace_kvm_age_page(gfn, level, slot, young); return young; } -static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, - int level, unsigned long data) +static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot, gfn_t gfn, + int level, pte_t unused) { u64 *sptep; 
struct rmap_iterator iter; @@ -1547,29 +1493,31 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp); - kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0); + kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0)); kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level)); } -int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) +bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - int young = false; + bool young; + + young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp); - young = kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp); if (is_tdp_mmu_enabled(kvm)) - young |= kvm_tdp_mmu_age_hva_range(kvm, start, end); + young |= kvm_tdp_mmu_age_gfn_range(kvm, range); return young; } -int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) +bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - int young = false; + bool young; + + young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp); - young = kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp); if (is_tdp_mmu_enabled(kvm)) - young |= kvm_tdp_mmu_test_age_hva(kvm, hva); + young |= kvm_tdp_mmu_test_age_gfn(kvm, range); return young; } @@ -2421,6 +2369,15 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu) kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail); + /* + * Note, this check is intentionally soft, it only guarantees that one + * page is available, while the caller may end up allocating as many as + * four pages, e.g. for PAE roots or for 5-level paging. Temporarily + * exceeding the (arbitrary by default) limit will not harm the host, + * being too agressive may unnecessarily kill the guest, and getting an + * exact count is far more trouble than it's worth, especially in the + * page fault paths. + */ if (!kvm_mmu_available_pages(vcpu->kvm)) return -ENOSPC; return 0; @@ -2561,9 +2518,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, struct kvm_mmu_page *sp; int ret; - if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access)) - return 0; - sp = sptep_to_sp(sptep); ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative, @@ -2593,6 +2547,11 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__, *sptep, write_fault, gfn); + if (unlikely(is_noslot_pfn(pfn))) { + mark_mmio_spte(vcpu, sptep, gfn, pte_access); + return RET_PF_EMULATE; + } + if (is_shadow_present_pte(*sptep)) { /* * If we overwrite a PTE page pointer with a 2MB PMD, unlink @@ -2626,9 +2585,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, KVM_PAGES_PER_HPAGE(level)); - if (unlikely(is_mmio_spte(*sptep))) - ret = RET_PF_EMULATE; - /* * The fault is fully spurious if and only if the new SPTE and old SPTE * are identical, and emulation is not required. 
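The rmap changes in this mmu.c hunk replace the open-coded HVA-range walk with an rmap_handler_t callback plus a generic kvm_handle_gfn_range() dispatcher that ORs the per-range "flush needed" results. A self-contained user-space model of that dispatch shape follows; the struct and function names are simplified stand-ins, not the KVM types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* Simplified stand-in for struct kvm_gfn_range: a memslot-relative gfn span. */
struct gfn_range {
	gfn_t start;
	gfn_t end;	/* exclusive */
};

/* Mirrors rmap_handler_t: returns true if a TLB flush is needed. */
typedef bool (*range_handler_t)(gfn_t gfn);

static bool unmap_one(gfn_t gfn)
{
	printf("zap rmaps for gfn %llu\n", (unsigned long long)gfn);
	return true;	/* zapping present SPTEs requires a flush */
}

/* Generic walker: callers pass the per-gfn operation, results are OR'ed. */
static bool handle_gfn_range(const struct gfn_range *range, range_handler_t handler)
{
	bool flush = false;
	gfn_t gfn;

	for (gfn = range->start; gfn < range->end; gfn++)
		flush |= handler(gfn);

	return flush;
}

int main(void)
{
	struct gfn_range r = { .start = 10, .end = 13 };

	if (handle_gfn_range(&r, unmap_one))
		printf("flush TLBs\n");
	return 0;
}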
@@ -2745,7 +2701,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) } static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, - struct kvm_memory_slot *slot) + const struct kvm_memory_slot *slot) { unsigned long hva; pte_t *pte; @@ -2771,8 +2727,9 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, return level; } -int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot, - gfn_t gfn, kvm_pfn_t pfn, int max_level) +int kvm_mmu_max_mapping_level(struct kvm *kvm, + const struct kvm_memory_slot *slot, gfn_t gfn, + kvm_pfn_t pfn, int max_level) { struct kvm_lpage_info *linfo; @@ -2946,9 +2903,19 @@ static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, return true; } - if (unlikely(is_noslot_pfn(pfn))) + if (unlikely(is_noslot_pfn(pfn))) { vcpu_cache_mmio_info(vcpu, gva, gfn, access & shadow_mmio_access_mask); + /* + * If MMIO caching is disabled, emulate immediately without + * touching the shadow page tables as attempting to install an + * MMIO SPTE will just be an expensive nop. + */ + if (unlikely(!shadow_mmio_value)) { + *ret_val = RET_PF_EMULATE; + return true; + } + } return false; } @@ -3061,6 +3028,9 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, if (!is_shadow_present_pte(spte)) break; + if (!is_shadow_present_pte(spte)) + break; + sp = sptep_to_sp(iterator.sptep); if (!is_last_spte(spte, sp->role.level)) break; @@ -3150,12 +3120,10 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK); - if (kvm_mmu_put_root(kvm, sp)) { - if (is_tdp_mmu_page(sp)) - kvm_tdp_mmu_free_root(kvm, sp); - else if (sp->role.invalid) - kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); - } + if (is_tdp_mmu_page(sp)) + kvm_tdp_mmu_put_root(kvm, sp, false); + else if (!--sp->root_count && sp->role.invalid) + kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); *root_hpa = INVALID_PAGE; } @@ -3193,14 +3161,17 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) { mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list); - } else { - for (i = 0; i < 4; ++i) - if (mmu->pae_root[i] != 0) - mmu_free_root_page(kvm, - &mmu->pae_root[i], - &invalid_list); - mmu->root_hpa = INVALID_PAGE; + } else if (mmu->pae_root) { + for (i = 0; i < 4; ++i) { + if (!IS_VALID_PAE_ROOT(mmu->pae_root[i])) + continue; + + mmu_free_root_page(kvm, &mmu->pae_root[i], + &invalid_list); + mmu->pae_root[i] = INVALID_PAE_ROOT; + } } + mmu->root_hpa = INVALID_PAGE; mmu->root_pgd = 0; } @@ -3226,155 +3197,208 @@ static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva, { struct kvm_mmu_page *sp; - write_lock(&vcpu->kvm->mmu_lock); - - if (make_mmu_pages_available(vcpu)) { - write_unlock(&vcpu->kvm->mmu_lock); - return INVALID_PAGE; - } sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL); ++sp->root_count; - write_unlock(&vcpu->kvm->mmu_lock); return __pa(sp->spt); } static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) { - u8 shadow_root_level = vcpu->arch.mmu->shadow_root_level; + struct kvm_mmu *mmu = vcpu->arch.mmu; + u8 shadow_root_level = mmu->shadow_root_level; hpa_t root; unsigned i; + int r; + + write_lock(&vcpu->kvm->mmu_lock); + r = make_mmu_pages_available(vcpu); + if (r < 0) + goto out_unlock; if (is_tdp_mmu_enabled(vcpu->kvm)) { root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu); - - if 
(!VALID_PAGE(root)) - return -ENOSPC; - vcpu->arch.mmu->root_hpa = root; + mmu->root_hpa = root; } else if (shadow_root_level >= PT64_ROOT_4LEVEL) { - root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, - true); - - if (!VALID_PAGE(root)) - return -ENOSPC; - vcpu->arch.mmu->root_hpa = root; + root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true); + mmu->root_hpa = root; } else if (shadow_root_level == PT32E_ROOT_LEVEL) { + if (WARN_ON_ONCE(!mmu->pae_root)) { + r = -EIO; + goto out_unlock; + } + for (i = 0; i < 4; ++i) { - MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i])); + WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i])); root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT), i << 30, PT32_ROOT_LEVEL, true); - if (!VALID_PAGE(root)) - return -ENOSPC; - vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK; + mmu->pae_root[i] = root | PT_PRESENT_MASK | + shadow_me_mask; } - vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); - } else - BUG(); + mmu->root_hpa = __pa(mmu->pae_root); + } else { + WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level); + r = -EIO; + goto out_unlock; + } /* root_pgd is ignored for direct MMUs. */ - vcpu->arch.mmu->root_pgd = 0; - - return 0; + mmu->root_pgd = 0; +out_unlock: + write_unlock(&vcpu->kvm->mmu_lock); + return r; } static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) { - u64 pdptr, pm_mask; + struct kvm_mmu *mmu = vcpu->arch.mmu; + u64 pdptrs[4], pm_mask; gfn_t root_gfn, root_pgd; hpa_t root; - int i; + unsigned i; + int r; - root_pgd = vcpu->arch.mmu->get_guest_pgd(vcpu); + root_pgd = mmu->get_guest_pgd(vcpu); root_gfn = root_pgd >> PAGE_SHIFT; if (mmu_check_root(vcpu, root_gfn)) return 1; /* + * On SVM, reading PDPTRs might access guest memory, which might fault + * and thus might sleep. Grab the PDPTRs before acquiring mmu_lock. + */ + if (mmu->root_level == PT32E_ROOT_LEVEL) { + for (i = 0; i < 4; ++i) { + pdptrs[i] = mmu->get_pdptr(vcpu, i); + if (!(pdptrs[i] & PT_PRESENT_MASK)) + continue; + + if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT)) + return 1; + } + } + + write_lock(&vcpu->kvm->mmu_lock); + r = make_mmu_pages_available(vcpu); + if (r < 0) + goto out_unlock; + + /* * Do we shadow a long mode page table? If so we need to * write-protect the guests page table root. */ - if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { - MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->root_hpa)); - + if (mmu->root_level >= PT64_ROOT_4LEVEL) { root = mmu_alloc_root(vcpu, root_gfn, 0, - vcpu->arch.mmu->shadow_root_level, false); - if (!VALID_PAGE(root)) - return -ENOSPC; - vcpu->arch.mmu->root_hpa = root; + mmu->shadow_root_level, false); + mmu->root_hpa = root; goto set_root_pgd; } + if (WARN_ON_ONCE(!mmu->pae_root)) { + r = -EIO; + goto out_unlock; + } + /* * We shadow a 32 bit page table. This may be a legacy 2-level * or a PAE 3-level page table. In either case we need to be aware that * the shadow page table may be a PAE or a long mode page table. 
*/ - pm_mask = PT_PRESENT_MASK; - if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) + pm_mask = PT_PRESENT_MASK | shadow_me_mask; + if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) { pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK; + if (WARN_ON_ONCE(!mmu->lm_root)) { + r = -EIO; + goto out_unlock; + } + + mmu->lm_root[0] = __pa(mmu->pae_root) | pm_mask; + } + for (i = 0; i < 4; ++i) { - MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i])); - if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) { - pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i); - if (!(pdptr & PT_PRESENT_MASK)) { - vcpu->arch.mmu->pae_root[i] = 0; + WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i])); + + if (mmu->root_level == PT32E_ROOT_LEVEL) { + if (!(pdptrs[i] & PT_PRESENT_MASK)) { + mmu->pae_root[i] = INVALID_PAE_ROOT; continue; } - root_gfn = pdptr >> PAGE_SHIFT; - if (mmu_check_root(vcpu, root_gfn)) - return 1; + root_gfn = pdptrs[i] >> PAGE_SHIFT; } root = mmu_alloc_root(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL, false); - if (!VALID_PAGE(root)) - return -ENOSPC; - vcpu->arch.mmu->pae_root[i] = root | pm_mask; + mmu->pae_root[i] = root | pm_mask; } - vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); + + if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) + mmu->root_hpa = __pa(mmu->lm_root); + else + mmu->root_hpa = __pa(mmu->pae_root); + +set_root_pgd: + mmu->root_pgd = root_pgd; +out_unlock: + write_unlock(&vcpu->kvm->mmu_lock); + + return 0; +} + +static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu) +{ + struct kvm_mmu *mmu = vcpu->arch.mmu; + u64 *lm_root, *pae_root; /* - * If we shadow a 32 bit page table with a long mode page - * table we enter this path. + * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP + * tables are allocated and initialized at root creation as there is no + * equivalent level in the guest's NPT to shadow. Allocate the tables + * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare. */ - if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) { - if (vcpu->arch.mmu->lm_root == NULL) { - /* - * The additional page necessary for this is only - * allocated on demand. - */ + if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL || + mmu->shadow_root_level < PT64_ROOT_4LEVEL) + return 0; - u64 *lm_root; + /* + * This mess only works with 4-level paging and needs to be updated to + * work with 5-level paging. + */ + if (WARN_ON_ONCE(mmu->shadow_root_level != PT64_ROOT_4LEVEL)) + return -EIO; - lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT); - if (lm_root == NULL) - return 1; + if (mmu->pae_root && mmu->lm_root) + return 0; - lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask; + /* + * The special roots should always be allocated in concert. Yell and + * bail if KVM ends up in a state where only one of the roots is valid. + */ + if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->lm_root)) + return -EIO; - vcpu->arch.mmu->lm_root = lm_root; - } + /* + * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and + * doesn't need to be decrypted. 
+ */ + pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT); + if (!pae_root) + return -ENOMEM; - vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root); + lm_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT); + if (!lm_root) { + free_page((unsigned long)pae_root); + return -ENOMEM; } -set_root_pgd: - vcpu->arch.mmu->root_pgd = root_pgd; + mmu->pae_root = pae_root; + mmu->lm_root = lm_root; return 0; } -static int mmu_alloc_roots(struct kvm_vcpu *vcpu) -{ - if (vcpu->arch.mmu->direct_map) - return mmu_alloc_direct_roots(vcpu); - else - return mmu_alloc_shadow_roots(vcpu); -} - void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) { int i; @@ -3422,7 +3446,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) for (i = 0; i < 4; ++i) { hpa_t root = vcpu->arch.mmu->pae_root[i]; - if (root && VALID_PAGE(root)) { + if (IS_VALID_PAE_ROOT(root)) { root &= PT64_BASE_ADDR_MASK; sp = to_shadow_page(root); mmu_sync_children(vcpu, sp); @@ -3554,11 +3578,12 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) __is_rsvd_bits_set(rsvd_check, sptes[level], level); if (reserved) { - pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n", + pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n", __func__, addr); for (level = root; level >= leaf; level--) - pr_err("------ spte 0x%llx level %d.\n", - sptes[level], level); + pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx", + sptes[level], level, + rsvd_check->rsvd_bits_mask[(sptes[level] >> 7) & 1][level-1]); } return reserved; @@ -3653,6 +3678,14 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); bool async; + /* + * Retry the page fault if the gfn hit a memslot that is being deleted + * or moved. This ensures any existing SPTEs for the old memslot will + * be zapped before KVM inserts a new MMIO SPTE for the gfn. + */ + if (slot && (slot->flags & KVM_MEMSLOT_INVALID)) + return true; + /* Don't expose private memslots to L2. */ if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) { *pfn = KVM_PFN_NOSLOT; @@ -4615,12 +4648,17 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer, struct kvm_mmu *context = &vcpu->arch.guest_mmu; union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu); - context->shadow_root_level = new_role.base.level; - __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false); - if (new_role.as_u64 != context->mmu_role.as_u64) + if (new_role.as_u64 != context->mmu_role.as_u64) { shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role); + + /* + * Override the level set by the common init helper, nested TDP + * always uses the host's TDP configuration. 
+ */ + context->shadow_root_level = new_role.base.level; + } } EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu); @@ -4802,16 +4840,23 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu) r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map); if (r) goto out; - r = mmu_alloc_roots(vcpu); - kvm_mmu_sync_roots(vcpu); + r = mmu_alloc_special_roots(vcpu); + if (r) + goto out; + if (vcpu->arch.mmu->direct_map) + r = mmu_alloc_direct_roots(vcpu); + else + r = mmu_alloc_shadow_roots(vcpu); if (r) goto out; + + kvm_mmu_sync_roots(vcpu); + kvm_mmu_load_pgd(vcpu); static_call(kvm_x86_tlb_flush_current)(vcpu); out: return r; } -EXPORT_SYMBOL_GPL(kvm_mmu_load); void kvm_mmu_unload(struct kvm_vcpu *vcpu) { @@ -4820,7 +4865,6 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu) kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa)); } -EXPORT_SYMBOL_GPL(kvm_mmu_unload); static bool need_remote_flush(u64 old, u64 new) { @@ -4961,7 +5005,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, /* * No need to care whether allocation memory is successful - * or not since pte prefetch is skiped if it does not have + * or not since pte prefetch is skipped if it does not have * enough objects in the cache. */ mmu_topup_memory_caches(vcpu, true); @@ -5169,10 +5213,10 @@ typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_ static __always_inline bool slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, int start_level, int end_level, - gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb) + gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield, + bool flush) { struct slot_rmap_walk_iterator iterator; - bool flush = false; for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn, end_gfn, &iterator) { @@ -5180,7 +5224,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, flush |= fn(kvm, iterator.rmap, memslot); if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { - if (flush && lock_flush_tlb) { + if (flush && flush_on_yield) { kvm_flush_remote_tlbs_with_address(kvm, start_gfn, iterator.gfn - start_gfn + 1); @@ -5190,36 +5234,32 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, } } - if (flush && lock_flush_tlb) { - kvm_flush_remote_tlbs_with_address(kvm, start_gfn, - end_gfn - start_gfn + 1); - flush = false; - } - return flush; } static __always_inline bool slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, int start_level, int end_level, - bool lock_flush_tlb) + bool flush_on_yield) { return slot_handle_level_range(kvm, memslot, fn, start_level, end_level, memslot->base_gfn, memslot->base_gfn + memslot->npages - 1, - lock_flush_tlb); + flush_on_yield, false); } static __always_inline bool slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, - slot_level_handler fn, bool lock_flush_tlb) + slot_level_handler fn, bool flush_on_yield) { return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K, - PG_LEVEL_4K, lock_flush_tlb); + PG_LEVEL_4K, flush_on_yield); } static void free_mmu_pages(struct kvm_mmu *mmu) { + if (!tdp_enabled && mmu->pae_root) + set_memory_encrypted((unsigned long)mmu->pae_root, 1); free_page((unsigned long)mmu->pae_root); free_page((unsigned long)mmu->lm_root); } @@ -5240,9 +5280,11 @@ static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) * while the PDP table is a per-vCPU construct that's allocated at MMU * creation. 
When emulating 32-bit mode, cr3 is only 32 bits even on * x86_64. Therefore we need to allocate the PDP table in the first - * 4GB of memory, which happens to fit the DMA32 zone. Except for - * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can - * skip allocating the PDP table. + * 4GB of memory, which happens to fit the DMA32 zone. TDP paging + * generally doesn't use PAE paging and can skip allocating the PDP + * table. The main exception, handled here, is SVM's 32-bit NPT. The + * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit + * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots(). */ if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL) return 0; @@ -5252,8 +5294,22 @@ static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) return -ENOMEM; mmu->pae_root = page_address(page); + + /* + * CR3 is only 32 bits when PAE paging is used, thus it's impossible to + * get the CPU to treat the PDPTEs as encrypted. Decrypt the page so + * that KVM's writes and the CPU's reads get along. Note, this is + * only necessary when using shadow paging, as 64-bit NPT can get at + * the C-bit even when shadowing 32-bit NPT, and SME isn't supported + * by 32-bit kernels (when KVM itself uses 32-bit NPT). + */ + if (!tdp_enabled) + set_memory_decrypted((unsigned long)mmu->pae_root, 1); + else + WARN_ON_ONCE(shadow_me_mask); + for (i = 0; i < 4; ++i) - mmu->pae_root[i] = INVALID_PAGE; + mmu->pae_root[i] = INVALID_PAE_ROOT; return 0; } @@ -5365,6 +5421,15 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm) */ kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1; + /* In order to ensure all threads see this change when + * handling the MMU reload signal, this must happen in the + * same critical section as kvm_reload_remote_mmus, and + * before kvm_zap_obsolete_pages as kvm_zap_obsolete_pages + * could drop the MMU lock and yield. + */ + if (is_tdp_mmu_enabled(kvm)) + kvm_tdp_mmu_invalidate_all_roots(kvm); + /* * Notify all vcpus to reload its shadow page table and flush TLB. 
* Then all vcpus will switch to new shadow page table with the new @@ -5377,10 +5442,13 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm) kvm_zap_obsolete_pages(kvm); - if (is_tdp_mmu_enabled(kvm)) - kvm_tdp_mmu_zap_all(kvm); - write_unlock(&kvm->mmu_lock); + + if (is_tdp_mmu_enabled(kvm)) { + read_lock(&kvm->mmu_lock); + kvm_tdp_mmu_zap_invalidated_roots(kvm); + read_unlock(&kvm->mmu_lock); + } } static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm) @@ -5420,7 +5488,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) struct kvm_memslots *slots; struct kvm_memory_slot *memslot; int i; - bool flush; + bool flush = false; write_lock(&kvm->mmu_lock); for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { @@ -5433,20 +5501,31 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) if (start >= end) continue; - slot_handle_level_range(kvm, memslot, kvm_zap_rmapp, - PG_LEVEL_4K, - KVM_MAX_HUGEPAGE_LEVEL, - start, end - 1, true); + flush = slot_handle_level_range(kvm, memslot, kvm_zap_rmapp, + PG_LEVEL_4K, + KVM_MAX_HUGEPAGE_LEVEL, + start, end - 1, true, flush); } } + if (flush) + kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end); + + write_unlock(&kvm->mmu_lock); + if (is_tdp_mmu_enabled(kvm)) { - flush = kvm_tdp_mmu_zap_gfn_range(kvm, gfn_start, gfn_end); + flush = false; + + read_lock(&kvm->mmu_lock); + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) + flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start, + gfn_end, flush, true); if (flush) - kvm_flush_remote_tlbs(kvm); - } + kvm_flush_remote_tlbs_with_address(kvm, gfn_start, + gfn_end); - write_unlock(&kvm->mmu_lock); + read_unlock(&kvm->mmu_lock); + } } static bool slot_rmap_write_protect(struct kvm *kvm, @@ -5465,10 +5544,14 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, write_lock(&kvm->mmu_lock); flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect, start_level, KVM_MAX_HUGEPAGE_LEVEL, false); - if (is_tdp_mmu_enabled(kvm)) - flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_4K); write_unlock(&kvm->mmu_lock); + if (is_tdp_mmu_enabled(kvm)) { + read_lock(&kvm->mmu_lock); + flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level); + read_unlock(&kvm->mmu_lock); + } + /* * We can flush all the TLBs out of the mmu lock without TLB * corruption since we just change the spte from writable to @@ -5476,9 +5559,9 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, * spte from present to present (changing the spte from present * to nonpresent will flush all the TLBs immediately), in other * words, the only case we care is mmu_spte_update() where we - * have checked SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE - * instead of PT_WRITABLE_MASK, that means it does not depend - * on PT_WRITABLE_MASK anymore. + * have checked Host-writable | MMU-writable instead of + * PT_WRITABLE_MASK, that means it does not depend on PT_WRITABLE_MASK + * anymore. */ if (flush) kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); @@ -5529,21 +5612,32 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, { /* FIXME: const-ify all uses of struct kvm_memory_slot. 
*/ struct kvm_memory_slot *slot = (struct kvm_memory_slot *)memslot; + bool flush; write_lock(&kvm->mmu_lock); - slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true); + flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true); - if (is_tdp_mmu_enabled(kvm)) - kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot); + if (flush) + kvm_arch_flush_remote_tlbs_memslot(kvm, slot); write_unlock(&kvm->mmu_lock); + + if (is_tdp_mmu_enabled(kvm)) { + flush = false; + + read_lock(&kvm->mmu_lock); + flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush); + if (flush) + kvm_arch_flush_remote_tlbs_memslot(kvm, slot); + read_unlock(&kvm->mmu_lock); + } } void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, - struct kvm_memory_slot *memslot) + const struct kvm_memory_slot *memslot) { /* * All current use cases for flushing the TLBs for a specific memslot - * are related to dirty logging, and do the TLB flush out of mmu_lock. + * related to dirty logging, and many do the TLB flush out of mmu_lock. * The interaction between the various operations on memslot must be * serialized by slots_locks to ensure the TLB flush from one operation * is observed by any other operation on the same memslot. @@ -5560,10 +5654,14 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, write_lock(&kvm->mmu_lock); flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false); - if (is_tdp_mmu_enabled(kvm)) - flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot); write_unlock(&kvm->mmu_lock); + if (is_tdp_mmu_enabled(kvm)) { + read_lock(&kvm->mmu_lock); + flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot); + read_unlock(&kvm->mmu_lock); + } + /* * It's also safe to flush TLBs out of mmu lock here as currently this * function is only used for dirty logging, in which case flushing TLB @@ -5701,25 +5799,6 @@ static void mmu_destroy_caches(void) kmem_cache_destroy(mmu_page_header_cache); } -static void kvm_set_mmio_spte_mask(void) -{ - u64 mask; - - /* - * Set a reserved PA bit in MMIO SPTEs to generate page faults with - * PFEC.RSVD=1 on MMIO accesses. 64-bit PTEs (PAE, x86-64, and EPT - * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports - * 52-bit physical addresses then there are no reserved PA bits in the - * PTEs and so the reserved PA approach must be disabled. 
- */ - if (shadow_phys_bits < 52) - mask = BIT_ULL(51) | PT_PRESENT_MASK; - else - mask = 0; - - kvm_mmu_set_mmio_spte_mask(mask, ACC_WRITE_MASK | ACC_USER_MASK); -} - static bool get_nx_auto_mode(void) { /* Return true when CPU has the bug, and mitigations are ON */ @@ -5785,8 +5864,6 @@ int kvm_mmu_module_init(void) kvm_mmu_reset_all_pte_masks(); - kvm_set_mmio_spte_mask(); - pte_list_desc_cache = kmem_cache_create("pte_list_desc", sizeof(struct pte_list_desc), 0, SLAB_ACCOUNT, NULL); @@ -5884,6 +5961,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) struct kvm_mmu_page *sp; unsigned int ratio; LIST_HEAD(invalid_list); + bool flush = false; ulong to_zap; rcu_idx = srcu_read_lock(&kvm->srcu); @@ -5905,19 +5983,19 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) lpage_disallowed_link); WARN_ON_ONCE(!sp->lpage_disallowed); if (is_tdp_mmu_page(sp)) { - kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn, - sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level)); + flush |= kvm_tdp_mmu_zap_sp(kvm, sp); } else { kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); WARN_ON_ONCE(sp->lpage_disallowed); } if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { - kvm_mmu_commit_zap_page(kvm, &invalid_list); + kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); cond_resched_rwlock_write(&kvm->mmu_lock); + flush = false; } } - kvm_mmu_commit_zap_page(kvm, &invalid_list); + kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, rcu_idx); diff --git a/arch/x86/kvm/mmu/mmu_audit.c b/arch/x86/kvm/mmu/mmu_audit.c index ced15fd58fde..cedc17b2f60e 100644 --- a/arch/x86/kvm/mmu/mmu_audit.c +++ b/arch/x86/kvm/mmu/mmu_audit.c @@ -70,7 +70,7 @@ static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn) for (i = 0; i < 4; ++i) { hpa_t root = vcpu->arch.mmu->pae_root[i]; - if (root && VALID_PAGE(root)) { + if (IS_VALID_PAE_ROOT(root)) { root &= PT64_BASE_ADDR_MASK; sp = to_shadow_page(root); __mmu_spte_walk(vcpu, sp, fn, 2); diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index ec4fc28b325a..d64ccb417c60 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -20,6 +20,16 @@ extern bool dbg; #define MMU_WARN_ON(x) do { } while (0) #endif +/* + * Unlike regular MMU roots, PAE "roots", a.k.a. PDPTEs/PDPTRs, have a PRESENT + * bit, and thus are guaranteed to be non-zero when valid. And, when a guest + * PDPTR is !PRESENT, its corresponding PAE root cannot be set to INVALID_PAGE, + * as the CPU would treat that as PRESENT PDPTR with reserved bits set. Use + * '0' instead of INVALID_PAGE to indicate an invalid PAE root. + */ +#define INVALID_PAE_ROOT 0 +#define IS_VALID_PAE_ROOT(x) (!!(x)) + struct kvm_mmu_page { struct list_head link; struct hlist_node hash_link; @@ -40,7 +50,11 @@ struct kvm_mmu_page { u64 *spt; /* hold the gfn of each spte inside spt */ gfn_t *gfns; - int root_count; /* Currently serving as active root */ + /* Currently serving as active root */ + union { + int root_count; + refcount_t tdp_mmu_root_count; + }; unsigned int unsync_children; struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */ DECLARE_BITMAP(unsync_child_bitmap, 512); @@ -59,7 +73,7 @@ struct kvm_mmu_page { #ifdef CONFIG_X86_64 bool tdp_mmu_page; - /* Used for freeing the page asyncronously if it is a TDP MMU page. */ + /* Used for freeing the page asynchronously if it is a TDP MMU page. 
*/ struct rcu_head rcu_head; #endif }; @@ -78,6 +92,16 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep) return to_shadow_page(__pa(sptep)); } +static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role) +{ + return role.smm ? 1 : 0; +} + +static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp) +{ + return kvm_mmu_role_as_id(sp->role); +} + static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu) { /* @@ -103,22 +127,6 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, void kvm_flush_remote_tlbs_with_address(struct kvm *kvm, u64 start_gfn, u64 pages); -static inline void kvm_mmu_get_root(struct kvm *kvm, struct kvm_mmu_page *sp) -{ - BUG_ON(!sp->root_count); - lockdep_assert_held(&kvm->mmu_lock); - - ++sp->root_count; -} - -static inline bool kvm_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *sp) -{ - lockdep_assert_held(&kvm->mmu_lock); - --sp->root_count; - - return !sp->root_count; -} - /* * Return values of handle_mmio_page_fault, mmu.page_fault, and fast_page_fault(). * @@ -141,8 +149,9 @@ enum { #define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1) #define SET_SPTE_SPURIOUS BIT(2) -int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot, - gfn_t gfn, kvm_pfn_t pfn, int max_level); +int kvm_mmu_max_mapping_level(struct kvm *kvm, + const struct kvm_memory_slot *slot, gfn_t gfn, + kvm_pfn_t pfn, int max_level); int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn, int max_level, kvm_pfn_t *pfnp, bool huge_page_disallowed, int *req_level); diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 55d7b473ac44..70b7e44e3035 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -503,6 +503,7 @@ error: #endif walker->fault.address = addr; walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; + walker->fault.async_page_fault = false; trace_kvm_mmu_walker_error(walker->fault.error_code); return 0; @@ -1084,7 +1085,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) nr_present++; - host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE; + host_writable = sp->spt[i] & shadow_host_writable_mask; set_spte_ret |= set_spte(vcpu, &sp->spt[i], pte_access, PG_LEVEL_4K, diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index ef55f0bc4ccf..66d43cec0c31 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -16,13 +16,20 @@ #include "spte.h" #include <asm/e820/api.h> +#include <asm/vmx.h> +static bool __read_mostly enable_mmio_caching = true; +module_param_named(mmio_caching, enable_mmio_caching, bool, 0444); + +u64 __read_mostly shadow_host_writable_mask; +u64 __read_mostly shadow_mmu_writable_mask; u64 __read_mostly shadow_nx_mask; u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */ u64 __read_mostly shadow_user_mask; u64 __read_mostly shadow_accessed_mask; u64 __read_mostly shadow_dirty_mask; u64 __read_mostly shadow_mmio_value; +u64 __read_mostly shadow_mmio_mask; u64 __read_mostly shadow_mmio_access_mask; u64 __read_mostly shadow_present_mask; u64 __read_mostly shadow_me_mask; @@ -38,7 +45,6 @@ static u64 generation_mmio_spte_mask(u64 gen) u64 mask; WARN_ON(gen & ~MMIO_SPTE_GEN_MASK); - BUILD_BUG_ON((MMIO_SPTE_GEN_HIGH_MASK | MMIO_SPTE_GEN_LOW_MASK) & SPTE_SPECIAL_MASK); mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK; mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK; @@ -48,16 +54,18 @@ static u64 generation_mmio_spte_mask(u64 gen) u64 make_mmio_spte(struct kvm_vcpu 
*vcpu, u64 gfn, unsigned int access) { u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK; - u64 mask = generation_mmio_spte_mask(gen); + u64 spte = generation_mmio_spte_mask(gen); u64 gpa = gfn << PAGE_SHIFT; + WARN_ON_ONCE(!shadow_mmio_value); + access &= shadow_mmio_access_mask; - mask |= shadow_mmio_value | access; - mask |= gpa | shadow_nonpresent_or_rsvd_mask; - mask |= (gpa & shadow_nonpresent_or_rsvd_mask) + spte |= shadow_mmio_value | access; + spte |= gpa | shadow_nonpresent_or_rsvd_mask; + spte |= (gpa & shadow_nonpresent_or_rsvd_mask) << SHADOW_NONPRESENT_OR_RSVD_MASK_LEN; - return mask; + return spte; } static bool kvm_is_mmio_pfn(kvm_pfn_t pfn) @@ -86,13 +94,20 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level, bool can_unsync, bool host_writable, bool ad_disabled, u64 *new_spte) { - u64 spte = 0; + u64 spte = SPTE_MMU_PRESENT_MASK; int ret = 0; if (ad_disabled) - spte |= SPTE_AD_DISABLED_MASK; + spte |= SPTE_TDP_AD_DISABLED_MASK; else if (kvm_vcpu_ad_need_write_protect(vcpu)) - spte |= SPTE_AD_WRPROT_ONLY_MASK; + spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK; + + /* + * Bits 62:52 of PAE SPTEs are reserved. WARN if said bits are set + * if PAE paging may be employed (shadow paging or any 32-bit KVM). + */ + WARN_ON_ONCE((!tdp_enabled || !IS_ENABLED(CONFIG_X86_64)) && + (spte & SPTE_TDP_AD_MASK)); /* * For the EPT case, shadow_present_mask is 0 if hardware @@ -124,7 +139,7 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level, kvm_is_mmio_pfn(pfn)); if (host_writable) - spte |= SPTE_HOST_WRITEABLE; + spte |= shadow_host_writable_mask; else pte_access &= ~ACC_WRITE_MASK; @@ -134,7 +149,7 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level, spte |= (u64)pfn << PAGE_SHIFT; if (pte_access & ACC_WRITE_MASK) { - spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE; + spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask; /* * Optimization: for pte sync, if spte was writable the hash @@ -150,7 +165,7 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level, __func__, gfn); ret |= SET_SPTE_WRITE_PROTECTED_PT; pte_access &= ~ACC_WRITE_MASK; - spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE); + spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask); } } @@ -161,19 +176,20 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level, spte = mark_spte_for_access_track(spte); out: + WARN_ON(is_mmio_spte(spte)); *new_spte = spte; return ret; } u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled) { - u64 spte; + u64 spte = SPTE_MMU_PRESENT_MASK; - spte = __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK | - shadow_user_mask | shadow_x_mask | shadow_me_mask; + spte |= __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK | + shadow_user_mask | shadow_x_mask | shadow_me_mask; if (ad_disabled) - spte |= SPTE_AD_DISABLED_MASK; + spte |= SPTE_TDP_AD_DISABLED_MASK; else spte |= shadow_accessed_mask; @@ -188,7 +204,7 @@ u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn) new_spte |= (u64)new_pfn << PAGE_SHIFT; new_spte &= ~PT_WRITABLE_MASK; - new_spte &= ~SPTE_HOST_WRITEABLE; + new_spte &= ~shadow_host_writable_mask; new_spte = mark_spte_for_access_track(new_spte); @@ -242,53 +258,68 @@ u64 mark_spte_for_access_track(u64 spte) return spte; } -void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask) +void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask) { BUG_ON((u64)(unsigned)access_mask != access_mask); - WARN_ON(mmio_value & 
(shadow_nonpresent_or_rsvd_mask << SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)); WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask); - shadow_mmio_value = mmio_value | SPTE_MMIO_MASK; + + if (!enable_mmio_caching) + mmio_value = 0; + + /* + * Disable MMIO caching if the MMIO value collides with the bits that + * are used to hold the relocated GFN when the L1TF mitigation is + * enabled. This should never fire as there is no known hardware that + * can trigger this condition, e.g. SME/SEV CPUs that require a custom + * MMIO value are not susceptible to L1TF. + */ + if (WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << + SHADOW_NONPRESENT_OR_RSVD_MASK_LEN))) + mmio_value = 0; + + /* + * The masked MMIO value must obviously match itself and a removed SPTE + * must not get a false positive. Removed SPTEs and MMIO SPTEs should + * never collide as MMIO must set some RWX bits, and removed SPTEs must + * not set any RWX bits. + */ + if (WARN_ON((mmio_value & mmio_mask) != mmio_value) || + WARN_ON(mmio_value && (REMOVED_SPTE & mmio_mask) == mmio_value)) + mmio_value = 0; + + shadow_mmio_value = mmio_value; + shadow_mmio_mask = mmio_mask; shadow_mmio_access_mask = access_mask; } EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask); -/* - * Sets the shadow PTE masks used by the MMU. - * - * Assumptions: - * - Setting either @accessed_mask or @dirty_mask requires setting both - * - At least one of @accessed_mask or @acc_track_mask must be set - */ -void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, - u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask, - u64 acc_track_mask, u64 me_mask) +void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only) { - BUG_ON(!dirty_mask != !accessed_mask); - BUG_ON(!accessed_mask && !acc_track_mask); - BUG_ON(acc_track_mask & SPTE_SPECIAL_MASK); - - shadow_user_mask = user_mask; - shadow_accessed_mask = accessed_mask; - shadow_dirty_mask = dirty_mask; - shadow_nx_mask = nx_mask; - shadow_x_mask = x_mask; - shadow_present_mask = p_mask; - shadow_acc_track_mask = acc_track_mask; - shadow_me_mask = me_mask; + shadow_user_mask = VMX_EPT_READABLE_MASK; + shadow_accessed_mask = has_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull; + shadow_dirty_mask = has_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull; + shadow_nx_mask = 0ull; + shadow_x_mask = VMX_EPT_EXECUTABLE_MASK; + shadow_present_mask = has_exec_only ? 0ull : VMX_EPT_READABLE_MASK; + shadow_acc_track_mask = VMX_EPT_RWX_MASK; + shadow_me_mask = 0ull; + + shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE; + shadow_mmu_writable_mask = EPT_SPTE_MMU_WRITABLE; + + /* + * EPT Misconfigurations are generated if the value of bits 2:0 + * of an EPT paging-structure entry is 110b (write/execute). 
+ */ + kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE, + VMX_EPT_RWX_MASK, 0); } -EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes); +EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks); void kvm_mmu_reset_all_pte_masks(void) { u8 low_phys_bits; - - shadow_user_mask = 0; - shadow_accessed_mask = 0; - shadow_dirty_mask = 0; - shadow_nx_mask = 0; - shadow_x_mask = 0; - shadow_present_mask = 0; - shadow_acc_track_mask = 0; + u64 mask; shadow_phys_bits = kvm_get_shadow_phys_bits(); @@ -315,4 +346,30 @@ void kvm_mmu_reset_all_pte_masks(void) shadow_nonpresent_or_rsvd_lower_gfn_mask = GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT); + + shadow_user_mask = PT_USER_MASK; + shadow_accessed_mask = PT_ACCESSED_MASK; + shadow_dirty_mask = PT_DIRTY_MASK; + shadow_nx_mask = PT64_NX_MASK; + shadow_x_mask = 0; + shadow_present_mask = PT_PRESENT_MASK; + shadow_acc_track_mask = 0; + shadow_me_mask = sme_me_mask; + + shadow_host_writable_mask = DEFAULT_SPTE_HOST_WRITEABLE; + shadow_mmu_writable_mask = DEFAULT_SPTE_MMU_WRITEABLE; + + /* + * Set a reserved PA bit in MMIO SPTEs to generate page faults with + * PFEC.RSVD=1 on MMIO accesses. 64-bit PTEs (PAE, x86-64, and EPT + * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports + * 52-bit physical addresses then there are no reserved PA bits in the + * PTEs and so the reserved PA approach must be disabled. + */ + if (shadow_phys_bits < 52) + mask = BIT_ULL(51) | PT_PRESENT_MASK; + else + mask = 0; + + kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK); } diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index 6de3950fd704..bca0ba11cccf 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -5,18 +5,33 @@ #include "mmu_internal.h" -#define PT_FIRST_AVAIL_BITS_SHIFT 10 -#define PT64_SECOND_AVAIL_BITS_SHIFT 54 +/* + * A MMU present SPTE is backed by actual memory and may or may not be present + * in hardware. E.g. MMIO SPTEs are not considered present. Use bit 11, as it + * is ignored by all flavors of SPTEs and checking a low bit often generates + * better code than for a high bit, e.g. 56+. MMU present checks are pervasive + * enough that the improved code generation is noticeable in KVM's footprint. + */ +#define SPTE_MMU_PRESENT_MASK BIT_ULL(11) /* - * The mask used to denote special SPTEs, which can be either MMIO SPTEs or - * Access Tracking SPTEs. + * TDP SPTES (more specifically, EPT SPTEs) may not have A/D bits, and may also + * be restricted to using write-protection (for L2 when CPU dirty logging, i.e. + * PML, is enabled). Use bits 52 and 53 to hold the type of A/D tracking that + * is must be employed for a given TDP SPTE. + * + * Note, the "enabled" mask must be '0', as bits 62:52 are _reserved_ for PAE + * paging, including NPT PAE. This scheme works because legacy shadow paging + * is guaranteed to have A/D bits and write-protection is forced only for + * TDP with CPU dirty logging (PML). If NPT ever gains PML-like support, it + * must be restricted to 64-bit KVM. 
*/ -#define SPTE_SPECIAL_MASK (3ULL << 52) -#define SPTE_AD_ENABLED_MASK (0ULL << 52) -#define SPTE_AD_DISABLED_MASK (1ULL << 52) -#define SPTE_AD_WRPROT_ONLY_MASK (2ULL << 52) -#define SPTE_MMIO_MASK (3ULL << 52) +#define SPTE_TDP_AD_SHIFT 52 +#define SPTE_TDP_AD_MASK (3ULL << SPTE_TDP_AD_SHIFT) +#define SPTE_TDP_AD_ENABLED_MASK (0ULL << SPTE_TDP_AD_SHIFT) +#define SPTE_TDP_AD_DISABLED_MASK (1ULL << SPTE_TDP_AD_SHIFT) +#define SPTE_TDP_AD_WRPROT_ONLY_MASK (2ULL << SPTE_TDP_AD_SHIFT) +static_assert(SPTE_TDP_AD_ENABLED_MASK == 0); #ifdef CONFIG_DYNAMIC_PHYSICAL_MASK #define PT64_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1)) @@ -51,16 +66,46 @@ (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1)) #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) +/* Bits 9 and 10 are ignored by all non-EPT PTEs. */ +#define DEFAULT_SPTE_HOST_WRITEABLE BIT_ULL(9) +#define DEFAULT_SPTE_MMU_WRITEABLE BIT_ULL(10) + +/* + * The mask/shift to use for saving the original R/X bits when marking the PTE + * as not-present for access tracking purposes. We do not save the W bit as the + * PTEs being access tracked also need to be dirty tracked, so the W bit will be + * restored only when a write is attempted to the page. This mask obviously + * must not overlap the A/D type mask. + */ +#define SHADOW_ACC_TRACK_SAVED_BITS_MASK (PT64_EPT_READABLE_MASK | \ + PT64_EPT_EXECUTABLE_MASK) +#define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT 54 +#define SHADOW_ACC_TRACK_SAVED_MASK (SHADOW_ACC_TRACK_SAVED_BITS_MASK << \ + SHADOW_ACC_TRACK_SAVED_BITS_SHIFT) +static_assert(!(SPTE_TDP_AD_MASK & SHADOW_ACC_TRACK_SAVED_MASK)); + +/* + * Low ignored bits are at a premium for EPT, use high ignored bits, taking care + * to not overlap the A/D type mask or the saved access bits of access-tracked + * SPTEs when A/D bits are disabled. + */ +#define EPT_SPTE_HOST_WRITABLE BIT_ULL(57) +#define EPT_SPTE_MMU_WRITABLE BIT_ULL(58) -#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT) -#define SPTE_MMU_WRITEABLE (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1)) +static_assert(!(EPT_SPTE_HOST_WRITABLE & SPTE_TDP_AD_MASK)); +static_assert(!(EPT_SPTE_MMU_WRITABLE & SPTE_TDP_AD_MASK)); +static_assert(!(EPT_SPTE_HOST_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK)); +static_assert(!(EPT_SPTE_MMU_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK)); + +/* Defined only to keep the above static asserts readable. 
*/ +#undef SHADOW_ACC_TRACK_SAVED_MASK /* - * Due to limited space in PTEs, the MMIO generation is a 18 bit subset of + * Due to limited space in PTEs, the MMIO generation is a 19 bit subset of * the memslots generation and is derived as follows: * - * Bits 0-8 of the MMIO generation are propagated to spte bits 3-11 - * Bits 9-17 of the MMIO generation are propagated to spte bits 54-62 + * Bits 0-7 of the MMIO generation are propagated to spte bits 3-10 + * Bits 8-18 of the MMIO generation are propagated to spte bits 52-62 * * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in * the MMIO generation number, as doing so would require stealing a bit from @@ -71,39 +116,44 @@ */ #define MMIO_SPTE_GEN_LOW_START 3 -#define MMIO_SPTE_GEN_LOW_END 11 +#define MMIO_SPTE_GEN_LOW_END 10 -#define MMIO_SPTE_GEN_HIGH_START PT64_SECOND_AVAIL_BITS_SHIFT +#define MMIO_SPTE_GEN_HIGH_START 52 #define MMIO_SPTE_GEN_HIGH_END 62 #define MMIO_SPTE_GEN_LOW_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \ MMIO_SPTE_GEN_LOW_START) #define MMIO_SPTE_GEN_HIGH_MASK GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \ MMIO_SPTE_GEN_HIGH_START) +static_assert(!(SPTE_MMU_PRESENT_MASK & + (MMIO_SPTE_GEN_LOW_MASK | MMIO_SPTE_GEN_HIGH_MASK))); #define MMIO_SPTE_GEN_LOW_BITS (MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1) #define MMIO_SPTE_GEN_HIGH_BITS (MMIO_SPTE_GEN_HIGH_END - MMIO_SPTE_GEN_HIGH_START + 1) /* remember to adjust the comment above as well if you change these */ -static_assert(MMIO_SPTE_GEN_LOW_BITS == 9 && MMIO_SPTE_GEN_HIGH_BITS == 9); +static_assert(MMIO_SPTE_GEN_LOW_BITS == 8 && MMIO_SPTE_GEN_HIGH_BITS == 11); #define MMIO_SPTE_GEN_LOW_SHIFT (MMIO_SPTE_GEN_LOW_START - 0) #define MMIO_SPTE_GEN_HIGH_SHIFT (MMIO_SPTE_GEN_HIGH_START - MMIO_SPTE_GEN_LOW_BITS) #define MMIO_SPTE_GEN_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0) +extern u64 __read_mostly shadow_host_writable_mask; +extern u64 __read_mostly shadow_mmu_writable_mask; extern u64 __read_mostly shadow_nx_mask; extern u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */ extern u64 __read_mostly shadow_user_mask; extern u64 __read_mostly shadow_accessed_mask; extern u64 __read_mostly shadow_dirty_mask; extern u64 __read_mostly shadow_mmio_value; +extern u64 __read_mostly shadow_mmio_mask; extern u64 __read_mostly shadow_mmio_access_mask; extern u64 __read_mostly shadow_present_mask; extern u64 __read_mostly shadow_me_mask; /* - * SPTEs used by MMUs without A/D bits are marked with SPTE_AD_DISABLED_MASK; + * SPTEs in MMUs without A/D bits are marked with SPTE_TDP_AD_DISABLED_MASK; * shadow_acc_track_mask is the set of bits to be cleared in non-accessed * pages. */ @@ -121,28 +171,21 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask; #define SHADOW_NONPRESENT_OR_RSVD_MASK_LEN 5 /* - * The mask/shift to use for saving the original R/X bits when marking the PTE - * as not-present for access tracking purposes. We do not save the W bit as the - * PTEs being access tracked also need to be dirty tracked, so the W bit will be - * restored only when a write is attempted to the page. - */ -#define SHADOW_ACC_TRACK_SAVED_BITS_MASK (PT64_EPT_READABLE_MASK | \ - PT64_EPT_EXECUTABLE_MASK) -#define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT PT64_SECOND_AVAIL_BITS_SHIFT - -/* * If a thread running without exclusive control of the MMU lock must perform a * multi-part operation on an SPTE, it can set the SPTE to REMOVED_SPTE as a * non-present intermediate value. 
Other threads which encounter this value * should not modify the SPTE. * - * This constant works because it is considered non-present on both AMD and - * Intel CPUs and does not create a L1TF vulnerability because the pfn section - * is zeroed out. + * Use a semi-arbitrary value that doesn't set RWX bits, i.e. is not-present on + * both AMD and Intel CPUs, and doesn't set PFN bits, i.e. doesn't create a L1TF + * vulnerability. Use only low bits to avoid 64-bit immediates. * * Only used by the TDP MMU. */ -#define REMOVED_SPTE (1ull << 59) +#define REMOVED_SPTE 0x5a0ULL + +/* Removed SPTEs must not be misconstrued as shadow present PTEs. */ +static_assert(!(REMOVED_SPTE & SPTE_MMU_PRESENT_MASK)); static inline bool is_removed_spte(u64 spte) { @@ -167,7 +210,13 @@ extern u8 __read_mostly shadow_phys_bits; static inline bool is_mmio_spte(u64 spte) { - return (spte & SPTE_SPECIAL_MASK) == SPTE_MMIO_MASK; + return (spte & shadow_mmio_mask) == shadow_mmio_value && + likely(shadow_mmio_value); } + +static inline bool is_shadow_present_pte(u64 pte) +{ + return !!(pte & SPTE_MMU_PRESENT_MASK); } static inline bool sp_ad_disabled(struct kvm_mmu_page *sp) @@ -177,25 +226,30 @@ static inline bool sp_ad_disabled(struct kvm_mmu_page *sp) static inline bool spte_ad_enabled(u64 spte) { - MMU_WARN_ON(is_mmio_spte(spte)); - return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_DISABLED_MASK; + MMU_WARN_ON(!is_shadow_present_pte(spte)); + return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_DISABLED_MASK; } static inline bool spte_ad_need_write_protect(u64 spte) { - MMU_WARN_ON(is_mmio_spte(spte)); - return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK; + MMU_WARN_ON(!is_shadow_present_pte(spte)); + /* + * This is benign for non-TDP SPTEs as SPTE_TDP_AD_ENABLED_MASK is '0', + * and non-TDP SPTEs will never set these bits. Optimize for 64-bit + * TDP and do the A/D type check unconditionally. + */ + return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_ENABLED_MASK; } static inline u64 spte_shadow_accessed_mask(u64 spte) { - MMU_WARN_ON(is_mmio_spte(spte)); + MMU_WARN_ON(!is_shadow_present_pte(spte)); return spte_ad_enabled(spte) ? shadow_accessed_mask : 0; } static inline u64 spte_shadow_dirty_mask(u64 spte) { - MMU_WARN_ON(is_mmio_spte(spte)); + MMU_WARN_ON(!is_shadow_present_pte(spte)); return spte_ad_enabled(spte) ? shadow_dirty_mask : 0; } @@ -204,11 +258,6 @@ static inline bool is_access_track_spte(u64 spte) return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0; } -static inline bool is_shadow_present_pte(u64 pte) -{ - return (pte != 0) && !is_mmio_spte(pte) && !is_removed_spte(pte); -} - static inline bool is_large_pte(u64 pte) { return pte & PT_PAGE_SIZE_MASK; @@ -246,8 +295,8 @@ static inline bool is_dirty_spte(u64 spte) static inline bool spte_can_locklessly_be_made_writable(u64 spte) { - return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) == - (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE); + return (spte & shadow_host_writable_mask) && + (spte & shadow_mmu_writable_mask); } static inline u64 get_mmio_spte_generation(u64 spte) diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c index e5f148106e20..b3ed302c1a35 100644 --- a/arch/x86/kvm/mmu/tdp_iter.c +++ b/arch/x86/kvm/mmu/tdp_iter.c @@ -21,6 +21,21 @@ static gfn_t round_gfn_for_level(gfn_t gfn, int level) } /* + * Return the TDP iterator to the root PT and allow it to continue its + * traversal over the paging structure from there.
+ */ +void tdp_iter_restart(struct tdp_iter *iter) +{ + iter->yielded_gfn = iter->next_last_level_gfn; + iter->level = iter->root_level; + + iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level); + tdp_iter_refresh_sptep(iter); + + iter->valid = true; +} + +/* * Sets a TDP iterator to walk a pre-order traversal of the paging structure * rooted at root_pt, starting with the walk to translate next_last_level_gfn. */ @@ -31,16 +46,12 @@ void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level, WARN_ON(root_level > PT64_ROOT_MAX_LEVEL); iter->next_last_level_gfn = next_last_level_gfn; - iter->yielded_gfn = iter->next_last_level_gfn; iter->root_level = root_level; iter->min_level = min_level; - iter->level = root_level; - iter->pt_path[iter->level - 1] = (tdp_ptep_t)root_pt; - - iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level); - tdp_iter_refresh_sptep(iter); + iter->pt_path[iter->root_level - 1] = (tdp_ptep_t)root_pt; + iter->as_id = kvm_mmu_page_as_id(sptep_to_sp(root_pt)); - iter->valid = true; + tdp_iter_restart(iter); } /* @@ -159,8 +170,3 @@ void tdp_iter_next(struct tdp_iter *iter) iter->valid = false; } -tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter) -{ - return iter->pt_path[iter->root_level - 1]; -} - diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h index 4cc177d75c4a..b1748b988d3a 100644 --- a/arch/x86/kvm/mmu/tdp_iter.h +++ b/arch/x86/kvm/mmu/tdp_iter.h @@ -36,6 +36,8 @@ struct tdp_iter { int min_level; /* The iterator's current level within the paging structure */ int level; + /* The address space ID, i.e. SMM vs. regular. */ + int as_id; /* A snapshot of the value at sptep */ u64 old_spte; /* @@ -62,6 +64,6 @@ tdp_ptep_t spte_to_child_pt(u64 pte, int level); void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level, int min_level, gfn_t next_last_level_gfn); void tdp_iter_next(struct tdp_iter *iter); -tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter); +void tdp_iter_restart(struct tdp_iter *iter); #endif /* __KVM_X86_MMU_TDP_ITER_H */ diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index d78915019b08..88f69a6cc492 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -27,6 +27,15 @@ void kvm_mmu_init_tdp_mmu(struct kvm *kvm) INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages); } +static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm, + bool shared) +{ + if (shared) + lockdep_assert_held_read(&kvm->mmu_lock); + else + lockdep_assert_held_write(&kvm->mmu_lock); +} + void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) { if (!kvm->arch.tdp_mmu_enabled) @@ -41,32 +50,85 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) rcu_barrier(); } -static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root) +static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, + gfn_t start, gfn_t end, bool can_yield, bool flush, + bool shared); + +static void tdp_mmu_free_sp(struct kvm_mmu_page *sp) { - if (kvm_mmu_put_root(kvm, root)) - kvm_tdp_mmu_free_root(kvm, root); + free_page((unsigned long)sp->spt); + kmem_cache_free(mmu_page_header_cache, sp); } -static inline bool tdp_mmu_next_root_valid(struct kvm *kvm, - struct kvm_mmu_page *root) +/* + * This is called through call_rcu in order to free TDP page table memory + * safely with respect to other kernel threads that may be operating on + * the memory. 
+ * By only accessing TDP MMU page table memory in an RCU read critical + * section, and freeing it after a grace period, lockless access to that + * memory won't use it after it is freed. + */ +static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head) { - lockdep_assert_held_write(&kvm->mmu_lock); + struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page, + rcu_head); - if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link)) - return false; + tdp_mmu_free_sp(sp); +} - kvm_mmu_get_root(kvm, root); - return true; +void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root, + bool shared) +{ + gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT); + + kvm_lockdep_assert_mmu_lock_held(kvm, shared); + if (!refcount_dec_and_test(&root->tdp_mmu_root_count)) + return; + + WARN_ON(!root->tdp_mmu_page); + + spin_lock(&kvm->arch.tdp_mmu_pages_lock); + list_del_rcu(&root->link); + spin_unlock(&kvm->arch.tdp_mmu_pages_lock); + + zap_gfn_range(kvm, root, 0, max_gfn, false, false, shared); + + call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback); } -static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm, - struct kvm_mmu_page *root) +/* + * Finds the next valid root after root (or the first valid root if root + * is NULL), takes a reference on it, and returns that next root. If root + * is not NULL, this thread should have already taken a reference on it, and + * that reference will be dropped. If no valid root is found, this + * function will return NULL. + */ +static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm, + struct kvm_mmu_page *prev_root, + bool shared) { struct kvm_mmu_page *next_root; - next_root = list_next_entry(root, link); - tdp_mmu_put_root(kvm, root); + rcu_read_lock(); + + if (prev_root) + next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots, + &prev_root->link, + typeof(*prev_root), link); + else + next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots, + typeof(*next_root), link); + + while (next_root && !kvm_tdp_mmu_get_root(kvm, next_root)) + next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots, + &next_root->link, typeof(*next_root), link); + + rcu_read_unlock(); + + if (prev_root) + kvm_tdp_mmu_put_root(kvm, prev_root, shared); + return next_root; } @@ -75,35 +137,24 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm, * This makes it safe to release the MMU lock and yield within the loop, but * if exiting the loop early, the caller must drop the reference to the most * recent root. (Unless keeping a live reference is desirable.) + * + * If shared is set, this function is operating under the MMU lock in read + * mode. In the unlikely event that this thread must free a root, the lock + * will be temporarily dropped and reacquired in write mode. 
*/ -#define for_each_tdp_mmu_root_yield_safe(_kvm, _root) \ - for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots, \ - typeof(*_root), link); \ - tdp_mmu_next_root_valid(_kvm, _root); \ - _root = tdp_mmu_next_root(_kvm, _root)) - -#define for_each_tdp_mmu_root(_kvm, _root) \ - list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link) - -static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, - gfn_t start, gfn_t end, bool can_yield); - -void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root) -{ - gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT); - - lockdep_assert_held_write(&kvm->mmu_lock); - - WARN_ON(root->root_count); - WARN_ON(!root->tdp_mmu_page); - - list_del(&root->link); - - zap_gfn_range(kvm, root, 0, max_gfn, false); - - free_page((unsigned long)root->spt); - kmem_cache_free(mmu_page_header_cache, root); -} +#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared) \ + for (_root = tdp_mmu_next_root(_kvm, NULL, _shared); \ + _root; \ + _root = tdp_mmu_next_root(_kvm, _root, _shared)) \ + if (kvm_mmu_page_as_id(_root) != _as_id) { \ + } else + +#define for_each_tdp_mmu_root(_kvm, _root, _as_id) \ + list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link, \ + lockdep_is_held_type(&kvm->mmu_lock, 0) || \ + lockdep_is_held(&kvm->arch.tdp_mmu_pages_lock)) \ + if (kvm_mmu_page_as_id(_root) != _as_id) { \ + } else static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu, int level) @@ -137,86 +188,46 @@ static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn, return sp; } -static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu) +hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu) { union kvm_mmu_page_role role; struct kvm *kvm = vcpu->kvm; struct kvm_mmu_page *root; - role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level); + lockdep_assert_held_write(&kvm->mmu_lock); - write_lock(&kvm->mmu_lock); + role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level); /* Check for an existing root before allocating a new one. */ - for_each_tdp_mmu_root(kvm, root) { - if (root->role.word == role.word) { - kvm_mmu_get_root(kvm, root); - write_unlock(&kvm->mmu_lock); - return root; - } + for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) { + if (root->role.word == role.word && + kvm_tdp_mmu_get_root(kvm, root)) + goto out; } root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level); - root->root_count = 1; - - list_add(&root->link, &kvm->arch.tdp_mmu_roots); - - write_unlock(&kvm->mmu_lock); - - return root; -} - -hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu) -{ - struct kvm_mmu_page *root; + refcount_set(&root->tdp_mmu_root_count, 1); - root = get_tdp_mmu_vcpu_root(vcpu); - if (!root) - return INVALID_PAGE; + spin_lock(&kvm->arch.tdp_mmu_pages_lock); + list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots); + spin_unlock(&kvm->arch.tdp_mmu_pages_lock); +out: return __pa(root->spt); } -static void tdp_mmu_free_sp(struct kvm_mmu_page *sp) -{ - free_page((unsigned long)sp->spt); - kmem_cache_free(mmu_page_header_cache, sp); -} - -/* - * This is called through call_rcu in order to free TDP page table memory - * safely with respect to other kernel threads that may be operating on - * the memory. - * By only accessing TDP MMU page table memory in an RCU read critical - * section, and freeing it after a grace period, lockless access to that - * memory won't use it after it is freed. 
- */ -static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head) -{ - struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page, - rcu_head); - - tdp_mmu_free_sp(sp); -} - static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, u64 old_spte, u64 new_spte, int level, bool shared); -static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp) -{ - return sp->role.smm ? 1 : 0; -} - static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level) { - bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte); - if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level)) return; if (is_accessed_spte(old_spte) && - (!is_accessed_spte(new_spte) || pfn_changed)) + (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) || + spte_to_pfn(old_spte) != spte_to_pfn(new_spte))) kvm_set_pfn_accessed(spte_to_pfn(old_spte)); } @@ -301,11 +312,16 @@ static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp, * * Given a page table that has been removed from the TDP paging structure, * iterates through the page table to clear SPTEs and free child page tables. + * + * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU + * protection. Since this thread removed it from the paging structure, + * this thread will be responsible for ensuring the page is freed. Hence the + * early rcu_dereferences in the function. */ -static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt, +static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt, bool shared) { - struct kvm_mmu_page *sp = sptep_to_sp(pt); + struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt)); int level = sp->role.level; gfn_t base_gfn = sp->gfn; u64 old_child_spte; @@ -318,7 +334,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt, tdp_mmu_unlink_page(kvm, sp, shared); for (i = 0; i < PT64_ENT_PER_PAGE; i++) { - sptep = pt + i; + sptep = rcu_dereference(pt) + i; gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1)); if (shared) { @@ -404,7 +420,7 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, * If this warning were to trigger it would indicate that there was a * missing MMU notifier or a race with some notifier handler. * A present, leaf SPTE should never be directly replaced with another - * present leaf SPTE pointing to a differnt PFN. A notifier handler + * present leaf SPTE pointing to a different PFN. A notifier handler * should be zapping the SPTE before the main MM's page table is * changed, or the SPTE should be zeroed, and the TLBs flushed by the * thread before replacement. @@ -418,7 +434,7 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, /* * Crash the host to prevent error propagation and guest data - * courruption. + * corruption. */ BUG(); } @@ -455,7 +471,7 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, if (was_leaf && is_dirty_spte(old_spte) && - (!is_dirty_spte(new_spte) || pfn_changed)) + (!is_present || !is_dirty_spte(new_spte) || pfn_changed)) kvm_set_pfn_dirty(spte_to_pfn(old_spte)); /* @@ -479,8 +495,9 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, } /* - * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically and handle the - * associated bookkeeping + * tdp_mmu_set_spte_atomic_no_dirty_log - Set a TDP MMU SPTE atomically + * and handle the associated bookkeeping, but do not mark the page dirty + * in KVM's dirty bitmaps. 
* * @kvm: kvm instance * @iter: a tdp_iter instance currently on the SPTE that should be set @@ -488,30 +505,39 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, * Returns: true if the SPTE was set, false if it was not. If false is returned, * this function will have no side-effects. */ -static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm, - struct tdp_iter *iter, - u64 new_spte) +static inline bool tdp_mmu_set_spte_atomic_no_dirty_log(struct kvm *kvm, + struct tdp_iter *iter, + u64 new_spte) { - u64 *root_pt = tdp_iter_root_pt(iter); - struct kvm_mmu_page *root = sptep_to_sp(root_pt); - int as_id = kvm_mmu_page_as_id(root); - lockdep_assert_held_read(&kvm->mmu_lock); /* * Do not change removed SPTEs. Only the thread that froze the SPTE * may modify it. */ - if (iter->old_spte == REMOVED_SPTE) + if (is_removed_spte(iter->old_spte)) return false; if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte, new_spte) != iter->old_spte) return false; - handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte, - iter->level, true); + __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte, + new_spte, iter->level, true); + handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level); + + return true; +} + +static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm, + struct tdp_iter *iter, + u64 new_spte) +{ + if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, new_spte)) + return false; + handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn, + iter->old_spte, new_spte, iter->level); return true; } @@ -533,12 +559,12 @@ static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm, /* * No other thread can overwrite the removed SPTE as they * must either wait on the MMU lock or use - * tdp_mmu_set_spte_atomic which will not overrite the + * tdp_mmu_set_spte_atomic which will not overwrite the * special removed SPTE value. No bookkeeping is needed * here since the SPTE is going from non-present * to non-present. */ - WRITE_ONCE(*iter->sptep, 0); + WRITE_ONCE(*rcu_dereference(iter->sptep), 0); return true; } @@ -564,10 +590,6 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, u64 new_spte, bool record_acc_track, bool record_dirty_log) { - tdp_ptep_t root_pt = tdp_iter_root_pt(iter); - struct kvm_mmu_page *root = sptep_to_sp(root_pt); - int as_id = kvm_mmu_page_as_id(root); - lockdep_assert_held_write(&kvm->mmu_lock); /* @@ -577,17 +599,17 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, * should be used. If operating under the MMU lock in write mode, the * use of the removed SPTE should not be necessary. */ - WARN_ON(iter->old_spte == REMOVED_SPTE); + WARN_ON(is_removed_spte(iter->old_spte)); WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte); - __handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte, - iter->level, false); + __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte, + new_spte, iter->level, false); if (record_acc_track) handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level); if (record_dirty_log) - handle_changed_spte_dirty_log(kvm, as_id, iter->gfn, + handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn, iter->old_spte, new_spte, iter->level); } @@ -642,7 +664,8 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm, * Return false if a yield was not needed. 
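In tdp_mmu_set_spte_atomic_no_dirty_log() above, cmpxchg64() only installs the new SPTE if the entry still holds the value this thread last read; otherwise the function returns false with no side effects, and the caller re-reads the SPTE and retries. A minimal stand-alone sketch of that contract, using C11 atomics and invented names rather than KVM's iterator and bookkeeping:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Publish new_val only if *entry still equals the value the caller
	 * last observed; returning false tells the caller it lost a race. */
	static bool set_entry_atomic(_Atomic uint64_t *entry,
				     uint64_t old_val, uint64_t new_val)
	{
		return atomic_compare_exchange_strong(entry, &old_val, new_val);
	}

	/* Caller-side retry loop, mirroring the re-read-and-goto-retry
	 * pattern used by the zap and write-protect paths in this patch. */
	static void zap_entry(_Atomic uint64_t *entry)
	{
		uint64_t old = atomic_load(entry);

		while (!set_entry_atomic(entry, old, 0))
			old = atomic_load(entry);
	}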
*/ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm, - struct tdp_iter *iter, bool flush) + struct tdp_iter *iter, bool flush, + bool shared) { /* Ensure forward progress has been made before yielding. */ if (iter->next_last_level_gfn == iter->yielded_gfn) @@ -654,14 +677,16 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm, if (flush) kvm_flush_remote_tlbs(kvm); - cond_resched_rwlock_write(&kvm->mmu_lock); + if (shared) + cond_resched_rwlock_read(&kvm->mmu_lock); + else + cond_resched_rwlock_write(&kvm->mmu_lock); + rcu_read_lock(); WARN_ON(iter->gfn > iter->next_last_level_gfn); - tdp_iter_start(iter, iter->pt_path[iter->root_level - 1], - iter->root_level, iter->min_level, - iter->next_last_level_gfn); + tdp_iter_restart(iter); return true; } @@ -674,24 +699,33 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm, * non-root pages mapping GFNs strictly within that range. Returns true if * SPTEs have been cleared and a TLB flush is needed before releasing the * MMU lock. + * * If can_yield is true, will release the MMU lock and reschedule if the * scheduler needs the CPU or there is contention on the MMU lock. If this * function cannot yield, it will not release the MMU lock or reschedule and * the caller must ensure it does not supply too large a GFN range, or the * operation can cause a soft lockup. + * + * If shared is true, this thread holds the MMU lock in read mode and must + * account for the possibility that other threads are modifying the paging + * structures concurrently. If shared is false, this thread should hold the + * MMU lock in write mode. */ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, - gfn_t start, gfn_t end, bool can_yield) + gfn_t start, gfn_t end, bool can_yield, bool flush, + bool shared) { struct tdp_iter iter; - bool flush_needed = false; + + kvm_lockdep_assert_mmu_lock_held(kvm, shared); rcu_read_lock(); tdp_root_for_each_pte(iter, root, start, end) { +retry: if (can_yield && - tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) { - flush_needed = false; + tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) { + flush = false; continue; } @@ -708,12 +742,21 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, !is_last_spte(iter.old_spte, iter.level)) continue; - tdp_mmu_set_spte(kvm, &iter, 0); - flush_needed = true; + if (!shared) { + tdp_mmu_set_spte(kvm, &iter, 0); + flush = true; + } else if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) { + /* + * The iter must explicitly re-read the SPTE because + * the atomic cmpxchg failed. + */ + iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep)); + goto retry; + } } rcu_read_unlock(); - return flush_needed; + return flush; } /* @@ -721,14 +764,21 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, * non-root pages mapping GFNs strictly within that range. Returns true if * SPTEs have been cleared and a TLB flush is needed before releasing the * MMU lock. + * + * If shared is true, this thread holds the MMU lock in read mode and must + * account for the possibility that other threads are modifying the paging + * structures concurrently. If shared is false, this thread should hold the + * MMU in write mode. 
*/ -bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end) +bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start, + gfn_t end, bool can_yield, bool flush, + bool shared) { struct kvm_mmu_page *root; - bool flush = false; - for_each_tdp_mmu_root_yield_safe(kvm, root) - flush |= zap_gfn_range(kvm, root, start, end, true); + for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, shared) + flush = zap_gfn_range(kvm, root, start, end, can_yield, flush, + shared); return flush; } @@ -736,13 +786,115 @@ bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end) void kvm_tdp_mmu_zap_all(struct kvm *kvm) { gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT); - bool flush; + bool flush = false; + int i; + + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) + flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn, + flush, false); - flush = kvm_tdp_mmu_zap_gfn_range(kvm, 0, max_gfn); if (flush) kvm_flush_remote_tlbs(kvm); } +static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm, + struct kvm_mmu_page *prev_root) +{ + struct kvm_mmu_page *next_root; + + if (prev_root) + next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots, + &prev_root->link, + typeof(*prev_root), link); + else + next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots, + typeof(*next_root), link); + + while (next_root && !(next_root->role.invalid && + refcount_read(&next_root->tdp_mmu_root_count))) + next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots, + &next_root->link, + typeof(*next_root), link); + + return next_root; +} + +/* + * Since kvm_tdp_mmu_zap_all_fast has acquired a reference to each + * invalidated root, they will not be freed until this function drops the + * reference. Before dropping that reference, tear down the paging + * structure so that whichever thread does drop the last reference + * only has to do a trivial amount of work. Since the roots are invalid, + * no new SPTEs should be created under them. + */ +void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm) +{ + gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT); + struct kvm_mmu_page *next_root; + struct kvm_mmu_page *root; + bool flush = false; + + lockdep_assert_held_read(&kvm->mmu_lock); + + rcu_read_lock(); + + root = next_invalidated_root(kvm, NULL); + + while (root) { + next_root = next_invalidated_root(kvm, root); + + rcu_read_unlock(); + + flush = zap_gfn_range(kvm, root, 0, max_gfn, true, flush, + true); + + /* + * Put the reference acquired in + * kvm_tdp_mmu_invalidate_roots + */ + kvm_tdp_mmu_put_root(kvm, root, true); + + root = next_root; + + rcu_read_lock(); + } + + rcu_read_unlock(); + + if (flush) + kvm_flush_remote_tlbs(kvm); +} + +/* + * Mark each TDP MMU root as invalid so that other threads + * will drop their references and allow the root count to + * go to 0. + * + * Also take a reference on all roots so that this thread + * can do the bulk of the work required to free the roots + * once they are invalidated. Without this reference, a + * vCPU thread might drop the last reference to a root and + * get stuck with tearing down the entire paging structure. + * + * Roots which have a zero refcount should be skipped as + * they're already being torn down. + * Already invalid roots should be referenced again so that + * they aren't freed before kvm_tdp_mmu_zap_all_fast is + * done with them. + * + * This has essentially the same effect for the TDP MMU + * as updating mmu_valid_gen does for the shadow MMU. 
+ */ +void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm) +{ + struct kvm_mmu_page *root; + + lockdep_assert_held_write(&kvm->mmu_lock); + list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) + if (refcount_inc_not_zero(&root->tdp_mmu_root_count)) + root->role.invalid = true; +} + /* * Installs a last-level SPTE to handle a TDP page fault. * (NPT/EPT violation/misconfiguration) @@ -785,12 +937,11 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write, trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn, new_spte); ret = RET_PF_EMULATE; - } else + } else { trace_kvm_mmu_set_spte(iter->level, iter->gfn, rcu_dereference(iter->sptep)); + } - trace_kvm_mmu_set_spte(iter->level, iter->gfn, - rcu_dereference(iter->sptep)); if (!prefault) vcpu->stat.pf_fixed++; @@ -890,199 +1041,139 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, return ret; } -static __always_inline int -kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, - unsigned long start, - unsigned long end, - unsigned long data, - int (*handler)(struct kvm *kvm, - struct kvm_memory_slot *slot, - struct kvm_mmu_page *root, - gfn_t start, - gfn_t end, - unsigned long data)) +bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, + bool flush) { - struct kvm_memslots *slots; - struct kvm_memory_slot *memslot; struct kvm_mmu_page *root; - int ret = 0; - int as_id; - - for_each_tdp_mmu_root_yield_safe(kvm, root) { - as_id = kvm_mmu_page_as_id(root); - slots = __kvm_memslots(kvm, as_id); - kvm_for_each_memslot(memslot, slots) { - unsigned long hva_start, hva_end; - gfn_t gfn_start, gfn_end; - - hva_start = max(start, memslot->userspace_addr); - hva_end = min(end, memslot->userspace_addr + - (memslot->npages << PAGE_SHIFT)); - if (hva_start >= hva_end) - continue; - /* - * {gfn(page) | page intersects with [hva_start, hva_end)} = - * {gfn_start, gfn_start+1, ..., gfn_end-1}. - */ - gfn_start = hva_to_gfn_memslot(hva_start, memslot); - gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); - ret |= handler(kvm, memslot, root, gfn_start, - gfn_end, data); - } - } + for_each_tdp_mmu_root(kvm, root, range->slot->as_id) + flush |= zap_gfn_range(kvm, root, range->start, range->end, + range->may_block, flush, false); - return ret; + return flush; } -static int zap_gfn_range_hva_wrapper(struct kvm *kvm, - struct kvm_memory_slot *slot, - struct kvm_mmu_page *root, gfn_t start, - gfn_t end, unsigned long unused) -{ - return zap_gfn_range(kvm, root, start, end, false); -} +typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter, + struct kvm_gfn_range *range); -int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start, - unsigned long end) +static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm, + struct kvm_gfn_range *range, + tdp_handler_t handler) { - return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0, - zap_gfn_range_hva_wrapper); + struct kvm_mmu_page *root; + struct tdp_iter iter; + bool ret = false; + + rcu_read_lock(); + + /* + * Don't support rescheduling, none of the MMU notifiers that funnel + * into this helper allow blocking; it'd be dead, wasteful code. + */ + for_each_tdp_mmu_root(kvm, root, range->slot->as_id) { + tdp_root_for_each_leaf_pte(iter, root, range->start, range->end) + ret |= handler(kvm, &iter, range); + } + + rcu_read_unlock(); + + return ret; } /* * Mark the SPTEs range of GFNs [start, end) unaccessed and return non-zero * if any of the GFNs in the range have been accessed. 
*/ -static int age_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot, - struct kvm_mmu_page *root, gfn_t start, gfn_t end, - unsigned long unused) +static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter, + struct kvm_gfn_range *range) { - struct tdp_iter iter; - int young = 0; u64 new_spte = 0; - rcu_read_lock(); + /* If we have a non-accessed entry we don't need to change the pte. */ + if (!is_accessed_spte(iter->old_spte)) + return false; - tdp_root_for_each_leaf_pte(iter, root, start, end) { + new_spte = iter->old_spte; + + if (spte_ad_enabled(new_spte)) { + new_spte &= ~shadow_accessed_mask; + } else { /* - * If we have a non-accessed entry we don't need to change the - * pte. + * Capture the dirty status of the page, so that it doesn't get + * lost when the SPTE is marked for access tracking. */ - if (!is_accessed_spte(iter.old_spte)) - continue; - - new_spte = iter.old_spte; - - if (spte_ad_enabled(new_spte)) { - clear_bit((ffs(shadow_accessed_mask) - 1), - (unsigned long *)&new_spte); - } else { - /* - * Capture the dirty status of the page, so that it doesn't get - * lost when the SPTE is marked for access tracking. - */ - if (is_writable_pte(new_spte)) - kvm_set_pfn_dirty(spte_to_pfn(new_spte)); - - new_spte = mark_spte_for_access_track(new_spte); - } - new_spte &= ~shadow_dirty_mask; - - tdp_mmu_set_spte_no_acc_track(kvm, &iter, new_spte); - young = 1; + if (is_writable_pte(new_spte)) + kvm_set_pfn_dirty(spte_to_pfn(new_spte)); - trace_kvm_age_page(iter.gfn, iter.level, slot, young); + new_spte = mark_spte_for_access_track(new_spte); } - rcu_read_unlock(); + tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte); - return young; + return true; } -int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start, - unsigned long end) +bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) { - return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0, - age_gfn_range); + return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range); } -static int test_age_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, - struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused, - unsigned long unused2) +static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter, + struct kvm_gfn_range *range) { - struct tdp_iter iter; - - tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) - if (is_accessed_spte(iter.old_spte)) - return 1; - - return 0; + return is_accessed_spte(iter->old_spte); } -int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva) +bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - return kvm_tdp_mmu_handle_hva_range(kvm, hva, hva + 1, 0, - test_age_gfn); + return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn); } -/* - * Handle the changed_pte MMU notifier for the TDP MMU. - * data is a pointer to the new pte_t mapping the HVA specified by the MMU - * notifier. - * Returns non-zero if a flush is needed before releasing the MMU lock. - */ -static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot, - struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused, - unsigned long data) +static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter, + struct kvm_gfn_range *range) { - struct tdp_iter iter; - pte_t *ptep = (pte_t *)data; - kvm_pfn_t new_pfn; u64 new_spte; - int need_flush = 0; - - rcu_read_lock(); - WARN_ON(pte_huge(*ptep)); + /* Huge pages aren't expected to be modified without first being zapped. 
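Stripped of SPTE encoding and dirty-log details, the reworked age_gfn_range() above reduces to: report whether the entry was referenced since the last scan and clear the mark so the next scan starts fresh. A toy version of that core step, with a hypothetical bit position:

	#include <stdbool.h>
	#include <stdint.h>

	#define ENTRY_ACCESSED (UINT64_C(1) << 5)	/* hypothetical accessed bit */

	/* Returns true if the entry was used since the previous scan. The real
	 * code additionally preserves dirty information before switching a
	 * non-A/D entry to access tracking, which this sketch leaves out. */
	static bool age_entry(uint64_t *entry)
	{
		bool young = *entry & ENTRY_ACCESSED;

		*entry &= ~ENTRY_ACCESSED;
		return young;
	}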
*/ + WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end); - new_pfn = pte_pfn(*ptep); - - tdp_root_for_each_pte(iter, root, gfn, gfn + 1) { - if (iter.level != PG_LEVEL_4K) - continue; - - if (!is_shadow_present_pte(iter.old_spte)) - break; - - tdp_mmu_set_spte(kvm, &iter, 0); - - kvm_flush_remote_tlbs_with_address(kvm, iter.gfn, 1); + if (iter->level != PG_LEVEL_4K || + !is_shadow_present_pte(iter->old_spte)) + return false; - if (!pte_write(*ptep)) { - new_spte = kvm_mmu_changed_pte_notifier_make_spte( - iter.old_spte, new_pfn); + /* + * Note, when changing a read-only SPTE, it's not strictly necessary to + * zero the SPTE before setting the new PFN, but doing so preserves the + * invariant that the PFN of a present * leaf SPTE can never change. + * See __handle_changed_spte(). + */ + tdp_mmu_set_spte(kvm, iter, 0); - tdp_mmu_set_spte(kvm, &iter, new_spte); - } + if (!pte_write(range->pte)) { + new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte, + pte_pfn(range->pte)); - need_flush = 1; + tdp_mmu_set_spte(kvm, iter, new_spte); } - if (need_flush) - kvm_flush_remote_tlbs_with_address(kvm, gfn, 1); - - rcu_read_unlock(); - - return 0; + return true; } -int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address, - pte_t *host_ptep) +/* + * Handle the changed_pte MMU notifier for the TDP MMU. + * data is a pointer to the new pte_t mapping the HVA specified by the MMU + * notifier. + * Returns non-zero if a flush is needed before releasing the MMU lock. + */ +bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - return kvm_tdp_mmu_handle_hva_range(kvm, address, address + 1, - (unsigned long)host_ptep, - set_tdp_spte); + bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn); + + /* FIXME: return 'flush' instead of flushing here. */ + if (flush) + kvm_flush_remote_tlbs_with_address(kvm, range->start, 1); + + return false; } /* @@ -1103,7 +1194,8 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, for_each_tdp_pte_min_level(iter, root->spt, root->role.level, min_level, start, end) { - if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) +retry: + if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) continue; if (!is_shadow_present_pte(iter.old_spte) || @@ -1113,7 +1205,15 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, new_spte = iter.old_spte & ~PT_WRITABLE_MASK; - tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); + if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter, + new_spte)) { + /* + * The iter must explicitly re-read the SPTE because + * the atomic cmpxchg failed. 
+ */ + iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep)); + goto retry; + } spte_set = true; } @@ -1130,17 +1230,13 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot, int min_level) { struct kvm_mmu_page *root; - int root_as_id; bool spte_set = false; - for_each_tdp_mmu_root_yield_safe(kvm, root) { - root_as_id = kvm_mmu_page_as_id(root); - if (root_as_id != slot->as_id) - continue; + lockdep_assert_held_read(&kvm->mmu_lock); + for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn, slot->base_gfn + slot->npages, min_level); - } return spte_set; } @@ -1162,7 +1258,8 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, rcu_read_lock(); tdp_root_for_each_leaf_pte(iter, root, start, end) { - if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) +retry: + if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) continue; if (spte_ad_need_write_protect(iter.old_spte)) { @@ -1177,7 +1274,15 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, continue; } - tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); + if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter, + new_spte)) { + /* + * The iter must explicitly re-read the SPTE because + * the atomic cmpxchg failed. + */ + iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep)); + goto retry; + } spte_set = true; } @@ -1195,17 +1300,13 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot) { struct kvm_mmu_page *root; - int root_as_id; bool spte_set = false; - for_each_tdp_mmu_root_yield_safe(kvm, root) { - root_as_id = kvm_mmu_page_as_id(root); - if (root_as_id != slot->as_id) - continue; + lockdep_assert_held_read(&kvm->mmu_lock); + for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn, slot->base_gfn + slot->npages); - } return spte_set; } @@ -1267,37 +1368,32 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, bool wrprot) { struct kvm_mmu_page *root; - int root_as_id; lockdep_assert_held_write(&kvm->mmu_lock); - for_each_tdp_mmu_root(kvm, root) { - root_as_id = kvm_mmu_page_as_id(root); - if (root_as_id != slot->as_id) - continue; - + for_each_tdp_mmu_root(kvm, root, slot->as_id) clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot); - } } /* * Clear leaf entries which could be replaced by large mappings, for * GFNs within the slot. */ -static void zap_collapsible_spte_range(struct kvm *kvm, +static bool zap_collapsible_spte_range(struct kvm *kvm, struct kvm_mmu_page *root, - struct kvm_memory_slot *slot) + const struct kvm_memory_slot *slot, + bool flush) { gfn_t start = slot->base_gfn; gfn_t end = start + slot->npages; struct tdp_iter iter; kvm_pfn_t pfn; - bool spte_set = false; rcu_read_lock(); tdp_root_for_each_pte(iter, root, start, end) { - if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) { - spte_set = false; +retry: + if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) { + flush = false; continue; } @@ -1311,38 +1407,43 @@ static void zap_collapsible_spte_range(struct kvm *kvm, pfn, PG_LEVEL_NUM)) continue; - tdp_mmu_set_spte(kvm, &iter, 0); - - spte_set = true; + if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) { + /* + * The iter must explicitly re-read the SPTE because + * the atomic cmpxchg failed. 
+ */ + iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep)); + goto retry; + } + flush = true; } rcu_read_unlock(); - if (spte_set) - kvm_flush_remote_tlbs(kvm); + + return flush; } /* * Clear non-leaf entries (and free associated page tables) which could * be replaced by large mappings, for GFNs within the slot. */ -void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, - struct kvm_memory_slot *slot) +bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, + const struct kvm_memory_slot *slot, + bool flush) { struct kvm_mmu_page *root; - int root_as_id; - for_each_tdp_mmu_root_yield_safe(kvm, root) { - root_as_id = kvm_mmu_page_as_id(root); - if (root_as_id != slot->as_id) - continue; + lockdep_assert_held_read(&kvm->mmu_lock); - zap_collapsible_spte_range(kvm, root, slot); - } + for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) + flush = zap_collapsible_spte_range(kvm, root, slot, flush); + + return flush; } /* * Removes write access on the last level SPTE mapping this GFN and unsets the - * SPTE_MMU_WRITABLE bit to ensure future writes continue to be intercepted. + * MMU-writable bit to ensure future writes continue to be intercepted. * Returns true if an SPTE was set and a TLB flush is needed. */ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, @@ -1359,7 +1460,7 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, break; new_spte = iter.old_spte & - ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE); + ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask); tdp_mmu_set_spte(kvm, &iter, new_spte); spte_set = true; @@ -1372,24 +1473,19 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, /* * Removes write access on the last level SPTE mapping this GFN and unsets the - * SPTE_MMU_WRITABLE bit to ensure future writes continue to be intercepted. + * MMU-writable bit to ensure future writes continue to be intercepted. * Returns true if an SPTE was set and a TLB flush is needed. 
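write_protect_gfn() above clears both the hardware-writable bit and the MMU-writable software bit, and the return value tells the caller whether a TLB flush is still needed because stale translations may continue to permit writes. A compact sketch of that clear-and-report shape, with made-up bit names:

	#include <stdbool.h>
	#include <stdint.h>

	#define ENTRY_WRITABLE		(UINT64_C(1) << 1)	/* hypothetical h/w bit */
	#define ENTRY_MMU_WRITABLE	(UINT64_C(1) << 2)	/* hypothetical s/w bit */

	/* Returns true if the entry actually changed, i.e. the caller must
	 * flush the TLB before it can rely on writes being intercepted. */
	static bool write_protect_entry(uint64_t *entry)
	{
		uint64_t old = *entry;

		*entry = old & ~(ENTRY_WRITABLE | ENTRY_MMU_WRITABLE);
		return *entry != old;
	}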
*/ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn) { struct kvm_mmu_page *root; - int root_as_id; bool spte_set = false; lockdep_assert_held_write(&kvm->mmu_lock); - for_each_tdp_mmu_root(kvm, root) { - root_as_id = kvm_mmu_page_as_id(root); - if (root_as_id != slot->as_id) - continue; - + for_each_tdp_mmu_root(kvm, root, slot->as_id) spte_set |= write_protect_gfn(kvm, root, gfn); - } + return spte_set; } diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h index 3b761c111bff..5fdf63090451 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.h +++ b/arch/x86/kvm/mmu/tdp_mmu.h @@ -6,24 +6,60 @@ #include <linux/kvm_host.h> hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu); -void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root); -bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end); +__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm *kvm, + struct kvm_mmu_page *root) +{ + if (root->role.invalid) + return false; + + return refcount_inc_not_zero(&root->tdp_mmu_root_count); +} + +void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root, + bool shared); + +bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start, + gfn_t end, bool can_yield, bool flush, + bool shared); +static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, + gfn_t start, gfn_t end, bool flush, + bool shared) +{ + return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush, + shared); +} +static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level); + + /* + * Don't allow yielding, as the caller may have a flush pending. Note, + * if mmu_lock is held for write, zapping will never yield in this case, + * but explicitly disallow it for safety. The TDP MMU does not yield + * until it has made forward progress (steps sideways), and when zapping + * a single shadow page that it's guaranteed to see (thus the mmu_lock + * requirement), its "step sideways" will always step beyond the bounds + * of the shadow page's gfn range and stop iterating before yielding. 
+ */ + lockdep_assert_held_write(&kvm->mmu_lock); + return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp), + sp->gfn, end, false, false, false); +} + void kvm_tdp_mmu_zap_all(struct kvm *kvm); +void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm); +void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm); int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, int map_writable, int max_level, kvm_pfn_t pfn, bool prefault); -int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start, - unsigned long end); - -int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start, - unsigned long end); -int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva); - -int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address, - pte_t *host_ptep); +bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, + bool flush); +bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); +bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); +bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range); bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot, int min_level); @@ -33,8 +69,9 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, unsigned long mask, bool wrprot); -void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, - struct kvm_memory_slot *slot); +bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, + const struct kvm_memory_slot *slot, + bool flush); bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn); diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index 7b30bc967af3..67e753edfa22 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -103,7 +103,7 @@ static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu, /* returns general purpose PMC with the specified MSR. Note that it can be * used for both PERFCTRn and EVNTSELn; that is why it accepts base as a - * paramenter to tell them apart. + * parameter to tell them apart. */ static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr, u32 base) diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h new file mode 100644 index 000000000000..a19d473d0184 --- /dev/null +++ b/arch/x86/kvm/reverse_cpuid.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ARCH_X86_KVM_REVERSE_CPUID_H +#define ARCH_X86_KVM_REVERSE_CPUID_H + +#include <uapi/asm/kvm.h> +#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> + +/* + * Hardware-defined CPUID leafs that are scattered in the kernel, but need to + * be directly used by KVM. Note, these word values conflict with the kernel's + * "bug" caps, but KVM doesn't use those. + */ +enum kvm_only_cpuid_leafs { + CPUID_12_EAX = NCAPINTS, + NR_KVM_CPU_CAPS, + + NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS, +}; + +#define KVM_X86_FEATURE(w, f) ((w)*32 + (f)) + +/* Intel-defined SGX sub-features, CPUID level 0x12 (EAX). 
*/ +#define KVM_X86_FEATURE_SGX1 KVM_X86_FEATURE(CPUID_12_EAX, 0) +#define KVM_X86_FEATURE_SGX2 KVM_X86_FEATURE(CPUID_12_EAX, 1) + +struct cpuid_reg { + u32 function; + u32 index; + int reg; +}; + +static const struct cpuid_reg reverse_cpuid[] = { + [CPUID_1_EDX] = { 1, 0, CPUID_EDX}, + [CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX}, + [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX}, + [CPUID_1_ECX] = { 1, 0, CPUID_ECX}, + [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX}, + [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX}, + [CPUID_7_0_EBX] = { 7, 0, CPUID_EBX}, + [CPUID_D_1_EAX] = { 0xd, 1, CPUID_EAX}, + [CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX}, + [CPUID_6_EAX] = { 6, 0, CPUID_EAX}, + [CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX}, + [CPUID_7_ECX] = { 7, 0, CPUID_ECX}, + [CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX}, + [CPUID_7_EDX] = { 7, 0, CPUID_EDX}, + [CPUID_7_1_EAX] = { 7, 1, CPUID_EAX}, + [CPUID_12_EAX] = {0x00000012, 0, CPUID_EAX}, + [CPUID_8000_001F_EAX] = {0x8000001f, 0, CPUID_EAX}, +}; + +/* + * Reverse CPUID and its derivatives can only be used for hardware-defined + * feature words, i.e. words whose bits directly correspond to a CPUID leaf. + * Retrieving a feature bit or masking guest CPUID from a Linux-defined word + * is nonsensical as the bit number/mask is an arbitrary software-defined value + * and can't be used by KVM to query/control guest capabilities. And obviously + * the leaf being queried must have an entry in the lookup table. + */ +static __always_inline void reverse_cpuid_check(unsigned int x86_leaf) +{ + BUILD_BUG_ON(x86_leaf == CPUID_LNX_1); + BUILD_BUG_ON(x86_leaf == CPUID_LNX_2); + BUILD_BUG_ON(x86_leaf == CPUID_LNX_3); + BUILD_BUG_ON(x86_leaf == CPUID_LNX_4); + BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid)); + BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0); +} + +/* + * Translate feature bits that are scattered in the kernel's cpufeatures word + * into KVM feature words that align with hardware's definitions. + */ +static __always_inline u32 __feature_translate(int x86_feature) +{ + if (x86_feature == X86_FEATURE_SGX1) + return KVM_X86_FEATURE_SGX1; + else if (x86_feature == X86_FEATURE_SGX2) + return KVM_X86_FEATURE_SGX2; + + return x86_feature; +} + +static __always_inline u32 __feature_leaf(int x86_feature) +{ + return __feature_translate(x86_feature) / 32; +} + +/* + * Retrieve the bit mask from an X86_FEATURE_* definition. Features contain + * the hardware defined bit number (stored in bits 4:0) and a software defined + * "word" (stored in bits 31:5). The word is used to index into arrays of + * bit masks that hold the per-cpu feature capabilities, e.g. this_cpu_has(). 
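The comment above describes the X86_FEATURE_* encoding that reverse_cpuid.h relies on: the low five bits carry the bit position inside a 32-bit CPUID register, and the remaining bits select the feature word that maps to a leaf/register pair. A tiny runnable illustration of that split, using an invented feature value rather than a real cpufeatures constant:

	#include <stdint.h>
	#include <stdio.h>

	/* Toy model of the word/bit packing: value / 32 picks the feature
	 * word, the low five bits give the mask within that word. */
	#define FEATURE(word, bit)	((word) * 32 + (bit))
	#define feature_word(f)		((f) / 32)
	#define feature_mask(f)		(UINT32_C(1) << ((f) & 31))

	int main(void)
	{
		unsigned int f = FEATURE(16, 21);	/* invented example feature */

		printf("word %u, mask 0x%08x\n", feature_word(f), feature_mask(f));
		return 0;
	}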
+ */ +static __always_inline u32 __feature_bit(int x86_feature) +{ + x86_feature = __feature_translate(x86_feature); + + reverse_cpuid_check(x86_feature / 32); + return 1 << (x86_feature & 31); +} + +#define feature_bit(name) __feature_bit(X86_FEATURE_##name) + +static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature) +{ + unsigned int x86_leaf = __feature_leaf(x86_feature); + + reverse_cpuid_check(x86_leaf); + return reverse_cpuid[x86_leaf]; +} + +static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, + u32 reg) +{ + switch (reg) { + case CPUID_EAX: + return &entry->eax; + case CPUID_EBX: + return &entry->ebx; + case CPUID_ECX: + return &entry->ecx; + case CPUID_EDX: + return &entry->edx; + default: + BUILD_BUG(); + return NULL; + } +} + +static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, + unsigned int x86_feature) +{ + const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature); + + return __cpuid_entry_get_reg(entry, cpuid.reg); +} + +static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry, + unsigned int x86_feature) +{ + u32 *reg = cpuid_entry_get_reg(entry, x86_feature); + + return *reg & __feature_bit(x86_feature); +} + +static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry, + unsigned int x86_feature) +{ + return cpuid_entry_get(entry, x86_feature); +} + +static __always_inline void cpuid_entry_clear(struct kvm_cpuid_entry2 *entry, + unsigned int x86_feature) +{ + u32 *reg = cpuid_entry_get_reg(entry, x86_feature); + + *reg &= ~__feature_bit(x86_feature); +} + +static __always_inline void cpuid_entry_set(struct kvm_cpuid_entry2 *entry, + unsigned int x86_feature) +{ + u32 *reg = cpuid_entry_get_reg(entry, x86_feature); + + *reg |= __feature_bit(x86_feature); +} + +static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry, + unsigned int x86_feature, + bool set) +{ + u32 *reg = cpuid_entry_get_reg(entry, x86_feature); + + /* + * Open coded instead of using cpuid_entry_{clear,set}() to coerce the + * compiler into using CMOV instead of Jcc when possible. 
+ */ + if (set) + *reg |= __feature_bit(x86_feature); + else + *reg &= ~__feature_bit(x86_feature); +} + +#endif /* ARCH_X86_KVM_REVERSE_CPUID_H */ diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 78bdcfac4e40..712b4e0de481 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -270,7 +270,7 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu) if (id >= AVIC_MAX_PHYSICAL_ID_COUNT) return -EINVAL; - if (!svm->vcpu.arch.apic->regs) + if (!vcpu->arch.apic->regs) return -EINVAL; if (kvm_apicv_activated(vcpu->kvm)) { @@ -281,7 +281,7 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu) return ret; } - svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs); + svm->avic_backing_page = virt_to_page(vcpu->arch.apic->regs); /* Setting AVIC backing page address in the phy APIC ID table */ entry = avic_get_physical_id_entry(vcpu, id); @@ -315,15 +315,16 @@ static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source, } } -int avic_incomplete_ipi_interception(struct vcpu_svm *svm) +int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu) { + struct vcpu_svm *svm = to_svm(vcpu); u32 icrh = svm->vmcb->control.exit_info_1 >> 32; u32 icrl = svm->vmcb->control.exit_info_1; u32 id = svm->vmcb->control.exit_info_2 >> 32; u32 index = svm->vmcb->control.exit_info_2 & 0xFF; - struct kvm_lapic *apic = svm->vcpu.arch.apic; + struct kvm_lapic *apic = vcpu->arch.apic; - trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index); + trace_kvm_avic_incomplete_ipi(vcpu->vcpu_id, icrh, icrl, id, index); switch (id) { case AVIC_IPI_FAILURE_INVALID_INT_TYPE: @@ -347,11 +348,11 @@ int avic_incomplete_ipi_interception(struct vcpu_svm *svm) * set the appropriate IRR bits on the valid target * vcpus. So, we just need to kick the appropriate vcpu. 
*/ - avic_kick_target_vcpus(svm->vcpu.kvm, apic, icrl, icrh); + avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh); break; case AVIC_IPI_FAILURE_INVALID_TARGET: WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n", - index, svm->vcpu.vcpu_id, icrh, icrl); + index, vcpu->vcpu_id, icrh, icrl); break; case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE: WARN_ONCE(1, "Invalid backing page\n"); @@ -539,8 +540,9 @@ static bool is_avic_unaccelerated_access_trap(u32 offset) return ret; } -int avic_unaccelerated_access_interception(struct vcpu_svm *svm) +int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu) { + struct vcpu_svm *svm = to_svm(vcpu); int ret = 0; u32 offset = svm->vmcb->control.exit_info_1 & AVIC_UNACCEL_ACCESS_OFFSET_MASK; @@ -550,7 +552,7 @@ int avic_unaccelerated_access_interception(struct vcpu_svm *svm) AVIC_UNACCEL_ACCESS_WRITE_MASK; bool trap = is_avic_unaccelerated_access_trap(offset); - trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset, + trace_kvm_avic_unaccelerated_access(vcpu->vcpu_id, offset, trap, write, vector); if (trap) { /* Handling Trap */ @@ -558,7 +560,7 @@ int avic_unaccelerated_access_interception(struct vcpu_svm *svm) ret = avic_unaccel_trap_write(svm); } else { /* Handling Fault */ - ret = kvm_emulate_instruction(&svm->vcpu, 0); + ret = kvm_emulate_instruction(vcpu, 0); } return ret; @@ -572,7 +574,7 @@ int avic_init_vcpu(struct vcpu_svm *svm) if (!avic || !irqchip_in_kernel(vcpu->kvm)) return 0; - ret = avic_init_backing_page(&svm->vcpu); + ret = avic_init_backing_page(vcpu); if (ret) return ret; @@ -727,7 +729,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) struct amd_svm_iommu_ir *ir; /** - * In some cases, the existing irte is updaed and re-set, + * In some cases, the existing irte is updated and re-set, * so we need to check here if it's already been * added * to the ir_list. */ @@ -838,7 +840,7 @@ int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq, * Here, we setup with legacy mode in the following cases: * 1. When cannot target interrupt to a specific vcpu. * 2. Unsetting posted interrupt. - * 3. APIC virtialization is disabled for the vcpu. + * 3. APIC virtualization is disabled for the vcpu. * 4. 
IRQ has incompatible delivery mode (SMI, INIT, etc) */ if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set && diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 35891d9a1099..540d43ba2cf4 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -29,6 +29,8 @@ #include "lapic.h" #include "svm.h" +#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK + static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu, struct x86_exception *fault) { @@ -92,12 +94,12 @@ static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu) static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - struct vmcb *hsave = svm->nested.hsave; WARN_ON(mmu_is_nested(vcpu)); vcpu->arch.mmu = &vcpu->arch.guest_mmu; - kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, hsave->save.cr4, hsave->save.efer, + kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4, + svm->vmcb01.ptr->save.efer, svm->nested.ctl.nested_cr3); vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3; vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr; @@ -123,7 +125,7 @@ void recalc_intercepts(struct vcpu_svm *svm) return; c = &svm->vmcb->control; - h = &svm->nested.hsave->control; + h = &svm->vmcb01.ptr->control; g = &svm->nested.ctl; for (i = 0; i < MAX_INTERCEPT; i++) @@ -213,69 +215,92 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) return true; } -static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu) +/* + * Bits 11:0 of bitmap address are ignored by hardware + */ +static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size) { - struct vcpu_svm *svm = to_svm(vcpu); - - if (WARN_ON(!is_guest_mode(vcpu))) - return true; - - if (!nested_svm_vmrun_msrpm(svm)) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - vcpu->run->internal.suberror = - KVM_INTERNAL_ERROR_EMULATION; - vcpu->run->internal.ndata = 0; - return false; - } + u64 addr = PAGE_ALIGN(pa); - return true; + return kvm_vcpu_is_legal_gpa(vcpu, addr) && + kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1); } -static bool nested_vmcb_check_controls(struct vmcb_control_area *control) +static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu, + struct vmcb_control_area *control) { - if ((vmcb_is_intercept(control, INTERCEPT_VMRUN)) == 0) + if (CC(!vmcb_is_intercept(control, INTERCEPT_VMRUN))) return false; - if (control->asid == 0) + if (CC(control->asid == 0)) return false; - if ((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && - !npt_enabled) + if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled)) + return false; + + if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa, + MSRPM_SIZE))) + return false; + if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa, + IOPM_SIZE))) return false; return true; } -static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12) +static bool nested_vmcb_check_cr3_cr4(struct kvm_vcpu *vcpu, + struct vmcb_save_area *save) { - struct kvm_vcpu *vcpu = &svm->vcpu; - bool vmcb12_lma; + /* + * These checks are also performed by KVM_SET_SREGS, + * except that EFER.LMA is not checked by SVM against + * CR0.PG && EFER.LME. 
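Earlier in this hunk, nested_svm_check_bitmap_pa() checks that both the first and the last byte of the MSR/IO permission bitmap are legal guest physical addresses, the low 12 bits of the bitmap address being ignored by hardware. A stand-alone approximation of that range check, with an invented address-width parameter standing in for KVM's per-vCPU GPA validity helper:

	#include <stdbool.h>
	#include <stdint.h>

	/* Illustration only: require the whole bitmap to sit below a guest
	 * physical address limit of 2^gpa_bits (assumes gpa_bits < 64). */
	static bool bitmap_pa_ok(uint64_t pa, uint64_t size, unsigned int gpa_bits)
	{
		uint64_t limit = UINT64_C(1) << gpa_bits;
		uint64_t addr = pa & ~UINT64_C(0xfff);	/* drop the ignored bits */

		return addr < limit && (addr + size - 1) < limit;
	}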
+ */ + if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) { + if (CC(!(save->cr4 & X86_CR4_PAE)) || + CC(!(save->cr0 & X86_CR0_PE)) || + CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3))) + return false; + } - if ((vmcb12->save.efer & EFER_SVME) == 0) + if (CC(!kvm_is_valid_cr4(vcpu, save->cr4))) return false; - if (((vmcb12->save.cr0 & X86_CR0_CD) == 0) && (vmcb12->save.cr0 & X86_CR0_NW)) + return true; +} + +/* Common checks that apply to both L1 and L2 state. */ +static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu, + struct vmcb_save_area *save) +{ + /* + * FIXME: these should be done after copying the fields, + * to avoid TOC/TOU races. For these save area checks + * the possible damage is limited since kvm_set_cr0 and + * kvm_set_cr4 handle failure; EFER_SVME is an exception + * so it is force-set later in nested_prepare_vmcb_save. + */ + if (CC(!(save->efer & EFER_SVME))) return false; - if (!kvm_dr6_valid(vmcb12->save.dr6) || !kvm_dr7_valid(vmcb12->save.dr7)) + if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) || + CC(save->cr0 & ~0xffffffffULL)) return false; - vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG); + if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7))) + return false; - if (vmcb12_lma) { - if (!(vmcb12->save.cr4 & X86_CR4_PAE) || - !(vmcb12->save.cr0 & X86_CR0_PE) || - kvm_vcpu_is_illegal_gpa(vcpu, vmcb12->save.cr3)) - return false; - } - if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4)) + if (!nested_vmcb_check_cr3_cr4(vcpu, save)) return false; - return nested_vmcb_check_controls(&vmcb12->control); + if (CC(!kvm_valid_efer(vcpu, save->efer))) + return false; + + return true; } -static void load_nested_vmcb_control(struct vcpu_svm *svm, - struct vmcb_control_area *control) +static void nested_load_control_from_vmcb12(struct vcpu_svm *svm, + struct vmcb_control_area *control) { copy_vmcb_control_area(&svm->nested.ctl, control); @@ -287,9 +312,9 @@ static void load_nested_vmcb_control(struct vcpu_svm *svm, /* * Synchronize fields that are written by the processor, so that - * they can be copied back into the nested_vmcb. + * they can be copied back into the vmcb12. */ -void sync_nested_vmcb_control(struct vcpu_svm *svm) +void nested_sync_control_from_vmcb02(struct vcpu_svm *svm) { u32 mask; svm->nested.ctl.event_inj = svm->vmcb->control.event_inj; @@ -317,8 +342,8 @@ void sync_nested_vmcb_control(struct vcpu_svm *svm) * Transfer any event that L0 or L1 wanted to inject into L2 to * EXIT_INT_INFO. 
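The CC() wrapper sprinkled through these checks expands to KVM_NESTED_VMENTER_CONSISTENCY_CHECK, whose definition is outside this hunk; in spirit it evaluates a condition and, when the condition trips, records which consistency check failed so rejected nested VMRUNs can be debugged. A hedged userspace-style sketch of that shape (the reporting here is just stderr, not whatever the in-kernel macro does):

	#include <stdbool.h>
	#include <stdio.h>

	/* Sketch only: evaluate the check, log its source text on failure,
	 * and hand the result back so it can gate the surrounding 'if'. */
	#define CHECK_FAIL(cond)						\
	({									\
		bool failed__ = (cond);						\
		if (failed__)							\
			fprintf(stderr, "consistency check failed: %s\n", #cond); \
		failed__;							\
	})

	static bool long_mode_state_ok(bool cr4_pae, bool cr0_pe)
	{
		if (CHECK_FAIL(!cr4_pae) || CHECK_FAIL(!cr0_pe))
			return false;
		return true;
	}

Statement expressions are a GCC/Clang extension, used here because the wrapper needs to both report the failure and yield the condition's value to the caller.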
*/ -static void nested_vmcb_save_pending_event(struct vcpu_svm *svm, - struct vmcb *vmcb12) +static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm, + struct vmcb *vmcb12) { struct kvm_vcpu *vcpu = &svm->vcpu; u32 exit_int_info = 0; @@ -362,12 +387,12 @@ static inline bool nested_npt_enabled(struct vcpu_svm *svm) static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_npt) { - if (kvm_vcpu_is_illegal_gpa(vcpu, cr3)) + if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) return -EINVAL; if (!nested_npt && is_pae_paging(vcpu) && (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) { - if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) + if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) return -EINVAL; } @@ -386,20 +411,56 @@ static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, return 0; } -static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12) +void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm) { + if (!svm->nested.vmcb02.ptr) + return; + + /* FIXME: merge g_pat from vmcb01 and vmcb12. */ + svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat; +} + +static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12) +{ + bool new_vmcb12 = false; + + nested_vmcb02_compute_g_pat(svm); + /* Load the nested guest state */ - svm->vmcb->save.es = vmcb12->save.es; - svm->vmcb->save.cs = vmcb12->save.cs; - svm->vmcb->save.ss = vmcb12->save.ss; - svm->vmcb->save.ds = vmcb12->save.ds; - svm->vmcb->save.gdtr = vmcb12->save.gdtr; - svm->vmcb->save.idtr = vmcb12->save.idtr; + if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) { + new_vmcb12 = true; + svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa; + } + + if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) { + svm->vmcb->save.es = vmcb12->save.es; + svm->vmcb->save.cs = vmcb12->save.cs; + svm->vmcb->save.ss = vmcb12->save.ss; + svm->vmcb->save.ds = vmcb12->save.ds; + svm->vmcb->save.cpl = vmcb12->save.cpl; + vmcb_mark_dirty(svm->vmcb, VMCB_SEG); + } + + if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) { + svm->vmcb->save.gdtr = vmcb12->save.gdtr; + svm->vmcb->save.idtr = vmcb12->save.idtr; + vmcb_mark_dirty(svm->vmcb, VMCB_DT); + } + kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED); - svm_set_efer(&svm->vcpu, vmcb12->save.efer); + + /* + * Force-set EFER_SVME even though it is checked earlier on the + * VMCB12, because the guest can flip the bit between the check + * and now. Clearing EFER_SVME would call svm_free_nested. 
+ */ + svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME); + svm_set_cr0(&svm->vcpu, vmcb12->save.cr0); svm_set_cr4(&svm->vcpu, vmcb12->save.cr4); - svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2; + + svm->vcpu.arch.cr2 = vmcb12->save.cr2; + kvm_rax_write(&svm->vcpu, vmcb12->save.rax); kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp); kvm_rip_write(&svm->vcpu, vmcb12->save.rip); @@ -408,15 +469,41 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12) svm->vmcb->save.rax = vmcb12->save.rax; svm->vmcb->save.rsp = vmcb12->save.rsp; svm->vmcb->save.rip = vmcb12->save.rip; - svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1; - svm->vcpu.arch.dr6 = vmcb12->save.dr6 | DR6_ACTIVE_LOW; - svm->vmcb->save.cpl = vmcb12->save.cpl; + + /* These bits will be set properly on the first execution when new_vmc12 is true */ + if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) { + svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1; + svm->vcpu.arch.dr6 = vmcb12->save.dr6 | DR6_ACTIVE_LOW; + vmcb_mark_dirty(svm->vmcb, VMCB_DR); + } } -static void nested_prepare_vmcb_control(struct vcpu_svm *svm) +static void nested_vmcb02_prepare_control(struct vcpu_svm *svm) { const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK; + /* + * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2, + * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes. + */ + + /* + * Also covers avic_vapic_bar, avic_backing_page, avic_logical_id, + * avic_physical_id. + */ + WARN_ON(svm->vmcb01.ptr->control.int_ctl & AVIC_ENABLE_MASK); + + /* Copied from vmcb01. msrpm_base can be overwritten later. */ + svm->vmcb->control.nested_ctl = svm->vmcb01.ptr->control.nested_ctl; + svm->vmcb->control.iopm_base_pa = svm->vmcb01.ptr->control.iopm_base_pa; + svm->vmcb->control.msrpm_base_pa = svm->vmcb01.ptr->control.msrpm_base_pa; + + /* Done at vmrun: asid. */ + + /* Also overwritten later if necessary. */ + svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; + + /* nested_cr3. */ if (nested_npt_enabled(svm)) nested_svm_init_mmu_context(&svm->vcpu); @@ -425,7 +512,7 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm) svm->vmcb->control.int_ctl = (svm->nested.ctl.int_ctl & ~mask) | - (svm->nested.hsave->control.int_ctl & mask); + (svm->vmcb01.ptr->control.int_ctl & mask); svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext; svm->vmcb->control.int_vector = svm->nested.ctl.int_vector; @@ -440,17 +527,28 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm) enter_guest_mode(&svm->vcpu); /* - * Merge guest and host intercepts - must be called with vcpu in - * guest-mode to take affect here + * Merge guest and host intercepts - must be called with vcpu in + * guest-mode to take effect. */ recalc_intercepts(svm); +} - vmcb_mark_all_dirty(svm->vmcb); +static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb) +{ + /* + * Some VMCB state is shared between L1 and L2 and thus has to be + * moved at the time of nested vmrun and vmexit. + * + * VMLOAD/VMSAVE state would also belong in this category, but KVM + * always performs VMLOAD and VMSAVE from the VMCB01. 
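nested_vmcb02_prepare_save() above avoids re-copying the segment, descriptor-table and debug-register groups when the same vmcb12 is being re-entered and vmcb_is_dirty() says the guest left that group untouched. A toy model of that copy-avoidance, with invented field and bit names (a set clean bit is assumed to mean the group is unchanged):

	#include <stdbool.h>
	#include <stdint.h>

	struct vmcb_like {
		uint64_t seg_state;	/* stands in for one cached field group */
		uint32_t clean_bits;	/* a set bit means the group is unchanged */
	};

	#define CLEAN_SEG (1u << 0)

	/* Re-copy the group only when a different source was loaded or the
	 * guest marked the group dirty by clearing its clean bit. */
	static void sync_seg_group(struct vmcb_like *dst, const struct vmcb_like *src,
				   bool new_source)
	{
		bool dirty = !(src->clean_bits & CLEAN_SEG);

		if (new_source || dirty)
			dst->seg_state = src->seg_state;
	}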
+ */ + to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl; } -int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa, +int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa, struct vmcb *vmcb12) { + struct vcpu_svm *svm = to_svm(vcpu); int ret; trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa, @@ -468,9 +566,14 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa, svm->nested.vmcb12_gpa = vmcb12_gpa; - load_nested_vmcb_control(svm, &vmcb12->control); - nested_prepare_vmcb_control(svm); - nested_prepare_vmcb_save(svm, vmcb12); + + WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr); + + nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr); + + svm_switch_vmcb(svm, &svm->nested.vmcb02); + nested_vmcb02_prepare_control(svm); + nested_vmcb02_prepare_save(svm, vmcb12); ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3, nested_npt_enabled(svm)); @@ -478,44 +581,48 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa, return ret; if (!npt_enabled) - svm->vcpu.arch.mmu->inject_page_fault = svm_inject_page_fault_nested; + vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested; svm_set_gif(svm, true); return 0; } -int nested_svm_vmrun(struct vcpu_svm *svm) +int nested_svm_vmrun(struct kvm_vcpu *vcpu) { + struct vcpu_svm *svm = to_svm(vcpu); int ret; struct vmcb *vmcb12; - struct vmcb *hsave = svm->nested.hsave; - struct vmcb *vmcb = svm->vmcb; struct kvm_host_map map; u64 vmcb12_gpa; - if (is_smm(&svm->vcpu)) { - kvm_queue_exception(&svm->vcpu, UD_VECTOR); + ++vcpu->stat.nested_run; + + if (is_smm(vcpu)) { + kvm_queue_exception(vcpu, UD_VECTOR); return 1; } vmcb12_gpa = svm->vmcb->save.rax; - ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb12_gpa), &map); + ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map); if (ret == -EINVAL) { - kvm_inject_gp(&svm->vcpu, 0); + kvm_inject_gp(vcpu, 0); return 1; } else if (ret) { - return kvm_skip_emulated_instruction(&svm->vcpu); + return kvm_skip_emulated_instruction(vcpu); } - ret = kvm_skip_emulated_instruction(&svm->vcpu); + ret = kvm_skip_emulated_instruction(vcpu); vmcb12 = map.hva; if (WARN_ON_ONCE(!svm->nested.initialized)) return -EINVAL; - if (!nested_vmcb_checks(svm, vmcb12)) { + nested_load_control_from_vmcb12(svm, &vmcb12->control); + + if (!nested_vmcb_valid_sregs(vcpu, &vmcb12->save) || + !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) { vmcb12->control.exit_code = SVM_EXIT_ERR; vmcb12->control.exit_code_hi = 0; vmcb12->control.exit_info_1 = 0; @@ -525,36 +632,25 @@ int nested_svm_vmrun(struct vcpu_svm *svm) /* Clear internal status */ - kvm_clear_exception_queue(&svm->vcpu); - kvm_clear_interrupt_queue(&svm->vcpu); + kvm_clear_exception_queue(vcpu); + kvm_clear_interrupt_queue(vcpu); /* - * Save the old vmcb, so we don't need to pick what we save, but can - * restore everything when a VMEXIT occurs + * Since vmcb01 is not in use, we can use it to store some of the L1 + * state. 
*/ - hsave->save.es = vmcb->save.es; - hsave->save.cs = vmcb->save.cs; - hsave->save.ss = vmcb->save.ss; - hsave->save.ds = vmcb->save.ds; - hsave->save.gdtr = vmcb->save.gdtr; - hsave->save.idtr = vmcb->save.idtr; - hsave->save.efer = svm->vcpu.arch.efer; - hsave->save.cr0 = kvm_read_cr0(&svm->vcpu); - hsave->save.cr4 = svm->vcpu.arch.cr4; - hsave->save.rflags = kvm_get_rflags(&svm->vcpu); - hsave->save.rip = kvm_rip_read(&svm->vcpu); - hsave->save.rsp = vmcb->save.rsp; - hsave->save.rax = vmcb->save.rax; - if (npt_enabled) - hsave->save.cr3 = vmcb->save.cr3; - else - hsave->save.cr3 = kvm_read_cr3(&svm->vcpu); - - copy_vmcb_control_area(&hsave->control, &vmcb->control); + svm->vmcb01.ptr->save.efer = vcpu->arch.efer; + svm->vmcb01.ptr->save.cr0 = kvm_read_cr0(vcpu); + svm->vmcb01.ptr->save.cr4 = vcpu->arch.cr4; + svm->vmcb01.ptr->save.rflags = kvm_get_rflags(vcpu); + svm->vmcb01.ptr->save.rip = kvm_rip_read(vcpu); + + if (!npt_enabled) + svm->vmcb01.ptr->save.cr3 = kvm_read_cr3(vcpu); svm->nested.nested_run_pending = 1; - if (enter_svm_guest_mode(svm, vmcb12_gpa, vmcb12)) + if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12)) goto out_exit_err; if (nested_svm_vmrun_msrpm(svm)) @@ -571,7 +667,7 @@ out_exit_err: nested_svm_vmexit(svm); out: - kvm_vcpu_unmap(&svm->vcpu, &map, true); + kvm_vcpu_unmap(vcpu, &map, true); return ret; } @@ -594,27 +690,30 @@ void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb) int nested_svm_vmexit(struct vcpu_svm *svm) { - int rc; + struct kvm_vcpu *vcpu = &svm->vcpu; struct vmcb *vmcb12; - struct vmcb *hsave = svm->nested.hsave; struct vmcb *vmcb = svm->vmcb; struct kvm_host_map map; + int rc; + + /* Triple faults in L2 should never escape. */ + WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)); - rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map); + rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map); if (rc) { if (rc == -EINVAL) - kvm_inject_gp(&svm->vcpu, 0); + kvm_inject_gp(vcpu, 0); return 1; } vmcb12 = map.hva; /* Exit Guest-Mode */ - leave_guest_mode(&svm->vcpu); + leave_guest_mode(vcpu); svm->nested.vmcb12_gpa = 0; WARN_ON_ONCE(svm->nested.nested_run_pending); - kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu); + kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); /* in case we halted in L2 */ svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE; @@ -628,14 +727,14 @@ int nested_svm_vmexit(struct vcpu_svm *svm) vmcb12->save.gdtr = vmcb->save.gdtr; vmcb12->save.idtr = vmcb->save.idtr; vmcb12->save.efer = svm->vcpu.arch.efer; - vmcb12->save.cr0 = kvm_read_cr0(&svm->vcpu); - vmcb12->save.cr3 = kvm_read_cr3(&svm->vcpu); + vmcb12->save.cr0 = kvm_read_cr0(vcpu); + vmcb12->save.cr3 = kvm_read_cr3(vcpu); vmcb12->save.cr2 = vmcb->save.cr2; vmcb12->save.cr4 = svm->vcpu.arch.cr4; - vmcb12->save.rflags = kvm_get_rflags(&svm->vcpu); - vmcb12->save.rip = kvm_rip_read(&svm->vcpu); - vmcb12->save.rsp = kvm_rsp_read(&svm->vcpu); - vmcb12->save.rax = kvm_rax_read(&svm->vcpu); + vmcb12->save.rflags = kvm_get_rflags(vcpu); + vmcb12->save.rip = kvm_rip_read(vcpu); + vmcb12->save.rsp = kvm_rsp_read(vcpu); + vmcb12->save.rax = kvm_rax_read(vcpu); vmcb12->save.dr7 = vmcb->save.dr7; vmcb12->save.dr6 = svm->vcpu.arch.dr6; vmcb12->save.cpl = vmcb->save.cpl; @@ -647,7 +746,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm) vmcb12->control.exit_info_2 = vmcb->control.exit_info_2; if (vmcb12->control.exit_code != SVM_EXIT_ERR) - nested_vmcb_save_pending_event(svm, vmcb12); + 
nested_save_pending_event_to_vmcb12(svm, vmcb12); if (svm->nrips_enabled) vmcb12->control.next_rip = vmcb->control.next_rip; @@ -662,37 +761,39 @@ int nested_svm_vmexit(struct vcpu_svm *svm) vmcb12->control.pause_filter_thresh = svm->vmcb->control.pause_filter_thresh; - /* Restore the original control entries */ - copy_vmcb_control_area(&vmcb->control, &hsave->control); + nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr); - /* On vmexit the GIF is set to false */ + svm_switch_vmcb(svm, &svm->vmcb01); + WARN_ON_ONCE(svm->vmcb->control.exit_code != SVM_EXIT_VMRUN); + + /* + * On vmexit the GIF is set to false and + * no event can be injected in L1. + */ svm_set_gif(svm, false); + svm->vmcb->control.exit_int_info = 0; - svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset = - svm->vcpu.arch.l1_tsc_offset; + svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset; + if (svm->vmcb->control.tsc_offset != svm->vcpu.arch.tsc_offset) { + svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset; + vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); + } svm->nested.ctl.nested_cr3 = 0; - /* Restore selected save entries */ - svm->vmcb->save.es = hsave->save.es; - svm->vmcb->save.cs = hsave->save.cs; - svm->vmcb->save.ss = hsave->save.ss; - svm->vmcb->save.ds = hsave->save.ds; - svm->vmcb->save.gdtr = hsave->save.gdtr; - svm->vmcb->save.idtr = hsave->save.idtr; - kvm_set_rflags(&svm->vcpu, hsave->save.rflags); - kvm_set_rflags(&svm->vcpu, hsave->save.rflags | X86_EFLAGS_FIXED); - svm_set_efer(&svm->vcpu, hsave->save.efer); - svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE); - svm_set_cr4(&svm->vcpu, hsave->save.cr4); - kvm_rax_write(&svm->vcpu, hsave->save.rax); - kvm_rsp_write(&svm->vcpu, hsave->save.rsp); - kvm_rip_write(&svm->vcpu, hsave->save.rip); - svm->vmcb->save.dr7 = DR7_FIXED_1; - svm->vmcb->save.cpl = 0; - svm->vmcb->control.exit_int_info = 0; + /* + * Restore processor state that had been saved in vmcb01 + */ + kvm_set_rflags(vcpu, svm->vmcb->save.rflags); + svm_set_efer(vcpu, svm->vmcb->save.efer); + svm_set_cr0(vcpu, svm->vmcb->save.cr0 | X86_CR0_PE); + svm_set_cr4(vcpu, svm->vmcb->save.cr4); + kvm_rax_write(vcpu, svm->vmcb->save.rax); + kvm_rsp_write(vcpu, svm->vmcb->save.rsp); + kvm_rip_write(vcpu, svm->vmcb->save.rip); - vmcb_mark_all_dirty(svm->vmcb); + svm->vcpu.arch.dr7 = DR7_FIXED_1; + kvm_update_dr7(&svm->vcpu); trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code, vmcb12->control.exit_info_1, @@ -701,50 +802,62 @@ int nested_svm_vmexit(struct vcpu_svm *svm) vmcb12->control.exit_int_info_err, KVM_ISA_SVM); - kvm_vcpu_unmap(&svm->vcpu, &map, true); + kvm_vcpu_unmap(vcpu, &map, true); - nested_svm_uninit_mmu_context(&svm->vcpu); + nested_svm_uninit_mmu_context(vcpu); - rc = nested_svm_load_cr3(&svm->vcpu, hsave->save.cr3, false); + rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false); if (rc) return 1; - if (npt_enabled) - svm->vmcb->save.cr3 = hsave->save.cr3; - /* * Drop what we picked up for L2 via svm_complete_interrupts() so it * doesn't end up in L1. */ svm->vcpu.arch.nmi_injected = false; - kvm_clear_exception_queue(&svm->vcpu); - kvm_clear_interrupt_queue(&svm->vcpu); + kvm_clear_exception_queue(vcpu); + kvm_clear_interrupt_queue(vcpu); + + /* + * If we are here following the completion of a VMRUN that + * is being single-stepped, queue the pending #DB intercept + * right now so that it an be accounted for before we execute + * L1's next instruction. 
+ */ + if (unlikely(svm->vmcb->save.rflags & X86_EFLAGS_TF)) + kvm_queue_exception(&(svm->vcpu), DB_VECTOR); return 0; } +static void nested_svm_triple_fault(struct kvm_vcpu *vcpu) +{ + nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN); +} + int svm_allocate_nested(struct vcpu_svm *svm) { - struct page *hsave_page; + struct page *vmcb02_page; if (svm->nested.initialized) return 0; - hsave_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); - if (!hsave_page) + vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); + if (!vmcb02_page) return -ENOMEM; - svm->nested.hsave = page_address(hsave_page); + svm->nested.vmcb02.ptr = page_address(vmcb02_page); + svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT); svm->nested.msrpm = svm_vcpu_alloc_msrpm(); if (!svm->nested.msrpm) - goto err_free_hsave; + goto err_free_vmcb02; svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm); svm->nested.initialized = true; return 0; -err_free_hsave: - __free_page(hsave_page); +err_free_vmcb02: + __free_page(vmcb02_page); return -ENOMEM; } @@ -756,8 +869,8 @@ void svm_free_nested(struct vcpu_svm *svm) svm_vcpu_free_msrpm(svm->nested.msrpm); svm->nested.msrpm = NULL; - __free_page(virt_to_page(svm->nested.hsave)); - svm->nested.hsave = NULL; + __free_page(virt_to_page(svm->nested.vmcb02.ptr)); + svm->nested.vmcb02.ptr = NULL; svm->nested.initialized = false; } @@ -767,18 +880,19 @@ void svm_free_nested(struct vcpu_svm *svm) */ void svm_leave_nested(struct vcpu_svm *svm) { - if (is_guest_mode(&svm->vcpu)) { - struct vmcb *hsave = svm->nested.hsave; - struct vmcb *vmcb = svm->vmcb; + struct kvm_vcpu *vcpu = &svm->vcpu; + if (is_guest_mode(vcpu)) { svm->nested.nested_run_pending = 0; - leave_guest_mode(&svm->vcpu); - copy_vmcb_control_area(&vmcb->control, &hsave->control); - nested_svm_uninit_mmu_context(&svm->vcpu); + leave_guest_mode(vcpu); + + svm_switch_vmcb(svm, &svm->nested.vmcb02); + + nested_svm_uninit_mmu_context(vcpu); vmcb_mark_all_dirty(svm->vmcb); } - kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu); + kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); } static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) @@ -887,16 +1001,15 @@ int nested_svm_exit_handled(struct vcpu_svm *svm) return vmexit; } -int nested_svm_check_permissions(struct vcpu_svm *svm) +int nested_svm_check_permissions(struct kvm_vcpu *vcpu) { - if (!(svm->vcpu.arch.efer & EFER_SVME) || - !is_paging(&svm->vcpu)) { - kvm_queue_exception(&svm->vcpu, UD_VECTOR); + if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) { + kvm_queue_exception(vcpu, UD_VECTOR); return 1; } - if (svm->vmcb->save.cpl) { - kvm_inject_gp(&svm->vcpu, 0); + if (to_svm(vcpu)->vmcb->save.cpl) { + kvm_inject_gp(vcpu, 0); return 1; } @@ -944,50 +1057,11 @@ static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm) nested_svm_vmexit(svm); } -static void nested_svm_smi(struct vcpu_svm *svm) -{ - svm->vmcb->control.exit_code = SVM_EXIT_SMI; - svm->vmcb->control.exit_info_1 = 0; - svm->vmcb->control.exit_info_2 = 0; - - nested_svm_vmexit(svm); -} - -static void nested_svm_nmi(struct vcpu_svm *svm) -{ - svm->vmcb->control.exit_code = SVM_EXIT_NMI; - svm->vmcb->control.exit_info_1 = 0; - svm->vmcb->control.exit_info_2 = 0; - - nested_svm_vmexit(svm); -} - -static void nested_svm_intr(struct vcpu_svm *svm) -{ - trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip); - - svm->vmcb->control.exit_code = SVM_EXIT_INTR; - svm->vmcb->control.exit_info_1 = 0; - svm->vmcb->control.exit_info_2 = 0; - - 
nested_svm_vmexit(svm); -} - static inline bool nested_exit_on_init(struct vcpu_svm *svm) { return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT); } -static void nested_svm_init(struct vcpu_svm *svm) -{ - svm->vmcb->control.exit_code = SVM_EXIT_INIT; - svm->vmcb->control.exit_info_1 = 0; - svm->vmcb->control.exit_info_2 = 0; - - nested_svm_vmexit(svm); -} - - static int svm_check_nested_events(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -1001,12 +1075,18 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu) return -EBUSY; if (!nested_exit_on_init(svm)) return 0; - nested_svm_init(svm); + nested_svm_simple_vmexit(svm, SVM_EXIT_INIT); return 0; } if (vcpu->arch.exception.pending) { - if (block_nested_events) + /* + * Only a pending nested run can block a pending exception. + * Otherwise an injected NMI/interrupt should either be + * lost or delivered to the nested hypervisor in the EXITINTINFO + * vmcb field, while delivering the pending exception. + */ + if (svm->nested.nested_run_pending) return -EBUSY; if (!nested_exit_on_exception(svm)) return 0; @@ -1019,7 +1099,7 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu) return -EBUSY; if (!nested_exit_on_smi(svm)) return 0; - nested_svm_smi(svm); + nested_svm_simple_vmexit(svm, SVM_EXIT_SMI); return 0; } @@ -1028,7 +1108,7 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu) return -EBUSY; if (!nested_exit_on_nmi(svm)) return 0; - nested_svm_nmi(svm); + nested_svm_simple_vmexit(svm, SVM_EXIT_NMI); return 0; } @@ -1037,7 +1117,8 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu) return -EBUSY; if (!nested_exit_on_intr(svm)) return 0; - nested_svm_intr(svm); + trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip); + nested_svm_simple_vmexit(svm, SVM_EXIT_INTR); return 0; } @@ -1056,8 +1137,8 @@ int nested_svm_exit_special(struct vcpu_svm *svm) case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: { u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE); - if (get_host_vmcb(svm)->control.intercepts[INTERCEPT_EXCEPTION] & - excp_bits) + if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] & + excp_bits) return NESTED_EXIT_HOST; else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR && svm->vcpu.arch.apf.host_apf_flags) @@ -1121,10 +1202,9 @@ static int svm_get_nested_state(struct kvm_vcpu *vcpu, if (copy_to_user(&user_vmcb->control, &svm->nested.ctl, sizeof(user_vmcb->control))) return -EFAULT; - if (copy_to_user(&user_vmcb->save, &svm->nested.hsave->save, + if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save, sizeof(user_vmcb->save))) return -EFAULT; - out: return kvm_state.size; } @@ -1134,7 +1214,6 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, struct kvm_nested_state *kvm_state) { struct vcpu_svm *svm = to_svm(vcpu); - struct vmcb *hsave = svm->nested.hsave; struct vmcb __user *user_vmcb = (struct vmcb __user *) &user_kvm_nested_state->data.svm[0]; struct vmcb_control_area *ctl; @@ -1179,8 +1258,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, return -EINVAL; ret = -ENOMEM; - ctl = kzalloc(sizeof(*ctl), GFP_KERNEL); - save = kzalloc(sizeof(*save), GFP_KERNEL); + ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT); + save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT); if (!ctl || !save) goto out_free; @@ -1191,12 +1270,12 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, goto out_free; ret = -EINVAL; - if (!nested_vmcb_check_controls(ctl)) + if (!nested_vmcb_check_controls(vcpu, ctl)) goto out_free; /* * Processor state contains L2 state. 
Check that it is - * valid for guest mode (see nested_vmcb_checks). + * valid for guest mode (see nested_vmcb_check_save). */ cr0 = kvm_read_cr0(vcpu); if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW)) @@ -1205,27 +1284,48 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, /* * Validate host state saved from before VMRUN (see * nested_svm_check_permissions). - * TODO: validate reserved bits for all saved state. */ - if (!(save->cr0 & X86_CR0_PG)) + if (!(save->cr0 & X86_CR0_PG) || + !(save->cr0 & X86_CR0_PE) || + (save->rflags & X86_EFLAGS_VM) || + !nested_vmcb_valid_sregs(vcpu, save)) goto out_free; /* - * All checks done, we can enter guest mode. L1 control fields - * come from the nested save state. Guest state is already - * in the registers, the save area of the nested state instead - * contains saved L1 state. + * All checks done, we can enter guest mode. Userspace provides + * vmcb12.control, which will be combined with L1 and stored into + * vmcb02, and the L1 save state which we store in vmcb01. + * L2 registers if needed are moved from the current VMCB to VMCB02. */ svm->nested.nested_run_pending = !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING); - copy_vmcb_control_area(&hsave->control, &svm->vmcb->control); - hsave->save = *save; - svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa; - load_nested_vmcb_control(svm, ctl); - nested_prepare_vmcb_control(svm); + if (svm->current_vmcb == &svm->vmcb01) + svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save; + + svm->vmcb01.ptr->save.es = save->es; + svm->vmcb01.ptr->save.cs = save->cs; + svm->vmcb01.ptr->save.ss = save->ss; + svm->vmcb01.ptr->save.ds = save->ds; + svm->vmcb01.ptr->save.gdtr = save->gdtr; + svm->vmcb01.ptr->save.idtr = save->idtr; + svm->vmcb01.ptr->save.rflags = save->rflags | X86_EFLAGS_FIXED; + svm->vmcb01.ptr->save.efer = save->efer; + svm->vmcb01.ptr->save.cr0 = save->cr0; + svm->vmcb01.ptr->save.cr3 = save->cr3; + svm->vmcb01.ptr->save.cr4 = save->cr4; + svm->vmcb01.ptr->save.rax = save->rax; + svm->vmcb01.ptr->save.rsp = save->rsp; + svm->vmcb01.ptr->save.rip = save->rip; + svm->vmcb01.ptr->save.cpl = 0; + + nested_load_control_from_vmcb12(svm, ctl); + + svm_switch_vmcb(svm, &svm->nested.vmcb02); + + nested_vmcb02_prepare_control(svm); kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); ret = 0; @@ -1236,8 +1336,31 @@ out_free: return ret; } +static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + if (WARN_ON(!is_guest_mode(vcpu))) + return true; + + if (nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3, + nested_npt_enabled(svm))) + return false; + + if (!nested_svm_vmrun_msrpm(svm)) { + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = + KVM_INTERNAL_ERROR_EMULATION; + vcpu->run->internal.ndata = 0; + return false; + } + + return true; +} + struct kvm_x86_nested_ops svm_nested_ops = { .check_events = svm_check_nested_events, + .triple_fault = nested_svm_triple_fault, .get_nested_state_pages = svm_get_nested_state_pages, .get_state = svm_get_nested_state, .set_state = svm_set_nested_state, diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index 035da07500e8..fdf587f19c5f 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -98,6 +98,8 @@ static enum index msr_to_index(u32 msr) static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr, enum pmu_type type) { + struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu); + switch (msr) { case MSR_F15H_PERF_CTL0: case MSR_F15H_PERF_CTL1: @@ 
-105,6 +107,9 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr, case MSR_F15H_PERF_CTL3: case MSR_F15H_PERF_CTL4: case MSR_F15H_PERF_CTL5: + if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) + return NULL; + fallthrough; case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: if (type != PMU_TYPE_EVNTSEL) return NULL; @@ -115,6 +120,9 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr, case MSR_F15H_PERF_CTR3: case MSR_F15H_PERF_CTR4: case MSR_F15H_PERF_CTR5: + if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) + return NULL; + fallthrough; case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3: if (type != PMU_TYPE_COUNTER) return NULL; diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 874ea309279f..1356ee095cd5 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -14,6 +14,7 @@ #include <linux/psp-sev.h> #include <linux/pagemap.h> #include <linux/swap.h> +#include <linux/misc_cgroup.h> #include <linux/processor.h> #include <linux/trace_events.h> #include <asm/fpu/internal.h> @@ -28,12 +29,40 @@ #define __ex(x) __kvm_handle_fault_on_reboot(x) +#ifndef CONFIG_KVM_AMD_SEV +/* + * When this config is not defined, SEV feature is not supported and APIs in + * this file are not used but this file still gets compiled into the KVM AMD + * module. + * + * We will not have MISC_CG_RES_SEV and MISC_CG_RES_SEV_ES entries in the enum + * misc_res_type {} defined in linux/misc_cgroup.h. + * + * Below macros allow compilation to succeed. + */ +#define MISC_CG_RES_SEV MISC_CG_RES_TYPES +#define MISC_CG_RES_SEV_ES MISC_CG_RES_TYPES +#endif + +#ifdef CONFIG_KVM_AMD_SEV +/* enable/disable SEV support */ +static bool sev_enabled = true; +module_param_named(sev, sev_enabled, bool, 0444); + +/* enable/disable SEV-ES support */ +static bool sev_es_enabled = true; +module_param_named(sev_es, sev_es_enabled, bool, 0444); +#else +#define sev_enabled false +#define sev_es_enabled false +#endif /* CONFIG_KVM_AMD_SEV */ + static u8 sev_enc_bit; -static int sev_flush_asids(void); static DECLARE_RWSEM(sev_deactivate_lock); static DEFINE_MUTEX(sev_bitmap_lock); unsigned int max_sev_asid; static unsigned int min_sev_asid; +static unsigned long sev_me_mask; static unsigned long *sev_asid_bitmap; static unsigned long *sev_reclaim_asid_bitmap; @@ -45,9 +74,15 @@ struct enc_region { unsigned long size; }; -static int sev_flush_asids(void) +/* Called with the sev_bitmap_lock held, or on shutdown */ +static int sev_flush_asids(int min_asid, int max_asid) { - int ret, error = 0; + int ret, pos, error = 0; + + /* Check if there are any ASIDs to reclaim before performing a flush */ + pos = find_next_bit(sev_reclaim_asid_bitmap, max_asid, min_asid); + if (pos >= max_asid) + return -EBUSY; /* * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail, @@ -66,17 +101,15 @@ static int sev_flush_asids(void) return ret; } +static inline bool is_mirroring_enc_context(struct kvm *kvm) +{ + return !!to_kvm_svm(kvm)->sev_info.enc_context_owner; +} + /* Must be called with the sev_bitmap_lock held */ static bool __sev_recycle_asids(int min_asid, int max_asid) { - int pos; - - /* Check if there are any ASIDs to reclaim before performing a flush */ - pos = find_next_bit(sev_reclaim_asid_bitmap, max_sev_asid, min_asid); - if (pos >= max_asid) - return false; - - if (sev_flush_asids()) + if (sev_flush_asids(min_asid, max_asid)) return false; /* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */ @@ -89,8 +122,19 @@ static bool __sev_recycle_asids(int 
min_asid, int max_asid) static int sev_asid_new(struct kvm_sev_info *sev) { - int pos, min_asid, max_asid; + int pos, min_asid, max_asid, ret; bool retry = true; + enum misc_res_type type; + + type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV; + WARN_ON(sev->misc_cg); + sev->misc_cg = get_current_misc_cg(); + ret = misc_cg_try_charge(type, sev->misc_cg, 1); + if (ret) { + put_misc_cg(sev->misc_cg); + sev->misc_cg = NULL; + return ret; + } mutex_lock(&sev_bitmap_lock); @@ -108,7 +152,8 @@ again: goto again; } mutex_unlock(&sev_bitmap_lock); - return -EBUSY; + ret = -EBUSY; + goto e_uncharge; } __set_bit(pos, sev_asid_bitmap); @@ -116,6 +161,11 @@ again: mutex_unlock(&sev_bitmap_lock); return pos + 1; +e_uncharge: + misc_cg_uncharge(type, sev->misc_cg, 1); + put_misc_cg(sev->misc_cg); + sev->misc_cg = NULL; + return ret; } static int sev_get_asid(struct kvm *kvm) @@ -125,14 +175,15 @@ static int sev_get_asid(struct kvm *kvm) return sev->asid; } -static void sev_asid_free(int asid) +static void sev_asid_free(struct kvm_sev_info *sev) { struct svm_cpu_data *sd; int cpu, pos; + enum misc_res_type type; mutex_lock(&sev_bitmap_lock); - pos = asid - 1; + pos = sev->asid - 1; __set_bit(pos, sev_reclaim_asid_bitmap); for_each_possible_cpu(cpu) { @@ -141,53 +192,51 @@ static void sev_asid_free(int asid) } mutex_unlock(&sev_bitmap_lock); + + type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV; + misc_cg_uncharge(type, sev->misc_cg, 1); + put_misc_cg(sev->misc_cg); + sev->misc_cg = NULL; } static void sev_unbind_asid(struct kvm *kvm, unsigned int handle) { - struct sev_data_decommission *decommission; - struct sev_data_deactivate *data; + struct sev_data_decommission decommission; + struct sev_data_deactivate deactivate; if (!handle) return; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return; - - /* deactivate handle */ - data->handle = handle; + deactivate.handle = handle; /* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */ down_read(&sev_deactivate_lock); - sev_guest_deactivate(data, NULL); + sev_guest_deactivate(&deactivate, NULL); up_read(&sev_deactivate_lock); - kfree(data); - - decommission = kzalloc(sizeof(*decommission), GFP_KERNEL); - if (!decommission) - return; - /* decommission handle */ - decommission->handle = handle; - sev_guest_decommission(decommission, NULL); - - kfree(decommission); + decommission.handle = handle; + sev_guest_decommission(&decommission, NULL); } static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + bool es_active = argp->id == KVM_SEV_ES_INIT; int asid, ret; + if (kvm->created_vcpus) + return -EINVAL; + ret = -EBUSY; if (unlikely(sev->active)) return ret; + sev->es_active = es_active; asid = sev_asid_new(sev); if (asid < 0) - return ret; + goto e_no_asid; + sev->asid = asid; ret = sev_platform_init(&argp->error); if (ret) @@ -200,35 +249,23 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) return 0; e_free: - sev_asid_free(asid); + sev_asid_free(sev); + sev->asid = 0; +e_no_asid: + sev->es_active = false; return ret; } -static int sev_es_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) -{ - if (!sev_es) - return -ENOTTY; - - to_kvm_svm(kvm)->sev_info.es_active = true; - - return sev_guest_init(kvm, argp); -} - static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error) { - struct sev_data_activate *data; + struct sev_data_activate activate; int asid = sev_get_asid(kvm); int ret; - data = 
kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); - if (!data) - return -ENOMEM; - /* activate ASID on the given handle */ - data->handle = handle; - data->asid = asid; - ret = sev_guest_activate(data, error); - kfree(data); + activate.handle = handle; + activate.asid = asid; + ret = sev_guest_activate(&activate, error); return ret; } @@ -258,7 +295,7 @@ static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error) static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - struct sev_data_launch_start *start; + struct sev_data_launch_start start; struct kvm_sev_launch_start params; void *dh_blob, *session_blob; int *error = &argp->error; @@ -270,20 +307,16 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp) if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params))) return -EFAULT; - start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT); - if (!start) - return -ENOMEM; + memset(&start, 0, sizeof(start)); dh_blob = NULL; if (params.dh_uaddr) { dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len); - if (IS_ERR(dh_blob)) { - ret = PTR_ERR(dh_blob); - goto e_free; - } + if (IS_ERR(dh_blob)) + return PTR_ERR(dh_blob); - start->dh_cert_address = __sme_set(__pa(dh_blob)); - start->dh_cert_len = params.dh_len; + start.dh_cert_address = __sme_set(__pa(dh_blob)); + start.dh_cert_len = params.dh_len; } session_blob = NULL; @@ -294,40 +327,38 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp) goto e_free_dh; } - start->session_address = __sme_set(__pa(session_blob)); - start->session_len = params.session_len; + start.session_address = __sme_set(__pa(session_blob)); + start.session_len = params.session_len; } - start->handle = params.handle; - start->policy = params.policy; + start.handle = params.handle; + start.policy = params.policy; /* create memory encryption context */ - ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error); + ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error); if (ret) goto e_free_session; /* Bind ASID to this guest */ - ret = sev_bind_asid(kvm, start->handle, error); + ret = sev_bind_asid(kvm, start.handle, error); if (ret) goto e_free_session; /* return handle to userspace */ - params.handle = start->handle; + params.handle = start.handle; if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) { - sev_unbind_asid(kvm, start->handle); + sev_unbind_asid(kvm, start.handle); ret = -EFAULT; goto e_free_session; } - sev->handle = start->handle; + sev->handle = start.handle; sev->fd = argp->sev_fd; e_free_session: kfree(session_blob); e_free_dh: kfree(dh_blob); -e_free: - kfree(start); return ret; } @@ -446,7 +477,7 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i; struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; struct kvm_sev_launch_update_data params; - struct sev_data_launch_update_data *data; + struct sev_data_launch_update_data data; struct page **inpages; int ret; @@ -456,20 +487,14 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params))) return -EFAULT; - data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); - if (!data) - return -ENOMEM; - vaddr = params.uaddr; size = params.len; vaddr_end = vaddr + size; /* Lock the user memory. 
*/ inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1); - if (IS_ERR(inpages)) { - ret = PTR_ERR(inpages); - goto e_free; - } + if (IS_ERR(inpages)) + return PTR_ERR(inpages); /* * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in @@ -477,6 +502,9 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) */ sev_clflush_pages(inpages, npages); + data.reserved = 0; + data.handle = sev->handle; + for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) { int offset, len; @@ -491,10 +519,9 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size); - data->handle = sev->handle; - data->len = len; - data->address = __sme_page_pa(inpages[i]) + offset; - ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error); + data.len = len; + data.address = __sme_page_pa(inpages[i]) + offset; + ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error); if (ret) goto e_unpin; @@ -510,8 +537,6 @@ e_unpin: } /* unlock the user pages */ sev_unpin_memory(kvm, inpages, npages); -e_free: - kfree(data); return ret; } @@ -563,23 +588,22 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm) static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - struct sev_data_launch_update_vmsa *vmsa; + struct sev_data_launch_update_vmsa vmsa; + struct kvm_vcpu *vcpu; int i, ret; if (!sev_es_guest(kvm)) return -ENOTTY; - vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL); - if (!vmsa) - return -ENOMEM; + vmsa.reserved = 0; - for (i = 0; i < kvm->created_vcpus; i++) { - struct vcpu_svm *svm = to_svm(kvm->vcpus[i]); + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); /* Perform some pre-encryption checks against the VMSA */ ret = sev_es_sync_vmsa(svm); if (ret) - goto e_free; + return ret; /* * The LAUNCH_UPDATE_VMSA command will perform in-place @@ -589,27 +613,25 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) */ clflush_cache_range(svm->vmsa, PAGE_SIZE); - vmsa->handle = sev->handle; - vmsa->address = __sme_pa(svm->vmsa); - vmsa->len = PAGE_SIZE; - ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, vmsa, + vmsa.handle = sev->handle; + vmsa.address = __sme_pa(svm->vmsa); + vmsa.len = PAGE_SIZE; + ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, &argp->error); if (ret) - goto e_free; + return ret; svm->vcpu.arch.guest_state_protected = true; } -e_free: - kfree(vmsa); - return ret; + return 0; } static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp) { void __user *measure = (void __user *)(uintptr_t)argp->data; struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - struct sev_data_launch_measure *data; + struct sev_data_launch_measure data; struct kvm_sev_launch_measure params; void __user *p = NULL; void *blob = NULL; @@ -621,9 +643,7 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp) if (copy_from_user(&params, measure, sizeof(params))) return -EFAULT; - data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); - if (!data) - return -ENOMEM; + memset(&data, 0, sizeof(data)); /* User wants to query the blob length */ if (!params.len) @@ -631,23 +651,20 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp) p = (void __user *)(uintptr_t)params.uaddr; if (p) { - if (params.len > SEV_FW_BLOB_MAX_SIZE) { - ret = -EINVAL; - goto e_free; - } + if (params.len > SEV_FW_BLOB_MAX_SIZE) + return 
-EINVAL; - ret = -ENOMEM; - blob = kmalloc(params.len, GFP_KERNEL); + blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT); if (!blob) - goto e_free; + return -ENOMEM; - data->address = __psp_pa(blob); - data->len = params.len; + data.address = __psp_pa(blob); + data.len = params.len; } cmd: - data->handle = sev->handle; - ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error); + data.handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error); /* * If we query the session length, FW responded with expected data. @@ -664,63 +681,50 @@ cmd: } done: - params.len = data->len; + params.len = data.len; if (copy_to_user(measure, &params, sizeof(params))) ret = -EFAULT; e_free_blob: kfree(blob); -e_free: - kfree(data); return ret; } static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - struct sev_data_launch_finish *data; - int ret; + struct sev_data_launch_finish data; if (!sev_guest(kvm)) return -ENOTTY; - data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); - if (!data) - return -ENOMEM; - - data->handle = sev->handle; - ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error); - - kfree(data); - return ret; + data.handle = sev->handle; + return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error); } static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; struct kvm_sev_guest_status params; - struct sev_data_guest_status *data; + struct sev_data_guest_status data; int ret; if (!sev_guest(kvm)) return -ENOTTY; - data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); - if (!data) - return -ENOMEM; + memset(&data, 0, sizeof(data)); - data->handle = sev->handle; - ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error); + data.handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error); if (ret) - goto e_free; + return ret; - params.policy = data->policy; - params.state = data->state; - params.handle = data->handle; + params.policy = data.policy; + params.state = data.state; + params.handle = data.handle; if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) ret = -EFAULT; -e_free: - kfree(data); + return ret; } @@ -729,23 +733,17 @@ static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src, int *error, bool enc) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - struct sev_data_dbg *data; - int ret; + struct sev_data_dbg data; - data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); - if (!data) - return -ENOMEM; + data.reserved = 0; + data.handle = sev->handle; + data.dst_addr = dst; + data.src_addr = src; + data.len = size; - data->handle = sev->handle; - data->dst_addr = dst; - data->src_addr = src; - data->len = size; - - ret = sev_issue_cmd(kvm, - enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT, - data, error); - kfree(data); - return ret; + return sev_issue_cmd(kvm, + enc ? 
SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT, + &data, error); } static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr, @@ -965,7 +963,7 @@ err: static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - struct sev_data_launch_secret *data; + struct sev_data_launch_secret data; struct kvm_sev_launch_secret params; struct page **pages; void *blob, *hdr; @@ -997,41 +995,36 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) goto e_unpin_memory; } - ret = -ENOMEM; - data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); - if (!data) - goto e_unpin_memory; + memset(&data, 0, sizeof(data)); offset = params.guest_uaddr & (PAGE_SIZE - 1); - data->guest_address = __sme_page_pa(pages[0]) + offset; - data->guest_len = params.guest_len; + data.guest_address = __sme_page_pa(pages[0]) + offset; + data.guest_len = params.guest_len; blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len); if (IS_ERR(blob)) { ret = PTR_ERR(blob); - goto e_free; + goto e_unpin_memory; } - data->trans_address = __psp_pa(blob); - data->trans_len = params.trans_len; + data.trans_address = __psp_pa(blob); + data.trans_len = params.trans_len; hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); if (IS_ERR(hdr)) { ret = PTR_ERR(hdr); goto e_free_blob; } - data->hdr_address = __psp_pa(hdr); - data->hdr_len = params.hdr_len; + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; - data->handle = sev->handle; - ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error); + data.handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error); kfree(hdr); e_free_blob: kfree(blob); -e_free: - kfree(data); e_unpin_memory: /* content of memory is updated, mark pages dirty */ for (i = 0; i < n; i++) { @@ -1046,7 +1039,7 @@ static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp) { void __user *report = (void __user *)(uintptr_t)argp->data; struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - struct sev_data_attestation_report *data; + struct sev_data_attestation_report data; struct kvm_sev_attestation_report params; void __user *p; void *blob = NULL; @@ -1058,9 +1051,7 @@ static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp) if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params))) return -EFAULT; - data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); - if (!data) - return -ENOMEM; + memset(&data, 0, sizeof(data)); /* User wants to query the blob length */ if (!params.len) @@ -1068,23 +1059,20 @@ static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp) p = (void __user *)(uintptr_t)params.uaddr; if (p) { - if (params.len > SEV_FW_BLOB_MAX_SIZE) { - ret = -EINVAL; - goto e_free; - } + if (params.len > SEV_FW_BLOB_MAX_SIZE) + return -EINVAL; - ret = -ENOMEM; - blob = kmalloc(params.len, GFP_KERNEL); + blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT); if (!blob) - goto e_free; + return -ENOMEM; - data->address = __psp_pa(blob); - data->len = params.len; - memcpy(data->mnonce, params.mnonce, sizeof(params.mnonce)); + data.address = __psp_pa(blob); + data.len = params.len; + memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce)); } cmd: - data->handle = sev->handle; - ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, data, &argp->error); + data.handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error); /* * If we query the 
session length, FW responded with expected data. */ @@ -1100,22 +1088,417 @@ cmd: done: - params.len = data->len; + params.len = data.len; if (copy_to_user(report, &params, sizeof(params))) ret = -EFAULT; e_free_blob: kfree(blob); -e_free: - kfree(data); return ret; } +/* Userspace wants to query session length. */ +static int +__sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_sev_send_start *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_start data; + int ret; + + data.handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error); + if (ret < 0) + return ret; + + params->session_len = data.session_len; + if (copy_to_user((void __user *)(uintptr_t)argp->data, params, + sizeof(struct kvm_sev_send_start))) + ret = -EFAULT; + + return ret; +} + +static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_start data; + struct kvm_sev_send_start params; + void *amd_certs, *session_data; + void *pdh_cert, *plat_certs; + int ret; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_sev_send_start))) + return -EFAULT; + + /* if session_len is zero, userspace wants to query the session length */ + if (!params.session_len) + return __sev_send_start_query_session_length(kvm, argp, + &params); + + /* some sanity checks */ + if (!params.pdh_cert_uaddr || !params.pdh_cert_len || + !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE) + return -EINVAL; + + /* allocate the memory to hold the session data blob */ + session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT); + if (!session_data) + return -ENOMEM; + + /* copy the certificate blobs from userspace */ + pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr, + params.pdh_cert_len); + if (IS_ERR(pdh_cert)) { + ret = PTR_ERR(pdh_cert); + goto e_free_session; + } + + plat_certs = psp_copy_user_blob(params.plat_certs_uaddr, + params.plat_certs_len); + if (IS_ERR(plat_certs)) { + ret = PTR_ERR(plat_certs); + goto e_free_pdh; + } + + amd_certs = psp_copy_user_blob(params.amd_certs_uaddr, + params.amd_certs_len); + if (IS_ERR(amd_certs)) { + ret = PTR_ERR(amd_certs); + goto e_free_plat_cert; + } + + /* populate the FW SEND_START field with system physical address */ + memset(&data, 0, sizeof(data)); + data.pdh_cert_address = __psp_pa(pdh_cert); + data.pdh_cert_len = params.pdh_cert_len; + data.plat_certs_address = __psp_pa(plat_certs); + data.plat_certs_len = params.plat_certs_len; + data.amd_certs_address = __psp_pa(amd_certs); + data.amd_certs_len = params.amd_certs_len; + data.session_address = __psp_pa(session_data); + data.session_len = params.session_len; + data.handle = sev->handle; + + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error); + + if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr, + session_data, params.session_len)) { + ret = -EFAULT; + goto e_free_amd_cert; + } + + params.policy = data.policy; + params.session_len = data.session_len; + if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, + sizeof(struct kvm_sev_send_start))) + ret = -EFAULT; + +e_free_amd_cert: + kfree(amd_certs); +e_free_plat_cert: + kfree(plat_certs); +e_free_pdh: + kfree(pdh_cert); +e_free_session: + kfree(session_data); + return ret; +} + +/* Userspace wants to query either header or trans length. 
*/ +static int +__sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_sev_send_update_data *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_data data; + int ret; + + data.handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error); + if (ret < 0) + return ret; + + params->hdr_len = data.hdr_len; + params->trans_len = data.trans_len; + + if (copy_to_user((void __user *)(uintptr_t)argp->data, params, + sizeof(struct kvm_sev_send_update_data))) + ret = -EFAULT; + + return ret; +} + +static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_data data; + struct kvm_sev_send_update_data params; + void *hdr, *trans_data; + struct page **guest_page; + unsigned long n; + int ret, offset; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_sev_send_update_data))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return __sev_send_update_data_query_lengths(kvm, argp, &params); + + if (!params.trans_uaddr || !params.guest_uaddr || + !params.guest_len || !params.hdr_uaddr) + return -EINVAL; + + /* Check if we are crossing the page boundary */ + offset = params.guest_uaddr & (PAGE_SIZE - 1); + if ((params.guest_len + offset > PAGE_SIZE)) + return -EINVAL; + + /* Pin guest memory */ + guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, + PAGE_SIZE, &n, 0); + if (!guest_page) + return -EFAULT; + + /* allocate memory for header and transport buffer */ + ret = -ENOMEM; + hdr = kmalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) + goto e_unpin; + + trans_data = kmalloc(params.trans_len, GFP_KERNEL_ACCOUNT); + if (!trans_data) + goto e_free_hdr; + + memset(&data, 0, sizeof(data)); + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_address = __psp_pa(trans_data); + data.trans_len = params.trans_len; + + /* The SEND_UPDATE_DATA command requires C-bit to be always set. */ + data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset; + data.guest_address |= sev_me_mask; + data.guest_len = params.guest_len; + data.handle = sev->handle; + + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error); + + if (ret) + goto e_free_trans_data; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + + /* Copy packet header to userspace. 
*/ + ret = copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len); + +e_free_trans_data: + kfree(trans_data); +e_free_hdr: + kfree(hdr); +e_unpin: + sev_unpin_memory(kvm, guest_page, n); + + return ret; +} + +static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_finish data; + + if (!sev_guest(kvm)) + return -ENOTTY; + + data.handle = sev->handle; + return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error); +} + +static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_cancel data; + + if (!sev_guest(kvm)) + return -ENOTTY; + + data.handle = sev->handle; + return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error); +} + +static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_receive_start start; + struct kvm_sev_receive_start params; + int *error = &argp->error; + void *session_data; + void *pdh_data; + int ret; + + if (!sev_guest(kvm)) + return -ENOTTY; + + /* Get parameter from the userspace */ + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_sev_receive_start))) + return -EFAULT; + + /* some sanity checks */ + if (!params.pdh_uaddr || !params.pdh_len || + !params.session_uaddr || !params.session_len) + return -EINVAL; + + pdh_data = psp_copy_user_blob(params.pdh_uaddr, params.pdh_len); + if (IS_ERR(pdh_data)) + return PTR_ERR(pdh_data); + + session_data = psp_copy_user_blob(params.session_uaddr, + params.session_len); + if (IS_ERR(session_data)) { + ret = PTR_ERR(session_data); + goto e_free_pdh; + } + + memset(&start, 0, sizeof(start)); + start.handle = params.handle; + start.policy = params.policy; + start.pdh_cert_address = __psp_pa(pdh_data); + start.pdh_cert_len = params.pdh_len; + start.session_address = __psp_pa(session_data); + start.session_len = params.session_len; + + /* create memory encryption context */ + ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, &start, + error); + if (ret) + goto e_free_session; + + /* Bind ASID to this guest */ + ret = sev_bind_asid(kvm, start.handle, error); + if (ret) + goto e_free_session; + + params.handle = start.handle; + if (copy_to_user((void __user *)(uintptr_t)argp->data, + &params, sizeof(struct kvm_sev_receive_start))) { + ret = -EFAULT; + sev_unbind_asid(kvm, start.handle); + goto e_free_session; + } + + sev->handle = start.handle; + sev->fd = argp->sev_fd; + +e_free_session: + kfree(session_data); +e_free_pdh: + kfree(pdh_data); + + return ret; +} + +static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_sev_receive_update_data params; + struct sev_data_receive_update_data data; + void *hdr = NULL, *trans = NULL; + struct page **guest_page; + unsigned long n; + int ret, offset; + + if (!sev_guest(kvm)) + return -EINVAL; + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_sev_receive_update_data))) + return -EFAULT; + + if (!params.hdr_uaddr || !params.hdr_len || + !params.guest_uaddr || !params.guest_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + /* Check if we are crossing the page boundary */ + offset = params.guest_uaddr & (PAGE_SIZE - 1); + if ((params.guest_len + offset > PAGE_SIZE)) + return -EINVAL; + + 
hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); + if (IS_ERR(hdr)) + return PTR_ERR(hdr); + + trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto e_free_hdr; + } + + memset(&data, 0, sizeof(data)); + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_address = __psp_pa(trans); + data.trans_len = params.trans_len; + + /* Pin guest memory */ + ret = -EFAULT; + guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, + PAGE_SIZE, &n, 0); + if (!guest_page) + goto e_free_trans; + + /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */ + data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset; + data.guest_address |= sev_me_mask; + data.guest_len = params.guest_len; + data.handle = sev->handle; + + ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data, + &argp->error); + + sev_unpin_memory(kvm, guest_page, n); + +e_free_trans: + kfree(trans); +e_free_hdr: + kfree(hdr); + + return ret; +} + +static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_receive_finish data; + + if (!sev_guest(kvm)) + return -ENOTTY; + + data.handle = sev->handle; + return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error); +} + int svm_mem_enc_op(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; int r; - if (!svm_sev_enabled() || !sev) + if (!sev_enabled) return -ENOTTY; if (!argp) @@ -1126,13 +1509,22 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp) mutex_lock(&kvm->lock); + /* enc_context_owner handles all memory enc operations */ + if (is_mirroring_enc_context(kvm)) { + r = -EINVAL; + goto out; + } + switch (sev_cmd.id) { + case KVM_SEV_ES_INIT: + if (!sev_es_enabled) { + r = -ENOTTY; + goto out; + } + fallthrough; case KVM_SEV_INIT: r = sev_guest_init(kvm, &sev_cmd); break; - case KVM_SEV_ES_INIT: - r = sev_es_guest_init(kvm, &sev_cmd); - break; case KVM_SEV_LAUNCH_START: r = sev_launch_start(kvm, &sev_cmd); break; @@ -1163,6 +1555,27 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_SEV_GET_ATTESTATION_REPORT: r = sev_get_attestation_report(kvm, &sev_cmd); break; + case KVM_SEV_SEND_START: + r = sev_send_start(kvm, &sev_cmd); + break; + case KVM_SEV_SEND_UPDATE_DATA: + r = sev_send_update_data(kvm, &sev_cmd); + break; + case KVM_SEV_SEND_FINISH: + r = sev_send_finish(kvm, &sev_cmd); + break; + case KVM_SEV_SEND_CANCEL: + r = sev_send_cancel(kvm, &sev_cmd); + break; + case KVM_SEV_RECEIVE_START: + r = sev_receive_start(kvm, &sev_cmd); + break; + case KVM_SEV_RECEIVE_UPDATE_DATA: + r = sev_receive_update_data(kvm, &sev_cmd); + break; + case KVM_SEV_RECEIVE_FINISH: + r = sev_receive_finish(kvm, &sev_cmd); + break; default: r = -EINVAL; goto out; @@ -1186,6 +1599,10 @@ int svm_register_enc_region(struct kvm *kvm, if (!sev_guest(kvm)) return -ENOTTY; + /* If kvm is mirroring encryption context it isn't responsible for it */ + if (is_mirroring_enc_context(kvm)) + return -EINVAL; + if (range->addr > ULONG_MAX || range->size > ULONG_MAX) return -EINVAL; @@ -1252,6 +1669,10 @@ int svm_unregister_enc_region(struct kvm *kvm, struct enc_region *region; int ret; + /* If kvm is mirroring encryption context it isn't responsible for it */ + if (is_mirroring_enc_context(kvm)) + return -EINVAL; + mutex_lock(&kvm->lock); if (!sev_guest(kvm)) { @@ -1282,6 +1703,71 @@ failed: return ret; } +int svm_vm_copy_asid_from(struct kvm *kvm, 
unsigned int source_fd) +{ + struct file *source_kvm_file; + struct kvm *source_kvm; + struct kvm_sev_info *mirror_sev; + unsigned int asid; + int ret; + + source_kvm_file = fget(source_fd); + if (!file_is_kvm(source_kvm_file)) { + ret = -EBADF; + goto e_source_put; + } + + source_kvm = source_kvm_file->private_data; + mutex_lock(&source_kvm->lock); + + if (!sev_guest(source_kvm)) { + ret = -EINVAL; + goto e_source_unlock; + } + + /* Mirrors of mirrors should work, but let's not get silly */ + if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) { + ret = -EINVAL; + goto e_source_unlock; + } + + asid = to_kvm_svm(source_kvm)->sev_info.asid; + + /* + * The mirror kvm holds an enc_context_owner ref so its asid can't + * disappear until we're done with it + */ + kvm_get_kvm(source_kvm); + + fput(source_kvm_file); + mutex_unlock(&source_kvm->lock); + mutex_lock(&kvm->lock); + + if (sev_guest(kvm)) { + ret = -EINVAL; + goto e_mirror_unlock; + } + + /* Set enc_context_owner and copy its encryption context over */ + mirror_sev = &to_kvm_svm(kvm)->sev_info; + mirror_sev->enc_context_owner = source_kvm; + mirror_sev->asid = asid; + mirror_sev->active = true; + + mutex_unlock(&kvm->lock); + return 0; + +e_mirror_unlock: + mutex_unlock(&kvm->lock); + kvm_put_kvm(source_kvm); + return ret; +e_source_unlock: + mutex_unlock(&source_kvm->lock); +e_source_put: + fput(source_kvm_file); + return ret; +} + void sev_vm_destroy(struct kvm *kvm) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -1291,6 +1777,12 @@ void sev_vm_destroy(struct kvm *kvm) if (!sev_guest(kvm)) return; + /* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */ + if (is_mirroring_enc_context(kvm)) { + kvm_put_kvm(sev->enc_context_owner); + return; + } + mutex_lock(&kvm->lock); /* @@ -1315,15 +1807,27 @@ void sev_vm_destroy(struct kvm *kvm) mutex_unlock(&kvm->lock); sev_unbind_asid(kvm, sev->handle); - sev_asid_free(sev->asid); + sev_asid_free(sev); +} + +void __init sev_set_cpu_caps(void) +{ + if (!sev_enabled) + kvm_cpu_cap_clear(X86_FEATURE_SEV); + if (!sev_es_enabled) + kvm_cpu_cap_clear(X86_FEATURE_SEV_ES); } void __init sev_hardware_setup(void) { - unsigned int eax, ebx, ecx, edx; +#ifdef CONFIG_KVM_AMD_SEV + unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count; bool sev_es_supported = false; bool sev_supported = false; + if (!sev_enabled || !npt_enabled) + goto out; + /* Does the CPU support SEV? */ if (!boot_cpu_has(X86_FEATURE_SEV)) goto out; @@ -1336,12 +1840,12 @@ void __init sev_hardware_setup(void) /* Maximum number of encrypted guests supported simultaneously */ max_sev_asid = ecx; - - if (!svm_sev_enabled()) + if (!max_sev_asid) goto out; /* Minimum ASID value that should be used for SEV guest */ min_sev_asid = edx; + sev_me_mask = 1UL << (ebx & 0x3f); /* Initialize SEV ASID bitmaps */ sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL); @@ -1349,14 +1853,21 @@ void __init sev_hardware_setup(void) goto out; sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL); - if (!sev_reclaim_asid_bitmap) + if (!sev_reclaim_asid_bitmap) { + bitmap_free(sev_asid_bitmap); + sev_asid_bitmap = NULL; + goto out; + } + + sev_asid_count = max_sev_asid - min_sev_asid + 1; + if (misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count)) goto out; - pr_info("SEV supported: %u ASIDs\n", max_sev_asid - min_sev_asid + 1); + pr_info("SEV supported: %u ASIDs\n", sev_asid_count); sev_supported = true; /* SEV-ES support requested? 
*/ - if (!sev_es) + if (!sev_es_enabled) goto out; /* Does the CPU support SEV-ES? */ @@ -1367,23 +1878,44 @@ void __init sev_hardware_setup(void) if (min_sev_asid == 1) goto out; - pr_info("SEV-ES supported: %u ASIDs\n", min_sev_asid - 1); + sev_es_asid_count = min_sev_asid - 1; + if (misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count)) + goto out; + + pr_info("SEV-ES supported: %u ASIDs\n", sev_es_asid_count); sev_es_supported = true; out: - sev = sev_supported; - sev_es = sev_es_supported; + sev_enabled = sev_supported; + sev_es_enabled = sev_es_supported; +#endif } void sev_hardware_teardown(void) { - if (!svm_sev_enabled()) + if (!sev_enabled) return; + /* No need to take sev_bitmap_lock, all VMs have been destroyed. */ + sev_flush_asids(0, max_sev_asid); + bitmap_free(sev_asid_bitmap); bitmap_free(sev_reclaim_asid_bitmap); - sev_flush_asids(); + misc_cg_set_capacity(MISC_CG_RES_SEV, 0); + misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0); +} + +int sev_cpu_init(struct svm_cpu_data *sd) +{ + if (!sev_enabled) + return 0; + + sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *), GFP_KERNEL); + if (!sd->sev_vmcbs) + return -ENOMEM; + + return 0; } /* @@ -1775,7 +2307,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) len, GHCB_SCRATCH_AREA_LIMIT); return false; } - scratch_va = kzalloc(len, GFP_KERNEL); + scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT); if (!scratch_va) return false; @@ -1849,7 +2381,7 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm) vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn; vcpu->arch.regs[VCPU_REGS_RCX] = 0; - ret = svm_invoke_exit_handler(svm, SVM_EXIT_CPUID); + ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID); if (!ret) { ret = -EINVAL; break; @@ -1899,8 +2431,9 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm) return ret; } -int sev_handle_vmgexit(struct vcpu_svm *svm) +int sev_handle_vmgexit(struct kvm_vcpu *vcpu) { + struct vcpu_svm *svm = to_svm(vcpu); struct vmcb_control_area *control = &svm->vmcb->control; u64 ghcb_gpa, exit_code; struct ghcb *ghcb; @@ -1912,13 +2445,13 @@ int sev_handle_vmgexit(struct vcpu_svm *svm) return sev_handle_vmgexit_msr_protocol(svm); if (!ghcb_gpa) { - vcpu_unimpl(&svm->vcpu, "vmgexit: GHCB gpa is not set\n"); + vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n"); return -EINVAL; } - if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) { + if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) { /* Unable to map GHCB from guest */ - vcpu_unimpl(&svm->vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n", + vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n", ghcb_gpa); return -EINVAL; } @@ -1926,7 +2459,7 @@ int sev_handle_vmgexit(struct vcpu_svm *svm) svm->ghcb = svm->ghcb_map.hva; ghcb = svm->ghcb_map.hva; - trace_kvm_vmgexit_enter(svm->vcpu.vcpu_id, ghcb); + trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb); exit_code = ghcb_get_sw_exit_code(ghcb); @@ -1944,7 +2477,7 @@ int sev_handle_vmgexit(struct vcpu_svm *svm) if (!setup_vmgexit_scratch(svm, true, control->exit_info_2)) break; - ret = kvm_sev_es_mmio_read(&svm->vcpu, + ret = kvm_sev_es_mmio_read(vcpu, control->exit_info_1, control->exit_info_2, svm->ghcb_sa); @@ -1953,19 +2486,19 @@ int sev_handle_vmgexit(struct vcpu_svm *svm) if (!setup_vmgexit_scratch(svm, false, control->exit_info_2)) break; - ret = kvm_sev_es_mmio_write(&svm->vcpu, + ret = kvm_sev_es_mmio_write(vcpu, control->exit_info_1, control->exit_info_2, svm->ghcb_sa); break; case 
SVM_VMGEXIT_NMI_COMPLETE: - ret = svm_invoke_exit_handler(svm, SVM_EXIT_IRET); + ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET); break; case SVM_VMGEXIT_AP_HLT_LOOP: - ret = kvm_emulate_ap_reset_hold(&svm->vcpu); + ret = kvm_emulate_ap_reset_hold(vcpu); break; case SVM_VMGEXIT_AP_JUMP_TABLE: { - struct kvm_sev_info *sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info; + struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info; switch (control->exit_info_1) { case 0: @@ -1990,12 +2523,12 @@ int sev_handle_vmgexit(struct vcpu_svm *svm) break; } case SVM_VMGEXIT_UNSUPPORTED_EVENT: - vcpu_unimpl(&svm->vcpu, + vcpu_unimpl(vcpu, "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n", control->exit_info_1, control->exit_info_2); break; default: - ret = svm_invoke_exit_handler(svm, exit_code); + ret = svm_invoke_exit_handler(vcpu, exit_code); } return ret; @@ -2082,7 +2615,7 @@ void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu) hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400); hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); - /* PKRU is restored on VMEXIT, save the curent host value */ + /* PKRU is restored on VMEXIT, save the current host value */ hostsa->pkru = read_pkru(); /* MSR_IA32_XSS is restored on VMEXIT, save the currnet host value */ @@ -2104,5 +2637,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a * non-zero value. */ + if (!svm->ghcb) + return; + ghcb_set_sw_exit_info_2(svm->ghcb, 1); } diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 58a45bb139f8..b649f92287a2 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -56,9 +56,6 @@ static const struct x86_cpu_id svm_cpu_id[] = { MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id); #endif -#define IOPM_ALLOC_ORDER 2 -#define MSRPM_ALLOC_ORDER 1 - #define SEG_TYPE_LDT 2 #define SEG_TYPE_BUSY_TSS16 3 @@ -95,6 +92,8 @@ static const struct svm_direct_access_msrs { } direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = { { .index = MSR_STAR, .always = true }, { .index = MSR_IA32_SYSENTER_CS, .always = true }, + { .index = MSR_IA32_SYSENTER_EIP, .always = false }, + { .index = MSR_IA32_SYSENTER_ESP, .always = false }, #ifdef CONFIG_X86_64 { .index = MSR_GS_BASE, .always = true }, { .index = MSR_FS_BASE, .always = true }, @@ -186,14 +185,6 @@ module_param(vls, int, 0444); static int vgif = true; module_param(vgif, int, 0444); -/* enable/disable SEV support */ -int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT); -module_param(sev, int, 0444); - -/* enable/disable SEV-ES support */ -int sev_es = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT); -module_param(sev_es, int, 0444); - bool __read_mostly dump_invalid_vmcb; module_param(dump_invalid_vmcb, bool, 0644); @@ -214,6 +205,15 @@ struct kvm_ldttss_desc { DEFINE_PER_CPU(struct svm_cpu_data *, svm_data); +/* + * Only MSR_TSC_AUX is switched via the user return hook. EFER is switched via + * the VMCB, and the SYSCALL/SYSENTER MSRs are handled by VMLOAD/VMSAVE. + * + * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to + * defer the restoration of TSC_AUX until the CPU returns to userspace. + */ +#define TSC_AUX_URET_SLOT 0 + static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000}; #define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges) @@ -279,7 +279,7 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) * In this case we will return to the nested guest * as soon as we leave SMM. 
*/ - if (!is_smm(&svm->vcpu)) + if (!is_smm(vcpu)) svm_free_nested(svm); } else { @@ -363,10 +363,10 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu) bool has_error_code = vcpu->arch.exception.has_error_code; u32 error_code = vcpu->arch.exception.error_code; - kvm_deliver_exception_payload(&svm->vcpu); + kvm_deliver_exception_payload(vcpu); if (nr == BP_VECTOR && !nrips) { - unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu); + unsigned long rip, old_rip = kvm_rip_read(vcpu); /* * For guest debugging where we have to reinject #BP if some @@ -375,8 +375,8 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu) * raises a fault that is not intercepted. Still better than * failing in all cases. */ - (void)skip_emulated_instruction(&svm->vcpu); - rip = kvm_rip_read(&svm->vcpu); + (void)skip_emulated_instruction(vcpu); + rip = kvm_rip_read(vcpu); svm->int3_rip = rip + svm->vmcb->save.cs.base; svm->int3_injected = rip - old_rip; } @@ -553,23 +553,21 @@ static void svm_cpu_uninit(int cpu) static int svm_cpu_init(int cpu) { struct svm_cpu_data *sd; + int ret = -ENOMEM; sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL); if (!sd) - return -ENOMEM; + return ret; sd->cpu = cpu; sd->save_area = alloc_page(GFP_KERNEL); if (!sd->save_area) goto free_cpu_data; + clear_page(page_address(sd->save_area)); - if (svm_sev_enabled()) { - sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1, - sizeof(void *), - GFP_KERNEL); - if (!sd->sev_vmcbs) - goto free_save_area; - } + ret = sev_cpu_init(sd); + if (ret) + goto free_save_area; per_cpu(svm_data, cpu) = sd; @@ -579,7 +577,7 @@ free_save_area: __free_page(sd->save_area); free_cpu_data: kfree(sd); - return -ENOMEM; + return ret; } @@ -681,14 +679,15 @@ void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr, u32 *svm_vcpu_alloc_msrpm(void) { - struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER); + unsigned int order = get_order(MSRPM_SIZE); + struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order); u32 *msrpm; if (!pages) return NULL; msrpm = page_address(pages); - memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER)); + memset(msrpm, 0xff, PAGE_SIZE * (1 << order)); return msrpm; } @@ -707,7 +706,7 @@ void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm) void svm_vcpu_free_msrpm(u32 *msrpm) { - __free_pages(virt_to_page(msrpm), MSRPM_ALLOC_ORDER); + __free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE)); } static void svm_msr_filter_changed(struct kvm_vcpu *vcpu) @@ -881,20 +880,20 @@ static __init void svm_adjust_mmio_mask(void) */ mask = (mask_bit < 52) ? 
rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0; - kvm_mmu_set_mmio_spte_mask(mask, PT_WRITABLE_MASK | PT_USER_MASK); + kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK); } static void svm_hardware_teardown(void) { int cpu; - if (svm_sev_enabled()) - sev_hardware_teardown(); + sev_hardware_teardown(); for_each_possible_cpu(cpu) svm_cpu_uninit(cpu); - __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER); + __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), + get_order(IOPM_SIZE)); iopm_base = 0; } @@ -922,6 +921,9 @@ static __init void svm_set_cpu_caps(void) if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) || boot_cpu_has(X86_FEATURE_AMD_SSBD)) kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD); + + /* CPUID 0x8000001F (SME/SEV features) */ + sev_set_cpu_caps(); } static __init int svm_hardware_setup(void) @@ -930,14 +932,15 @@ static __init int svm_hardware_setup(void) struct page *iopm_pages; void *iopm_va; int r; + unsigned int order = get_order(IOPM_SIZE); - iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER); + iopm_pages = alloc_pages(GFP_KERNEL, order); if (!iopm_pages) return -ENOMEM; iopm_va = page_address(iopm_pages); - memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER)); + memset(iopm_va, 0xff, PAGE_SIZE * (1 << order)); iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT; init_msrpm_offsets(); @@ -956,6 +959,9 @@ static __init int svm_hardware_setup(void) kvm_tsc_scaling_ratio_frac_bits = 32; } + if (boot_cpu_has(X86_FEATURE_RDTSCP)) + kvm_define_user_return_msr(TSC_AUX_URET_SLOT, MSR_TSC_AUX); + /* Check for pause filtering support */ if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) { pause_filter_count = 0; @@ -969,21 +975,6 @@ static __init int svm_hardware_setup(void) kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE); } - if (IS_ENABLED(CONFIG_KVM_AMD_SEV) && sev) { - sev_hardware_setup(); - } else { - sev = false; - sev_es = false; - } - - svm_adjust_mmio_mask(); - - for_each_possible_cpu(cpu) { - r = svm_cpu_init(cpu); - if (r) - goto err; - } - /* * KVM's MMU doesn't support using 2-level paging for itself, and thus * NPT isn't supported if the host is using 2-level paging since host @@ -998,6 +989,17 @@ static __init int svm_hardware_setup(void) kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G); pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis"); + /* Note, SEV setup consumes npt_enabled. */ + sev_hardware_setup(); + + svm_adjust_mmio_mask(); + + for_each_possible_cpu(cpu) { + r = svm_cpu_init(cpu); + if (r) + goto err; + } + if (nrips) { if (!boot_cpu_has(X86_FEATURE_NRIPS)) nrips = false; @@ -1084,8 +1086,8 @@ static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) if (is_guest_mode(vcpu)) { /* Write L1's TSC offset. 
*/ g_tsc_offset = svm->vmcb->control.tsc_offset - - svm->nested.hsave->control.tsc_offset; - svm->nested.hsave->control.tsc_offset = offset; + svm->vmcb01.ptr->control.tsc_offset; + svm->vmcb01.ptr->control.tsc_offset = offset; } trace_kvm_write_tsc_offset(vcpu->vcpu_id, @@ -1113,12 +1115,13 @@ static void svm_check_invpcid(struct vcpu_svm *svm) } } -static void init_vmcb(struct vcpu_svm *svm) +static void init_vmcb(struct kvm_vcpu *vcpu) { + struct vcpu_svm *svm = to_svm(vcpu); struct vmcb_control_area *control = &svm->vmcb->control; struct vmcb_save_area *save = &svm->vmcb->save; - svm->vcpu.arch.hflags = 0; + vcpu->arch.hflags = 0; svm_set_intercept(svm, INTERCEPT_CR0_READ); svm_set_intercept(svm, INTERCEPT_CR3_READ); @@ -1126,7 +1129,7 @@ static void init_vmcb(struct vcpu_svm *svm) svm_set_intercept(svm, INTERCEPT_CR0_WRITE); svm_set_intercept(svm, INTERCEPT_CR3_WRITE); svm_set_intercept(svm, INTERCEPT_CR4_WRITE); - if (!kvm_vcpu_apicv_active(&svm->vcpu)) + if (!kvm_vcpu_apicv_active(vcpu)) svm_set_intercept(svm, INTERCEPT_CR8_WRITE); set_dr_intercepts(svm); @@ -1170,12 +1173,12 @@ static void init_vmcb(struct vcpu_svm *svm) svm_set_intercept(svm, INTERCEPT_RDPRU); svm_set_intercept(svm, INTERCEPT_RSM); - if (!kvm_mwait_in_guest(svm->vcpu.kvm)) { + if (!kvm_mwait_in_guest(vcpu->kvm)) { svm_set_intercept(svm, INTERCEPT_MONITOR); svm_set_intercept(svm, INTERCEPT_MWAIT); } - if (!kvm_hlt_in_guest(svm->vcpu.kvm)) + if (!kvm_hlt_in_guest(vcpu->kvm)) svm_set_intercept(svm, INTERCEPT_HLT); control->iopm_base_pa = __sme_set(iopm_base); @@ -1201,19 +1204,19 @@ static void init_vmcb(struct vcpu_svm *svm) init_sys_seg(&save->ldtr, SEG_TYPE_LDT); init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); - svm_set_cr4(&svm->vcpu, 0); - svm_set_efer(&svm->vcpu, 0); + svm_set_cr4(vcpu, 0); + svm_set_efer(vcpu, 0); save->dr6 = 0xffff0ff0; - kvm_set_rflags(&svm->vcpu, X86_EFLAGS_FIXED); + kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); save->rip = 0x0000fff0; - svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip; + vcpu->arch.regs[VCPU_REGS_RIP] = save->rip; /* * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0. * It also updates the guest-visible cr0 value. */ - svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET); - kvm_mmu_reset_context(&svm->vcpu); + svm_set_cr0(vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET); + kvm_mmu_reset_context(vcpu); save->cr4 = X86_CR4_PAE; /* rdx = ?? */ @@ -1225,17 +1228,18 @@ static void init_vmcb(struct vcpu_svm *svm) clr_exception_intercept(svm, PF_VECTOR); svm_clr_intercept(svm, INTERCEPT_CR3_READ); svm_clr_intercept(svm, INTERCEPT_CR3_WRITE); - save->g_pat = svm->vcpu.arch.pat; + save->g_pat = vcpu->arch.pat; save->cr3 = 0; save->cr4 = 0; } - svm->asid_generation = 0; + svm->current_vmcb->asid_generation = 0; svm->asid = 0; svm->nested.vmcb12_gpa = 0; - svm->vcpu.arch.hflags = 0; + svm->nested.last_vmcb12_gpa = 0; + vcpu->arch.hflags = 0; - if (!kvm_pause_in_guest(svm->vcpu.kvm)) { + if (!kvm_pause_in_guest(vcpu->kvm)) { control->pause_filter_count = pause_filter_count; if (pause_filter_thresh) control->pause_filter_thresh = pause_filter_thresh; @@ -1246,18 +1250,15 @@ static void init_vmcb(struct vcpu_svm *svm) svm_check_invpcid(svm); - if (kvm_vcpu_apicv_active(&svm->vcpu)) - avic_init_vmcb(svm); - /* - * If hardware supports Virtual VMLOAD VMSAVE then enable it - * in VMCB and clear intercepts to avoid #VMEXIT. + * If the host supports V_SPEC_CTRL then disable the interception + * of MSR_IA32_SPEC_CTRL. 
*/ - if (vls) { - svm_clr_intercept(svm, INTERCEPT_VMLOAD); - svm_clr_intercept(svm, INTERCEPT_VMSAVE); - svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; - } + if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL)) + set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); + + if (kvm_vcpu_apicv_active(vcpu)) + avic_init_vmcb(svm); if (vgif) { svm_clr_intercept(svm, INTERCEPT_STGI); @@ -1265,11 +1266,11 @@ static void init_vmcb(struct vcpu_svm *svm) svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK; } - if (sev_guest(svm->vcpu.kvm)) { + if (sev_guest(vcpu->kvm)) { svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; clr_exception_intercept(svm, UD_VECTOR); - if (sev_es_guest(svm->vcpu.kvm)) { + if (sev_es_guest(vcpu->kvm)) { /* Perform SEV-ES specific VMCB updates */ sev_es_init_vmcb(svm); } @@ -1291,12 +1292,12 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) svm->virt_spec_ctrl = 0; if (!init_event) { - svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | - MSR_IA32_APICBASE_ENABLE; - if (kvm_vcpu_is_reset_bsp(&svm->vcpu)) - svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; + vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE | + MSR_IA32_APICBASE_ENABLE; + if (kvm_vcpu_is_reset_bsp(vcpu)) + vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP; } - init_vmcb(svm); + init_vmcb(vcpu); kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, false); kvm_rdx_write(vcpu, eax); @@ -1305,10 +1306,16 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE); } +void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb) +{ + svm->current_vmcb = target_vmcb; + svm->vmcb = target_vmcb->ptr; +} + static int svm_create_vcpu(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm; - struct page *vmcb_page; + struct page *vmcb01_page; struct page *vmsa_page = NULL; int err; @@ -1316,11 +1323,11 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu) svm = to_svm(vcpu); err = -ENOMEM; - vmcb_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); - if (!vmcb_page) + vmcb01_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); + if (!vmcb01_page) goto out; - if (sev_es_guest(svm->vcpu.kvm)) { + if (sev_es_guest(vcpu->kvm)) { /* * SEV-ES guests require a separate VMSA page used to contain * the encrypted register state of the guest. 
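Aside, not part of the patch: the vmcb01/current_vmcb bookkeeping that the next hunk wires into svm_create_vcpu reduces to an "active VMCB" pointer plus a small per-VMCB metadata record. The sketch below is a minimal, self-contained illustration of that pattern under invented names (vmcb_info, vcpu_ctx, switch_vmcb, pre_run); the fields are trimmed to the ones this diff touches and none of it is the kernel's actual code.

	/*
	 * Minimal sketch (illustrative only): per-VMCB metadata plus an
	 * "active VMCB" pointer, mirroring the vmcb01/current_vmcb pattern
	 * introduced in this patch.  All names are made up for the example.
	 */
	#include <stdio.h>
	#include <stdint.h>

	struct vmcb_info {
		void     *ptr;              /* mapped VMCB page */
		uint64_t  pa;               /* physical address handed to VMRUN */
		uint64_t  asid_generation;  /* zero forces a fresh ASID */
		int       cpu;              /* last physical CPU that ran this VMCB */
	};

	struct vcpu_ctx {
		struct vmcb_info vmcb01;    /* always-valid VMCB */
		struct vmcb_info vmcb02;    /* used while running a nested guest */
		struct vmcb_info *current_vmcb;
		void *vmcb;                 /* convenience alias of current_vmcb->ptr */
	};

	/* Point the vCPU at a different VMCB; both VMCBs stay allocated. */
	static void switch_vmcb(struct vcpu_ctx *v, struct vmcb_info *target)
	{
		v->current_vmcb = target;
		v->vmcb = target->ptr;
	}

	/*
	 * Before running: if this VMCB last ran on a different physical CPU,
	 * its clean bits and ASID can no longer be trusted, so invalidate
	 * them by resetting the per-VMCB generation.
	 */
	static void pre_run(struct vcpu_ctx *v, int this_cpu)
	{
		if (v->current_vmcb->cpu != this_cpu) {
			v->current_vmcb->asid_generation = 0;
			v->current_vmcb->cpu = this_cpu;
		}
	}

	int main(void)
	{
		static char page01[4096], page02[4096];
		struct vcpu_ctx v = {
			.vmcb01 = { .ptr = page01, .cpu = -1 },
			.vmcb02 = { .ptr = page02, .cpu = -1 },
		};

		switch_vmcb(&v, &v.vmcb01);   /* run the L1 guest */
		pre_run(&v, 0);
		switch_vmcb(&v, &v.vmcb02);   /* enter a nested (L2) guest */
		pre_run(&v, 0);
		printf("active vmcb generation: %llu\n",
		       (unsigned long long)v.current_vmcb->asid_generation);
		return 0;
	}

The design point the sketch tries to show: keeping the generation and last-CPU fields per VMCB, rather than per vCPU as the removed svm->asid_generation was, is what lets vmcb01 and vmcb02 be marked dirty and re-ASIDed independently when the vCPU migrates between physical CPUs.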
@@ -1356,20 +1363,21 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu) svm_vcpu_init_msrpm(vcpu, svm->msrpm); - svm->vmcb = page_address(vmcb_page); - svm->vmcb_pa = __sme_set(page_to_pfn(vmcb_page) << PAGE_SHIFT); + svm->vmcb01.ptr = page_address(vmcb01_page); + svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT); if (vmsa_page) svm->vmsa = page_address(vmsa_page); - svm->asid_generation = 0; svm->guest_state_loaded = false; - init_vmcb(svm); + + svm_switch_vmcb(svm, &svm->vmcb01); + init_vmcb(vcpu); svm_init_osvw(vcpu); vcpu->arch.microcode_version = 0x01000065; - if (sev_es_guest(svm->vcpu.kvm)) + if (sev_es_guest(vcpu->kvm)) /* Perform SEV-ES specific VMCB creation updates */ sev_es_create_vcpu(svm); @@ -1379,7 +1387,7 @@ error_free_vmsa_page: if (vmsa_page) __free_page(vmsa_page); error_free_vmcb_page: - __free_page(vmcb_page); + __free_page(vmcb01_page); out: return err; } @@ -1407,32 +1415,23 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu) sev_free_vcpu(vcpu); - __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT)); - __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); + __free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT)); + __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE)); } static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); - unsigned int i; if (svm->guest_state_loaded) return; /* - * Certain MSRs are restored on VMEXIT (sev-es), or vmload of host save - * area (non-sev-es). Save ones that aren't so we can restore them - * individually later. - */ - for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) - rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); - - /* * Save additional host state that will be restored on VMEXIT (sev-es) * or subsequent vmload of host save area. */ - if (sev_es_guest(svm->vcpu.kvm)) { + if (sev_es_guest(vcpu->kvm)) { sev_es_prepare_guest_switch(svm, vcpu->cpu); } else { vmsave(__sme_page_pa(sd->save_area)); @@ -1446,29 +1445,15 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) } } - /* This assumes that the kernel never uses MSR_TSC_AUX */ if (static_cpu_has(X86_FEATURE_RDTSCP)) - wrmsrl(MSR_TSC_AUX, svm->tsc_aux); + kvm_set_user_return_msr(TSC_AUX_URET_SLOT, svm->tsc_aux, -1ull); svm->guest_state_loaded = true; } static void svm_prepare_host_switch(struct kvm_vcpu *vcpu) { - struct vcpu_svm *svm = to_svm(vcpu); - unsigned int i; - - if (!svm->guest_state_loaded) - return; - - /* - * Certain MSRs are restored on VMEXIT (sev-es), or vmload of host save - * area (non-sev-es). Restore the ones that weren't. - */ - for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) - wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); - - svm->guest_state_loaded = false; + to_svm(vcpu)->guest_state_loaded = false; } static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) @@ -1476,11 +1461,6 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) struct vcpu_svm *svm = to_svm(vcpu); struct svm_cpu_data *sd = per_cpu(svm_data, cpu); - if (unlikely(cpu != vcpu->cpu)) { - svm->asid_generation = 0; - vmcb_mark_all_dirty(svm->vmcb); - } - if (sd->current_vmcb != svm->vmcb) { sd->current_vmcb = svm->vmcb; indirect_branch_prediction_barrier(); @@ -1564,7 +1544,7 @@ static void svm_clear_vintr(struct vcpu_svm *svm) /* Drop int_ctl fields related to VINTR injection. 
*/ svm->vmcb->control.int_ctl &= mask; if (is_guest_mode(&svm->vcpu)) { - svm->nested.hsave->control.int_ctl &= mask; + svm->vmcb01.ptr->control.int_ctl &= mask; WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) != (svm->nested.ctl.int_ctl & V_TPR_MASK)); @@ -1577,16 +1557,17 @@ static void svm_clear_vintr(struct vcpu_svm *svm) static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) { struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; + struct vmcb_save_area *save01 = &to_svm(vcpu)->vmcb01.ptr->save; switch (seg) { case VCPU_SREG_CS: return &save->cs; case VCPU_SREG_DS: return &save->ds; case VCPU_SREG_ES: return &save->es; - case VCPU_SREG_FS: return &save->fs; - case VCPU_SREG_GS: return &save->gs; + case VCPU_SREG_FS: return &save01->fs; + case VCPU_SREG_GS: return &save01->gs; case VCPU_SREG_SS: return &save->ss; - case VCPU_SREG_TR: return &save->tr; - case VCPU_SREG_LDTR: return &save->ldtr; + case VCPU_SREG_TR: return &save01->tr; + case VCPU_SREG_LDTR: return &save01->ldtr; } BUG(); return NULL; @@ -1709,37 +1690,10 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) vmcb_mark_dirty(svm->vmcb, VMCB_DT); } -static void update_cr0_intercept(struct vcpu_svm *svm) -{ - ulong gcr0; - u64 *hcr0; - - /* - * SEV-ES guests must always keep the CR intercepts cleared. CR - * tracking is done using the CR write traps. - */ - if (sev_es_guest(svm->vcpu.kvm)) - return; - - gcr0 = svm->vcpu.arch.cr0; - hcr0 = &svm->vmcb->save.cr0; - *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK) - | (gcr0 & SVM_CR0_SELECTIVE_MASK); - - vmcb_mark_dirty(svm->vmcb, VMCB_CR); - - if (gcr0 == *hcr0) { - svm_clr_intercept(svm, INTERCEPT_CR0_READ); - svm_clr_intercept(svm, INTERCEPT_CR0_WRITE); - } else { - svm_set_intercept(svm, INTERCEPT_CR0_READ); - svm_set_intercept(svm, INTERCEPT_CR0_WRITE); - } -} - void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { struct vcpu_svm *svm = to_svm(vcpu); + u64 hcr0 = cr0; #ifdef CONFIG_X86_64 if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) { @@ -1757,7 +1711,7 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) vcpu->arch.cr0 = cr0; if (!npt_enabled) - cr0 |= X86_CR0_PG | X86_CR0_WP; + hcr0 |= X86_CR0_PG | X86_CR0_WP; /* * re-enable caching here because the QEMU bios @@ -1765,10 +1719,26 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) * reboot */ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) - cr0 &= ~(X86_CR0_CD | X86_CR0_NW); - svm->vmcb->save.cr0 = cr0; + hcr0 &= ~(X86_CR0_CD | X86_CR0_NW); + + svm->vmcb->save.cr0 = hcr0; vmcb_mark_dirty(svm->vmcb, VMCB_CR); - update_cr0_intercept(svm); + + /* + * SEV-ES guests must always keep the CR intercepts cleared. CR + * tracking is done using the CR write traps. + */ + if (sev_es_guest(vcpu->kvm)) + return; + + if (hcr0 == cr0) { + /* Selective CR0 write remains on. 
*/ + svm_clr_intercept(svm, INTERCEPT_CR0_READ); + svm_clr_intercept(svm, INTERCEPT_CR0_WRITE); + } else { + svm_set_intercept(svm, INTERCEPT_CR0_READ); + svm_set_intercept(svm, INTERCEPT_CR0_WRITE); + } } static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) @@ -1847,7 +1817,7 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) vmcb_mark_dirty(svm->vmcb, VMCB_ASID); } - svm->asid_generation = sd->asid_generation; + svm->current_vmcb->asid_generation = sd->asid_generation; svm->asid = sd->next_asid++; } @@ -1896,39 +1866,43 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value) vmcb_mark_dirty(svm->vmcb, VMCB_DR); } -static int pf_interception(struct vcpu_svm *svm) +static int pf_interception(struct kvm_vcpu *vcpu) { - u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2); + struct vcpu_svm *svm = to_svm(vcpu); + + u64 fault_address = svm->vmcb->control.exit_info_2; u64 error_code = svm->vmcb->control.exit_info_1; - return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address, + return kvm_handle_page_fault(vcpu, error_code, fault_address, static_cpu_has(X86_FEATURE_DECODEASSISTS) ? svm->vmcb->control.insn_bytes : NULL, svm->vmcb->control.insn_len); } -static int npf_interception(struct vcpu_svm *svm) +static int npf_interception(struct kvm_vcpu *vcpu) { + struct vcpu_svm *svm = to_svm(vcpu); + u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2); u64 error_code = svm->vmcb->control.exit_info_1; trace_kvm_page_fault(fault_address, error_code); - return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code, + return kvm_mmu_page_fault(vcpu, fault_address, error_code, static_cpu_has(X86_FEATURE_DECODEASSISTS) ? svm->vmcb->control.insn_bytes : NULL, svm->vmcb->control.insn_len); } -static int db_interception(struct vcpu_svm *svm) +static int db_interception(struct kvm_vcpu *vcpu) { - struct kvm_run *kvm_run = svm->vcpu.run; - struct kvm_vcpu *vcpu = &svm->vcpu; + struct kvm_run *kvm_run = vcpu->run; + struct vcpu_svm *svm = to_svm(vcpu); - if (!(svm->vcpu.guest_debug & + if (!(vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) && !svm->nmi_singlestep) { u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW; - kvm_queue_exception_p(&svm->vcpu, DB_VECTOR, payload); + kvm_queue_exception_p(vcpu, DB_VECTOR, payload); return 1; } @@ -1938,7 +1912,7 @@ static int db_interception(struct vcpu_svm *svm) kvm_make_request(KVM_REQ_EVENT, vcpu); } - if (svm->vcpu.guest_debug & + if (vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) { kvm_run->exit_reason = KVM_EXIT_DEBUG; kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6; @@ -1952,9 +1926,10 @@ static int db_interception(struct vcpu_svm *svm) return 1; } -static int bp_interception(struct vcpu_svm *svm) +static int bp_interception(struct kvm_vcpu *vcpu) { - struct kvm_run *kvm_run = svm->vcpu.run; + struct vcpu_svm *svm = to_svm(vcpu); + struct kvm_run *kvm_run = vcpu->run; kvm_run->exit_reason = KVM_EXIT_DEBUG; kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; @@ -1962,14 +1937,14 @@ static int bp_interception(struct vcpu_svm *svm) return 0; } -static int ud_interception(struct vcpu_svm *svm) +static int ud_interception(struct kvm_vcpu *vcpu) { - return handle_ud(&svm->vcpu); + return handle_ud(vcpu); } -static int ac_interception(struct vcpu_svm *svm) +static int ac_interception(struct kvm_vcpu *vcpu) { - kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0); + kvm_queue_exception_e(vcpu, AC_VECTOR, 0); return 1; } @@ -2012,7 
+1987,7 @@ static bool is_erratum_383(void) return true; } -static void svm_handle_mce(struct vcpu_svm *svm) +static void svm_handle_mce(struct kvm_vcpu *vcpu) { if (is_erratum_383()) { /* @@ -2021,7 +1996,7 @@ static void svm_handle_mce(struct vcpu_svm *svm) */ pr_err("KVM: Guest triggered AMD Erratum 383\n"); - kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu); + kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } @@ -2033,20 +2008,21 @@ static void svm_handle_mce(struct vcpu_svm *svm) kvm_machine_check(); } -static int mc_interception(struct vcpu_svm *svm) +static int mc_interception(struct kvm_vcpu *vcpu) { return 1; } -static int shutdown_interception(struct vcpu_svm *svm) +static int shutdown_interception(struct kvm_vcpu *vcpu) { - struct kvm_run *kvm_run = svm->vcpu.run; + struct kvm_run *kvm_run = vcpu->run; + struct vcpu_svm *svm = to_svm(vcpu); /* * The VM save area has already been encrypted so it * cannot be reinitialized - just terminate. */ - if (sev_es_guest(svm->vcpu.kvm)) + if (sev_es_guest(vcpu->kvm)) return -EINVAL; /* @@ -2054,20 +2030,20 @@ static int shutdown_interception(struct vcpu_svm *svm) * so reinitialize it. */ clear_page(svm->vmcb); - init_vmcb(svm); + init_vmcb(vcpu); kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; return 0; } -static int io_interception(struct vcpu_svm *svm) +static int io_interception(struct kvm_vcpu *vcpu) { - struct kvm_vcpu *vcpu = &svm->vcpu; + struct vcpu_svm *svm = to_svm(vcpu); u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ int size, in, string; unsigned port; - ++svm->vcpu.stat.io_exits; + ++vcpu->stat.io_exits; string = (io_info & SVM_IOIO_STR_MASK) != 0; in = (io_info & SVM_IOIO_TYPE_MASK) != 0; port = io_info >> 16; @@ -2082,93 +2058,69 @@ static int io_interception(struct vcpu_svm *svm) svm->next_rip = svm->vmcb->control.exit_info_2; - return kvm_fast_pio(&svm->vcpu, size, port, in); + return kvm_fast_pio(vcpu, size, port, in); } -static int nmi_interception(struct vcpu_svm *svm) +static int nmi_interception(struct kvm_vcpu *vcpu) { return 1; } -static int intr_interception(struct vcpu_svm *svm) +static int intr_interception(struct kvm_vcpu *vcpu) { - ++svm->vcpu.stat.irq_exits; + ++vcpu->stat.irq_exits; return 1; } -static int nop_on_interception(struct vcpu_svm *svm) +static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload) { - return 1; -} - -static int halt_interception(struct vcpu_svm *svm) -{ - return kvm_emulate_halt(&svm->vcpu); -} - -static int vmmcall_interception(struct vcpu_svm *svm) -{ - return kvm_emulate_hypercall(&svm->vcpu); -} - -static int vmload_interception(struct vcpu_svm *svm) -{ - struct vmcb *nested_vmcb; + struct vcpu_svm *svm = to_svm(vcpu); + struct vmcb *vmcb12; struct kvm_host_map map; int ret; - if (nested_svm_check_permissions(svm)) + if (nested_svm_check_permissions(vcpu)) return 1; - ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); + ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); if (ret) { if (ret == -EINVAL) - kvm_inject_gp(&svm->vcpu, 0); + kvm_inject_gp(vcpu, 0); return 1; } - nested_vmcb = map.hva; + vmcb12 = map.hva; + + ret = kvm_skip_emulated_instruction(vcpu); - ret = kvm_skip_emulated_instruction(&svm->vcpu); + if (vmload) { + nested_svm_vmloadsave(vmcb12, svm->vmcb); + svm->sysenter_eip_hi = 0; + svm->sysenter_esp_hi = 0; + } else + nested_svm_vmloadsave(svm->vmcb, vmcb12); - nested_svm_vmloadsave(nested_vmcb, svm->vmcb); - kvm_vcpu_unmap(&svm->vcpu, &map, true); + kvm_vcpu_unmap(vcpu, &map, true); return 
ret; } -static int vmsave_interception(struct vcpu_svm *svm) +static int vmload_interception(struct kvm_vcpu *vcpu) { - struct vmcb *nested_vmcb; - struct kvm_host_map map; - int ret; - - if (nested_svm_check_permissions(svm)) - return 1; - - ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); - if (ret) { - if (ret == -EINVAL) - kvm_inject_gp(&svm->vcpu, 0); - return 1; - } - - nested_vmcb = map.hva; - - ret = kvm_skip_emulated_instruction(&svm->vcpu); - - nested_svm_vmloadsave(svm->vmcb, nested_vmcb); - kvm_vcpu_unmap(&svm->vcpu, &map, true); + return vmload_vmsave_interception(vcpu, true); +} - return ret; +static int vmsave_interception(struct kvm_vcpu *vcpu) +{ + return vmload_vmsave_interception(vcpu, false); } -static int vmrun_interception(struct vcpu_svm *svm) +static int vmrun_interception(struct kvm_vcpu *vcpu) { - if (nested_svm_check_permissions(svm)) + if (nested_svm_check_permissions(vcpu)) return 1; - return nested_svm_vmrun(svm); + return nested_svm_vmrun(vcpu); } enum { @@ -2207,7 +2159,7 @@ static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode) [SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD, [SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE, }; - int (*const svm_instr_handlers[])(struct vcpu_svm *svm) = { + int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = { [SVM_INSTR_VMRUN] = vmrun_interception, [SVM_INSTR_VMLOAD] = vmload_interception, [SVM_INSTR_VMSAVE] = vmsave_interception, @@ -2216,17 +2168,13 @@ static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode) int ret; if (is_guest_mode(vcpu)) { - svm->vmcb->control.exit_code = guest_mode_exit_codes[opcode]; - svm->vmcb->control.exit_info_1 = 0; - svm->vmcb->control.exit_info_2 = 0; - /* Returns '1' or -errno on failure, '0' on success. */ - ret = nested_svm_vmexit(svm); + ret = nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]); if (ret) return ret; return 1; } - return svm_instr_handlers[opcode](svm); + return svm_instr_handlers[opcode](vcpu); } /* @@ -2237,9 +2185,9 @@ static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode) * regions (e.g. SMM memory on host). * 2) VMware backdoor */ -static int gp_interception(struct vcpu_svm *svm) +static int gp_interception(struct kvm_vcpu *vcpu) { - struct kvm_vcpu *vcpu = &svm->vcpu; + struct vcpu_svm *svm = to_svm(vcpu); u32 error_code = svm->vmcb->control.exit_info_1; int opcode; @@ -2304,73 +2252,58 @@ void svm_set_gif(struct vcpu_svm *svm, bool value) } } -static int stgi_interception(struct vcpu_svm *svm) +static int stgi_interception(struct kvm_vcpu *vcpu) { int ret; - if (nested_svm_check_permissions(svm)) + if (nested_svm_check_permissions(vcpu)) return 1; - ret = kvm_skip_emulated_instruction(&svm->vcpu); - svm_set_gif(svm, true); + ret = kvm_skip_emulated_instruction(vcpu); + svm_set_gif(to_svm(vcpu), true); return ret; } -static int clgi_interception(struct vcpu_svm *svm) +static int clgi_interception(struct kvm_vcpu *vcpu) { int ret; - if (nested_svm_check_permissions(svm)) + if (nested_svm_check_permissions(vcpu)) return 1; - ret = kvm_skip_emulated_instruction(&svm->vcpu); - svm_set_gif(svm, false); + ret = kvm_skip_emulated_instruction(vcpu); + svm_set_gif(to_svm(vcpu), false); return ret; } -static int invlpga_interception(struct vcpu_svm *svm) +static int invlpga_interception(struct kvm_vcpu *vcpu) { - struct kvm_vcpu *vcpu = &svm->vcpu; - - trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu), - kvm_rax_read(&svm->vcpu)); - - /* Let's treat INVLPGA the same as INVLPG (can be optimized!) 
*/ - kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu)); + gva_t gva = kvm_rax_read(vcpu); + u32 asid = kvm_rcx_read(vcpu); - return kvm_skip_emulated_instruction(&svm->vcpu); -} + /* FIXME: Handle an address size prefix. */ + if (!is_long_mode(vcpu)) + gva = (u32)gva; -static int skinit_interception(struct vcpu_svm *svm) -{ - trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu)); + trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva); - kvm_queue_exception(&svm->vcpu, UD_VECTOR); - return 1; -} + /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */ + kvm_mmu_invlpg(vcpu, gva); -static int wbinvd_interception(struct vcpu_svm *svm) -{ - return kvm_emulate_wbinvd(&svm->vcpu); + return kvm_skip_emulated_instruction(vcpu); } -static int xsetbv_interception(struct vcpu_svm *svm) +static int skinit_interception(struct kvm_vcpu *vcpu) { - u64 new_bv = kvm_read_edx_eax(&svm->vcpu); - u32 index = kvm_rcx_read(&svm->vcpu); - - int err = kvm_set_xcr(&svm->vcpu, index, new_bv); - return kvm_complete_insn_gp(&svm->vcpu, err); -} + trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu)); -static int rdpru_interception(struct vcpu_svm *svm) -{ - kvm_queue_exception(&svm->vcpu, UD_VECTOR); + kvm_queue_exception(vcpu, UD_VECTOR); return 1; } -static int task_switch_interception(struct vcpu_svm *svm) +static int task_switch_interception(struct kvm_vcpu *vcpu) { + struct vcpu_svm *svm = to_svm(vcpu); u16 tss_selector; int reason; int int_type = svm->vmcb->control.exit_int_info & @@ -2399,7 +2332,7 @@ static int task_switch_interception(struct vcpu_svm *svm) if (reason == TASK_SWITCH_GATE) { switch (type) { case SVM_EXITINTINFO_TYPE_NMI: - svm->vcpu.arch.nmi_injected = false; + vcpu->arch.nmi_injected = false; break; case SVM_EXITINTINFO_TYPE_EXEPT: if (svm->vmcb->control.exit_info_2 & @@ -2408,10 +2341,10 @@ static int task_switch_interception(struct vcpu_svm *svm) error_code = (u32)svm->vmcb->control.exit_info_2; } - kvm_clear_exception_queue(&svm->vcpu); + kvm_clear_exception_queue(vcpu); break; case SVM_EXITINTINFO_TYPE_INTR: - kvm_clear_interrupt_queue(&svm->vcpu); + kvm_clear_interrupt_queue(vcpu); break; default: break; @@ -2422,77 +2355,58 @@ static int task_switch_interception(struct vcpu_svm *svm) int_type == SVM_EXITINTINFO_TYPE_SOFT || (int_type == SVM_EXITINTINFO_TYPE_EXEPT && (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) { - if (!skip_emulated_instruction(&svm->vcpu)) + if (!skip_emulated_instruction(vcpu)) return 0; } if (int_type != SVM_EXITINTINFO_TYPE_SOFT) int_vec = -1; - return kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason, + return kvm_task_switch(vcpu, tss_selector, int_vec, reason, has_error_code, error_code); } -static int cpuid_interception(struct vcpu_svm *svm) +static int iret_interception(struct kvm_vcpu *vcpu) { - return kvm_emulate_cpuid(&svm->vcpu); -} + struct vcpu_svm *svm = to_svm(vcpu); -static int iret_interception(struct vcpu_svm *svm) -{ - ++svm->vcpu.stat.nmi_window_exits; - svm->vcpu.arch.hflags |= HF_IRET_MASK; - if (!sev_es_guest(svm->vcpu.kvm)) { + ++vcpu->stat.nmi_window_exits; + vcpu->arch.hflags |= HF_IRET_MASK; + if (!sev_es_guest(vcpu->kvm)) { svm_clr_intercept(svm, INTERCEPT_IRET); - svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu); + svm->nmi_iret_rip = kvm_rip_read(vcpu); } - kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); + kvm_make_request(KVM_REQ_EVENT, vcpu); return 1; } -static int invd_interception(struct vcpu_svm *svm) -{ - /* Treat an INVD instruction as a NOP and just skip it. 
*/ - return kvm_skip_emulated_instruction(&svm->vcpu); -} - -static int invlpg_interception(struct vcpu_svm *svm) +static int invlpg_interception(struct kvm_vcpu *vcpu) { if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) - return kvm_emulate_instruction(&svm->vcpu, 0); - - kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1); - return kvm_skip_emulated_instruction(&svm->vcpu); -} + return kvm_emulate_instruction(vcpu, 0); -static int emulate_on_interception(struct vcpu_svm *svm) -{ - return kvm_emulate_instruction(&svm->vcpu, 0); + kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1); + return kvm_skip_emulated_instruction(vcpu); } -static int rsm_interception(struct vcpu_svm *svm) +static int emulate_on_interception(struct kvm_vcpu *vcpu) { - return kvm_emulate_instruction_from_buffer(&svm->vcpu, rsm_ins_bytes, 2); + return kvm_emulate_instruction(vcpu, 0); } -static int rdpmc_interception(struct vcpu_svm *svm) +static int rsm_interception(struct kvm_vcpu *vcpu) { - int err; - - if (!nrips) - return emulate_on_interception(svm); - - err = kvm_rdpmc(&svm->vcpu); - return kvm_complete_insn_gp(&svm->vcpu, err); + return kvm_emulate_instruction_from_buffer(vcpu, rsm_ins_bytes, 2); } -static bool check_selective_cr0_intercepted(struct vcpu_svm *svm, +static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu, unsigned long val) { - unsigned long cr0 = svm->vcpu.arch.cr0; + struct vcpu_svm *svm = to_svm(vcpu); + unsigned long cr0 = vcpu->arch.cr0; bool ret = false; - if (!is_guest_mode(&svm->vcpu) || + if (!is_guest_mode(vcpu) || (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0)))) return false; @@ -2509,17 +2423,18 @@ static bool check_selective_cr0_intercepted(struct vcpu_svm *svm, #define CR_VALID (1ULL << 63) -static int cr_interception(struct vcpu_svm *svm) +static int cr_interception(struct kvm_vcpu *vcpu) { + struct vcpu_svm *svm = to_svm(vcpu); int reg, cr; unsigned long val; int err; if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) - return emulate_on_interception(svm); + return emulate_on_interception(vcpu); if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) - return emulate_on_interception(svm); + return emulate_on_interception(vcpu); reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) @@ -2530,61 +2445,61 @@ static int cr_interception(struct vcpu_svm *svm) err = 0; if (cr >= 16) { /* mov to cr */ cr -= 16; - val = kvm_register_read(&svm->vcpu, reg); + val = kvm_register_read(vcpu, reg); trace_kvm_cr_write(cr, val); switch (cr) { case 0: - if (!check_selective_cr0_intercepted(svm, val)) - err = kvm_set_cr0(&svm->vcpu, val); + if (!check_selective_cr0_intercepted(vcpu, val)) + err = kvm_set_cr0(vcpu, val); else return 1; break; case 3: - err = kvm_set_cr3(&svm->vcpu, val); + err = kvm_set_cr3(vcpu, val); break; case 4: - err = kvm_set_cr4(&svm->vcpu, val); + err = kvm_set_cr4(vcpu, val); break; case 8: - err = kvm_set_cr8(&svm->vcpu, val); + err = kvm_set_cr8(vcpu, val); break; default: WARN(1, "unhandled write to CR%d", cr); - kvm_queue_exception(&svm->vcpu, UD_VECTOR); + kvm_queue_exception(vcpu, UD_VECTOR); return 1; } } else { /* mov from cr */ switch (cr) { case 0: - val = kvm_read_cr0(&svm->vcpu); + val = kvm_read_cr0(vcpu); break; case 2: - val = svm->vcpu.arch.cr2; + val = vcpu->arch.cr2; break; case 3: - val = kvm_read_cr3(&svm->vcpu); + val = kvm_read_cr3(vcpu); break; case 4: - val = kvm_read_cr4(&svm->vcpu); + val = kvm_read_cr4(vcpu); break; case 8: - val = 
kvm_get_cr8(&svm->vcpu); + val = kvm_get_cr8(vcpu); break; default: WARN(1, "unhandled read from CR%d", cr); - kvm_queue_exception(&svm->vcpu, UD_VECTOR); + kvm_queue_exception(vcpu, UD_VECTOR); return 1; } - kvm_register_write(&svm->vcpu, reg, val); + kvm_register_write(vcpu, reg, val); trace_kvm_cr_read(cr, val); } - return kvm_complete_insn_gp(&svm->vcpu, err); + return kvm_complete_insn_gp(vcpu, err); } -static int cr_trap(struct vcpu_svm *svm) +static int cr_trap(struct kvm_vcpu *vcpu) { - struct kvm_vcpu *vcpu = &svm->vcpu; + struct vcpu_svm *svm = to_svm(vcpu); unsigned long old_value, new_value; unsigned int cr; int ret = 0; @@ -2606,7 +2521,7 @@ static int cr_trap(struct vcpu_svm *svm) kvm_post_set_cr4(vcpu, old_value, new_value); break; case 8: - ret = kvm_set_cr8(&svm->vcpu, new_value); + ret = kvm_set_cr8(vcpu, new_value); break; default: WARN(1, "unhandled CR%d write trap", cr); @@ -2617,57 +2532,57 @@ static int cr_trap(struct vcpu_svm *svm) return kvm_complete_insn_gp(vcpu, ret); } -static int dr_interception(struct vcpu_svm *svm) +static int dr_interception(struct kvm_vcpu *vcpu) { + struct vcpu_svm *svm = to_svm(vcpu); int reg, dr; unsigned long val; int err = 0; - if (svm->vcpu.guest_debug == 0) { + if (vcpu->guest_debug == 0) { /* * No more DR vmexits; force a reload of the debug registers * and reenter on this instruction. The next vmexit will * retrieve the full state of the debug registers. */ clr_dr_intercepts(svm); - svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; + vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; return 1; } if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS)) - return emulate_on_interception(svm); + return emulate_on_interception(vcpu); reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; if (dr >= 16) { /* mov to DRn */ dr -= 16; - val = kvm_register_read(&svm->vcpu, reg); - err = kvm_set_dr(&svm->vcpu, dr, val); + val = kvm_register_read(vcpu, reg); + err = kvm_set_dr(vcpu, dr, val); } else { - kvm_get_dr(&svm->vcpu, dr, &val); - kvm_register_write(&svm->vcpu, reg, val); + kvm_get_dr(vcpu, dr, &val); + kvm_register_write(vcpu, reg, val); } - return kvm_complete_insn_gp(&svm->vcpu, err); + return kvm_complete_insn_gp(vcpu, err); } -static int cr8_write_interception(struct vcpu_svm *svm) +static int cr8_write_interception(struct kvm_vcpu *vcpu) { - struct kvm_run *kvm_run = svm->vcpu.run; int r; - u8 cr8_prev = kvm_get_cr8(&svm->vcpu); + u8 cr8_prev = kvm_get_cr8(vcpu); /* instruction emulation calls kvm_set_cr8() */ - r = cr_interception(svm); - if (lapic_in_kernel(&svm->vcpu)) + r = cr_interception(vcpu); + if (lapic_in_kernel(vcpu)) return r; - if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) + if (cr8_prev <= kvm_get_cr8(vcpu)) return r; - kvm_run->exit_reason = KVM_EXIT_SET_TPR; + vcpu->run->exit_reason = KVM_EXIT_SET_TPR; return 0; } -static int efer_trap(struct vcpu_svm *svm) +static int efer_trap(struct kvm_vcpu *vcpu) { struct msr_data msr_info; int ret; @@ -2680,10 +2595,10 @@ static int efer_trap(struct vcpu_svm *svm) */ msr_info.host_initiated = false; msr_info.index = MSR_EFER; - msr_info.data = svm->vmcb->control.exit_info_1 & ~EFER_SVME; - ret = kvm_set_msr_common(&svm->vcpu, &msr_info); + msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME; + ret = kvm_set_msr_common(vcpu, &msr_info); - return kvm_complete_insn_gp(&svm->vcpu, ret); + return kvm_complete_insn_gp(vcpu, ret); } static int svm_get_msr_feature(struct kvm_msr_entry *msr) @@ -2710,34 +2625,41 
@@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) switch (msr_info->index) { case MSR_STAR: - msr_info->data = svm->vmcb->save.star; + msr_info->data = svm->vmcb01.ptr->save.star; break; #ifdef CONFIG_X86_64 case MSR_LSTAR: - msr_info->data = svm->vmcb->save.lstar; + msr_info->data = svm->vmcb01.ptr->save.lstar; break; case MSR_CSTAR: - msr_info->data = svm->vmcb->save.cstar; + msr_info->data = svm->vmcb01.ptr->save.cstar; break; case MSR_KERNEL_GS_BASE: - msr_info->data = svm->vmcb->save.kernel_gs_base; + msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base; break; case MSR_SYSCALL_MASK: - msr_info->data = svm->vmcb->save.sfmask; + msr_info->data = svm->vmcb01.ptr->save.sfmask; break; #endif case MSR_IA32_SYSENTER_CS: - msr_info->data = svm->vmcb->save.sysenter_cs; + msr_info->data = svm->vmcb01.ptr->save.sysenter_cs; break; case MSR_IA32_SYSENTER_EIP: - msr_info->data = svm->sysenter_eip; + msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip; + if (guest_cpuid_is_intel(vcpu)) + msr_info->data |= (u64)svm->sysenter_eip_hi << 32; break; case MSR_IA32_SYSENTER_ESP: - msr_info->data = svm->sysenter_esp; + msr_info->data = svm->vmcb01.ptr->save.sysenter_esp; + if (guest_cpuid_is_intel(vcpu)) + msr_info->data |= (u64)svm->sysenter_esp_hi << 32; break; case MSR_TSC_AUX: if (!boot_cpu_has(X86_FEATURE_RDTSCP)) return 1; + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) + return 1; msr_info->data = svm->tsc_aux; break; /* @@ -2771,7 +2693,10 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) !guest_has_spec_ctrl_msr(vcpu)) return 1; - msr_info->data = svm->spec_ctrl; + if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL)) + msr_info->data = svm->vmcb->save.spec_ctrl; + else + msr_info->data = svm->spec_ctrl; break; case MSR_AMD64_VIRT_SPEC_CTRL: if (!msr_info->host_initiated && @@ -2809,8 +2734,8 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err) { struct vcpu_svm *svm = to_svm(vcpu); - if (!sev_es_guest(svm->vcpu.kvm) || !err) - return kvm_complete_insn_gp(&svm->vcpu, err); + if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->ghcb)) + return kvm_complete_insn_gp(vcpu, err); ghcb_set_sw_exit_info_1(svm->ghcb, 1); ghcb_set_sw_exit_info_2(svm->ghcb, @@ -2820,11 +2745,6 @@ static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err) return 1; } -static int rdmsr_interception(struct vcpu_svm *svm) -{ - return kvm_emulate_rdmsr(&svm->vcpu); -} - static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data) { struct vcpu_svm *svm = to_svm(vcpu); @@ -2853,6 +2773,7 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data) static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { struct vcpu_svm *svm = to_svm(vcpu); + int r; u32 ecx = msr->index; u64 data = msr->data; @@ -2861,7 +2782,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) return 1; vcpu->arch.pat = data; - svm->vmcb->save.g_pat = data; + svm->vmcb01.ptr->save.g_pat = data; + if (is_guest_mode(vcpu)) + nested_vmcb02_compute_g_pat(svm); vmcb_mark_dirty(svm->vmcb, VMCB_NPT); break; case MSR_IA32_SPEC_CTRL: @@ -2872,7 +2795,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) if (kvm_spec_ctrl_test_value(data)) return 1; - svm->spec_ctrl = data; + if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL)) + svm->vmcb->save.spec_ctrl = data; + else + svm->spec_ctrl = data; if 
(!data) break; @@ -2915,44 +2841,70 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) svm->virt_spec_ctrl = data; break; case MSR_STAR: - svm->vmcb->save.star = data; + svm->vmcb01.ptr->save.star = data; break; #ifdef CONFIG_X86_64 case MSR_LSTAR: - svm->vmcb->save.lstar = data; + svm->vmcb01.ptr->save.lstar = data; break; case MSR_CSTAR: - svm->vmcb->save.cstar = data; + svm->vmcb01.ptr->save.cstar = data; break; case MSR_KERNEL_GS_BASE: - svm->vmcb->save.kernel_gs_base = data; + svm->vmcb01.ptr->save.kernel_gs_base = data; break; case MSR_SYSCALL_MASK: - svm->vmcb->save.sfmask = data; + svm->vmcb01.ptr->save.sfmask = data; break; #endif case MSR_IA32_SYSENTER_CS: - svm->vmcb->save.sysenter_cs = data; + svm->vmcb01.ptr->save.sysenter_cs = data; break; case MSR_IA32_SYSENTER_EIP: - svm->sysenter_eip = data; - svm->vmcb->save.sysenter_eip = data; + svm->vmcb01.ptr->save.sysenter_eip = (u32)data; + /* + * We only intercept the MSR_IA32_SYSENTER_{EIP|ESP} msrs + * when we spoof an Intel vendor ID (for cross vendor migration). + * In this case we use this intercept to track the high + * 32 bit part of these msrs to support Intel's + * implementation of SYSENTER/SYSEXIT. + */ + svm->sysenter_eip_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0; break; case MSR_IA32_SYSENTER_ESP: - svm->sysenter_esp = data; - svm->vmcb->save.sysenter_esp = data; + svm->vmcb01.ptr->save.sysenter_esp = (u32)data; + svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0; break; case MSR_TSC_AUX: if (!boot_cpu_has(X86_FEATURE_RDTSCP)) return 1; + if (!msr->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) + return 1; + /* - * This is rare, so we update the MSR here instead of using - * direct_access_msrs. Doing that would require a rdmsr in - * svm_vcpu_put. + * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has + * incomplete and conflicting architectural behavior. Current + * AMD CPUs completely ignore bits 63:32, i.e. they aren't + * reserved and always read as zeros. Emulate AMD CPU behavior + * to avoid explosions if the vCPU is migrated from an AMD host + * to an Intel host. */ + data = (u32)data; + + /* + * TSC_AUX is usually changed only during boot and never read + * directly. Intercept TSC_AUX instead of exposing it to the + * guest via direct_access_msrs, and switch it via user return. + */ + preempt_disable(); + r = kvm_set_user_return_msr(TSC_AUX_URET_SLOT, data, -1ull); + preempt_enable(); + if (r) + return 1; + svm->tsc_aux = data; - wrmsrl(MSR_TSC_AUX, svm->tsc_aux); break; case MSR_IA32_DEBUGCTLMSR: if (!boot_cpu_has(X86_FEATURE_LBRV)) { @@ -3006,38 +2958,32 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) return 0; } -static int wrmsr_interception(struct vcpu_svm *svm) -{ - return kvm_emulate_wrmsr(&svm->vcpu); -} - -static int msr_interception(struct vcpu_svm *svm) +static int msr_interception(struct kvm_vcpu *vcpu) { - if (svm->vmcb->control.exit_info_1) - return wrmsr_interception(svm); + if (to_svm(vcpu)->vmcb->control.exit_info_1) + return kvm_emulate_wrmsr(vcpu); else - return rdmsr_interception(svm); + return kvm_emulate_rdmsr(vcpu); } -static int interrupt_window_interception(struct vcpu_svm *svm) +static int interrupt_window_interception(struct kvm_vcpu *vcpu) { - kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); - svm_clear_vintr(svm); + kvm_make_request(KVM_REQ_EVENT, vcpu); + svm_clear_vintr(to_svm(vcpu)); /* * For AVIC, the only reason to end up here is ExtINTs. 
* In this case AVIC was temporarily disabled for * requesting the IRQ window and we have to re-enable it. */ - svm_toggle_avic_for_irq_window(&svm->vcpu, true); + svm_toggle_avic_for_irq_window(vcpu, true); - ++svm->vcpu.stat.irq_window_exits; + ++vcpu->stat.irq_window_exits; return 1; } -static int pause_interception(struct vcpu_svm *svm) +static int pause_interception(struct kvm_vcpu *vcpu) { - struct kvm_vcpu *vcpu = &svm->vcpu; bool in_kernel; /* @@ -3045,35 +2991,18 @@ static int pause_interception(struct vcpu_svm *svm) * vcpu->arch.preempted_in_kernel can never be true. Just * set in_kernel to false as well. */ - in_kernel = !sev_es_guest(svm->vcpu.kvm) && svm_get_cpl(vcpu) == 0; + in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0; if (!kvm_pause_in_guest(vcpu->kvm)) grow_ple_window(vcpu); kvm_vcpu_on_spin(vcpu, in_kernel); - return 1; -} - -static int nop_interception(struct vcpu_svm *svm) -{ - return kvm_skip_emulated_instruction(&(svm->vcpu)); + return kvm_skip_emulated_instruction(vcpu); } -static int monitor_interception(struct vcpu_svm *svm) +static int invpcid_interception(struct kvm_vcpu *vcpu) { - printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n"); - return nop_interception(svm); -} - -static int mwait_interception(struct vcpu_svm *svm) -{ - printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n"); - return nop_interception(svm); -} - -static int invpcid_interception(struct vcpu_svm *svm) -{ - struct kvm_vcpu *vcpu = &svm->vcpu; + struct vcpu_svm *svm = to_svm(vcpu); unsigned long type; gva_t gva; @@ -3098,7 +3027,7 @@ static int invpcid_interception(struct vcpu_svm *svm) return kvm_handle_invpcid(vcpu, type, gva); } -static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = { +static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = { [SVM_EXIT_READ_CR0] = cr_interception, [SVM_EXIT_READ_CR3] = cr_interception, [SVM_EXIT_READ_CR4] = cr_interception, @@ -3133,15 +3062,15 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = { [SVM_EXIT_EXCP_BASE + GP_VECTOR] = gp_interception, [SVM_EXIT_INTR] = intr_interception, [SVM_EXIT_NMI] = nmi_interception, - [SVM_EXIT_SMI] = nop_on_interception, - [SVM_EXIT_INIT] = nop_on_interception, + [SVM_EXIT_SMI] = kvm_emulate_as_nop, + [SVM_EXIT_INIT] = kvm_emulate_as_nop, [SVM_EXIT_VINTR] = interrupt_window_interception, - [SVM_EXIT_RDPMC] = rdpmc_interception, - [SVM_EXIT_CPUID] = cpuid_interception, + [SVM_EXIT_RDPMC] = kvm_emulate_rdpmc, + [SVM_EXIT_CPUID] = kvm_emulate_cpuid, [SVM_EXIT_IRET] = iret_interception, - [SVM_EXIT_INVD] = invd_interception, + [SVM_EXIT_INVD] = kvm_emulate_invd, [SVM_EXIT_PAUSE] = pause_interception, - [SVM_EXIT_HLT] = halt_interception, + [SVM_EXIT_HLT] = kvm_emulate_halt, [SVM_EXIT_INVLPG] = invlpg_interception, [SVM_EXIT_INVLPGA] = invlpga_interception, [SVM_EXIT_IOIO] = io_interception, @@ -3149,17 +3078,17 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = { [SVM_EXIT_TASK_SWITCH] = task_switch_interception, [SVM_EXIT_SHUTDOWN] = shutdown_interception, [SVM_EXIT_VMRUN] = vmrun_interception, - [SVM_EXIT_VMMCALL] = vmmcall_interception, + [SVM_EXIT_VMMCALL] = kvm_emulate_hypercall, [SVM_EXIT_VMLOAD] = vmload_interception, [SVM_EXIT_VMSAVE] = vmsave_interception, [SVM_EXIT_STGI] = stgi_interception, [SVM_EXIT_CLGI] = clgi_interception, [SVM_EXIT_SKINIT] = skinit_interception, - [SVM_EXIT_WBINVD] = wbinvd_interception, - [SVM_EXIT_MONITOR] = monitor_interception, - [SVM_EXIT_MWAIT] = mwait_interception, - [SVM_EXIT_XSETBV] 
= xsetbv_interception, - [SVM_EXIT_RDPRU] = rdpru_interception, + [SVM_EXIT_WBINVD] = kvm_emulate_wbinvd, + [SVM_EXIT_MONITOR] = kvm_emulate_monitor, + [SVM_EXIT_MWAIT] = kvm_emulate_mwait, + [SVM_EXIT_XSETBV] = kvm_emulate_xsetbv, + [SVM_EXIT_RDPRU] = kvm_handle_invalid_op, [SVM_EXIT_EFER_WRITE_TRAP] = efer_trap, [SVM_EXIT_CR0_WRITE_TRAP] = cr_trap, [SVM_EXIT_CR4_WRITE_TRAP] = cr_trap, @@ -3177,6 +3106,7 @@ static void dump_vmcb(struct kvm_vcpu *vcpu) struct vcpu_svm *svm = to_svm(vcpu); struct vmcb_control_area *control = &svm->vmcb->control; struct vmcb_save_area *save = &svm->vmcb->save; + struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save; if (!dump_invalid_vmcb) { pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n"); @@ -3239,28 +3169,28 @@ static void dump_vmcb(struct kvm_vcpu *vcpu) save->ds.limit, save->ds.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "fs:", - save->fs.selector, save->fs.attrib, - save->fs.limit, save->fs.base); + save01->fs.selector, save01->fs.attrib, + save01->fs.limit, save01->fs.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "gs:", - save->gs.selector, save->gs.attrib, - save->gs.limit, save->gs.base); + save01->gs.selector, save01->gs.attrib, + save01->gs.limit, save01->gs.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "gdtr:", save->gdtr.selector, save->gdtr.attrib, save->gdtr.limit, save->gdtr.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "ldtr:", - save->ldtr.selector, save->ldtr.attrib, - save->ldtr.limit, save->ldtr.base); + save01->ldtr.selector, save01->ldtr.attrib, + save01->ldtr.limit, save01->ldtr.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "idtr:", save->idtr.selector, save->idtr.attrib, save->idtr.limit, save->idtr.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "tr:", - save->tr.selector, save->tr.attrib, - save->tr.limit, save->tr.base); + save01->tr.selector, save01->tr.attrib, + save01->tr.limit, save01->tr.base); pr_err("cpl: %d efer: %016llx\n", save->cpl, save->efer); pr_err("%-15s %016llx %-13s %016llx\n", @@ -3274,15 +3204,15 @@ static void dump_vmcb(struct kvm_vcpu *vcpu) pr_err("%-15s %016llx %-13s %016llx\n", "rsp:", save->rsp, "rax:", save->rax); pr_err("%-15s %016llx %-13s %016llx\n", - "star:", save->star, "lstar:", save->lstar); + "star:", save01->star, "lstar:", save01->lstar); pr_err("%-15s %016llx %-13s %016llx\n", - "cstar:", save->cstar, "sfmask:", save->sfmask); + "cstar:", save01->cstar, "sfmask:", save01->sfmask); pr_err("%-15s %016llx %-13s %016llx\n", - "kernel_gs_base:", save->kernel_gs_base, - "sysenter_cs:", save->sysenter_cs); + "kernel_gs_base:", save01->kernel_gs_base, + "sysenter_cs:", save01->sysenter_cs); pr_err("%-15s %016llx %-13s %016llx\n", - "sysenter_esp:", save->sysenter_esp, - "sysenter_eip:", save->sysenter_eip); + "sysenter_esp:", save01->sysenter_esp, + "sysenter_eip:", save01->sysenter_eip); pr_err("%-15s %016llx %-13s %016llx\n", "gpat:", save->g_pat, "dbgctl:", save->dbgctl); pr_err("%-15s %016llx %-13s %016llx\n", @@ -3309,24 +3239,24 @@ static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code) return -EINVAL; } -int svm_invoke_exit_handler(struct vcpu_svm *svm, u64 exit_code) +int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code) { - if (svm_handle_invalid_exit(&svm->vcpu, exit_code)) + if (svm_handle_invalid_exit(vcpu, exit_code)) return 0; #ifdef CONFIG_RETPOLINE if (exit_code == SVM_EXIT_MSR) - return msr_interception(svm); + return msr_interception(vcpu); 
else if (exit_code == SVM_EXIT_VINTR) - return interrupt_window_interception(svm); + return interrupt_window_interception(vcpu); else if (exit_code == SVM_EXIT_INTR) - return intr_interception(svm); + return intr_interception(vcpu); else if (exit_code == SVM_EXIT_HLT) - return halt_interception(svm); + return kvm_emulate_halt(vcpu); else if (exit_code == SVM_EXIT_NPF) - return npf_interception(svm); + return npf_interception(vcpu); #endif - return svm_exit_handlers[exit_code](svm); + return svm_exit_handlers[exit_code](vcpu); } static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2, @@ -3395,7 +3325,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) if (exit_fastpath != EXIT_FASTPATH_NONE) return 1; - return svm_invoke_exit_handler(svm, exit_code); + return svm_invoke_exit_handler(vcpu, exit_code); } static void reload_tss(struct kvm_vcpu *vcpu) @@ -3406,15 +3336,27 @@ static void reload_tss(struct kvm_vcpu *vcpu) load_TR_desc(); } -static void pre_svm_run(struct vcpu_svm *svm) +static void pre_svm_run(struct kvm_vcpu *vcpu) { - struct svm_cpu_data *sd = per_cpu(svm_data, svm->vcpu.cpu); + struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); + struct vcpu_svm *svm = to_svm(vcpu); + + /* + * If the previous vmrun of the vmcb occurred on a different physical + * cpu, then mark the vmcb dirty and assign a new asid. Hardware's + * vmcb clean bits are per logical CPU, as are KVM's asid assignments. + */ + if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) { + svm->current_vmcb->asid_generation = 0; + vmcb_mark_all_dirty(svm->vmcb); + svm->current_vmcb->cpu = vcpu->cpu; + } - if (sev_guest(svm->vcpu.kvm)) - return pre_sev_run(svm, svm->vcpu.cpu); + if (sev_guest(vcpu->kvm)) + return pre_sev_run(svm, vcpu->cpu); /* FIXME: handle wraparound of asid_generation */ - if (svm->asid_generation != sd->asid_generation) + if (svm->current_vmcb->asid_generation != sd->asid_generation) new_asid(svm, sd); } @@ -3424,7 +3366,7 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu) svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; vcpu->arch.hflags |= HF_NMI_MASK; - if (!sev_es_guest(svm->vcpu.kvm)) + if (!sev_es_guest(vcpu->kvm)) svm_set_intercept(svm, INTERCEPT_IRET); ++vcpu->stat.nmi_injections; } @@ -3478,7 +3420,7 @@ bool svm_nmi_blocked(struct kvm_vcpu *vcpu) return false; ret = (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) || - (svm->vcpu.arch.hflags & HF_NMI_MASK); + (vcpu->arch.hflags & HF_NMI_MASK); return ret; } @@ -3498,9 +3440,7 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection) static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu) { - struct vcpu_svm *svm = to_svm(vcpu); - - return !!(svm->vcpu.arch.hflags & HF_NMI_MASK); + return !!(vcpu->arch.hflags & HF_NMI_MASK); } static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) @@ -3508,12 +3448,12 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) struct vcpu_svm *svm = to_svm(vcpu); if (masked) { - svm->vcpu.arch.hflags |= HF_NMI_MASK; - if (!sev_es_guest(svm->vcpu.kvm)) + vcpu->arch.hflags |= HF_NMI_MASK; + if (!sev_es_guest(vcpu->kvm)) svm_set_intercept(svm, INTERCEPT_IRET); } else { - svm->vcpu.arch.hflags &= ~HF_NMI_MASK; - if (!sev_es_guest(svm->vcpu.kvm)) + vcpu->arch.hflags &= ~HF_NMI_MASK; + if (!sev_es_guest(vcpu->kvm)) svm_clr_intercept(svm, INTERCEPT_IRET); } } @@ -3526,7 +3466,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu) if (!gif_set(svm)) return true; - if (sev_es_guest(svm->vcpu.kvm)) { + if 
(sev_es_guest(vcpu->kvm)) { /* * SEV-ES guests to not expose RFLAGS. Use the VMCB interrupt mask * bit to determine the state of the IF flag. @@ -3536,7 +3476,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu) } else if (is_guest_mode(vcpu)) { /* As long as interrupts are being delivered... */ if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) - ? !(svm->nested.hsave->save.rflags & X86_EFLAGS_IF) + ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF) : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF)) return true; @@ -3595,8 +3535,7 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) - == HF_NMI_MASK) + if ((vcpu->arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) == HF_NMI_MASK) return; /* IRET will cause a vm exit */ if (!gif_set(svm)) { @@ -3638,7 +3577,7 @@ void svm_flush_tlb(struct kvm_vcpu *vcpu) if (static_cpu_has(X86_FEATURE_FLUSHBYASID)) svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; else - svm->asid_generation--; + svm->current_vmcb->asid_generation--; } static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva) @@ -3675,8 +3614,9 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu) svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; } -static void svm_complete_interrupts(struct vcpu_svm *svm) +static void svm_complete_interrupts(struct kvm_vcpu *vcpu) { + struct vcpu_svm *svm = to_svm(vcpu); u8 vector; int type; u32 exitintinfo = svm->vmcb->control.exit_int_info; @@ -3688,28 +3628,28 @@ static void svm_complete_interrupts(struct vcpu_svm *svm) * If we've made progress since setting HF_IRET_MASK, we've * executed an IRET and can allow NMI injection. */ - if ((svm->vcpu.arch.hflags & HF_IRET_MASK) && - (sev_es_guest(svm->vcpu.kvm) || - kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip)) { - svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); - kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); + if ((vcpu->arch.hflags & HF_IRET_MASK) && + (sev_es_guest(vcpu->kvm) || + kvm_rip_read(vcpu) != svm->nmi_iret_rip)) { + vcpu->arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); + kvm_make_request(KVM_REQ_EVENT, vcpu); } - svm->vcpu.arch.nmi_injected = false; - kvm_clear_exception_queue(&svm->vcpu); - kvm_clear_interrupt_queue(&svm->vcpu); + vcpu->arch.nmi_injected = false; + kvm_clear_exception_queue(vcpu); + kvm_clear_interrupt_queue(vcpu); if (!(exitintinfo & SVM_EXITINTINFO_VALID)) return; - kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); + kvm_make_request(KVM_REQ_EVENT, vcpu); vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK; type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK; switch (type) { case SVM_EXITINTINFO_TYPE_NMI: - svm->vcpu.arch.nmi_injected = true; + vcpu->arch.nmi_injected = true; break; case SVM_EXITINTINFO_TYPE_EXEPT: /* @@ -3725,21 +3665,20 @@ static void svm_complete_interrupts(struct vcpu_svm *svm) */ if (kvm_exception_is_soft(vector)) { if (vector == BP_VECTOR && int3_injected && - kvm_is_linear_rip(&svm->vcpu, svm->int3_rip)) - kvm_rip_write(&svm->vcpu, - kvm_rip_read(&svm->vcpu) - - int3_injected); + kvm_is_linear_rip(vcpu, svm->int3_rip)) + kvm_rip_write(vcpu, + kvm_rip_read(vcpu) - int3_injected); break; } if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) { u32 err = svm->vmcb->control.exit_int_info_err; - kvm_requeue_exception_e(&svm->vcpu, vector, err); + kvm_requeue_exception_e(vcpu, vector, err); } else - kvm_requeue_exception(&svm->vcpu, vector); + kvm_requeue_exception(vcpu, vector); break; case SVM_EXITINTINFO_TYPE_INTR: - kvm_queue_interrupt(&svm->vcpu, vector, false); 
+ kvm_queue_interrupt(vcpu, vector, false); break; default: break; @@ -3754,7 +3693,7 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu) control->exit_int_info = control->event_inj; control->exit_int_info_err = control->event_inj_err; control->event_inj = 0; - svm_complete_interrupts(svm); + svm_complete_interrupts(vcpu); } static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) @@ -3766,57 +3705,32 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) return EXIT_FASTPATH_NONE; } -static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, - struct vcpu_svm *svm) +static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu) { - /* - * VMENTER enables interrupts (host state), but the kernel state is - * interrupts disabled when this is invoked. Also tell RCU about - * it. This is the same logic as for exit_to_user_mode(). - * - * This ensures that e.g. latency analysis on the host observes - * guest mode as interrupt enabled. - * - * guest_enter_irqoff() informs context tracking about the - * transition to guest mode and if enabled adjusts RCU state - * accordingly. - */ - instrumentation_begin(); - trace_hardirqs_on_prepare(); - lockdep_hardirqs_on_prepare(CALLER_ADDR0); - instrumentation_end(); + struct vcpu_svm *svm = to_svm(vcpu); + unsigned long vmcb_pa = svm->current_vmcb->pa; - guest_enter_irqoff(); - lockdep_hardirqs_on(CALLER_ADDR0); + kvm_guest_enter_irqoff(); - if (sev_es_guest(svm->vcpu.kvm)) { - __svm_sev_es_vcpu_run(svm->vmcb_pa); + if (sev_es_guest(vcpu->kvm)) { + __svm_sev_es_vcpu_run(vmcb_pa); } else { struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); - __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs); + /* + * Use a single vmcb (vmcb01 because it's always valid) for + * context switching guest state via VMLOAD/VMSAVE, that way + * the state doesn't need to be copied between vmcb01 and + * vmcb02 when switching vmcbs for nested virtualization. + */ + vmload(svm->vmcb01.pa); + __svm_vcpu_run(vmcb_pa, (unsigned long *)&vcpu->arch.regs); + vmsave(svm->vmcb01.pa); vmload(__sme_page_pa(sd->save_area)); } - /* - * VMEXIT disables interrupts (host state), but tracing and lockdep - * have them in state 'on' as recorded before entering guest mode. - * Same as enter_from_user_mode(). - * - * guest_exit_irqoff() restores host context and reinstates RCU if - * enabled and required. - * - * This needs to be done before the below as native_read_msr() - * contains a tracepoint and x86_spec_ctrl_restore_host() calls - * into world and some more. - */ - lockdep_hardirqs_off(CALLER_ADDR0); - guest_exit_irqoff(); - - instrumentation_begin(); - trace_hardirqs_off_finish(); - instrumentation_end(); + kvm_guest_exit_irqoff(); } static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) @@ -3845,7 +3759,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) smp_send_reschedule(vcpu->cpu); } - pre_svm_run(svm); + pre_svm_run(vcpu); sync_lapic_to_cr8(vcpu); @@ -3859,7 +3773,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) * Run with all-zero DR6 unless needed, so that we can get the exact cause * of a #DB. 
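For reference, the exit_int_info decoding in svm_complete_interrupts() above follows the AMD event-injection record layout: vector in bits 7:0, event type in bits 10:8, error-code-valid in bit 11, valid in bit 31. A minimal stand-alone sketch of that field extraction; the EXITINTINFO_* names and values here are local to the sketch, taken from that assumed layout rather than from the patch:

    #include <stdbool.h>
    #include <stdint.h>

    #define EXITINTINFO_VEC_MASK   0xffu
    #define EXITINTINFO_TYPE_MASK  0x700u      /* 0x000 INTR, 0x200 NMI, 0x300 exception, 0x400 soft int */
    #define EXITINTINFO_VALID_ERR  (1u << 11)
    #define EXITINTINFO_VALID      (1u << 31)

    struct pending_event {
            bool valid;
            bool has_error_code;
            uint8_t vector;
            uint32_t type;
    };

    /* Split the raw exit_int_info value into the fields KVM re-queues. */
    static struct pending_event decode_exitintinfo(uint32_t info)
    {
            struct pending_event ev = { 0 };

            ev.valid = info & EXITINTINFO_VALID;
            if (!ev.valid)
                    return ev;

            ev.vector = info & EXITINTINFO_VEC_MASK;
            ev.type = info & EXITINTINFO_TYPE_MASK;
            ev.has_error_code = info & EXITINTINFO_VALID_ERR;
            return ev;
    }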
*/ - if (unlikely(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) + if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) svm_set_dr6(svm, vcpu->arch.dr6); else svm_set_dr6(svm, DR6_ACTIVE_LOW); @@ -3875,9 +3789,10 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) * is no need to worry about the conditional branch over the wrmsr * being speculatively taken. */ - x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); + if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL)) + x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); - svm_vcpu_enter_exit(vcpu, svm); + svm_vcpu_enter_exit(vcpu); /* * We do not use IBRS in the kernel. If this vCPU has used the @@ -3894,15 +3809,17 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) * If the L02 MSR bitmap does not intercept the MSR, then we need to * save it. */ - if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) + if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL) && + unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); - if (!sev_es_guest(svm->vcpu.kvm)) + if (!sev_es_guest(vcpu->kvm)) reload_tss(vcpu); - x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); + if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL)) + x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); - if (!sev_es_guest(svm->vcpu.kvm)) { + if (!sev_es_guest(vcpu->kvm)) { vcpu->arch.cr2 = svm->vmcb->save.cr2; vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; @@ -3910,7 +3827,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) } if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) - kvm_before_interrupt(&svm->vcpu); + kvm_before_interrupt(vcpu); kvm_load_host_xsave_state(vcpu); stgi(); @@ -3918,13 +3835,13 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) /* Any pending NMI will happen here */ if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) - kvm_after_interrupt(&svm->vcpu); + kvm_after_interrupt(vcpu); sync_cr8_to_lapic(vcpu); svm->next_rip = 0; - if (is_guest_mode(&svm->vcpu)) { - sync_nested_vmcb_control(svm); + if (is_guest_mode(vcpu)) { + nested_sync_control_from_vmcb02(svm); svm->nested.nested_run_pending = 0; } @@ -3933,7 +3850,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) /* if exit due to PF check for async PF */ if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) - svm->vcpu.arch.apf.host_apf_flags = + vcpu->arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags(); if (npt_enabled) { @@ -3947,9 +3864,9 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) */ if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + MC_VECTOR)) - svm_handle_mce(svm); + svm_handle_mce(vcpu); - svm_complete_interrupts(svm); + svm_complete_interrupts(vcpu); if (is_guest_mode(vcpu)) return EXIT_FASTPATH_NONE; @@ -3957,21 +3874,26 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) return svm_exit_handlers_fastpath(vcpu); } -static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root, +static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) { struct vcpu_svm *svm = to_svm(vcpu); unsigned long cr3; - cr3 = __sme_set(root); if (npt_enabled) { - svm->vmcb->control.nested_cr3 = cr3; + svm->vmcb->control.nested_cr3 = __sme_set(root_hpa); vmcb_mark_dirty(svm->vmcb, VMCB_NPT); /* Loading L2's CR3 is handled by enter_svm_guest_mode. 
*/ if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) return; cr3 = vcpu->arch.cr3; + } else if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) { + cr3 = __sme_set(root_hpa) | kvm_get_active_pcid(vcpu); + } else { + /* PCID in the guest should be impossible with a 32-bit MMU. */ + WARN_ON_ONCE(kvm_get_active_pcid(vcpu)); + cr3 = root_hpa; } svm->vmcb->save.cr3 = cr3; @@ -4048,7 +3970,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) /* Update nrips enabled cache */ svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) && - guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS); + guest_cpuid_has(vcpu, X86_FEATURE_NRIPS); /* Check again if INVPCID interception if required */ svm_check_invpcid(svm); @@ -4060,24 +3982,50 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f)); } - if (!kvm_vcpu_apicv_active(vcpu)) - return; + if (kvm_vcpu_apicv_active(vcpu)) { + /* + * AVIC does not work with an x2APIC mode guest. If the X2APIC feature + * is exposed to the guest, disable AVIC. + */ + if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC)) + kvm_request_apicv_update(vcpu->kvm, false, + APICV_INHIBIT_REASON_X2APIC); - /* - * AVIC does not work with an x2APIC mode guest. If the X2APIC feature - * is exposed to the guest, disable AVIC. - */ - if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC)) - kvm_request_apicv_update(vcpu->kvm, false, - APICV_INHIBIT_REASON_X2APIC); + /* + * Currently, AVIC does not work with nested virtualization. + * So, we disable AVIC when cpuid for SVM is set in the L1 guest. + */ + if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM)) + kvm_request_apicv_update(vcpu->kvm, false, + APICV_INHIBIT_REASON_NESTED); + } - /* - * Currently, AVIC does not work with nested virtualization. - * So, we disable AVIC when cpuid for SVM is set in the L1 guest. - */ - if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM)) - kvm_request_apicv_update(vcpu->kvm, false, - APICV_INHIBIT_REASON_NESTED); + if (guest_cpuid_is_intel(vcpu)) { + /* + * We must intercept SYSENTER_EIP and SYSENTER_ESP + * accesses because the processor only stores 32 bits. + * For the same reason we cannot use virtual VMLOAD/VMSAVE. + */ + svm_set_intercept(svm, INTERCEPT_VMLOAD); + svm_set_intercept(svm, INTERCEPT_VMSAVE); + svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; + + set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0); + set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0); + } else { + /* + * If hardware supports Virtual VMLOAD VMSAVE then enable it + * in VMCB and clear intercepts to avoid #VMEXIT. 
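The non-NPT branch of svm_load_mmu_pgd() above ORs the guest's active PCID into the shadow root before writing vmcb->save.cr3. A minimal sketch of that composition, assuming the architectural rule that CR3[11:0] holds the PCID only when CR4.PCIDE is set (which is what kvm_get_active_pcid() encapsulates); the function and macro names below are illustrative only:

    #include <stdint.h>

    #define CR4_PCIDE  (1ull << 17)   /* assumed architectural bit position */

    /* Shadow-paging case: root HPA in the upper bits, guest PCID in [11:0]. */
    static uint64_t shadow_cr3(uint64_t root_hpa, uint64_t guest_cr3, uint64_t guest_cr4)
    {
            uint64_t pcid = (guest_cr4 & CR4_PCIDE) ? (guest_cr3 & 0xfff) : 0;

            return root_hpa | pcid;
    }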
+ */ + if (vls) { + svm_clr_intercept(svm, INTERCEPT_VMLOAD); + svm_clr_intercept(svm, INTERCEPT_VMSAVE); + svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; + } + /* No need to intercept these MSRs */ + set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1); + set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1); + } } static bool svm_has_wbinvd_exit(void) @@ -4349,15 +4297,15 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) if (!(saved_efer & EFER_SVME)) return 1; - if (kvm_vcpu_map(&svm->vcpu, + if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL) return 1; if (svm_allocate_nested(svm)) return 1; - ret = enter_svm_guest_mode(svm, vmcb12_gpa, map.hva); - kvm_vcpu_unmap(&svm->vcpu, &map, true); + ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, map.hva); + kvm_vcpu_unmap(vcpu, &map, true); } } @@ -4400,7 +4348,7 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int i * * This happens because CPU microcode reading instruction bytes * uses a special opcode which attempts to read data using CPL=0 - * priviledges. The microcode reads CS:RIP and if it hits a SMAP + * privileges. The microcode reads CS:RIP and if it hits a SMAP * fault, it gives up and returns no instruction bytes. * * Detection: @@ -4612,6 +4560,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .mem_enc_reg_region = svm_register_enc_region, .mem_enc_unreg_region = svm_unregister_enc_region, + .vm_copy_enc_context_from = svm_vm_copy_asid_from, + .can_emulate_instruction = svm_can_emulate_instruction, .apic_init_signal_blocked = svm_apic_init_signal_blocked, diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 39e071fdab0c..84b3133c2251 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -23,12 +23,10 @@ #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT) -static const u32 host_save_user_msrs[] = { - MSR_TSC_AUX, -}; -#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs) +#define IOPM_SIZE PAGE_SIZE * 3 +#define MSRPM_SIZE PAGE_SIZE * 2 -#define MAX_DIRECT_ACCESS_MSRS 18 +#define MAX_DIRECT_ACCESS_MSRS 20 #define MSRPM_OFFSETS 16 extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly; extern bool npt_enabled; @@ -65,6 +63,8 @@ struct kvm_sev_info { unsigned long pages_locked; /* Number of pages locked */ struct list_head regions_list; /* List of registered regions */ u64 ap_jump_table; /* SEV-ES AP Jump Table address */ + struct kvm *enc_context_owner; /* Owner of copied encryption context */ + struct misc_cg *misc_cg; /* For misc cgroup accounting */ }; struct kvm_svm { @@ -81,11 +81,19 @@ struct kvm_svm { struct kvm_vcpu; +struct kvm_vmcb_info { + struct vmcb *ptr; + unsigned long pa; + int cpu; + uint64_t asid_generation; +}; + struct svm_nested_state { - struct vmcb *hsave; + struct kvm_vmcb_info vmcb02; u64 hsave_msr; u64 vm_cr_msr; u64 vmcb12_gpa; + u64 last_vmcb12_gpa; /* These are the merged vectors */ u32 *msrpm; @@ -102,21 +110,20 @@ struct svm_nested_state { struct vcpu_svm { struct kvm_vcpu vcpu; + /* vmcb always points at current_vmcb->ptr, it's purely a shorthand. 
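The new sysenter_esp_hi/sysenter_eip_hi fields exist because the VMCB save area only holds 32 bits of SYSENTER_ESP/EIP, so for an Intel-flavoured guest KVM has to reassemble the full 64-bit MSR value itself (and intercept the MSRs, as shown above). A hedged sketch of that reassembly under the split this series appears to use — low half from the VMCB, high half from the shadow field, honoured only for Intel guests:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * vmcb_lo: low 32 bits as stored in vmcb->save.sysenter_eip/esp
     * hi:      shadow of the guest-written upper half (sysenter_*_hi)
     */
    static uint64_t sysenter_msr_value(uint32_t vmcb_lo, uint32_t hi, bool guest_is_intel)
    {
            uint64_t val = vmcb_lo;

            /* AMD hardware ignores the upper half, so only an Intel guest sees it. */
            if (guest_is_intel)
                    val |= (uint64_t)hi << 32;
            return val;
    }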
*/ struct vmcb *vmcb; - unsigned long vmcb_pa; + struct kvm_vmcb_info vmcb01; + struct kvm_vmcb_info *current_vmcb; struct svm_cpu_data *svm_data; u32 asid; - uint64_t asid_generation; - uint64_t sysenter_esp; - uint64_t sysenter_eip; + u32 sysenter_esp_hi; + u32 sysenter_eip_hi; uint64_t tsc_aux; u64 msr_decfg; u64 next_rip; - u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS]; - u64 spec_ctrl; /* * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be @@ -239,17 +246,14 @@ static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit) vmcb->control.clean &= ~(1 << bit); } -static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) +static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit) { - return container_of(vcpu, struct vcpu_svm, vcpu); + return !test_bit(bit, (unsigned long *)&vmcb->control.clean); } -static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm) +static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) { - if (is_guest_mode(&svm->vcpu)) - return svm->nested.hsave; - else - return svm->vmcb; + return container_of(vcpu, struct vcpu_svm, vcpu); } static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit) @@ -272,7 +276,7 @@ static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit) static inline void set_dr_intercepts(struct vcpu_svm *svm) { - struct vmcb *vmcb = get_host_vmcb(svm); + struct vmcb *vmcb = svm->vmcb01.ptr; if (!sev_es_guest(svm->vcpu.kvm)) { vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ); @@ -299,7 +303,7 @@ static inline void set_dr_intercepts(struct vcpu_svm *svm) static inline void clr_dr_intercepts(struct vcpu_svm *svm) { - struct vmcb *vmcb = get_host_vmcb(svm); + struct vmcb *vmcb = svm->vmcb01.ptr; vmcb->control.intercepts[INTERCEPT_DR] = 0; @@ -314,7 +318,7 @@ static inline void clr_dr_intercepts(struct vcpu_svm *svm) static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit) { - struct vmcb *vmcb = get_host_vmcb(svm); + struct vmcb *vmcb = svm->vmcb01.ptr; WARN_ON_ONCE(bit >= 32); vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit); @@ -324,7 +328,7 @@ static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit) static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit) { - struct vmcb *vmcb = get_host_vmcb(svm); + struct vmcb *vmcb = svm->vmcb01.ptr; WARN_ON_ONCE(bit >= 32); vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit); @@ -334,7 +338,7 @@ static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit) static inline void svm_set_intercept(struct vcpu_svm *svm, int bit) { - struct vmcb *vmcb = get_host_vmcb(svm); + struct vmcb *vmcb = svm->vmcb01.ptr; vmcb_set_intercept(&vmcb->control, bit); @@ -343,7 +347,7 @@ static inline void svm_set_intercept(struct vcpu_svm *svm, int bit) static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit) { - struct vmcb *vmcb = get_host_vmcb(svm); + struct vmcb *vmcb = svm->vmcb01.ptr; vmcb_clr_intercept(&vmcb->control, bit); @@ -387,8 +391,6 @@ static inline bool gif_set(struct vcpu_svm *svm) /* svm.c */ #define MSR_INVALID 0xffffffffU -extern int sev; -extern int sev_es; extern bool dump_invalid_vmcb; u32 svm_msrpm_offset(u32 msr); @@ -405,7 +407,7 @@ bool svm_smi_blocked(struct kvm_vcpu *vcpu); bool svm_nmi_blocked(struct kvm_vcpu *vcpu); bool svm_interrupt_blocked(struct kvm_vcpu *vcpu); void svm_set_gif(struct vcpu_svm *svm, bool value); -int svm_invoke_exit_handler(struct vcpu_svm *svm, u64 exit_code); +int svm_invoke_exit_handler(struct 
kvm_vcpu *vcpu, u64 exit_code); void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr, int read, int write); @@ -437,20 +439,30 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm) return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI); } -int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, - struct vmcb *nested_vmcb); +int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12); void svm_leave_nested(struct vcpu_svm *svm); void svm_free_nested(struct vcpu_svm *svm); int svm_allocate_nested(struct vcpu_svm *svm); -int nested_svm_vmrun(struct vcpu_svm *svm); +int nested_svm_vmrun(struct kvm_vcpu *vcpu); void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb); int nested_svm_vmexit(struct vcpu_svm *svm); + +static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code) +{ + svm->vmcb->control.exit_code = exit_code; + svm->vmcb->control.exit_info_1 = 0; + svm->vmcb->control.exit_info_2 = 0; + return nested_svm_vmexit(svm); +} + int nested_svm_exit_handled(struct vcpu_svm *svm); -int nested_svm_check_permissions(struct vcpu_svm *svm); +int nested_svm_check_permissions(struct kvm_vcpu *vcpu); int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, bool has_error_code, u32 error_code); int nested_svm_exit_special(struct vcpu_svm *svm); -void sync_nested_vmcb_control(struct vcpu_svm *svm); +void nested_sync_control_from_vmcb02(struct vcpu_svm *svm); +void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm); +void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb); extern struct kvm_x86_nested_ops svm_nested_ops; @@ -491,8 +503,8 @@ void avic_vm_destroy(struct kvm *kvm); int avic_vm_init(struct kvm *kvm); void avic_init_vmcb(struct vcpu_svm *svm); void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate); -int avic_incomplete_ipi_interception(struct vcpu_svm *svm); -int avic_unaccelerated_access_interception(struct vcpu_svm *svm); +int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu); +int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu); int avic_init_vcpu(struct vcpu_svm *svm); void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu); void avic_vcpu_put(struct kvm_vcpu *vcpu); @@ -550,22 +562,20 @@ void svm_vcpu_unblocking(struct kvm_vcpu *vcpu); extern unsigned int max_sev_asid; -static inline bool svm_sev_enabled(void) -{ - return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? 
max_sev_asid : 0; -} - void sev_vm_destroy(struct kvm *kvm); int svm_mem_enc_op(struct kvm *kvm, void __user *argp); int svm_register_enc_region(struct kvm *kvm, struct kvm_enc_region *range); int svm_unregister_enc_region(struct kvm *kvm, struct kvm_enc_region *range); +int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd); void pre_sev_run(struct vcpu_svm *svm, int cpu); +void __init sev_set_cpu_caps(void); void __init sev_hardware_setup(void); void sev_hardware_teardown(void); +int sev_cpu_init(struct svm_cpu_data *sd); void sev_free_vcpu(struct kvm_vcpu *vcpu); -int sev_handle_vmgexit(struct vcpu_svm *svm); +int sev_handle_vmgexit(struct kvm_vcpu *vcpu); int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in); void sev_es_init_vmcb(struct vcpu_svm *svm); void sev_es_create_vcpu(struct vcpu_svm *svm); diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S index 6feb8c08f45a..4fa17df123cd 100644 --- a/arch/x86/kvm/svm/vmenter.S +++ b/arch/x86/kvm/svm/vmenter.S @@ -79,28 +79,10 @@ SYM_FUNC_START(__svm_vcpu_run) /* Enter guest mode */ sti -1: vmload %_ASM_AX - jmp 3f -2: cmpb $0, kvm_rebooting - jne 3f - ud2 - _ASM_EXTABLE(1b, 2b) -3: vmrun %_ASM_AX - jmp 5f -4: cmpb $0, kvm_rebooting - jne 5f - ud2 - _ASM_EXTABLE(3b, 4b) +1: vmrun %_ASM_AX -5: vmsave %_ASM_AX - jmp 7f -6: cmpb $0, kvm_rebooting - jne 7f - ud2 - _ASM_EXTABLE(5b, 6b) -7: - cli +2: cli #ifdef CONFIG_RETPOLINE /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */ @@ -167,6 +149,13 @@ SYM_FUNC_START(__svm_vcpu_run) #endif pop %_ASM_BP ret + +3: cmpb $0, kvm_rebooting + jne 2b + ud2 + + _ASM_EXTABLE(1b, 3b) + SYM_FUNC_END(__svm_vcpu_run) /** @@ -186,18 +175,15 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run) #endif push %_ASM_BX - /* Enter guest mode */ + /* Move @vmcb to RAX. */ mov %_ASM_ARG1, %_ASM_AX + + /* Enter guest mode */ sti 1: vmrun %_ASM_AX - jmp 3f -2: cmpb $0, kvm_rebooting - jne 3f - ud2 - _ASM_EXTABLE(1b, 2b) -3: cli +2: cli #ifdef CONFIG_RETPOLINE /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */ @@ -217,4 +203,11 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run) #endif pop %_ASM_BP ret + +3: cmpb $0, kvm_rebooting + jne 2b + ud2 + + _ASM_EXTABLE(1b, 3b) + SYM_FUNC_END(__svm_sev_es_vcpu_run) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index bcca0b80e0d0..bced76637823 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -11,6 +11,7 @@ #include "mmu.h" #include "nested.h" #include "pmu.h" +#include "sgx.h" #include "trace.h" #include "vmx.h" #include "x86.h" @@ -21,13 +22,7 @@ module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO); static bool __read_mostly nested_early_check = 0; module_param(nested_early_check, bool, S_IRUGO); -#define CC(consistency_check) \ -({ \ - bool failed = (consistency_check); \ - if (failed) \ - trace_kvm_nested_vmenter_failed(#consistency_check, 0); \ - failed; \ -}) +#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK /* * Hyper-V requires all of these, so mark them as supported even though @@ -619,6 +614,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, } /* KVM unconditionally exposes the FS/GS base MSRs to L1. 
*/ +#ifdef CONFIG_X86_64 nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0, MSR_FS_BASE, MSR_TYPE_RW); @@ -627,6 +623,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0, MSR_KERNEL_GS_BASE, MSR_TYPE_RW); +#endif /* * Checking the L0->L1 bitmap is trying to verify two things: @@ -2306,6 +2303,9 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST)) exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; + if (exec_control & SECONDARY_EXEC_ENCLS_EXITING) + vmx_write_encls_bitmap(&vmx->vcpu, vmcs12); + secondary_exec_controls_set(vmx, exec_control); } @@ -3453,6 +3453,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu); enum nested_evmptrld_status evmptrld_status; + ++vcpu->stat.nested_run; + if (!nested_vmx_check_permission(vcpu)) return 1; @@ -3537,7 +3539,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) * snapshot restore (migration). * * In this flow, it is assumed that vmcs12 cache was - * trasferred as part of captured nVMX state and should + * transferred as part of captured nVMX state and should * therefore not be read from guest memory (which may not * exist on destination host yet). */ @@ -3810,9 +3812,15 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu) /* * Process any exceptions that are not debug traps before MTF. + * + * Note that only a pending nested run can block a pending exception. + * Otherwise an injected NMI/interrupt should either be + * lost or delivered to the nested hypervisor in the IDT_VECTORING_INFO, + * while delivering the pending exception. */ + if (vcpu->arch.exception.pending && !vmx_pending_dbg_trap(vcpu)) { - if (block_nested_events) + if (vmx->nested.nested_run_pending) return -EBUSY; if (!nested_vmx_check_exception(vcpu, &exit_qual)) goto no_vmexit; @@ -3829,7 +3837,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu) } if (vcpu->arch.exception.pending) { - if (block_nested_events) + if (vmx->nested.nested_run_pending) return -EBUSY; if (!nested_vmx_check_exception(vcpu, &exit_qual)) goto no_vmexit; @@ -4105,6 +4113,8 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, { /* update exit information fields: */ vmcs12->vm_exit_reason = vm_exit_reason; + if (to_vmx(vcpu)->exit_reason.enclave_mode) + vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE; vmcs12->exit_qualification = exit_qualification; vmcs12->vm_exit_intr_info = exit_intr_info; @@ -4422,6 +4432,9 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, /* trying to cancel vmlaunch/vmresume is a bug */ WARN_ON_ONCE(vmx->nested.nested_run_pending); + /* Similarly, triple faults in L2 should never escape. */ + WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)); + kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); /* Service the TLB flush request for L2 before switching to L1. */ @@ -4558,6 +4571,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, vmx->fail = 0; } +static void nested_vmx_triple_fault(struct kvm_vcpu *vcpu) +{ + nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0); +} + /* * Decode the memory-address operand of a vmx instruction, as recorded on an * exit caused by such an instruction (run by a guest hypervisor). 
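The enclave_mode propagation into vmcs12->vm_exit_reason above relies on the exit-reason layout: the basic reason occupies the low 16 bits and a high flag bit marks a VM-Exit taken from SGX enclave mode. A small sketch of that split, assuming bit 27 for the enclave-mode flag (the position VMX_EXIT_REASONS_SGX_ENCLAVE_MODE is understood to carry); the names below are local to the sketch:

    #include <stdbool.h>
    #include <stdint.h>

    #define EXIT_REASON_BASIC_MASK   0xffffu
    #define EXIT_REASON_ENCLAVE_MODE (1u << 27)   /* assumed flag position */

    struct decoded_exit_reason {
            uint16_t basic;
            bool from_enclave;
    };

    static struct decoded_exit_reason decode_exit_reason(uint32_t full)
    {
            struct decoded_exit_reason r = {
                    .basic        = full & EXIT_REASON_BASIC_MASK,
                    .from_enclave = (full & EXIT_REASON_ENCLAVE_MODE) != 0,
            };
            return r;
    }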
@@ -5005,7 +5023,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu) return nested_vmx_failInvalid(vcpu); /* Decode instruction info and find the field to read */ - field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf)); + field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf)); offset = vmcs_field_to_offset(field); if (offset < 0) @@ -5023,7 +5041,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu) * on the guest's mode (32 or 64 bit), not on the given field's length. */ if (instr_info & BIT(10)) { - kvm_register_writel(vcpu, (((instr_info) >> 3) & 0xf), value); + kvm_register_write(vcpu, (((instr_info) >> 3) & 0xf), value); } else { len = is_64_bit_mode(vcpu) ? 8 : 4; if (get_vmx_mem_address(vcpu, exit_qualification, @@ -5097,7 +5115,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) return nested_vmx_failInvalid(vcpu); if (instr_info & BIT(10)) - value = kvm_register_readl(vcpu, (((instr_info) >> 3) & 0xf)); + value = kvm_register_read(vcpu, (((instr_info) >> 3) & 0xf)); else { len = is_64_bit_mode(vcpu) ? 8 : 4; if (get_vmx_mem_address(vcpu, exit_qualification, @@ -5108,7 +5126,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) return kvm_handle_memory_failure(vcpu, r, &e); } - field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf)); + field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf)); offset = vmcs_field_to_offset(field); if (offset < 0) @@ -5305,7 +5323,7 @@ static int handle_invept(struct kvm_vcpu *vcpu) return 1; vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); - type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); + type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf); types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; @@ -5385,7 +5403,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) return 1; vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); - type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); + type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf); types = (vmx->nested.msrs.vpid_caps & VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8; @@ -5479,16 +5497,11 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, if (!nested_vmx_check_eptp(vcpu, new_eptp)) return 1; - kvm_mmu_unload(vcpu); mmu->ept_ad = accessed_dirty; mmu->mmu_role.base.ad_disabled = !accessed_dirty; vmcs12->ept_pointer = new_eptp; - /* - * TODO: Check what's the correct approach in case - * mmu reload fails. 
Currently, we just let the next - * reload potentially fail - */ - kvm_mmu_reload(vcpu); + + kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); } return 0; @@ -5646,7 +5659,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, switch ((exit_qualification >> 4) & 3) { case 0: /* mov to cr */ reg = (exit_qualification >> 8) & 15; - val = kvm_register_readl(vcpu, reg); + val = kvm_register_read(vcpu, reg); switch (cr) { case 0: if (vmcs12->cr0_guest_host_mask & @@ -5705,6 +5718,21 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, return false; } +static bool nested_vmx_exit_handled_encls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + u32 encls_leaf; + + if (!guest_cpuid_has(vcpu, X86_FEATURE_SGX) || + !nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING)) + return false; + + encls_leaf = kvm_rax_read(vcpu); + if (encls_leaf > 62) + encls_leaf = 63; + return vmcs12->encls_exiting_bitmap & BIT_ULL(encls_leaf); +} + static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, gpa_t bitmap) { @@ -5801,9 +5829,6 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, case EXIT_REASON_VMFUNC: /* VM functions are emulated through L2->L0 vmexits. */ return true; - case EXIT_REASON_ENCLS: - /* SGX is never exposed to L1 */ - return true; default: break; } @@ -5927,6 +5952,8 @@ static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, case EXIT_REASON_TPAUSE: return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE); + case EXIT_REASON_ENCLS: + return nested_vmx_exit_handled_encls(vcpu, vmcs12); default: return true; } @@ -6502,6 +6529,9 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps) msrs->secondary_ctls_high |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + if (enable_sgx) + msrs->secondary_ctls_high |= SECONDARY_EXEC_ENCLS_EXITING; + /* miscellaneous data */ rdmsr(MSR_IA32_VMX_MISC, msrs->misc_low, @@ -6599,6 +6629,7 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)) struct kvm_x86_nested_ops vmx_nested_ops = { .check_events = vmx_check_nested_events, .hv_timer_pending = nested_vmx_preemption_timer_pending, + .triple_fault = nested_vmx_triple_fault, .get_state = vmx_get_nested_state, .set_state = vmx_set_nested_state, .get_nested_state_pages = vmx_get_nested_state_pages, diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h index 197148d76b8f..184418baeb3c 100644 --- a/arch/x86/kvm/vmx/nested.h +++ b/arch/x86/kvm/vmx/nested.h @@ -244,6 +244,11 @@ static inline bool nested_exit_on_intr(struct kvm_vcpu *vcpu) PIN_BASED_EXT_INTR_MASK; } +static inline bool nested_cpu_has_encls_exit(struct vmcs12 *vmcs12) +{ + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING); +} + /* * if fixed0[i] == 1: val[i] must be 1 * if fixed1[i] == 0: val[i] must be 0 diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index 4831bc44ce66..459748680daf 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -10,7 +10,7 @@ #include "vmx.h" /* - * We maintian a per-CPU linked-list of vCPU, so in wakeup_handler() we + * We maintain a per-CPU linked-list of vCPU, so in wakeup_handler() we * can find which vCPU should be waken up. 
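nested_vmx_exit_handled_encls() above boils down to a single bit test against vmcs12's ENCLS-exiting bitmap, with every leaf number above 62 folded onto bit 63. The same test as a stand-alone helper (names local to the sketch):

    #include <stdbool.h>
    #include <stdint.h>

    /* One bit per ENCLS leaf (the value in EAX); leaves >= 63 all share bit 63. */
    static bool encls_leaf_intercepted(uint64_t encls_exiting_bitmap, uint32_t leaf)
    {
            if (leaf > 62)
                    leaf = 63;
            return encls_exiting_bitmap & (1ull << leaf);
    }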
*/ static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu); diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c new file mode 100644 index 000000000000..6693ebdc0770 --- /dev/null +++ b/arch/x86/kvm/vmx/sgx.c @@ -0,0 +1,502 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Intel Corporation. */ + +#include <asm/sgx.h> + +#include "cpuid.h" +#include "kvm_cache_regs.h" +#include "nested.h" +#include "sgx.h" +#include "vmx.h" +#include "x86.h" + +bool __read_mostly enable_sgx = 1; +module_param_named(sgx, enable_sgx, bool, 0444); + +/* Initial value of guest's virtual SGX_LEPUBKEYHASHn MSRs */ +static u64 sgx_pubkey_hash[4] __ro_after_init; + +/* + * ENCLS's memory operands use a fixed segment (DS) and a fixed + * address size based on the mode. Related prefixes are ignored. + */ +static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset, + int size, int alignment, gva_t *gva) +{ + struct kvm_segment s; + bool fault; + + /* Skip vmcs.GUEST_DS retrieval for 64-bit mode to avoid VMREADs. */ + *gva = offset; + if (!is_long_mode(vcpu)) { + vmx_get_segment(vcpu, &s, VCPU_SREG_DS); + *gva += s.base; + } + + if (!IS_ALIGNED(*gva, alignment)) { + fault = true; + } else if (likely(is_long_mode(vcpu))) { + fault = is_noncanonical_address(*gva, vcpu); + } else { + *gva &= 0xffffffff; + fault = (s.unusable) || + (s.type != 2 && s.type != 3) || + (*gva > s.limit) || + ((s.base != 0 || s.limit != 0xffffffff) && + (((u64)*gva + size - 1) > s.limit + 1)); + } + if (fault) + kvm_inject_gp(vcpu, 0); + return fault ? -EINVAL : 0; +} + +static void sgx_handle_emulation_failure(struct kvm_vcpu *vcpu, u64 addr, + unsigned int size) +{ + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; + vcpu->run->internal.ndata = 2; + vcpu->run->internal.data[0] = addr; + vcpu->run->internal.data[1] = size; +} + +static int sgx_read_hva(struct kvm_vcpu *vcpu, unsigned long hva, void *data, + unsigned int size) +{ + if (__copy_from_user(data, (void __user *)hva, size)) { + sgx_handle_emulation_failure(vcpu, hva, size); + return -EFAULT; + } + + return 0; +} + +static int sgx_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t gva, bool write, + gpa_t *gpa) +{ + struct x86_exception ex; + + if (write) + *gpa = kvm_mmu_gva_to_gpa_write(vcpu, gva, &ex); + else + *gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, &ex); + + if (*gpa == UNMAPPED_GVA) { + kvm_inject_emulated_page_fault(vcpu, &ex); + return -EFAULT; + } + + return 0; +} + +static int sgx_gpa_to_hva(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned long *hva) +{ + *hva = kvm_vcpu_gfn_to_hva(vcpu, PFN_DOWN(gpa)); + if (kvm_is_error_hva(*hva)) { + sgx_handle_emulation_failure(vcpu, gpa, 1); + return -EFAULT; + } + + *hva |= gpa & ~PAGE_MASK; + + return 0; +} + +static int sgx_inject_fault(struct kvm_vcpu *vcpu, gva_t gva, int trapnr) +{ + struct x86_exception ex; + + /* + * A non-EPCM #PF indicates a bad userspace HVA. This *should* check + * for PFEC.SGX and not assume any #PF on SGX2 originated in the EPC, + * but the error code isn't (yet) plumbed through the ENCLS helpers. + */ + if (trapnr == PF_VECTOR && !boot_cpu_has(X86_FEATURE_SGX2)) { + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; + vcpu->run->internal.ndata = 0; + return 0; + } + + /* + * If the guest thinks it's running on SGX2 hardware, inject an SGX + * #PF if the fault matches an EPCM fault signature (#GP on SGX1, + * #PF on SGX2). 
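sgx_get_encls_gva() above rejects misaligned operands outright and, in 64-bit mode, non-canonical ones. A compact sketch of those two checks with the canonical test written out explicitly; the 48-bit virtual-address width is an assumption (4-level paging) and the helper names are illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    static bool is_canonical(uint64_t va, unsigned int va_bits)
    {
            /* Bits [63:va_bits-1] must be a sign extension of bit va_bits-1. */
            return ((int64_t)(va << (64 - va_bits)) >> (64 - va_bits)) == (int64_t)va;
    }

    static bool encls_operand_ok_64(uint64_t gva, unsigned int alignment)
    {
            if (gva & (alignment - 1))
                    return false;              /* would raise #GP in the guest */
            return is_canonical(gva, 48);      /* assumed 4-level paging */
    }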
The assumption is that EPCM faults are much more + * likely than a bad userspace address. + */ + if ((trapnr == PF_VECTOR || !boot_cpu_has(X86_FEATURE_SGX2)) && + guest_cpuid_has(vcpu, X86_FEATURE_SGX2)) { + memset(&ex, 0, sizeof(ex)); + ex.vector = PF_VECTOR; + ex.error_code = PFERR_PRESENT_MASK | PFERR_WRITE_MASK | + PFERR_SGX_MASK; + ex.address = gva; + ex.error_code_valid = true; + ex.nested_page_fault = false; + kvm_inject_page_fault(vcpu, &ex); + } else { + kvm_inject_gp(vcpu, 0); + } + return 1; +} + +static int __handle_encls_ecreate(struct kvm_vcpu *vcpu, + struct sgx_pageinfo *pageinfo, + unsigned long secs_hva, + gva_t secs_gva) +{ + struct sgx_secs *contents = (struct sgx_secs *)pageinfo->contents; + struct kvm_cpuid_entry2 *sgx_12_0, *sgx_12_1; + u64 attributes, xfrm, size; + u32 miscselect; + u8 max_size_log2; + int trapnr, ret; + + sgx_12_0 = kvm_find_cpuid_entry(vcpu, 0x12, 0); + sgx_12_1 = kvm_find_cpuid_entry(vcpu, 0x12, 1); + if (!sgx_12_0 || !sgx_12_1) { + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; + vcpu->run->internal.ndata = 0; + return 0; + } + + miscselect = contents->miscselect; + attributes = contents->attributes; + xfrm = contents->xfrm; + size = contents->size; + + /* Enforce restriction of access to the PROVISIONKEY. */ + if (!vcpu->kvm->arch.sgx_provisioning_allowed && + (attributes & SGX_ATTR_PROVISIONKEY)) { + if (sgx_12_1->eax & SGX_ATTR_PROVISIONKEY) + pr_warn_once("KVM: SGX PROVISIONKEY advertised but not allowed\n"); + kvm_inject_gp(vcpu, 0); + return 1; + } + + /* Enforce CPUID restrictions on MISCSELECT, ATTRIBUTES and XFRM. */ + if ((u32)miscselect & ~sgx_12_0->ebx || + (u32)attributes & ~sgx_12_1->eax || + (u32)(attributes >> 32) & ~sgx_12_1->ebx || + (u32)xfrm & ~sgx_12_1->ecx || + (u32)(xfrm >> 32) & ~sgx_12_1->edx) { + kvm_inject_gp(vcpu, 0); + return 1; + } + + /* Enforce CPUID restriction on max enclave size. */ + max_size_log2 = (attributes & SGX_ATTR_MODE64BIT) ? sgx_12_0->edx >> 8 : + sgx_12_0->edx; + if (size >= BIT_ULL(max_size_log2)) + kvm_inject_gp(vcpu, 0); + + /* + * sgx_virt_ecreate() returns: + * 1) 0: ECREATE was successful + * 2) -EFAULT: ECREATE was run but faulted, and trapnr was set to the + * exception number. + * 3) -EINVAL: access_ok() on @secs_hva failed. This should never + * happen as KVM checks host addresses at memslot creation. + * sgx_virt_ecreate() has already warned in this case. + */ + ret = sgx_virt_ecreate(pageinfo, (void __user *)secs_hva, &trapnr); + if (!ret) + return kvm_skip_emulated_instruction(vcpu); + if (ret == -EFAULT) + return sgx_inject_fault(vcpu, secs_gva, trapnr); + + return ret; +} + +static int handle_encls_ecreate(struct kvm_vcpu *vcpu) +{ + gva_t pageinfo_gva, secs_gva; + gva_t metadata_gva, contents_gva; + gpa_t metadata_gpa, contents_gpa, secs_gpa; + unsigned long metadata_hva, contents_hva, secs_hva; + struct sgx_pageinfo pageinfo; + struct sgx_secs *contents; + struct x86_exception ex; + int r; + + if (sgx_get_encls_gva(vcpu, kvm_rbx_read(vcpu), 32, 32, &pageinfo_gva) || + sgx_get_encls_gva(vcpu, kvm_rcx_read(vcpu), 4096, 4096, &secs_gva)) + return 1; + + /* + * Copy the PAGEINFO to local memory, its pointers need to be + * translated, i.e. we need to do a deep copy/translate. 
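The ECREATE restrictions above come straight from CPUID leaf 0x12: subleaf 0's EDX reports log2 of the maximum enclave size (low byte for 32-bit enclaves, the next byte for 64-bit ones), so an SECS.SIZE at or above two to that power draws a #GP. The size check in isolation, mirroring the logic shown above:

    #include <stdbool.h>
    #include <stdint.h>

    /* cpuid_12_0_edx: CPUID.0x12.0:EDX as exposed to the guest. */
    static bool secs_size_allowed(uint64_t secs_size, uint32_t cpuid_12_0_edx, bool mode64)
    {
            uint8_t max_log2 = mode64 ? (cpuid_12_0_edx >> 8) & 0xff
                                      : cpuid_12_0_edx & 0xff;

            return secs_size < (1ull << max_log2);
    }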
+ */ + r = kvm_read_guest_virt(vcpu, pageinfo_gva, &pageinfo, + sizeof(pageinfo), &ex); + if (r == X86EMUL_PROPAGATE_FAULT) { + kvm_inject_emulated_page_fault(vcpu, &ex); + return 1; + } else if (r != X86EMUL_CONTINUE) { + sgx_handle_emulation_failure(vcpu, pageinfo_gva, + sizeof(pageinfo)); + return 0; + } + + if (sgx_get_encls_gva(vcpu, pageinfo.metadata, 64, 64, &metadata_gva) || + sgx_get_encls_gva(vcpu, pageinfo.contents, 4096, 4096, + &contents_gva)) + return 1; + + /* + * Translate the SECINFO, SOURCE and SECS pointers from GVA to GPA. + * Resume the guest on failure to inject a #PF. + */ + if (sgx_gva_to_gpa(vcpu, metadata_gva, false, &metadata_gpa) || + sgx_gva_to_gpa(vcpu, contents_gva, false, &contents_gpa) || + sgx_gva_to_gpa(vcpu, secs_gva, true, &secs_gpa)) + return 1; + + /* + * ...and then to HVA. The order of accesses isn't architectural, i.e. + * KVM doesn't have to fully process one address at a time. Exit to + * userspace if a GPA is invalid. + */ + if (sgx_gpa_to_hva(vcpu, metadata_gpa, &metadata_hva) || + sgx_gpa_to_hva(vcpu, contents_gpa, &contents_hva) || + sgx_gpa_to_hva(vcpu, secs_gpa, &secs_hva)) + return 0; + + /* + * Copy contents into kernel memory to prevent TOCTOU attack. E.g. the + * guest could do ECREATE w/ SECS.SGX_ATTR_PROVISIONKEY=0, and + * simultaneously set SGX_ATTR_PROVISIONKEY to bypass the check to + * enforce restriction of access to the PROVISIONKEY. + */ + contents = (struct sgx_secs *)__get_free_page(GFP_KERNEL_ACCOUNT); + if (!contents) + return -ENOMEM; + + /* Exit to userspace if copying from a host userspace address fails. */ + if (sgx_read_hva(vcpu, contents_hva, (void *)contents, PAGE_SIZE)) { + free_page((unsigned long)contents); + return 0; + } + + pageinfo.metadata = metadata_hva; + pageinfo.contents = (u64)contents; + + r = __handle_encls_ecreate(vcpu, &pageinfo, secs_hva, secs_gva); + + free_page((unsigned long)contents); + + return r; +} + +static int handle_encls_einit(struct kvm_vcpu *vcpu) +{ + unsigned long sig_hva, secs_hva, token_hva, rflags; + struct vcpu_vmx *vmx = to_vmx(vcpu); + gva_t sig_gva, secs_gva, token_gva; + gpa_t sig_gpa, secs_gpa, token_gpa; + int ret, trapnr; + + if (sgx_get_encls_gva(vcpu, kvm_rbx_read(vcpu), 1808, 4096, &sig_gva) || + sgx_get_encls_gva(vcpu, kvm_rcx_read(vcpu), 4096, 4096, &secs_gva) || + sgx_get_encls_gva(vcpu, kvm_rdx_read(vcpu), 304, 512, &token_gva)) + return 1; + + /* + * Translate the SIGSTRUCT, SECS and TOKEN pointers from GVA to GPA. + * Resume the guest on failure to inject a #PF. + */ + if (sgx_gva_to_gpa(vcpu, sig_gva, false, &sig_gpa) || + sgx_gva_to_gpa(vcpu, secs_gva, true, &secs_gpa) || + sgx_gva_to_gpa(vcpu, token_gva, false, &token_gpa)) + return 1; + + /* + * ...and then to HVA. The order of accesses isn't architectural, i.e. + * KVM doesn't have to fully process one address at a time. Exit to + * userspace if a GPA is invalid. Note, all structures are aligned and + * cannot split pages. + */ + if (sgx_gpa_to_hva(vcpu, sig_gpa, &sig_hva) || + sgx_gpa_to_hva(vcpu, secs_gpa, &secs_hva) || + sgx_gpa_to_hva(vcpu, token_gpa, &token_hva)) + return 0; + + ret = sgx_virt_einit((void __user *)sig_hva, (void __user *)token_hva, + (void __user *)secs_hva, + vmx->msr_ia32_sgxlepubkeyhash, &trapnr); + + if (ret == -EFAULT) + return sgx_inject_fault(vcpu, secs_gva, trapnr); + + /* + * sgx_virt_einit() returns -EINVAL when access_ok() fails on @sig_hva, + * @token_hva or @secs_hva. This should never happen as KVM checks host + * addresses at memslot creation. 
sgx_virt_einit() has already warned + * in this case, so just return. + */ + if (ret < 0) + return ret; + + rflags = vmx_get_rflags(vcpu) & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | + X86_EFLAGS_AF | X86_EFLAGS_SF | + X86_EFLAGS_OF); + if (ret) + rflags |= X86_EFLAGS_ZF; + else + rflags &= ~X86_EFLAGS_ZF; + vmx_set_rflags(vcpu, rflags); + + kvm_rax_write(vcpu, ret); + return kvm_skip_emulated_instruction(vcpu); +} + +static inline bool encls_leaf_enabled_in_guest(struct kvm_vcpu *vcpu, u32 leaf) +{ + if (!enable_sgx || !guest_cpuid_has(vcpu, X86_FEATURE_SGX)) + return false; + + if (leaf >= ECREATE && leaf <= ETRACK) + return guest_cpuid_has(vcpu, X86_FEATURE_SGX1); + + if (leaf >= EAUG && leaf <= EMODT) + return guest_cpuid_has(vcpu, X86_FEATURE_SGX2); + + return false; +} + +static inline bool sgx_enabled_in_guest_bios(struct kvm_vcpu *vcpu) +{ + const u64 bits = FEAT_CTL_SGX_ENABLED | FEAT_CTL_LOCKED; + + return (to_vmx(vcpu)->msr_ia32_feature_control & bits) == bits; +} + +int handle_encls(struct kvm_vcpu *vcpu) +{ + u32 leaf = (u32)kvm_rax_read(vcpu); + + if (!encls_leaf_enabled_in_guest(vcpu, leaf)) { + kvm_queue_exception(vcpu, UD_VECTOR); + } else if (!sgx_enabled_in_guest_bios(vcpu)) { + kvm_inject_gp(vcpu, 0); + } else { + if (leaf == ECREATE) + return handle_encls_ecreate(vcpu); + if (leaf == EINIT) + return handle_encls_einit(vcpu); + WARN(1, "KVM: unexpected exit on ENCLS[%u]", leaf); + vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; + vcpu->run->hw.hardware_exit_reason = EXIT_REASON_ENCLS; + return 0; + } + return 1; +} + +void setup_default_sgx_lepubkeyhash(void) +{ + /* + * Use Intel's default value for Skylake hardware if Launch Control is + * not supported, i.e. Intel's hash is hardcoded into silicon, or if + * Launch Control is supported and enabled, i.e. mimic the reset value + * and let the guest write the MSRs at will. If Launch Control is + * supported but disabled, then use the current MSR values as the hash + * MSRs exist but are read-only (locked and not writable). + */ + if (!enable_sgx || boot_cpu_has(X86_FEATURE_SGX_LC) || + rdmsrl_safe(MSR_IA32_SGXLEPUBKEYHASH0, &sgx_pubkey_hash[0])) { + sgx_pubkey_hash[0] = 0xa6053e051270b7acULL; + sgx_pubkey_hash[1] = 0x6cfbe8ba8b3b413dULL; + sgx_pubkey_hash[2] = 0xc4916d99f2b3735dULL; + sgx_pubkey_hash[3] = 0xd4f8c05909f9bb3bULL; + } else { + /* MSR_IA32_SGXLEPUBKEYHASH0 is read above */ + rdmsrl(MSR_IA32_SGXLEPUBKEYHASH1, sgx_pubkey_hash[1]); + rdmsrl(MSR_IA32_SGXLEPUBKEYHASH2, sgx_pubkey_hash[2]); + rdmsrl(MSR_IA32_SGXLEPUBKEYHASH3, sgx_pubkey_hash[3]); + } +} + +void vcpu_setup_sgx_lepubkeyhash(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + memcpy(vmx->msr_ia32_sgxlepubkeyhash, sgx_pubkey_hash, + sizeof(sgx_pubkey_hash)); +} + +/* + * ECREATE must be intercepted to enforce MISCSELECT, ATTRIBUTES and XFRM + * restrictions if the guest's allowed-1 settings diverge from hardware. 
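handle_encls_einit() above finishes by emulating the ENCLS error convention: the arithmetic flags are cleared, ZF is set only when the leaf returned an SGX error code, and that code lands in RAX. A sketch of the flag update, with the EFLAGS bit values assumed from their architectural positions and named locally:

    #include <stdint.h>

    #define EFLAGS_CF 0x001u
    #define EFLAGS_PF 0x004u
    #define EFLAGS_AF 0x010u
    #define EFLAGS_ZF 0x040u
    #define EFLAGS_SF 0x080u
    #define EFLAGS_OF 0x800u

    /* ENCLS[EINIT] reports failure via ZF=1 with an SGX error code in EAX. */
    static uint64_t einit_update_rflags(uint64_t rflags, int sgx_error_code)
    {
            rflags &= ~(uint64_t)(EFLAGS_CF | EFLAGS_PF | EFLAGS_AF |
                                  EFLAGS_SF | EFLAGS_OF | EFLAGS_ZF);
            if (sgx_error_code)
                    rflags |= EFLAGS_ZF;
            return rflags;
    }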
+ */ +static bool sgx_intercept_encls_ecreate(struct kvm_vcpu *vcpu) +{ + struct kvm_cpuid_entry2 *guest_cpuid; + u32 eax, ebx, ecx, edx; + + if (!vcpu->kvm->arch.sgx_provisioning_allowed) + return true; + + guest_cpuid = kvm_find_cpuid_entry(vcpu, 0x12, 0); + if (!guest_cpuid) + return true; + + cpuid_count(0x12, 0, &eax, &ebx, &ecx, &edx); + if (guest_cpuid->ebx != ebx || guest_cpuid->edx != edx) + return true; + + guest_cpuid = kvm_find_cpuid_entry(vcpu, 0x12, 1); + if (!guest_cpuid) + return true; + + cpuid_count(0x12, 1, &eax, &ebx, &ecx, &edx); + if (guest_cpuid->eax != eax || guest_cpuid->ebx != ebx || + guest_cpuid->ecx != ecx || guest_cpuid->edx != edx) + return true; + + return false; +} + +void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) +{ + /* + * There is no software enable bit for SGX that is virtualized by + * hardware, e.g. there's no CR4.SGXE, so when SGX is disabled in the + * guest (either by the host or by the guest's BIOS) but enabled in the + * host, trap all ENCLS leafs and inject #UD/#GP as needed to emulate + * the expected system behavior for ENCLS. + */ + u64 bitmap = -1ull; + + /* Nothing to do if hardware doesn't support SGX */ + if (!cpu_has_vmx_encls_vmexit()) + return; + + if (guest_cpuid_has(vcpu, X86_FEATURE_SGX) && + sgx_enabled_in_guest_bios(vcpu)) { + if (guest_cpuid_has(vcpu, X86_FEATURE_SGX1)) { + bitmap &= ~GENMASK_ULL(ETRACK, ECREATE); + if (sgx_intercept_encls_ecreate(vcpu)) + bitmap |= (1 << ECREATE); + } + + if (guest_cpuid_has(vcpu, X86_FEATURE_SGX2)) + bitmap &= ~GENMASK_ULL(EMODT, EAUG); + + /* + * Trap and execute EINIT if launch control is enabled in the + * host using the guest's values for launch control MSRs, even + * if the guest's values are fixed to hardware default values. + * The MSRs are not loaded/saved on VM-Enter/VM-Exit as writing + * the MSRs is extraordinarily expensive. 
+ */ + if (boot_cpu_has(X86_FEATURE_SGX_LC)) + bitmap |= (1 << EINIT); + + if (!vmcs12 && is_guest_mode(vcpu)) + vmcs12 = get_vmcs12(vcpu); + if (vmcs12 && nested_cpu_has_encls_exit(vmcs12)) + bitmap |= vmcs12->encls_exiting_bitmap; + } + vmcs_write64(ENCLS_EXITING_BITMAP, bitmap); +} diff --git a/arch/x86/kvm/vmx/sgx.h b/arch/x86/kvm/vmx/sgx.h new file mode 100644 index 000000000000..a400888b376d --- /dev/null +++ b/arch/x86/kvm/vmx/sgx.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __KVM_X86_SGX_H +#define __KVM_X86_SGX_H + +#include <linux/kvm_host.h> + +#include "capabilities.h" +#include "vmx_ops.h" + +#ifdef CONFIG_X86_SGX_KVM +extern bool __read_mostly enable_sgx; + +int handle_encls(struct kvm_vcpu *vcpu); + +void setup_default_sgx_lepubkeyhash(void); +void vcpu_setup_sgx_lepubkeyhash(struct kvm_vcpu *vcpu); + +void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12); +#else +#define enable_sgx 0 + +static inline void setup_default_sgx_lepubkeyhash(void) { } +static inline void vcpu_setup_sgx_lepubkeyhash(struct kvm_vcpu *vcpu) { } + +static inline void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + /* Nothing to do if hardware doesn't support SGX */ + if (cpu_has_vmx_encls_vmexit()) + vmcs_write64(ENCLS_EXITING_BITMAP, -1ull); +} +#endif + +#endif /* __KVM_X86_SGX_H */ diff --git a/arch/x86/kvm/vmx/vmcs12.c b/arch/x86/kvm/vmx/vmcs12.c index c8e51c004f78..034adb6404dc 100644 --- a/arch/x86/kvm/vmx/vmcs12.c +++ b/arch/x86/kvm/vmx/vmcs12.c @@ -50,6 +50,7 @@ const unsigned short vmcs_field_to_offset_table[] = { FIELD64(VMREAD_BITMAP, vmread_bitmap), FIELD64(VMWRITE_BITMAP, vmwrite_bitmap), FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap), + FIELD64(ENCLS_EXITING_BITMAP, encls_exiting_bitmap), FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address), FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer), FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl), diff --git a/arch/x86/kvm/vmx/vmcs12.h b/arch/x86/kvm/vmx/vmcs12.h index 80232daf00ff..13494956d0e9 100644 --- a/arch/x86/kvm/vmx/vmcs12.h +++ b/arch/x86/kvm/vmx/vmcs12.h @@ -69,7 +69,8 @@ struct __packed vmcs12 { u64 vm_function_control; u64 eptp_list_address; u64 pml_address; - u64 padding64[3]; /* room for future expansion */ + u64 encls_exiting_bitmap; + u64 padding64[2]; /* room for future expansion */ /* * To allow migration of L1 (complete with its L2 guests) between * machines of different natural widths (32 or 64 bit), we cannot have @@ -256,6 +257,7 @@ static inline void vmx_check_vmcs12_offsets(void) CHECK_OFFSET(vm_function_control, 296); CHECK_OFFSET(eptp_list_address, 304); CHECK_OFFSET(pml_address, 312); + CHECK_OFFSET(encls_exiting_bitmap, 320); CHECK_OFFSET(cr0_guest_host_mask, 344); CHECK_OFFSET(cr4_guest_host_mask, 352); CHECK_OFFSET(cr0_read_shadow, 360); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 32cf8287d4a7..d000cddbd734 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -36,6 +36,7 @@ #include <asm/debugreg.h> #include <asm/desc.h> #include <asm/fpu/internal.h> +#include <asm/idtentry.h> #include <asm/io.h> #include <asm/irq_remapping.h> #include <asm/kexec.h> @@ -57,6 +58,7 @@ #include "mmu.h" #include "nested.h" #include "pmu.h" +#include "sgx.h" #include "trace.h" #include "vmcs.h" #include "vmcs12.h" @@ -156,9 +158,11 @@ static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = { MSR_IA32_SPEC_CTRL, MSR_IA32_PRED_CMD, MSR_IA32_TSC, +#ifdef CONFIG_X86_64 MSR_FS_BASE, MSR_GS_BASE, MSR_KERNEL_GS_BASE, 
+#endif MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, @@ -361,8 +365,6 @@ static const struct kernel_param_ops vmentry_l1d_flush_ops = { module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644); static u32 vmx_segment_access_rights(struct kvm_segment *var); -static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, - u32 msr, int type); void vmx_vmexit(void); @@ -472,26 +474,6 @@ static const u32 vmx_uret_msrs_list[] = { static bool __read_mostly enlightened_vmcs = true; module_param(enlightened_vmcs, bool, 0444); -/* check_ept_pointer() should be under protection of ept_pointer_lock. */ -static void check_ept_pointer_match(struct kvm *kvm) -{ - struct kvm_vcpu *vcpu; - u64 tmp_eptp = INVALID_PAGE; - int i; - - kvm_for_each_vcpu(i, vcpu, kvm) { - if (!VALID_PAGE(tmp_eptp)) { - tmp_eptp = to_vmx(vcpu)->ept_pointer; - } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) { - to_kvm_vmx(kvm)->ept_pointers_match - = EPT_POINTERS_MISMATCH; - return; - } - } - - to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH; -} - static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush, void *data) { @@ -501,47 +483,70 @@ static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush range->pages); } -static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm, - struct kvm_vcpu *vcpu, struct kvm_tlb_range *range) +static inline int hv_remote_flush_root_ept(hpa_t root_ept, + struct kvm_tlb_range *range) { - u64 ept_pointer = to_vmx(vcpu)->ept_pointer; - - /* - * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs address - * of the base of EPT PML4 table, strip off EPT configuration - * information. - */ if (range) - return hyperv_flush_guest_mapping_range(ept_pointer & PAGE_MASK, + return hyperv_flush_guest_mapping_range(root_ept, kvm_fill_hv_flush_list_func, (void *)range); else - return hyperv_flush_guest_mapping(ept_pointer & PAGE_MASK); + return hyperv_flush_guest_mapping(root_ept); } static int hv_remote_flush_tlb_with_range(struct kvm *kvm, struct kvm_tlb_range *range) { + struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm); struct kvm_vcpu *vcpu; - int ret = 0, i; + int ret = 0, i, nr_unique_valid_roots; + hpa_t root; - spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock); + spin_lock(&kvm_vmx->hv_root_ept_lock); - if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK) - check_ept_pointer_match(kvm); + if (!VALID_PAGE(kvm_vmx->hv_root_ept)) { + nr_unique_valid_roots = 0; - if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) { + /* + * Flush all valid roots, and see if all vCPUs have converged + * on a common root, in which case future flushes can skip the + * loop and flush the common root. + */ kvm_for_each_vcpu(i, vcpu, kvm) { - /* If ept_pointer is invalid pointer, bypass flush request. */ - if (VALID_PAGE(to_vmx(vcpu)->ept_pointer)) - ret |= __hv_remote_flush_tlb_with_range( - kvm, vcpu, range); + root = to_vmx(vcpu)->hv_root_ept; + if (!VALID_PAGE(root) || root == kvm_vmx->hv_root_ept) + continue; + + /* + * Set the tracked root to the first valid root. Keep + * this root for the entirety of the loop even if more + * roots are encountered as a low effort optimization + * to avoid flushing the same (first) root again. + */ + if (++nr_unique_valid_roots == 1) + kvm_vmx->hv_root_ept = root; + + if (!ret) + ret = hv_remote_flush_root_ept(root, range); + + /* + * Stop processing roots if a failure occurred and + * multiple valid roots have already been detected. 
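The reworked Hyper-V flush path above replaces the old match/mismatch enum with one cached root: every valid per-vCPU root is flushed, and if exactly one distinct value was seen it is remembered so later flushes can issue a single hypercall. A small sketch of just the convergence bookkeeping, detached from the hypercall plumbing and the flush-while-iterating detail of the real code:

    #include <stddef.h>
    #include <stdint.h>

    #define INVALID_ROOT ((uint64_t)-1)

    /*
     * Returns the root worth caching for future flushes: the common root if
     * all valid per-vCPU roots agree, INVALID_ROOT if they diverge.
     */
    static uint64_t converge_roots(const uint64_t *roots, size_t nr_vcpus)
    {
            uint64_t common = INVALID_ROOT;
            size_t i;

            for (i = 0; i < nr_vcpus; i++) {
                    if (roots[i] == INVALID_ROOT)
                            continue;
                    if (common == INVALID_ROOT)
                            common = roots[i];
                    else if (roots[i] != common)
                            return INVALID_ROOT;
            }
            return common;
    }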
+ */ + if (ret && nr_unique_valid_roots > 1) + break; } + + /* + * The optimized flush of a single root can't be used if there + * are multiple valid roots (obviously). + */ + if (nr_unique_valid_roots > 1) + kvm_vmx->hv_root_ept = INVALID_PAGE; } else { - ret = __hv_remote_flush_tlb_with_range(kvm, - kvm_get_vcpu(kvm, 0), range); + ret = hv_remote_flush_root_ept(kvm_vmx->hv_root_ept, range); } - spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock); + spin_unlock(&kvm_vmx->hv_root_ept_lock); return ret; } static int hv_remote_flush_tlb(struct kvm *kvm) @@ -559,7 +564,7 @@ static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu) * evmcs in singe VM shares same assist page. */ if (!*p_hv_pa_pg) - *p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL); + *p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); if (!*p_hv_pa_pg) return -ENOMEM; @@ -576,6 +581,21 @@ static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu) #endif /* IS_ENABLED(CONFIG_HYPERV) */ +static void hv_track_root_ept(struct kvm_vcpu *vcpu, hpa_t root_ept) +{ +#if IS_ENABLED(CONFIG_HYPERV) + struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm); + + if (kvm_x86_ops.tlb_remote_flush == hv_remote_flush_tlb) { + spin_lock(&kvm_vmx->hv_root_ept_lock); + to_vmx(vcpu)->hv_root_ept = root_ept; + if (root_ept != kvm_vmx->hv_root_ept) + kvm_vmx->hv_root_ept = INVALID_PAGE; + spin_unlock(&kvm_vmx->hv_root_ept_lock); + } +#endif +} + /* * Comment's format: document - errata name - stepping - processor name. * Refer from @@ -1529,7 +1549,7 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data) /* * MTCFreq, CycThresh and PSBFreq encodings check, any MSR write that - * utilize encodings marked reserved will casue a #GP fault. + * utilize encodings marked reserved will cause a #GP fault. */ value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods); if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) && @@ -1570,12 +1590,25 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data) static bool vmx_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int insn_len) { + /* + * Emulation of instructions in SGX enclaves is impossible as RIP does + * not point tthe failing instruction, and even if it did, the code + * stream is inaccessible. Inject #UD instead of exiting to userspace + * so that guest userspace can't DoS the guest simply by triggering + * emulation (enclaves are CPL3 only). + */ + if (to_vmx(vcpu)->exit_reason.enclave_mode) { + kvm_queue_exception(vcpu, UD_VECTOR); + return false; + } return true; } static int skip_emulated_instruction(struct kvm_vcpu *vcpu) { + union vmx_exit_reason exit_reason = to_vmx(vcpu)->exit_reason; unsigned long rip, orig_rip; + u32 instr_len; /* * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on @@ -1586,9 +1619,33 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu) * i.e. we end up advancing IP with some random value. */ if (!static_cpu_has(X86_FEATURE_HYPERVISOR) || - to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) { + exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) { + instr_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); + + /* + * Emulating an enclave's instructions isn't supported as KVM + * cannot access the enclave's memory or its true RIP, e.g. the + * vmcs.GUEST_RIP points at the exit point of the enclave, not + * the RIP that actually triggered the VM-Exit. But, because + * most instructions that cause VM-Exit will #UD in an enclave, + * most instruction-based VM-Exits simply do not occur. 
+ * + * There are a few exceptions, notably the debug instructions + * INT1ICEBRK and INT3, as they are allowed in debug enclaves + * and generate #DB/#BP as expected, which KVM might intercept. + * But again, the CPU does the dirty work and saves an instr + * length of zero so VMMs don't shoot themselves in the foot. + * WARN if KVM tries to skip a non-zero length instruction on + * a VM-Exit from an enclave. + */ + if (!instr_len) + goto rip_updated; + + WARN(exit_reason.enclave_mode, + "KVM: skipping instruction after SGX enclave VM-Exit"); + orig_rip = kvm_rip_read(vcpu); - rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN); + rip = orig_rip + instr_len; #ifdef CONFIG_X86_64 /* * We need to mask out the high 32 bits of RIP if not in 64-bit @@ -1604,6 +1661,7 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu) return 0; } +rip_updated: /* skipping an emulated instruction also counts */ vmx_set_interrupt_shadow(vcpu, 0); @@ -1865,6 +1923,13 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_FEAT_CTL: msr_info->data = vmx->msr_ia32_feature_control; break; + case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC)) + return 1; + msr_info->data = to_vmx(vcpu)->msr_ia32_sgxlepubkeyhash + [msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0]; + break; case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: if (!nested_vmx_allowed(vcpu)) return 1; @@ -2158,6 +2223,29 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) vmx->msr_ia32_feature_control = data; if (msr_info->host_initiated && data == 0) vmx_leave_nested(vcpu); + + /* SGX may be enabled/disabled by guest's firmware */ + vmx_write_encls_bitmap(vcpu, NULL); + break; + case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3: + /* + * On real hardware, the LE hash MSRs are writable before + * the firmware sets bit 0 in MSR 0x7a ("activating" SGX), + * at which point SGX related bits in IA32_FEATURE_CONTROL + * become writable. + * + * KVM does not emulate SGX activation for simplicity, so + * allow writes to the LE hash MSRs if IA32_FEATURE_CONTROL + * is unlocked. This is technically not architectural + * behavior, but it's close enough. + */ + if (!msr_info->host_initiated && + (!guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC) || + ((vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED) && + !(vmx->msr_ia32_feature_control & FEAT_CTL_SGX_LC_ENABLED)))) + return 1; + vmx->msr_ia32_sgxlepubkeyhash + [msr_index - MSR_IA32_SGXLEPUBKEYHASH0] = data; break; case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: if (!msr_info->host_initiated) @@ -2761,7 +2849,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu) struct vcpu_vmx *vmx = to_vmx(vcpu); /* - * Update real mode segment cache. It may be not up-to-date if sement + * Update real mode segment cache. It may be not up-to-date if segment * register was written while vcpu was in a guest mode. 
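The write-permission test for the LE hash MSRs above reads as: the host may always write them, and the guest may write them only if it has SGX_LC and the feature-control MSR is either still unlocked or locked with SGX launch control enabled. The same predicate stand-alone, with the FEAT_CTL bit positions assumed (bit 0 lock, bit 17 SGX LC enable):

    #include <stdbool.h>
    #include <stdint.h>

    #define FEAT_CTL_LOCKED         (1ull << 0)
    #define FEAT_CTL_SGX_LC_ENABLED (1ull << 17)   /* assumed bit positions */

    static bool lepubkeyhash_writable(bool host_initiated, bool guest_has_sgx_lc,
                                      uint64_t feat_ctl)
    {
            if (host_initiated)
                    return true;
            if (!guest_has_sgx_lc)
                    return false;
            /* Writable until firmware locks FEAT_CTL without SGX LC enabled. */
            return !(feat_ctl & FEAT_CTL_LOCKED) ||
                   (feat_ctl & FEAT_CTL_SGX_LC_ENABLED);
    }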
*/ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); @@ -3088,8 +3176,7 @@ static int vmx_get_max_tdp_level(void) return 4; } -u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa, - int root_level) +u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) { u64 eptp = VMX_EPTP_MT_WB; @@ -3098,13 +3185,13 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa, if (enable_ept_ad_bits && (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu))) eptp |= VMX_EPTP_AD_ENABLE_BIT; - eptp |= (root_hpa & PAGE_MASK); + eptp |= root_hpa; return eptp; } -static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd, - int pgd_level) +static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, + int root_level) { struct kvm *kvm = vcpu->kvm; bool update_guest_cr3 = true; @@ -3112,16 +3199,10 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd, u64 eptp; if (enable_ept) { - eptp = construct_eptp(vcpu, pgd, pgd_level); + eptp = construct_eptp(vcpu, root_hpa, root_level); vmcs_write64(EPT_POINTER, eptp); - if (kvm_x86_ops.tlb_remote_flush) { - spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock); - to_vmx(vcpu)->ept_pointer = eptp; - to_kvm_vmx(kvm)->ept_pointers_match - = EPT_POINTERS_CHECK; - spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock); - } + hv_track_root_ept(vcpu, root_hpa); if (!enable_unrestricted_guest && !is_paging(vcpu)) guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr; @@ -3131,7 +3212,7 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd, update_guest_cr3 = false; vmx_ept_load_pdptrs(vcpu); } else { - guest_cr3 = pgd; + guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu); } if (update_guest_cr3) @@ -3738,8 +3819,7 @@ static void vmx_set_msr_bitmap_write(ulong *msr_bitmap, u32 msr) __set_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f); } -static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, - u32 msr, int type) +void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; @@ -3784,8 +3864,7 @@ static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, vmx_clear_msr_bitmap_write(msr_bitmap, msr); } -static __always_inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, - u32 msr, int type) +void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; @@ -3818,15 +3897,6 @@ static __always_inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, vmx_set_msr_bitmap_write(msr_bitmap, msr); } -void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, - u32 msr, int type, bool value) -{ - if (value) - vmx_enable_intercept_for_msr(vcpu, msr, type); - else - vmx_disable_intercept_for_msr(vcpu, msr, type); -} - static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu) { u8 mode = 0; @@ -4314,15 +4384,6 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx) vmx->secondary_exec_control = exec_control; } -static void ept_set_mmio_spte_mask(void) -{ - /* - * EPT Misconfigurations can be generated if the value of bits 2:0 - * of an EPT paging-structure entry is 110b (write/execute). 
- */ - kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE, 0); -} - #define VMX_XSS_EXIT_BITMAP 0 /* @@ -4410,8 +4471,7 @@ static void init_vmcs(struct vcpu_vmx *vmx) vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); } - if (cpu_has_vmx_encls_vmexit()) - vmcs_write64(ENCLS_EXITING_BITMAP, -1ull); + vmx_write_encls_bitmap(&vmx->vcpu, NULL); if (vmx_pt_mode_is_host_guest()) { memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc)); @@ -5020,7 +5080,7 @@ static int handle_cr(struct kvm_vcpu *vcpu) reg = (exit_qualification >> 8) & 15; switch ((exit_qualification >> 4) & 3) { case 0: /* mov to cr */ - val = kvm_register_readl(vcpu, reg); + val = kvm_register_read(vcpu, reg); trace_kvm_cr_write(cr, val); switch (cr) { case 0: @@ -5143,7 +5203,7 @@ static int handle_dr(struct kvm_vcpu *vcpu) kvm_register_write(vcpu, reg, val); err = 0; } else { - err = kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)); + err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg)); } out: @@ -5184,17 +5244,6 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu) return 1; } -static int handle_vmcall(struct kvm_vcpu *vcpu) -{ - return kvm_emulate_hypercall(vcpu); -} - -static int handle_invd(struct kvm_vcpu *vcpu) -{ - /* Treat an INVD instruction as a NOP and just skip it. */ - return kvm_skip_emulated_instruction(vcpu); -} - static int handle_invlpg(struct kvm_vcpu *vcpu) { unsigned long exit_qualification = vmx_get_exit_qual(vcpu); @@ -5203,28 +5252,6 @@ static int handle_invlpg(struct kvm_vcpu *vcpu) return kvm_skip_emulated_instruction(vcpu); } -static int handle_rdpmc(struct kvm_vcpu *vcpu) -{ - int err; - - err = kvm_rdpmc(vcpu); - return kvm_complete_insn_gp(vcpu, err); -} - -static int handle_wbinvd(struct kvm_vcpu *vcpu) -{ - return kvm_emulate_wbinvd(vcpu); -} - -static int handle_xsetbv(struct kvm_vcpu *vcpu) -{ - u64 new_bv = kvm_read_edx_eax(vcpu); - u32 index = kvm_rcx_read(vcpu); - - int err = kvm_set_xcr(vcpu, index, new_bv); - return kvm_complete_insn_gp(vcpu, err); -} - static int handle_apic_access(struct kvm_vcpu *vcpu) { if (likely(fasteoi)) { @@ -5361,7 +5388,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu) EPT_VIOLATION_EXECUTABLE)) ? PFERR_PRESENT_MASK : 0; - error_code |= (exit_qualification & 0x100) != 0 ? + error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) != 0 ? PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK; vcpu->arch.exit_qualification = exit_qualification; @@ -5384,6 +5411,9 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu) { gpa_t gpa; + if (!vmx_can_emulate_instruction(vcpu, NULL, 0)) + return 1; + /* * A nested guest cannot optimize MMIO vmexits, because we have an * nGPA here instead of the required GPA. @@ -5485,18 +5515,6 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu) } } -static void vmx_enable_tdp(void) -{ - kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK, - enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull, - enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull, - 0ull, VMX_EPT_EXECUTABLE_MASK, - cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK, - VMX_EPT_RWX_MASK, 0ull); - - ept_set_mmio_spte_mask(); -} - /* * Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE * exiting, so only get here on cpu with PAUSE-Loop-Exiting. 
@@ -5516,34 +5534,11 @@ static int handle_pause(struct kvm_vcpu *vcpu) return kvm_skip_emulated_instruction(vcpu); } -static int handle_nop(struct kvm_vcpu *vcpu) -{ - return kvm_skip_emulated_instruction(vcpu); -} - -static int handle_mwait(struct kvm_vcpu *vcpu) -{ - printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n"); - return handle_nop(vcpu); -} - -static int handle_invalid_op(struct kvm_vcpu *vcpu) -{ - kvm_queue_exception(vcpu, UD_VECTOR); - return 1; -} - static int handle_monitor_trap(struct kvm_vcpu *vcpu) { return 1; } -static int handle_monitor(struct kvm_vcpu *vcpu) -{ - printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n"); - return handle_nop(vcpu); -} - static int handle_invpcid(struct kvm_vcpu *vcpu) { u32 vmx_instruction_info; @@ -5560,7 +5555,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu) } vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); - type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); + type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf); if (type > 3) { kvm_inject_gp(vcpu, 0); @@ -5632,16 +5627,18 @@ static int handle_vmx_instruction(struct kvm_vcpu *vcpu) return 1; } +#ifndef CONFIG_X86_SGX_KVM static int handle_encls(struct kvm_vcpu *vcpu) { /* - * SGX virtualization is not yet supported. There is no software - * enable bit for SGX, so we have to trap ENCLS and inject a #UD - * to prevent the guest from executing ENCLS. + * SGX virtualization is disabled. There is no software enable bit for + * SGX, so KVM intercepts all ENCLS leafs and injects a #UD to prevent + * the guest from executing ENCLS (when SGX is supported by hardware). */ kvm_queue_exception(vcpu, UD_VECTOR); return 1; } +#endif /* CONFIG_X86_SGX_KVM */ static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu) { @@ -5668,10 +5665,10 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_MSR_WRITE] = kvm_emulate_wrmsr, [EXIT_REASON_INTERRUPT_WINDOW] = handle_interrupt_window, [EXIT_REASON_HLT] = kvm_emulate_halt, - [EXIT_REASON_INVD] = handle_invd, + [EXIT_REASON_INVD] = kvm_emulate_invd, [EXIT_REASON_INVLPG] = handle_invlpg, - [EXIT_REASON_RDPMC] = handle_rdpmc, - [EXIT_REASON_VMCALL] = handle_vmcall, + [EXIT_REASON_RDPMC] = kvm_emulate_rdpmc, + [EXIT_REASON_VMCALL] = kvm_emulate_hypercall, [EXIT_REASON_VMCLEAR] = handle_vmx_instruction, [EXIT_REASON_VMLAUNCH] = handle_vmx_instruction, [EXIT_REASON_VMPTRLD] = handle_vmx_instruction, @@ -5685,8 +5682,8 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_APIC_ACCESS] = handle_apic_access, [EXIT_REASON_APIC_WRITE] = handle_apic_write, [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced, - [EXIT_REASON_WBINVD] = handle_wbinvd, - [EXIT_REASON_XSETBV] = handle_xsetbv, + [EXIT_REASON_WBINVD] = kvm_emulate_wbinvd, + [EXIT_REASON_XSETBV] = kvm_emulate_xsetbv, [EXIT_REASON_TASK_SWITCH] = handle_task_switch, [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, [EXIT_REASON_GDTR_IDTR] = handle_desc, @@ -5694,13 +5691,13 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, - [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait, + [EXIT_REASON_MWAIT_INSTRUCTION] = kvm_emulate_mwait, [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap, - [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor, + [EXIT_REASON_MONITOR_INSTRUCTION] = kvm_emulate_monitor, 
[EXIT_REASON_INVEPT] = handle_vmx_instruction, [EXIT_REASON_INVVPID] = handle_vmx_instruction, - [EXIT_REASON_RDRAND] = handle_invalid_op, - [EXIT_REASON_RDSEED] = handle_invalid_op, + [EXIT_REASON_RDRAND] = kvm_handle_invalid_op, + [EXIT_REASON_RDSEED] = kvm_handle_invalid_op, [EXIT_REASON_PML_FULL] = handle_pml_full, [EXIT_REASON_INVPCID] = handle_invpcid, [EXIT_REASON_VMFUNC] = handle_vmx_instruction, @@ -5787,12 +5784,23 @@ static void vmx_dump_dtsel(char *name, uint32_t limit) vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT)); } -void dump_vmcs(void) +static void vmx_dump_msrs(char *name, struct vmx_msrs *m) +{ + unsigned int i; + struct vmx_msr_entry *e; + + pr_err("MSR %s:\n", name); + for (i = 0, e = m->val; i < m->nr; ++i, ++e) + pr_err(" %2d: msr=0x%08x value=0x%016llx\n", i, e->index, e->value); +} + +void dump_vmcs(struct kvm_vcpu *vcpu) { + struct vcpu_vmx *vmx = to_vmx(vcpu); u32 vmentry_ctl, vmexit_ctl; u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control; unsigned long cr4; - u64 efer; + int efer_slot; if (!dump_invalid_vmcs) { pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n"); @@ -5804,7 +5812,6 @@ void dump_vmcs(void) cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL); cr4 = vmcs_readl(GUEST_CR4); - efer = vmcs_read64(GUEST_IA32_EFER); secondary_exec_control = 0; if (cpu_has_secondary_exec_ctrls()) secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); @@ -5816,9 +5823,7 @@ void dump_vmcs(void) pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK)); pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3)); - if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) && - (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA)) - { + if (cpu_has_vmx_ept()) { pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n", vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1)); pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n", @@ -5841,10 +5846,20 @@ void dump_vmcs(void) vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR); vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT); vmx_dump_sel("TR: ", GUEST_TR_SELECTOR); - if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) || - (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER))) - pr_err("EFER = 0x%016llx PAT = 0x%016llx\n", - efer, vmcs_read64(GUEST_IA32_PAT)); + efer_slot = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.guest, MSR_EFER); + if (vmentry_ctl & VM_ENTRY_LOAD_IA32_EFER) + pr_err("EFER= 0x%016llx\n", vmcs_read64(GUEST_IA32_EFER)); + else if (efer_slot >= 0) + pr_err("EFER= 0x%016llx (autoload)\n", + vmx->msr_autoload.guest.val[efer_slot].value); + else if (vmentry_ctl & VM_ENTRY_IA32E_MODE) + pr_err("EFER= 0x%016llx (effective)\n", + vcpu->arch.efer | (EFER_LMA | EFER_LME)); + else + pr_err("EFER= 0x%016llx (effective)\n", + vcpu->arch.efer & ~(EFER_LMA | EFER_LME)); + if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PAT) + pr_err("PAT = 0x%016llx\n", vmcs_read64(GUEST_IA32_PAT)); pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n", vmcs_read64(GUEST_IA32_DEBUGCTL), vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS)); @@ -5860,6 +5875,10 @@ void dump_vmcs(void) if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) pr_err("InterruptStatus = %04x\n", vmcs_read16(GUEST_INTR_STATUS)); + if (vmcs_read32(VM_ENTRY_MSR_LOAD_COUNT) > 0) + vmx_dump_msrs("guest autoload", &vmx->msr_autoload.guest); + if 
(vmcs_read32(VM_EXIT_MSR_STORE_COUNT) > 0) + vmx_dump_msrs("guest autostore", &vmx->msr_autostore.guest); pr_err("*** Host State ***\n"); pr_err("RIP = 0x%016lx RSP = 0x%016lx\n", @@ -5881,14 +5900,16 @@ void dump_vmcs(void) vmcs_readl(HOST_IA32_SYSENTER_ESP), vmcs_read32(HOST_IA32_SYSENTER_CS), vmcs_readl(HOST_IA32_SYSENTER_EIP)); - if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER)) - pr_err("EFER = 0x%016llx PAT = 0x%016llx\n", - vmcs_read64(HOST_IA32_EFER), - vmcs_read64(HOST_IA32_PAT)); + if (vmexit_ctl & VM_EXIT_LOAD_IA32_EFER) + pr_err("EFER= 0x%016llx\n", vmcs_read64(HOST_IA32_EFER)); + if (vmexit_ctl & VM_EXIT_LOAD_IA32_PAT) + pr_err("PAT = 0x%016llx\n", vmcs_read64(HOST_IA32_PAT)); if (cpu_has_load_perf_global_ctrl() && vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) pr_err("PerfGlobCtl = 0x%016llx\n", vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL)); + if (vmcs_read32(VM_EXIT_MSR_LOAD_COUNT) > 0) + vmx_dump_msrs("host autoload", &vmx->msr_autoload.host); pr_err("*** Control State ***\n"); pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n", @@ -5997,7 +6018,7 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) } if (exit_reason.failed_vmentry) { - dump_vmcs(); + dump_vmcs(vcpu); vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; vcpu->run->fail_entry.hardware_entry_failure_reason = exit_reason.full; @@ -6006,7 +6027,7 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) } if (unlikely(vmx->fail)) { - dump_vmcs(); + dump_vmcs(vcpu); vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; vcpu->run->fail_entry.hardware_entry_failure_reason = vmcs_read32(VM_INSTRUCTION_ERROR); @@ -6027,19 +6048,19 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) exit_reason.basic != EXIT_REASON_PML_FULL && exit_reason.basic != EXIT_REASON_APIC_ACCESS && exit_reason.basic != EXIT_REASON_TASK_SWITCH)) { + int ndata = 3; + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; - vcpu->run->internal.ndata = 3; vcpu->run->internal.data[0] = vectoring_info; vcpu->run->internal.data[1] = exit_reason.full; vcpu->run->internal.data[2] = vcpu->arch.exit_qualification; if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) { - vcpu->run->internal.ndata++; - vcpu->run->internal.data[3] = + vcpu->run->internal.data[ndata++] = vmcs_read64(GUEST_PHYSICAL_ADDRESS); } - vcpu->run->internal.data[vcpu->run->internal.ndata++] = - vcpu->arch.last_vmentry_cpu; + vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu; + vcpu->run->internal.ndata = ndata; return 0; } @@ -6092,7 +6113,7 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) unexpected_vmexit: vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", exit_reason.full); - dump_vmcs(); + dump_vmcs(vcpu); vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; @@ -6395,18 +6416,17 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu) void vmx_do_interrupt_nmi_irqoff(unsigned long entry); -static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu, u32 intr_info) +static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu, + unsigned long entry) { - unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK; - gate_desc *desc = (gate_desc *)host_idt_base + vector; - kvm_before_interrupt(vcpu); - vmx_do_interrupt_nmi_irqoff(gate_offset(desc)); + vmx_do_interrupt_nmi_irqoff(entry); kvm_after_interrupt(vcpu); 
} static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx) { + const unsigned long nmi_entry = (unsigned long)asm_exc_nmi_noist; u32 intr_info = vmx_get_intr_info(&vmx->vcpu); /* if exit due to PF check for async PF */ @@ -6417,18 +6437,20 @@ static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx) kvm_machine_check(); /* We need to handle NMIs before interrupts are enabled */ else if (is_nmi(intr_info)) - handle_interrupt_nmi_irqoff(&vmx->vcpu, intr_info); + handle_interrupt_nmi_irqoff(&vmx->vcpu, nmi_entry); } static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu) { u32 intr_info = vmx_get_intr_info(vcpu); + unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK; + gate_desc *desc = (gate_desc *)host_idt_base + vector; if (WARN_ONCE(!is_external_intr(intr_info), "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info)) return; - handle_interrupt_nmi_irqoff(vcpu, intr_info); + handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc)); } static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu) @@ -6642,25 +6664,7 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx) { - /* - * VMENTER enables interrupts (host state), but the kernel state is - * interrupts disabled when this is invoked. Also tell RCU about - * it. This is the same logic as for exit_to_user_mode(). - * - * This ensures that e.g. latency analysis on the host observes - * guest mode as interrupt enabled. - * - * guest_enter_irqoff() informs context tracking about the - * transition to guest mode and if enabled adjusts RCU state - * accordingly. - */ - instrumentation_begin(); - trace_hardirqs_on_prepare(); - lockdep_hardirqs_on_prepare(CALLER_ADDR0); - instrumentation_end(); - - guest_enter_irqoff(); - lockdep_hardirqs_on(CALLER_ADDR0); + kvm_guest_enter_irqoff(); /* L1D Flush includes CPU buffer clear to mitigate MDS */ if (static_branch_unlikely(&vmx_l1d_should_flush)) @@ -6676,24 +6680,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, vcpu->arch.cr2 = native_read_cr2(); - /* - * VMEXIT disables interrupts (host state), but tracing and lockdep - * have them in state 'on' as recorded before entering guest mode. - * Same as enter_from_user_mode(). - * - * guest_exit_irqoff() restores host context and reinstates RCU if - * enabled and required. - * - * This needs to be done before the below as native_read_msr() - * contains a tracepoint and x86_spec_ctrl_restore_host() calls - * into world and some more. 
- */ - lockdep_hardirqs_off(CALLER_ADDR0); - guest_exit_irqoff(); - - instrumentation_begin(); - trace_hardirqs_off_finish(); - instrumentation_end(); + kvm_guest_exit_irqoff(); } static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) @@ -6938,9 +6925,11 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS); vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R); +#ifdef CONFIG_X86_64 vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW); vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW); vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW); +#endif vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW); vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW); vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW); @@ -6976,6 +6965,8 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) else memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs)); + vcpu_setup_sgx_lepubkeyhash(vcpu); + vmx->nested.posted_intr_nv = -1; vmx->nested.current_vmptr = -1ull; @@ -6989,8 +6980,9 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) vmx->pi_desc.nv = POSTED_INTR_VECTOR; vmx->pi_desc.sn = 1; - vmx->ept_pointer = INVALID_PAGE; - +#if IS_ENABLED(CONFIG_HYPERV) + vmx->hv_root_ept = INVALID_PAGE; +#endif return 0; free_vmcs: @@ -7007,7 +6999,9 @@ free_vpid: static int vmx_vm_init(struct kvm *kvm) { - spin_lock_init(&to_kvm_vmx(kvm)->ept_pointer_lock); +#if IS_ENABLED(CONFIG_HYPERV) + spin_lock_init(&to_kvm_vmx(kvm)->hv_root_ept_lock); +#endif if (!ple_gap) kvm->arch.pause_in_guest = true; @@ -7252,7 +7246,7 @@ static void update_intel_pt_cfg(struct kvm_vcpu *vcpu) if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output)) vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA; - /* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabircEn can be set */ + /* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabricEn can be set */ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys)) vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN; @@ -7302,6 +7296,19 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) set_cr4_guest_host_mask(vmx); + vmx_write_encls_bitmap(vcpu, NULL); + if (guest_cpuid_has(vcpu, X86_FEATURE_SGX)) + vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_SGX_ENABLED; + else + vmx->msr_ia32_feature_control_valid_bits &= ~FEAT_CTL_SGX_ENABLED; + + if (guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC)) + vmx->msr_ia32_feature_control_valid_bits |= + FEAT_CTL_SGX_LC_ENABLED; + else + vmx->msr_ia32_feature_control_valid_bits &= + ~FEAT_CTL_SGX_LC_ENABLED; + /* Refresh #PF interception to account for MAXPHYADDR changes. 
*/ vmx_update_exception_bitmap(vcpu); } @@ -7322,6 +7329,13 @@ static __init void vmx_set_cpu_caps(void) if (vmx_pt_mode_is_host_guest()) kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT); + if (!enable_sgx) { + kvm_cpu_cap_clear(X86_FEATURE_SGX); + kvm_cpu_cap_clear(X86_FEATURE_SGX_LC); + kvm_cpu_cap_clear(X86_FEATURE_SGX1); + kvm_cpu_cap_clear(X86_FEATURE_SGX2); + } + if (vmx_umip_emulated()) kvm_cpu_cap_set(X86_FEATURE_UMIP); @@ -7848,7 +7862,8 @@ static __init int hardware_setup(void) set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ if (enable_ept) - vmx_enable_tdp(); + kvm_mmu_set_ept_masks(enable_ept_ad_bits, + cpu_has_vmx_ept_execute_only()); if (!enable_ept) ept_lpage_level = 0; @@ -7909,6 +7924,8 @@ static __init int hardware_setup(void) if (!enable_ept || !cpu_has_vmx_intel_pt()) pt_mode = PT_MODE_SYSTEM; + setup_default_sgx_lepubkeyhash(); + if (nested) { nested_vmx_setup_ctls_msrs(&vmcs_config.nested, vmx_capability.ept); diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 89da5e1251f1..008cb87ff088 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -325,7 +325,12 @@ struct vcpu_vmx { */ u64 msr_ia32_feature_control; u64 msr_ia32_feature_control_valid_bits; - u64 ept_pointer; + /* SGX Launch Control public key hash */ + u64 msr_ia32_sgxlepubkeyhash[4]; + +#if IS_ENABLED(CONFIG_HYPERV) + u64 hv_root_ept; +#endif struct pt_desc pt_desc; struct lbr_desc lbr_desc; @@ -338,12 +343,6 @@ struct vcpu_vmx { } shadow_msr_intercept; }; -enum ept_pointers_status { - EPT_POINTERS_CHECK = 0, - EPT_POINTERS_MATCH = 1, - EPT_POINTERS_MISMATCH = 2 -}; - struct kvm_vmx { struct kvm kvm; @@ -351,8 +350,10 @@ struct kvm_vmx { bool ept_identity_pagetable_done; gpa_t ept_identity_map_addr; - enum ept_pointers_status ept_pointers_match; - spinlock_t ept_pointer_lock; +#if IS_ENABLED(CONFIG_HYPERV) + hpa_t hv_root_ept; + spinlock_t hv_root_ept_lock; +#endif }; bool nested_vmx_allowed(struct kvm_vcpu *vcpu); @@ -376,8 +377,7 @@ void set_cr4_guest_host_mask(struct vcpu_vmx *vmx); void ept_save_pdptrs(struct kvm_vcpu *vcpu); void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); -u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa, - int root_level); +u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level); void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu); void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu); @@ -392,8 +392,19 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp); bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched); int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr); void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu); -void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, - u32 msr, int type, bool value); + +void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type); +void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type); + +static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, + int type, bool value) +{ + if (value) + vmx_enable_intercept_for_msr(vcpu, msr, type); + else + vmx_disable_intercept_for_msr(vcpu, msr, type); +} + void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu); static inline u8 vmx_get_rvi(void) @@ -543,6 +554,6 @@ static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu) return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu); } -void dump_vmcs(void); +void 
dump_vmcs(struct kvm_vcpu *vcpu); #endif /* __KVM_X86_VMX_H */ diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h index 692b0c31c9c8..164b64f65a8f 100644 --- a/arch/x86/kvm/vmx/vmx_ops.h +++ b/arch/x86/kvm/vmx/vmx_ops.h @@ -37,6 +37,10 @@ static __always_inline void vmcs_check32(unsigned long field) { BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0, "32-bit accessor invalid for 16-bit field"); + BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000, + "32-bit accessor invalid for 64-bit field"); + BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001, + "32-bit accessor invalid for 64-bit high field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000, "32-bit accessor invalid for natural width field"); } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 47e021bdcc94..6eda2834fc05 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -75,6 +75,7 @@ #include <asm/tlbflush.h> #include <asm/intel_pt.h> #include <asm/emulate_prefix.h> +#include <asm/sgx.h> #include <clocksource/hyperv_timer.h> #define CREATE_TRACE_POINTS @@ -156,9 +157,9 @@ module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); /* * lapic timer advance (tscdeadline mode only) in nanoseconds. '-1' enables - * adaptive tuning starting from default advancment of 1000ns. '0' disables + * adaptive tuning starting from default advancement of 1000ns. '0' disables * advancement entirely. Any other value is used as-is and disables adaptive - * tuning, i.e. allows priveleged userspace to set an exact advancement time. + * tuning, i.e. allows privileged userspace to set an exact advancement time. */ static int __read_mostly lapic_timer_advance_ns = -1; module_param(lapic_timer_advance_ns, int, S_IRUGO | S_IWUSR); @@ -245,6 +246,9 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { VCPU_STAT("l1d_flush", l1d_flush), VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns), VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns), + VCPU_STAT("nested_run", nested_run), + VCPU_STAT("directed_yield_attempted", directed_yield_attempted), + VCPU_STAT("directed_yield_successful", directed_yield_successful), VM_STAT("mmu_shadow_zapped", mmu_shadow_zapped), VM_STAT("mmu_pte_write", mmu_pte_write), VM_STAT("mmu_pde_zapped", mmu_pde_zapped), @@ -271,8 +275,7 @@ static struct kmem_cache *x86_emulator_cache; * When called, it means the previous get/set msr reached an invalid msr. * Return true if we want to ignore/silent this failed msr access. */ -static bool kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr, - u64 data, bool write) +static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write) { const char *op = write ? 
"wrmsr" : "rdmsr"; @@ -544,8 +547,6 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu, if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) { queue: - if (has_error && !is_protmode(vcpu)) - has_error = false; if (reinject) { /* * On vmentry, vcpu->arch.exception.pending is only @@ -984,14 +985,17 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) return 0; } -int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) +int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu) { - if (static_call(kvm_x86_get_cpl)(vcpu) == 0) - return __kvm_set_xcr(vcpu, index, xcr); + if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || + __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) { + kvm_inject_gp(vcpu, 0); + return 1; + } - return 1; + return kvm_skip_emulated_instruction(vcpu); } -EXPORT_SYMBOL_GPL(kvm_set_xcr); +EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv); bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { @@ -1073,10 +1077,15 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) return 0; } - if (is_long_mode(vcpu) && kvm_vcpu_is_illegal_gpa(vcpu, cr3)) + /* + * Do not condition the GPA check on long mode, this helper is used to + * stuff CR3, e.g. for RSM emulation, and there is no guarantee that + * the current vCPU mode is accurate. + */ + if (kvm_vcpu_is_illegal_gpa(vcpu, cr3)) return 1; - else if (is_pae_paging(vcpu) && - !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) + + if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) return 1; kvm_mmu_new_pgd(vcpu, cr3, skip_tlb_flush, skip_tlb_flush); @@ -1192,20 +1201,21 @@ void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) } EXPORT_SYMBOL_GPL(kvm_get_dr); -bool kvm_rdpmc(struct kvm_vcpu *vcpu) +int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu) { u32 ecx = kvm_rcx_read(vcpu); u64 data; - int err; - err = kvm_pmu_rdpmc(vcpu, ecx, &data); - if (err) - return err; + if (kvm_pmu_rdpmc(vcpu, ecx, &data)) { + kvm_inject_gp(vcpu, 0); + return 1; + } + kvm_rax_write(vcpu, (u32)data); kvm_rdx_write(vcpu, data >> 32); - return err; + return kvm_skip_emulated_instruction(vcpu); } -EXPORT_SYMBOL_GPL(kvm_rdpmc); +EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc); /* * List of msr numbers which we expose to userspace through KVM_GET_MSRS @@ -1288,7 +1298,7 @@ static const u32 emulated_msrs_all[] = { MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK, MSR_IA32_TSC_ADJUST, - MSR_IA32_TSCDEADLINE, + MSR_IA32_TSC_DEADLINE, MSR_IA32_ARCH_CAPABILITIES, MSR_IA32_PERF_CAPABILITIES, MSR_IA32_MISC_ENABLE, @@ -1373,7 +1383,7 @@ static u64 kvm_get_arch_capabilities(void) /* * If nx_huge_pages is enabled, KVM's shadow paging will ensure that * the nested hypervisor runs with NX huge pages. If it is not, - * L1 is anyway vulnerable to ITLB_MULTIHIT explots from other + * L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other * L1 guests, so it need not worry about its own (L2) guests. 
*/ data |= ARCH_CAP_PSCHANGE_MC_NO; @@ -1445,7 +1455,7 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) if (r == KVM_MSR_RET_INVALID) { /* Unconditionally clear the output for simplicity */ *data = 0; - if (kvm_msr_ignored_check(vcpu, index, 0, false)) + if (kvm_msr_ignored_check(index, 0, false)) r = 0; } @@ -1526,35 +1536,44 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type) { + struct kvm_x86_msr_filter *msr_filter; + struct msr_bitmap_range *ranges; struct kvm *kvm = vcpu->kvm; - struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges; - u32 count = kvm->arch.msr_filter.count; - u32 i; - bool r = kvm->arch.msr_filter.default_allow; + bool allowed; int idx; + u32 i; - /* MSR filtering not set up or x2APIC enabled, allow everything */ - if (!count || (index >= 0x800 && index <= 0x8ff)) + /* x2APIC MSRs do not support filtering. */ + if (index >= 0x800 && index <= 0x8ff) return true; - /* Prevent collision with set_msr_filter */ idx = srcu_read_lock(&kvm->srcu); - for (i = 0; i < count; i++) { + msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu); + if (!msr_filter) { + allowed = true; + goto out; + } + + allowed = msr_filter->default_allow; + ranges = msr_filter->ranges; + + for (i = 0; i < msr_filter->count; i++) { u32 start = ranges[i].base; u32 end = start + ranges[i].nmsrs; u32 flags = ranges[i].flags; unsigned long *bitmap = ranges[i].bitmap; if ((index >= start) && (index < end) && (flags & type)) { - r = !!test_bit(index - start, bitmap); + allowed = !!test_bit(index - start, bitmap); break; } } +out: srcu_read_unlock(&kvm->srcu, idx); - return r; + return allowed; } EXPORT_SYMBOL_GPL(kvm_msr_allowed); @@ -1611,7 +1630,7 @@ static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu, int ret = __kvm_set_msr(vcpu, index, data, host_initiated); if (ret == KVM_MSR_RET_INVALID) - if (kvm_msr_ignored_check(vcpu, index, data, true)) + if (kvm_msr_ignored_check(index, data, true)) ret = 0; return ret; @@ -1649,7 +1668,7 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu, if (ret == KVM_MSR_RET_INVALID) { /* Unconditionally clear *data for simplicity */ *data = 0; - if (kvm_msr_ignored_check(vcpu, index, 0, false)) + if (kvm_msr_ignored_check(index, 0, false)) ret = 0; } @@ -1783,6 +1802,40 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr); +int kvm_emulate_as_nop(struct kvm_vcpu *vcpu) +{ + return kvm_skip_emulated_instruction(vcpu); +} +EXPORT_SYMBOL_GPL(kvm_emulate_as_nop); + +int kvm_emulate_invd(struct kvm_vcpu *vcpu) +{ + /* Treat an INVD instruction as a NOP and just skip it. 
*/ + return kvm_emulate_as_nop(vcpu); +} +EXPORT_SYMBOL_GPL(kvm_emulate_invd); + +int kvm_emulate_mwait(struct kvm_vcpu *vcpu) +{ + pr_warn_once("kvm: MWAIT instruction emulated as NOP!\n"); + return kvm_emulate_as_nop(vcpu); +} +EXPORT_SYMBOL_GPL(kvm_emulate_mwait); + +int kvm_handle_invalid_op(struct kvm_vcpu *vcpu) +{ + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; +} +EXPORT_SYMBOL_GPL(kvm_handle_invalid_op); + +int kvm_emulate_monitor(struct kvm_vcpu *vcpu) +{ + pr_warn_once("kvm: MONITOR instruction emulated as NOP!\n"); + return kvm_emulate_as_nop(vcpu); +} +EXPORT_SYMBOL_GPL(kvm_emulate_monitor); + static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu) { xfer_to_guest_mode_prepare(); @@ -1841,7 +1894,7 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu) ret = EXIT_FASTPATH_EXIT_HANDLED; } break; - case MSR_IA32_TSCDEADLINE: + case MSR_IA32_TSC_DEADLINE: data = kvm_read_edx_eax(vcpu); if (!handle_fastpath_set_tscdeadline(vcpu, data)) { kvm_skip_emulated_instruction(vcpu); @@ -2320,7 +2373,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data) kvm_vcpu_write_tsc_offset(vcpu, offset); raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); - spin_lock(&kvm->arch.pvclock_gtod_sync_lock); + spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags); if (!matched) { kvm->arch.nr_vcpus_matched_tsc = 0; } else if (!already_matched) { @@ -2328,7 +2381,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data) } kvm_track_tsc_matching(vcpu); - spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); + spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags); } static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, @@ -2550,11 +2603,16 @@ static void kvm_gen_update_masterclock(struct kvm *kvm) int i; struct kvm_vcpu *vcpu; struct kvm_arch *ka = &kvm->arch; + unsigned long flags; + + kvm_hv_invalidate_tsc_page(kvm); - spin_lock(&ka->pvclock_gtod_sync_lock); kvm_make_mclock_inprogress_request(kvm); + /* no guest entries from this point */ + spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags); pvclock_update_vm_gtod_copy(kvm); + spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); kvm_for_each_vcpu(i, vcpu, kvm) kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); @@ -2562,8 +2620,6 @@ static void kvm_gen_update_masterclock(struct kvm *kvm) /* guest entries allowed */ kvm_for_each_vcpu(i, vcpu, kvm) kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); - - spin_unlock(&ka->pvclock_gtod_sync_lock); #endif } @@ -2571,17 +2627,18 @@ u64 get_kvmclock_ns(struct kvm *kvm) { struct kvm_arch *ka = &kvm->arch; struct pvclock_vcpu_time_info hv_clock; + unsigned long flags; u64 ret; - spin_lock(&ka->pvclock_gtod_sync_lock); + spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags); if (!ka->use_master_clock) { - spin_unlock(&ka->pvclock_gtod_sync_lock); + spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); return get_kvmclock_base_ns() + ka->kvmclock_offset; } hv_clock.tsc_timestamp = ka->master_cycle_now; hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; - spin_unlock(&ka->pvclock_gtod_sync_lock); + spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); /* both __this_cpu_read() and rdtsc() should be on the same cpu */ get_cpu(); @@ -2675,13 +2732,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) * If the host uses TSC clock, then passthrough TSC as stable * to the guest. 
*/ - spin_lock(&ka->pvclock_gtod_sync_lock); + spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags); use_master_clock = ka->use_master_clock; if (use_master_clock) { host_tsc = ka->master_cycle_now; kernel_ns = ka->master_kernel_ns; } - spin_unlock(&ka->pvclock_gtod_sync_lock); + spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); /* Keep irq disabled to prevent changes to the clock */ local_irq_save(flags); @@ -3075,7 +3132,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return kvm_set_apic_base(vcpu, msr_info); case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: return kvm_x2apic_msr_write(vcpu, msr, data); - case MSR_IA32_TSCDEADLINE: + case MSR_IA32_TSC_DEADLINE: kvm_set_lapic_tscdeadline_msr(vcpu, data); break; case MSR_IA32_TSC_ADJUST: @@ -3370,6 +3427,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = 0; break; case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5: + if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) + return kvm_pmu_get_msr(vcpu, msr_info); + if (!msr_info->host_initiated) + return 1; + msr_info->data = 0; + break; case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3: case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1: @@ -3437,7 +3500,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); - case MSR_IA32_TSCDEADLINE: + case MSR_IA32_TSC_DEADLINE: msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); break; case MSR_IA32_TSC_ADJUST: @@ -3759,8 +3822,14 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_X86_USER_SPACE_MSR: case KVM_CAP_X86_MSR_FILTER: case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: +#ifdef CONFIG_X86_SGX_KVM + case KVM_CAP_SGX_ATTRIBUTE: +#endif + case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: r = 1; break; + case KVM_CAP_SET_GUEST_DEBUG2: + return KVM_GUESTDBG_VALID_MASK; #ifdef CONFIG_KVM_XEN case KVM_CAP_XEN_HVM: r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR | @@ -4013,7 +4082,6 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) { struct kvm_host_map map; struct kvm_steal_time *st; - int idx; if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; @@ -4021,15 +4089,9 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) if (vcpu->arch.st.preempted) return; - /* - * Take the srcu lock as memslots will be accessed to check the gfn - * cache generation against the memslots generation. - */ - idx = srcu_read_lock(&vcpu->kvm->srcu); - if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map, &vcpu->arch.st.cache, true)) - goto out; + return; st = map.hva + offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); @@ -4037,20 +4099,25 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true); - -out: - srcu_read_unlock(&vcpu->kvm->srcu, idx); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { + int idx; + if (vcpu->preempted && !vcpu->arch.guest_state_protected) vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); + /* + * Take the srcu lock as memslots will be accessed to check the gfn + * cache generation against the memslots generation. 
+ */ + idx = srcu_read_lock(&vcpu->kvm->srcu); if (kvm_xen_msr_enabled(vcpu->kvm)) kvm_xen_runstate_set_preempted(vcpu); else kvm_steal_time_set_preempted(vcpu); + srcu_read_unlock(&vcpu->kvm->srcu, idx); static_call(kvm_x86_vcpu_put)(vcpu); vcpu->arch.last_host_tsc = rdtsc(); @@ -4663,7 +4730,6 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, kvm_update_pv_runtime(vcpu); return 0; - default: return -EINVAL; } @@ -5345,6 +5411,28 @@ split_irqchip_unlock: kvm->arch.bus_lock_detection_enabled = true; r = 0; break; +#ifdef CONFIG_X86_SGX_KVM + case KVM_CAP_SGX_ATTRIBUTE: { + unsigned long allowed_attributes = 0; + + r = sgx_set_attribute(&allowed_attributes, cap->args[0]); + if (r) + break; + + /* KVM only supports the PROVISIONKEY privileged attribute. */ + if ((allowed_attributes & SGX_ATTR_PROVISIONKEY) && + !(allowed_attributes & ~SGX_ATTR_PROVISIONKEY)) + kvm->arch.sgx_provisioning_allowed = true; + else + r = -EINVAL; + break; + } +#endif + case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: + r = -EINVAL; + if (kvm_x86_ops.vm_copy_enc_context_from) + r = kvm_x86_ops.vm_copy_enc_context_from(kvm, cap->args[0]); + return r; default: r = -EINVAL; break; @@ -5352,25 +5440,34 @@ split_irqchip_unlock: return r; } -static void kvm_clear_msr_filter(struct kvm *kvm) +static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow) +{ + struct kvm_x86_msr_filter *msr_filter; + + msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT); + if (!msr_filter) + return NULL; + + msr_filter->default_allow = default_allow; + return msr_filter; +} + +static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter) { u32 i; - u32 count = kvm->arch.msr_filter.count; - struct msr_bitmap_range ranges[16]; - mutex_lock(&kvm->lock); - kvm->arch.msr_filter.count = 0; - memcpy(ranges, kvm->arch.msr_filter.ranges, count * sizeof(ranges[0])); - mutex_unlock(&kvm->lock); - synchronize_srcu(&kvm->srcu); + if (!msr_filter) + return; - for (i = 0; i < count; i++) - kfree(ranges[i].bitmap); + for (i = 0; i < msr_filter->count; i++) + kfree(msr_filter->ranges[i].bitmap); + + kfree(msr_filter); } -static int kvm_add_msr_filter(struct kvm *kvm, struct kvm_msr_filter_range *user_range) +static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter, + struct kvm_msr_filter_range *user_range) { - struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges; struct msr_bitmap_range range; unsigned long *bitmap = NULL; size_t bitmap_size; @@ -5404,11 +5501,9 @@ static int kvm_add_msr_filter(struct kvm *kvm, struct kvm_msr_filter_range *user goto err; } - /* Everything ok, add this range identifier to our global pool */ - ranges[kvm->arch.msr_filter.count] = range; - /* Make sure we filled the array before we tell anyone to walk it */ - smp_wmb(); - kvm->arch.msr_filter.count++; + /* Everything ok, add this range identifier. 
*/ + msr_filter->ranges[msr_filter->count] = range; + msr_filter->count++; return 0; err: @@ -5419,10 +5514,11 @@ err: static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp) { struct kvm_msr_filter __user *user_msr_filter = argp; + struct kvm_x86_msr_filter *new_filter, *old_filter; struct kvm_msr_filter filter; bool default_allow; - int r = 0; bool empty = true; + int r = 0; u32 i; if (copy_from_user(&filter, user_msr_filter, sizeof(filter))) @@ -5435,25 +5531,32 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp) if (empty && !default_allow) return -EINVAL; - kvm_clear_msr_filter(kvm); - - kvm->arch.msr_filter.default_allow = default_allow; + new_filter = kvm_alloc_msr_filter(default_allow); + if (!new_filter) + return -ENOMEM; - /* - * Protect from concurrent calls to this function that could trigger - * a TOCTOU violation on kvm->arch.msr_filter.count. - */ - mutex_lock(&kvm->lock); for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) { - r = kvm_add_msr_filter(kvm, &filter.ranges[i]); - if (r) - break; + r = kvm_add_msr_filter(new_filter, &filter.ranges[i]); + if (r) { + kvm_free_msr_filter(new_filter); + return r; + } } + mutex_lock(&kvm->lock); + + /* The per-VM filter is protected by kvm->lock... */ + old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1); + + rcu_assign_pointer(kvm->arch.msr_filter, new_filter); + synchronize_srcu(&kvm->srcu); + + kvm_free_msr_filter(old_filter); + kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED); mutex_unlock(&kvm->lock); - return r; + return 0; } long kvm_arch_vm_ioctl(struct file *filp, @@ -5700,6 +5803,7 @@ set_pit2_out: } #endif case KVM_SET_CLOCK: { + struct kvm_arch *ka = &kvm->arch; struct kvm_clock_data user_ns; u64 now_ns; @@ -5718,8 +5822,22 @@ set_pit2_out: * pvclock_update_vm_gtod_copy(). */ kvm_gen_update_masterclock(kvm); - now_ns = get_kvmclock_ns(kvm); - kvm->arch.kvmclock_offset += user_ns.clock - now_ns; + + /* + * This pairs with kvm_guest_time_update(): when masterclock is + * in use, we use master_kernel_ns + kvmclock_offset to set + * unsigned 'system_time' so if we use get_kvmclock_ns() (which + * is slightly ahead) here we risk going negative on unsigned + * 'system_time' when 'user_ns.clock' is very small. + */ + spin_lock_irq(&ka->pvclock_gtod_sync_lock); + if (kvm->arch.use_master_clock) + now_ns = ka->master_kernel_ns; + else + now_ns = get_kvmclock_base_ns(); + ka->kvmclock_offset = user_ns.clock - now_ns; + spin_unlock_irq(&ka->pvclock_gtod_sync_lock); + kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE); break; } @@ -5959,6 +6077,7 @@ gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? 
PFERR_USER_MASK : 0; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } +EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read); gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) @@ -5975,6 +6094,7 @@ gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, access |= PFERR_WRITE_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } +EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write); /* uses this to access any guest's mapped memory without checking CPL */ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, @@ -6603,7 +6723,7 @@ static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) int cpu = get_cpu(); cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); - smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, + on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask, wbinvd_ipi, NULL, 1); put_cpu(); cpumask_clear(vcpu->arch.wbinvd_dirty_mask); @@ -6894,12 +7014,12 @@ static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt) static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg) { - return kvm_register_read(emul_to_vcpu(ctxt), reg); + return kvm_register_read_raw(emul_to_vcpu(ctxt), reg); } static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val) { - kvm_register_write(emul_to_vcpu(ctxt), reg, val); + kvm_register_write_raw(emul_to_vcpu(ctxt), reg, val); } static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) @@ -7698,6 +7818,7 @@ static void kvm_hyperv_tsc_notifier(void) struct kvm *kvm; struct kvm_vcpu *vcpu; int cpu; + unsigned long flags; mutex_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) @@ -7713,17 +7834,15 @@ static void kvm_hyperv_tsc_notifier(void) list_for_each_entry(kvm, &vm_list, vm_list) { struct kvm_arch *ka = &kvm->arch; - spin_lock(&ka->pvclock_gtod_sync_lock); - + spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags); pvclock_update_vm_gtod_copy(kvm); + spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); kvm_for_each_vcpu(cpu, vcpu, kvm) kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); kvm_for_each_vcpu(cpu, vcpu, kvm) kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); - - spin_unlock(&ka->pvclock_gtod_sync_lock); } mutex_unlock(&kvm_lock); } @@ -8004,9 +8123,6 @@ int kvm_arch_init(void *opaque) if (r) goto out_free_percpu; - kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, - PT_DIRTY_MASK, PT64_NX_MASK, 0, - PT_PRESENT_MASK, 0, sme_me_mask); kvm_timer_init(); perf_register_guest_info_callbacks(&kvm_guest_cbs); @@ -8166,21 +8282,35 @@ void kvm_apicv_init(struct kvm *kvm, bool enable) } EXPORT_SYMBOL_GPL(kvm_apicv_init); -static void kvm_sched_yield(struct kvm *kvm, unsigned long dest_id) +static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id) { struct kvm_vcpu *target = NULL; struct kvm_apic_map *map; + vcpu->stat.directed_yield_attempted++; + rcu_read_lock(); - map = rcu_dereference(kvm->arch.apic_map); + map = rcu_dereference(vcpu->kvm->arch.apic_map); if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id]) target = map->phys_map[dest_id]->vcpu; rcu_read_unlock(); - if (target && READ_ONCE(target->ready)) - kvm_vcpu_yield_to(target); + if (!target || !READ_ONCE(target->ready)) + goto no_yield; + + /* Ignore requests to yield to self */ + if (vcpu == target) + goto no_yield; + + if (kvm_vcpu_yield_to(target) <= 0) + goto no_yield; + + vcpu->stat.directed_yield_successful++; + +no_yield: + return; } int kvm_emulate_hypercall(struct kvm_vcpu 
*vcpu) @@ -8227,7 +8357,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) break; kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); - kvm_sched_yield(vcpu->kvm, a1); + kvm_sched_yield(vcpu, a1); ret = 0; break; #ifdef CONFIG_X86_64 @@ -8245,7 +8375,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD)) break; - kvm_sched_yield(vcpu->kvm, a0); + kvm_sched_yield(vcpu, a0); ret = 0; break; default: @@ -8328,6 +8458,27 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu) static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr); } + +int kvm_check_nested_events(struct kvm_vcpu *vcpu) +{ + if (WARN_ON_ONCE(!is_guest_mode(vcpu))) + return -EIO; + + if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { + kvm_x86_ops.nested_ops->triple_fault(vcpu); + return 1; + } + + return kvm_x86_ops.nested_ops->check_events(vcpu); +} + +static void kvm_inject_exception(struct kvm_vcpu *vcpu) +{ + if (vcpu->arch.exception.error_code && !is_protmode(vcpu)) + vcpu->arch.exception.error_code = false; + static_call(kvm_x86_queue_exception)(vcpu); +} + static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit) { int r; @@ -8336,7 +8487,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit /* try to reinject previous events if any */ if (vcpu->arch.exception.injected) { - static_call(kvm_x86_queue_exception)(vcpu); + kvm_inject_exception(vcpu); can_inject = false; } /* @@ -8373,7 +8524,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit * from L2 to L1. */ if (is_guest_mode(vcpu)) { - r = kvm_x86_ops.nested_ops->check_events(vcpu); + r = kvm_check_nested_events(vcpu); if (r < 0) goto busy; } @@ -8399,7 +8550,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit } } - static_call(kvm_x86_queue_exception)(vcpu); + kvm_inject_exception(vcpu); can_inject = false; } @@ -8548,7 +8699,7 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf) put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu)); for (i = 0; i < 8; i++) - put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read(vcpu, i)); + put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read_raw(vcpu, i)); kvm_get_dr(vcpu, 6, &val); put_smstate(u32, buf, 0x7fcc, (u32)val); @@ -8594,7 +8745,7 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf) int i; for (i = 0; i < 16; i++) - put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i)); + put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i)); put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu)); put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu)); @@ -8936,10 +9087,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) goto out; } if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { - vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; - vcpu->mmio_needed = 0; - r = 0; - goto out; + if (is_guest_mode(vcpu)) { + kvm_x86_ops.nested_ops->triple_fault(vcpu); + } else { + vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; + vcpu->mmio_needed = 0; + r = 0; + goto out; + } } if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { /* Page is swapped out. Do synthetic halt */ @@ -9160,6 +9315,15 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) local_irq_disable(); kvm_after_interrupt(vcpu); + /* + * Wait until after servicing IRQs to account guest time so that any + * ticks that occurred while running the guest are properly accounted + * to the guest. 
Waiting until IRQs are enabled degrades the accuracy + * of accounting via context tracking, but the loss of accuracy is + * acceptable for all known use cases. + */ + vtime_account_guest_exit(); + if (lapic_in_kernel(vcpu)) { s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta; if (delta != S64_MIN) { @@ -9237,7 +9401,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) { if (is_guest_mode(vcpu)) - kvm_x86_ops.nested_ops->check_events(vcpu); + kvm_check_nested_events(vcpu); return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && !vcpu->arch.apf.halted); @@ -10634,8 +10798,6 @@ void kvm_arch_pre_destroy_vm(struct kvm *kvm) void kvm_arch_destroy_vm(struct kvm *kvm) { - u32 i; - if (current->mm == kvm->mm) { /* * Free memory regions allocated on behalf of userspace, @@ -10651,8 +10813,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) mutex_unlock(&kvm->slots_lock); } static_call_cond(kvm_x86_vm_destroy)(kvm); - for (i = 0; i < kvm->arch.msr_filter.count; i++) - kfree(kvm->arch.msr_filter.ranges[i].bitmap); + kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1)); kvm_pic_destroy(kvm); kvm_ioapic_destroy(kvm); kvm_free_vcpus(kvm); @@ -10966,6 +11127,14 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); } +bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) +{ + if (vcpu->arch.apicv_active && static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu)) + return true; + + return false; +} + bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) { if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) @@ -10976,14 +11145,14 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) kvm_test_request(KVM_REQ_EVENT, vcpu)) return true; - if (vcpu->arch.apicv_active && static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu)) - return true; - - return false; + return kvm_arch_dy_has_pending_interrupt(vcpu); } bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) { + if (vcpu->arch.guest_state_protected) + return true; + return vcpu->arch.preempted_in_kernel; } @@ -11254,7 +11423,7 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu) if (!kvm_pv_async_pf_enabled(vcpu)) return true; else - return apf_pageready_slot_free(vcpu); + return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu); } void kvm_arch_start_assignment(struct kvm *kvm) @@ -11503,7 +11672,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva) fallthrough; case INVPCID_TYPE_ALL_INCL_GLOBAL: - kvm_mmu_unload(vcpu); + kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); return kvm_skip_emulated_instruction(vcpu); default: diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 39eb04887141..521f74e5bbf2 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -8,6 +8,59 @@ #include "kvm_cache_regs.h" #include "kvm_emulate.h" +static __always_inline void kvm_guest_enter_irqoff(void) +{ + /* + * VMENTER enables interrupts (host state), but the kernel state is + * interrupts disabled when this is invoked. Also tell RCU about + * it. This is the same logic as for exit_to_user_mode(). + * + * This ensures that e.g. latency analysis on the host observes + * guest mode as interrupt enabled. + * + * guest_enter_irqoff() informs context tracking about the + * transition to guest mode and if enabled adjusts RCU state + * accordingly. 
+ */ + instrumentation_begin(); + trace_hardirqs_on_prepare(); + lockdep_hardirqs_on_prepare(CALLER_ADDR0); + instrumentation_end(); + + guest_enter_irqoff(); + lockdep_hardirqs_on(CALLER_ADDR0); +} + +static __always_inline void kvm_guest_exit_irqoff(void) +{ + /* + * VMEXIT disables interrupts (host state), but tracing and lockdep + * have them in state 'on' as recorded before entering guest mode. + * Same as enter_from_user_mode(). + * + * context_tracking_guest_exit() restores host context and reinstates + * RCU if enabled and required. + * + * This needs to be done immediately after VM-Exit, before any code + * that might contain tracepoints or call out to the greater world, + * e.g. before x86_spec_ctrl_restore_host(). + */ + lockdep_hardirqs_off(CALLER_ADDR0); + context_tracking_guest_exit(); + + instrumentation_begin(); + trace_hardirqs_off_finish(); + instrumentation_end(); +} + +#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check) \ +({ \ + bool failed = (consistency_check); \ + if (failed) \ + trace_kvm_nested_vmenter_failed(#consistency_check, 0); \ + failed; \ +}) + #define KVM_DEFAULT_PLE_GAP 128 #define KVM_VMX_DEFAULT_PLE_WINDOW 4096 #define KVM_DEFAULT_PLE_WINDOW_GROW 2 @@ -48,6 +101,8 @@ static inline unsigned int __shrink_ple_window(unsigned int val, #define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL +int kvm_check_nested_events(struct kvm_vcpu *vcpu); + static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu) { vcpu->arch.exception.pending = false; @@ -222,19 +277,19 @@ static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) return false; } -static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, int reg) +static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg) { - unsigned long val = kvm_register_read(vcpu, reg); + unsigned long val = kvm_register_read_raw(vcpu, reg); return is_64_bit_mode(vcpu) ? 
val : (u32)val; } -static inline void kvm_register_writel(struct kvm_vcpu *vcpu, +static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg, unsigned long val) { if (!is_64_bit_mode(vcpu)) val = (u32)val; - return kvm_register_write(vcpu, reg, val); + return kvm_register_write_raw(vcpu, reg, val); } static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk) @@ -250,7 +305,6 @@ static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu) void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs); void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); -void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr); u64 get_kvmclock_ns(struct kvm *kvm); int kvm_read_guest_virt(struct kvm_vcpu *vcpu, diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S index 3b6544111ac9..16bc9130e7a5 100644 --- a/arch/x86/lib/atomic64_386_32.S +++ b/arch/x86/lib/atomic64_386_32.S @@ -6,7 +6,7 @@ */ #include <linux/linkage.h> -#include <asm/alternative-asm.h> +#include <asm/alternative.h> /* if you want SMP support, implement these with real spinlocks */ .macro LOCK reg diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S index 1c5c81c16b06..ce6935690766 100644 --- a/arch/x86/lib/atomic64_cx8_32.S +++ b/arch/x86/lib/atomic64_cx8_32.S @@ -6,7 +6,7 @@ */ #include <linux/linkage.h> -#include <asm/alternative-asm.h> +#include <asm/alternative.h> .macro read64 reg movl %ebx, %eax diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S index 2402d4c489d2..db4b4f9197c7 100644 --- a/arch/x86/lib/copy_page_64.S +++ b/arch/x86/lib/copy_page_64.S @@ -3,7 +3,7 @@ #include <linux/linkage.h> #include <asm/cpufeatures.h> -#include <asm/alternative-asm.h> +#include <asm/alternative.h> #include <asm/export.h> /* diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 77b9b2a3b5c8..57b79c577496 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -11,7 +11,7 @@ #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/cpufeatures.h> -#include <asm/alternative-asm.h> +#include <asm/alternative.h> #include <asm/asm.h> #include <asm/smap.h> #include <asm/export.h> diff --git a/arch/x86/lib/inat.c b/arch/x86/lib/inat.c index 12539fca75c4..b0f3b2a62ae2 100644 --- a/arch/x86/lib/inat.c +++ b/arch/x86/lib/inat.c @@ -4,7 +4,7 @@ * * Written by Masami Hiramatsu <mhiramat@redhat.com> */ -#include <asm/insn.h> +#include <asm/insn.h> /* __ignore_sync_check__ */ /* Attribute tables are generated from opcode map */ #include "inat-tables.c" diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c index bb0b3fe1e0a0..a67afd74232c 100644 --- a/arch/x86/lib/insn-eval.c +++ b/arch/x86/lib/insn-eval.c @@ -232,7 +232,7 @@ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off) * resolve_seg_reg() - obtain segment register index * @insn: Instruction with operands * @regs: Register values as seen when entering kernel mode - * @regoff: Operand offset, in pt_regs, used to deterimine segment register + * @regoff: Operand offset, in pt_regs, used to determine segment register * * Determine the segment register associated with the operands and, if * applicable, prefixes and the instruction pointed by @insn. 
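The insn-eval.c hunks below stop calling insn_get_modrm() for its side effect and then testing insn->modrm.nbytes; instead the collector returns an int and each caller propagates it, so a truncated buffer surfaces as an error rather than silently leaving fields unset. A compilable sketch of that calling convention, with the decoder reduced to a toy struct (all names here are illustrative, not the kernel API):

#include <errno.h>
#include <stdio.h>

struct toy_insn {
	int modrm_got;		/* has the ModRM byte been collected? */
	unsigned char modrm;
	int have_bytes;		/* bytes left in the input buffer */
};

/* New-style collector, as in this patch: return 0 or a negative errno. */
static int toy_get_modrm(struct toy_insn *insn)
{
	if (insn->modrm_got)
		return 0;
	if (insn->have_bytes < 1)
		return -ENODATA;	/* ran out of bytes */
	insn->modrm = 0xc0;		/* pretend we read mod=3 */
	insn->have_bytes--;
	insn->modrm_got = 1;
	return 0;
}

/* Caller pattern used by the reworked get_eff_addr_*() helpers. */
static int toy_get_eff_addr_reg(struct toy_insn *insn, long *eff_addr)
{
	int ret = toy_get_modrm(insn);

	if (ret)
		return ret;
	if ((insn->modrm >> 6) != 3)
		return -EINVAL;
	*eff_addr = 0;
	return 0;
}

int main(void)
{
	struct toy_insn ok = { .have_bytes = 1 };
	struct toy_insn short_buf = { .have_bytes = 0 };
	long addr;

	printf("ok: %d\n", toy_get_eff_addr_reg(&ok, &addr));
	printf("short: %d\n", toy_get_eff_addr_reg(&short_buf, &addr));
	return 0;
}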
@@ -404,10 +404,6 @@ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx) case INAT_SEG_REG_FS: return (unsigned short)(regs->fs & 0xffff); case INAT_SEG_REG_GS: - /* - * GS may or may not be in regs as per CONFIG_X86_32_LAZY_GS. - * The macro below takes care of both cases. - */ return get_user_gs(regs); case INAT_SEG_REG_IGNORE: default: @@ -517,7 +513,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs, * @insn: Instruction containing ModRM byte * @regs: Register values as seen when entering kernel mode * @offs1: Offset of the first operand register - * @offs2: Offset of the second opeand register, if applicable + * @offs2: Offset of the second operand register, if applicable * * Obtain the offset, in pt_regs, of the registers indicated by the ModRM byte * in @insn. This function is to be used with 16-bit address encodings. The @@ -576,7 +572,7 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs, * If ModRM.mod is 0 and ModRM.rm is 110b, then we use displacement- * only addressing. This means that no registers are involved in * computing the effective address. Thus, ensure that the first - * register offset is invalild. The second register offset is already + * register offset is invalid. The second register offset is already * invalid under the aforementioned conditions. */ if ((X86_MODRM_MOD(insn->modrm.value) == 0) && @@ -928,10 +924,11 @@ static int get_seg_base_limit(struct insn *insn, struct pt_regs *regs, static int get_eff_addr_reg(struct insn *insn, struct pt_regs *regs, int *regoff, long *eff_addr) { - insn_get_modrm(insn); + int ret; - if (!insn->modrm.nbytes) - return -EINVAL; + ret = insn_get_modrm(insn); + if (ret) + return ret; if (X86_MODRM_MOD(insn->modrm.value) != 3) return -EINVAL; @@ -977,14 +974,14 @@ static int get_eff_addr_modrm(struct insn *insn, struct pt_regs *regs, int *regoff, long *eff_addr) { long tmp; + int ret; if (insn->addr_bytes != 8 && insn->addr_bytes != 4) return -EINVAL; - insn_get_modrm(insn); - - if (!insn->modrm.nbytes) - return -EINVAL; + ret = insn_get_modrm(insn); + if (ret) + return ret; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; @@ -1106,18 +1103,21 @@ static int get_eff_addr_modrm_16(struct insn *insn, struct pt_regs *regs, * @base_offset will have a register, as an offset from the base of pt_regs, * that can be used to resolve the associated segment. * - * -EINVAL on error. + * Negative value on error. 
*/ static int get_eff_addr_sib(struct insn *insn, struct pt_regs *regs, int *base_offset, long *eff_addr) { long base, indx; int indx_offset; + int ret; if (insn->addr_bytes != 8 && insn->addr_bytes != 4) return -EINVAL; - insn_get_modrm(insn); + ret = insn_get_modrm(insn); + if (ret) + return ret; if (!insn->modrm.nbytes) return -EINVAL; @@ -1125,7 +1125,9 @@ static int get_eff_addr_sib(struct insn *insn, struct pt_regs *regs, if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; - insn_get_sib(insn); + ret = insn_get_sib(insn); + if (ret) + return ret; if (!insn->sib.nbytes) return -EINVAL; @@ -1194,8 +1196,8 @@ static void __user *get_addr_ref_16(struct insn *insn, struct pt_regs *regs) short eff_addr; long tmp; - insn_get_modrm(insn); - insn_get_displacement(insn); + if (insn_get_displacement(insn)) + goto out; if (insn->addr_bytes != 2) goto out; @@ -1492,7 +1494,7 @@ int insn_fetch_from_user_inatomic(struct pt_regs *regs, unsigned char buf[MAX_IN } /** - * insn_decode() - Decode an instruction + * insn_decode_from_regs() - Decode an instruction * @insn: Structure to store decoded instruction * @regs: Structure with register values as seen when entering kernel mode * @buf: Buffer containing the instruction bytes @@ -1505,8 +1507,8 @@ int insn_fetch_from_user_inatomic(struct pt_regs *regs, unsigned char buf[MAX_IN * * True if instruction was decoded, False otherwise. */ -bool insn_decode(struct insn *insn, struct pt_regs *regs, - unsigned char buf[MAX_INSN_SIZE], int buf_size) +bool insn_decode_from_regs(struct insn *insn, struct pt_regs *regs, + unsigned char buf[MAX_INSN_SIZE], int buf_size) { int seg_defs; @@ -1529,7 +1531,9 @@ bool insn_decode(struct insn *insn, struct pt_regs *regs, insn->addr_bytes = INSN_CODE_SEG_ADDR_SZ(seg_defs); insn->opnd_bytes = INSN_CODE_SEG_OPND_SZ(seg_defs); - insn_get_length(insn); + if (insn_get_length(insn)) + return false; + if (buf_size < insn->length) return false; diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c index 435630a6ec97..058f19b20465 100644 --- a/arch/x86/lib/insn.c +++ b/arch/x86/lib/insn.c @@ -11,10 +11,13 @@ #else #include <string.h> #endif -#include <asm/inat.h> -#include <asm/insn.h> +#include <asm/inat.h> /*__ignore_sync_check__ */ +#include <asm/insn.h> /* __ignore_sync_check__ */ -#include <asm/emulate_prefix.h> +#include <linux/errno.h> +#include <linux/kconfig.h> + +#include <asm/emulate_prefix.h> /* __ignore_sync_check__ */ #define leXX_to_cpu(t, r) \ ({ \ @@ -51,6 +54,7 @@ * insn_init() - initialize struct insn * @insn: &struct insn to be initialized * @kaddr: address (in kernel memory) of instruction (or copy thereof) + * @buf_len: length of the insn buffer at @kaddr * @x86_64: !0 for 64-bit kernel or 64-bit app */ void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64) @@ -111,8 +115,12 @@ static void insn_get_emulate_prefix(struct insn *insn) * Populates the @insn->prefixes bitmap, and updates @insn->next_byte * to point to the (first) opcode. No effect if @insn->prefixes.got * is already set. 
+ * + * * Returns: + * 0: on success + * < 0: on error */ -void insn_get_prefixes(struct insn *insn) +int insn_get_prefixes(struct insn *insn) { struct insn_field *prefixes = &insn->prefixes; insn_attr_t attr; @@ -120,7 +128,7 @@ void insn_get_prefixes(struct insn *insn) int i, nb; if (prefixes->got) - return; + return 0; insn_get_emulate_prefix(insn); @@ -230,8 +238,10 @@ vex_end: prefixes->got = 1; + return 0; + err_out: - return; + return -ENODATA; } /** @@ -243,16 +253,25 @@ err_out: * If necessary, first collects any preceding (prefix) bytes. * Sets @insn->opcode.value = opcode1. No effect if @insn->opcode.got * is already 1. + * + * Returns: + * 0: on success + * < 0: on error */ -void insn_get_opcode(struct insn *insn) +int insn_get_opcode(struct insn *insn) { struct insn_field *opcode = &insn->opcode; + int pfx_id, ret; insn_byte_t op; - int pfx_id; + if (opcode->got) - return; - if (!insn->prefixes.got) - insn_get_prefixes(insn); + return 0; + + if (!insn->prefixes.got) { + ret = insn_get_prefixes(insn); + if (ret) + return ret; + } /* Get first opcode */ op = get_next(insn_byte_t, insn); @@ -267,9 +286,13 @@ void insn_get_opcode(struct insn *insn) insn->attr = inat_get_avx_attribute(op, m, p); if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) || (!inat_accept_vex(insn->attr) && - !inat_is_group(insn->attr))) - insn->attr = 0; /* This instruction is bad */ - goto end; /* VEX has only 1 byte for opcode */ + !inat_is_group(insn->attr))) { + /* This instruction is bad */ + insn->attr = 0; + return -EINVAL; + } + /* VEX has only 1 byte for opcode */ + goto end; } insn->attr = inat_get_opcode_attribute(op); @@ -280,13 +303,18 @@ void insn_get_opcode(struct insn *insn) pfx_id = insn_last_prefix_id(insn); insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr); } - if (inat_must_vex(insn->attr)) - insn->attr = 0; /* This instruction is bad */ + + if (inat_must_vex(insn->attr)) { + /* This instruction is bad */ + insn->attr = 0; + return -EINVAL; + } end: opcode->got = 1; + return 0; err_out: - return; + return -ENODATA; } /** @@ -296,15 +324,25 @@ err_out: * Populates @insn->modrm and updates @insn->next_byte to point past the * ModRM byte, if any. If necessary, first collects the preceding bytes * (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1. 
+ * + * Returns: + * 0: on success + * < 0: on error */ -void insn_get_modrm(struct insn *insn) +int insn_get_modrm(struct insn *insn) { struct insn_field *modrm = &insn->modrm; insn_byte_t pfx_id, mod; + int ret; + if (modrm->got) - return; - if (!insn->opcode.got) - insn_get_opcode(insn); + return 0; + + if (!insn->opcode.got) { + ret = insn_get_opcode(insn); + if (ret) + return ret; + } if (inat_has_modrm(insn->attr)) { mod = get_next(insn_byte_t, insn); @@ -313,17 +351,22 @@ void insn_get_modrm(struct insn *insn) pfx_id = insn_last_prefix_id(insn); insn->attr = inat_get_group_attribute(mod, pfx_id, insn->attr); - if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) - insn->attr = 0; /* This is bad */ + if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) { + /* Bad insn */ + insn->attr = 0; + return -EINVAL; + } } } if (insn->x86_64 && inat_is_force64(insn->attr)) insn->opnd_bytes = 8; + modrm->got = 1; + return 0; err_out: - return; + return -ENODATA; } @@ -337,11 +380,16 @@ err_out: int insn_rip_relative(struct insn *insn) { struct insn_field *modrm = &insn->modrm; + int ret; if (!insn->x86_64) return 0; - if (!modrm->got) - insn_get_modrm(insn); + + if (!modrm->got) { + ret = insn_get_modrm(insn); + if (ret) + return 0; + } /* * For rip-relative instructions, the mod field (top 2 bits) * is zero and the r/m field (bottom 3 bits) is 0x5. @@ -355,15 +403,25 @@ int insn_rip_relative(struct insn *insn) * * If necessary, first collects the instruction up to and including the * ModRM byte. + * + * Returns: + * 0: if decoding succeeded + * < 0: otherwise. */ -void insn_get_sib(struct insn *insn) +int insn_get_sib(struct insn *insn) { insn_byte_t modrm; + int ret; if (insn->sib.got) - return; - if (!insn->modrm.got) - insn_get_modrm(insn); + return 0; + + if (!insn->modrm.got) { + ret = insn_get_modrm(insn); + if (ret) + return ret; + } + if (insn->modrm.nbytes) { modrm = insn->modrm.bytes[0]; if (insn->addr_bytes != 2 && @@ -374,8 +432,10 @@ void insn_get_sib(struct insn *insn) } insn->sib.got = 1; + return 0; + err_out: - return; + return -ENODATA; } @@ -386,15 +446,25 @@ err_out: * If necessary, first collects the instruction up to and including the * SIB byte. * Displacement value is sign-expanded. + * + * * Returns: + * 0: if decoding succeeded + * < 0: otherwise. */ -void insn_get_displacement(struct insn *insn) +int insn_get_displacement(struct insn *insn) { insn_byte_t mod, rm, base; + int ret; if (insn->displacement.got) - return; - if (!insn->sib.got) - insn_get_sib(insn); + return 0; + + if (!insn->sib.got) { + ret = insn_get_sib(insn); + if (ret) + return ret; + } + if (insn->modrm.nbytes) { /* * Interpreting the modrm byte: @@ -436,9 +506,10 @@ void insn_get_displacement(struct insn *insn) } out: insn->displacement.got = 1; + return 0; err_out: - return; + return -ENODATA; } /* Decode moffset16/32/64. Return 0 if failed */ @@ -537,20 +608,30 @@ err_out: } /** - * insn_get_immediate() - Get the immediates of instruction + * insn_get_immediate() - Get the immediate in an instruction * @insn: &struct insn containing instruction * * If necessary, first collects the instruction up to and including the * displacement bytes. * Basically, most of immediates are sign-expanded. 
Unsigned-value can be - * get by bit masking with ((1 << (nbytes * 8)) - 1) + * computed by bit masking with ((1 << (nbytes * 8)) - 1) + * + * Returns: + * 0: on success + * < 0: on error */ -void insn_get_immediate(struct insn *insn) +int insn_get_immediate(struct insn *insn) { + int ret; + if (insn->immediate.got) - return; - if (!insn->displacement.got) - insn_get_displacement(insn); + return 0; + + if (!insn->displacement.got) { + ret = insn_get_displacement(insn); + if (ret) + return ret; + } if (inat_has_moffset(insn->attr)) { if (!__get_moffset(insn)) @@ -597,9 +678,10 @@ void insn_get_immediate(struct insn *insn) } done: insn->immediate.got = 1; + return 0; err_out: - return; + return -ENODATA; } /** @@ -608,13 +690,65 @@ err_out: * * If necessary, first collects the instruction up to and including the * immediates bytes. - */ -void insn_get_length(struct insn *insn) + * + * Returns: + * - 0 on success + * - < 0 on error +*/ +int insn_get_length(struct insn *insn) { + int ret; + if (insn->length) - return; - if (!insn->immediate.got) - insn_get_immediate(insn); + return 0; + + if (!insn->immediate.got) { + ret = insn_get_immediate(insn); + if (ret) + return ret; + } + insn->length = (unsigned char)((unsigned long)insn->next_byte - (unsigned long)insn->kaddr); + + return 0; +} + +/* Ensure this instruction is decoded completely */ +static inline int insn_complete(struct insn *insn) +{ + return insn->opcode.got && insn->modrm.got && insn->sib.got && + insn->displacement.got && insn->immediate.got; +} + +/** + * insn_decode() - Decode an x86 instruction + * @insn: &struct insn to be initialized + * @kaddr: address (in kernel memory) of instruction (or copy thereof) + * @buf_len: length of the insn buffer at @kaddr + * @m: insn mode, see enum insn_mode + * + * Returns: + * 0: if decoding succeeded + * < 0: otherwise. 
+ */ +int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m) +{ + int ret; + +/* #define INSN_MODE_KERN -1 __ignore_sync_check__ mode is only valid in the kernel */ + + if (m == INSN_MODE_KERN) + insn_init(insn, kaddr, buf_len, IS_ENABLED(CONFIG_X86_64)); + else + insn_init(insn, kaddr, buf_len, m == INSN_MODE_64); + + ret = insn_get_length(insn); + if (ret) + return ret; + + if (insn_complete(insn)) + return 0; + + return -EINVAL; } diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index 1e299ac73c86..1cc9da6e29c7 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -4,7 +4,7 @@ #include <linux/linkage.h> #include <asm/errno.h> #include <asm/cpufeatures.h> -#include <asm/alternative-asm.h> +#include <asm/alternative.h> #include <asm/export.h> .pushsection .noinstr.text, "ax" diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S index 41902fe8b859..64801010d312 100644 --- a/arch/x86/lib/memmove_64.S +++ b/arch/x86/lib/memmove_64.S @@ -8,7 +8,7 @@ */ #include <linux/linkage.h> #include <asm/cpufeatures.h> -#include <asm/alternative-asm.h> +#include <asm/alternative.h> #include <asm/export.h> #undef memmove diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S index 0bfd26e4ca9e..9827ae267f96 100644 --- a/arch/x86/lib/memset_64.S +++ b/arch/x86/lib/memset_64.S @@ -3,7 +3,7 @@ #include <linux/linkage.h> #include <asm/cpufeatures.h> -#include <asm/alternative-asm.h> +#include <asm/alternative.h> #include <asm/export.h> /* diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c index 419365c48b2a..cc5f4ea943d3 100644 --- a/arch/x86/lib/mmx_32.c +++ b/arch/x86/lib/mmx_32.c @@ -14,7 +14,7 @@ * tested so far for any MMX solution figured. * * 22/09/2000 - Arjan van de Ven - * Improved for non-egineering-sample Athlons + * Improved for non-engineering-sample Athlons * */ #include <linux/hardirq.h> diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c index 75a0915b0d01..40bbe56bde32 100644 --- a/arch/x86/lib/msr-smp.c +++ b/arch/x86/lib/msr-smp.c @@ -252,7 +252,7 @@ static void __wrmsr_safe_regs_on_cpu(void *info) rv->err = wrmsr_safe_regs(rv->regs); } -int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs) +int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]) { int err; struct msr_regs_info rv; @@ -265,7 +265,7 @@ int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs) } EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu); -int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs) +int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]) { int err; struct msr_regs_info rv; diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c index 3bd905e10ee2..b09cd2ad426c 100644 --- a/arch/x86/lib/msr.c +++ b/arch/x86/lib/msr.c @@ -36,7 +36,7 @@ EXPORT_SYMBOL(msrs_free); * argument @m. 
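The new insn_decode() shown above folds the old insn_init() + insn_get_length() + completeness check into a single call that reports failure, with INSN_MODE_KERN picking the bitness from the kernel config. Purely as an illustration (toy names, not the kernel API), the same shape in user-space C:

#include <stdio.h>

enum toy_mode { TOY_MODE_32, TOY_MODE_64 };

struct toy_insn {
	const unsigned char *buf;
	int buf_len;
	int x86_64;
	int length;
	int opcode_got, modrm_got;	/* "got" flags, as in struct insn_field */
};

static void toy_init(struct toy_insn *insn, const void *buf, int len, int x86_64)
{
	*insn = (struct toy_insn){ .buf = buf, .buf_len = len, .x86_64 = x86_64 };
}

static int toy_get_length(struct toy_insn *insn)
{
	if (insn->buf_len < 1)
		return -1;
	insn->opcode_got = insn->modrm_got = 1;	/* pretend collection worked */
	insn->length = 1;
	return 0;
}

static int toy_complete(struct toy_insn *insn)
{
	return insn->opcode_got && insn->modrm_got;
}

/* Same shape as insn_decode(): init, collect, then verify completeness. */
static int toy_decode(struct toy_insn *insn, const void *buf, int len, enum toy_mode m)
{
	toy_init(insn, buf, len, m == TOY_MODE_64);
	if (toy_get_length(insn))
		return -1;
	return toy_complete(insn) ? 0 : -1;
}

int main(void)
{
	struct toy_insn insn;
	unsigned char nop = 0x90;

	printf("decode: %d, length: %d\n",
	       toy_decode(&insn, &nop, 1, TOY_MODE_64), insn.length);
	return 0;
}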
* */ -int msr_read(u32 msr, struct msr *m) +static int msr_read(u32 msr, struct msr *m) { int err; u64 val; @@ -54,7 +54,7 @@ int msr_read(u32 msr, struct msr *m) * @msr: MSR to write * @m: value to write */ -int msr_write(u32 msr, struct msr *m) +static int msr_write(u32 msr, struct msr *m) { return wrmsrl_safe(msr, m->q); } diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index f6fb1d218dcc..4d32cb06ffd5 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -4,33 +4,65 @@ #include <linux/linkage.h> #include <asm/dwarf2.h> #include <asm/cpufeatures.h> -#include <asm/alternative-asm.h> +#include <asm/alternative.h> #include <asm/export.h> #include <asm/nospec-branch.h> #include <asm/unwind_hints.h> #include <asm/frame.h> -.macro THUNK reg .section .text.__x86.indirect_thunk - .align 32 -SYM_FUNC_START(__x86_indirect_thunk_\reg) - JMP_NOSPEC \reg -SYM_FUNC_END(__x86_indirect_thunk_\reg) - -SYM_FUNC_START_NOALIGN(__x86_retpoline_\reg) +.macro RETPOLINE reg ANNOTATE_INTRA_FUNCTION_CALL - call .Ldo_rop_\@ + call .Ldo_rop_\@ .Lspec_trap_\@: UNWIND_HINT_EMPTY pause lfence - jmp .Lspec_trap_\@ + jmp .Lspec_trap_\@ .Ldo_rop_\@: - mov %\reg, (%_ASM_SP) + mov %\reg, (%_ASM_SP) UNWIND_HINT_FUNC ret -SYM_FUNC_END(__x86_retpoline_\reg) +.endm + +.macro THUNK reg + + .align 32 + +SYM_FUNC_START(__x86_indirect_thunk_\reg) + + ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \ + __stringify(RETPOLINE \reg), X86_FEATURE_RETPOLINE, \ + __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_AMD + +SYM_FUNC_END(__x86_indirect_thunk_\reg) + +.endm + +/* + * This generates .altinstr_replacement symbols for use by objtool. They, + * however, must not actually live in .altinstr_replacement since that will be + * discarded after init, but module alternatives will also reference these + * symbols. + * + * Their names matches the "__x86_indirect_" prefix to mark them as retpolines. 
+ */ +.macro ALT_THUNK reg + + .align 1 + +SYM_FUNC_START_NOALIGN(__x86_indirect_alt_call_\reg) + ANNOTATE_RETPOLINE_SAFE +1: call *%\reg +2: .skip 5-(2b-1b), 0x90 +SYM_FUNC_END(__x86_indirect_alt_call_\reg) + +SYM_FUNC_START_NOALIGN(__x86_indirect_alt_jmp_\reg) + ANNOTATE_RETPOLINE_SAFE +1: jmp *%\reg +2: .skip 5-(2b-1b), 0x90 +SYM_FUNC_END(__x86_indirect_alt_jmp_\reg) .endm @@ -48,7 +80,6 @@ SYM_FUNC_END(__x86_retpoline_\reg) #define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym) #define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg) -#define EXPORT_RETPOLINE(reg) __EXPORT_THUNK(__x86_retpoline_ ## reg) #undef GEN #define GEN(reg) THUNK reg @@ -59,5 +90,13 @@ SYM_FUNC_END(__x86_retpoline_\reg) #include <asm/GEN-for-each-reg.h> #undef GEN -#define GEN(reg) EXPORT_RETPOLINE(reg) +#define GEN(reg) ALT_THUNK reg +#include <asm/GEN-for-each-reg.h> + +#undef GEN +#define GEN(reg) __EXPORT_THUNK(__x86_indirect_alt_call_ ## reg) +#include <asm/GEN-for-each-reg.h> + +#undef GEN +#define GEN(reg) __EXPORT_THUNK(__x86_indirect_alt_jmp_ ## reg) #include <asm/GEN-for-each-reg.h> diff --git a/arch/x86/math-emu/fpu_trig.c b/arch/x86/math-emu/fpu_trig.c index 4a9887851ad8..990d847ae902 100644 --- a/arch/x86/math-emu/fpu_trig.c +++ b/arch/x86/math-emu/fpu_trig.c @@ -547,7 +547,7 @@ static void frndint_(FPU_REG *st0_ptr, u_char st0_tag) single_arg_error(st0_ptr, st0_tag); } -static int fsin(FPU_REG *st0_ptr, u_char tag) +static int f_sin(FPU_REG *st0_ptr, u_char tag) { u_char arg_sign = getsign(st0_ptr); @@ -608,6 +608,11 @@ static int fsin(FPU_REG *st0_ptr, u_char tag) } } +static void fsin(FPU_REG *st0_ptr, u_char tag) +{ + f_sin(st0_ptr, tag); +} + static int f_cos(FPU_REG *st0_ptr, u_char tag) { u_char st0_sign; @@ -724,7 +729,7 @@ static void fsincos(FPU_REG *st0_ptr, u_char st0_tag) } reg_copy(st0_ptr, &arg); - if (!fsin(st0_ptr, st0_tag)) { + if (!f_sin(st0_ptr, st0_tag)) { push(); FPU_copy_to_reg0(&arg, st0_tag); f_cos(&st(0), st0_tag); @@ -1635,7 +1640,7 @@ void FPU_triga(void) } static FUNC_ST0 const trig_table_b[] = { - fprem, fyl2xp1, fsqrt_, fsincos, frndint_, fscale, (FUNC_ST0) fsin, fcos + fprem, fyl2xp1, fsqrt_, fsincos, frndint_, fscale, fsin, fcos }; void FPU_trigb(void) diff --git a/arch/x86/math-emu/reg_ld_str.c b/arch/x86/math-emu/reg_ld_str.c index fe6246ff9887..7ca6417c0c8d 100644 --- a/arch/x86/math-emu/reg_ld_str.c +++ b/arch/x86/math-emu/reg_ld_str.c @@ -964,7 +964,7 @@ int FPU_store_bcd(FPU_REG *st0_ptr, u_char st0_tag, u_char __user *d) /* The return value (in eax) is zero if the result is exact, if bits are changed due to rounding, truncation, etc, then a non-zero value is returned */ -/* Overflow is signalled by a non-zero return value (in eax). +/* Overflow is signaled by a non-zero return value (in eax). In the case of overflow, the returned significand always has the largest possible value */ int FPU_round_to_int(FPU_REG *r, u_char tag) diff --git a/arch/x86/math-emu/reg_round.S b/arch/x86/math-emu/reg_round.S index 11a1f798451b..4a9fc3cc5a4d 100644 --- a/arch/x86/math-emu/reg_round.S +++ b/arch/x86/math-emu/reg_round.S @@ -575,7 +575,7 @@ Normalise_result: #ifdef PECULIAR_486 /* * This implements a special feature of 80486 behaviour. - * Underflow will be signalled even if the number is + * Underflow will be signaled even if the number is * not a denormal after rounding. * This difference occurs only for masked underflow, and not * in the unmasked case. 
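The fpu_trig.c hunk above renames the int-returning helper to f_sin() and adds a void fsin() wrapper so the FUNC_ST0 table no longer needs the "(FUNC_ST0) fsin" cast; calling a function through a pointer of a mismatched type is undefined behaviour and is exactly what indirect-call type checking rejects. A small stand-alone illustration of the same fix (the table and signatures below are simplified, not the math-emu types):

#include <stdio.h>

typedef void (*func_st0)(double *st0, unsigned char tag);

/* Helper that genuinely needs to return a status for one internal caller. */
static int f_sin(double *st0, unsigned char tag)
{
	(void)tag;
	*st0 = 0.0;
	return 0;
}

/* Thin wrapper with the table's exact signature -- no cast needed. */
static void fsin(double *st0, unsigned char tag)
{
	f_sin(st0, tag);
}

static void fcos(double *st0, unsigned char tag)
{
	(void)tag;
	*st0 = 1.0;
}

/* Before the fix this slot held a casted f_sin with the wrong type. */
static const func_st0 trig_table[] = { fsin, fcos };

int main(void)
{
	double st0 = 42.0;

	trig_table[0](&st0, 0);
	printf("st0 = %f\n", st0);
	return 0;
}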
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index a73347e2cdfc..1c548ad00752 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1497,7 +1497,7 @@ DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault) * userspace task is trying to access some valid (from guest's point of * view) memory which is not currently mapped by the host (e.g. the * memory is swapped out). Note, the corresponding "page ready" event - * which is injected when the memory becomes available, is delived via + * which is injected when the memory becomes available, is delivered via * an interrupt mechanism and not a #PF exception * (see arch/x86/kernel/kvm.c: sysvec_kvm_asyncpf_interrupt()). * @@ -1523,7 +1523,7 @@ DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault) * * In case the fault hit a RCU idle region the conditional entry * code reenabled RCU to avoid subsequent wreckage which helps - * debugability. + * debuggability. */ state = irqentry_enter(regs); diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index dd694fb93916..75ef19aa8903 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -29,7 +29,7 @@ /* * We need to define the tracepoints somewhere, and tlb.c - * is only compied when SMP=y. + * is only compiled when SMP=y. */ #define CREATE_TRACE_POINTS #include <trace/events/tlb.h> @@ -756,7 +756,7 @@ void __init init_mem_mapping(void) #ifdef CONFIG_X86_64 if (max_pfn > max_low_pfn) { - /* can we preseve max_low_pfn ?*/ + /* can we preserve max_low_pfn ?*/ max_low_pfn = max_pfn; } #else @@ -939,7 +939,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end) { /* * end could be not aligned, and We can not align that, - * decompresser could be confused by aligned initrd_end + * decompressor could be confused by aligned initrd_end * We already reserve the end partial page before in * - i386_start_kernel() * - x86_64_start_kernel() @@ -1017,7 +1017,7 @@ void __init zone_sizes_init(void) free_area_init(max_zone_pfns); } -__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { +__visible DEFINE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate) = { .loaded_mm = &init_mm, .next_asid = 1, .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */ diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index da31c2635ee4..21ffb03f6c72 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -755,8 +755,6 @@ void __init mem_init(void) after_bootmem = 1; x86_init.hyper.init_after_bootmem(); - mem_init_print_info(NULL); - /* * Check boundaries twice: Some fundamental inconsistencies can * be detected at build time already. diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index b5a3fa4033d3..e527d829e1ed 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -172,7 +172,7 @@ static void sync_global_pgds_l4(unsigned long start, unsigned long end) /* * With folded p4d, pgd_none() is always false, we need to - * handle synchonization on p4d level. + * handle synchronization on p4d level. */ MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref)); p4d_ref = p4d_offset(pgd_ref, addr); @@ -826,6 +826,106 @@ void __init paging_init(void) zone_sizes_init(); } +#ifdef CONFIG_SPARSEMEM_VMEMMAP +#define PAGE_UNUSED 0xFD + +/* + * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges + * from unused_pmd_start to next PMD_SIZE boundary. 
+ */ +static unsigned long unused_pmd_start __meminitdata; + +static void __meminit vmemmap_flush_unused_pmd(void) +{ + if (!unused_pmd_start) + return; + /* + * Clears (unused_pmd_start, PMD_END] + */ + memset((void *)unused_pmd_start, PAGE_UNUSED, + ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start); + unused_pmd_start = 0; +} + +#ifdef CONFIG_MEMORY_HOTPLUG +/* Returns true if the PMD is completely unused and thus it can be freed */ +static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end) +{ + unsigned long start = ALIGN_DOWN(addr, PMD_SIZE); + + /* + * Flush the unused range cache to ensure that memchr_inv() will work + * for the whole range. + */ + vmemmap_flush_unused_pmd(); + memset((void *)addr, PAGE_UNUSED, end - addr); + + return !memchr_inv((void *)start, PAGE_UNUSED, PMD_SIZE); +} +#endif + +static void __meminit __vmemmap_use_sub_pmd(unsigned long start) +{ + /* + * As we expect to add in the same granularity as we remove, it's + * sufficient to mark only some piece used to block the memmap page from + * getting removed when removing some other adjacent memmap (just in + * case the first memmap never gets initialized e.g., because the memory + * block never gets onlined). + */ + memset((void *)start, 0, sizeof(struct page)); +} + +static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end) +{ + /* + * We only optimize if the new used range directly follows the + * previously unused range (esp., when populating consecutive sections). + */ + if (unused_pmd_start == start) { + if (likely(IS_ALIGNED(end, PMD_SIZE))) + unused_pmd_start = 0; + else + unused_pmd_start = end; + return; + } + + /* + * If the range does not contiguously follows previous one, make sure + * to mark the unused range of the previous one so it can be removed. + */ + vmemmap_flush_unused_pmd(); + __vmemmap_use_sub_pmd(start); +} + + +static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end) +{ + vmemmap_flush_unused_pmd(); + + /* + * Could be our memmap page is filled with PAGE_UNUSED already from a + * previous remove. Make sure to reset it. + */ + __vmemmap_use_sub_pmd(start); + + /* + * Mark with PAGE_UNUSED the unused parts of the new memmap range + */ + if (!IS_ALIGNED(start, PMD_SIZE)) + memset((void *)start, PAGE_UNUSED, + start - ALIGN_DOWN(start, PMD_SIZE)); + + /* + * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of + * consecutive sections. Remember for the last added PMD where the + * unused range begins. + */ + if (!IS_ALIGNED(end, PMD_SIZE)) + unused_pmd_start = end; +} +#endif + /* * Memory hotplug specific functions */ @@ -871,8 +971,6 @@ int arch_add_memory(int nid, u64 start, u64 size, return add_pages(nid, start_pfn, nr_pages, params); } -#define PAGE_INUSE 0xFD - static void __meminit free_pagetable(struct page *page, int order) { unsigned long magic; @@ -962,7 +1060,6 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end, { unsigned long next, pages = 0; pte_t *pte; - void *page_addr; phys_addr_t phys_addr; pte = pte_start + pte_index(addr); @@ -983,42 +1080,15 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end, if (phys_addr < (phys_addr_t)0x40000000) return; - if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) { - /* - * Do not free direct mapping pages since they were - * freed when offlining, or simplely not in use. 
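The vmemmap helpers added above track partially used PMD-sized memmap ranges by filling the unused parts with a 0xFD marker and only freeing the huge page once memchr_inv() finds nothing else in it. The toy below models that decision on an ordinary buffer; the PMD size is shrunk and memchr_inv() is replaced by a local scan, purely for illustration:

#include <stdio.h>
#include <string.h>

#define TOY_PMD_SIZE 64
#define PAGE_UNUSED  0xFD

static unsigned char pmd_area[TOY_PMD_SIZE];

/* Minimal stand-in for memchr_inv(): first byte differing from c, or NULL. */
static const void *scan_inv(const void *s, int c, size_t n)
{
	const unsigned char *p = s;
	size_t i;

	for (i = 0; i < n; i++)
		if (p[i] != (unsigned char)c)
			return p + i;
	return NULL;
}

/* Mark a just-removed sub-range unused, then ask whether the whole PMD is. */
static int pmd_is_unused(size_t start, size_t end)
{
	memset(pmd_area + start, PAGE_UNUSED, end - start);
	return scan_inv(pmd_area, PAGE_UNUSED, TOY_PMD_SIZE) == NULL;
}

int main(void)
{
	memset(pmd_area, 0, sizeof(pmd_area));	/* "in use" memmap data */

	printf("first half freed:  can free PMD? %d\n", pmd_is_unused(0, 32));
	printf("second half freed: can free PMD? %d\n", pmd_is_unused(32, 64));
	return 0;
}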
- */ - if (!direct) - free_pagetable(pte_page(*pte), 0); - - spin_lock(&init_mm.page_table_lock); - pte_clear(&init_mm, addr, pte); - spin_unlock(&init_mm.page_table_lock); - - /* For non-direct mapping, pages means nothing. */ - pages++; - } else { - /* - * If we are here, we are freeing vmemmap pages since - * direct mapped memory ranges to be freed are aligned. - * - * If we are not removing the whole page, it means - * other page structs in this page are being used and - * we canot remove them. So fill the unused page_structs - * with 0xFD, and remove the page when it is wholly - * filled with 0xFD. - */ - memset((void *)addr, PAGE_INUSE, next - addr); + if (!direct) + free_pagetable(pte_page(*pte), 0); - page_addr = page_address(pte_page(*pte)); - if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) { - free_pagetable(pte_page(*pte), 0); + spin_lock(&init_mm.page_table_lock); + pte_clear(&init_mm, addr, pte); + spin_unlock(&init_mm.page_table_lock); - spin_lock(&init_mm.page_table_lock); - pte_clear(&init_mm, addr, pte); - spin_unlock(&init_mm.page_table_lock); - } - } + /* For non-direct mapping, pages means nothing. */ + pages++; } /* Call free_pte_table() in remove_pmd_table(). */ @@ -1034,7 +1104,6 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end, unsigned long next, pages = 0; pte_t *pte_base; pmd_t *pmd; - void *page_addr; pmd = pmd_start + pmd_index(addr); for (; addr < end; addr = next, pmd++) { @@ -1054,22 +1123,16 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end, pmd_clear(pmd); spin_unlock(&init_mm.page_table_lock); pages++; - } else { - /* If here, we are freeing vmemmap pages. */ - memset((void *)addr, PAGE_INUSE, next - addr); - - page_addr = page_address(pmd_page(*pmd)); - if (!memchr_inv(page_addr, PAGE_INUSE, - PMD_SIZE)) { + } +#ifdef CONFIG_SPARSEMEM_VMEMMAP + else if (vmemmap_pmd_is_unused(addr, next)) { free_hugepage_table(pmd_page(*pmd), altmap); - spin_lock(&init_mm.page_table_lock); pmd_clear(pmd); spin_unlock(&init_mm.page_table_lock); - } } - +#endif continue; } @@ -1090,7 +1153,6 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end, unsigned long next, pages = 0; pmd_t *pmd_base; pud_t *pud; - void *page_addr; pud = pud_start + pud_index(addr); for (; addr < end; addr = next, pud++) { @@ -1099,33 +1161,13 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end, if (!pud_present(*pud)) continue; - if (pud_large(*pud)) { - if (IS_ALIGNED(addr, PUD_SIZE) && - IS_ALIGNED(next, PUD_SIZE)) { - if (!direct) - free_pagetable(pud_page(*pud), - get_order(PUD_SIZE)); - - spin_lock(&init_mm.page_table_lock); - pud_clear(pud); - spin_unlock(&init_mm.page_table_lock); - pages++; - } else { - /* If here, we are freeing vmemmap pages. 
*/ - memset((void *)addr, PAGE_INUSE, next - addr); - - page_addr = page_address(pud_page(*pud)); - if (!memchr_inv(page_addr, PAGE_INUSE, - PUD_SIZE)) { - free_pagetable(pud_page(*pud), - get_order(PUD_SIZE)); - - spin_lock(&init_mm.page_table_lock); - pud_clear(pud); - spin_unlock(&init_mm.page_table_lock); - } - } - + if (pud_large(*pud) && + IS_ALIGNED(addr, PUD_SIZE) && + IS_ALIGNED(next, PUD_SIZE)) { + spin_lock(&init_mm.page_table_lock); + pud_clear(pud); + spin_unlock(&init_mm.page_table_lock); + pages++; continue; } @@ -1197,6 +1239,9 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct, void __ref vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap) { + VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE)); + VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE)); + remove_pagetable(start, end, false, altmap); } @@ -1306,8 +1351,6 @@ void __init mem_init(void) kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER); preallocate_vmalloc_pages(); - - mem_init_print_info(NULL); } #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT @@ -1538,11 +1581,17 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start, addr_end = addr + PMD_SIZE; p_end = p + PMD_SIZE; + + if (!IS_ALIGNED(addr, PMD_SIZE) || + !IS_ALIGNED(next, PMD_SIZE)) + vmemmap_use_new_sub_pmd(addr, next); + continue; } else if (altmap) return -ENOMEM; /* no fallback */ } else if (pmd_large(*pmd)) { vmemmap_verify((pte_t *)pmd, node, addr, next); + vmemmap_use_sub_pmd(addr, next); continue; } if (vmemmap_populate_basepages(addr, next, node, NULL)) @@ -1556,6 +1605,9 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, { int err; + VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE)); + VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE)); + if (end - start < PAGES_PER_SECTION * sizeof(struct page)) err = vmemmap_populate_basepages(start, end, node, NULL); else if (boot_cpu_has(X86_FEATURE_PSE)) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 9e5ccc56f8e0..12c686c65ea9 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -481,25 +481,6 @@ void iounmap(volatile void __iomem *addr) } EXPORT_SYMBOL(iounmap); -int __init arch_ioremap_p4d_supported(void) -{ - return 0; -} - -int __init arch_ioremap_pud_supported(void) -{ -#ifdef CONFIG_X86_64 - return boot_cpu_has(X86_FEATURE_GBPAGES); -#else - return 0; -#endif -} - -int __init arch_ioremap_pmd_supported(void) -{ - return boot_cpu_has(X86_FEATURE_PSE); -} - /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem * access diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index 6e6b39710e5f..557f0fe25dff 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -96,7 +96,7 @@ void __init kernel_randomize_memory(void) memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) + CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING; - /* Adapt phyiscal memory region size based on available memory */ + /* Adapt physical memory region size based on available memory */ if (memory_tb < kaslr_regions[0].size_tb) kaslr_regions[0].size_tb = memory_tb; diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c index be020a7bc414..d3efbc5b3449 100644 --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Support for MMIO probes. - * Benfit many code from kprobes + * Benefit many code from kprobes * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>. 
* 2007 Alexander Eichner * 2008 Pekka Paalanen <pq@iki.fi> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 4b01f7dbaf30..ff08dc463634 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -19,6 +19,7 @@ #include <linux/kernel.h> #include <linux/bitops.h> #include <linux/dma-mapping.h> +#include <linux/virtio_config.h> #include <asm/tlbflush.h> #include <asm/fixmap.h> @@ -44,8 +45,6 @@ EXPORT_SYMBOL(sme_me_mask); DEFINE_STATIC_KEY_FALSE(sev_enable_key); EXPORT_SYMBOL_GPL(sev_enable_key); -bool sev_enabled __section(".data"); - /* Buffer used for early in-place encryption by BSP, no locking needed */ static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE); @@ -262,7 +261,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) if (pgprot_val(old_prot) == pgprot_val(new_prot)) return; - pa = pfn << page_level_shift(level); + pa = pfn << PAGE_SHIFT; size = page_level_size(level); /* @@ -373,14 +372,14 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size) * up under SME the trampoline area cannot be encrypted, whereas under SEV * the trampoline area must be encrypted. */ -bool sme_active(void) +bool sev_active(void) { - return sme_me_mask && !sev_enabled; + return sev_status & MSR_AMD64_SEV_ENABLED; } -bool sev_active(void) +bool sme_active(void) { - return sev_status & MSR_AMD64_SEV_ENABLED; + return sme_me_mask && !sev_active(); } EXPORT_SYMBOL_GPL(sev_active); @@ -484,3 +483,8 @@ void __init mem_encrypt_init(void) print_mem_encrypt_feature_info(); } +int arch_has_restricted_virtio_memory_access(void) +{ + return sev_active(); +} +EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access); diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S index 7a84fc8bc5c3..17d292b7072f 100644 --- a/arch/x86/mm/mem_encrypt_boot.S +++ b/arch/x86/mm/mem_encrypt_boot.S @@ -27,7 +27,7 @@ SYM_FUNC_START(sme_encrypt_execute) * - stack page (PAGE_SIZE) * - encryption routine page (PAGE_SIZE) * - intermediate copy buffer (PMD_PAGE_SIZE) - * R8 - physcial address of the pagetables to use for encryption + * R8 - physical address of the pagetables to use for encryption */ push %rbp diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c index 6c5eb6f3f14f..04aba7e80a36 100644 --- a/arch/x86/mm/mem_encrypt_identity.c +++ b/arch/x86/mm/mem_encrypt_identity.c @@ -503,14 +503,10 @@ void __init sme_enable(struct boot_params *bp) #define AMD_SME_BIT BIT(0) #define AMD_SEV_BIT BIT(1) - /* - * Set the feature mask (SME or SEV) based on whether we are - * running under a hypervisor. - */ - eax = 1; - ecx = 0; - native_cpuid(&eax, &ebx, &ecx, &edx); - feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT; + + /* Check the SEV MSR whether SEV or SME is enabled */ + sev_status = __rdmsr(MSR_AMD64_SEV); + feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT; /* * Check for the SME/SEV feature: @@ -530,22 +526,28 @@ void __init sme_enable(struct boot_params *bp) /* Check if memory encryption is enabled */ if (feature_mask == AMD_SME_BIT) { + /* + * No SME if Hypervisor bit is set. This check is here to + * prevent a guest from trying to enable SME. For running as a + * KVM guest the MSR_K8_SYSCFG will be sufficient, but there + * might be other hypervisors which emulate that MSR as non-zero + * or even pass it through to the guest. 
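The sme_enable() rework in the mem_encrypt_identity.c hunk reads MSR_AMD64_SEV first and only consults the CPUID hypervisor bit and MSR_K8_SYSCFG on the bare-metal SME path, so a hypervisor that emulates SYSCFG can no longer steer a guest into enabling SME. A user-space model of that decision order, with the MSR and CPUID reads stubbed out (the stubs are illustrative, not the kernel interfaces, and the CPUID 0x8000001f feature check is omitted):

#include <stdbool.h>
#include <stdio.h>

/* Stubbed platform state; in the kernel these come from __rdmsr()/CPUID. */
struct platform {
	bool sev_status_enabled;	/* MSR_AMD64_SEV: SEV enabled bit */
	bool hypervisor_bit;		/* CPUID.1:ECX[31] */
	bool syscfg_mem_encrypt;	/* MSR_K8_SYSCFG memory-encryption bit */
};

enum enc_mode { ENC_NONE, ENC_SME, ENC_SEV };

static enum enc_mode detect(const struct platform *p)
{
	/* 1. The SEV status MSR decides guest-side encryption directly. */
	if (p->sev_status_enabled)
		return ENC_SEV;

	/* 2. Only the host SME path looks at CPUID and SYSCFG. */
	if (p->hypervisor_bit)
		return ENC_NONE;	/* never enable SME inside a guest */
	if (!p->syscfg_mem_encrypt)
		return ENC_NONE;
	return ENC_SME;
}

int main(void)
{
	struct platform sev_guest   = { .sev_status_enabled = true };
	struct platform plain_guest = { .hypervisor_bit = true,
					.syscfg_mem_encrypt = true };
	struct platform sme_host    = { .syscfg_mem_encrypt = true };

	printf("%d %d %d\n", detect(&sev_guest), detect(&plain_guest),
	       detect(&sme_host));
	return 0;
}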
+ * A malicious hypervisor can still trick a guest into this + * path, but there is no way to protect against that. + */ + eax = 1; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (ecx & BIT(31)) + return; + /* For SME, check the SYSCFG MSR */ msr = __rdmsr(MSR_K8_SYSCFG); if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT)) return; } else { - /* For SEV, check the SEV MSR */ - msr = __rdmsr(MSR_AMD64_SEV); - if (!(msr & MSR_AMD64_SEV_ENABLED)) - return; - - /* Save SEV_STATUS to avoid reading MSR again */ - sev_status = msr; - /* SEV state cannot be controlled by a command line option */ sme_me_mask = me_mask; - sev_enabled = true; physical_mask &= ~sme_me_mask; return; } diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c index ca311aaa67b8..3112ca7786ed 100644 --- a/arch/x86/mm/pat/memtype.c +++ b/arch/x86/mm/pat/memtype.c @@ -695,7 +695,7 @@ int memtype_free(u64 start, u64 end) /** - * lookup_memtype - Looksup the memory type for a physical address + * lookup_memtype - Looks up the memory type for a physical address * @paddr: physical address of which memory type needs to be looked up * * Only to be called when PAT is enabled @@ -800,6 +800,7 @@ void memtype_free_io(resource_size_t start, resource_size_t end) memtype_free(start, end); } +#ifdef CONFIG_X86_PAT int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size) { enum page_cache_mode type = _PAGE_CACHE_MODE_WC; @@ -813,6 +814,7 @@ void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size) memtype_free_io(start, start + size); } EXPORT_SYMBOL(arch_io_free_memtype_wc); +#endif pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot) diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c index 16f878c26667..156cd235659f 100644 --- a/arch/x86/mm/pat/set_memory.c +++ b/arch/x86/mm/pat/set_memory.c @@ -16,6 +16,8 @@ #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/libnvdimm.h> +#include <linux/vmstat.h> +#include <linux/kernel.h> #include <asm/e820/api.h> #include <asm/processor.h> @@ -91,6 +93,12 @@ static void split_page_count(int level) return; direct_pages_count[level]--; + if (system_state == SYSTEM_RUNNING) { + if (level == PG_LEVEL_2M) + count_vm_event(DIRECT_MAP_LEVEL2_SPLIT); + else if (level == PG_LEVEL_1G) + count_vm_event(DIRECT_MAP_LEVEL3_SPLIT); + } direct_pages_count[level - 1] += PTRS_PER_PTE; } @@ -680,7 +688,7 @@ pmd_t *lookup_pmd_address(unsigned long address) * end up in this kind of memory, for instance. * * This could be optimized, but it is only intended to be - * used at inititalization time, and keeping it + * used at initialization time, and keeping it * unoptimized should increase the testing coverage for * the more obscure platforms. */ diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index f6a9e2e36642..d27cf69e811d 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -780,14 +780,6 @@ int pmd_clear_huge(pmd_t *pmd) return 0; } -/* - * Until we support 512GB pages, skip them in the vmap area. - */ -int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) -{ - return 0; -} - #ifdef CONFIG_X86_64 /** * pud_free_pmd_page - Clear pud entry and free pmd page. @@ -861,11 +853,6 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) #else /* !CONFIG_X86_64 */ -int pud_free_pmd_page(pud_t *pud, unsigned long addr) -{ - return pud_none(*pud); -} - /* * Disable free page handling on x86-PAE. This assures that ioremap() * does not update sync'd pmd entries. 
See vmalloc_sync_one(). diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c index 8873ed1438a9..a2332eef66e9 100644 --- a/arch/x86/mm/pkeys.c +++ b/arch/x86/mm/pkeys.c @@ -128,7 +128,7 @@ u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) | /* * Called from the FPU code when creating a fresh set of FPU * registers. This is called from a very specific context where - * we know the FPU regstiers are safe for use and we can use PKRU + * we know the FPU registers are safe for use and we can use PKRU * directly. */ void copy_init_pkru_to_fpregs(void) diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index 1aab92930569..5d5c7bb50ce9 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -361,7 +361,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end, * global, so set it as global in both copies. Note: * the X86_FEATURE_PGE check is not _required_ because * the CPU ignores _PAGE_GLOBAL when PGE is not - * supported. The check keeps consistentency with + * supported. The check keeps consistency with * code that only set this bit when supported. */ if (boot_cpu_has(X86_FEATURE_PGE)) @@ -440,10 +440,9 @@ static void __init pti_clone_user_shared(void) for_each_possible_cpu(cpu) { /* - * The SYSCALL64 entry code needs to be able to find the - * thread stack and needs one word of scratch space in which - * to spill a register. All of this lives in the TSS, in - * the sp1 and sp2 slots. + * The SYSCALL64 entry code needs one word of scratch space + * in which to spill a register. It lives in the sp2 slot + * of the CPU's TSS. * * This is done for all possible CPUs during boot to ensure * that it's propagated to all mms. @@ -512,7 +511,7 @@ static void pti_clone_entry_text(void) static inline bool pti_kernel_image_global_ok(void) { /* - * Systems with PCIDs get litlle benefit from global + * Systems with PCIDs get little benefit from global * kernel text and are not worth the downsides. */ if (cpu_feature_enabled(X86_FEATURE_PCID)) diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 569ac1d57f55..78804680e923 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -24,7 +24,7 @@ # define __flush_tlb_local native_flush_tlb_local # define __flush_tlb_global native_flush_tlb_global # define __flush_tlb_one_user(addr) native_flush_tlb_one_user(addr) -# define __flush_tlb_others(msk, info) native_flush_tlb_others(msk, info) +# define __flush_tlb_multi(msk, info) native_flush_tlb_multi(msk, info) #endif /* @@ -106,7 +106,7 @@ static inline u16 kern_pcid(u16 asid) #ifdef CONFIG_PAGE_TABLE_ISOLATION /* - * Make sure that the dynamic ASID space does not confict with the + * Make sure that the dynamic ASID space does not conflict with the * bit we are using to switch between user and kernel ASIDs. */ BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT)); @@ -300,7 +300,7 @@ void leave_mm(int cpu) return; /* Warn if we're not lazy. 
*/ - WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy)); + WARN_ON(!this_cpu_read(cpu_tlbstate_shared.is_lazy)); switch_mm(NULL, &init_mm, NULL); } @@ -316,7 +316,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next, local_irq_restore(flags); } -static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next) +static unsigned long mm_mangle_tif_spec_ib(struct task_struct *next) { unsigned long next_tif = task_thread_info(next)->flags; unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB; @@ -424,7 +424,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, { struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm); u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); - bool was_lazy = this_cpu_read(cpu_tlbstate.is_lazy); + bool was_lazy = this_cpu_read(cpu_tlbstate_shared.is_lazy); unsigned cpu = smp_processor_id(); u64 next_tlb_gen; bool need_flush; @@ -439,7 +439,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, * NB: leave_mm() calls us with prev == NULL and tsk == NULL. */ - /* We don't want flush_tlb_func_* to run concurrently with us. */ + /* We don't want flush_tlb_func() to run concurrently with us. */ if (IS_ENABLED(CONFIG_PROVE_LOCKING)) WARN_ON_ONCE(!irqs_disabled()); @@ -469,7 +469,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, __flush_tlb_all(); } #endif - this_cpu_write(cpu_tlbstate.is_lazy, false); + if (was_lazy) + this_cpu_write(cpu_tlbstate_shared.is_lazy, false); /* * The membarrier system call requires a full memory barrier and @@ -490,7 +491,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, /* * Even in lazy TLB mode, the CPU should stay set in the * mm_cpumask. The TLB shootdown code can figure out from - * from cpu_tlbstate.is_lazy whether or not to send an IPI. + * cpu_tlbstate_shared.is_lazy whether or not to send an IPI. */ if (WARN_ON_ONCE(real_prev != &init_mm && !cpumask_test_cpu(cpu, mm_cpumask(next)))) @@ -598,7 +599,7 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm) return; - this_cpu_write(cpu_tlbstate.is_lazy, true); + this_cpu_write(cpu_tlbstate_shared.is_lazy, true); } /* @@ -647,14 +648,13 @@ void initialize_tlbstate_and_flush(void) } /* - * flush_tlb_func_common()'s memory ordering requirement is that any + * flush_tlb_func()'s memory ordering requirement is that any * TLB fills that happen after we flush the TLB are ordered after we * read active_mm's tlb_gen. We don't need any explicit barriers * because all x86 flush operations are serializing and the * atomic64_read operation won't be reordered by the compiler. */ -static void flush_tlb_func_common(const struct flush_tlb_info *f, - bool local, enum tlb_flush_reason reason) +static void flush_tlb_func(void *info) { /* * We have three different tlb_gen values in here. They are: @@ -665,28 +665,40 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, * - f->new_tlb_gen: the generation that the requester of the flush * wants us to catch up to. 
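The unified flush_tlb_func() in this hunk compares three generation numbers -- the CPU's local_tlb_gen, the mm's mm_tlb_gen, and the requester's f->new_tlb_gen -- and picks between doing nothing, a partial flush, or a full flush. That decision is plain integer logic, sketched below outside the kernel (the struct is reduced to the fields the comparison needs; this is an illustration, not the kernel code):

#include <stdint.h>
#include <stdio.h>

#define TLB_FLUSH_ALL (~0UL)

struct flush_req {
	unsigned long start, end;
	uint64_t new_tlb_gen;
};

enum action { FLUSH_NONE, FLUSH_PARTIAL, FLUSH_FULL };

/* Same ordering rules as flush_tlb_func() in the hunk above. */
static enum action decide(uint64_t local_tlb_gen, uint64_t mm_tlb_gen,
			  const struct flush_req *f)
{
	if (local_tlb_gen == mm_tlb_gen)
		return FLUSH_NONE;	/* an earlier flush already caught us up */

	if (f->end != TLB_FLUSH_ALL &&
	    f->new_tlb_gen == local_tlb_gen + 1 &&
	    f->new_tlb_gen == mm_tlb_gen)
		return FLUSH_PARTIAL;	/* exactly one ranged flush is pending */

	return FLUSH_FULL;
}

int main(void)
{
	struct flush_req one_range = { .start = 0x1000, .end = 0x3000,
				       .new_tlb_gen = 2 };
	struct flush_req stale     = { .start = 0, .end = TLB_FLUSH_ALL,
				       .new_tlb_gen = 2 };

	printf("%d\n", decide(1, 2, &one_range));	/* FLUSH_PARTIAL */
	printf("%d\n", decide(1, 3, &one_range));	/* FLUSH_FULL */
	printf("%d\n", decide(3, 3, &stale));		/* FLUSH_NONE */
	return 0;
}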
*/ + const struct flush_tlb_info *f = info; struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen); u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen); + bool local = smp_processor_id() == f->initiating_cpu; + unsigned long nr_invalidate = 0; /* This code cannot presently handle being reentered. */ VM_WARN_ON(!irqs_disabled()); + if (!local) { + inc_irq_stat(irq_tlb_count); + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); + + /* Can only happen on remote CPUs */ + if (f->mm && f->mm != loaded_mm) + return; + } + if (unlikely(loaded_mm == &init_mm)) return; VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) != loaded_mm->context.ctx_id); - if (this_cpu_read(cpu_tlbstate.is_lazy)) { + if (this_cpu_read(cpu_tlbstate_shared.is_lazy)) { /* * We're in lazy mode. We need to at least flush our * paging-structure cache to avoid speculatively reading * garbage into our TLB. Since switching to init_mm is barely * slower than a minimal flush, just switch to init_mm. * - * This should be rare, with native_flush_tlb_others skipping + * This should be rare, with native_flush_tlb_multi() skipping * IPIs to lazy TLB mode CPUs. */ switch_mm_irqs_off(NULL, &init_mm, NULL); @@ -700,8 +712,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, * be handled can catch us all the way up, leaving no work for * the second flush. */ - trace_tlb_flush(reason, 0); - return; + goto done; } WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen); @@ -736,7 +747,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, * 3, we'd be break the invariant: we'd update local_tlb_gen above * 1 without the full flush that's needed for tlb_gen 2. * - * 2. f->new_tlb_gen == mm_tlb_gen. This is purely an optimiation. + * 2. f->new_tlb_gen == mm_tlb_gen. This is purely an optimization. * Partial TLB flushes are not all that much cheaper than full TLB * flushes, so it seems unlikely that it would be a performance win * to do a partial flush if that won't bring our TLB fully up to @@ -748,56 +759,54 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, f->new_tlb_gen == local_tlb_gen + 1 && f->new_tlb_gen == mm_tlb_gen) { /* Partial flush */ - unsigned long nr_invalidate = (f->end - f->start) >> f->stride_shift; unsigned long addr = f->start; + nr_invalidate = (f->end - f->start) >> f->stride_shift; + while (addr < f->end) { flush_tlb_one_user(addr); addr += 1UL << f->stride_shift; } if (local) count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_invalidate); - trace_tlb_flush(reason, nr_invalidate); } else { /* Full flush. */ + nr_invalidate = TLB_FLUSH_ALL; + flush_tlb_local(); if (local) count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); - trace_tlb_flush(reason, TLB_FLUSH_ALL); } /* Both paths above update our state to mm_tlb_gen. */ this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen); -} - -static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason) -{ - const struct flush_tlb_info *f = info; - flush_tlb_func_common(f, true, reason); + /* Tracing is done in a unified manner to reduce the code size */ +done: + trace_tlb_flush(!local ? TLB_REMOTE_SHOOTDOWN : + (f->mm == NULL) ? 
TLB_LOCAL_SHOOTDOWN : + TLB_LOCAL_MM_SHOOTDOWN, + nr_invalidate); } -static void flush_tlb_func_remote(void *info) +static bool tlb_is_not_lazy(int cpu) { - const struct flush_tlb_info *f = info; - - inc_irq_stat(irq_tlb_count); - - if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm)) - return; - - count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); - flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN); + return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu); } -static bool tlb_is_not_lazy(int cpu, void *data) -{ - return !per_cpu(cpu_tlbstate.is_lazy, cpu); -} +static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask); + +DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared); +EXPORT_PER_CPU_SYMBOL(cpu_tlbstate_shared); -STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask, +STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask, const struct flush_tlb_info *info) { + /* + * Do accounting and tracing. Note that there are (and have always been) + * cases in which a remote TLB flush will be traced, but eventually + * would not happen. + */ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); if (info->end == TLB_FLUSH_ALL) trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL); @@ -815,18 +824,42 @@ STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask, * up on the new contents of what used to be page tables, while * doing a speculative memory access. */ - if (info->freed_tables) - smp_call_function_many(cpumask, flush_tlb_func_remote, - (void *)info, 1); - else - on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote, - (void *)info, 1, cpumask); + if (info->freed_tables) { + on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true); + } else { + /* + * Although we could have used on_each_cpu_cond_mask(), + * open-coding it has performance advantages, as it eliminates + * the need for indirect calls or retpolines. In addition, it + * allows to use a designated cpumask for evaluating the + * condition, instead of allocating one. + * + * This code works under the assumption that there are no nested + * TLB flushes, an assumption that is already made in + * flush_tlb_mm_range(). + * + * cond_cpumask is logically a stack-local variable, but it is + * more efficient to have it off the stack and not to allocate + * it on demand. Preemption is disabled and this code is + * non-reentrant. 
+ */ + struct cpumask *cond_cpumask = this_cpu_ptr(&flush_tlb_mask); + int cpu; + + cpumask_clear(cond_cpumask); + + for_each_cpu(cpu, cpumask) { + if (tlb_is_not_lazy(cpu)) + __cpumask_set_cpu(cpu, cond_cpumask); + } + on_each_cpu_mask(cond_cpumask, flush_tlb_func, (void *)info, true); + } } -void flush_tlb_others(const struct cpumask *cpumask, +void flush_tlb_multi(const struct cpumask *cpumask, const struct flush_tlb_info *info) { - __flush_tlb_others(cpumask, info); + __flush_tlb_multi(cpumask, info); } /* @@ -847,7 +880,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct flush_tlb_info, flush_tlb_info); static DEFINE_PER_CPU(unsigned int, flush_tlb_info_idx); #endif -static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm, +static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned int stride_shift, bool freed_tables, u64 new_tlb_gen) @@ -869,14 +902,15 @@ static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm, info->stride_shift = stride_shift; info->freed_tables = freed_tables; info->new_tlb_gen = new_tlb_gen; + info->initiating_cpu = smp_processor_id(); return info; } -static inline void put_flush_tlb_info(void) +static void put_flush_tlb_info(void) { #ifdef CONFIG_DEBUG_VM - /* Complete reentrency prevention checks */ + /* Complete reentrancy prevention checks */ barrier(); this_cpu_dec(flush_tlb_info_idx); #endif @@ -905,16 +939,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, info = get_flush_tlb_info(mm, start, end, stride_shift, freed_tables, new_tlb_gen); - if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) { + /* + * flush_tlb_multi() is not optimized for the common case in which only + * a local TLB flush is needed. Optimize this use-case by calling + * flush_tlb_func_local() directly in this case. + */ + if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) { + flush_tlb_multi(mm_cpumask(mm), info); + } else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) { lockdep_assert_irqs_enabled(); local_irq_disable(); - flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN); + flush_tlb_func(info); local_irq_enable(); } - if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) - flush_tlb_others(mm_cpumask(mm), info); - put_flush_tlb_info(); put_cpu(); } @@ -1119,34 +1157,30 @@ void __flush_tlb_all(void) } EXPORT_SYMBOL_GPL(__flush_tlb_all); -/* - * arch_tlbbatch_flush() performs a full TLB flush regardless of the active mm. - * This means that the 'struct flush_tlb_info' that describes which mappings to - * flush is actually fixed. We therefore set a single fixed struct and use it in - * arch_tlbbatch_flush(). - */ -static const struct flush_tlb_info full_flush_tlb_info = { - .mm = NULL, - .start = 0, - .end = TLB_FLUSH_ALL, -}; - void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch) { + struct flush_tlb_info *info; + int cpu = get_cpu(); - if (cpumask_test_cpu(cpu, &batch->cpumask)) { + info = get_flush_tlb_info(NULL, 0, TLB_FLUSH_ALL, 0, false, 0); + /* + * flush_tlb_multi() is not optimized for the common case in which only + * a local TLB flush is needed. Optimize this use-case by calling + * flush_tlb_func_local() directly in this case. 
+ */ + if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) { + flush_tlb_multi(&batch->cpumask, info); + } else if (cpumask_test_cpu(cpu, &batch->cpumask)) { lockdep_assert_irqs_enabled(); local_irq_disable(); - flush_tlb_func_local(&full_flush_tlb_info, TLB_LOCAL_SHOOTDOWN); + flush_tlb_func(info); local_irq_enable(); } - if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) - flush_tlb_others(&batch->cpumask, &full_flush_tlb_info); - cpumask_clear(&batch->cpumask); + put_flush_tlb_info(); put_cpu(); } diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 6926d0ca6c71..2a2e290fa5d8 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -282,7 +282,7 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf, /* BPF trampoline can be made to work without these nops, * but let's waste 5 bytes for now and optimize later */ - memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt); + memcpy(prog, x86_nops[5], cnt); prog += cnt; if (!ebpf_from_cbpf) { if (tail_call_reachable && !is_subprog) @@ -330,7 +330,7 @@ static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, void *old_addr, void *new_addr, const bool text_live) { - const u8 *nop_insn = ideal_nops[NOP_ATOMIC5]; + const u8 *nop_insn = x86_nops[5]; u8 old_insn[X86_PATCH_SIZE]; u8 new_insn[X86_PATCH_SIZE]; u8 *prog; @@ -560,7 +560,7 @@ static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke, if (stack_depth) EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8)); - memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE); + memcpy(prog, x86_nops[5], X86_PATCH_SIZE); prog += X86_PATCH_SIZE; /* out: */ @@ -881,7 +881,7 @@ static int emit_nops(u8 **pprog, int len) noplen = ASM_NOP_MAX; for (i = 0; i < noplen; i++) - EMIT1(ideal_nops[noplen][i]); + EMIT1(x86_nops[noplen][i]); len -= noplen; } @@ -1556,7 +1556,7 @@ emit_cond_jmp: /* Convert BPF opcode to x86 */ if (is_imm8(jmp_offset)) { if (jmp_padding) { /* To keep the jmp_offset valid, the extra bytes are - * padded before the jump insn, so we substract the + * padded before the jump insn, so we subtract the * 2 bytes of jmp_cond insn from INSN_SZ_DIFF. * * If the previous pass already emits an imm8 @@ -1631,7 +1631,7 @@ emit_jmp: if (jmp_padding) { /* To avoid breaking jmp_offset, the extra bytes * are padded before the actual jmp insn, so - * 2 bytes is substracted from INSN_SZ_DIFF. + * 2 bytes is subtracted from INSN_SZ_DIFF. * * If the previous pass already emits an imm8 * jmp, there is nothing to pad (0 byte). @@ -1689,7 +1689,16 @@ emit_jmp: } if (image) { - if (unlikely(proglen + ilen > oldproglen)) { + /* + * When populating the image, assert that: + * + * i) We do not write beyond the allocated space, and + * ii) addrs[i] did not change from the prior run, in order + * to validate assumptions made for computing branch + * displacements. 
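
To make the invariant behind this check concrete, here is a minimal user-space model of the multi-pass sizing scheme, under the simplifying assumption that every instruction is a jump encoding as either 2 or 5 bytes: sizing passes repeat until the recorded end offsets (addrs[]) stop changing, and the final emission pass verifies that every instruction still ends exactly where the last sizing pass said it would. The toy tables and helpers (jmp_to, jmp_len, do_pass) are invented for the illustration.

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define NINSNS 3

/* every toy insn is a jump; jmp_to[i] is the insn it branches to */
static const int jmp_to[NINSNS] = { 2, 0, 1 };

/* addrs[i] = end offset of insn i as computed by the previous pass */
static int addrs[NINSNS];

/* a jump encodes in 2 bytes when its displacement fits in a signed byte */
static int jmp_len(int disp)
{
        return (disp >= -128 && disp <= 127) ? 2 : 5;
}

/* one pass over the program; with a non-NULL image this is the final pass */
static int do_pass(unsigned char *image, int oldproglen)
{
        int proglen = 0;

        for (int i = 0; i < NINSNS; i++) {
                int ilen = jmp_len(addrs[jmp_to[i]] - addrs[i]);

                if (image) {
                        /* the two conditions the patch checks for */
                        assert(proglen + ilen <= oldproglen);
                        assert(proglen + ilen == addrs[i]);
                        memset(image + proglen, 0x90, ilen); /* stand-in bytes */
                }
                proglen += ilen;
                addrs[i] = proglen;
        }
        return proglen;
}

int main(void)
{
        unsigned char image[NINSNS * 5];
        int prev[NINSNS], proglen;

        do {                            /* size until the layout is stable */
                memcpy(prev, addrs, sizeof(prev));
                proglen = do_pass(NULL, 0);
        } while (memcmp(prev, addrs, sizeof(prev)));

        do_pass(image, proglen);        /* emission pass re-checks every offset */
        printf("converged image size: %d bytes\n", proglen);
        return 0;
}

The real JIT reports the mismatch with pr_err() and returns -EFAULT instead of asserting, as the code that follows shows.
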
+ */ + if (unlikely(proglen + ilen > oldproglen || + proglen + ilen != addrs[i])) { pr_err("bpf_jit: fatal error\n"); return -EFAULT; } @@ -1936,7 +1945,7 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog, * add rsp, 8 // skip eth_type_trans's frame * ret // return to its caller */ -int arch_prepare_bpf_trampoline(void *image, void *image_end, +int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, const struct btf_func_model *m, u32 flags, struct bpf_tramp_progs *tprogs, void *orig_call) @@ -1975,6 +1984,15 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end, save_regs(m, &prog, nr_args, stack_size); + if (flags & BPF_TRAMP_F_CALL_ORIG) { + /* arg1: mov rdi, im */ + emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); + if (emit_call(&prog, __bpf_tramp_enter, prog)) { + ret = -EINVAL; + goto cleanup; + } + } + if (fentry->nr_progs) if (invoke_bpf(m, &prog, fentry, stack_size)) return -EINVAL; @@ -1993,8 +2011,7 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end, } if (flags & BPF_TRAMP_F_CALL_ORIG) { - if (fentry->nr_progs || fmod_ret->nr_progs) - restore_regs(m, &prog, nr_args, stack_size); + restore_regs(m, &prog, nr_args, stack_size); /* call original function */ if (emit_call(&prog, orig_call, prog)) { @@ -2003,6 +2020,9 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end, } /* remember return value in a stack for bpf prog to access */ emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); + im->ip_after_call = prog; + memcpy(prog, x86_nops[5], X86_PATCH_SIZE); + prog += X86_PATCH_SIZE; } if (fmod_ret->nr_progs) { @@ -2033,9 +2053,17 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end, * the return value is only updated on the stack and still needs to be * restored to R0. 
*/ - if (flags & BPF_TRAMP_F_CALL_ORIG) + if (flags & BPF_TRAMP_F_CALL_ORIG) { + im->ip_epilogue = prog; + /* arg1: mov rdi, im */ + emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); + if (emit_call(&prog, __bpf_tramp_exit, prog)) { + ret = -EINVAL; + goto cleanup; + } /* restore original return value back into RAX */ emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); + } EMIT1(0x5B); /* pop rbx */ EMIT1(0xC9); /* leave */ @@ -2225,7 +2253,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) padding = true; goto skip_init_addrs; } - addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL); + addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL); if (!addrs) { prog = orig_prog; goto out_addrs; @@ -2317,7 +2345,7 @@ out_image: if (image) bpf_prog_fill_jited_linfo(prog, addrs + 1); out_addrs: - kfree(addrs); + kvfree(addrs); kfree(jit_data); prog->aux->jit_data = NULL; } @@ -2327,3 +2355,8 @@ out: tmp : orig_prog); return prog; } + +bool bpf_jit_supports_kfunc_call(void) +{ + return true; +} diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index d17b67c69f89..3da88ded6ee3 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -1390,6 +1390,19 @@ static inline void emit_push_r64(const u8 src[], u8 **pprog) *pprog = prog; } +static void emit_push_r32(const u8 src[], u8 **pprog) +{ + u8 *prog = *pprog; + int cnt = 0; + + /* mov ecx,dword ptr [ebp+off] */ + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src_lo)); + /* push ecx */ + EMIT1(0x51); + + *pprog = prog; +} + static u8 get_cond_jmp_opcode(const u8 op, bool is_cmp_lo) { u8 jmp_cond; @@ -1459,6 +1472,174 @@ static u8 get_cond_jmp_opcode(const u8 op, bool is_cmp_lo) return jmp_cond; } +/* i386 kernel compiles with "-mregparm=3". From gcc document: + * + * ==== snippet ==== + * regparm (number) + * On x86-32 targets, the regparm attribute causes the compiler + * to pass arguments number one to (number) if they are of integral + * type in registers EAX, EDX, and ECX instead of on the stack. + * Functions that take a variable number of arguments continue + * to be passed all of their arguments on the stack. + * ==== snippet ==== + * + * The first three args of a function will be considered for + * putting into the 32bit register EAX, EDX, and ECX. + * + * Two 32bit registers are used to pass a 64bit arg. + * + * For example, + * void foo(u32 a, u32 b, u32 c, u32 d): + * u32 a: EAX + * u32 b: EDX + * u32 c: ECX + * u32 d: stack + * + * void foo(u64 a, u32 b, u32 c): + * u64 a: EAX (lo32) EDX (hi32) + * u32 b: ECX + * u32 c: stack + * + * void foo(u32 a, u64 b, u32 c): + * u32 a: EAX + * u64 b: EDX (lo32) ECX (hi32) + * u32 c: stack + * + * void foo(u32 a, u32 b, u64 c): + * u32 a: EAX + * u32 b: EDX + * u64 c: stack + * + * The return value will be stored in the EAX (and EDX for 64bit value). + * + * For example, + * u32 foo(u32 a, u32 b, u32 c): + * return value: EAX + * + * u64 foo(u32 a, u32 b, u32 c): + * return value: EAX (lo32) EDX (hi32) + * + * Notes: + * The verifier only accepts function having integer and pointers + * as its args and return value, so it does not have + * struct-by-value. + * + * emit_kfunc_call() finds out the btf_func_model by calling + * bpf_jit_find_kfunc_model(). A btf_func_model + * has the details about the number of args, size of each arg, + * and the size of the return value. + * + * It first decides how many args can be passed by EAX, EDX, and ECX. 
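
As a concrete rendering of the assignment rule spelled out above, the following stand-alone sketch walks a list of argument sizes the way the first loop of emit_kfunc_call() does: each argument claims one register (two for a 64-bit value) out of EAX/EDX/ECX, and the first argument that no longer fits sends itself and everything after it to the stack. The arg_size[] values reproduce the foo(u32 a, u32 b, u64 c) case from the comment; the rest of the program is invented for the illustration.

#include <stdio.h>

int main(void)
{
        static const char *regs[] = { "eax", "edx", "ecx" };
        static const int arg_size[] = { 4, 4, 8 };  /* foo(u32, u32, u64) */
        int nr_args = 3, free_regs = 3, next_reg = 0, i;

        for (i = 0; i < nr_args; i++) {
                int need = arg_size[i] > 4 ? 2 : 1;

                if (need > free_regs)
                        break;          /* this arg and all later ones: stack */
                if (need == 2)
                        printf("arg%d: %s (lo32) %s (hi32)\n",
                               i + 1, regs[next_reg], regs[next_reg + 1]);
                else
                        printf("arg%d: %s\n", i + 1, regs[next_reg]);
                next_reg += need;
                free_regs -= need;
        }
        for (; i < nr_args; i++)
                printf("arg%d: stack\n", i + 1);
        return 0;
}

Running it prints arg1: eax, arg2: edx, arg3: stack, matching the third example in the comment above.
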
+ * That will decide what args should be pushed to the stack: + * [first_stack_regno, last_stack_regno] are the bpf regnos + * that should be pushed to the stack. + * + * It will first push all args to the stack because the push + * will need to use ECX. Then, it moves + * [BPF_REG_1, first_stack_regno) to EAX, EDX, and ECX. + * + * When emitting a call (0xE8), it needs to figure out + * the jmp_offset relative to the jit-insn address immediately + * following the call (0xE8) instruction. At this point, it knows + * the end of the jit-insn address after completely translated the + * current (BPF_JMP | BPF_CALL) bpf-insn. It is passed as "end_addr" + * to the emit_kfunc_call(). Thus, it can learn the "immediate-follow-call" + * address by figuring out how many jit-insn is generated between + * the call (0xE8) and the end_addr: + * - 0-1 jit-insn (3 bytes each) to restore the esp pointer if there + * is arg pushed to the stack. + * - 0-2 jit-insns (3 bytes each) to handle the return value. + */ +static int emit_kfunc_call(const struct bpf_prog *bpf_prog, u8 *end_addr, + const struct bpf_insn *insn, u8 **pprog) +{ + const u8 arg_regs[] = { IA32_EAX, IA32_EDX, IA32_ECX }; + int i, cnt = 0, first_stack_regno, last_stack_regno; + int free_arg_regs = ARRAY_SIZE(arg_regs); + const struct btf_func_model *fm; + int bytes_in_stack = 0; + const u8 *cur_arg_reg; + u8 *prog = *pprog; + s64 jmp_offset; + + fm = bpf_jit_find_kfunc_model(bpf_prog, insn); + if (!fm) + return -EINVAL; + + first_stack_regno = BPF_REG_1; + for (i = 0; i < fm->nr_args; i++) { + int regs_needed = fm->arg_size[i] > sizeof(u32) ? 2 : 1; + + if (regs_needed > free_arg_regs) + break; + + free_arg_regs -= regs_needed; + first_stack_regno++; + } + + /* Push the args to the stack */ + last_stack_regno = BPF_REG_0 + fm->nr_args; + for (i = last_stack_regno; i >= first_stack_regno; i--) { + if (fm->arg_size[i - 1] > sizeof(u32)) { + emit_push_r64(bpf2ia32[i], &prog); + bytes_in_stack += 8; + } else { + emit_push_r32(bpf2ia32[i], &prog); + bytes_in_stack += 4; + } + } + + cur_arg_reg = &arg_regs[0]; + for (i = BPF_REG_1; i < first_stack_regno; i++) { + /* mov e[adc]x,dword ptr [ebp+off] */ + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, *cur_arg_reg++), + STACK_VAR(bpf2ia32[i][0])); + if (fm->arg_size[i - 1] > sizeof(u32)) + /* mov e[adc]x,dword ptr [ebp+off] */ + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, *cur_arg_reg++), + STACK_VAR(bpf2ia32[i][1])); + } + + if (bytes_in_stack) + /* add esp,"bytes_in_stack" */ + end_addr -= 3; + + /* mov dword ptr [ebp+off],edx */ + if (fm->ret_size > sizeof(u32)) + end_addr -= 3; + + /* mov dword ptr [ebp+off],eax */ + if (fm->ret_size) + end_addr -= 3; + + jmp_offset = (u8 *)__bpf_call_base + insn->imm - end_addr; + if (!is_simm32(jmp_offset)) { + pr_err("unsupported BPF kernel function jmp_offset:%lld\n", + jmp_offset); + return -EINVAL; + } + + EMIT1_off32(0xE8, jmp_offset); + + if (fm->ret_size) + /* mov dword ptr [ebp+off],eax */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(bpf2ia32[BPF_REG_0][0])); + + if (fm->ret_size > sizeof(u32)) + /* mov dword ptr [ebp+off],edx */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EDX), + STACK_VAR(bpf2ia32[BPF_REG_0][1])); + + if (bytes_in_stack) + /* add esp,"bytes_in_stack" */ + EMIT3(0x83, add_1reg(0xC0, IA32_ESP), bytes_in_stack); + + *pprog = prog; + + return 0; +} + static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, int oldproglen, struct jit_context *ctx) { @@ -1888,6 +2069,18 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 
*image, if (insn->src_reg == BPF_PSEUDO_CALL) goto notyet; + if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { + int err; + + err = emit_kfunc_call(bpf_prog, + image + addrs[i], + insn, &prog); + + if (err) + return err; + break; + } + func = (u8 *) __bpf_call_base + imm32; jmp_offset = func - (image + addrs[i]); @@ -2276,7 +2469,16 @@ notyet: } if (image) { - if (unlikely(proglen + ilen > oldproglen)) { + /* + * When populating the image, assert that: + * + * i) We do not write beyond the allocated space, and + * ii) addrs[i] did not change from the prior run, in order + * to validate assumptions made for computing branch + * displacements. + */ + if (unlikely(proglen + ilen > oldproglen || + proglen + ilen != addrs[i])) { pr_err("bpf_jit: fatal error\n"); return -EFAULT; } @@ -2393,3 +2595,8 @@ out: tmp : orig_prog); return prog; } + +bool bpf_jit_supports_kfunc_call(void) +{ + return true; +} diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c index bfa50e65ef6c..ae744b6a0785 100644 --- a/arch/x86/pci/amd_bus.c +++ b/arch/x86/pci/amd_bus.c @@ -126,7 +126,7 @@ static int __init early_root_info_init(void) node = (reg >> 4) & 0x07; link = (reg >> 8) & 0x03; - info = alloc_pci_root_info(min_bus, max_bus, node, link); + alloc_pci_root_info(min_bus, max_bus, node, link); } /* diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 0a0e168be1cb..02dc64625e64 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -375,7 +375,7 @@ static const struct dmi_system_id msi_k8t_dmi_table[] = { * The BIOS only gives options "DISABLED" and "AUTO". This code sets * the corresponding register-value to enable the soundcard. * - * The soundcard is only enabled, if the mainborad is identified + * The soundcard is only enabled, if the mainboard is identified * via DMI-tables and the soundcard is detected to be off. */ static void pci_fixup_msi_k8t_onboard_sound(struct pci_dev *dev) diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 1b82d77019b1..df7b5477fc4f 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -195,7 +195,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) } /* - * Certain firmware versions are way too sentimential and still believe + * Certain firmware versions are way too sentimental and still believe * they are exclusive and unquestionable owners of the first physical page, * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY * (but then write-access it later during SetVirtualAddressMap()). @@ -457,7 +457,7 @@ void __init efi_dump_pagetable(void) * in a kernel thread and user context. Preemption needs to remain disabled * while the EFI-mm is borrowed. mmgrab()/mmdrop() is not used because the mm * can not change under us. - * It should be ensured that there are no concurent calls to this function. + * It should be ensured that there are no concurrent calls to this function. */ void efi_enter_mm(void) { diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 67d93a243c35..7850111008a8 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -441,7 +441,7 @@ void __init efi_free_boot_services(void) * 1.4.4 with SGX enabled booting Linux via Fedora 24's * grub2-efi on a hard disk. (And no, I don't know why * this happened, but Linux should still try to boot rather - * panicing early.) + * panicking early.) 
*/ rm_size = real_mode_size_needed(); if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) { @@ -726,7 +726,7 @@ void efi_crash_gracefully_on_page_fault(unsigned long phys_addr) * Buggy efi_reset_system() is handled differently from other EFI * Runtime Services as it doesn't use efi_rts_wq. Although, * native_machine_emergency_restart() says that machine_real_restart() - * could fail, it's better not to compilcate this fault handler + * could fail, it's better not to complicate this fault handler * because this case occurs *very* rarely and hence could be improved * on a need by basis. */ diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c index 0286fe1b14b5..d3d456925b2a 100644 --- a/arch/x86/platform/intel-quark/imr.c +++ b/arch/x86/platform/intel-quark/imr.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * imr.c -- Intel Isolated Memory Region driver * * Copyright(c) 2013 Intel Corporation. @@ -551,7 +551,7 @@ static void __init imr_fixup_memmap(struct imr_device *idev) /* * Setup an unlocked IMR around the physical extent of the kernel - * from the beginning of the .text secton to the end of the + * from the beginning of the .text section to the end of the * .rodata section as one physically contiguous block. * * We don't round up @size since it is already PAGE_SIZE aligned. diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c index 570e3062faac..761f3689f60a 100644 --- a/arch/x86/platform/intel-quark/imr_selftest.c +++ b/arch/x86/platform/intel-quark/imr_selftest.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/** +/* * imr_selftest.c -- Intel Isolated Memory Region self-test driver * * Copyright(c) 2013 Intel Corporation. diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c index 526f70f27c1c..fdd49d70b437 100644 --- a/arch/x86/platform/intel/iosf_mbi.c +++ b/arch/x86/platform/intel/iosf_mbi.c @@ -187,7 +187,7 @@ bool iosf_mbi_available(void) EXPORT_SYMBOL(iosf_mbi_available); /* - **************** P-Unit/kernel shared I2C bus arbritration **************** + **************** P-Unit/kernel shared I2C bus arbitration **************** * * Some Bay Trail and Cherry Trail devices have the P-Unit and us (the kernel) * share a single I2C bus to the PMIC. Below are helpers to arbitrate the @@ -493,7 +493,7 @@ static void iosf_sideband_debug_init(void) /* mcrx */ debugfs_create_x32("mcrx", 0660, iosf_dbg, &dbg_mcrx); - /* mcr - initiates mailbox tranaction */ + /* mcr - initiates mailbox transaction */ debugfs_create_file("mcr", 0660, iosf_dbg, &dbg_mcr, &iosf_mcr_fops); } diff --git a/arch/x86/platform/iris/iris.c b/arch/x86/platform/iris/iris.c index 1ac8578258af..b42bfdab01a9 100644 --- a/arch/x86/platform/iris/iris.c +++ b/arch/x86/platform/iris/iris.c @@ -27,7 +27,6 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Sébastien Hinderer <Sebastien.Hinderer@ens-lyon.org>"); MODULE_DESCRIPTION("A power_off handler for Iris devices from EuroBraille"); -MODULE_SUPPORTED_DEVICE("Eurobraille/Iris"); static bool force; diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c index 85f4638764d6..994a229cb79f 100644 --- a/arch/x86/platform/olpc/olpc-xo15-sci.c +++ b/arch/x86/platform/olpc/olpc-xo15-sci.c @@ -27,7 +27,7 @@ static bool lid_wake_on_close; * wake-on-close. This is implemented as standard by the XO-1.5 DSDT. * * We provide here a sysfs attribute that will additionally enable - * wake-on-close behavior. 
This is useful (e.g.) when we oportunistically + * wake-on-close behavior. This is useful (e.g.) when we opportunistically * suspend with the display running; if the lid is then closed, we want to * wake up to turn the display off. * diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c index 26d1f6693789..75e3319e8bee 100644 --- a/arch/x86/platform/olpc/olpc_dt.c +++ b/arch/x86/platform/olpc/olpc_dt.c @@ -131,7 +131,7 @@ void * __init prom_early_alloc(unsigned long size) const size_t chunk_size = max(PAGE_SIZE, size); /* - * To mimimize the number of allocations, grab at least + * To minimize the number of allocations, grab at least * PAGE_SIZE of memory (that's an arbitrary choice that's * fast enough on the platforms we care about while minimizing * wasted bootmem) and hand off chunks of it to callers. diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S index d2ccadc247e6..72c1e42d121d 100644 --- a/arch/x86/platform/pvh/head.S +++ b/arch/x86/platform/pvh/head.S @@ -30,10 +30,10 @@ * the boot start info structure. * - `cr0`: bit 0 (PE) must be set. All the other writeable bits are cleared. * - `cr4`: all bits are cleared. - * - `cs `: must be a 32-bit read/execute code segment with a base of ‘0’ - * and a limit of ‘0xFFFFFFFF’. The selector value is unspecified. + * - `cs `: must be a 32-bit read/execute code segment with a base of `0` + * and a limit of `0xFFFFFFFF`. The selector value is unspecified. * - `ds`, `es`: must be a 32-bit read/write data segment with a base of - * ‘0’ and a limit of ‘0xFFFFFFFF’. The selector values are all + * `0` and a limit of `0xFFFFFFFF`. The selector values are all * unspecified. * - `tr`: must be a 32-bit TSS (active) with a base of '0' and a limit * of '0x67'. @@ -46,10 +46,8 @@ #define PVH_GDT_ENTRY_CS 1 #define PVH_GDT_ENTRY_DS 2 -#define PVH_GDT_ENTRY_CANARY 3 #define PVH_CS_SEL (PVH_GDT_ENTRY_CS * 8) #define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8) -#define PVH_CANARY_SEL (PVH_GDT_ENTRY_CANARY * 8) SYM_CODE_START_LOCAL(pvh_start_xen) cld @@ -111,17 +109,6 @@ SYM_CODE_START_LOCAL(pvh_start_xen) #else /* CONFIG_X86_64 */ - /* Set base address in stack canary descriptor. 
*/ - movl $_pa(gdt_start),%eax - movl $_pa(canary),%ecx - movw %cx, (PVH_GDT_ENTRY_CANARY * 8) + 2(%eax) - shrl $16, %ecx - movb %cl, (PVH_GDT_ENTRY_CANARY * 8) + 4(%eax) - movb %ch, (PVH_GDT_ENTRY_CANARY * 8) + 7(%eax) - - mov $PVH_CANARY_SEL,%eax - mov %eax,%gs - call mk_early_pgtbl_32 mov $_pa(initial_page_table), %eax @@ -165,7 +152,6 @@ SYM_DATA_START_LOCAL(gdt_start) .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* PVH_CS_SEL */ #endif .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* PVH_DS_SEL */ - .quad GDT_ENTRY(0x4090, 0, 0x18) /* PVH_CANARY_SEL */ SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end) .balign 16 diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c index eafc530c8767..1e9ff28bc2e0 100644 --- a/arch/x86/platform/uv/uv_nmi.c +++ b/arch/x86/platform/uv/uv_nmi.c @@ -24,6 +24,7 @@ #include <asm/kdebug.h> #include <asm/local64.h> #include <asm/nmi.h> +#include <asm/reboot.h> #include <asm/traps.h> #include <asm/uv/uv.h> #include <asm/uv/uv_hub.h> @@ -91,6 +92,8 @@ static atomic_t uv_nmi_cpus_in_nmi = ATOMIC_INIT(-1); static atomic_t uv_nmi_slave_continue; static cpumask_var_t uv_nmi_cpu_mask; +static atomic_t uv_nmi_kexec_failed; + /* Values for uv_nmi_slave_continue */ #define SLAVE_CLEAR 0 #define SLAVE_CONTINUE 1 @@ -834,38 +837,35 @@ static void uv_nmi_touch_watchdogs(void) touch_nmi_watchdog(); } -static atomic_t uv_nmi_kexec_failed; - -#if defined(CONFIG_KEXEC_CORE) -static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs) +static void uv_nmi_kdump(int cpu, int main, struct pt_regs *regs) { + /* Check if kdump kernel loaded for both main and secondary CPUs */ + if (!kexec_crash_image) { + if (main) + pr_err("UV: NMI error: kdump kernel not loaded\n"); + return; + } + /* Call crash to dump system state */ - if (master) { + if (main) { pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu); crash_kexec(regs); - pr_emerg("UV: crash_kexec unexpectedly returned, "); + pr_emerg("UV: crash_kexec unexpectedly returned\n"); atomic_set(&uv_nmi_kexec_failed, 1); - if (!kexec_crash_image) { - pr_cont("crash kernel not loaded\n"); - return; - } - pr_cont("kexec busy, stalling cpus while waiting\n"); - } - /* If crash exec fails the slaves should return, otherwise stall */ - while (atomic_read(&uv_nmi_kexec_failed) == 0) - mdelay(10); -} + } else { /* secondary */ -#else /* !CONFIG_KEXEC_CORE */ -static inline void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs) -{ - if (master) - pr_err("UV: NMI kdump: KEXEC not supported in this kernel\n"); - atomic_set(&uv_nmi_kexec_failed, 1); + /* If kdump kernel fails, secondaries will exit this loop */ + while (atomic_read(&uv_nmi_kexec_failed) == 0) { + + /* Once shootdown cpus starts, they do not return */ + run_crash_ipi_callback(regs); + + mdelay(10); + } + } } -#endif /* !CONFIG_KEXEC_CORE */ #ifdef CONFIG_KGDB #ifdef CONFIG_KGDB_KDB @@ -889,7 +889,7 @@ static inline int uv_nmi_kdb_reason(void) * Call KGDB/KDB from NMI handler * * Note that if both KGDB and KDB are configured, then the action of 'kgdb' or - * 'kdb' has no affect on which is used. See the KGDB documention for further + * 'kdb' has no affect on which is used. See the KGDB documentation for further * information. 
*/ static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master) diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index db1378c6ff26..3a070e7cdb8b 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -99,11 +99,8 @@ static void __save_processor_state(struct saved_context *ctxt) /* * segment registers */ -#ifdef CONFIG_X86_32_LAZY_GS savesegment(gs, ctxt->gs); -#endif #ifdef CONFIG_X86_64 - savesegment(gs, ctxt->gs); savesegment(fs, ctxt->fs); savesegment(ds, ctxt->ds); savesegment(es, ctxt->es); @@ -232,7 +229,6 @@ static void notrace __restore_processor_state(struct saved_context *ctxt) wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base); #else loadsegment(fs, __KERNEL_PERCPU); - loadsegment(gs, __KERNEL_STACK_CANARY); #endif /* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */ @@ -255,7 +251,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt) */ wrmsrl(MSR_FS_BASE, ctxt->fs_base); wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base); -#elif defined(CONFIG_X86_32_LAZY_GS) +#else loadsegment(gs, ctxt->gs); #endif @@ -321,7 +317,7 @@ int hibernate_resume_nonboot_cpu_disable(void) /* * When bsp_check() is called in hibernate and suspend, cpu hotplug - * is disabled already. So it's unnessary to handle race condition between + * is disabled already. So it's unnecessary to handle race condition between * cpumask query and cpu hotplug. */ static int bsp_check(void) diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c index cd3914fc9f3d..e94e0050a583 100644 --- a/arch/x86/power/hibernate.c +++ b/arch/x86/power/hibernate.c @@ -13,8 +13,8 @@ #include <linux/kdebug.h> #include <linux/cpu.h> #include <linux/pgtable.h> - -#include <crypto/hash.h> +#include <linux/types.h> +#include <linux/crc32.h> #include <asm/e820/api.h> #include <asm/init.h> @@ -54,95 +54,33 @@ int pfn_is_nosave(unsigned long pfn) return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn; } - -#define MD5_DIGEST_SIZE 16 - struct restore_data_record { unsigned long jump_address; unsigned long jump_address_phys; unsigned long cr3; unsigned long magic; - u8 e820_digest[MD5_DIGEST_SIZE]; + unsigned long e820_checksum; }; -#if IS_BUILTIN(CONFIG_CRYPTO_MD5) /** - * get_e820_md5 - calculate md5 according to given e820 table + * compute_e820_crc32 - calculate crc32 of a given e820 table * * @table: the e820 table to be calculated - * @buf: the md5 result to be stored to + * + * Return: the resulting checksum */ -static int get_e820_md5(struct e820_table *table, void *buf) +static inline u32 compute_e820_crc32(struct e820_table *table) { - struct crypto_shash *tfm; - struct shash_desc *desc; - int size; - int ret = 0; - - tfm = crypto_alloc_shash("md5", 0, 0); - if (IS_ERR(tfm)) - return -ENOMEM; - - desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm), - GFP_KERNEL); - if (!desc) { - ret = -ENOMEM; - goto free_tfm; - } - - desc->tfm = tfm; - - size = offsetof(struct e820_table, entries) + + int size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry) * table->nr_entries; - if (crypto_shash_digest(desc, (u8 *)table, size, buf)) - ret = -EINVAL; - - kfree_sensitive(desc); - -free_tfm: - crypto_free_shash(tfm); - return ret; -} - -static int hibernation_e820_save(void *buf) -{ - return get_e820_md5(e820_table_firmware, buf); -} - -static bool hibernation_e820_mismatch(void *buf) -{ - int ret; - u8 result[MD5_DIGEST_SIZE]; - - memset(result, 0, MD5_DIGEST_SIZE); - /* If there is no digest in suspend kernel, let it go. 
*/ - if (!memcmp(result, buf, MD5_DIGEST_SIZE)) - return false; - - ret = get_e820_md5(e820_table_firmware, result); - if (ret) - return true; - - return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false; -} -#else -static int hibernation_e820_save(void *buf) -{ - return 0; -} - -static bool hibernation_e820_mismatch(void *buf) -{ - /* If md5 is not builtin for restore kernel, let it go. */ - return false; + return ~crc32_le(~0, (unsigned char const *)table, size); } -#endif #ifdef CONFIG_X86_64 -#define RESTORE_MAGIC 0x23456789ABCDEF01UL +#define RESTORE_MAGIC 0x23456789ABCDEF02UL #else -#define RESTORE_MAGIC 0x12345678UL +#define RESTORE_MAGIC 0x12345679UL #endif /** @@ -179,7 +117,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size) */ rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK; - return hibernation_e820_save(rdr->e820_digest); + rdr->e820_checksum = compute_e820_crc32(e820_table_firmware); + return 0; } /** @@ -200,7 +139,7 @@ int arch_hibernation_header_restore(void *addr) jump_address_phys = rdr->jump_address_phys; restore_cr3 = rdr->cr3; - if (hibernation_e820_mismatch(rdr->e820_digest)) { + if (rdr->e820_checksum != compute_e820_crc32(e820_table_firmware)) { pr_crit("Hibernate inconsistent memory map detected!\n"); return -ENODEV; } diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index 22fda7d99159..1be71ef5e4c4 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -103,7 +103,7 @@ static void __init setup_real_mode(void) *ptr += phys_base; } - /* Must be perfomed *after* relocation. */ + /* Must be performed *after* relocation. */ trampoline_header = (struct trampoline_header *) __va(real_mode_header->trampoline_header); diff --git a/arch/x86/tools/insn_decoder_test.c b/arch/x86/tools/insn_decoder_test.c index 34eda63c124b..472540aeabc2 100644 --- a/arch/x86/tools/insn_decoder_test.c +++ b/arch/x86/tools/insn_decoder_test.c @@ -120,7 +120,7 @@ int main(int argc, char **argv) while (fgets(line, BUFSIZE, stdin)) { char copy[BUFSIZE], *s, *tab1, *tab2; - int nb = 0; + int nb = 0, ret; unsigned int b; if (line[0] == '<') { @@ -148,10 +148,12 @@ int main(int argc, char **argv) } else break; } + /* Decode an instruction */ - insn_init(&insn, insn_buff, sizeof(insn_buff), x86_64); - insn_get_length(&insn); - if (insn.length != nb) { + ret = insn_decode(&insn, insn_buff, sizeof(insn_buff), + x86_64 ? INSN_MODE_64 : INSN_MODE_32); + + if (ret < 0 || insn.length != nb) { warnings++; pr_warn("Found an x86 instruction decoder bug, " "please report this.\n", sym); diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c index c6a0000ae635..213f35f94feb 100644 --- a/arch/x86/tools/insn_sanity.c +++ b/arch/x86/tools/insn_sanity.c @@ -218,8 +218,8 @@ static void parse_args(int argc, char **argv) int main(int argc, char **argv) { + int insns = 0, ret; struct insn insn; - int insns = 0; int errors = 0; unsigned long i; unsigned char insn_buff[MAX_INSN_SIZE * 2]; @@ -237,15 +237,15 @@ int main(int argc, char **argv) continue; /* Decode an instruction */ - insn_init(&insn, insn_buff, sizeof(insn_buff), x86_64); - insn_get_length(&insn); + ret = insn_decode(&insn, insn_buff, sizeof(insn_buff), + x86_64 ? 
INSN_MODE_64 : INSN_MODE_32); if (insn.next_byte <= insn.kaddr || insn.kaddr + MAX_INSN_SIZE < insn.next_byte) { /* Access out-of-range memory */ dump_stream(stderr, "Error: Found an access violation", i, insn_buff, &insn); errors++; - } else if (verbose && !insn_complete(&insn)) + } else if (verbose && ret < 0) dump_stream(stdout, "Info: Found an undecodable input", i, insn_buff, &insn); else if (verbose >= 2) dump_insn(stdout, &insn); diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile index 77f70b969d14..5ccb18290d71 100644 --- a/arch/x86/um/Makefile +++ b/arch/x86/um/Makefile @@ -21,6 +21,7 @@ obj-y += checksum_32.o syscalls_32.o obj-$(CONFIG_ELF_CORE) += elfcore.o subarch-y = ../lib/string_32.o ../lib/atomic64_32.o ../lib/atomic64_cx8_32.o +subarch-y += ../lib/cmpxchg8b_emu.o ../lib/atomic64_386_32.o subarch-y += ../kernel/sys_ia32.o else diff --git a/arch/x86/um/asm/elf.h b/arch/x86/um/asm/elf.h index c907b20d4993..dcaf3b38a9e0 100644 --- a/arch/x86/um/asm/elf.h +++ b/arch/x86/um/asm/elf.h @@ -212,6 +212,6 @@ extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu); extern long elf_aux_hwcap; #define ELF_HWCAP (elf_aux_hwcap) -#define SET_PERSONALITY(ex) do ; while(0) +#define SET_PERSONALITY(ex) do {} while(0) #endif diff --git a/arch/x86/um/shared/sysdep/stub_32.h b/arch/x86/um/shared/sysdep/stub_32.h index c3891c1ada26..b95db9daf0e8 100644 --- a/arch/x86/um/shared/sysdep/stub_32.h +++ b/arch/x86/um/shared/sysdep/stub_32.h @@ -77,7 +77,7 @@ static inline void trap_myself(void) __asm("int3"); } -static void inline remap_stack_and_trap(void) +static inline void remap_stack_and_trap(void) { __asm__ volatile ( "movl %%esp,%%ebx ;" diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index dc0a337f985b..17503fed2017 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -1070,8 +1070,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .read_pmc = xen_read_pmc, - .iret = xen_iret, - .load_tr_desc = paravirt_nop, .set_ldt = xen_set_ldt, .load_gdt = xen_load_gdt, @@ -1204,7 +1202,6 @@ static void __init xen_setup_gdt(int cpu) pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry_boot; pv_ops.cpu.load_gdt = xen_load_gdt_boot; - setup_stack_canary_segment(cpu); switch_to_new_gdt(cpu); pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry; @@ -1233,8 +1230,8 @@ asmlinkage __visible void __init xen_start_kernel(void) /* Install Xen paravirt ops */ pv_info = xen_info; - pv_ops.init.patch = paravirt_patch_default; pv_ops.cpu = xen_cpu_ops; + paravirt_iret = xen_iret; xen_init_irq_ops(); /* diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index cf2ade864c30..ade789e73ee4 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -1247,8 +1247,8 @@ static void xen_flush_tlb_one_user(unsigned long addr) preempt_enable(); } -static void xen_flush_tlb_others(const struct cpumask *cpus, - const struct flush_tlb_info *info) +static void xen_flush_tlb_multi(const struct cpumask *cpus, + const struct flush_tlb_info *info) { struct { struct mmuext_op op; @@ -1258,7 +1258,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus, const size_t mc_entry_size = sizeof(args->op) + sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus()); - trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end); + trace_xen_mmu_flush_tlb_multi(cpus, info->mm, info->start, info->end); if (cpumask_empty(cpus)) return; /* nothing to do */ @@ -1267,9 +1267,8 @@ static void xen_flush_tlb_others(const struct cpumask *cpus, 
args = mcs.args; args->op.arg2.vcpumask = to_cpumask(args->mask); - /* Remove us, and any offline CPUS. */ + /* Remove any offline CPUs */ cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask); - cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask)); args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; if (info->end != TLB_FLUSH_ALL && @@ -2086,7 +2085,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .flush_tlb_user = xen_flush_tlb, .flush_tlb_kernel = xen_flush_tlb, .flush_tlb_one_user = xen_flush_tlb_one_user, - .flush_tlb_others = xen_flush_tlb_others, + .flush_tlb_multi = xen_flush_tlb_multi, .tlb_remove_table = tlb_remove_table, .pgd_alloc = xen_pgd_alloc, @@ -2410,7 +2409,7 @@ int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr, rmd.prot = prot; /* * We use the err_ptr to indicate if there we are doing a contiguous - * mapping or a discontigious mapping. + * mapping or a discontiguous mapping. */ rmd.contiguous = !err_ptr; rmd.no_translate = no_translate; diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 17d80f751fcb..ac06ca32e9ef 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -98,8 +98,8 @@ EXPORT_SYMBOL_GPL(xen_p2m_size); unsigned long xen_max_p2m_pfn __read_mostly; EXPORT_SYMBOL_GPL(xen_max_p2m_pfn); -#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT -#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT +#ifdef CONFIG_XEN_MEMORY_HOTPLUG_LIMIT +#define P2M_LIMIT CONFIG_XEN_MEMORY_HOTPLUG_LIMIT #else #define P2M_LIMIT 0 #endif @@ -416,9 +416,6 @@ void __init xen_vmalloc_p2m_tree(void) xen_p2m_last_pfn = xen_max_p2m_pfn; p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE; - if (!p2m_limit && IS_ENABLED(CONFIG_XEN_UNPOPULATED_ALLOC)) - p2m_limit = xen_start_info->nr_pages * XEN_EXTRA_MEM_RATIO; - vm.flags = VM_ALLOC; vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit), PMD_SIZE * PMDS_PER_MID_PAGE); diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c index 19ae3e4fe4e9..54f9aa7e8457 100644 --- a/arch/x86/xen/pci-swiotlb-xen.c +++ b/arch/x86/xen/pci-swiotlb-xen.c @@ -59,7 +59,7 @@ int __init pci_xen_swiotlb_detect(void) void __init pci_xen_swiotlb_init(void) { if (xen_swiotlb) { - xen_swiotlb_init(1, true /* early */); + xen_swiotlb_init_early(); dma_ops = &xen_swiotlb_dma_ops; #ifdef CONFIG_PCI @@ -76,7 +76,7 @@ int pci_xen_swiotlb_init_late(void) if (xen_swiotlb) return 0; - rc = xen_swiotlb_init(1, false /* late */); + rc = xen_swiotlb_init(); if (rc) return rc; diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 1a3b75652fa4..8bfc10330107 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -59,6 +59,18 @@ static struct { } xen_remap_buf __initdata __aligned(PAGE_SIZE); static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY; +/* + * The maximum amount of extra memory compared to the base size. The + * main scaling factor is the size of struct page. At extreme ratios + * of base:extra, all the base memory can be filled with page + * structures for the extra memory, leaving no space for anything + * else. + * + * 10x seems like a reasonable balance between scaling flexibility and + * leaving a practically usable system. 
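
A quick back-of-the-envelope check of that balance, assuming 4 KiB pages and a 64-byte struct page (typical values, not mandated anywhere): at the full 10x ratio, the page structures needed to describe the extra memory cost about 160 MiB out of each GiB of base RAM, noticeable but still leaving a usable system. The sketch below just does that arithmetic; every number in it is illustrative.

#include <stdio.h>

int main(void)
{
        unsigned long long base_bytes  = 1ULL << 30; /* 1 GiB of real RAM */
        unsigned long long page_size   = 4096;
        unsigned long long struct_page = 64;   /* assumed sizeof(struct page) */
        unsigned long long ratio       = 10;   /* EXTRA_MEM_RATIO */

        unsigned long long extra_bytes = ratio * base_bytes;
        unsigned long long overhead    = extra_bytes / page_size * struct_page;

        printf("extra memory allowed:  %llu MiB\n", extra_bytes >> 20);
        printf("struct page overhead:  %llu MiB (%.1f%% of base RAM)\n",
               overhead >> 20, 100.0 * overhead / base_bytes);
        return 0;
}
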
+ */ +#define EXTRA_MEM_RATIO (10) + static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB); static void __init xen_parse_512gb(void) @@ -778,13 +790,13 @@ char * __init xen_memory_setup(void) extra_pages += max_pages - max_pfn; /* - * Clamp the amount of extra memory to a XEN_EXTRA_MEM_RATIO + * Clamp the amount of extra memory to a EXTRA_MEM_RATIO * factor the base size. * * Make sure we have no memory above max_pages, as this area * isn't handled by the p2m management. */ - extra_pages = min3(XEN_EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), + extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), extra_pages, max_pages - max_pfn); i = 0; addr = xen_e820_table.entries[0].addr; diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 91f5b330dcc6..d9c945ee1100 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -379,11 +379,6 @@ void xen_timer_resume(void) } } -static const struct pv_time_ops xen_time_ops __initconst = { - .sched_clock = xen_sched_clock, - .steal_clock = xen_steal_clock, -}; - static struct pvclock_vsyscall_time_info *xen_clock __read_mostly; static u64 xen_clock_value_saved; @@ -525,17 +520,24 @@ static void __init xen_time_init(void) pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier); } -void __init xen_init_time_ops(void) +static void __init xen_init_time_common(void) { xen_sched_clock_offset = xen_clocksource_read(); - pv_ops.time = xen_time_ops; + static_call_update(pv_steal_clock, xen_steal_clock); + paravirt_set_sched_clock(xen_sched_clock); + + x86_platform.calibrate_tsc = xen_tsc_khz; + x86_platform.get_wallclock = xen_get_wallclock; +} + +void __init xen_init_time_ops(void) +{ + xen_init_time_common(); x86_init.timers.timer_init = xen_time_init; x86_init.timers.setup_percpu_clockev = x86_init_noop; x86_cpuinit.setup_percpu_clockev = x86_init_noop; - x86_platform.calibrate_tsc = xen_tsc_khz; - x86_platform.get_wallclock = xen_get_wallclock; /* Dom0 uses the native method to set the hardware RTC. */ if (!xen_initial_domain()) x86_platform.set_wallclock = xen_set_wallclock; @@ -569,13 +571,11 @@ void __init xen_hvm_init_time_ops(void) return; } - xen_sched_clock_offset = xen_clocksource_read(); - pv_ops.time = xen_time_ops; + xen_init_time_common(); + x86_init.timers.setup_percpu_clockev = xen_time_init; x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents; - x86_platform.calibrate_tsc = xen_tsc_khz; - x86_platform.get_wallclock = xen_get_wallclock; x86_platform.set_wallclock = xen_set_wallclock; } #endif diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 9ad6b7b82707..2332b2156993 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -7,6 +7,7 @@ config XTENSA select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU select ARCH_HAS_SYNC_DMA_FOR_DEVICE if MMU select ARCH_HAS_DMA_SET_UNCACHED if MMU + select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS select ARCH_WANT_FRAME_POINTERS @@ -84,6 +85,12 @@ config KASAN_SHADOW_OFFSET hex default 0x6e400000 +config CPU_BIG_ENDIAN + def_bool $(success,test "$(shell,echo __XTENSA_EB__ | $(CC) -E -P -)" = 1) + +config CPU_LITTLE_ENDIAN + def_bool !CPU_BIG_ENDIAN + menu "Processor type and features" choice @@ -387,6 +394,28 @@ config PARSE_BOOTPARAM If unsure, say Y. +choice + prompt "Semihosting interface" + default XTENSA_SIMCALL_ISS + depends on XTENSA_PLATFORM_ISS + help + Choose semihosting interface that will be used for serial port, + block device and networking. 
+ +config XTENSA_SIMCALL_ISS + bool "simcall" + help + Use simcall instruction. simcall is only available on simulators, + it does nothing on hardware. + +config XTENSA_SIMCALL_GDBIO + bool "GDBIO" + help + Use break instruction. It is available on real hardware when GDB + is attached to it via JTAG. + +endchoice + config BLK_DEV_SIMDISK tristate "Host file-based simulated block device support" default n @@ -466,7 +495,7 @@ config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX then enter your normal kernel breakpoints once the MMU was mapped to the kernel mappings (0XC0000000). - This unfortunately won't work for U-Boot and likely also wont + This unfortunately won't work for U-Boot and likely also won't work for using KEXEC to have a hot kernel ready for doing a KDUMP. diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile index cf0940708702..e9c8f064c44d 100644 --- a/arch/xtensa/Makefile +++ b/arch/xtensa/Makefile @@ -19,12 +19,8 @@ variant-y := $(patsubst "%",%,$(CONFIG_XTENSA_VARIANT_NAME)) VARIANT = $(variant-y) export VARIANT -# Test for cross compiling - ifneq ($(VARIANT),) - COMPILE_ARCH = $(shell uname -m) - - ifneq ($(COMPILE_ARCH), xtensa) + ifdef cross_compiling ifndef CROSS_COMPILE CROSS_COMPILE = xtensa_$(VARIANT)- endif @@ -52,14 +48,7 @@ ifneq ($(CONFIG_LD_NO_RELAX),) KBUILD_LDFLAGS := --no-relax endif -ifeq ($(shell echo __XTENSA_EB__ | $(CC) -E - | grep -v "\#"),1) -CHECKFLAGS += -D__XTENSA_EB__ -KBUILD_CPPFLAGS += -DCONFIG_CPU_BIG_ENDIAN -endif -ifeq ($(shell echo __XTENSA_EL__ | $(CC) -E - | grep -v "\#"),1) -CHECKFLAGS += -D__XTENSA_EL__ -KBUILD_CPPFLAGS += -DCONFIG_CPU_LITTLE_ENDIAN -endif +CHECKFLAGS += -D$(if $(CONFIG_CPU_BIG_ENDIAN),__XTENSA_EB__,__XTENSA_EL__) vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y)) plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y)) diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile index f6bb352f94b4..a65b7a9ebff2 100644 --- a/arch/xtensa/boot/Makefile +++ b/arch/xtensa/boot/Makefile @@ -12,10 +12,6 @@ KBUILD_CFLAGS += -fno-builtin -Iarch/$(ARCH)/boot/include HOSTFLAGS += -Iarch/$(ARCH)/boot/include -BIG_ENDIAN := $(shell echo __XTENSA_EB__ | $(CC) -E - | grep -v "\#") - -export BIG_ENDIAN - subdir-y := lib targets += vmlinux.bin vmlinux.bin.gz targets += uImage xipImage diff --git a/arch/xtensa/boot/boot-elf/Makefile b/arch/xtensa/boot/boot-elf/Makefile index f7c775d53012..faec2002923b 100644 --- a/arch/xtensa/boot/boot-elf/Makefile +++ b/arch/xtensa/boot/boot-elf/Makefile @@ -4,15 +4,10 @@ # for more details. # -ifeq ($(BIG_ENDIAN),1) -OBJCOPY_ARGS := -O elf32-xtensa-be -else -OBJCOPY_ARGS := -O elf32-xtensa-le -endif +OBJCOPY_ARGS := -O $(if $(CONFIG_CPU_BIG_ENDIAN),elf32-xtensa-be,elf32-xtensa-le) -export OBJCOPY_ARGS -export CPPFLAGS_boot.lds += -P -C -export KBUILD_AFLAGS += -mtext-section-literals +CPPFLAGS_boot.lds += -P -C +KBUILD_AFLAGS += -mtext-section-literals boot-y := bootstrap.o targets += $(boot-y) boot.lds diff --git a/arch/xtensa/boot/boot-redboot/Makefile b/arch/xtensa/boot/boot-redboot/Makefile index 07cb24afedc2..1d1d46215b1c 100644 --- a/arch/xtensa/boot/boot-redboot/Makefile +++ b/arch/xtensa/boot/boot-redboot/Makefile @@ -4,11 +4,7 @@ # for more details. 
# -ifeq ($(BIG_ENDIAN),1) -OBJCOPY_ARGS := -O elf32-xtensa-be -else -OBJCOPY_ARGS := -O elf32-xtensa-le -endif +OBJCOPY_ARGS := -O $(if $(CONFIG_CPU_BIG_ENDIAN),elf32-xtensa-be,elf32-xtensa-le) LD_ARGS = -T $(srctree)/$(obj)/boot.ld diff --git a/arch/xtensa/configs/xip_kc705_defconfig b/arch/xtensa/configs/xip_kc705_defconfig index 4f1ff9531f6a..062148e17135 100644 --- a/arch/xtensa/configs/xip_kc705_defconfig +++ b/arch/xtensa/configs/xip_kc705_defconfig @@ -72,7 +72,6 @@ CONFIG_MARVELL_PHY=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set -CONFIG_DEVKMEM=y CONFIG_SERIAL_8250=y # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set CONFIG_SERIAL_8250_CONSOLE=y diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h index 05cb13dfe6f4..9793b49fc641 100644 --- a/arch/xtensa/include/asm/initialize_mmu.h +++ b/arch/xtensa/include/asm/initialize_mmu.h @@ -73,7 +73,7 @@ _j 2f .align 4 -1: movi a2, 0x10000000 +1: #if CONFIG_KERNEL_LOAD_ADDRESS < 0x40000000ul #define TEMP_MAPPING_VADDR 0x40000000 diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 4dc04e6c01d7..d7fc45c920c2 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -280,7 +280,9 @@ static inline pte_t pte_mkyoung(pte_t pte) static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITABLE; return pte; } -#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CA_MASK)) +#define pgprot_noncached(prot) \ + ((__pgprot((pgprot_val(prot) & ~_PAGE_CA_MASK) | \ + _PAGE_CA_BYPASS))) /* * Conversion functions: convert a page and protection to a page entry, diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S index c426b846beef..45cc0ae0af6f 100644 --- a/arch/xtensa/kernel/coprocessor.S +++ b/arch/xtensa/kernel/coprocessor.S @@ -100,37 +100,6 @@ LOAD_CP_REGS_TAB(7) /* - * coprocessor_flush(struct thread_info*, index) - * a2 a3 - * - * Save coprocessor registers for coprocessor 'index'. - * The register values are saved to or loaded from the coprocessor area - * inside the task_info structure. - * - * Note that this function doesn't update the coprocessor_owner information! - * - */ - -ENTRY(coprocessor_flush) - - /* reserve 4 bytes on stack to save a0 */ - abi_entry(4) - - s32i a0, a1, 0 - movi a0, .Lsave_cp_regs_jump_table - addx8 a3, a3, a0 - l32i a4, a3, 4 - l32i a3, a3, 0 - add a2, a2, a4 - beqz a3, 1f - callx0 a3 -1: l32i a0, a1, 0 - - abi_ret(4) - -ENDPROC(coprocessor_flush) - -/* * Entry condition: * * a0: trashed, original value saved on stack (PT_AREG0) @@ -245,6 +214,39 @@ ENTRY(fast_coprocessor) ENDPROC(fast_coprocessor) + .text + +/* + * coprocessor_flush(struct thread_info*, index) + * a2 a3 + * + * Save coprocessor registers for coprocessor 'index'. + * The register values are saved to or loaded from the coprocessor area + * inside the task_info structure. + * + * Note that this function doesn't update the coprocessor_owner information! 
+ * + */ + +ENTRY(coprocessor_flush) + + /* reserve 4 bytes on stack to save a0 */ + abi_entry(4) + + s32i a0, a1, 0 + movi a0, .Lsave_cp_regs_jump_table + addx8 a3, a3, a0 + l32i a4, a3, 4 + l32i a3, a3, 0 + add a2, a2, a4 + beqz a3, 1f + callx0 a3 +1: l32i a0, a1, 0 + + abi_ret(4) + +ENDPROC(coprocessor_flush) + .data ENTRY(coprocessor_owner) diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index e0c1fac0910f..b9b81e76beea 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S @@ -212,7 +212,7 @@ ENTRY(_startup) * * The linker script used to build the Linux kernel image * creates a table located at __boot_reloc_table_start - * that contans the information what data needs to be unpacked. + * that contains the information what data needs to be unpacked. * * Uses a2-a7. */ @@ -222,7 +222,7 @@ ENTRY(_startup) 1: beq a2, a3, 3f # no more entries? l32i a4, a2, 0 # start destination (in RAM) - l32i a5, a2, 4 # end desination (in RAM) + l32i a5, a2, 4 # end destination (in RAM) l32i a6, a2, 8 # start source (in ROM) addi a2, a2, 12 # next entry beq a4, a5, 1b # skip, empty entry diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c index 3f32e275997a..62c900e400d6 100644 --- a/arch/xtensa/kernel/pci.c +++ b/arch/xtensa/kernel/pci.c @@ -76,7 +76,7 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma) struct pci_controller *pci_ctrl = (struct pci_controller*) pdev->sysdata; resource_size_t ioaddr = pci_resource_start(pdev, bar); - if (pci_ctrl == 0) + if (!pci_ctrl) return -EINVAL; /* should never happen */ /* Convert to an offset within this PCI controller */ diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c index 2c415fce6801..201356faa7e6 100644 --- a/arch/xtensa/kernel/syscall.c +++ b/arch/xtensa/kernel/syscall.c @@ -17,7 +17,6 @@ */ #include <linux/uaccess.h> #include <asm/syscall.h> -#include <asm/unistd.h> #include <linux/linkage.h> #include <linux/stringify.h> #include <linux/errno.h> @@ -28,12 +27,9 @@ #include <linux/sched/mm.h> #include <linux/shm.h> -syscall_t sys_call_table[__NR_syscalls] /* FIXME __cacheline_aligned */= { - [0 ... 
__NR_syscalls - 1] = (syscall_t)&sys_ni_syscall, - -#define __SYSCALL(nr, entry, nargs)[nr] = (syscall_t)entry, +syscall_t sys_call_table[] /* FIXME __cacheline_aligned */= { +#define __SYSCALL(nr, entry) (syscall_t)entry, #include <asm/syscall_table.h> -#undef __SYSCALL }; #define COLOUR_ALIGN(addr, pgoff) \ diff --git a/arch/xtensa/kernel/syscalls/Makefile b/arch/xtensa/kernel/syscalls/Makefile index 285aaba832d9..6713c65a25e1 100644 --- a/arch/xtensa/kernel/syscalls/Makefile +++ b/arch/xtensa/kernel/syscalls/Makefile @@ -6,20 +6,14 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') syscall := $(src)/syscall.tbl -syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +syshdr := $(srctree)/scripts/syscallhdr.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ - cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ - '$(syshdr_abis_$(basetarget))' \ - '$(syshdr_pfx_$(basetarget))' \ - '$(syshdr_offset_$(basetarget))' + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr $< $@ quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))' \ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ $(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl index 365a9b849224..9d76d433d3d6 100644 --- a/arch/xtensa/kernel/syscalls/syscall.tbl +++ b/arch/xtensa/kernel/syscalls/syscall.tbl @@ -413,3 +413,7 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr +443 common quotactl_path sys_quotactl_path +444 common landlock_create_ruleset sys_landlock_create_ruleset +445 common landlock_add_rule sys_landlock_add_rule +446 common landlock_restrict_self sys_landlock_restrict_self diff --git a/arch/xtensa/kernel/syscalls/syscallhdr.sh b/arch/xtensa/kernel/syscalls/syscallhdr.sh deleted file mode 100644 index eebfb8a8ace6..000000000000 --- a/arch/xtensa/kernel/syscalls/syscallhdr.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -prefix="$4" -offset="$5" - -fileguard=_UAPI_ASM_XTENSA_`basename "$out" | sed \ - -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ - -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - printf "#ifndef %s\n" "${fileguard}" - printf "#define %s\n" "${fileguard}" - printf "\n" - - nxt=0 - while read nr abi name entry ; do - if [ -z "$offset" ]; then - printf "#define __NR_%s%s\t%s\n" \ - "${prefix}" "${name}" "${nr}" - else - printf "#define __NR_%s%s\t(%s + %s)\n" \ - "${prefix}" "${name}" "${offset}" "${nr}" - fi - nxt=$((nr+1)) - done - - printf "\n" - printf "#ifdef __KERNEL__\n" - printf "#define __NR_syscalls\t%s\n" "${nxt}" - printf "#endif\n" - printf "\n" - printf "#endif /* %s */\n" "${fileguard}" -) > "$out" diff --git a/arch/xtensa/kernel/syscalls/syscalltbl.sh b/arch/xtensa/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index 85d78d9309ad..000000000000 --- a/arch/xtensa/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - 
t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry ; do - emit $((nxt+offset)) $((nr+offset)) $entry - nxt=$((nr+1)) - done -) > "$out" diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c index 085b8c77b9d9..19e5a478a7e8 100644 --- a/arch/xtensa/mm/cache.c +++ b/arch/xtensa/mm/cache.c @@ -81,13 +81,8 @@ static inline void kmap_invalidate_coherent(struct page *page, static inline void *coherent_kvaddr(struct page *page, unsigned long base, unsigned long vaddr, unsigned long *paddr) { - if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) { - *paddr = page_to_phys(page); - return (void *)(base + (vaddr & DCACHE_ALIAS_MASK)); - } else { - *paddr = 0; - return page_to_virt(page); - } + *paddr = page_to_phys(page); + return (void *)(base + (vaddr & DCACHE_ALIAS_MASK)); } void clear_user_highpage(struct page *page, unsigned long vaddr) diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index 7666408ce12a..95a74890c7e9 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -112,8 +112,11 @@ good_area: */ fault = handle_mm_fault(vma, address, flags, regs); - if (fault_signal_pending(fault, regs)) + if (fault_signal_pending(fault, regs)) { + if (!user_mode(regs)) + goto bad_page_fault; return; + } if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 2daeba9e454e..6a32b2cf2718 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -119,7 +119,6 @@ void __init mem_init(void) memblock_free_all(); - mem_init_print_info(NULL); pr_info("virtual kernel memory layout:\n" #ifdef CONFIG_KASAN " kasan : 0x%08lx - 0x%08lx (%5lu MB)\n" diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S index 25cd67debee6..0527bf6e3211 100644 --- a/arch/xtensa/mm/misc.S +++ b/arch/xtensa/mm/misc.S @@ -118,20 +118,13 @@ ENTRY(clear_page_alias) abi_entry_default - /* Skip setting up a temporary DTLB if not aliased low page. */ - movi a5, PAGE_OFFSET - movi a6, 0 - beqz a3, 1f - - /* Setup a temporary DTLB for the addr. */ - addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE) mov a4, a2 wdtlb a6, a2 dsync -1: movi a3, 0 + movi a3, 0 __loopi a2, a7, PAGE_SIZE, 32 s32i a3, a2, 0 s32i a3, a2, 4 @@ -143,12 +136,9 @@ ENTRY(clear_page_alias) s32i a3, a2, 28 __endla a2, a7, 32 - bnez a6, 1f - abi_ret_default - - /* We need to invalidate the temporary idtlb entry, if any. */ + /* We need to invalidate the temporary dtlb entry. */ -1: idtlb a4 + idtlb a4 dsync abi_ret_default @@ -166,22 +156,12 @@ ENTRY(copy_page_alias) abi_entry_default - /* Skip setting up a temporary DTLB for destination if not aliased. */ - - movi a6, 0 - movi a7, 0 - beqz a4, 1f - /* Setup a temporary DTLB for destination. */ addi a6, a4, (PAGE_KERNEL | _PAGE_HW_WRITE) wdtlb a6, a2 dsync - /* Skip setting up a temporary DTLB for source if not aliased. */ - -1: beqz a5, 1f - /* Setup a temporary DTLB for source. */ addi a7, a5, PAGE_KERNEL @@ -219,17 +199,11 @@ ENTRY(copy_page_alias) /* We need to invalidate any temporary mapping! 
*/ - bnez a6, 1f - bnez a7, 2f - abi_ret_default - -1: addi a2, a2, -PAGE_SIZE + addi a2, a2, -PAGE_SIZE idtlb a2 dsync - bnez a7, 2f - abi_ret_default -2: addi a3, a3, -PAGE_SIZE+1 + addi a3, a3, -PAGE_SIZE+1 idtlb a3 dsync diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c index af81a62faba6..a3dda25a4e45 100644 --- a/arch/xtensa/platforms/iss/console.c +++ b/arch/xtensa/platforms/iss/console.c @@ -31,48 +31,23 @@ #define SERIAL_MAX_NUM_LINES 1 #define SERIAL_TIMER_VALUE (HZ / 10) +static void rs_poll(struct timer_list *); + static struct tty_driver *serial_driver; static struct tty_port serial_port; -static struct timer_list serial_timer; - +static DEFINE_TIMER(serial_timer, rs_poll); static DEFINE_SPINLOCK(timer_lock); -static char *serial_version = "0.1"; -static char *serial_name = "ISS serial driver"; - -/* - * This routine is called whenever a serial port is opened. It - * enables interrupts for a serial port, linking in its async structure into - * the IRQ chain. It also performs the serial-specific - * initialization for the tty structure. - */ - -static void rs_poll(struct timer_list *); - static int rs_open(struct tty_struct *tty, struct file * filp) { - tty->port = &serial_port; spin_lock_bh(&timer_lock); - if (tty->count == 1) { - timer_setup(&serial_timer, rs_poll, 0); + if (tty->count == 1) mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE); - } spin_unlock_bh(&timer_lock); return 0; } - -/* - * ------------------------------------------------------------ - * iss_serial_close() - * - * This routine is called when the serial port gets closed. First, we - * wait for the last remaining data to be sent. Then, we unlink its - * async structure from the interrupt chain if necessary, and we free - * that IRQ if nothing is left in the chain. - * ------------------------------------------------------------ - */ static void rs_close(struct tty_struct *tty, struct file * filp) { spin_lock_bh(&timer_lock); @@ -149,7 +124,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout) static int rs_proc_show(struct seq_file *m, void *v) { - seq_printf(m, "serinfo:1.0 driver:%s\n", serial_version); + seq_printf(m, "serinfo:1.0 driver:0.1\n"); return 0; } @@ -166,14 +141,12 @@ static const struct tty_operations serial_ops = { .proc_show = rs_proc_show, }; -int __init rs_init(void) +static int __init rs_init(void) { tty_port_init(&serial_port); serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES); - pr_info("%s %s\n", serial_name, serial_version); - /* Initialize the tty_driver structure */ serial_driver->driver_name = "iss_serial"; @@ -198,11 +171,7 @@ int __init rs_init(void) static __exit void rs_exit(void) { - int error; - - if ((error = tty_unregister_driver(serial_driver))) - pr_err("ISS_SERIAL: failed to unregister serial driver (%d)\n", - error); + tty_unregister_driver(serial_driver); put_tty_driver(serial_driver); tty_port_destroy(&serial_port); } diff --git a/arch/xtensa/platforms/iss/include/platform/simcall-gdbio.h b/arch/xtensa/platforms/iss/include/platform/simcall-gdbio.h new file mode 100644 index 000000000000..e642860e25a8 --- /dev/null +++ b/arch/xtensa/platforms/iss/include/platform/simcall-gdbio.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 Cadence Design Systems Inc. */ + +#ifndef _XTENSA_PLATFORM_ISS_SIMCALL_GDBIO_H +#define _XTENSA_PLATFORM_ISS_SIMCALL_GDBIO_H + +/* + * System call like services offered by the GDBIO host. 
+ */ + +#define SYS_open -2 +#define SYS_close -3 +#define SYS_read -4 +#define SYS_write -5 +#define SYS_lseek -6 + +static int errno; + +static inline int __simc(int a, int b, int c, int d) +{ + register int a1 asm("a2") = a; + register int b1 asm("a6") = b; + register int c1 asm("a3") = c; + register int d1 asm("a4") = d; + __asm__ __volatile__ ( + "break 1, 14\n" + : "+r"(a1), "+r"(c1) + : "r"(b1), "r"(d1) + : "memory"); + errno = c1; + return a1; +} + +#endif /* _XTENSA_PLATFORM_ISS_SIMCALL_GDBIO_H */ diff --git a/arch/xtensa/platforms/iss/include/platform/simcall-iss.h b/arch/xtensa/platforms/iss/include/platform/simcall-iss.h new file mode 100644 index 000000000000..5a1e7a1f182e --- /dev/null +++ b/arch/xtensa/platforms/iss/include/platform/simcall-iss.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 Cadence Design Systems Inc. */ + +#ifndef _XTENSA_PLATFORM_ISS_SIMCALL_ISS_H +#define _XTENSA_PLATFORM_ISS_SIMCALL_ISS_H + +/* + * System call like services offered by the simulator host. + */ + +#define SYS_nop 0 /* unused */ +#define SYS_exit 1 /*x*/ +#define SYS_fork 2 +#define SYS_read 3 /*x*/ +#define SYS_write 4 /*x*/ +#define SYS_open 5 /*x*/ +#define SYS_close 6 /*x*/ +#define SYS_rename 7 /*x 38 - waitpid */ +#define SYS_creat 8 /*x*/ +#define SYS_link 9 /*x (not implemented on WIN32) */ +#define SYS_unlink 10 /*x*/ +#define SYS_execv 11 /* n/a - execve */ +#define SYS_execve 12 /* 11 - chdir */ +#define SYS_pipe 13 /* 42 - time */ +#define SYS_stat 14 /* 106 - mknod */ +#define SYS_chmod 15 +#define SYS_chown 16 /* 202 - lchown */ +#define SYS_utime 17 /* 30 - break */ +#define SYS_wait 18 /* n/a - oldstat */ +#define SYS_lseek 19 /*x*/ +#define SYS_getpid 20 +#define SYS_isatty 21 /* n/a - mount */ +#define SYS_fstat 22 /* 108 - oldumount */ +#define SYS_time 23 /* 13 - setuid */ +#define SYS_gettimeofday 24 /*x 78 - getuid (not implemented on WIN32) */ +#define SYS_times 25 /*X 43 - stime (Xtensa-specific implementation) */ +#define SYS_socket 26 +#define SYS_sendto 27 +#define SYS_recvfrom 28 +#define SYS_select_one 29 /* not compatible select, one file descriptor at the time */ +#define SYS_bind 30 +#define SYS_ioctl 31 + +#define SYS_iss_argc 1000 /* returns value of argc */ +#define SYS_iss_argv_size 1001 /* bytes needed for argv & arg strings */ +#define SYS_iss_set_argv 1002 /* saves argv & arg strings at given addr */ + +/* + * SYS_select_one specifiers + */ + +#define XTISS_SELECT_ONE_READ 1 +#define XTISS_SELECT_ONE_WRITE 2 +#define XTISS_SELECT_ONE_EXCEPT 3 + +static int errno; + +static inline int __simc(int a, int b, int c, int d) +{ + register int a1 asm("a2") = a; + register int b1 asm("a3") = b; + register int c1 asm("a4") = c; + register int d1 asm("a5") = d; + __asm__ __volatile__ ( + "simcall\n" + : "+r"(a1), "+r"(b1) + : "r"(c1), "r"(d1) + : "memory"); + errno = b1; + return a1; +} + +#endif /* _XTENSA_PLATFORM_ISS_SIMCALL_ISS_H */ diff --git a/arch/xtensa/platforms/iss/include/platform/simcall.h b/arch/xtensa/platforms/iss/include/platform/simcall.h index f42870ab551b..e1ec50ce39ee 100644 --- a/arch/xtensa/platforms/iss/include/platform/simcall.h +++ b/arch/xtensa/platforms/iss/include/platform/simcall.h @@ -6,82 +6,29 @@ * for more details. * * Copyright (C) 2001 Tensilica Inc. - * Copyright (C) 2017 Cadence Design Systems Inc. + * Copyright (C) 2017 - 2021 Cadence Design Systems Inc. 
*/ #ifndef _XTENSA_PLATFORM_ISS_SIMCALL_H #define _XTENSA_PLATFORM_ISS_SIMCALL_H +#include <linux/bug.h> -/* - * System call like services offered by the simulator host. - */ - -#define SYS_nop 0 /* unused */ -#define SYS_exit 1 /*x*/ -#define SYS_fork 2 -#define SYS_read 3 /*x*/ -#define SYS_write 4 /*x*/ -#define SYS_open 5 /*x*/ -#define SYS_close 6 /*x*/ -#define SYS_rename 7 /*x 38 - waitpid */ -#define SYS_creat 8 /*x*/ -#define SYS_link 9 /*x (not implemented on WIN32) */ -#define SYS_unlink 10 /*x*/ -#define SYS_execv 11 /* n/a - execve */ -#define SYS_execve 12 /* 11 - chdir */ -#define SYS_pipe 13 /* 42 - time */ -#define SYS_stat 14 /* 106 - mknod */ -#define SYS_chmod 15 -#define SYS_chown 16 /* 202 - lchown */ -#define SYS_utime 17 /* 30 - break */ -#define SYS_wait 18 /* n/a - oldstat */ -#define SYS_lseek 19 /*x*/ -#define SYS_getpid 20 -#define SYS_isatty 21 /* n/a - mount */ -#define SYS_fstat 22 /* 108 - oldumount */ -#define SYS_time 23 /* 13 - setuid */ -#define SYS_gettimeofday 24 /*x 78 - getuid (not implemented on WIN32) */ -#define SYS_times 25 /*X 43 - stime (Xtensa-specific implementation) */ -#define SYS_socket 26 -#define SYS_sendto 27 -#define SYS_recvfrom 28 -#define SYS_select_one 29 /* not compitible select, one file descriptor at the time */ -#define SYS_bind 30 -#define SYS_ioctl 31 - -#define SYS_iss_argc 1000 /* returns value of argc */ -#define SYS_iss_argv_size 1001 /* bytes needed for argv & arg strings */ -#define SYS_iss_set_argv 1002 /* saves argv & arg strings at given addr */ - -/* - * SYS_select_one specifiers - */ - -#define XTISS_SELECT_ONE_READ 1 -#define XTISS_SELECT_ONE_WRITE 2 -#define XTISS_SELECT_ONE_EXCEPT 3 - -static int errno; - -static inline int __simc(int a, int b, int c, int d) -{ - register int a1 asm("a2") = a; - register int b1 asm("a3") = b; - register int c1 asm("a4") = c; - register int d1 asm("a5") = d; - __asm__ __volatile__ ( - "simcall\n" - : "+r"(a1), "+r"(b1) - : "r"(c1), "r"(d1) - : "memory"); - errno = b1; - return a1; -} +#ifdef CONFIG_XTENSA_SIMCALL_ISS +#include <platform/simcall-iss.h> +#endif +#ifdef CONFIG_XTENSA_SIMCALL_GDBIO +#include <platform/simcall-gdbio.h> +#endif static inline int simc_exit(int exit_code) { +#ifdef SYS_exit return __simc(SYS_exit, exit_code, 0, 0); +#else + WARN_ONCE(1, "%s: not implemented\n", __func__); + return -1; +#endif } static inline int simc_open(const char *file, int flags, int mode) @@ -96,7 +43,12 @@ static inline int simc_close(int fd) static inline int simc_ioctl(int fd, int request, void *arg) { +#ifdef SYS_ioctl return __simc(SYS_ioctl, fd, request, (int) arg); +#else + WARN_ONCE(1, "%s: not implemented\n", __func__); + return -1; +#endif } static inline int simc_read(int fd, void *buf, size_t count) @@ -111,9 +63,14 @@ static inline int simc_write(int fd, const void *buf, size_t count) static inline int simc_poll(int fd) { +#ifdef SYS_select_one long timeval[2] = { 0, 0 }; return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&timeval); +#else + WARN_ONCE(1, "%s: not implemented\n", __func__); + return -1; +#endif } static inline int simc_lseek(int fd, uint32_t off, int whence) @@ -123,18 +80,31 @@ static inline int simc_lseek(int fd, uint32_t off, int whence) static inline int simc_argc(void) { +#ifdef SYS_iss_argc return __simc(SYS_iss_argc, 0, 0, 0); +#else + WARN_ONCE(1, "%s: not implemented\n", __func__); + return 0; +#endif } static inline int simc_argv_size(void) { +#ifdef SYS_iss_argv_size return __simc(SYS_iss_argv_size, 0, 0, 0); +#else + WARN_ONCE(1, 
"%s: not implemented\n", __func__); + return 0; +#endif } static inline void simc_argv(void *buf) { +#ifdef SYS_iss_set_argv __simc(SYS_iss_set_argv, (int)buf, 0, 0); +#else + WARN_ONCE(1, "%s: not implemented\n", __func__); +#endif } #endif /* _XTENSA_PLATFORM_ISS_SIMCALL_H */ - |