| author    | Paolo Bonzini <pbonzini@redhat.com>              | 2020-03-31 10:44:53 -0400 |
|-----------|--------------------------------------------------|---------------------------|
| committer | Paolo Bonzini <pbonzini@redhat.com>              | 2020-03-31 10:44:53 -0400 |
| commit    | cf39d37539068d53e015d8b4f1dcf42c65306b0d (patch)  |                           |
| tree      | c8dcbbc3a5c92beee82e83e2a2b597f328a919a6 /arch    |                           |
| parent    | 830948eb68265ac7f3f364aa9801550feafec0d6 (diff)   |                           |
| parent    | 463050599742a89e0508355e626e032e8d0dab8d (diff)   |                           |
Merge tag 'kvmarm-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm updates for Linux 5.7
- GICv4.1 support
- 32bit host removal
Diffstat (limited to 'arch')
144 files changed, 1330 insertions, 6776 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 97864aabc2a6..a07bec7f4d8d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2091,5 +2091,3 @@ source "drivers/firmware/Kconfig"
 if CRYPTO
 source "arch/arm/crypto/Kconfig"
 endif
-
-source "arch/arm/kvm/Kconfig"
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index db857d07114f..b4ce96f55ddd 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -278,7 +278,6 @@ core-$(CONFIG_FPE_NWFPE) += arch/arm/nwfpe/
 core-$(CONFIG_FPE_FASTFPE) += $(patsubst $(srctree)/%,%,$(wildcard $(srctree)/arch/arm/fastfpe/))
 core-$(CONFIG_VFP) += arch/arm/vfp/
 core-$(CONFIG_XEN) += arch/arm/xen/
-core-$(CONFIG_KVM_ARM_HOST) += arch/arm/kvm/
 core-$(CONFIG_VDSO) += arch/arm/vdso/
 
 # If we have a machine-specific directory, then include it in the build.
diff --git a/arch/arm/configs/axm55xx_defconfig b/arch/arm/configs/axm55xx_defconfig
index 6ea7dafa4c9e..46075216ee6d 100644
--- a/arch/arm/configs/axm55xx_defconfig
+++ b/arch/arm/configs/axm55xx_defconfig
@@ -236,5 +236,3 @@ CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_SHA256=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_VIRTUALIZATION=y
-CONFIG_KVM=y
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index c815477b4303..413abfb42989 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -38,71 +38,6 @@
 #define ICC_AP1R2 __ICC_AP1Rx(2)
 #define ICC_AP1R3 __ICC_AP1Rx(3)
 
-#define ICC_HSRE __ACCESS_CP15(c12, 4, c9, 5)
-
-#define ICH_VSEIR __ACCESS_CP15(c12, 4, c9, 4)
-#define ICH_HCR __ACCESS_CP15(c12, 4, c11, 0)
-#define ICH_VTR __ACCESS_CP15(c12, 4, c11, 1)
-#define ICH_MISR __ACCESS_CP15(c12, 4, c11, 2)
-#define ICH_EISR __ACCESS_CP15(c12, 4, c11, 3)
-#define ICH_ELRSR __ACCESS_CP15(c12, 4, c11, 5)
-#define ICH_VMCR __ACCESS_CP15(c12, 4, c11, 7)
-
-#define __LR0(x) __ACCESS_CP15(c12, 4, c12, x)
-#define __LR8(x) __ACCESS_CP15(c12, 4, c13, x)
-
-#define ICH_LR0 __LR0(0)
-#define ICH_LR1 __LR0(1)
-#define ICH_LR2 __LR0(2)
-#define ICH_LR3 __LR0(3)
-#define ICH_LR4 __LR0(4)
-#define ICH_LR5 __LR0(5)
-#define ICH_LR6 __LR0(6)
-#define ICH_LR7 __LR0(7)
-#define ICH_LR8 __LR8(0)
-#define ICH_LR9 __LR8(1)
-#define ICH_LR10 __LR8(2)
-#define ICH_LR11 __LR8(3)
-#define ICH_LR12 __LR8(4)
-#define ICH_LR13 __LR8(5)
-#define ICH_LR14 __LR8(6)
-#define ICH_LR15 __LR8(7)
-
-/* LR top half */
-#define __LRC0(x) __ACCESS_CP15(c12, 4, c14, x)
-#define __LRC8(x) __ACCESS_CP15(c12, 4, c15, x)
-
-#define ICH_LRC0 __LRC0(0)
-#define ICH_LRC1 __LRC0(1)
-#define ICH_LRC2 __LRC0(2)
-#define ICH_LRC3 __LRC0(3)
-#define ICH_LRC4 __LRC0(4)
-#define ICH_LRC5 __LRC0(5)
-#define ICH_LRC6 __LRC0(6)
-#define ICH_LRC7 __LRC0(7)
-#define ICH_LRC8 __LRC8(0)
-#define ICH_LRC9 __LRC8(1)
-#define ICH_LRC10 __LRC8(2)
-#define ICH_LRC11 __LRC8(3)
-#define ICH_LRC12 __LRC8(4)
-#define ICH_LRC13 __LRC8(5)
-#define ICH_LRC14 __LRC8(6)
-#define ICH_LRC15 __LRC8(7)
-
-#define __ICH_AP0Rx(x) __ACCESS_CP15(c12, 4, c8, x)
-#define ICH_AP0R0 __ICH_AP0Rx(0)
-#define ICH_AP0R1 __ICH_AP0Rx(1)
-#define ICH_AP0R2 __ICH_AP0Rx(2)
-#define ICH_AP0R3 __ICH_AP0Rx(3)
-
-#define __ICH_AP1Rx(x) __ACCESS_CP15(c12, 4, c9, x)
-#define ICH_AP1R0 __ICH_AP1Rx(0)
-#define ICH_AP1R1 __ICH_AP1Rx(1)
-#define ICH_AP1R2 __ICH_AP1Rx(2)
-#define ICH_AP1R3 __ICH_AP1Rx(3)
-
-/* A32-to-A64 mappings used by VGIC save/restore */
-
 #define CPUIF_MAP(a32, a64)			\
 static inline void write_ ## a64(u32 val)	\
 {						\
@@ -113,21 +48,6 @@ static inline u32 read_ ## a64(void)	\
 	return read_sysreg(a32); 		\
 }						\
 
-#define CPUIF_MAP_LO_HI(a32lo, a32hi, a64)	\
-static inline void write_ ## a64(u64 val)	\
-{						\
-	write_sysreg(lower_32_bits(val), a32lo);\
-	write_sysreg(upper_32_bits(val), a32hi);\
-} \
-static inline u64 read_ ## a64(void)		\
-{						\
-	u64 val = read_sysreg(a32lo);		\
-						\
-	val |= (u64)read_sysreg(a32hi) << 32;	\
-						\
-	return val; 				\
-}
-
 CPUIF_MAP(ICC_PMR, ICC_PMR_EL1)
 CPUIF_MAP(ICC_AP0R0, ICC_AP0R0_EL1)
 CPUIF_MAP(ICC_AP0R1, ICC_AP0R1_EL1)
@@ -138,40 +58,6 @@ CPUIF_MAP(ICC_AP1R1, ICC_AP1R1_EL1)
 CPUIF_MAP(ICC_AP1R2, ICC_AP1R2_EL1)
 CPUIF_MAP(ICC_AP1R3, ICC_AP1R3_EL1)
 
-CPUIF_MAP(ICH_HCR, ICH_HCR_EL2)
-CPUIF_MAP(ICH_VTR, ICH_VTR_EL2)
-CPUIF_MAP(ICH_MISR, ICH_MISR_EL2)
-CPUIF_MAP(ICH_EISR, ICH_EISR_EL2)
-CPUIF_MAP(ICH_ELRSR, ICH_ELRSR_EL2)
-CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2)
-CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2)
-CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2)
-CPUIF_MAP(ICH_AP0R1, ICH_AP0R1_EL2)
-CPUIF_MAP(ICH_AP0R0, ICH_AP0R0_EL2)
-CPUIF_MAP(ICH_AP1R3, ICH_AP1R3_EL2)
-CPUIF_MAP(ICH_AP1R2, ICH_AP1R2_EL2)
-CPUIF_MAP(ICH_AP1R1, ICH_AP1R1_EL2)
-CPUIF_MAP(ICH_AP1R0, ICH_AP1R0_EL2)
-CPUIF_MAP(ICC_HSRE, ICC_SRE_EL2)
-CPUIF_MAP(ICC_SRE, ICC_SRE_EL1)
-
-CPUIF_MAP_LO_HI(ICH_LR15, ICH_LRC15, ICH_LR15_EL2)
-CPUIF_MAP_LO_HI(ICH_LR14, ICH_LRC14, ICH_LR14_EL2)
-CPUIF_MAP_LO_HI(ICH_LR13, ICH_LRC13, ICH_LR13_EL2)
-CPUIF_MAP_LO_HI(ICH_LR12, ICH_LRC12, ICH_LR12_EL2)
-CPUIF_MAP_LO_HI(ICH_LR11, ICH_LRC11, ICH_LR11_EL2)
-CPUIF_MAP_LO_HI(ICH_LR10, ICH_LRC10, ICH_LR10_EL2)
-CPUIF_MAP_LO_HI(ICH_LR9, ICH_LRC9, ICH_LR9_EL2)
-CPUIF_MAP_LO_HI(ICH_LR8, ICH_LRC8, ICH_LR8_EL2)
-CPUIF_MAP_LO_HI(ICH_LR7, ICH_LRC7, ICH_LR7_EL2)
-CPUIF_MAP_LO_HI(ICH_LR6, ICH_LRC6, ICH_LR6_EL2)
-CPUIF_MAP_LO_HI(ICH_LR5, ICH_LRC5, ICH_LR5_EL2)
-CPUIF_MAP_LO_HI(ICH_LR4, ICH_LRC4, ICH_LR4_EL2)
-CPUIF_MAP_LO_HI(ICH_LR3, ICH_LRC3, ICH_LR3_EL2)
-CPUIF_MAP_LO_HI(ICH_LR2, ICH_LRC2, ICH_LR2_EL2)
-CPUIF_MAP_LO_HI(ICH_LR1, ICH_LRC1, ICH_LR1_EL2)
-CPUIF_MAP_LO_HI(ICH_LR0, ICH_LRC0, ICH_LR0_EL2)
-
 #define read_gicreg(r)                 read_##r()
 #define write_gicreg(v, r)             write_##r(v)
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
deleted file mode 100644
index 9c04bd810d07..000000000000
--- a/arch/arm/include/asm/kvm_arm.h
+++ /dev/null
@@ -1,239 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_ARM_H__
-#define __ARM_KVM_ARM_H__
-
-#include <linux/const.h>
-#include <linux/types.h>
-
-/* Hyp Configuration Register (HCR) bits */
-#define HCR_TGE (1 << 27)
-#define HCR_TVM (1 << 26)
-#define HCR_TTLB (1 << 25)
-#define HCR_TPU (1 << 24)
-#define HCR_TPC (1 << 23)
-#define HCR_TSW (1 << 22)
-#define HCR_TAC (1 << 21)
-#define HCR_TIDCP (1 << 20)
-#define HCR_TSC (1 << 19)
-#define HCR_TID3 (1 << 18)
-#define HCR_TID2 (1 << 17)
-#define HCR_TID1 (1 << 16)
-#define HCR_TID0 (1 << 15)
-#define HCR_TWE (1 << 14)
-#define HCR_TWI (1 << 13)
-#define HCR_DC (1 << 12)
-#define HCR_BSU (3 << 10)
-#define HCR_BSU_IS (1 << 10)
-#define HCR_FB (1 << 9)
-#define HCR_VA (1 << 8)
-#define HCR_VI (1 << 7)
-#define HCR_VF (1 << 6)
-#define HCR_AMO (1 << 5)
-#define HCR_IMO (1 << 4)
-#define HCR_FMO (1 << 3)
-#define HCR_PTW (1 << 2)
-#define HCR_SWIO (1 << 1)
-#define HCR_VM 1
-
-/*
- * The bits we set in HCR:
- * TAC:   Trap ACTLR
- * TSC:   Trap SMC
- * TVM:   Trap VM ops (until MMU and caches are on)
- * TSW:   Trap cache operations by set/way
- * TWI:   Trap WFI
- * TWE:   Trap WFE
- * TIDCP: Trap L2CTLR/L2ECTLR
- * BSU_IS: Upgrade barriers to the inner shareable domain
- * FB:     Force broadcast of all maintainance operations
- * AMO:    Override CPSR.A and enable signaling with VA
- * IMO:    Override CPSR.I and enable signaling with VI
- * FMO:    Override CPSR.F and enable signaling with VF
- * SWIO:   Turn set/way invalidates into set/way clean+invalidate
- */
-#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
-			HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
-			HCR_TVM | HCR_TWE | HCR_SWIO | HCR_TIDCP)
-
-/* System Control Register (SCTLR) bits */
-#define SCTLR_TE (1 << 30)
-#define SCTLR_EE (1 << 25)
-#define SCTLR_V (1 << 13)
-
-/* Hyp System Control Register (HSCTLR) bits */
-#define HSCTLR_TE (1 << 30)
-#define HSCTLR_EE (1 << 25)
-#define HSCTLR_FI (1 << 21)
-#define HSCTLR_WXN (1 << 19)
-#define HSCTLR_I (1 << 12)
-#define HSCTLR_C (1 << 2)
-#define HSCTLR_A (1 << 1)
-#define HSCTLR_M 1
-#define HSCTLR_MASK (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \
-		     HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)
-
-/* TTBCR and HTCR Registers bits */
-#define TTBCR_EAE (1 << 31)
-#define TTBCR_IMP (1 << 30)
-#define TTBCR_SH1 (3 << 28)
-#define TTBCR_ORGN1 (3 << 26)
-#define TTBCR_IRGN1 (3 << 24)
-#define TTBCR_EPD1 (1 << 23)
-#define TTBCR_A1 (1 << 22)
-#define TTBCR_T1SZ (7 << 16)
-#define TTBCR_SH0 (3 << 12)
-#define TTBCR_ORGN0 (3 << 10)
-#define TTBCR_IRGN0 (3 << 8)
-#define TTBCR_EPD0 (1 << 7)
-#define TTBCR_T0SZ (7 << 0)
-#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
-
-/* Hyp System Trap Register */
-#define HSTR_T(x) (1 << x)
-#define HSTR_TTEE (1 << 16)
-#define HSTR_TJDBX (1 << 17)
-
-/* Hyp Coprocessor Trap Register */
-#define HCPTR_TCP(x) (1 << x)
-#define HCPTR_TCP_MASK (0x3fff)
-#define HCPTR_TASE (1 << 15)
-#define HCPTR_TTA (1 << 20)
-#define HCPTR_TCPAC (1 << 31)
-
-/* Hyp Debug Configuration Register bits */
-#define HDCR_TDRA (1 << 11)
-#define HDCR_TDOSA (1 << 10)
-#define HDCR_TDA (1 << 9)
-#define HDCR_TDE (1 << 8)
-#define HDCR_HPME (1 << 7)
-#define HDCR_TPM (1 << 6)
-#define HDCR_TPMCR (1 << 5)
-#define HDCR_HPMN_MASK (0x1F)
-
-/*
- * The architecture supports 40-bit IPA as input to the 2nd stage translations
- * and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address
- * space.
- */
-#define KVM_PHYS_SHIFT	(40)
-
-#define PTRS_PER_S2_PGD	(_AC(1, ULL) << (KVM_PHYS_SHIFT - 30))
-
-/* Virtualization Translation Control Register (VTCR) bits */
-#define VTCR_SH0 (3 << 12)
-#define VTCR_ORGN0 (3 << 10)
-#define VTCR_IRGN0 (3 << 8)
-#define VTCR_SL0 (3 << 6)
-#define VTCR_S (1 << 4)
-#define VTCR_T0SZ (0xf)
-#define VTCR_MASK (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \
-		   VTCR_S | VTCR_T0SZ)
-#define VTCR_HTCR_SH (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0)
-#define VTCR_SL_L2 (0 << 6)	/* Starting-level: 2 */
-#define VTCR_SL_L1 (1 << 6)	/* Starting-level: 1 */
-#define KVM_VTCR_SL0 VTCR_SL_L1
-/* stage-2 input address range defined as 2^(32-T0SZ) */
-#define KVM_T0SZ (32 - KVM_PHYS_SHIFT)
-#define KVM_VTCR_T0SZ (KVM_T0SZ & VTCR_T0SZ)
-#define KVM_VTCR_S ((KVM_VTCR_T0SZ << 1) & VTCR_S)
-
-/* Virtualization Translation Table Base Register (VTTBR) bits */
-#if KVM_VTCR_SL0 == VTCR_SL_L2	/* see ARM DDI 0406C: B4-1720 */
-#define VTTBR_X (14 - KVM_T0SZ)
-#else
-#define VTTBR_X (5 - KVM_T0SZ)
-#endif
-#define VTTBR_CNP_BIT _AC(1, UL)
-#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
-#define VTTBR_VMID_SHIFT _AC(48, ULL)
-#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
-
-/* Hyp Syndrome Register (HSR) bits */
-#define HSR_EC_SHIFT (26)
-#define HSR_EC (_AC(0x3f, UL) << HSR_EC_SHIFT)
-#define HSR_IL (_AC(1, UL) << 25)
-#define HSR_ISS (HSR_IL - 1)
-#define HSR_ISV_SHIFT (24)
-#define HSR_ISV (_AC(1, UL) << HSR_ISV_SHIFT)
-#define HSR_SRT_SHIFT (16)
-#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
-#define HSR_CM (1 << 8)
-#define HSR_FSC (0x3f)
-#define HSR_FSC_TYPE (0x3c)
-#define HSR_SSE (1 << 21)
-#define HSR_WNR (1 << 6)
-#define HSR_CV_SHIFT (24)
-#define HSR_CV (_AC(1, UL) << HSR_CV_SHIFT)
-#define HSR_COND_SHIFT (20)
-#define HSR_COND (_AC(0xf, UL) << HSR_COND_SHIFT)
-
-#define FSC_FAULT (0x04)
-#define FSC_ACCESS (0x08)
-#define FSC_PERM (0x0c)
-#define FSC_SEA (0x10)
-#define FSC_SEA_TTW0 (0x14)
-#define FSC_SEA_TTW1 (0x15)
-#define FSC_SEA_TTW2 (0x16)
-#define FSC_SEA_TTW3 (0x17)
-#define FSC_SECC (0x18)
-#define FSC_SECC_TTW0 (0x1c)
-#define FSC_SECC_TTW1 (0x1d)
-#define FSC_SECC_TTW2 (0x1e)
-#define FSC_SECC_TTW3 (0x1f)
-
-/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
-#define HPFAR_MASK (~0xf)
-
-#define HSR_EC_UNKNOWN (0x00)
-#define HSR_EC_WFI (0x01)
-#define HSR_EC_CP15_32 (0x03)
-#define HSR_EC_CP15_64 (0x04)
-#define HSR_EC_CP14_MR (0x05)
-#define HSR_EC_CP14_LS (0x06)
-#define HSR_EC_CP_0_13 (0x07)
-#define HSR_EC_CP10_ID (0x08)
-#define HSR_EC_JAZELLE (0x09)
-#define HSR_EC_BXJ (0x0A)
-#define HSR_EC_CP14_64 (0x0C)
-#define HSR_EC_SVC_HYP (0x11)
-#define HSR_EC_HVC (0x12)
-#define HSR_EC_SMC (0x13)
-#define HSR_EC_IABT (0x20)
-#define HSR_EC_IABT_HYP (0x21)
-#define HSR_EC_DABT (0x24)
-#define HSR_EC_DABT_HYP (0x25)
-#define HSR_EC_MAX (0x3f)
-
-#define HSR_WFI_IS_WFE (_AC(1, UL) << 0)
-
-#define HSR_HVC_IMM_MASK ((_AC(1, UL) << 16) - 1)
-
-#define HSR_DABT_S1PTW (_AC(1, UL) << 7)
-#define HSR_DABT_CM (_AC(1, UL) << 8)
-
-#define kvm_arm_exception_type	\
-	{0, "RESET" },		\
-	{1, "UNDEFINED" },	\
-	{2, "SOFTWARE" },	\
-	{3, "PREF_ABORT" },	\
-	{4, "DATA_ABORT" },	\
-	{5, "IRQ" },		\
-	{6, "FIQ" },		\
-	{7, "HVC" }
-
-#define HSRECN(x) { HSR_EC_##x, #x }
-
-#define kvm_arm_exception_class \
-	HSRECN(UNKNOWN), HSRECN(WFI), HSRECN(CP15_32), HSRECN(CP15_64), \
-	HSRECN(CP14_MR), HSRECN(CP14_LS), HSRECN(CP_0_13), HSRECN(CP10_ID), \
-	HSRECN(JAZELLE), HSRECN(BXJ), HSRECN(CP14_64), HSRECN(SVC_HYP), \
-	HSRECN(HVC), HSRECN(SMC), HSRECN(IABT), HSRECN(IABT_HYP), \
-	HSRECN(DABT), HSRECN(DABT_HYP)
-
-
-#endif /* __ARM_KVM_ARM_H__ */
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
deleted file mode 100644
index f615830f9f57..000000000000
--- a/arch/arm/include/asm/kvm_asm.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_ASM_H__
-#define __ARM_KVM_ASM_H__
-
-#include <asm/virt.h>
-
-#define ARM_EXIT_WITH_ABORT_BIT 31
-#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT))
-#define ARM_EXCEPTION_IS_TRAP(x)					\
-	(ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_PREF_ABORT ||		\
-	 ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_DATA_ABORT ||		\
-	 ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_HVC)
-#define ARM_ABORT_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT))
-
-#define ARM_EXCEPTION_RESET 0
-#define ARM_EXCEPTION_UNDEFINED 1
-#define ARM_EXCEPTION_SOFTWARE 2
-#define ARM_EXCEPTION_PREF_ABORT 3
-#define ARM_EXCEPTION_DATA_ABORT 4
-#define ARM_EXCEPTION_IRQ 5
-#define ARM_EXCEPTION_FIQ 6
-#define ARM_EXCEPTION_HVC 7
-#define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR
-/*
- * The rr_lo_hi macro swaps a pair of registers depending on
- * current endianness. It is used in conjunction with ldrd and strd
- * instructions that load/store a 64-bit value from/to memory to/from
- * a pair of registers which are used with the mrrc and mcrr instructions.
- * If used with the ldrd/strd instructions, the a1 parameter is the first
- * source/destination register and the a2 parameter is the second
- * source/destination register. Note that the ldrd/strd instructions
- * already swap the bytes within the words correctly according to the
- * endianness setting, but the order of the registers need to be effectively
- * swapped when used with the mrrc/mcrr instructions.
- */
-#ifdef CONFIG_CPU_ENDIAN_BE8
-#define rr_lo_hi(a1, a2) a2, a1
-#else
-#define rr_lo_hi(a1, a2) a1, a2
-#endif
-
-#define kvm_ksym_ref(kva) (kva)
-
-#ifndef __ASSEMBLY__
-struct kvm;
-struct kvm_vcpu;
-
-extern char __kvm_hyp_init[];
-extern char __kvm_hyp_init_end[];
-
-extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
-
-extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
-
-/* no VHE on 32-bit :( */
-static inline int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) { BUG(); return 0; }
-
-extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);
-
-extern void __init_stage2_translation(void);
-
-extern u64 __vgic_v3_get_ich_vtr_el2(void);
-extern u64 __vgic_v3_read_vmcr(void);
-extern void __vgic_v3_write_vmcr(u32 vmcr);
-extern void __vgic_v3_init_lrs(void);
-
-#endif
-
-#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
deleted file mode 100644
index a23826117dd6..000000000000
--- a/arch/arm/include/asm/kvm_coproc.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 Rusty Russell IBM Corporation
- */
-
-#ifndef __ARM_KVM_COPROC_H__
-#define __ARM_KVM_COPROC_H__
-#include <linux/kvm_host.h>
-
-void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
-
-struct kvm_coproc_target_table {
-	unsigned target;
-	const struct coproc_reg *table;
-	size_t num;
-};
-void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
-
-int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-
-unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-void kvm_coproc_table_init(void);
-
-struct kvm_one_reg;
-int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
-#endif /* __ARM_KVM_COPROC_H__ */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
deleted file mode 100644
index 3944305e81df..000000000000
--- a/arch/arm/include/asm/kvm_emulate.h
+++ /dev/null
@@ -1,372 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_EMULATE_H__
-#define __ARM_KVM_EMULATE_H__
-
-#include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_arm.h>
-#include <asm/cputype.h>
-
-/* arm64 compatibility macros */
-#define PSR_AA32_MODE_FIQ FIQ_MODE
-#define PSR_AA32_MODE_SVC SVC_MODE
-#define PSR_AA32_MODE_ABT ABT_MODE
-#define PSR_AA32_MODE_UND UND_MODE
-#define PSR_AA32_T_BIT PSR_T_BIT
-#define PSR_AA32_F_BIT PSR_F_BIT
-#define PSR_AA32_I_BIT PSR_I_BIT
-#define PSR_AA32_A_BIT PSR_A_BIT
-#define PSR_AA32_E_BIT PSR_E_BIT
-#define PSR_AA32_IT_MASK PSR_IT_MASK
-#define PSR_AA32_GE_MASK 0x000f0000
-#define PSR_AA32_DIT_BIT 0x00200000
-#define PSR_AA32_PAN_BIT 0x00400000
-#define PSR_AA32_SSBS_BIT 0x00800000
-#define PSR_AA32_Q_BIT PSR_Q_BIT
-#define PSR_AA32_V_BIT PSR_V_BIT
-#define PSR_AA32_C_BIT PSR_C_BIT
-#define PSR_AA32_Z_BIT PSR_Z_BIT
-#define PSR_AA32_N_BIT PSR_N_BIT
-
-unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
-
-static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
-{
-	return vcpu_reg(vcpu, reg_num);
-}
-
-unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu);
-
-static inline unsigned long vpcu_read_spsr(struct kvm_vcpu *vcpu)
-{
-	return *__vcpu_spsr(vcpu);
-}
-
-static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
-{
-	*__vcpu_spsr(vcpu) = v;
-}
-
-static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
-{
-	return spsr;
-}
-
-static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
-					 u8 reg_num)
-{
-	return *vcpu_reg(vcpu, reg_num);
-}
-
-static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
-				unsigned long val)
-{
-	*vcpu_reg(vcpu, reg_num) = val;
-}
-
-bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
-void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
-void kvm_inject_undef32(struct kvm_vcpu *vcpu);
-void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_vabt(struct kvm_vcpu *vcpu);
-
-static inline void kvm_inject_undefined(struct kvm_vcpu *vcpu)
-{
-	kvm_inject_undef32(vcpu);
-}
-
-static inline void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
-{
-	kvm_inject_dabt32(vcpu, addr);
-}
-
-static inline void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
-{
-	kvm_inject_pabt32(vcpu, addr);
-}
-
-static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
-{
-	return kvm_condition_valid32(vcpu);
-}
-
-static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
-{
-	kvm_skip_instr32(vcpu, is_wide_instr);
-}
-
-static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.hcr = HCR_GUEST_MASK;
-}
-
-static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
-{
-	return (unsigned long *)&vcpu->arch.hcr;
-}
-
-static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.hcr &= ~HCR_TWE;
-}
-
-static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.hcr |= HCR_TWE;
-}
-
-static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
-{
-	return true;
-}
-
-static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
-{
-	return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
-}
-
-static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
-{
-	return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
-}
-
-static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
-{
-	*vcpu_cpsr(vcpu) |= PSR_T_BIT;
-}
-
-static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
-{
-	unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
-	return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
-}
-
-static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
-{
-	unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
-	return cpsr_mode > USR_MODE;
-}
-
-static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.fault.hsr;
-}
-
-static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
-{
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
-
-	if (hsr & HSR_CV)
-		return (hsr & HSR_COND) >> HSR_COND_SHIFT;
-
-	return -1;
-}
-
-static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.fault.hxfar;
-}
-
-static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
-{
-	return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
-}
-
-static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
-{
-	return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
-}
-
-static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
-{
-	return kvm_vcpu_get_hsr(vcpu) & (HSR_CM | HSR_WNR | HSR_FSC);
-}
-
-static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
-{
-	return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
-}
-
-static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
-{
-	return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
-}
-
-static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
-{
-	return false;
-}
-
-static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
-{
-	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
-}
-
-static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
-{
-	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
-}
-
-static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
-{
-	return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
-}
-
-/* Get Access Size from a data abort */
-static inline unsigned int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
-{
-	switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
-	case 0:
-		return 1;
-	case 1:
-		return 2;
-	case 2:
-		return 4;
-	default:
-		kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
-		return 4;
-	}
-}
-
-/* This one is not specific to Data Abort */
-static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
-{
-	return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
-}
-
-static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
-{
-	return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
-}
-
-static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
-{
-	return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
-}
-
-static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
-{
-	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
-}
-
-static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
-{
-	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
-}
-
-static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
-{
-	switch (kvm_vcpu_trap_get_fault(vcpu)) {
-	case FSC_SEA:
-	case FSC_SEA_TTW0:
-	case FSC_SEA_TTW1:
-	case FSC_SEA_TTW2:
-	case FSC_SEA_TTW3:
-	case FSC_SECC:
-	case FSC_SECC_TTW0:
-	case FSC_SECC_TTW1:
-	case FSC_SECC_TTW2:
-	case FSC_SECC_TTW3:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
-{
-	if (kvm_vcpu_trap_is_iabt(vcpu))
-		return false;
-
-	return kvm_vcpu_dabt_iswrite(vcpu);
-}
-
-static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
-{
-	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
-}
-
-static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
-{
-	return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
-}
-
-static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
-{
-	return false;
-}
-
-static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
-						      bool flag)
-{
-}
-
-static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
-{
-	*vcpu_cpsr(vcpu) |= PSR_E_BIT;
-}
-
-static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
-{
-	return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
-}
-
-static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
-						    unsigned long data,
-						    unsigned int len)
-{
-	if (kvm_vcpu_is_be(vcpu)) {
-		switch (len) {
-		case 1:
-			return data & 0xff;
-		case 2:
-			return be16_to_cpu(data & 0xffff);
-		default:
-			return be32_to_cpu(data);
-		}
-	} else {
-		switch (len) {
-		case 1:
-			return data & 0xff;
-		case 2:
-			return le16_to_cpu(data & 0xffff);
-		default:
-			return le32_to_cpu(data);
-		}
-	}
-}
-
-static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
-						    unsigned long data,
-						    unsigned int len)
-{
-	if (kvm_vcpu_is_be(vcpu)) {
-		switch (len) {
-		case 1:
-			return data & 0xff;
-		case 2:
-			return cpu_to_be16(data & 0xffff);
-		default:
-			return cpu_to_be32(data);
-		}
-	} else {
-		switch (len) {
-		case 1:
-			return data & 0xff;
-		case 2:
-			return cpu_to_le16(data & 0xffff);
-		default:
-			return cpu_to_le32(data);
-		}
-	}
-}
-
-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {}
-
-#endif /* __ARM_KVM_EMULATE_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
deleted file mode 100644
index c3314b286a61..000000000000
--- a/arch/arm/include/asm/kvm_host.h
+++ /dev/null
@@ -1,459 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_HOST_H__
-#define __ARM_KVM_HOST_H__
-
-#include <linux/arm-smccc.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/kvm_types.h>
-#include <asm/cputype.h>
-#include <asm/kvm.h>
-#include <asm/kvm_asm.h>
-#include <asm/fpstate.h>
-#include <kvm/arm_arch_timer.h>
-
-#define __KVM_HAVE_ARCH_INTC_INITIALIZED
-
-#define KVM_USER_MEM_SLOTS 32
-#define KVM_HAVE_ONE_REG
-#define KVM_HALT_POLL_NS_DEFAULT 500000
-
-#define KVM_VCPU_MAX_FEATURES 2
-
-#include <kvm/arm_vgic.h>
-
-
-#ifdef CONFIG_ARM_GIC_V3
-#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
-#else
-#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
-#endif
-
-#define KVM_REQ_SLEEP \
-	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
-#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
-#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
-
-DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
-
-static inline int kvm_arm_init_sve(void) { return 0; }
-
-u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
-int __attribute_const__ kvm_target_cpu(void);
-int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
-void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
-
-struct kvm_vmid {
-	/* The VMID generation used for the virt. memory system */
-	u64 vmid_gen;
-	u32 vmid;
-};
-
-struct kvm_arch {
-	/* The last vcpu id that ran on each physical CPU */
-	int __percpu *last_vcpu_ran;
-
-	/*
-	 * Anything that is not used directly from assembly code goes
-	 * here.
-	 */
-
-	/* The VMID generation used for the virt. memory system */
-	struct kvm_vmid vmid;
-
-	/* Stage-2 page table */
-	pgd_t *pgd;
-	phys_addr_t pgd_phys;
-
-	/* Interrupt controller */
-	struct vgic_dist vgic;
-	int max_vcpus;
-
-	/* Mandated version of PSCI */
-	u32 psci_version;
-
-	/*
-	 * If we encounter a data abort without valid instruction syndrome
-	 * information, report this to user space.  User space can (and
-	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
-	 * supported.
-	 */
-	bool return_nisv_io_abort_to_user;
-};
-
-#define KVM_NR_MEM_OBJS 40
-
-/*
- * We don't want allocation failures within the mmu code, so we preallocate
- * enough memory for a single page fault in a cache.
- */
-struct kvm_mmu_memory_cache {
-	int nobjs;
-	void *objects[KVM_NR_MEM_OBJS];
-};
-
-struct kvm_vcpu_fault_info {
-	u32 hsr;	/* Hyp Syndrome Register */
-	u32 hxfar;	/* Hyp Data/Inst. Fault Address Register */
-	u32 hpfar;	/* Hyp IPA Fault Address Register */
-};
-
-/*
- * 0 is reserved as an invalid value.
- * Order should be kept in sync with the save/restore code.
- */
-enum vcpu_sysreg {
-	__INVALID_SYSREG__,
-	c0_MPIDR,	/* MultiProcessor ID Register */
-	c0_CSSELR,	/* Cache Size Selection Register */
-	c1_SCTLR,	/* System Control Register */
-	c1_ACTLR,	/* Auxiliary Control Register */
-	c1_CPACR,	/* Coprocessor Access Control */
-	c2_TTBR0,	/* Translation Table Base Register 0 */
-	c2_TTBR0_high,	/* TTBR0 top 32 bits */
-	c2_TTBR1,	/* Translation Table Base Register 1 */
-	c2_TTBR1_high,	/* TTBR1 top 32 bits */
-	c2_TTBCR,	/* Translation Table Base Control R. */
-	c3_DACR,	/* Domain Access Control Register */
-	c5_DFSR,	/* Data Fault Status Register */
-	c5_IFSR,	/* Instruction Fault Status Register */
-	c5_ADFSR,	/* Auxilary Data Fault Status R */
-	c5_AIFSR,	/* Auxilary Instrunction Fault Status R */
-	c6_DFAR,	/* Data Fault Address Register */
-	c6_IFAR,	/* Instruction Fault Address Register */
-	c7_PAR,		/* Physical Address Register */
-	c7_PAR_high,	/* PAR top 32 bits */
-	c9_L2CTLR,	/* Cortex A15/A7 L2 Control Register */
-	c10_PRRR,	/* Primary Region Remap Register */
-	c10_NMRR,	/* Normal Memory Remap Register */
-	c12_VBAR,	/* Vector Base Address Register */
-	c13_CID,	/* Context ID Register */
-	c13_TID_URW,	/* Thread ID, User R/W */
-	c13_TID_URO,	/* Thread ID, User R/O */
-	c13_TID_PRIV,	/* Thread ID, Privileged */
-	c14_CNTKCTL,	/* Timer Control Register (PL1) */
-	c10_AMAIR0,	/* Auxilary Memory Attribute Indirection Reg0 */
-	c10_AMAIR1,	/* Auxilary Memory Attribute Indirection Reg1 */
-	NR_CP15_REGS	/* Number of regs (incl. invalid) */
-};
-
-struct kvm_cpu_context {
-	struct kvm_regs gp_regs;
-	struct vfp_hard_struct vfp;
-	u32 cp15[NR_CP15_REGS];
-};
-
-struct kvm_host_data {
-	struct kvm_cpu_context host_ctxt;
-};
-
-typedef struct kvm_host_data kvm_host_data_t;
-
-static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
-{
-	/* The host's MPIDR is immutable, so let's set it up at boot time */
-	cpu_ctxt->cp15[c0_MPIDR] = read_cpuid_mpidr();
-}
-
-struct vcpu_reset_state {
-	unsigned long pc;
-	unsigned long r0;
-	bool be;
-	bool reset;
-};
-
-struct kvm_vcpu_arch {
-	struct kvm_cpu_context ctxt;
-
-	int target; /* Processor target */
-	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
-
-	/* The CPU type we expose to the VM */
-	u32 midr;
-
-	/* HYP trapping configuration */
-	u32 hcr;
-
-	/* Exception Information */
-	struct kvm_vcpu_fault_info fault;
-
-	/* Host FP context */
-	struct kvm_cpu_context *host_cpu_context;
-
-	/* VGIC state */
-	struct vgic_cpu vgic_cpu;
-	struct arch_timer_cpu timer_cpu;
-
-	/*
-	 * Anything that is not used directly from assembly code goes
-	 * here.
-	 */
-
-	/* vcpu power-off state */
-	bool power_off;
-
-	/* Don't run the guest (internal implementation need) */
-	bool pause;
-
-	/* Cache some mmu pages needed inside spinlock regions */
-	struct kvm_mmu_memory_cache mmu_page_cache;
-
-	struct vcpu_reset_state reset_state;
-
-	/* Detect first run of a vcpu */
-	bool has_run_once;
-};
-
-struct kvm_vm_stat {
-	ulong remote_tlb_flush;
-};
-
-struct kvm_vcpu_stat {
-	u64 halt_successful_poll;
-	u64 halt_attempted_poll;
-	u64 halt_poll_invalid;
-	u64 halt_wakeup;
-	u64 hvc_exit_stat;
-	u64 wfe_exit_stat;
-	u64 wfi_exit_stat;
-	u64 mmio_exit_user;
-	u64 mmio_exit_kernel;
-	u64 exits;
-};
-
-#define vcpu_cp15(v,r)	(v)->arch.ctxt.cp15[r]
-
-int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
-unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
-int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-
-unsigned long __kvm_call_hyp(void *hypfn, ...);
-
-/*
- * The has_vhe() part doesn't get emitted, but is used for type-checking.
- */
-#define kvm_call_hyp(f, ...)						\
-	do {								\
-		if (has_vhe()) {					\
-			f(__VA_ARGS__);					\
-		} else {						\
-			__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__);	\
-		}							\
-	} while(0)
-
-#define kvm_call_hyp_ret(f, ...)					\
-	({								\
-		typeof(f(__VA_ARGS__)) ret;				\
-									\
-		if (has_vhe()) {					\
-			ret = f(__VA_ARGS__);				\
-		} else {						\
-			ret = __kvm_call_hyp(kvm_ksym_ref(f),		\
-					     ##__VA_ARGS__);		\
-		}							\
-									\
-		ret;							\
-	})
-
-void force_vm_exit(const cpumask_t *mask);
-int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
-			      struct kvm_vcpu_events *events);
-
-int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
-			      struct kvm_vcpu_events *events);
-
-#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end);
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
-
-unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
-
-void kvm_arm_halt_guest(struct kvm *kvm);
-void kvm_arm_resume_guest(struct kvm *kvm);
-
-int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
-int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-
-int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		int exception_index);
-
-static inline void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
-				     int exception_index) {}
-
-/* MMIO helpers */
-void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
-unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
-
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		 phys_addr_t fault_ipa);
-
-static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
-				       unsigned long hyp_stack_ptr,
-				       unsigned long vector_ptr)
-{
-	/*
-	 * Call initialization code, and switch to the full blown HYP
-	 * code.  The init code doesn't need to preserve these
-	 * registers as r0-r3 are already callee saved according to
-	 * the AAPCS.
-	 * Note that we slightly misuse the prototype by casting the
-	 * stack pointer to a void *.
-
-	 * The PGDs are always passed as the third argument, in order
-	 * to be passed into r2-r3 to the init code (yes, this is
-	 * compliant with the PCS!).
-	 */
-
-	__kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
-}
-
-static inline void __cpu_init_stage2(void)
-{
-	kvm_call_hyp(__init_stage2_translation);
-}
-
-static inline int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
-{
-	return 0;
-}
-
-int kvm_perf_init(void);
-int kvm_perf_teardown(void);
-
-static inline long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
-{
-	return SMCCC_RET_NOT_SUPPORTED;
-}
-
-static inline gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
-{
-	return GPA_INVALID;
-}
-
-static inline void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
-{
-}
-
-static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
-{
-}
-
-static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
-{
-	return false;
-}
-
-void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
-
-struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
-
-static inline bool kvm_arch_requires_vhe(void) { return false; }
-static inline void kvm_arch_hardware_unsetup(void) {}
-static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu) {}
-
-static inline void kvm_arm_init_debug(void) {}
-static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
-
-int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
-			       struct kvm_device_attr *attr);
-int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
-			       struct kvm_device_attr *attr);
-int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
-			       struct kvm_device_attr *attr);
-
-/*
- * VFP/NEON switching is all done by the hyp switch code, so no need to
- * coordinate with host context handling for this state:
- */
-static inline void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}
-
-static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
-static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
-
-static inline void kvm_arm_vhe_guest_enter(void) {}
-static inline void kvm_arm_vhe_guest_exit(void) {}
-
-#define KVM_BP_HARDEN_UNKNOWN		-1
-#define KVM_BP_HARDEN_WA_NEEDED		0
-#define KVM_BP_HARDEN_NOT_REQUIRED	1
-
-static inline int kvm_arm_harden_branch_predictor(void)
-{
-	switch(read_cpuid_part()) {
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
-	case ARM_CPU_PART_BRAHMA_B15:
-	case ARM_CPU_PART_CORTEX_A12:
-	case ARM_CPU_PART_CORTEX_A15:
-	case ARM_CPU_PART_CORTEX_A17:
-		return KVM_BP_HARDEN_WA_NEEDED;
-#endif
-	case ARM_CPU_PART_CORTEX_A7:
-		return KVM_BP_HARDEN_NOT_REQUIRED;
-	default:
-		return KVM_BP_HARDEN_UNKNOWN;
-	}
-}
-
-#define KVM_SSBD_UNKNOWN		-1
-#define KVM_SSBD_FORCE_DISABLE		0
-#define KVM_SSBD_KERNEL			1
-#define KVM_SSBD_FORCE_ENABLE		2
-#define KVM_SSBD_MITIGATED		3
-
-static inline int kvm_arm_have_ssbd(void)
-{
-	/* No way to detect it yet, pretend it is not there. */
-	return KVM_SSBD_UNKNOWN;
-}
-
-static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
-static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
-
-#define __KVM_HAVE_ARCH_VM_ALLOC
-struct kvm *kvm_arch_alloc_vm(void);
-void kvm_arch_free_vm(struct kvm *kvm);
-
-static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
-{
-	/*
-	 * On 32bit ARM, VMs get a static 40bit IPA stage2 setup,
-	 * so any non-zero value used as type is illegal.
-	 */
-	if (type)
-		return -EINVAL;
-	return 0;
-}
-
-static inline int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
-{
-	return -EINVAL;
-}
-
-static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
-{
-	return true;
-}
-
-#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
deleted file mode 100644
index 3c1b55ecc578..000000000000
--- a/arch/arm/include/asm/kvm_hyp.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2015 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#ifndef __ARM_KVM_HYP_H__
-#define __ARM_KVM_HYP_H__
-
-#include <linux/compiler.h>
-#include <linux/kvm_host.h>
-#include <asm/cp15.h>
-#include <asm/kvm_arm.h>
-#include <asm/vfp.h>
-
-#define __hyp_text __section(.hyp.text) notrace
-
-#define __ACCESS_VFP(CRn)			\
-	"mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32
-
-#define write_special(v, r)					\
-	asm volatile("msr " __stringify(r) ", %0" : : "r" (v))
-#define read_special(r) ({					\
-	u32 __val;						\
-	asm volatile("mrs %0, " __stringify(r) : "=r" (__val));	\
-	__val;							\
-})
-
-#define TTBR0		__ACCESS_CP15_64(0, c2)
-#define TTBR1		__ACCESS_CP15_64(1, c2)
-#define VTTBR		__ACCESS_CP15_64(6, c2)
-#define PAR		__ACCESS_CP15_64(0, c7)
-#define CNTP_CVAL	__ACCESS_CP15_64(2, c14)
-#define CNTV_CVAL	__ACCESS_CP15_64(3, c14)
-#define CNTVOFF		__ACCESS_CP15_64(4, c14)
-
-#define MIDR		__ACCESS_CP15(c0, 0, c0, 0)
-#define CSSELR		__ACCESS_CP15(c0, 2, c0, 0)
-#define VPIDR		__ACCESS_CP15(c0, 4, c0, 0)
-#define VMPIDR		__ACCESS_CP15(c0, 4, c0, 5)
-#define SCTLR		__ACCESS_CP15(c1, 0, c0, 0)
-#define CPACR		__ACCESS_CP15(c1, 0, c0, 2)
-#define HCR		__ACCESS_CP15(c1, 4, c1, 0)
-#define HDCR		__ACCESS_CP15(c1, 4, c1, 1)
-#define HCPTR		__ACCESS_CP15(c1, 4, c1, 2)
-#define HSTR		__ACCESS_CP15(c1, 4, c1, 3)
-#define TTBCR		__ACCESS_CP15(c2, 0, c0, 2)
-#define HTCR		__ACCESS_CP15(c2, 4, c0, 2)
-#define VTCR		__ACCESS_CP15(c2, 4, c1, 2)
-#define DACR		__ACCESS_CP15(c3, 0, c0, 0)
-#define DFSR		__ACCESS_CP15(c5, 0, c0, 0)
-#define IFSR		__ACCESS_CP15(c5, 0, c0, 1)
-#define ADFSR		__ACCESS_CP15(c5, 0, c1, 0)
-#define AIFSR		__ACCESS_CP15(c5, 0, c1, 1)
-#define HSR		__ACCESS_CP15(c5, 4, c2, 0)
-#define DFAR		__ACCESS_CP15(c6, 0, c0, 0)
-#define IFAR		__ACCESS_CP15(c6, 0, c0, 2)
-#define HDFAR		__ACCESS_CP15(c6, 4, c0, 0)
-#define HIFAR		__ACCESS_CP15(c6, 4, c0, 2)
-#define HPFAR		__ACCESS_CP15(c6, 4, c0, 4)
-#define ICIALLUIS	__ACCESS_CP15(c7, 0, c1, 0)
-#define BPIALLIS	__ACCESS_CP15(c7, 0, c1, 6)
-#define ICIMVAU		__ACCESS_CP15(c7, 0, c5, 1)
-#define ATS1CPR		__ACCESS_CP15(c7, 0, c8, 0)
-#define TLBIALLIS	__ACCESS_CP15(c8, 0, c3, 0)
-#define TLBIALL		__ACCESS_CP15(c8, 0, c7, 0)
-#define TLBIALLNSNHIS	__ACCESS_CP15(c8, 4, c3, 4)
-#define PRRR		__ACCESS_CP15(c10, 0, c2, 0)
-#define NMRR		__ACCESS_CP15(c10, 0, c2, 1)
-#define AMAIR0		__ACCESS_CP15(c10, 0, c3, 0)
-#define AMAIR1		__ACCESS_CP15(c10, 0, c3, 1)
-#define VBAR		__ACCESS_CP15(c12, 0, c0, 0)
-#define CID		__ACCESS_CP15(c13, 0, c0, 1)
-#define TID_URW		__ACCESS_CP15(c13, 0, c0, 2)
-#define TID_URO		__ACCESS_CP15(c13, 0, c0, 3)
-#define TID_PRIV	__ACCESS_CP15(c13, 0, c0, 4)
-#define HTPIDR		__ACCESS_CP15(c13, 4, c0, 2)
-#define CNTKCTL		__ACCESS_CP15(c14, 0, c1, 0)
-#define CNTP_CTL	__ACCESS_CP15(c14, 0, c2, 1)
-#define CNTV_CTL	__ACCESS_CP15(c14, 0, c3, 1)
-#define CNTHCTL		__ACCESS_CP15(c14, 4, c1, 0)
-
-#define VFP_FPEXC	__ACCESS_VFP(FPEXC)
-
-/* AArch64 compatibility macros, only for the timer so far */
-#define read_sysreg_el0(r)	read_sysreg(r##_EL0)
-#define write_sysreg_el0(v, r)	write_sysreg(v, r##_EL0)
-
-#define SYS_CNTP_CTL_EL0	CNTP_CTL
-#define SYS_CNTP_CVAL_EL0	CNTP_CVAL
-#define SYS_CNTV_CTL_EL0	CNTV_CTL
-#define SYS_CNTV_CVAL_EL0	CNTV_CVAL
-
-#define cntvoff_el2		CNTVOFF
-#define cnthctl_el2		CNTHCTL
-
-void __timer_enable_traps(struct kvm_vcpu *vcpu);
-void __timer_disable_traps(struct kvm_vcpu *vcpu);
-
-void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
-void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
-
-void __sysreg_save_state(struct kvm_cpu_context *ctxt);
-void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
-
-void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
-void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
-void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
-void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
-void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
-void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
-
-asmlinkage void __vfp_save_state(struct vfp_hard_struct *vfp);
-asmlinkage void __vfp_restore_state(struct vfp_hard_struct *vfp);
-static inline bool __vfp_enabled(void)
-{
-	return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10)));
-}
-
-void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt);
-void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt);
-
-asmlinkage int __guest_enter(struct kvm_vcpu *vcpu,
-			     struct kvm_cpu_context *host);
-asmlinkage int __hyp_do_panic(const char *, int, u32);
-
-#endif /* __ARM_KVM_HYP_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
deleted file mode 100644
index 0d84d50bf9ba..000000000000
--- a/arch/arm/include/asm/kvm_mmu.h
+++ /dev/null
@@ -1,435 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_MMU_H__
-#define __ARM_KVM_MMU_H__
-
-#include <asm/memory.h>
-#include <asm/page.h>
-
-/*
- * We directly use the kernel VA for the HYP, as we can directly share
- * the mapping (HTTBR "covers" TTBR1).
- */
-#define kern_hyp_va(kva) (kva)
-
-/* Contrary to arm64, there is no need to generate a PC-relative address */
-#define hyp_symbol_addr(s)						\
-	({								\
-		typeof(s) *addr = &(s);					\
-		addr;							\
-	})
-
-#ifndef __ASSEMBLY__
-
-#include <linux/highmem.h>
-#include <asm/cacheflush.h>
-#include <asm/cputype.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_hyp.h>
-#include <asm/pgalloc.h>
-#include <asm/stage2_pgtable.h>
-
-/* Ensure compatibility with arm64 */
-#define VA_BITS			32
-
-#define kvm_phys_shift(kvm)		KVM_PHYS_SHIFT
-#define kvm_phys_size(kvm)		(1ULL << kvm_phys_shift(kvm))
-#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - 1ULL)
-#define kvm_vttbr_baddr_mask(kvm)	VTTBR_BADDR_MASK
-
-#define stage2_pgd_size(kvm)		(PTRS_PER_S2_PGD * sizeof(pgd_t))
-
-int create_hyp_mappings(void *from, void *to, pgprot_t prot);
-int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
-			   void __iomem **kaddr,
-			   void __iomem **haddr);
-int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
-			     void **haddr);
-void free_hyp_pgds(void);
-
-void stage2_unmap_vm(struct kvm *kvm);
-int kvm_alloc_stage2_pgd(struct kvm *kvm);
-void kvm_free_stage2_pgd(struct kvm *kvm);
-int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable);
-
-int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
-
-void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
-
-phys_addr_t kvm_mmu_get_httbr(void);
-phys_addr_t kvm_get_idmap_vector(void);
-int kvm_mmu_init(void);
-void kvm_clear_hyp_idmap(void);
-
-#define kvm_mk_pmd(ptep)	__pmd(__pa(ptep) | PMD_TYPE_TABLE)
-#define kvm_mk_pud(pmdp)	__pud(__pa(pmdp) | PMD_TYPE_TABLE)
-#define kvm_mk_pgd(pudp)	({ BUILD_BUG(); 0; })
-
-#define kvm_pfn_pte(pfn, prot)	pfn_pte(pfn, prot)
-#define kvm_pfn_pmd(pfn, prot)	pfn_pmd(pfn, prot)
-#define kvm_pfn_pud(pfn, prot)	(__pud(0))
-
-#define kvm_pud_pfn(pud)	({ WARN_ON(1); 0; })
-
-
-#define kvm_pmd_mkhuge(pmd)	pmd_mkhuge(pmd)
-/* No support for pud hugepages */
-#define kvm_pud_mkhuge(pud)	( {WARN_ON(1); pud; })
-
-/*
- * The following kvm_*pud*() functions are provided strictly to allow
- * sharing code with arm64. They should never be called in practice.
- */
-static inline void kvm_set_s2pud_readonly(pud_t *pud)
-{
-	WARN_ON(1);
-}
-
-static inline bool kvm_s2pud_readonly(pud_t *pud)
-{
-	WARN_ON(1);
-	return false;
-}
-
-static inline void kvm_set_pud(pud_t *pud, pud_t new_pud)
-{
-	WARN_ON(1);
-}
-
-static inline pud_t kvm_s2pud_mkwrite(pud_t pud)
-{
-	WARN_ON(1);
-	return pud;
-}
-
-static inline pud_t kvm_s2pud_mkexec(pud_t pud)
-{
-	WARN_ON(1);
-	return pud;
-}
-
-static inline bool kvm_s2pud_exec(pud_t *pud)
-{
-	WARN_ON(1);
-	return false;
-}
-
-static inline pud_t kvm_s2pud_mkyoung(pud_t pud)
-{
-	BUG();
-	return pud;
-}
-
-static inline bool kvm_s2pud_young(pud_t pud)
-{
-	WARN_ON(1);
-	return false;
-}
-
-static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
-{
-	pte_val(pte) |= L_PTE_S2_RDWR;
-	return pte;
-}
-
-static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
-{
-	pmd_val(pmd) |= L_PMD_S2_RDWR;
-	return pmd;
-}
-
-static inline pte_t kvm_s2pte_mkexec(pte_t pte)
-{
-	pte_val(pte) &= ~L_PTE_XN;
-	return pte;
-}
-
-static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
-{
-	pmd_val(pmd) &= ~PMD_SECT_XN;
-	return pmd;
-}
-
-static inline void kvm_set_s2pte_readonly(pte_t *pte)
-{
-	pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
-}
-
-static inline bool kvm_s2pte_readonly(pte_t *pte)
-{
-	return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
-}
-
-static inline bool kvm_s2pte_exec(pte_t *pte)
-{
-	return !(pte_val(*pte) & L_PTE_XN);
-}
-
-static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
-{
-	pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
-}
-
-static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
-{
-	return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
-}
-
-static inline bool kvm_s2pmd_exec(pmd_t *pmd)
-{
-	return !(pmd_val(*pmd) & PMD_SECT_XN);
-}
-
-static inline bool kvm_page_empty(void *ptr)
-{
-	struct page *ptr_page = virt_to_page(ptr);
-	return page_count(ptr_page) == 1;
-}
-
-#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
-#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
-#define kvm_pud_table_empty(kvm, pudp) false
-
-#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)
-#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
-#define hyp_pud_table_empty(pudp) false
-
-struct kvm;
-
-#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
-
-static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
-{
-	return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
-}
-
-static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
-{
-	/*
-	 * Clean the dcache to the Point of Coherency.
-	 *
-	 * We need to do this through a kernel mapping (using the
-	 * user-space mapping has proved to be the wrong
-	 * solution). For that, we need to kmap one page at a time,
-	 * and iterate over the range.
-	 */
-
-	VM_BUG_ON(size & ~PAGE_MASK);
-
-	while (size) {
-		void *va = kmap_atomic_pfn(pfn);
-
-		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
-
-		size -= PAGE_SIZE;
-		pfn++;
-
-		kunmap_atomic(va);
-	}
-}
-
-static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
-						  unsigned long size)
-{
-	u32 iclsz;
-
-	/*
-	 * If we are going to insert an instruction page and the icache is
-	 * either VIPT or PIPT, there is a potential problem where the host
-	 * (or another VM) may have used the same page as this guest, and we
-	 * read incorrect data from the icache.  If we're using a PIPT cache,
-	 * we can invalidate just that page, but if we are using a VIPT cache
-	 * we need to invalidate the entire icache - damn shame - as written
-	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
-	 *
-	 * VIVT caches are tagged using both the ASID and the VMID and doesn't
-	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
-	 */
-
-	VM_BUG_ON(size & ~PAGE_MASK);
-
-	if (icache_is_vivt_asid_tagged())
-		return;
-
-	if (!icache_is_pipt()) {
-		/* any kind of VIPT cache */
-		__flush_icache_all();
-		return;
-	}
-
-	/*
-	 * CTR IminLine contains Log2 of the number of words in the
-	 * cache line, so we can get the number of words as
-	 * 2 << (IminLine - 1).  To get the number of bytes, we
-	 * multiply by 4 (the number of bytes in a 32-bit word), and
-	 * get 4 << (IminLine).
-	 */
-	iclsz = 4 << (read_cpuid(CPUID_CACHETYPE) & 0xf);
-
-	while (size) {
-		void *va = kmap_atomic_pfn(pfn);
-		void *end = va + PAGE_SIZE;
-		void *addr = va;
-
-		do {
-			write_sysreg(addr, ICIMVAU);
-			addr += iclsz;
-		} while (addr < end);
-
-		dsb(ishst);
-		isb();
-
-		size -= PAGE_SIZE;
-		pfn++;
-
-		kunmap_atomic(va);
-	}
-
-	/* Check if we need to invalidate the BTB */
-	if ((read_cpuid_ext(CPUID_EXT_MMFR1) >> 28) != 4) {
-		write_sysreg(0, BPIALLIS);
-		dsb(ishst);
-		isb();
-	}
-}
-
-static inline void __kvm_flush_dcache_pte(pte_t pte)
-{
-	void *va = kmap_atomic(pte_page(pte));
-
-	kvm_flush_dcache_to_poc(va, PAGE_SIZE);
-
-	kunmap_atomic(va);
-}
-
-static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
-{
-	unsigned long size = PMD_SIZE;
-	kvm_pfn_t pfn = pmd_pfn(pmd);
-
-	while (size) {
-		void *va = kmap_atomic_pfn(pfn);
-
-		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
-
-		pfn++;
-		size -= PAGE_SIZE;
-
-		kunmap_atomic(va);
-	}
-}
-
-static inline void __kvm_flush_dcache_pud(pud_t pud)
-{
-}
-
-#define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))
-
-void kvm_set_way_flush(struct kvm_vcpu *vcpu);
-void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
-
-static inline bool __kvm_cpu_uses_extended_idmap(void)
-{
-	return false;
-}
-
-static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
-{
-	return PTRS_PER_PGD;
-}
-
-static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
-				       pgd_t *hyp_pgd,
-				       pgd_t *merged_hyp_pgd,
-				       unsigned long hyp_idmap_start) { }
-
-static inline unsigned int kvm_get_vmid_bits(void)
-{
-	return 8;
-}
-
-/*
- * We are not in the kvm->srcu critical section most of the time, so we take
- * the SRCU read lock here. Since we copy the data from the user page, we
- * can immediately drop the lock again.
- */
-static inline int kvm_read_guest_lock(struct kvm *kvm,
-				      gpa_t gpa, void *data, unsigned long len)
-{
-	int srcu_idx = srcu_read_lock(&kvm->srcu);
-	int ret = kvm_read_guest(kvm, gpa, data, len);
-
-	srcu_read_unlock(&kvm->srcu, srcu_idx);
-
-	return ret;
-}
-
-static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
-				       const void *data, unsigned long len)
-{
-	int srcu_idx = srcu_read_lock(&kvm->srcu);
-	int ret = kvm_write_guest(kvm, gpa, data, len);
-
-	srcu_read_unlock(&kvm->srcu, srcu_idx);
-
-	return ret;
-}
-
-static inline void *kvm_get_hyp_vector(void)
-{
-	switch(read_cpuid_part()) {
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
-	case ARM_CPU_PART_CORTEX_A12:
-	case ARM_CPU_PART_CORTEX_A17:
-	{
-		extern char __kvm_hyp_vector_bp_inv[];
-		return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
-	}
-
-	case ARM_CPU_PART_BRAHMA_B15:
-	case ARM_CPU_PART_CORTEX_A15:
-	{
-		extern char __kvm_hyp_vector_ic_inv[];
-		return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
-	}
-#endif
-	default:
-	{
-		extern char __kvm_hyp_vector[];
-		return kvm_ksym_ref(__kvm_hyp_vector);
-	}
-	}
-}
-
-static inline int kvm_map_vectors(void)
-{
-	return 0;
-}
-
-static inline int hyp_map_aux_data(void)
-{
-	return 0;
-}
-
-#define kvm_phys_to_vttbr(addr)		(addr)
-
-static inline void kvm_set_ipa_limit(void) {}
-
-static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
-{
-	struct kvm_vmid *vmid = &kvm->arch.vmid;
-	u64 vmid_field, baddr;
-
-	baddr = kvm->arch.pgd_phys;
-	vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
-	return kvm_phys_to_vttbr(baddr) | vmid_field;
-}
-
-#endif	/* !__ASSEMBLY__ */
-
-#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/kvm_ras.h b/arch/arm/include/asm/kvm_ras.h
deleted file mode 100644
index e9577292dfe4..000000000000
--- a/arch/arm/include/asm/kvm_ras.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2018 - Arm Ltd */
-
-#ifndef __ARM_KVM_RAS_H__
-#define __ARM_KVM_RAS_H__
-
-#include <linux/types.h>
-
-static inline int kvm_handle_guest_sea(phys_addr_t addr, unsigned int esr)
-{
-	return -1;
-}
-
-#endif /* __ARM_KVM_RAS_H__ */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index ad55ab068dbf..36805f94939e 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -104,26 +104,6 @@
  */
 #define L_PGD_SWAPPER		(_AT(pgdval_t, 1) << 55)	/* swapper_pg_dir entry */
 
-/*
- * 2nd stage PTE definitions for LPAE.
- */
-#define L_PTE_S2_MT_UNCACHED		(_AT(pteval_t, 0x0) << 2) /* strongly ordered */
-#define L_PTE_S2_MT_WRITETHROUGH	(_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
-#define L_PTE_S2_MT_WRITEBACK		(_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
-#define L_PTE_S2_MT_DEV_SHARED		(_AT(pteval_t, 0x1) << 2) /* device */
-#define L_PTE_S2_MT_MASK		(_AT(pteval_t, 0xf) << 2)
-
-#define L_PTE_S2_RDONLY			(_AT(pteval_t, 1) << 6)   /* HAP[1] */
-#define L_PTE_S2_RDWR			(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
-
-#define L_PMD_S2_RDONLY			(_AT(pmdval_t, 1) << 6)   /* HAP[1] */
-#define L_PMD_S2_RDWR			(_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */
-
-/*
- * Hyp-mode PL2 PTE definitions for LPAE.
- */ -#define L_PTE_HYP L_PTE_USER - #ifndef __ASSEMBLY__ #define pud_none(pud) (!pud_val(pud)) diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index eabcb48a7840..0483cf413315 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -80,9 +80,6 @@ extern void __pgd_error(const char *file, int line, pgd_t); extern pgprot_t pgprot_user; extern pgprot_t pgprot_kernel; -extern pgprot_t pgprot_hyp_device; -extern pgprot_t pgprot_s2; -extern pgprot_t pgprot_s2_device; #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) @@ -95,12 +92,6 @@ extern pgprot_t pgprot_s2_device; #define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) #define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN) #define PAGE_KERNEL_EXEC pgprot_kernel -#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_XN) -#define PAGE_HYP_EXEC _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY) -#define PAGE_HYP_RO _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY | L_PTE_XN) -#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP) -#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY | L_PTE_XN) -#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY | L_PTE_XN) #define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE) #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) diff --git a/arch/arm/include/asm/sections.h b/arch/arm/include/asm/sections.h index 4ceb4f757d4d..700b8bcdf9bd 100644 --- a/arch/arm/include/asm/sections.h +++ b/arch/arm/include/asm/sections.h @@ -10,8 +10,6 @@ extern char __idmap_text_start[]; extern char __idmap_text_end[]; extern char __entry_text_start[]; extern char __entry_text_end[]; -extern char __hyp_idmap_text_start[]; -extern char __hyp_idmap_text_end[]; static inline bool in_entry_text(unsigned long addr) { @@ -22,9 +20,7 @@ static inline bool in_entry_text(unsigned long addr) static inline bool in_idmap_text(unsigned long addr) { void *a = (void *)addr; - return memory_contains(__idmap_text_start, __idmap_text_end, a, 1) || - memory_contains(__hyp_idmap_text_start, __hyp_idmap_text_end, - a, 1); + return memory_contains(__idmap_text_start, __idmap_text_end, a, 1); } #endif /* _ASM_ARM_SECTIONS_H */ diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h deleted file mode 100644 index aaceec7855ec..000000000000 --- a/arch/arm/include/asm/stage2_pgtable.h +++ /dev/null @@ -1,75 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2016 - ARM Ltd - * - * stage2 page table helpers - */ - -#ifndef __ARM_S2_PGTABLE_H_ -#define __ARM_S2_PGTABLE_H_ - -/* - * kvm_mmu_cache_min_pages() is the number of pages required - * to install a stage-2 translation. We pre-allocate the entry - * level table at VM creation. Since we have a 3 level page-table, - * we need only two pages to add a new mapping. 
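/*
 * [Editorial sketch, not part of the patch] The L_PTE_S2_RDONLY/RDWR
 * values removed from pgtable-3level.h above encode the stage-2 access
 * permissions HAP[2:1] at PTE bits [7:6]: 0b01 is read-only, 0b11 is
 * read/write. Composing that field:
 */
#include <stdbool.h>
#include <stdint.h>

static inline uint64_t ex_s2_hap_bits(bool writable)
{
	return (writable ? 3ull : 1ull) << 6;	/* L_PTE_S2_RDWR : L_PTE_S2_RDONLY */
}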
- */ -#define kvm_mmu_cache_min_pages(kvm) 2 - -#define stage2_pgd_none(kvm, pgd) pgd_none(pgd) -#define stage2_pgd_clear(kvm, pgd) pgd_clear(pgd) -#define stage2_pgd_present(kvm, pgd) pgd_present(pgd) -#define stage2_pgd_populate(kvm, pgd, pud) pgd_populate(NULL, pgd, pud) -#define stage2_pud_offset(kvm, pgd, address) pud_offset(pgd, address) -#define stage2_pud_free(kvm, pud) do { } while (0) - -#define stage2_pud_none(kvm, pud) pud_none(pud) -#define stage2_pud_clear(kvm, pud) pud_clear(pud) -#define stage2_pud_present(kvm, pud) pud_present(pud) -#define stage2_pud_populate(kvm, pud, pmd) pud_populate(NULL, pud, pmd) -#define stage2_pmd_offset(kvm, pud, address) pmd_offset(pud, address) -#define stage2_pmd_free(kvm, pmd) free_page((unsigned long)pmd) - -#define stage2_pud_huge(kvm, pud) pud_huge(pud) - -/* Open coded p*d_addr_end that can deal with 64bit addresses */ -static inline phys_addr_t -stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) -{ - phys_addr_t boundary = (addr + PGDIR_SIZE) & PGDIR_MASK; - - return (boundary - 1 < end - 1) ? boundary : end; -} - -#define stage2_pud_addr_end(kvm, addr, end) (end) - -static inline phys_addr_t -stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) -{ - phys_addr_t boundary = (addr + PMD_SIZE) & PMD_MASK; - - return (boundary - 1 < end - 1) ? boundary : end; -} - -#define stage2_pgd_index(kvm, addr) pgd_index(addr) - -#define stage2_pte_table_empty(kvm, ptep) kvm_page_empty(ptep) -#define stage2_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp) -#define stage2_pud_table_empty(kvm, pudp) false - -static inline bool kvm_stage2_has_pud(struct kvm *kvm) -{ - return false; -} - -#define S2_PMD_MASK PMD_MASK -#define S2_PMD_SIZE PMD_SIZE -#define S2_PUD_MASK PUD_MASK -#define S2_PUD_SIZE PUD_SIZE - -static inline bool kvm_stage2_has_pmd(struct kvm *kvm) -{ - return true; -} - -#endif /* __ARM_S2_PGTABLE_H_ */ diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h index 17c26ccd126d..dd9697b2bde8 100644 --- a/arch/arm/include/asm/virt.h +++ b/arch/arm/include/asm/virt.h @@ -39,8 +39,6 @@ static inline void sync_boot_mode(void) sync_cache_r(&__boot_cpu_mode); } -void __hyp_set_vectors(unsigned long phys_vector_base); -void __hyp_reset_vectors(void); #else #define __boot_cpu_mode (SVC_MODE) #define sync_boot_mode() @@ -67,18 +65,6 @@ static inline bool is_kernel_in_hyp_mode(void) return false; } -static inline bool has_vhe(void) -{ - return false; -} - -/* The section containing the hypervisor idmap text */ -extern char __hyp_idmap_text_start[]; -extern char __hyp_idmap_text_end[]; - -/* The section containing the hypervisor text */ -extern char __hyp_text_start[]; -extern char __hyp_text_end[]; #endif #else @@ -87,9 +73,6 @@ extern char __hyp_text_end[]; #define HVC_SET_VECTORS 0 #define HVC_SOFT_RESTART 1 -#define HVC_RESET_VECTORS 2 - -#define HVC_STUB_HCALL_NR 3 #endif /* __ASSEMBLY__ */ diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h deleted file mode 100644 index 03cd7c19a683..000000000000 --- a/arch/arm/include/uapi/asm/kvm.h +++ /dev/null @@ -1,314 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall <c.dall@virtualopensystems.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. 
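/*
 * [Editorial sketch, not part of the patch] The open-coded
 * stage2_*_addr_end() helpers above guard against 64-bit wrap-around:
 * if addr sits in the last table entry of the address space, boundary
 * overflows to 0 and a naive "boundary < end" would return 0, stalling
 * the walk. Subtracting one from both sides makes a wrapped boundary
 * compare as the largest value instead. The generic shape:
 */
#include <stdint.h>

static inline uint64_t ex_range_end(uint64_t addr, uint64_t end,
				    uint64_t size, uint64_t mask)
{
	uint64_t boundary = (addr + size) & mask;

	return (boundary - 1 < end - 1) ? boundary : end;
}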
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#ifndef __ARM_KVM_H__ -#define __ARM_KVM_H__ - -#include <linux/types.h> -#include <linux/psci.h> -#include <asm/ptrace.h> - -#define __KVM_HAVE_GUEST_DEBUG -#define __KVM_HAVE_IRQ_LINE -#define __KVM_HAVE_READONLY_MEM -#define __KVM_HAVE_VCPU_EVENTS - -#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 - -#define KVM_REG_SIZE(id) \ - (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) - -/* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */ -#define KVM_ARM_SVC_sp svc_regs[0] -#define KVM_ARM_SVC_lr svc_regs[1] -#define KVM_ARM_SVC_spsr svc_regs[2] -#define KVM_ARM_ABT_sp abt_regs[0] -#define KVM_ARM_ABT_lr abt_regs[1] -#define KVM_ARM_ABT_spsr abt_regs[2] -#define KVM_ARM_UND_sp und_regs[0] -#define KVM_ARM_UND_lr und_regs[1] -#define KVM_ARM_UND_spsr und_regs[2] -#define KVM_ARM_IRQ_sp irq_regs[0] -#define KVM_ARM_IRQ_lr irq_regs[1] -#define KVM_ARM_IRQ_spsr irq_regs[2] - -/* Valid only for fiq_regs in struct kvm_regs */ -#define KVM_ARM_FIQ_r8 fiq_regs[0] -#define KVM_ARM_FIQ_r9 fiq_regs[1] -#define KVM_ARM_FIQ_r10 fiq_regs[2] -#define KVM_ARM_FIQ_fp fiq_regs[3] -#define KVM_ARM_FIQ_ip fiq_regs[4] -#define KVM_ARM_FIQ_sp fiq_regs[5] -#define KVM_ARM_FIQ_lr fiq_regs[6] -#define KVM_ARM_FIQ_spsr fiq_regs[7] - -struct kvm_regs { - struct pt_regs usr_regs; /* R0_usr - R14_usr, PC, CPSR */ - unsigned long svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */ - unsigned long abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */ - unsigned long und_regs[3]; /* SP_und, LR_und, SPSR_und */ - unsigned long irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */ - unsigned long fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */ -}; - -/* Supported Processor Types */ -#define KVM_ARM_TARGET_CORTEX_A15 0 -#define KVM_ARM_TARGET_CORTEX_A7 1 -#define KVM_ARM_NUM_TARGETS 2 - -/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */ -#define KVM_ARM_DEVICE_TYPE_SHIFT 0 -#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT) -#define KVM_ARM_DEVICE_ID_SHIFT 16 -#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT) - -/* Supported device IDs */ -#define KVM_ARM_DEVICE_VGIC_V2 0 - -/* Supported VGIC address types */ -#define KVM_VGIC_V2_ADDR_TYPE_DIST 0 -#define KVM_VGIC_V2_ADDR_TYPE_CPU 1 - -#define KVM_VGIC_V2_DIST_SIZE 0x1000 -#define KVM_VGIC_V2_CPU_SIZE 0x2000 - -/* Supported VGICv3 address types */ -#define KVM_VGIC_V3_ADDR_TYPE_DIST 2 -#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3 -#define KVM_VGIC_ITS_ADDR_TYPE 4 -#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5 - -#define KVM_VGIC_V3_DIST_SIZE SZ_64K -#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K) -#define KVM_VGIC_V3_ITS_SIZE (2 * SZ_64K) - -#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */ -#define KVM_ARM_VCPU_PSCI_0_2 1 /* CPU uses PSCI v0.2 */ - -struct kvm_vcpu_init { - __u32 target; - __u32 features[7]; -}; - -struct kvm_sregs { -}; - -struct kvm_fpu { -}; - -struct kvm_guest_debug_arch { -}; - -struct kvm_debug_exit_arch { -}; - -struct kvm_sync_regs { - /* Used with KVM_CAP_ARM_USER_IRQ */ - __u64 device_irq_level; -}; - -struct kvm_arch_memory_slot { -}; - -/* for 
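/*
 * [Editorial sketch, not part of the patch] Per the KVM_ARM_DEVICE_*
 * masks above, a KVM_ARM_SET_DEVICE_ADDR id packs a 16-bit device id
 * over a 16-bit address type:
 */
#include <stdint.h>

static inline uint32_t ex_device_addr_id(uint16_t dev_id, uint16_t addr_type)
{
	return ((uint32_t)dev_id << 16) | addr_type;	/* id at [31:16], type at [15:0] */
}
/* ex_device_addr_id(KVM_ARM_DEVICE_VGIC_V2, KVM_VGIC_V2_ADDR_TYPE_DIST) == 0 */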
KVM_GET/SET_VCPU_EVENTS */ -struct kvm_vcpu_events { - struct { - __u8 serror_pending; - __u8 serror_has_esr; - __u8 ext_dabt_pending; - /* Align it to 8 bytes */ - __u8 pad[5]; - __u64 serror_esr; - } exception; - __u32 reserved[12]; -}; - -/* If you need to interpret the index values, here is the key: */ -#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000 -#define KVM_REG_ARM_COPROC_SHIFT 16 -#define KVM_REG_ARM_32_OPC2_MASK 0x0000000000000007 -#define KVM_REG_ARM_32_OPC2_SHIFT 0 -#define KVM_REG_ARM_OPC1_MASK 0x0000000000000078 -#define KVM_REG_ARM_OPC1_SHIFT 3 -#define KVM_REG_ARM_CRM_MASK 0x0000000000000780 -#define KVM_REG_ARM_CRM_SHIFT 7 -#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800 -#define KVM_REG_ARM_32_CRN_SHIFT 11 -/* - * For KVM currently all guest registers are nonsecure, but we reserve a bit - * in the encoding to distinguish secure from nonsecure for AArch32 system - * registers that are banked by security. This is 1 for the secure banked - * register, and 0 for the nonsecure banked register or if the register is - * not banked by security. - */ -#define KVM_REG_ARM_SECURE_MASK 0x0000000010000000 -#define KVM_REG_ARM_SECURE_SHIFT 28 - -#define ARM_CP15_REG_SHIFT_MASK(x,n) \ - (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK) - -#define __ARM_CP15_REG(op1,crn,crm,op2) \ - (KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT) | \ - ARM_CP15_REG_SHIFT_MASK(op1, OPC1) | \ - ARM_CP15_REG_SHIFT_MASK(crn, 32_CRN) | \ - ARM_CP15_REG_SHIFT_MASK(crm, CRM) | \ - ARM_CP15_REG_SHIFT_MASK(op2, 32_OPC2)) - -#define ARM_CP15_REG32(...) (__ARM_CP15_REG(__VA_ARGS__) | KVM_REG_SIZE_U32) - -#define __ARM_CP15_REG64(op1,crm) \ - (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64) -#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__) - -/* PL1 Physical Timer Registers */ -#define KVM_REG_ARM_PTIMER_CTL ARM_CP15_REG32(0, 14, 2, 1) -#define KVM_REG_ARM_PTIMER_CNT ARM_CP15_REG64(0, 14) -#define KVM_REG_ARM_PTIMER_CVAL ARM_CP15_REG64(2, 14) - -/* Virtual Timer Registers */ -#define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1) -#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14) -#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14) - -/* Normal registers are mapped as coprocessor 16. */ -#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4) - -/* Some registers need more space to represent values. */ -#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00 -#define KVM_REG_ARM_DEMUX_ID_SHIFT 8 -#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT) -#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF -#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0 - -/* VFP registers: we could overload CP10 like ARM does, but that's ugly. 
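/*
 * [Editorial sketch, not part of the patch] The mask/shift definitions
 * above place a 32-bit CP15 register's coordinates at fixed offsets in
 * a register id: Op2 at bits [2:0], Op1 at [6:3], CRm at [10:7], CRn at
 * [14:11] and the coprocessor number at [27:16]. The arch and size bits
 * (KVM_REG_ARM | KVM_REG_SIZE_U32) are treated as an opaque constant:
 */
#include <stdint.h>

static inline uint64_t ex_cp15_reg32_index(uint64_t arch_size_bits,
					   unsigned int op1, unsigned int crn,
					   unsigned int crm, unsigned int op2)
{
	return arch_size_bits | (15ull << 16) |
	       ((uint64_t)crn << 11) | ((uint64_t)crm << 7) |
	       ((uint64_t)op1 << 3) | op2;
}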
*/ -#define KVM_REG_ARM_VFP (0x0012 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_VFP_MASK 0x000000000000FFFF -#define KVM_REG_ARM_VFP_BASE_REG 0x0 -#define KVM_REG_ARM_VFP_FPSID 0x1000 -#define KVM_REG_ARM_VFP_FPSCR 0x1001 -#define KVM_REG_ARM_VFP_MVFR1 0x1006 -#define KVM_REG_ARM_VFP_MVFR0 0x1007 -#define KVM_REG_ARM_VFP_FPEXC 0x1008 -#define KVM_REG_ARM_VFP_FPINST 0x1009 -#define KVM_REG_ARM_VFP_FPINST2 0x100A - -/* KVM-as-firmware specific pseudo-registers */ -#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \ - KVM_REG_ARM_FW | ((r) & 0xffff)) -#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 KVM_REG_ARM_FW_REG(1) - /* Higher values mean better protection. */ -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2) - /* Higher values mean better protection. */ -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL 2 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4) - -/* Device Control API: ARM VGIC */ -#define KVM_DEV_ARM_VGIC_GRP_ADDR 0 -#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 -#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2 -#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32 -#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) -#define KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT 32 -#define KVM_DEV_ARM_VGIC_V3_MPIDR_MASK \ - (0xffffffffULL << KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT) -#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0 -#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) -#define KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK (0xffff) -#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3 -#define KVM_DEV_ARM_VGIC_GRP_CTRL 4 -#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5 -#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6 -#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7 -#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8 -#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10 -#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \ - (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) -#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff -#define VGIC_LEVEL_INFO_LINE_LEVEL 0 - -/* Device Control API on vcpu fd */ -#define KVM_ARM_VCPU_PMU_V3_CTRL 0 -#define KVM_ARM_VCPU_PMU_V3_IRQ 0 -#define KVM_ARM_VCPU_PMU_V3_INIT 1 -#define KVM_ARM_VCPU_TIMER_CTRL 1 -#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0 -#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1 - -#define KVM_DEV_ARM_VGIC_CTRL_INIT 0 -#define KVM_DEV_ARM_ITS_SAVE_TABLES 1 -#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 -#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 -#define KVM_DEV_ARM_ITS_CTRL_RESET 4 - -/* KVM_IRQ_LINE irq field index values */ -#define KVM_ARM_IRQ_VCPU2_SHIFT 28 -#define KVM_ARM_IRQ_VCPU2_MASK 0xf -#define KVM_ARM_IRQ_TYPE_SHIFT 24 -#define KVM_ARM_IRQ_TYPE_MASK 0xf -#define KVM_ARM_IRQ_VCPU_SHIFT 16 -#define KVM_ARM_IRQ_VCPU_MASK 0xff -#define KVM_ARM_IRQ_NUM_SHIFT 0 -#define KVM_ARM_IRQ_NUM_MASK 0xffff - -/* irq_type field */ -#define KVM_ARM_IRQ_TYPE_CPU 0 -#define KVM_ARM_IRQ_TYPE_SPI 1 -#define KVM_ARM_IRQ_TYPE_PPI 2 - -/* out-of-kernel GIC cpu interrupt injection irq_number field */ -#define KVM_ARM_IRQ_CPU_IRQ 0 -#define KVM_ARM_IRQ_CPU_FIQ 1 - -/* - * This used to hold the 
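/*
 * [Editorial sketch, not part of the patch] A KVM_IRQ_LINE irq word,
 * following the field masks above: type at bits [27:24], vcpu index at
 * [23:16], interrupt number at [15:0].
 */
#include <stdint.h>

static inline uint32_t ex_kvm_arm_irq(uint32_t type, uint32_t vcpu,
				      uint32_t num)
{
	return (type << 24) | (vcpu << 16) | num;
}
/* injecting SPI 37 on vcpu 0: ex_kvm_arm_irq(KVM_ARM_IRQ_TYPE_SPI, 0, 37) */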
highest supported SPI, but it is now obsolete - * and only here to provide source code level compatibility with older - * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS. - */ -#ifndef __KERNEL__ -#define KVM_ARM_IRQ_GIC_MAX 127 -#endif - -/* One single KVM irqchip, ie. the VGIC */ -#define KVM_NR_IRQCHIPS 1 - -/* PSCI interface */ -#define KVM_PSCI_FN_BASE 0x95c1ba5e -#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) - -#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0) -#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1) -#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2) -#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3) - -#define KVM_PSCI_RET_SUCCESS PSCI_RET_SUCCESS -#define KVM_PSCI_RET_NI PSCI_RET_NOT_SUPPORTED -#define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS -#define KVM_PSCI_RET_DENIED PSCI_RET_DENIED - -#endif /* __ARM_KVM_H__ */ diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index c773b829ee8e..c036a4a2f8e2 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -11,9 +11,6 @@ #include <linux/sched.h> #include <linux/mm.h> #include <linux/dma-mapping.h> -#ifdef CONFIG_KVM_ARM_HOST -#include <linux/kvm_host.h> -#endif #include <asm/cacheflush.h> #include <asm/glue-df.h> #include <asm/glue-pf.h> @@ -167,14 +164,6 @@ int main(void) DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER); DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE); BLANK(); -#ifdef CONFIG_KVM_ARM_HOST - DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt)); - DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); - DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp)); - DEFINE(CPU_CTXT_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); - DEFINE(GP_REGS_USR, offsetof(struct kvm_regs, usr_regs)); -#endif - BLANK(); #ifdef CONFIG_VDSO DEFINE(VDSO_DATA_SIZE, sizeof(union vdso_data_store)); #endif diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S index 6607fa817bba..26d8e03b1dd3 100644 --- a/arch/arm/kernel/hyp-stub.S +++ b/arch/arm/kernel/hyp-stub.S @@ -189,19 +189,19 @@ ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE ENDPROC(__hyp_stub_install_secondary) __hyp_stub_do_trap: +#ifdef ZIMAGE teq r0, #HVC_SET_VECTORS bne 1f + /* Only the ZIMAGE stubs can change the HYP vectors */ mcr p15, 4, r1, c12, c0, 0 @ set HVBAR b __hyp_stub_exit +#endif 1: teq r0, #HVC_SOFT_RESTART - bne 1f + bne 2f bx r1 -1: teq r0, #HVC_RESET_VECTORS - beq __hyp_stub_exit - - ldr r0, =HVC_STUB_ERR +2: ldr r0, =HVC_STUB_ERR __ERET __hyp_stub_exit: @@ -210,26 +210,9 @@ __hyp_stub_exit: ENDPROC(__hyp_stub_do_trap) /* - * __hyp_set_vectors: Call this after boot to set the initial hypervisor - * vectors as part of hypervisor installation. On an SMP system, this should - * be called on each CPU. - * - * r0 must be the physical address of the new vector table (which must lie in - * the bottom 4GB of physical address space. - * - * r0 must be 32-byte aligned. - * - * Before calling this, you must check that the stub hypervisor is installed - * everywhere, by waiting for any secondary CPUs to be brought up and then - * checking that BOOT_CPU_MODE_HAVE_HYP(__boot_cpu_mode) is true. - * - * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or - * something else went wrong... in such cases, trying to install a new - * hypervisor is unlikely to work as desired. 
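/*
 * [Editorial sketch, not part of the patch] The legacy KVM PSCI
 * function ids above are simply the 0x95c1ba5e base plus an index:
 */
#include <stdint.h>

static inline uint32_t ex_kvm_psci_fn(unsigned int n)
{
	return 0x95c1ba5eu + n;	/* n = 0..3: CPU_SUSPEND, CPU_OFF, CPU_ON, MIGRATE */
}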
- * - * When you call into your shiny new hypervisor, sp_hyp will contain junk, - * so you will need to set that to something sensible at the new hypervisor's - * initialisation entry point. + * __hyp_set_vectors is only used when ZIMAGE must bounce between HYP + * and SVC. For the kernel itself, the vectors are set once and for + * all by the stubs. */ ENTRY(__hyp_set_vectors) mov r1, r0 @@ -245,12 +228,6 @@ ENTRY(__hyp_soft_restart) ret lr ENDPROC(__hyp_soft_restart) -ENTRY(__hyp_reset_vectors) - mov r0, #HVC_RESET_VECTORS - __HVC(0) - ret lr -ENDPROC(__hyp_reset_vectors) - #ifndef ZIMAGE .align 2 .L__boot_cpu_mode_offset: diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S index 21b8b271c80d..6d2be994ae58 100644 --- a/arch/arm/kernel/vmlinux-xip.lds.S +++ b/arch/arm/kernel/vmlinux-xip.lds.S @@ -162,14 +162,6 @@ SECTIONS ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support") ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined") -/* - * The HYP init code can't be more than a page long, - * and should not cross a page boundary. - * The above comment applies as well. - */ -ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE, - "HYP init code too big or misaligned") - #ifdef CONFIG_XIP_DEFLATED_DATA /* * The .bss is used as a stack area for __inflate_kernel_data() whose stack diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 319ccb10846a..88a720da443b 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -170,12 +170,4 @@ __start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT); ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support") ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined") -/* - * The HYP init code can't be more than a page long, - * and should not cross a page boundary. - * The above comment applies as well. - */ -ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE, - "HYP init code too big or misaligned") - #endif /* CONFIG_XIP_KERNEL */ diff --git a/arch/arm/kernel/vmlinux.lds.h b/arch/arm/kernel/vmlinux.lds.h index 8247bc15addc..381a8e105fa5 100644 --- a/arch/arm/kernel/vmlinux.lds.h +++ b/arch/arm/kernel/vmlinux.lds.h @@ -31,20 +31,11 @@ *(.proc.info.init) \ __proc_info_end = .; -#define HYPERVISOR_TEXT \ - __hyp_text_start = .; \ - *(.hyp.text) \ - __hyp_text_end = .; - #define IDMAP_TEXT \ ALIGN_FUNCTION(); \ __idmap_text_start = .; \ *(.idmap.text) \ __idmap_text_end = .; \ - . = ALIGN(PAGE_SIZE); \ - __hyp_idmap_text_start = .; \ - *(.hyp.idmap.text) \ - __hyp_idmap_text_end = .; #define ARM_DISCARD \ *(.ARM.exidx.exit.text) \ @@ -72,7 +63,6 @@ SCHED_TEXT \ CPUIDLE_TEXT \ LOCK_TEXT \ - HYPERVISOR_TEXT \ KPROBES_TEXT \ *(.gnu.warning) \ *(.glue_7) \ diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig deleted file mode 100644 index f591026347a5..000000000000 --- a/arch/arm/kvm/Kconfig +++ /dev/null @@ -1,59 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# KVM configuration -# - -source "virt/kvm/Kconfig" -source "virt/lib/Kconfig" - -menuconfig VIRTUALIZATION - bool "Virtualization" - ---help--- - Say Y here to get to see options for using your Linux host to run - other operating systems inside virtual machines (guests). - This option alone does not add any kernel code. - - If you say N, all options in this submenu will be skipped and - disabled. 
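/*
 * [Editorial sketch, not part of the patch] The linker-script ASSERTs
 * removed above verify that the HYP init code fits in the single page
 * containing its start, i.e. is no larger than a page and does not
 * straddle a page boundary:
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool ex_fits_in_one_page(uint64_t start, uint64_t end,
					uint64_t page_size)
{
	uint64_t page_start = start & ~(page_size - 1);	/* start & PAGE_MASK */

	return end - page_start <= page_size;
}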
- -if VIRTUALIZATION - -config KVM - bool "Kernel-based Virtual Machine (KVM) support" - depends on MMU && OF - select PREEMPT_NOTIFIERS - select ARM_GIC - select ARM_GIC_V3 - select ARM_GIC_V3_ITS - select HAVE_KVM_CPU_RELAX_INTERCEPT - select HAVE_KVM_ARCH_TLB_FLUSH_ALL - select KVM_MMIO - select KVM_ARM_HOST - select KVM_GENERIC_DIRTYLOG_READ_PROTECT - select SRCU - select MMU_NOTIFIER - select KVM_VFIO - select HAVE_KVM_EVENTFD - select HAVE_KVM_IRQFD - select HAVE_KVM_IRQCHIP - select HAVE_KVM_IRQ_ROUTING - select HAVE_KVM_MSI - select IRQ_BYPASS_MANAGER - select HAVE_KVM_IRQ_BYPASS - depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER - ---help--- - Support hosting virtualized guest machines. - - This module provides access to the hardware capabilities through - a character device node named /dev/kvm. - - If unsure, say N. - -config KVM_ARM_HOST - bool - ---help--- - Provides host support for ARM processors. - -source "drivers/vhost/Kconfig" - -endif # VIRTUALIZATION diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile deleted file mode 100644 index e442d82821df..000000000000 --- a/arch/arm/kvm/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Makefile for Kernel-based Virtual Machine module -# - -plus_virt := $(call as-instr,.arch_extension virt,+virt) -ifeq ($(plus_virt),+virt) - plus_virt_def := -DREQUIRES_VIRT=1 -endif - -KVM := ../../../virt/kvm - -ccflags-y += -I $(srctree)/$(src) -I $(srctree)/virt/kvm/arm/vgic -CFLAGS_$(KVM)/arm/arm.o := $(plus_virt_def) - -AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt) -AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt) - -kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o - -obj-$(CONFIG_KVM_ARM_HOST) += hyp/ - -obj-y += kvm-arm.o init.o interrupts.o -obj-y += handle_exit.o guest.o emulate.o reset.o -obj-y += coproc.o coproc_a15.o coproc_a7.o vgic-v3-coproc.o -obj-y += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o -obj-y += $(KVM)/arm/psci.o $(KVM)/arm/perf.o $(KVM)/arm/hypercalls.o -obj-y += $(KVM)/arm/aarch32.o - -obj-y += $(KVM)/arm/vgic/vgic.o -obj-y += $(KVM)/arm/vgic/vgic-init.o -obj-y += $(KVM)/arm/vgic/vgic-irqfd.o -obj-y += $(KVM)/arm/vgic/vgic-v2.o -obj-y += $(KVM)/arm/vgic/vgic-v3.o -obj-y += $(KVM)/arm/vgic/vgic-v4.o -obj-y += $(KVM)/arm/vgic/vgic-mmio.o -obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o -obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o -obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o -obj-y += $(KVM)/arm/vgic/vgic-its.o -obj-y += $(KVM)/arm/vgic/vgic-debug.o -obj-y += $(KVM)/irqchip.o -obj-y += $(KVM)/arm/arch_timer.o diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c deleted file mode 100644 index f0c09049ee99..000000000000 --- a/arch/arm/kvm/coproc.c +++ /dev/null @@ -1,1454 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Authors: Rusty Russell <rusty@rustcorp.com.au> - * Christoffer Dall <c.dall@virtualopensystems.com> - */ - -#include <linux/bsearch.h> -#include <linux/mm.h> -#include <linux/kvm_host.h> -#include <linux/uaccess.h> -#include <asm/kvm_arm.h> -#include <asm/kvm_emulate.h> -#include <asm/kvm_coproc.h> -#include <asm/kvm_mmu.h> -#include <asm/cacheflush.h> -#include <asm/cputype.h> -#include <trace/events/kvm.h> -#include <asm/vfp.h> -#include "../vfp/vfpinstr.h" - -#define CREATE_TRACE_POINTS -#include "trace.h" -#include "coproc.h" - - -/****************************************************************************** - * Co-processor emulation - 
*****************************************************************************/ - -static bool write_to_read_only(struct kvm_vcpu *vcpu, - const struct coproc_params *params) -{ - WARN_ONCE(1, "CP15 write to read-only register\n"); - print_cp_instr(params); - kvm_inject_undefined(vcpu); - return false; -} - -static bool read_from_write_only(struct kvm_vcpu *vcpu, - const struct coproc_params *params) -{ - WARN_ONCE(1, "CP15 read to write-only register\n"); - print_cp_instr(params); - kvm_inject_undefined(vcpu); - return false; -} - -/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */ -static u32 cache_levels; - -/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */ -#define CSSELR_MAX 12 - -/* - * kvm_vcpu_arch.cp15 holds cp15 registers as an array of u32, but some - * of cp15 registers can be viewed either as couple of two u32 registers - * or one u64 register. Current u64 register encoding is that least - * significant u32 word is followed by most significant u32 word. - */ -static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu, - const struct coproc_reg *r, - u64 val) -{ - vcpu_cp15(vcpu, r->reg) = val & 0xffffffff; - vcpu_cp15(vcpu, r->reg + 1) = val >> 32; -} - -static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu, - const struct coproc_reg *r) -{ - u64 val; - - val = vcpu_cp15(vcpu, r->reg + 1); - val = val << 32; - val = val | vcpu_cp15(vcpu, r->reg); - return val; -} - -int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - kvm_inject_undefined(vcpu); - return 1; -} - -int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - /* - * We can get here, if the host has been built without VFPv3 support, - * but the guest attempted a floating point operation. - */ - kvm_inject_undefined(vcpu); - return 1; -} - -int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - kvm_inject_undefined(vcpu); - return 1; -} - -static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) -{ - /* - * Compute guest MPIDR. We build a virtual cluster out of the - * vcpu_id, but we read the 'U' bit from the underlying - * hardware directly. - */ - vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) | - ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) | - (vcpu->vcpu_id & 3)); -} - -/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */ -static bool access_actlr(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (p->is_write) - return ignore_write(vcpu, p); - - *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR); - return true; -} - -/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. 
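/*
 * [Editorial sketch, not part of the patch] reset_mpidr() above groups
 * vcpus into virtual clusters of four: Aff0 = vcpu_id % 4 and
 * Aff1 = vcpu_id / 4, with the uniprocessor/SMP bits copied from the
 * host. The level width of 8 bits (MPIDR_LEVEL_BITS) is assumed, not
 * shown in this hunk.
 */
#include <stdint.h>

static inline uint32_t ex_virt_mpidr(uint32_t host_smp_bits, uint32_t vcpu_id)
{
	return host_smp_bits | ((vcpu_id >> 2) << 8) | (vcpu_id & 3);
}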
*/ -static bool access_cbar(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (p->is_write) - return write_to_read_only(vcpu, p); - return read_zero(vcpu, p); -} - -/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */ -static bool access_l2ctlr(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (p->is_write) - return ignore_write(vcpu, p); - - *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR); - return true; -} - -static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) -{ - u32 l2ctlr, ncores; - - asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr)); - l2ctlr &= ~(3 << 24); - ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1; - /* How many cores in the current cluster and the next ones */ - ncores -= (vcpu->vcpu_id & ~3); - /* Cap it to the maximum number of cores in a single cluster */ - ncores = min(ncores, 3U); - l2ctlr |= (ncores & 3) << 24; - - vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr; -} - -static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) -{ - u32 actlr; - - /* ACTLR contains SMP bit: make sure you create all cpus first! */ - asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr)); - /* Make the SMP bit consistent with the guest configuration */ - if (atomic_read(&vcpu->kvm->online_vcpus) > 1) - actlr |= 1U << 6; - else - actlr &= ~(1U << 6); - - vcpu_cp15(vcpu, c1_ACTLR) = actlr; -} - -/* - * TRM entries: A7:4.3.50, A15:4.3.49 - * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). - */ -static bool access_l2ectlr(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (p->is_write) - return ignore_write(vcpu, p); - - *vcpu_reg(vcpu, p->Rt1) = 0; - return true; -} - -/* - * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). - */ -static bool access_dcsw(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (!p->is_write) - return read_from_write_only(vcpu, p); - - kvm_set_way_flush(vcpu); - return true; -} - -/* - * Generic accessor for VM registers. Only called as long as HCR_TVM - * is set. If the guest enables the MMU, we stop trapping the VM - * sys_regs and leave it in complete control of the caches. - * - * Used by the cpu-specific code. - */ -bool access_vm_reg(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - bool was_enabled = vcpu_has_cache_enabled(vcpu); - - BUG_ON(!p->is_write); - - vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1); - if (p->is_64bit) - vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2); - - kvm_toggle_cache(vcpu, was_enabled); - return true; -} - -static bool access_gic_sgi(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - u64 reg; - bool g1; - - if (!p->is_write) - return read_from_write_only(vcpu, p); - - reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32; - reg |= *vcpu_reg(vcpu, p->Rt1) ; - - /* - * In a system where GICD_CTLR.DS=1, a ICC_SGI0R access generates - * Group0 SGIs only, while ICC_SGI1R can generate either group, - * depending on the SGI configuration. ICC_ASGI1R is effectively - * equivalent to ICC_SGI0R, as there is no "alternative" secure - * group. 
- */ - switch (p->Op1) { - default: /* Keep GCC quiet */ - case 0: /* ICC_SGI1R */ - g1 = true; - break; - case 1: /* ICC_ASGI1R */ - case 2: /* ICC_SGI0R */ - g1 = false; - break; - } - - vgic_v3_dispatch_sgi(vcpu, reg, g1); - - return true; -} - -static bool access_gic_sre(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (p->is_write) - return ignore_write(vcpu, p); - - *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre; - - return true; -} - -static bool access_cntp_tval(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - u32 val; - - if (p->is_write) { - val = *vcpu_reg(vcpu, p->Rt1); - kvm_arm_timer_write_sysreg(vcpu, - TIMER_PTIMER, TIMER_REG_TVAL, val); - } else { - val = kvm_arm_timer_read_sysreg(vcpu, - TIMER_PTIMER, TIMER_REG_TVAL); - *vcpu_reg(vcpu, p->Rt1) = val; - } - - return true; -} - -static bool access_cntp_ctl(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - u32 val; - - if (p->is_write) { - val = *vcpu_reg(vcpu, p->Rt1); - kvm_arm_timer_write_sysreg(vcpu, - TIMER_PTIMER, TIMER_REG_CTL, val); - } else { - val = kvm_arm_timer_read_sysreg(vcpu, - TIMER_PTIMER, TIMER_REG_CTL); - *vcpu_reg(vcpu, p->Rt1) = val; - } - - return true; -} - -static bool access_cntp_cval(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - u64 val; - - if (p->is_write) { - val = (u64)*vcpu_reg(vcpu, p->Rt2) << 32; - val |= *vcpu_reg(vcpu, p->Rt1); - kvm_arm_timer_write_sysreg(vcpu, - TIMER_PTIMER, TIMER_REG_CVAL, val); - } else { - val = kvm_arm_timer_read_sysreg(vcpu, - TIMER_PTIMER, TIMER_REG_CVAL); - *vcpu_reg(vcpu, p->Rt1) = val; - *vcpu_reg(vcpu, p->Rt2) = val >> 32; - } - - return true; -} - -/* - * We could trap ID_DFR0 and tell the guest we don't support performance - * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was - * NAKed, so it will read the PMCR anyway. - * - * Therefore we tell the guest we have 0 counters. Unfortunately, we - * must always support PMCCNTR (the cycle counter): we just RAZ/WI for - * all PM registers, which doesn't crash the guest kernel at least. - */ -static bool trap_raz_wi(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (p->is_write) - return ignore_write(vcpu, p); - else - return read_zero(vcpu, p); -} - -#define access_pmcr trap_raz_wi -#define access_pmcntenset trap_raz_wi -#define access_pmcntenclr trap_raz_wi -#define access_pmovsr trap_raz_wi -#define access_pmselr trap_raz_wi -#define access_pmceid0 trap_raz_wi -#define access_pmceid1 trap_raz_wi -#define access_pmccntr trap_raz_wi -#define access_pmxevtyper trap_raz_wi -#define access_pmxevcntr trap_raz_wi -#define access_pmuserenr trap_raz_wi -#define access_pmintenset trap_raz_wi -#define access_pmintenclr trap_raz_wi - -/* Architected CP15 registers. - * CRn denotes the primary register number, but is copied to the CRm in the - * user space API for 64-bit register access in line with the terminology used - * in the ARM ARM. - * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit - * registers preceding 32-bit ones. - */ -static const struct coproc_reg cp15_regs[] = { - /* MPIDR: we use VMPIDR for guest access. */ - { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32, - NULL, reset_mpidr, c0_MPIDR }, - - /* CSSELR: swapped by interrupt.S. 
*/ - { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32, - NULL, reset_unknown, c0_CSSELR }, - - /* ACTLR: trapped by HCR.TAC bit. */ - { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32, - access_actlr, reset_actlr, c1_ACTLR }, - - /* CPACR: swapped by interrupt.S. */ - { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32, - NULL, reset_val, c1_CPACR, 0x00000000 }, - - /* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */ - { CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 }, - { CRn(2), CRm( 0), Op1( 0), Op2( 0), is32, - access_vm_reg, reset_unknown, c2_TTBR0 }, - { CRn(2), CRm( 0), Op1( 0), Op2( 1), is32, - access_vm_reg, reset_unknown, c2_TTBR1 }, - { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, - access_vm_reg, reset_val, c2_TTBCR, 0x00000000 }, - { CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 }, - - - /* DACR: swapped by interrupt.S. */ - { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32, - access_vm_reg, reset_unknown, c3_DACR }, - - /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */ - { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32, - access_vm_reg, reset_unknown, c5_DFSR }, - { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32, - access_vm_reg, reset_unknown, c5_IFSR }, - { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32, - access_vm_reg, reset_unknown, c5_ADFSR }, - { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32, - access_vm_reg, reset_unknown, c5_AIFSR }, - - /* DFAR/IFAR: swapped by interrupt.S. */ - { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32, - access_vm_reg, reset_unknown, c6_DFAR }, - { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32, - access_vm_reg, reset_unknown, c6_IFAR }, - - /* PAR swapped by interrupt.S */ - { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, - - /* - * DC{C,I,CI}SW operations: - */ - { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw}, - { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw}, - { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw}, - /* - * L2CTLR access (guest wants to know #CPUs). - */ - { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32, - access_l2ctlr, reset_l2ctlr, c9_L2CTLR }, - { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr}, - - /* - * Dummy performance monitor implementation. - */ - { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr}, - { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset}, - { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr}, - { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr}, - { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr}, - { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0}, - { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1}, - { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr}, - { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper}, - { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr}, - { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr}, - { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset}, - { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr}, - - /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */ - { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32, - access_vm_reg, reset_unknown, c10_PRRR}, - { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32, - access_vm_reg, reset_unknown, c10_NMRR}, - - /* AMAIR0/AMAIR1: swapped by interrupt.S. 
*/ - { CRn(10), CRm( 3), Op1( 0), Op2( 0), is32, - access_vm_reg, reset_unknown, c10_AMAIR0}, - { CRn(10), CRm( 3), Op1( 0), Op2( 1), is32, - access_vm_reg, reset_unknown, c10_AMAIR1}, - - /* ICC_SGI1R */ - { CRm64(12), Op1( 0), is64, access_gic_sgi}, - - /* VBAR: swapped by interrupt.S. */ - { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32, - NULL, reset_val, c12_VBAR, 0x00000000 }, - - /* ICC_ASGI1R */ - { CRm64(12), Op1( 1), is64, access_gic_sgi}, - /* ICC_SGI0R */ - { CRm64(12), Op1( 2), is64, access_gic_sgi}, - /* ICC_SRE */ - { CRn(12), CRm(12), Op1( 0), Op2(5), is32, access_gic_sre }, - - /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */ - { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32, - access_vm_reg, reset_val, c13_CID, 0x00000000 }, - { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32, - NULL, reset_unknown, c13_TID_URW }, - { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32, - NULL, reset_unknown, c13_TID_URO }, - { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32, - NULL, reset_unknown, c13_TID_PRIV }, - - /* CNTP */ - { CRm64(14), Op1( 2), is64, access_cntp_cval}, - - /* CNTKCTL: swapped by interrupt.S. */ - { CRn(14), CRm( 1), Op1( 0), Op2( 0), is32, - NULL, reset_val, c14_CNTKCTL, 0x00000000 }, - - /* CNTP */ - { CRn(14), CRm( 2), Op1( 0), Op2( 0), is32, access_cntp_tval }, - { CRn(14), CRm( 2), Op1( 0), Op2( 1), is32, access_cntp_ctl }, - - /* The Configuration Base Address Register. */ - { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar}, -}; - -static int check_reg_table(const struct coproc_reg *table, unsigned int n) -{ - unsigned int i; - - for (i = 1; i < n; i++) { - if (cmp_reg(&table[i-1], &table[i]) >= 0) { - kvm_err("reg table %p out of order (%d)\n", table, i - 1); - return 1; - } - } - - return 0; -} - -/* Target specific emulation tables */ -static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS]; - -void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table) -{ - BUG_ON(check_reg_table(table->table, table->num)); - target_tables[table->target] = table; -} - -/* Get specific register table for this target. */ -static const struct coproc_reg *get_target_table(unsigned target, size_t *num) -{ - struct kvm_coproc_target_table *table; - - table = target_tables[target]; - *num = table->num; - return table->table; -} - -#define reg_to_match_value(x) \ - ({ \ - unsigned long val; \ - val = (x)->CRn << 11; \ - val |= (x)->CRm << 7; \ - val |= (x)->Op1 << 4; \ - val |= (x)->Op2 << 1; \ - val |= !(x)->is_64bit; \ - val; \ - }) - -static int match_reg(const void *key, const void *elt) -{ - const unsigned long pval = (unsigned long)key; - const struct coproc_reg *r = elt; - - return pval - reg_to_match_value(r); -} - -static const struct coproc_reg *find_reg(const struct coproc_params *params, - const struct coproc_reg table[], - unsigned int num) -{ - unsigned long pval = reg_to_match_value(params); - - return bsearch((void *)pval, table, num, sizeof(table[0]), match_reg); -} - -static int emulate_cp15(struct kvm_vcpu *vcpu, - const struct coproc_params *params) -{ - size_t num; - const struct coproc_reg *table, *r; - - trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn, - params->CRm, params->Op2, params->is_write); - - table = get_target_table(vcpu->arch.target, &num); - - /* Search target-specific then generic table. */ - r = find_reg(params, table, num); - if (!r) - r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs)); - - if (likely(r)) { - /* If we don't have an accessor, we should never get here! 
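/*
 * [Editorial sketch, not part of the patch] reg_to_match_value() above
 * flattens a register's coordinates into one integer so bsearch() can
 * run over the sorted table; negating is_64bit makes the 64-bit variant
 * sort before the 32-bit one with the same CRn/CRm/Op1/Op2, matching
 * the table's documented ordering.
 */
#include <stdbool.h>

static inline unsigned long ex_reg_key(unsigned int crn, unsigned int crm,
				       unsigned int op1, unsigned int op2,
				       bool is64)
{
	return (crn << 11) | (crm << 7) | (op1 << 4) | (op2 << 1) | !is64;
}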
*/ - BUG_ON(!r->access); - - if (likely(r->access(vcpu, params, r))) { - /* Skip instruction, since it was emulated */ - kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); - } - } else { - /* If access function fails, it should complain. */ - kvm_err("Unsupported guest CP15 access at: %08lx [%08lx]\n", - *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); - print_cp_instr(params); - kvm_inject_undefined(vcpu); - } - - return 1; -} - -static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu) -{ - struct coproc_params params; - - params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf; - params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf; - params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0); - params.is_64bit = true; - - params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf; - params.Op2 = 0; - params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf; - params.CRm = 0; - - return params; -} - -/** - * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access - * @vcpu: The VCPU pointer - * @run: The kvm_run struct - */ -int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - struct coproc_params params = decode_64bit_hsr(vcpu); - - return emulate_cp15(vcpu, ¶ms); -} - -/** - * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access - * @vcpu: The VCPU pointer - * @run: The kvm_run struct - */ -int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - struct coproc_params params = decode_64bit_hsr(vcpu); - - /* raz_wi cp14 */ - trap_raz_wi(vcpu, ¶ms, NULL); - - /* handled */ - kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); - return 1; -} - -static void reset_coproc_regs(struct kvm_vcpu *vcpu, - const struct coproc_reg *table, size_t num, - unsigned long *bmap) -{ - unsigned long i; - - for (i = 0; i < num; i++) - if (table[i].reset) { - int reg = table[i].reg; - - table[i].reset(vcpu, &table[i]); - if (reg > 0 && reg < NR_CP15_REGS) { - set_bit(reg, bmap); - if (table[i].is_64bit) - set_bit(reg + 1, bmap); - } - } -} - -static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu) -{ - struct coproc_params params; - - params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf; - params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf; - params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0); - params.is_64bit = false; - - params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf; - params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7; - params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7; - params.Rt2 = 0; - - return params; -} - -/** - * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access - * @vcpu: The VCPU pointer - * @run: The kvm_run struct - */ -int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - struct coproc_params params = decode_32bit_hsr(vcpu); - return emulate_cp15(vcpu, ¶ms); -} - -/** - * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access - * @vcpu: The VCPU pointer - * @run: The kvm_run struct - */ -int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - struct coproc_params params = decode_32bit_hsr(vcpu); - - /* raz_wi cp14 */ - trap_raz_wi(vcpu, ¶ms, NULL); - - /* handled */ - kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); - return 1; -} - -/****************************************************************************** - * Userspace API - *****************************************************************************/ - -static bool index_to_params(u64 id, struct coproc_params *params) -{ - switch (id & KVM_REG_SIZE_MASK) { - case KVM_REG_SIZE_U32: - /* Any unused index bits means it's 
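/*
 * [Editorial sketch, not part of the patch] decode_32bit_hsr() above
 * extracts the mcr/mrc operands straight from the syndrome word; the
 * same field extraction in isolation:
 */
#include <stdbool.h>
#include <stdint.h>

struct ex_cp_params {
	unsigned int CRn, CRm, Op1, Op2, Rt1;
	bool is_write;
};

static inline struct ex_cp_params ex_decode_32bit_hsr(uint32_t hsr)
{
	return (struct ex_cp_params){
		.CRm      = (hsr >> 1)  & 0xf,
		.Rt1      = (hsr >> 5)  & 0xf,
		.CRn      = (hsr >> 10) & 0xf,
		.Op1      = (hsr >> 14) & 0x7,
		.Op2      = (hsr >> 17) & 0x7,
		.is_write = (hsr & 1) == 0,	/* bit 0 clear = guest write */
	};
}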
not valid. */ - if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK - | KVM_REG_ARM_COPROC_MASK - | KVM_REG_ARM_32_CRN_MASK - | KVM_REG_ARM_CRM_MASK - | KVM_REG_ARM_OPC1_MASK - | KVM_REG_ARM_32_OPC2_MASK)) - return false; - - params->is_64bit = false; - params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK) - >> KVM_REG_ARM_32_CRN_SHIFT); - params->CRm = ((id & KVM_REG_ARM_CRM_MASK) - >> KVM_REG_ARM_CRM_SHIFT); - params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) - >> KVM_REG_ARM_OPC1_SHIFT); - params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK) - >> KVM_REG_ARM_32_OPC2_SHIFT); - return true; - case KVM_REG_SIZE_U64: - /* Any unused index bits means it's not valid. */ - if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK - | KVM_REG_ARM_COPROC_MASK - | KVM_REG_ARM_CRM_MASK - | KVM_REG_ARM_OPC1_MASK)) - return false; - params->is_64bit = true; - /* CRm to CRn: see cp15_to_index for details */ - params->CRn = ((id & KVM_REG_ARM_CRM_MASK) - >> KVM_REG_ARM_CRM_SHIFT); - params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) - >> KVM_REG_ARM_OPC1_SHIFT); - params->Op2 = 0; - params->CRm = 0; - return true; - default: - return false; - } -} - -/* Decode an index value, and find the cp15 coproc_reg entry. */ -static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu, - u64 id) -{ - size_t num; - const struct coproc_reg *table, *r; - struct coproc_params params; - - /* We only do cp15 for now. */ - if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15) - return NULL; - - if (!index_to_params(id, ¶ms)) - return NULL; - - table = get_target_table(vcpu->arch.target, &num); - r = find_reg(¶ms, table, num); - if (!r) - r = find_reg(¶ms, cp15_regs, ARRAY_SIZE(cp15_regs)); - - /* Not saved in the cp15 array? */ - if (r && !r->reg) - r = NULL; - - return r; -} - -/* - * These are the invariant cp15 registers: we let the guest see the host - * versions of these, so they're part of the guest state. - * - * A future CPU may provide a mechanism to present different values to - * the guest, or a future kvm may trap them. - */ -/* Unfortunately, there's no register-argument for mrc, so generate. 
*/ -#define FUNCTION_FOR32(crn, crm, op1, op2, name) \ - static void get_##name(struct kvm_vcpu *v, \ - const struct coproc_reg *r) \ - { \ - u32 val; \ - \ - asm volatile("mrc p15, " __stringify(op1) \ - ", %0, c" __stringify(crn) \ - ", c" __stringify(crm) \ - ", " __stringify(op2) "\n" : "=r" (val)); \ - ((struct coproc_reg *)r)->val = val; \ - } - -FUNCTION_FOR32(0, 0, 0, 0, MIDR) -FUNCTION_FOR32(0, 0, 0, 1, CTR) -FUNCTION_FOR32(0, 0, 0, 2, TCMTR) -FUNCTION_FOR32(0, 0, 0, 3, TLBTR) -FUNCTION_FOR32(0, 0, 0, 6, REVIDR) -FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0) -FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1) -FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0) -FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0) -FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0) -FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1) -FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2) -FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3) -FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0) -FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1) -FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2) -FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3) -FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4) -FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5) -FUNCTION_FOR32(0, 0, 1, 1, CLIDR) -FUNCTION_FOR32(0, 0, 1, 7, AIDR) - -/* ->val is filled in by kvm_invariant_coproc_table_init() */ -static struct coproc_reg invariant_cp15[] = { - { CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR }, - { CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR }, - { CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR }, - { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR }, - { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR }, - - { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR }, - { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR }, - - { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 }, - - { CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 }, - { CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 }, - { CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 }, - { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 }, - { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 }, - { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 }, -}; - -/* - * Reads a register value from a userspace address to a kernel - * variable. Make sure that register size matches sizeof(*__val). - */ -static int reg_from_user(void *val, const void __user *uaddr, u64 id) -{ - if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) - return -EFAULT; - return 0; -} - -/* - * Writes a register value to a userspace address from a kernel variable. - * Make sure that register size matches sizeof(*__val). 
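/*
 * [Editorial note, not part of the patch] Because mrc encodes its
 * coprocessor operands as instruction literals, FUNCTION_FOR32() above
 * stringifies each coordinate into the asm template. For example,
 * FUNCTION_FOR32(0, 0, 0, 1, CTR) expands to:
 *
 *	static void get_CTR(struct kvm_vcpu *v, const struct coproc_reg *r)
 *	{
 *		u32 val;
 *
 *		asm volatile("mrc p15, 0, %0, c0, c0, 1\n" : "=r" (val));
 *		((struct coproc_reg *)r)->val = val;
 *	}
 */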
- */ -static int reg_to_user(void __user *uaddr, const void *val, u64 id) -{ - if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0) - return -EFAULT; - return 0; -} - -static int get_invariant_cp15(u64 id, void __user *uaddr) -{ - struct coproc_params params; - const struct coproc_reg *r; - int ret; - - if (!index_to_params(id, ¶ms)) - return -ENOENT; - - r = find_reg(¶ms, invariant_cp15, ARRAY_SIZE(invariant_cp15)); - if (!r) - return -ENOENT; - - ret = -ENOENT; - if (KVM_REG_SIZE(id) == 4) { - u32 val = r->val; - - ret = reg_to_user(uaddr, &val, id); - } else if (KVM_REG_SIZE(id) == 8) { - ret = reg_to_user(uaddr, &r->val, id); - } - return ret; -} - -static int set_invariant_cp15(u64 id, void __user *uaddr) -{ - struct coproc_params params; - const struct coproc_reg *r; - int err; - u64 val; - - if (!index_to_params(id, ¶ms)) - return -ENOENT; - r = find_reg(¶ms, invariant_cp15, ARRAY_SIZE(invariant_cp15)); - if (!r) - return -ENOENT; - - err = -ENOENT; - if (KVM_REG_SIZE(id) == 4) { - u32 val32; - - err = reg_from_user(&val32, uaddr, id); - if (!err) - val = val32; - } else if (KVM_REG_SIZE(id) == 8) { - err = reg_from_user(&val, uaddr, id); - } - if (err) - return err; - - /* This is what we mean by invariant: you can't change it. */ - if (r->val != val) - return -EINVAL; - - return 0; -} - -static bool is_valid_cache(u32 val) -{ - u32 level, ctype; - - if (val >= CSSELR_MAX) - return false; - - /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */ - level = (val >> 1); - ctype = (cache_levels >> (level * 3)) & 7; - - switch (ctype) { - case 0: /* No cache */ - return false; - case 1: /* Instruction cache only */ - return (val & 1); - case 2: /* Data cache only */ - case 4: /* Unified cache */ - return !(val & 1); - case 3: /* Separate instruction and data caches */ - return true; - default: /* Reserved: we can't know instruction or data. */ - return false; - } -} - -/* Which cache CCSIDR represents depends on CSSELR value. */ -static u32 get_ccsidr(u32 csselr) -{ - u32 ccsidr; - - /* Make sure noone else changes CSSELR during this! */ - local_irq_disable(); - /* Put value into CSSELR */ - asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr)); - isb(); - /* Read result out of CCSIDR */ - asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr)); - local_irq_enable(); - - return ccsidr; -} - -static int demux_c15_get(u64 id, void __user *uaddr) -{ - u32 val; - u32 __user *uval = uaddr; - - /* Fail if we have unknown bits set. */ - if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK - | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) - return -ENOENT; - - switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { - case KVM_REG_ARM_DEMUX_ID_CCSIDR: - if (KVM_REG_SIZE(id) != 4) - return -ENOENT; - val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) - >> KVM_REG_ARM_DEMUX_VAL_SHIFT; - if (!is_valid_cache(val)) - return -ENOENT; - - return put_user(get_ccsidr(val), uval); - default: - return -ENOENT; - } -} - -static int demux_c15_set(u64 id, void __user *uaddr) -{ - u32 val, newval; - u32 __user *uval = uaddr; - - /* Fail if we have unknown bits set. 
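/*
 * [Editorial sketch, not part of the patch] is_valid_cache() above reads
 * cache_levels (mirroring CLIDR) as 3-bit fields, one per cache level:
 * 0 = no cache, 1 = instruction only, 2 = data only, 3 = split I+D,
 * 4 = unified.
 */
#include <stdint.h>

static inline unsigned int ex_cache_type(uint32_t cache_levels,
					 unsigned int level)
{
	return (cache_levels >> (level * 3)) & 7;
}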
*/ - if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK - | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) - return -ENOENT; - - switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { - case KVM_REG_ARM_DEMUX_ID_CCSIDR: - if (KVM_REG_SIZE(id) != 4) - return -ENOENT; - val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) - >> KVM_REG_ARM_DEMUX_VAL_SHIFT; - if (!is_valid_cache(val)) - return -ENOENT; - - if (get_user(newval, uval)) - return -EFAULT; - - /* This is also invariant: you can't change it. */ - if (newval != get_ccsidr(val)) - return -EINVAL; - return 0; - default: - return -ENOENT; - } -} - -#ifdef CONFIG_VFPv3 -static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC, - KVM_REG_ARM_VFP_FPSCR, - KVM_REG_ARM_VFP_FPINST, - KVM_REG_ARM_VFP_FPINST2, - KVM_REG_ARM_VFP_MVFR0, - KVM_REG_ARM_VFP_MVFR1, - KVM_REG_ARM_VFP_FPSID }; - -static unsigned int num_fp_regs(void) -{ - if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2) - return 32; - else - return 16; -} - -static unsigned int num_vfp_regs(void) -{ - /* Normal FP regs + control regs. */ - return num_fp_regs() + ARRAY_SIZE(vfp_sysregs); -} - -static int copy_vfp_regids(u64 __user *uindices) -{ - unsigned int i; - const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP; - const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP; - - for (i = 0; i < num_fp_regs(); i++) { - if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i, - uindices)) - return -EFAULT; - uindices++; - } - - for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) { - if (put_user(u32reg | vfp_sysregs[i], uindices)) - return -EFAULT; - uindices++; - } - - return num_vfp_regs(); -} - -static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) -{ - u32 vfpid = (id & KVM_REG_ARM_VFP_MASK); - u32 val; - - /* Fail if we have unknown bits set. */ - if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK - | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) - return -ENOENT; - - if (vfpid < num_fp_regs()) { - if (KVM_REG_SIZE(id) != 8) - return -ENOENT; - return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid], - id); - } - - /* FP control registers are all 32 bit. */ - if (KVM_REG_SIZE(id) != 4) - return -ENOENT; - - switch (vfpid) { - case KVM_REG_ARM_VFP_FPEXC: - return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id); - case KVM_REG_ARM_VFP_FPSCR: - return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id); - case KVM_REG_ARM_VFP_FPINST: - return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id); - case KVM_REG_ARM_VFP_FPINST2: - return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id); - case KVM_REG_ARM_VFP_MVFR0: - val = fmrx(MVFR0); - return reg_to_user(uaddr, &val, id); - case KVM_REG_ARM_VFP_MVFR1: - val = fmrx(MVFR1); - return reg_to_user(uaddr, &val, id); - case KVM_REG_ARM_VFP_FPSID: - val = fmrx(FPSID); - return reg_to_user(uaddr, &val, id); - default: - return -ENOENT; - } -} - -static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) -{ - u32 vfpid = (id & KVM_REG_ARM_VFP_MASK); - u32 val; - - /* Fail if we have unknown bits set. */ - if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK - | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) - return -ENOENT; - - if (vfpid < num_fp_regs()) { - if (KVM_REG_SIZE(id) != 8) - return -ENOENT; - return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid], - uaddr, id); - } - - /* FP control registers are all 32 bit. 
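/*
 * [Editorial sketch, not part of the patch] num_fp_regs() above sizes
 * the VFP bank from MVFR0's A_SIMD field: the value 2 advertises 32
 * double-precision registers, anything else is treated as 16. The
 * field's position at MVFR0[3:0] is assumed here; the hunk only shows
 * the symbolic MVFR0_A_SIMD_MASK/BIT names.
 */
#include <stdint.h>

static inline unsigned int ex_num_fp_regs(uint32_t mvfr0)
{
	return ((mvfr0 & 0xf) == 2) ? 32 : 16;	/* A_SIMD field, assumed at [3:0] */
}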
*/ - if (KVM_REG_SIZE(id) != 4) - return -ENOENT; - - switch (vfpid) { - case KVM_REG_ARM_VFP_FPEXC: - return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id); - case KVM_REG_ARM_VFP_FPSCR: - return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id); - case KVM_REG_ARM_VFP_FPINST: - return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id); - case KVM_REG_ARM_VFP_FPINST2: - return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id); - /* These are invariant. */ - case KVM_REG_ARM_VFP_MVFR0: - if (reg_from_user(&val, uaddr, id)) - return -EFAULT; - if (val != fmrx(MVFR0)) - return -EINVAL; - return 0; - case KVM_REG_ARM_VFP_MVFR1: - if (reg_from_user(&val, uaddr, id)) - return -EFAULT; - if (val != fmrx(MVFR1)) - return -EINVAL; - return 0; - case KVM_REG_ARM_VFP_FPSID: - if (reg_from_user(&val, uaddr, id)) - return -EFAULT; - if (val != fmrx(FPSID)) - return -EINVAL; - return 0; - default: - return -ENOENT; - } -} -#else /* !CONFIG_VFPv3 */ -static unsigned int num_vfp_regs(void) -{ - return 0; -} - -static int copy_vfp_regids(u64 __user *uindices) -{ - return 0; -} - -static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) -{ - return -ENOENT; -} - -static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) -{ - return -ENOENT; -} -#endif /* !CONFIG_VFPv3 */ - -int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - const struct coproc_reg *r; - void __user *uaddr = (void __user *)(long)reg->addr; - int ret; - - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) - return demux_c15_get(reg->id, uaddr); - - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP) - return vfp_get_reg(vcpu, reg->id, uaddr); - - r = index_to_coproc_reg(vcpu, reg->id); - if (!r) - return get_invariant_cp15(reg->id, uaddr); - - ret = -ENOENT; - if (KVM_REG_SIZE(reg->id) == 8) { - u64 val; - - val = vcpu_cp15_reg64_get(vcpu, r); - ret = reg_to_user(uaddr, &val, reg->id); - } else if (KVM_REG_SIZE(reg->id) == 4) { - ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id); - } - - return ret; -} - -int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - const struct coproc_reg *r; - void __user *uaddr = (void __user *)(long)reg->addr; - int ret; - - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) - return demux_c15_set(reg->id, uaddr); - - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP) - return vfp_set_reg(vcpu, reg->id, uaddr); - - r = index_to_coproc_reg(vcpu, reg->id); - if (!r) - return set_invariant_cp15(reg->id, uaddr); - - ret = -ENOENT; - if (KVM_REG_SIZE(reg->id) == 8) { - u64 val; - - ret = reg_from_user(&val, uaddr, reg->id); - if (!ret) - vcpu_cp15_reg64_set(vcpu, r, val); - } else if (KVM_REG_SIZE(reg->id) == 4) { - ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id); - } - - return ret; -} - -static unsigned int num_demux_regs(void) -{ - unsigned int i, count = 0; - - for (i = 0; i < CSSELR_MAX; i++) - if (is_valid_cache(i)) - count++; - - return count; -} - -static int write_demux_regids(u64 __user *uindices) -{ - u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; - unsigned int i; - - val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; - for (i = 0; i < CSSELR_MAX; i++) { - if (!is_valid_cache(i)) - continue; - if (put_user(val | i, uindices)) - return -EFAULT; - uindices++; - } - return 0; -} - -static u64 cp15_to_index(const struct coproc_reg *reg) -{ - u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT); - if (reg->is_64bit) { 
- val |= KVM_REG_SIZE_U64; - val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); - /* - * CRn always denotes the primary coproc. reg. nr. for the - * in-kernel representation, but the user space API uses the - * CRm for the encoding, because it is modelled after the - * MRRC/MCRR instructions: see the ARM ARM rev. c page - * B3-1445 - */ - val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT); - } else { - val |= KVM_REG_SIZE_U32; - val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); - val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT); - val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); - val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT); - } - return val; -} - -static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind) -{ - if (!*uind) - return true; - - if (put_user(cp15_to_index(reg), *uind)) - return false; - - (*uind)++; - return true; -} - -/* Assumed ordered tables, see kvm_coproc_table_init. */ -static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind) -{ - const struct coproc_reg *i1, *i2, *end1, *end2; - unsigned int total = 0; - size_t num; - - /* We check for duplicates here, to allow arch-specific overrides. */ - i1 = get_target_table(vcpu->arch.target, &num); - end1 = i1 + num; - i2 = cp15_regs; - end2 = cp15_regs + ARRAY_SIZE(cp15_regs); - - BUG_ON(i1 == end1 || i2 == end2); - - /* Walk carefully, as both tables may refer to the same register. */ - while (i1 || i2) { - int cmp = cmp_reg(i1, i2); - /* target-specific overrides generic entry. */ - if (cmp <= 0) { - /* Ignore registers we trap but don't save. */ - if (i1->reg) { - if (!copy_reg_to_user(i1, &uind)) - return -EFAULT; - total++; - } - } else { - /* Ignore registers we trap but don't save. */ - if (i2->reg) { - if (!copy_reg_to_user(i2, &uind)) - return -EFAULT; - total++; - } - } - - if (cmp <= 0 && ++i1 == end1) - i1 = NULL; - if (cmp >= 0 && ++i2 == end2) - i2 = NULL; - } - return total; -} - -unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu) -{ - return ARRAY_SIZE(invariant_cp15) - + num_demux_regs() - + num_vfp_regs() - + walk_cp15(vcpu, (u64 __user *)NULL); -} - -int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) -{ - unsigned int i; - int err; - - /* Then give them all the invariant registers' indices. */ - for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) { - if (put_user(cp15_to_index(&invariant_cp15[i]), uindices)) - return -EFAULT; - uindices++; - } - - err = walk_cp15(vcpu, uindices); - if (err < 0) - return err; - uindices += err; - - err = copy_vfp_regids(uindices); - if (err < 0) - return err; - uindices += err; - - return write_demux_regids(uindices); -} - -void kvm_coproc_table_init(void) -{ - unsigned int i; - - /* Make sure tables are unique and in order. */ - BUG_ON(check_reg_table(cp15_regs, ARRAY_SIZE(cp15_regs))); - BUG_ON(check_reg_table(invariant_cp15, ARRAY_SIZE(invariant_cp15))); - - /* We abuse the reset function to overwrite the table itself. */ - for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) - invariant_cp15[i].reset(NULL, &invariant_cp15[i]); - - /* - * CLIDR format is awkward, so clean it up. See ARM B4.1.20: - * - * If software reads the Cache Type fields from Ctype1 - * upwards, once it has seen a value of 0b000, no caches - * exist at further-out levels of the hierarchy. So, for - * example, if Ctype3 is the first Cache Type field with a - * value of 0b000, the values of Ctype4 to Ctype7 must be - * ignored. 
- */ - asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels)); - for (i = 0; i < 7; i++) - if (((cache_levels >> (i*3)) & 7) == 0) - break; - /* Clear all higher bits. */ - cache_levels &= (1 << (i*3))-1; -} - -/** - * kvm_reset_coprocs - sets cp15 registers to reset value - * @vcpu: The VCPU pointer - * - * This function finds the right table above and sets the registers on the - * virtual CPU struct to their architecturally defined reset values. - */ -void kvm_reset_coprocs(struct kvm_vcpu *vcpu) -{ - size_t num; - const struct coproc_reg *table; - DECLARE_BITMAP(bmap, NR_CP15_REGS) = { 0, }; - - /* Generic chip reset first (so target could override). */ - reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), bmap); - - table = get_target_table(vcpu->arch.target, &num); - reset_coproc_regs(vcpu, table, num, bmap); - - for (num = 1; num < NR_CP15_REGS; num++) - WARN(!test_bit(num, bmap), - "Didn't reset vcpu_cp15(vcpu, %zi)", num); -} diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h deleted file mode 100644 index 637065b13012..000000000000 --- a/arch/arm/kvm/coproc.h +++ /dev/null @@ -1,130 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Authors: Christoffer Dall <c.dall@virtualopensystems.com> - */ - -#ifndef __ARM_KVM_COPROC_LOCAL_H__ -#define __ARM_KVM_COPROC_LOCAL_H__ - -struct coproc_params { - unsigned long CRn; - unsigned long CRm; - unsigned long Op1; - unsigned long Op2; - unsigned long Rt1; - unsigned long Rt2; - bool is_64bit; - bool is_write; -}; - -struct coproc_reg { - /* MRC/MCR/MRRC/MCRR instruction which accesses it. */ - unsigned long CRn; - unsigned long CRm; - unsigned long Op1; - unsigned long Op2; - - bool is_64bit; - - /* Trapped access from guest, if non-NULL. */ - bool (*access)(struct kvm_vcpu *, - const struct coproc_params *, - const struct coproc_reg *); - - /* Initialization for vcpu. */ - void (*reset)(struct kvm_vcpu *, const struct coproc_reg *); - - /* Index into vcpu_cp15(vcpu, ...), or 0 if we don't need to save it. */ - unsigned long reg; - - /* Value (usually reset value) */ - u64 val; -}; - -static inline void print_cp_instr(const struct coproc_params *p) -{ - /* Look, we even formatted it for you to paste into the table! */ - if (p->is_64bit) { - kvm_pr_unimpl(" { CRm64(%2lu), Op1(%2lu), is64, func_%s },\n", - p->CRn, p->Op1, p->is_write ? "write" : "read"); - } else { - kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32," - " func_%s },\n", - p->CRn, p->CRm, p->Op1, p->Op2, - p->is_write ? 
"write" : "read"); - } -} - -static inline bool ignore_write(struct kvm_vcpu *vcpu, - const struct coproc_params *p) -{ - return true; -} - -static inline bool read_zero(struct kvm_vcpu *vcpu, - const struct coproc_params *p) -{ - *vcpu_reg(vcpu, p->Rt1) = 0; - return true; -} - -/* Reset functions */ -static inline void reset_unknown(struct kvm_vcpu *vcpu, - const struct coproc_reg *r) -{ - BUG_ON(!r->reg); - BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15)); - vcpu_cp15(vcpu, r->reg) = 0xdecafbad; -} - -static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r) -{ - BUG_ON(!r->reg); - BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15)); - vcpu_cp15(vcpu, r->reg) = r->val; -} - -static inline void reset_unknown64(struct kvm_vcpu *vcpu, - const struct coproc_reg *r) -{ - BUG_ON(!r->reg); - BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.ctxt.cp15)); - - vcpu_cp15(vcpu, r->reg) = 0xdecafbad; - vcpu_cp15(vcpu, r->reg+1) = 0xd0c0ffee; -} - -static inline int cmp_reg(const struct coproc_reg *i1, - const struct coproc_reg *i2) -{ - BUG_ON(i1 == i2); - if (!i1) - return 1; - else if (!i2) - return -1; - if (i1->CRn != i2->CRn) - return i1->CRn - i2->CRn; - if (i1->CRm != i2->CRm) - return i1->CRm - i2->CRm; - if (i1->Op1 != i2->Op1) - return i1->Op1 - i2->Op1; - if (i1->Op2 != i2->Op2) - return i1->Op2 - i2->Op2; - return i2->is_64bit - i1->is_64bit; -} - - -#define CRn(_x) .CRn = _x -#define CRm(_x) .CRm = _x -#define CRm64(_x) .CRn = _x, .CRm = 0 -#define Op1(_x) .Op1 = _x -#define Op2(_x) .Op2 = _x -#define is64 .is_64bit = true -#define is32 .is_64bit = false - -bool access_vm_reg(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r); - -#endif /* __ARM_KVM_COPROC_LOCAL_H__ */ diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c deleted file mode 100644 index 36bf15421ae8..000000000000 --- a/arch/arm/kvm/coproc_a15.c +++ /dev/null @@ -1,39 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Authors: Rusty Russell <rusty@rustcorp.au> - * Christoffer Dall <c.dall@virtualopensystems.com> - */ -#include <linux/kvm_host.h> -#include <asm/kvm_coproc.h> -#include <asm/kvm_emulate.h> -#include <linux/init.h> - -#include "coproc.h" - -/* - * A15-specific CP15 registers. - * CRn denotes the primary register number, but is copied to the CRm in the - * user space API for 64-bit register access in line with the terminology used - * in the ARM ARM. - * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit - * registers preceding 32-bit ones. - */ -static const struct coproc_reg a15_regs[] = { - /* SCTLR: swapped by interrupt.S. 
*/ - { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, - access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 }, -}; - -static struct kvm_coproc_target_table a15_target_table = { - .target = KVM_ARM_TARGET_CORTEX_A15, - .table = a15_regs, - .num = ARRAY_SIZE(a15_regs), -}; - -static int __init coproc_a15_init(void) -{ - kvm_register_target_coproc_table(&a15_target_table); - return 0; -} -late_initcall(coproc_a15_init); diff --git a/arch/arm/kvm/coproc_a7.c b/arch/arm/kvm/coproc_a7.c deleted file mode 100644 index 40f643e1e05c..000000000000 --- a/arch/arm/kvm/coproc_a7.c +++ /dev/null @@ -1,42 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Copyright (C) 2013 - ARM Ltd - * - * Authors: Rusty Russell <rusty@rustcorp.au> - * Christoffer Dall <c.dall@virtualopensystems.com> - * Jonathan Austin <jonathan.austin@arm.com> - */ -#include <linux/kvm_host.h> -#include <asm/kvm_coproc.h> -#include <asm/kvm_emulate.h> -#include <linux/init.h> - -#include "coproc.h" - -/* - * Cortex-A7 specific CP15 registers. - * CRn denotes the primary register number, but is copied to the CRm in the - * user space API for 64-bit register access in line with the terminology used - * in the ARM ARM. - * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit - * registers preceding 32-bit ones. - */ -static const struct coproc_reg a7_regs[] = { - /* SCTLR: swapped by interrupt.S. */ - { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, - access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 }, -}; - -static struct kvm_coproc_target_table a7_target_table = { - .target = KVM_ARM_TARGET_CORTEX_A7, - .table = a7_regs, - .num = ARRAY_SIZE(a7_regs), -}; - -static int __init coproc_a7_init(void) -{ - kvm_register_target_coproc_table(&a7_target_table); - return 0; -} -late_initcall(coproc_a7_init); diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c deleted file mode 100644 index 29bb852140c5..000000000000 --- a/arch/arm/kvm/emulate.c +++ /dev/null @@ -1,166 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall <c.dall@virtualopensystems.com> - */ - -#include <linux/mm.h> -#include <linux/kvm_host.h> -#include <asm/kvm_arm.h> -#include <asm/kvm_emulate.h> -#include <asm/opcodes.h> -#include <trace/events/kvm.h> - -#include "trace.h" - -#define VCPU_NR_MODES 6 -#define VCPU_REG_OFFSET_USR 0 -#define VCPU_REG_OFFSET_FIQ 1 -#define VCPU_REG_OFFSET_IRQ 2 -#define VCPU_REG_OFFSET_SVC 3 -#define VCPU_REG_OFFSET_ABT 4 -#define VCPU_REG_OFFSET_UND 5 -#define REG_OFFSET(_reg) \ - (offsetof(struct kvm_regs, _reg) / sizeof(u32)) - -#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num]) - -static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = { - /* USR/SYS Registers */ - [VCPU_REG_OFFSET_USR] = { - USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), - USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), - USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), - USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), - USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14), - }, - - /* FIQ Registers */ - [VCPU_REG_OFFSET_FIQ] = { - USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), - USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), - USR_REG_OFFSET(6), USR_REG_OFFSET(7), - REG_OFFSET(fiq_regs[0]), /* r8 */ - REG_OFFSET(fiq_regs[1]), /* r9 */ - REG_OFFSET(fiq_regs[2]), /* r10 */ - REG_OFFSET(fiq_regs[3]), /* r11 
*/ - REG_OFFSET(fiq_regs[4]), /* r12 */ - REG_OFFSET(fiq_regs[5]), /* r13 */ - REG_OFFSET(fiq_regs[6]), /* r14 */ - }, - - /* IRQ Registers */ - [VCPU_REG_OFFSET_IRQ] = { - USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), - USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), - USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), - USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), - USR_REG_OFFSET(12), - REG_OFFSET(irq_regs[0]), /* r13 */ - REG_OFFSET(irq_regs[1]), /* r14 */ - }, - - /* SVC Registers */ - [VCPU_REG_OFFSET_SVC] = { - USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), - USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), - USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), - USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), - USR_REG_OFFSET(12), - REG_OFFSET(svc_regs[0]), /* r13 */ - REG_OFFSET(svc_regs[1]), /* r14 */ - }, - - /* ABT Registers */ - [VCPU_REG_OFFSET_ABT] = { - USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), - USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), - USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), - USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), - USR_REG_OFFSET(12), - REG_OFFSET(abt_regs[0]), /* r13 */ - REG_OFFSET(abt_regs[1]), /* r14 */ - }, - - /* UND Registers */ - [VCPU_REG_OFFSET_UND] = { - USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), - USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), - USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), - USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), - USR_REG_OFFSET(12), - REG_OFFSET(und_regs[0]), /* r13 */ - REG_OFFSET(und_regs[1]), /* r14 */ - }, -}; - -/* - * Return a pointer to the register number valid in the current mode of - * the virtual CPU. - */ -unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num) -{ - unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs; - unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK; - - switch (mode) { - case USR_MODE...SVC_MODE: - mode &= ~MODE32_BIT; /* 0 ... 3 */ - break; - - case ABT_MODE: - mode = VCPU_REG_OFFSET_ABT; - break; - - case UND_MODE: - mode = VCPU_REG_OFFSET_UND; - break; - - case SYSTEM_MODE: - mode = VCPU_REG_OFFSET_USR; - break; - - default: - BUG(); - } - - return reg_array + vcpu_reg_offsets[mode][reg_num]; -} - -/* - * Return the SPSR for the current mode of the virtual CPU. - */ -unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu) -{ - unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK; - switch (mode) { - case SVC_MODE: - return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr; - case ABT_MODE: - return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr; - case UND_MODE: - return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr; - case IRQ_MODE: - return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr; - case FIQ_MODE: - return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr; - default: - BUG(); - } -} - -/****************************************************************************** - * Inject exceptions into the guest - */ - -/** - * kvm_inject_vabt - inject an async abort / SError into the guest - * @vcpu: The VCPU to receive the exception - * - * It is assumed that this code is called from the VCPU thread and that the - * VCPU therefore is not currently executing guest code. 
- */ -void kvm_inject_vabt(struct kvm_vcpu *vcpu) -{ - *vcpu_hcr(vcpu) |= HCR_VA; -} diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c deleted file mode 100644 index 9f7ae0d8690f..000000000000 --- a/arch/arm/kvm/guest.c +++ /dev/null @@ -1,387 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall <c.dall@virtualopensystems.com> - */ - -#include <linux/errno.h> -#include <linux/err.h> -#include <linux/kvm_host.h> -#include <linux/module.h> -#include <linux/vmalloc.h> -#include <linux/fs.h> -#include <kvm/arm_psci.h> -#include <asm/cputype.h> -#include <linux/uaccess.h> -#include <asm/kvm.h> -#include <asm/kvm_emulate.h> -#include <asm/kvm_coproc.h> - -#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM } -#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU } - -struct kvm_stats_debugfs_item debugfs_entries[] = { - VCPU_STAT(halt_successful_poll), - VCPU_STAT(halt_attempted_poll), - VCPU_STAT(halt_poll_invalid), - VCPU_STAT(halt_wakeup), - VCPU_STAT(hvc_exit_stat), - VCPU_STAT(wfe_exit_stat), - VCPU_STAT(wfi_exit_stat), - VCPU_STAT(mmio_exit_user), - VCPU_STAT(mmio_exit_kernel), - VCPU_STAT(exits), - { NULL } -}; - -static u64 core_reg_offset_from_id(u64 id) -{ - return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE); -} - -static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - u32 __user *uaddr = (u32 __user *)(long)reg->addr; - struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs; - u64 off; - - if (KVM_REG_SIZE(reg->id) != 4) - return -ENOENT; - - /* Our ID is an index into the kvm_regs struct. */ - off = core_reg_offset_from_id(reg->id); - if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id)) - return -ENOENT; - - return put_user(((u32 *)regs)[off], uaddr); -} - -static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - u32 __user *uaddr = (u32 __user *)(long)reg->addr; - struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs; - u64 off, val; - - if (KVM_REG_SIZE(reg->id) != 4) - return -ENOENT; - - /* Our ID is an index into the kvm_regs struct. 
*/ - off = core_reg_offset_from_id(reg->id); - if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id)) - return -ENOENT; - - if (get_user(val, uaddr) != 0) - return -EFAULT; - - if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) { - unsigned long mode = val & MODE_MASK; - switch (mode) { - case USR_MODE: - case FIQ_MODE: - case IRQ_MODE: - case SVC_MODE: - case ABT_MODE: - case UND_MODE: - break; - default: - return -EINVAL; - } - } - - ((u32 *)regs)[off] = val; - return 0; -} - -int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) -{ - return -EINVAL; -} - -#define NUM_TIMER_REGS 3 - -static bool is_timer_reg(u64 index) -{ - switch (index) { - case KVM_REG_ARM_TIMER_CTL: - case KVM_REG_ARM_TIMER_CNT: - case KVM_REG_ARM_TIMER_CVAL: - return true; - } - return false; -} - -static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) -{ - if (put_user(KVM_REG_ARM_TIMER_CTL, uindices)) - return -EFAULT; - uindices++; - if (put_user(KVM_REG_ARM_TIMER_CNT, uindices)) - return -EFAULT; - uindices++; - if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices)) - return -EFAULT; - - return 0; -} - -static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - void __user *uaddr = (void __user *)(long)reg->addr; - u64 val; - int ret; - - ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)); - if (ret != 0) - return -EFAULT; - - return kvm_arm_timer_set_reg(vcpu, reg->id, val); -} - -static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - void __user *uaddr = (void __user *)(long)reg->addr; - u64 val; - - val = kvm_arm_timer_get_reg(vcpu, reg->id); - return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0; -} - -static unsigned long num_core_regs(void) -{ - return sizeof(struct kvm_regs) / sizeof(u32); -} - -/** - * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG - * - * This is for all registers. - */ -unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) -{ - return num_core_regs() + kvm_arm_num_coproc_regs(vcpu) - + kvm_arm_get_fw_num_regs(vcpu) - + NUM_TIMER_REGS; -} - -/** - * kvm_arm_copy_reg_indices - get indices of all registers. - * - * We do core registers right here, then we append coproc regs. - */ -int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) -{ - unsigned int i; - const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE; - int ret; - - for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) { - if (put_user(core_reg | i, uindices)) - return -EFAULT; - uindices++; - } - - ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices); - if (ret) - return ret; - uindices += kvm_arm_get_fw_num_regs(vcpu); - - ret = copy_timer_indices(vcpu, uindices); - if (ret) - return ret; - uindices += NUM_TIMER_REGS; - - return kvm_arm_copy_coproc_indices(vcpu, uindices); -} - -int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - /* We currently use nothing arch-specific in upper 32 bits */ - if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32) - return -EINVAL; - - /* Register group 16 means we want a core register. 
*/ - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) - return get_core_reg(vcpu, reg); - - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) - return kvm_arm_get_fw_reg(vcpu, reg); - - if (is_timer_reg(reg->id)) - return get_timer_reg(vcpu, reg); - - return kvm_arm_coproc_get_reg(vcpu, reg); -} - -int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - /* We currently use nothing arch-specific in upper 32 bits */ - if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32) - return -EINVAL; - - /* Register group 16 means we set a core register. */ - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) - return set_core_reg(vcpu, reg); - - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) - return kvm_arm_set_fw_reg(vcpu, reg); - - if (is_timer_reg(reg->id)) - return set_timer_reg(vcpu, reg); - - return kvm_arm_coproc_set_reg(vcpu, reg); -} - -int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, - struct kvm_sregs *sregs) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, - struct kvm_sregs *sregs) -{ - return -EINVAL; -} - - -int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu, - struct kvm_vcpu_events *events) -{ - events->exception.serror_pending = !!(*vcpu_hcr(vcpu) & HCR_VA); - - /* - * We never return a pending ext_dabt here because we deliver it to - * the virtual CPU directly when setting the event and it's no longer - * 'pending' at this point. - */ - - return 0; -} - -int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, - struct kvm_vcpu_events *events) -{ - bool serror_pending = events->exception.serror_pending; - bool has_esr = events->exception.serror_has_esr; - bool ext_dabt_pending = events->exception.ext_dabt_pending; - - if (serror_pending && has_esr) - return -EINVAL; - else if (serror_pending) - kvm_inject_vabt(vcpu); - - if (ext_dabt_pending) - kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); - - return 0; -} - -int __attribute_const__ kvm_target_cpu(void) -{ - switch (read_cpuid_part()) { - case ARM_CPU_PART_CORTEX_A7: - return KVM_ARM_TARGET_CORTEX_A7; - case ARM_CPU_PART_CORTEX_A15: - return KVM_ARM_TARGET_CORTEX_A15; - default: - return -EINVAL; - } -} - -int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init) -{ - int target = kvm_target_cpu(); - - if (target < 0) - return -ENODEV; - - memset(init, 0, sizeof(*init)); - - /* - * For now, we don't return any features. - * In future, we might use features to return target - * specific features available for the preferred - * target type. 
- */ - init->target = (__u32)target; - - return 0; -} - -int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, - struct kvm_translation *tr) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, - struct kvm_guest_debug *dbg) -{ - return -EINVAL; -} - -int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, - struct kvm_device_attr *attr) -{ - int ret; - - switch (attr->group) { - case KVM_ARM_VCPU_TIMER_CTRL: - ret = kvm_arm_timer_set_attr(vcpu, attr); - break; - default: - ret = -ENXIO; - break; - } - - return ret; -} - -int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, - struct kvm_device_attr *attr) -{ - int ret; - - switch (attr->group) { - case KVM_ARM_VCPU_TIMER_CTRL: - ret = kvm_arm_timer_get_attr(vcpu, attr); - break; - default: - ret = -ENXIO; - break; - } - - return ret; -} - -int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, - struct kvm_device_attr *attr) -{ - int ret; - - switch (attr->group) { - case KVM_ARM_VCPU_TIMER_CTRL: - ret = kvm_arm_timer_has_attr(vcpu, attr); - break; - default: - ret = -ENXIO; - break; - } - - return ret; -} diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c deleted file mode 100644 index e58a89d2f13f..000000000000 --- a/arch/arm/kvm/handle_exit.c +++ /dev/null @@ -1,175 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall <c.dall@virtualopensystems.com> - */ - -#include <linux/kvm.h> -#include <linux/kvm_host.h> -#include <asm/kvm_emulate.h> -#include <asm/kvm_coproc.h> -#include <asm/kvm_mmu.h> -#include <kvm/arm_hypercalls.h> -#include <trace/events/kvm.h> - -#include "trace.h" - -typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *); - -static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - int ret; - - trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), - kvm_vcpu_hvc_get_imm(vcpu)); - vcpu->stat.hvc_exit_stat++; - - ret = kvm_hvc_call_handler(vcpu); - if (ret < 0) { - vcpu_set_reg(vcpu, 0, ~0UL); - return 1; - } - - return ret; -} - -static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - /* - * "If an SMC instruction executed at Non-secure EL1 is - * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a - * Trap exception, not a Secure Monitor Call exception [...]" - * - * We need to advance the PC after the trap, as it would - * otherwise return to the same address... - */ - vcpu_set_reg(vcpu, 0, ~0UL); - kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); - return 1; -} - -/** - * kvm_handle_wfx - handle a WFI or WFE instructions trapped in guests - * @vcpu: the vcpu pointer - * @run: the kvm_run structure pointer - * - * WFE: Yield the CPU and come back to this vcpu when the scheduler - * decides to. - * WFI: Simply call kvm_vcpu_block(), which will halt execution of - * world-switches and schedule other host processes until there is an - * incoming IRQ or FIQ to the VM. 
- */ -static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) { - trace_kvm_wfx(*vcpu_pc(vcpu), true); - vcpu->stat.wfe_exit_stat++; - kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu)); - } else { - trace_kvm_wfx(*vcpu_pc(vcpu), false); - vcpu->stat.wfi_exit_stat++; - kvm_vcpu_block(vcpu); - kvm_clear_request(KVM_REQ_UNHALT, vcpu); - } - - kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); - - return 1; -} - -static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - u32 hsr = kvm_vcpu_get_hsr(vcpu); - - kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n", - hsr); - - kvm_inject_undefined(vcpu); - return 1; -} - -static exit_handle_fn arm_exit_handlers[] = { - [0 ... HSR_EC_MAX] = kvm_handle_unknown_ec, - [HSR_EC_WFI] = kvm_handle_wfx, - [HSR_EC_CP15_32] = kvm_handle_cp15_32, - [HSR_EC_CP15_64] = kvm_handle_cp15_64, - [HSR_EC_CP14_MR] = kvm_handle_cp14_32, - [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store, - [HSR_EC_CP14_64] = kvm_handle_cp14_64, - [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access, - [HSR_EC_CP10_ID] = kvm_handle_cp10_id, - [HSR_EC_HVC] = handle_hvc, - [HSR_EC_SMC] = handle_smc, - [HSR_EC_IABT] = kvm_handle_guest_abort, - [HSR_EC_DABT] = kvm_handle_guest_abort, -}; - -static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu) -{ - u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu); - - return arm_exit_handlers[hsr_ec]; -} - -/* - * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on - * proper exit to userspace. - */ -int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, - int exception_index) -{ - exit_handle_fn exit_handler; - - if (ARM_ABORT_PENDING(exception_index)) { - u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu); - - /* - * HVC/SMC already have an adjusted PC, which we need - * to correct in order to return to after having - * injected the abort. - */ - if (hsr_ec == HSR_EC_HVC || hsr_ec == HSR_EC_SMC) { - u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2; - *vcpu_pc(vcpu) -= adj; - } - - kvm_inject_vabt(vcpu); - return 1; - } - - exception_index = ARM_EXCEPTION_CODE(exception_index); - - switch (exception_index) { - case ARM_EXCEPTION_IRQ: - return 1; - case ARM_EXCEPTION_HVC: - /* - * See ARM ARM B1.14.1: "Hyp traps on instructions - * that fail their condition code check" - */ - if (!kvm_condition_valid(vcpu)) { - kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); - return 1; - } - - exit_handler = kvm_get_exit_handler(vcpu); - - return exit_handler(vcpu, run); - case ARM_EXCEPTION_DATA_ABORT: - kvm_inject_vabt(vcpu); - return 1; - case ARM_EXCEPTION_HYP_GONE: - /* - * HYP has been reset to the hyp-stub. This happens - * when a guest is pre-empted by kvm_reboot()'s - * shutdown call. 
- */ - run->exit_reason = KVM_EXIT_FAIL_ENTRY; - return 0; - default: - kvm_pr_unimpl("Unsupported exception type: %d", - exception_index); - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - return 0; - } -} diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile deleted file mode 100644 index ba88b1eca93c..000000000000 --- a/arch/arm/kvm/hyp/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Makefile for Kernel-based Virtual Machine module, HYP part -# - -ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING - -KVM=../../../../virt/kvm - -CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve) - -obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o -obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o -obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o - -obj-$(CONFIG_KVM_ARM_HOST) += tlb.o -obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o -obj-$(CONFIG_KVM_ARM_HOST) += vfp.o -obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o -CFLAGS_banked-sr.o += $(CFLAGS_ARMV7VE) - -obj-$(CONFIG_KVM_ARM_HOST) += entry.o -obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o -obj-$(CONFIG_KVM_ARM_HOST) += switch.o -CFLAGS_switch.o += $(CFLAGS_ARMV7VE) -obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o - -# KVM code is run at a different exception code with a different map, so -# compiler instrumentation that inserts callbacks or checks into the code may -# cause crashes. Just disable it. -GCOV_PROFILE := n -KASAN_SANITIZE := n -UBSAN_SANITIZE := n -KCOV_INSTRUMENT := n diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c deleted file mode 100644 index c4632ed9e819..000000000000 --- a/arch/arm/kvm/hyp/banked-sr.c +++ /dev/null @@ -1,70 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Original code: - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall <c.dall@virtualopensystems.com> - * - * Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com> - */ - -#include <asm/kvm_hyp.h> - -/* - * gcc before 4.9 doesn't understand -march=armv7ve, so we have to - * trick the assembler. 
- */ -__asm__(".arch_extension virt"); - -void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt) -{ - ctxt->gp_regs.usr_regs.ARM_sp = read_special(SP_usr); - ctxt->gp_regs.usr_regs.ARM_pc = read_special(ELR_hyp); - ctxt->gp_regs.usr_regs.ARM_cpsr = read_special(SPSR); - ctxt->gp_regs.KVM_ARM_SVC_sp = read_special(SP_svc); - ctxt->gp_regs.KVM_ARM_SVC_lr = read_special(LR_svc); - ctxt->gp_regs.KVM_ARM_SVC_spsr = read_special(SPSR_svc); - ctxt->gp_regs.KVM_ARM_ABT_sp = read_special(SP_abt); - ctxt->gp_regs.KVM_ARM_ABT_lr = read_special(LR_abt); - ctxt->gp_regs.KVM_ARM_ABT_spsr = read_special(SPSR_abt); - ctxt->gp_regs.KVM_ARM_UND_sp = read_special(SP_und); - ctxt->gp_regs.KVM_ARM_UND_lr = read_special(LR_und); - ctxt->gp_regs.KVM_ARM_UND_spsr = read_special(SPSR_und); - ctxt->gp_regs.KVM_ARM_IRQ_sp = read_special(SP_irq); - ctxt->gp_regs.KVM_ARM_IRQ_lr = read_special(LR_irq); - ctxt->gp_regs.KVM_ARM_IRQ_spsr = read_special(SPSR_irq); - ctxt->gp_regs.KVM_ARM_FIQ_r8 = read_special(R8_fiq); - ctxt->gp_regs.KVM_ARM_FIQ_r9 = read_special(R9_fiq); - ctxt->gp_regs.KVM_ARM_FIQ_r10 = read_special(R10_fiq); - ctxt->gp_regs.KVM_ARM_FIQ_fp = read_special(R11_fiq); - ctxt->gp_regs.KVM_ARM_FIQ_ip = read_special(R12_fiq); - ctxt->gp_regs.KVM_ARM_FIQ_sp = read_special(SP_fiq); - ctxt->gp_regs.KVM_ARM_FIQ_lr = read_special(LR_fiq); - ctxt->gp_regs.KVM_ARM_FIQ_spsr = read_special(SPSR_fiq); -} - -void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt) -{ - write_special(ctxt->gp_regs.usr_regs.ARM_sp, SP_usr); - write_special(ctxt->gp_regs.usr_regs.ARM_pc, ELR_hyp); - write_special(ctxt->gp_regs.usr_regs.ARM_cpsr, SPSR_cxsf); - write_special(ctxt->gp_regs.KVM_ARM_SVC_sp, SP_svc); - write_special(ctxt->gp_regs.KVM_ARM_SVC_lr, LR_svc); - write_special(ctxt->gp_regs.KVM_ARM_SVC_spsr, SPSR_svc); - write_special(ctxt->gp_regs.KVM_ARM_ABT_sp, SP_abt); - write_special(ctxt->gp_regs.KVM_ARM_ABT_lr, LR_abt); - write_special(ctxt->gp_regs.KVM_ARM_ABT_spsr, SPSR_abt); - write_special(ctxt->gp_regs.KVM_ARM_UND_sp, SP_und); - write_special(ctxt->gp_regs.KVM_ARM_UND_lr, LR_und); - write_special(ctxt->gp_regs.KVM_ARM_UND_spsr, SPSR_und); - write_special(ctxt->gp_regs.KVM_ARM_IRQ_sp, SP_irq); - write_special(ctxt->gp_regs.KVM_ARM_IRQ_lr, LR_irq); - write_special(ctxt->gp_regs.KVM_ARM_IRQ_spsr, SPSR_irq); - write_special(ctxt->gp_regs.KVM_ARM_FIQ_r8, R8_fiq); - write_special(ctxt->gp_regs.KVM_ARM_FIQ_r9, R9_fiq); - write_special(ctxt->gp_regs.KVM_ARM_FIQ_r10, R10_fiq); - write_special(ctxt->gp_regs.KVM_ARM_FIQ_fp, R11_fiq); - write_special(ctxt->gp_regs.KVM_ARM_FIQ_ip, R12_fiq); - write_special(ctxt->gp_regs.KVM_ARM_FIQ_sp, SP_fiq); - write_special(ctxt->gp_regs.KVM_ARM_FIQ_lr, LR_fiq); - write_special(ctxt->gp_regs.KVM_ARM_FIQ_spsr, SPSR_fiq); -} diff --git a/arch/arm/kvm/hyp/cp15-sr.c b/arch/arm/kvm/hyp/cp15-sr.c deleted file mode 100644 index e6923306f698..000000000000 --- a/arch/arm/kvm/hyp/cp15-sr.c +++ /dev/null @@ -1,72 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Original code: - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall <c.dall@virtualopensystems.com> - * - * Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com> - */ - -#include <asm/kvm_hyp.h> - -static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx) -{ - return (u64 *)(ctxt->cp15 + idx); -} - -void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) -{ - ctxt->cp15[c0_CSSELR] = read_sysreg(CSSELR); - ctxt->cp15[c1_SCTLR] = read_sysreg(SCTLR); - 
ctxt->cp15[c1_CPACR] = read_sysreg(CPACR); - *cp15_64(ctxt, c2_TTBR0) = read_sysreg(TTBR0); - *cp15_64(ctxt, c2_TTBR1) = read_sysreg(TTBR1); - ctxt->cp15[c2_TTBCR] = read_sysreg(TTBCR); - ctxt->cp15[c3_DACR] = read_sysreg(DACR); - ctxt->cp15[c5_DFSR] = read_sysreg(DFSR); - ctxt->cp15[c5_IFSR] = read_sysreg(IFSR); - ctxt->cp15[c5_ADFSR] = read_sysreg(ADFSR); - ctxt->cp15[c5_AIFSR] = read_sysreg(AIFSR); - ctxt->cp15[c6_DFAR] = read_sysreg(DFAR); - ctxt->cp15[c6_IFAR] = read_sysreg(IFAR); - *cp15_64(ctxt, c7_PAR) = read_sysreg(PAR); - ctxt->cp15[c10_PRRR] = read_sysreg(PRRR); - ctxt->cp15[c10_NMRR] = read_sysreg(NMRR); - ctxt->cp15[c10_AMAIR0] = read_sysreg(AMAIR0); - ctxt->cp15[c10_AMAIR1] = read_sysreg(AMAIR1); - ctxt->cp15[c12_VBAR] = read_sysreg(VBAR); - ctxt->cp15[c13_CID] = read_sysreg(CID); - ctxt->cp15[c13_TID_URW] = read_sysreg(TID_URW); - ctxt->cp15[c13_TID_URO] = read_sysreg(TID_URO); - ctxt->cp15[c13_TID_PRIV] = read_sysreg(TID_PRIV); - ctxt->cp15[c14_CNTKCTL] = read_sysreg(CNTKCTL); -} - -void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) -{ - write_sysreg(ctxt->cp15[c0_MPIDR], VMPIDR); - write_sysreg(ctxt->cp15[c0_CSSELR], CSSELR); - write_sysreg(ctxt->cp15[c1_SCTLR], SCTLR); - write_sysreg(ctxt->cp15[c1_CPACR], CPACR); - write_sysreg(*cp15_64(ctxt, c2_TTBR0), TTBR0); - write_sysreg(*cp15_64(ctxt, c2_TTBR1), TTBR1); - write_sysreg(ctxt->cp15[c2_TTBCR], TTBCR); - write_sysreg(ctxt->cp15[c3_DACR], DACR); - write_sysreg(ctxt->cp15[c5_DFSR], DFSR); - write_sysreg(ctxt->cp15[c5_IFSR], IFSR); - write_sysreg(ctxt->cp15[c5_ADFSR], ADFSR); - write_sysreg(ctxt->cp15[c5_AIFSR], AIFSR); - write_sysreg(ctxt->cp15[c6_DFAR], DFAR); - write_sysreg(ctxt->cp15[c6_IFAR], IFAR); - write_sysreg(*cp15_64(ctxt, c7_PAR), PAR); - write_sysreg(ctxt->cp15[c10_PRRR], PRRR); - write_sysreg(ctxt->cp15[c10_NMRR], NMRR); - write_sysreg(ctxt->cp15[c10_AMAIR0], AMAIR0); - write_sysreg(ctxt->cp15[c10_AMAIR1], AMAIR1); - write_sysreg(ctxt->cp15[c12_VBAR], VBAR); - write_sysreg(ctxt->cp15[c13_CID], CID); - write_sysreg(ctxt->cp15[c13_TID_URW], TID_URW); - write_sysreg(ctxt->cp15[c13_TID_URO], TID_URO); - write_sysreg(ctxt->cp15[c13_TID_PRIV], TID_PRIV); - write_sysreg(ctxt->cp15[c14_CNTKCTL], CNTKCTL); -} diff --git a/arch/arm/kvm/hyp/entry.S b/arch/arm/kvm/hyp/entry.S deleted file mode 100644 index 4bd1f6a74180..000000000000 --- a/arch/arm/kvm/hyp/entry.S +++ /dev/null @@ -1,121 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2016 - ARM Ltd - * Author: Marc Zyngier <marc.zyngier@arm.com> -*/ - -#include <linux/linkage.h> -#include <asm/asm-offsets.h> -#include <asm/kvm_arm.h> -#include <asm/kvm_asm.h> - - .arch_extension virt - - .text - .pushsection .hyp.text, "ax" - -#define USR_REGS_OFFSET (CPU_CTXT_GP_REGS + GP_REGS_USR) - -/* int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host) */ -ENTRY(__guest_enter) - @ Save host registers - add r1, r1, #(USR_REGS_OFFSET + S_R4) - stm r1!, {r4-r12} - str lr, [r1, #4] @ Skip SP_usr (already saved) - - @ Restore guest registers - add r0, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0) - ldr lr, [r0, #S_LR] - ldm r0, {r0-r12} - - clrex - eret -ENDPROC(__guest_enter) - -ENTRY(__guest_exit) - /* - * return convention: - * guest r0, r1, r2 saved on the stack - * r0: vcpu pointer - * r1: exception code - */ - - add r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R3) - stm r2!, {r3-r12} - str lr, [r2, #4] - add r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0) - pop {r3, r4, r5} @ r0, r1, r2 - stm r2, {r3-r5} - - ldr 
r0, [r0, #VCPU_HOST_CTXT] - add r0, r0, #(USR_REGS_OFFSET + S_R4) - ldm r0!, {r4-r12} - ldr lr, [r0, #4] - - mov r0, r1 - mrs r1, SPSR - mrs r2, ELR_hyp - mrc p15, 4, r3, c5, c2, 0 @ HSR - - /* - * Force loads and stores to complete before unmasking aborts - * and forcing the delivery of the exception. This gives us a - * single instruction window, which the handler will try to - * match. - */ - dsb sy - cpsie a - - .global abort_guest_exit_start -abort_guest_exit_start: - - isb - - .global abort_guest_exit_end -abort_guest_exit_end: - - /* - * If we took an abort, r0[31] will be set, and cmp will set - * the N bit in PSTATE. - */ - cmp r0, #0 - msrmi SPSR_cxsf, r1 - msrmi ELR_hyp, r2 - mcrmi p15, 4, r3, c5, c2, 0 @ HSR - - bx lr -ENDPROC(__guest_exit) - -/* - * If VFPv3 support is not available, then we will not switch the VFP - * registers; however cp10 and cp11 accesses will still trap and fallback - * to the regular coprocessor emulation code, which currently will - * inject an undefined exception to the guest. - */ -#ifdef CONFIG_VFPv3 -ENTRY(__vfp_guest_restore) - push {r3, r4, lr} - - @ NEON/VFP used. Turn on VFP access. - mrc p15, 4, r1, c1, c1, 2 @ HCPTR - bic r1, r1, #(HCPTR_TCP(10) | HCPTR_TCP(11)) - mcr p15, 4, r1, c1, c1, 2 @ HCPTR - isb - - @ Switch VFP/NEON hardware state to the guest's - mov r4, r0 - ldr r0, [r0, #VCPU_HOST_CTXT] - add r0, r0, #CPU_CTXT_VFP - bl __vfp_save_state - add r0, r4, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP) - bl __vfp_restore_state - - pop {r3, r4, lr} - pop {r0, r1, r2} - clrex - eret -ENDPROC(__vfp_guest_restore) -#endif - - .popsection - diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S deleted file mode 100644 index fe3d7811a908..000000000000 --- a/arch/arm/kvm/hyp/hyp-entry.S +++ /dev/null @@ -1,295 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall <c.dall@virtualopensystems.com> - */ - -#include <linux/arm-smccc.h> -#include <linux/linkage.h> -#include <asm/kvm_arm.h> -#include <asm/kvm_asm.h> - - .arch_extension virt - - .text - .pushsection .hyp.text, "ax" - -.macro load_vcpu reg - mrc p15, 4, \reg, c13, c0, 2 @ HTPIDR -.endm - -/******************************************************************** - * Hypervisor exception vector and handlers - * - * - * The KVM/ARM Hypervisor ABI is defined as follows: - * - * Entry to Hyp mode from the host kernel will happen _only_ when an HVC - * instruction is issued since all traps are disabled when running the host - * kernel as per the Hyp-mode initialization at boot time. - * - * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc - * below) when the HVC instruction is called from SVC mode (i.e. a guest or the - * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC - * instructions are called from within Hyp-mode. - * - * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode): - * Switching to Hyp mode is done through a simple HVC #0 instruction. The - * exception vector code will check that the HVC comes from VMID==0. - * - r0 contains a pointer to a HYP function - * - r1, r2, and r3 contain arguments to the above function. - * - The HYP function will be called with its arguments in r0, r1 and r2. - * On HYP function return, we return directly to SVC. 
- * - * Note that the above is used to execute code in Hyp-mode from a host-kernel - * point of view, and is a different concept from performing a world-switch and - * executing guest code SVC mode (with a VMID != 0). - */ - - .align 5 -__kvm_hyp_vector: - .global __kvm_hyp_vector - - @ Hyp-mode exception vector - W(b) hyp_reset - W(b) hyp_undef - W(b) hyp_svc - W(b) hyp_pabt - W(b) hyp_dabt - W(b) hyp_hvc - W(b) hyp_irq - W(b) hyp_fiq - -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR - .align 5 -__kvm_hyp_vector_ic_inv: - .global __kvm_hyp_vector_ic_inv - - /* - * We encode the exception entry in the bottom 3 bits of - * SP, and we have to guarantee to be 8 bytes aligned. - */ - W(add) sp, sp, #1 /* Reset 7 */ - W(add) sp, sp, #1 /* Undef 6 */ - W(add) sp, sp, #1 /* Syscall 5 */ - W(add) sp, sp, #1 /* Prefetch abort 4 */ - W(add) sp, sp, #1 /* Data abort 3 */ - W(add) sp, sp, #1 /* HVC 2 */ - W(add) sp, sp, #1 /* IRQ 1 */ - W(nop) /* FIQ 0 */ - - mcr p15, 0, r0, c7, c5, 0 /* ICIALLU */ - isb - - b decode_vectors - - .align 5 -__kvm_hyp_vector_bp_inv: - .global __kvm_hyp_vector_bp_inv - - /* - * We encode the exception entry in the bottom 3 bits of - * SP, and we have to guarantee to be 8 bytes aligned. - */ - W(add) sp, sp, #1 /* Reset 7 */ - W(add) sp, sp, #1 /* Undef 6 */ - W(add) sp, sp, #1 /* Syscall 5 */ - W(add) sp, sp, #1 /* Prefetch abort 4 */ - W(add) sp, sp, #1 /* Data abort 3 */ - W(add) sp, sp, #1 /* HVC 2 */ - W(add) sp, sp, #1 /* IRQ 1 */ - W(nop) /* FIQ 0 */ - - mcr p15, 0, r0, c7, c5, 6 /* BPIALL */ - isb - -decode_vectors: - -#ifdef CONFIG_THUMB2_KERNEL - /* - * Yet another silly hack: Use VPIDR as a temp register. - * Thumb2 is really a pain, as SP cannot be used with most - * of the bitwise instructions. The vect_br macro ensures - * things gets cleaned-up. - */ - mcr p15, 4, r0, c0, c0, 0 /* VPIDR */ - mov r0, sp - and r0, r0, #7 - sub sp, sp, r0 - push {r1, r2} - mov r1, r0 - mrc p15, 4, r0, c0, c0, 0 /* VPIDR */ - mrc p15, 0, r2, c0, c0, 0 /* MIDR */ - mcr p15, 4, r2, c0, c0, 0 /* VPIDR */ -#endif - -.macro vect_br val, targ -ARM( eor sp, sp, #\val ) -ARM( tst sp, #7 ) -ARM( eorne sp, sp, #\val ) - -THUMB( cmp r1, #\val ) -THUMB( popeq {r1, r2} ) - - beq \targ -.endm - - vect_br 0, hyp_fiq - vect_br 1, hyp_irq - vect_br 2, hyp_hvc - vect_br 3, hyp_dabt - vect_br 4, hyp_pabt - vect_br 5, hyp_svc - vect_br 6, hyp_undef - vect_br 7, hyp_reset -#endif - -.macro invalid_vector label, cause - .align -\label: mov r0, #\cause - b __hyp_panic -.endm - - invalid_vector hyp_reset ARM_EXCEPTION_RESET - invalid_vector hyp_undef ARM_EXCEPTION_UNDEFINED - invalid_vector hyp_svc ARM_EXCEPTION_SOFTWARE - invalid_vector hyp_pabt ARM_EXCEPTION_PREF_ABORT - invalid_vector hyp_fiq ARM_EXCEPTION_FIQ - -ENTRY(__hyp_do_panic) - mrs lr, cpsr - bic lr, lr, #MODE_MASK - orr lr, lr, #SVC_MODE -THUMB( orr lr, lr, #PSR_T_BIT ) - msr spsr_cxsf, lr - ldr lr, =panic - msr ELR_hyp, lr - ldr lr, =__kvm_call_hyp - clrex - eret -ENDPROC(__hyp_do_panic) - -hyp_hvc: - /* - * Getting here is either because of a trap from a guest, - * or from executing HVC from the host kernel, which means - * "do something in Hyp mode". - */ - push {r0, r1, r2} - - @ Check syndrome register - mrc p15, 4, r1, c5, c2, 0 @ HSR - lsr r0, r1, #HSR_EC_SHIFT - cmp r0, #HSR_EC_HVC - bne guest_trap @ Not HVC instr. 
- - /* - * Let's check if the HVC came from VMID 0 and allow simple - * switch to Hyp mode - */ - mrrc p15, 6, r0, r2, c2 - lsr r2, r2, #16 - and r2, r2, #0xff - cmp r2, #0 - bne guest_hvc_trap @ Guest called HVC - - /* - * Getting here means host called HVC, we shift parameters and branch - * to Hyp function. - */ - pop {r0, r1, r2} - - /* - * Check if we have a kernel function, which is guaranteed to be - * bigger than the maximum hyp stub hypercall - */ - cmp r0, #HVC_STUB_HCALL_NR - bhs 1f - - /* - * Not a kernel function, treat it as a stub hypercall. - * Compute the physical address for __kvm_handle_stub_hvc - * (as the code lives in the idmaped page) and branch there. - * We hijack ip (r12) as a tmp register. - */ - push {r1} - ldr r1, =kimage_voffset - ldr r1, [r1] - ldr ip, =__kvm_handle_stub_hvc - sub ip, ip, r1 - pop {r1} - - bx ip - -1: - /* - * Pushing r2 here is just a way of keeping the stack aligned to - * 8 bytes on any path that can trigger a HYP exception. Here, - * we may well be about to jump into the guest, and the guest - * exit would otherwise be badly decoded by our fancy - * "decode-exception-without-a-branch" code... - */ - push {r2, lr} - - mov lr, r0 - mov r0, r1 - mov r1, r2 - mov r2, r3 - -THUMB( orr lr, #1) - blx lr @ Call the HYP function - - pop {r2, lr} - eret - -guest_hvc_trap: - movw r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1 - movt r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1 - ldr r0, [sp] @ Guest's r0 - teq r0, r2 - bne guest_trap - add sp, sp, #12 - @ Returns: - @ r0 = 0 - @ r1 = HSR value (perfectly predictable) - @ r2 = ARM_SMCCC_ARCH_WORKAROUND_1 - mov r0, #0 - eret - -guest_trap: - load_vcpu r0 @ Load VCPU pointer to r0 - -#ifdef CONFIG_VFPv3 - @ Check for a VFP access - lsr r1, r1, #HSR_EC_SHIFT - cmp r1, #HSR_EC_CP_0_13 - beq __vfp_guest_restore -#endif - - mov r1, #ARM_EXCEPTION_HVC - b __guest_exit - -hyp_irq: - push {r0, r1, r2} - mov r1, #ARM_EXCEPTION_IRQ - load_vcpu r0 @ Load VCPU pointer to r0 - b __guest_exit - -hyp_dabt: - push {r0, r1} - mrs r0, ELR_hyp - ldr r1, =abort_guest_exit_start -THUMB( add r1, r1, #1) - cmp r0, r1 - ldrne r1, =abort_guest_exit_end -THUMB( addne r1, r1, #1) - cmpne r0, r1 - pop {r0, r1} - bne __hyp_panic - - orr r0, r0, #(1 << ARM_EXIT_WITH_ABORT_BIT) - eret - - .ltorg - - .popsection diff --git a/arch/arm/kvm/hyp/s2-setup.c b/arch/arm/kvm/hyp/s2-setup.c deleted file mode 100644 index 5dfbea5adf65..000000000000 --- a/arch/arm/kvm/hyp/s2-setup.c +++ /dev/null @@ -1,22 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2016 - ARM Ltd - * Author: Marc Zyngier <marc.zyngier@arm.com> - */ - -#include <linux/types.h> -#include <asm/kvm_arm.h> -#include <asm/kvm_asm.h> -#include <asm/kvm_hyp.h> - -void __hyp_text __init_stage2_translation(void) -{ - u64 val; - - val = read_sysreg(VTCR) & ~VTCR_MASK; - - val |= read_sysreg(HTCR) & VTCR_HTCR_SH; - val |= KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S; - - write_sysreg(val, VTCR); -} diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c deleted file mode 100644 index 1efeef3fd0ee..000000000000 --- a/arch/arm/kvm/hyp/switch.c +++ /dev/null @@ -1,242 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2015 - ARM Ltd - * Author: Marc Zyngier <marc.zyngier@arm.com> - */ -#include <linux/jump_label.h> - -#include <asm/kvm_asm.h> -#include <asm/kvm_hyp.h> -#include <asm/kvm_mmu.h> - -__asm__(".arch_extension virt"); - -/* - * Activate the traps, saving the host's fpexc register before - * overwriting it. 
We'll restore it on VM exit. - */ -static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host) -{ - u32 val; - - /* - * We are about to set HCPTR.TCP10/11 to trap all floating point - * register accesses to HYP, however, the ARM ARM clearly states that - * traps are only taken to HYP if the operation would not otherwise - * trap to SVC. Therefore, always make sure that for 32-bit guests, - * we set FPEXC.EN to prevent traps to SVC, when setting the TCP bits. - */ - val = read_sysreg(VFP_FPEXC); - *fpexc_host = val; - if (!(val & FPEXC_EN)) { - write_sysreg(val | FPEXC_EN, VFP_FPEXC); - isb(); - } - - write_sysreg(vcpu->arch.hcr, HCR); - /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */ - write_sysreg(HSTR_T(15), HSTR); - write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR); - val = read_sysreg(HDCR); - val |= HDCR_TPM | HDCR_TPMCR; /* trap performance monitors */ - val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */ - write_sysreg(val, HDCR); -} - -static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) -{ - u32 val; - - /* - * If we pended a virtual abort, preserve it until it gets - * cleared. See B1.9.9 (Virtual Abort exception) for details, - * but the crucial bit is the zeroing of HCR.VA in the - * pseudocode. - */ - if (vcpu->arch.hcr & HCR_VA) - vcpu->arch.hcr = read_sysreg(HCR); - - write_sysreg(0, HCR); - write_sysreg(0, HSTR); - val = read_sysreg(HDCR); - write_sysreg(val & ~(HDCR_TPM | HDCR_TPMCR), HDCR); - write_sysreg(0, HCPTR); -} - -static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = kern_hyp_va(vcpu->kvm); - write_sysreg(kvm_get_vttbr(kvm), VTTBR); - write_sysreg(vcpu->arch.midr, VPIDR); -} - -static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu) -{ - write_sysreg(0, VTTBR); - write_sysreg(read_sysreg(MIDR), VPIDR); -} - - -static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu) -{ - if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) { - __vgic_v3_save_state(vcpu); - __vgic_v3_deactivate_traps(vcpu); - } -} - -static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu) -{ - if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) { - __vgic_v3_activate_traps(vcpu); - __vgic_v3_restore_state(vcpu); - } -} - -static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu) -{ - u32 hsr = read_sysreg(HSR); - u8 ec = hsr >> HSR_EC_SHIFT; - u32 hpfar, far; - - vcpu->arch.fault.hsr = hsr; - - if (ec == HSR_EC_IABT) - far = read_sysreg(HIFAR); - else if (ec == HSR_EC_DABT) - far = read_sysreg(HDFAR); - else - return true; - - /* - * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode: - * - * Abort on the stage 2 translation for a memory access from a - * Non-secure PL1 or PL0 mode: - * - * For any Access flag fault or Translation fault, and also for any - * Permission fault on the stage 2 translation of a memory access - * made as part of a translation table walk for a stage 1 translation, - * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR - * is UNKNOWN. 
- */ - if (!(hsr & HSR_DABT_S1PTW) && (hsr & HSR_FSC_TYPE) == FSC_PERM) { - u64 par, tmp; - - par = read_sysreg(PAR); - write_sysreg(far, ATS1CPR); - isb(); - - tmp = read_sysreg(PAR); - write_sysreg(par, PAR); - - if (unlikely(tmp & 1)) - return false; /* Translation failed, back to guest */ - - hpfar = ((tmp >> 12) & ((1UL << 28) - 1)) << 4; - } else { - hpfar = read_sysreg(HPFAR); - } - - vcpu->arch.fault.hxfar = far; - vcpu->arch.fault.hpfar = hpfar; - return true; -} - -int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) -{ - struct kvm_cpu_context *host_ctxt; - struct kvm_cpu_context *guest_ctxt; - bool fp_enabled; - u64 exit_code; - u32 fpexc; - - vcpu = kern_hyp_va(vcpu); - write_sysreg(vcpu, HTPIDR); - - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); - guest_ctxt = &vcpu->arch.ctxt; - - __sysreg_save_state(host_ctxt); - __banked_save_state(host_ctxt); - - __activate_traps(vcpu, &fpexc); - __activate_vm(vcpu); - - __vgic_restore_state(vcpu); - __timer_enable_traps(vcpu); - - __sysreg_restore_state(guest_ctxt); - __banked_restore_state(guest_ctxt); - - /* Jump in the fire! */ -again: - exit_code = __guest_enter(vcpu, host_ctxt); - /* And we're baaack! */ - - if (exit_code == ARM_EXCEPTION_HVC && !__populate_fault_info(vcpu)) - goto again; - - fp_enabled = __vfp_enabled(); - - __banked_save_state(guest_ctxt); - __sysreg_save_state(guest_ctxt); - __timer_disable_traps(vcpu); - - __vgic_save_state(vcpu); - - __deactivate_traps(vcpu); - __deactivate_vm(vcpu); - - __banked_restore_state(host_ctxt); - __sysreg_restore_state(host_ctxt); - - if (fp_enabled) { - __vfp_save_state(&guest_ctxt->vfp); - __vfp_restore_state(&host_ctxt->vfp); - } - - write_sysreg(fpexc, VFP_FPEXC); - - return exit_code; -} - -static const char * const __hyp_panic_string[] = { - [ARM_EXCEPTION_RESET] = "\nHYP panic: RST PC:%08x CPSR:%08x", - [ARM_EXCEPTION_UNDEFINED] = "\nHYP panic: UNDEF PC:%08x CPSR:%08x", - [ARM_EXCEPTION_SOFTWARE] = "\nHYP panic: SVC PC:%08x CPSR:%08x", - [ARM_EXCEPTION_PREF_ABORT] = "\nHYP panic: PABRT PC:%08x CPSR:%08x", - [ARM_EXCEPTION_DATA_ABORT] = "\nHYP panic: DABRT PC:%08x ADDR:%08x", - [ARM_EXCEPTION_IRQ] = "\nHYP panic: IRQ PC:%08x CPSR:%08x", - [ARM_EXCEPTION_FIQ] = "\nHYP panic: FIQ PC:%08x CPSR:%08x", - [ARM_EXCEPTION_HVC] = "\nHYP panic: HVC PC:%08x CPSR:%08x", -}; - -void __hyp_text __noreturn __hyp_panic(int cause) -{ - u32 elr = read_special(ELR_hyp); - u32 val; - - if (cause == ARM_EXCEPTION_DATA_ABORT) - val = read_sysreg(HDFAR); - else - val = read_special(SPSR); - - if (read_sysreg(VTTBR)) { - struct kvm_vcpu *vcpu; - struct kvm_cpu_context *host_ctxt; - - vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR); - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); - __timer_disable_traps(vcpu); - __deactivate_traps(vcpu); - __deactivate_vm(vcpu); - __banked_restore_state(host_ctxt); - __sysreg_restore_state(host_ctxt); - } - - /* Call panic for real */ - __hyp_do_panic(__hyp_panic_string[cause], elr, val); - - unreachable(); -} diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c deleted file mode 100644 index 848f27bbad9d..000000000000 --- a/arch/arm/kvm/hyp/tlb.c +++ /dev/null @@ -1,68 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Original code: - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall <c.dall@virtualopensystems.com> - * - * Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com> - */ - -#include <asm/kvm_hyp.h> -#include <asm/kvm_mmu.h> - -/** - * Flush per-VMID TLBs - * - * 
__kvm_tlb_flush_vmid(struct kvm *kvm); - * - * We rely on the hardware to broadcast the TLB invalidation to all CPUs - * inside the inner-shareable domain (which is the case for all v7 - * implementations). If we come across a non-IS SMP implementation, we'll - * have to use an IPI based mechanism. Until then, we stick to the simple - * hardware assisted version. - * - * As v7 does not support flushing per IPA, just nuke the whole TLB - * instead, ignoring the ipa value. - */ -void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm) -{ - dsb(ishst); - - /* Switch to requested VMID */ - kvm = kern_hyp_va(kvm); - write_sysreg(kvm_get_vttbr(kvm), VTTBR); - isb(); - - write_sysreg(0, TLBIALLIS); - dsb(ish); - isb(); - - write_sysreg(0, VTTBR); -} - -void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) -{ - __kvm_tlb_flush_vmid(kvm); -} - -void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm); - - /* Switch to requested VMID */ - write_sysreg(kvm_get_vttbr(kvm), VTTBR); - isb(); - - write_sysreg(0, TLBIALL); - dsb(nsh); - isb(); - - write_sysreg(0, VTTBR); -} - -void __hyp_text __kvm_flush_vm_context(void) -{ - write_sysreg(0, TLBIALLNSNHIS); - write_sysreg(0, ICIALLUIS); - dsb(ish); -} diff --git a/arch/arm/kvm/hyp/vfp.S b/arch/arm/kvm/hyp/vfp.S deleted file mode 100644 index 675a52348d8d..000000000000 --- a/arch/arm/kvm/hyp/vfp.S +++ /dev/null @@ -1,57 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall <c.dall@virtualopensystems.com> - */ - -#include <linux/linkage.h> -#include <asm/vfpmacros.h> - - .text - .pushsection .hyp.text, "ax" - -/* void __vfp_save_state(struct vfp_hard_struct *vfp); */ -ENTRY(__vfp_save_state) - push {r4, r5} - VFPFMRX r1, FPEXC - - @ Make sure VFP is *really* enabled so we can touch the registers. - orr r5, r1, #FPEXC_EN - tst r5, #FPEXC_EX @ Check for VFP Subarchitecture - bic r5, r5, #FPEXC_EX @ FPEXC_EX disable - VFPFMXR FPEXC, r5 - isb - - VFPFMRX r2, FPSCR - beq 1f - - @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so - @ we only need to save them if FPEXC_EX is set.
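For readers who do not read ARM assembly fluently, the save sequence described by the comment above (and implemented by the instructions that continue just below) can be sketched in C. The register accessors here are hypothetical stand-ins for the VFPFMRX/VFPFMXR/VFPFSTMIA macros, not real kernel helpers:

	/* Hypothetical C rendering of __vfp_save_state(); read_fpexc(),
	 * write_fpexc(), read_fpscr(), read_fpinst(), read_fpinst2() and
	 * save_vfp_dregs() stand in for the VFP access macros. */
	static void vfp_save_sketch(struct vfp_hard_struct *vfp)
	{
		u32 fpexc = read_fpexc();

		/* Force FPEXC.EN so the other VFP registers may be touched
		 * at all; FPEXC.EX must be off while we do so. */
		write_fpexc((fpexc | FPEXC_EN) & ~FPEXC_EX);
		isb();

		vfp->fpexc = fpexc;		/* keep the original EN/EX bits */
		vfp->fpscr = read_fpscr();

		/* FPINST/FPINST2 reads are unpredictable unless FPEXC.EX is set */
		if (fpexc & FPEXC_EX) {
			vfp->fpinst = read_fpinst();
			if (fpexc & FPEXC_FP2V)
				vfp->fpinst2 = read_fpinst2();
		}

		save_vfp_dregs(vfp);		/* the d-register file proper */
	}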
- VFPFMRX r3, FPINST - tst r5, #FPEXC_FP2V - VFPFMRX r4, FPINST2, ne @ vmrsne -1: - VFPFSTMIA r0, r5 @ Save VFP registers - stm r0, {r1-r4} @ Save FPEXC, FPSCR, FPINST, FPINST2 - pop {r4, r5} - bx lr -ENDPROC(__vfp_save_state) - -/* void __vfp_restore_state(struct vfp_hard_struct *vfp); - * Assume FPEXC_EN is on and FPEXC_EX is off */ -ENTRY(__vfp_restore_state) - VFPFLDMIA r0, r1 @ Load VFP registers - ldm r0, {r0-r3} @ Load FPEXC, FPSCR, FPINST, FPINST2 - - VFPFMXR FPSCR, r1 - tst r0, #FPEXC_EX @ Check for VFP Subarchitecture - beq 1f - VFPFMXR FPINST, r2 - tst r0, #FPEXC_FP2V - VFPFMXR FPINST2, r3, ne -1: - VFPFMXR FPEXC, r0 @ FPEXC (last, in case !EN) - bx lr -ENDPROC(__vfp_restore_state) - - .popsection diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S deleted file mode 100644 index 33e34b6d24b2..000000000000 --- a/arch/arm/kvm/init.S +++ /dev/null @@ -1,157 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall <c.dall@virtualopensystems.com> - */ - -#include <linux/linkage.h> -#include <asm/assembler.h> -#include <asm/unified.h> -#include <asm/asm-offsets.h> -#include <asm/kvm_asm.h> -#include <asm/kvm_arm.h> -#include <asm/kvm_mmu.h> -#include <asm/virt.h> - -/******************************************************************** - * Hypervisor initialization - * - should be called with: - * r0 = top of Hyp stack (kernel VA) - * r1 = pointer to hyp vectors - * r2,r3 = Hypervisor pgd pointer - * - * The init scenario is: - * - We jump in HYP with 3 parameters: runtime HYP pgd, runtime stack, - * runtime vectors - * - Invalidate TLBs - * - Set stack and vectors - * - Setup the page tables - * - Enable the MMU - * - Profit! (or eret, if you only care about the code). - * - * Another possibility is to get a HYP stub hypercall. - * We discriminate between the two by checking if r0 contains a value - * that is less than HVC_STUB_HCALL_NR. - */ - - .text - .pushsection .hyp.idmap.text,"ax" - .align 5 -__kvm_hyp_init: - .globl __kvm_hyp_init - - @ Hyp-mode exception vector - W(b) . - W(b) . - W(b) . - W(b) . - W(b) . - W(b) __do_hyp_init - W(b) . - W(b) . - -__do_hyp_init: - @ Check for a stub hypercall - cmp r0, #HVC_STUB_HCALL_NR - blo __kvm_handle_stub_hvc - - @ Set stack pointer - mov sp, r0 - - @ Set HVBAR to point to the HYP vectors - mcr p15, 4, r1, c12, c0, 0 @ HVBAR - - @ Set the HTTBR to point to the hypervisor PGD pointer passed - mcrr p15, 4, rr_lo_hi(r2, r3), c2 - - @ Set the HTCR and VTCR to the same shareability and cacheability - @ settings as the non-secure TTBCR and with T0SZ == 0. - mrc p15, 4, r0, c2, c0, 2 @ HTCR - ldr r2, =HTCR_MASK - bic r0, r0, r2 - mrc p15, 0, r1, c2, c0, 2 @ TTBCR - and r1, r1, #(HTCR_MASK & ~TTBCR_T0SZ) - orr r0, r0, r1 - mcr p15, 4, r0, c2, c0, 2 @ HTCR - - @ Use the same memory attributes for hyp. accesses as the kernel - @ (copy MAIRx to HMAIRx).
- mrc p15, 0, r0, c10, c2, 0 - mcr p15, 4, r0, c10, c2, 0 - mrc p15, 0, r0, c10, c2, 1 - mcr p15, 4, r0, c10, c2, 1 - - @ Invalidate the stale TLBs from Bootloader - mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH - dsb ish - - @ Set the HSCTLR to: - @ - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel) - @ - Endianness: Kernel config - @ - Fast Interrupt Features: Kernel config - @ - Write permission implies XN: disabled - @ - Instruction cache: enabled - @ - Data/Unified cache: enabled - @ - MMU: enabled (this code must be run from an identity mapping) - mrc p15, 4, r0, c1, c0, 0 @ HSCR - ldr r2, =HSCTLR_MASK - bic r0, r0, r2 - mrc p15, 0, r1, c1, c0, 0 @ SCTLR - ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C) - and r1, r1, r2 - ARM( ldr r2, =(HSCTLR_M) ) - THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) ) - orr r1, r1, r2 - orr r0, r0, r1 - mcr p15, 4, r0, c1, c0, 0 @ HSCR - isb - - eret - -ENTRY(__kvm_handle_stub_hvc) - cmp r0, #HVC_SOFT_RESTART - bne 1f - - /* The target is expected in r1 */ - msr ELR_hyp, r1 - mrs r0, cpsr - bic r0, r0, #MODE_MASK - orr r0, r0, #HYP_MODE -THUMB( orr r0, r0, #PSR_T_BIT ) - msr spsr_cxsf, r0 - b reset - -1: cmp r0, #HVC_RESET_VECTORS - bne 1f - -reset: - /* We're now in idmap, disable MMU */ - mrc p15, 4, r1, c1, c0, 0 @ HSCTLR - ldr r0, =(HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I) - bic r1, r1, r0 - mcr p15, 4, r1, c1, c0, 0 @ HSCTLR - - /* - * Install stub vectors, using ardb's VA->PA trick. - */ -0: adr r0, 0b @ PA(0) - movw r1, #:lower16:__hyp_stub_vectors - 0b @ VA(stub) - VA(0) - movt r1, #:upper16:__hyp_stub_vectors - 0b - add r1, r1, r0 @ PA(stub) - mcr p15, 4, r1, c12, c0, 0 @ HVBAR - b exit - -1: ldr r0, =HVC_STUB_ERR - eret - -exit: - mov r0, #0 - eret -ENDPROC(__kvm_handle_stub_hvc) - - .ltorg - - .globl __kvm_hyp_init_end -__kvm_hyp_init_end: - - .popsection diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S deleted file mode 100644 index 064f4f118ca7..000000000000 --- a/arch/arm/kvm/interrupts.S +++ /dev/null @@ -1,36 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall <c.dall@virtualopensystems.com> - */ - -#include <linux/linkage.h> - - .text - -/******************************************************************** - * Call function in Hyp mode - * - * - * unsigned long kvm_call_hyp(void *hypfn, ...); - * - * This is not really a variadic function in the classic C-way and care must - * be taken when calling this to ensure parameters are passed in registers - * only, since the stack will change between the caller and the callee. - * - * Call the function with the first argument containing a pointer to the - * function you wish to call in Hyp mode, and subsequent arguments will be - * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the - * function pointer can be passed). The function being called must be mapped - * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are - * passed in r0 (strictly 32bit). 
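Concretely, the 32-bit KVM code invoked this trampoline along the following lines; these call sites are an illustrative reconstruction of typical usage, not lines quoted from the patch:

	/* The first argument names the hyp-mode function; up to three more
	 * arguments travel in r0-r2, and the 32-bit result comes back in r0. */
	kvm_call_hyp(__kvm_flush_vm_context);			/* no arguments */
	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);		/* one argument */
	exit_code = kvm_call_hyp(__kvm_vcpu_run_nvhe, vcpu);	/* return value */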
- * - * The calling convention follows the standard AAPCS: - * r0 - r3: caller save - * r12: caller save - * rest: callee save - */ -ENTRY(__kvm_call_hyp) - hvc #0 - bx lr -ENDPROC(__kvm_call_hyp) diff --git a/arch/arm/kvm/irq.h b/arch/arm/kvm/irq.h deleted file mode 100644 index 0d257de42c10..000000000000 --- a/arch/arm/kvm/irq.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * irq.h: in kernel interrupt controller related definitions - * Copyright (c) 2016 Red Hat, Inc. - * - * This header is included by irqchip.c. However, on ARM, interrupt - * controller declarations are located in include/kvm/arm_vgic.h since - * they are mostly shared between arm and arm64. - */ - -#ifndef __IRQ_H -#define __IRQ_H - -#include <kvm/arm_vgic.h> - -#endif diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c deleted file mode 100644 index eb4174f6ebbd..000000000000 --- a/arch/arm/kvm/reset.c +++ /dev/null @@ -1,86 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall <c.dall@virtualopensystems.com> - */ -#include <linux/compiler.h> -#include <linux/errno.h> -#include <linux/sched.h> -#include <linux/kvm_host.h> -#include <linux/kvm.h> - -#include <asm/unified.h> -#include <asm/ptrace.h> -#include <asm/cputype.h> -#include <asm/kvm_arm.h> -#include <asm/kvm_coproc.h> -#include <asm/kvm_emulate.h> - -#include <kvm/arm_arch_timer.h> - -/****************************************************************************** - * Cortex-A15 and Cortex-A7 Reset Values - */ - -static struct kvm_regs cortexa_regs_reset = { - .usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT, -}; - - -/******************************************************************************* - * Exported reset function - */ - -/** - * kvm_reset_vcpu - sets core registers and cp15 registers to reset value - * @vcpu: The VCPU pointer - * - * This function finds the right table above and sets the registers on the - * virtual CPU struct to their architecturally defined reset values. - */ -int kvm_reset_vcpu(struct kvm_vcpu *vcpu) -{ - struct kvm_regs *reset_regs; - - switch (vcpu->arch.target) { - case KVM_ARM_TARGET_CORTEX_A7: - case KVM_ARM_TARGET_CORTEX_A15: - reset_regs = &cortexa_regs_reset; - vcpu->arch.midr = read_cpuid_id(); - break; - default: - return -ENODEV; - } - - /* Reset core registers */ - memcpy(&vcpu->arch.ctxt.gp_regs, reset_regs, sizeof(vcpu->arch.ctxt.gp_regs)); - - /* Reset CP15 registers */ - kvm_reset_coprocs(vcpu); - - /* - * Additional reset state handling that PSCI may have imposed on us. - * Must be done after all the sys_reg reset. 
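The reset_state consumed just below is produced by KVM's PSCI emulation when one VCPU calls CPU_ON to start another. A simplified sketch of that producer side, paraphrased from virt/kvm/arm/psci.c, so treat the exact field handling as approximate:

	/* Sketch of how PSCI CPU_ON fills in the state consumed below. */
	static void psci_cpu_on_sketch(struct kvm_vcpu *source,
				       struct kvm_vcpu *target,
				       unsigned long entry, unsigned long context_id)
	{
		struct vcpu_reset_state *reset = &target->arch.reset_state;

		reset->pc = entry;		/* bit 0 set requests Thumb entry */
		reset->r0 = context_id;		/* delivered to the guest in r0 */
		reset->be = kvm_vcpu_is_be(source); /* caller endianness */
		reset->reset = true;

		kvm_make_request(KVM_REQ_VCPU_RESET, target);
	}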
- */ - if (READ_ONCE(vcpu->arch.reset_state.reset)) { - unsigned long target_pc = vcpu->arch.reset_state.pc; - - /* Gracefully handle Thumb2 entry point */ - if (target_pc & 1) { - target_pc &= ~1UL; - vcpu_set_thumb(vcpu); - } - - /* Propagate caller endianness */ - if (vcpu->arch.reset_state.be) - kvm_vcpu_set_be(vcpu); - - *vcpu_pc(vcpu) = target_pc; - vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0); - - vcpu->arch.reset_state.reset = false; - } - - /* Reset arch_timer context */ - return kvm_timer_vcpu_reset(vcpu); -} diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h deleted file mode 100644 index 69a9d62a0ac6..000000000000 --- a/arch/arm/kvm/trace.h +++ /dev/null @@ -1,86 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#if !defined(_TRACE_ARM_KVM_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_ARM_KVM_H - -#include <linux/tracepoint.h> - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM kvm - -/* Architecturally implementation defined CP15 register access */ -TRACE_EVENT(kvm_emulate_cp15_imp, - TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn, - unsigned long CRm, unsigned long Op2, bool is_write), - TP_ARGS(Op1, Rt1, CRn, CRm, Op2, is_write), - - TP_STRUCT__entry( - __field( unsigned int, Op1 ) - __field( unsigned int, Rt1 ) - __field( unsigned int, CRn ) - __field( unsigned int, CRm ) - __field( unsigned int, Op2 ) - __field( bool, is_write ) - ), - - TP_fast_assign( - __entry->is_write = is_write; - __entry->Op1 = Op1; - __entry->Rt1 = Rt1; - __entry->CRn = CRn; - __entry->CRm = CRm; - __entry->Op2 = Op2; - ), - - TP_printk("Implementation defined CP15: %s\tp15, %u, r%u, c%u, c%u, %u", - (__entry->is_write) ? "mcr" : "mrc", - __entry->Op1, __entry->Rt1, __entry->CRn, - __entry->CRm, __entry->Op2) -); - -TRACE_EVENT(kvm_wfx, - TP_PROTO(unsigned long vcpu_pc, bool is_wfe), - TP_ARGS(vcpu_pc, is_wfe), - - TP_STRUCT__entry( - __field( unsigned long, vcpu_pc ) - __field( bool, is_wfe ) - ), - - TP_fast_assign( - __entry->vcpu_pc = vcpu_pc; - __entry->is_wfe = is_wfe; - ), - - TP_printk("guest executed wf%c at: 0x%08lx", - __entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc) -); - -TRACE_EVENT(kvm_hvc, - TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm), - TP_ARGS(vcpu_pc, r0, imm), - - TP_STRUCT__entry( - __field( unsigned long, vcpu_pc ) - __field( unsigned long, r0 ) - __field( unsigned long, imm ) - ), - - TP_fast_assign( - __entry->vcpu_pc = vcpu_pc; - __entry->r0 = r0; - __entry->imm = imm; - ), - - TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx", - __entry->vcpu_pc, __entry->r0, __entry->imm) -); - -#endif /* _TRACE_ARM_KVM_H */ - -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . 
-#undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_FILE trace - -/* This part must be outside protection */ -#include <trace/define_trace.h> diff --git a/arch/arm/kvm/vgic-v3-coproc.c b/arch/arm/kvm/vgic-v3-coproc.c deleted file mode 100644 index ed3b2e4759ce..000000000000 --- a/arch/arm/kvm/vgic-v3-coproc.c +++ /dev/null @@ -1,27 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * VGIC system registers handling functions for AArch32 mode - */ - -#include <linux/kvm.h> -#include <linux/kvm_host.h> -#include <asm/kvm_emulate.h> -#include "vgic.h" - -int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id, - u64 *reg) -{ - /* - * TODO: Implement for AArch32 - */ - return -ENXIO; -} - -int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write, u64 id, - u64 *reg) -{ - /* - * TODO: Implement for AArch32 - */ - return -ENXIO; -} diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index cbbe03e96de8..76838255b5fa 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -21,7 +21,7 @@ menuconfig ARCH_EXYNOS select EXYNOS_SROM select EXYNOS_PM_DOMAINS if PM_GENERIC_DOMAINS select GPIOLIB - select HAVE_ARM_ARCH_TIMER if ARCH_EXYNOS5 && VIRTUALIZATION + select HAVE_ARM_ARCH_TIMER if ARCH_EXYNOS5 select HAVE_ARM_SCU if SMP select HAVE_S3C2410_I2C if I2C select HAVE_S3C2410_WATCHDOG if WATCHDOG diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 5d0d0f86e790..69a337df619f 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -63,9 +63,6 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK; static unsigned int ecc_mask __initdata = 0; pgprot_t pgprot_user; pgprot_t pgprot_kernel; -pgprot_t pgprot_hyp_device; -pgprot_t pgprot_s2; -pgprot_t pgprot_s2_device; EXPORT_SYMBOL(pgprot_user); EXPORT_SYMBOL(pgprot_kernel); @@ -75,15 +72,8 @@ struct cachepolicy { unsigned int cr_mask; pmdval_t pmd; pteval_t pte; - pteval_t pte_s2; }; -#ifdef CONFIG_ARM_LPAE -#define s2_policy(policy) policy -#else -#define s2_policy(policy) 0 -#endif - unsigned long kimage_voffset __ro_after_init; static struct cachepolicy cache_policies[] __initdata = { @@ -92,31 +82,26 @@ static struct cachepolicy cache_policies[] __initdata = { .cr_mask = CR_W|CR_C, .pmd = PMD_SECT_UNCACHED, .pte = L_PTE_MT_UNCACHED, - .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED), }, { .policy = "buffered", .cr_mask = CR_C, .pmd = PMD_SECT_BUFFERED, .pte = L_PTE_MT_BUFFERABLE, - .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED), }, { .policy = "writethrough", .cr_mask = 0, .pmd = PMD_SECT_WT, .pte = L_PTE_MT_WRITETHROUGH, - .pte_s2 = s2_policy(L_PTE_S2_MT_WRITETHROUGH), }, { .policy = "writeback", .cr_mask = 0, .pmd = PMD_SECT_WB, .pte = L_PTE_MT_WRITEBACK, - .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK), }, { .policy = "writealloc", .cr_mask = 0, .pmd = PMD_SECT_WBWA, .pte = L_PTE_MT_WRITEALLOC, - .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK), } }; @@ -246,9 +231,6 @@ static struct mem_type mem_types[] __ro_after_init = { [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED | L_PTE_SHARED, - .prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) | - s2_policy(L_PTE_S2_MT_DEV_SHARED) | - L_PTE_SHARED, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S, .domain = DOMAIN_IO, @@ -434,7 +416,6 @@ static void __init build_mem_type_table(void) struct cachepolicy *cp; unsigned int cr = get_cr(); pteval_t user_pgprot, kern_pgprot, vecs_pgprot; - pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot; int cpu_arch = 
cpu_architecture(); int i; @@ -558,9 +539,6 @@ static void __init build_mem_type_table(void) */ cp = &cache_policies[cachepolicy]; vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; - s2_pgprot = cp->pte_s2; - hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte; - s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2; #ifndef CONFIG_ARM_LPAE /* @@ -604,7 +582,6 @@ static void __init build_mem_type_table(void) user_pgprot |= L_PTE_SHARED; kern_pgprot |= L_PTE_SHARED; vecs_pgprot |= L_PTE_SHARED; - s2_pgprot |= L_PTE_SHARED; mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S; mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; @@ -666,9 +643,6 @@ static void __init build_mem_type_table(void) pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot); pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | kern_pgprot); - pgprot_s2 = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot); - pgprot_s2_device = __pgprot(s2_device_pgprot); - pgprot_hyp_device = __pgprot(hyp_device_pgprot); mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 25fec4bde43a..a358e97572c1 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -32,7 +32,7 @@ static inline void gic_write_eoir(u32 irq) isb(); } -static inline void gic_write_dir(u32 irq) +static __always_inline void gic_write_dir(u32 irq) { write_sysreg_s(irq, SYS_ICC_DIR_EL1); isb(); diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 806e9dc2a852..a4d1b5f771f6 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -69,7 +69,7 @@ static inline int icache_is_aliasing(void) return test_bit(ICACHEF_ALIASING, &__icache_flags); } -static inline int icache_is_vpipt(void) +static __always_inline int icache_is_vpipt(void) { return test_bit(ICACHEF_VPIPT, &__icache_flags); } diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 665c78e0665a..e6cca3d4acf7 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -145,7 +145,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *, #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page *); -static inline void __flush_icache_all(void) +static __always_inline void __flush_icache_all(void) { if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) return; diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 92ef9539874a..2a746b99e937 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -435,13 +435,13 @@ cpuid_feature_extract_signed_field(u64 features, int field) return cpuid_feature_extract_signed_field_width(features, field, 4); } -static inline unsigned int __attribute_const__ +static __always_inline unsigned int __attribute_const__ cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width) { return (u64)(features << (64 - width - field)) >> (64 - width); } -static inline unsigned int __attribute_const__ +static __always_inline unsigned int __attribute_const__ cpuid_feature_extract_unsigned_field(u64 features, int field) { return cpuid_feature_extract_unsigned_field_width(features, field, 4); @@ -564,7 +564,7 @@ static inline bool system_supports_mixed_endian(void) return val == 0x1; } -static inline bool 
system_supports_fpsimd(void) +static __always_inline bool system_supports_fpsimd(void) { return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD); } @@ -575,13 +575,13 @@ static inline bool system_uses_ttbr0_pan(void) !cpus_have_const_cap(ARM64_HAS_PAN); } -static inline bool system_supports_sve(void) +static __always_inline bool system_supports_sve(void) { return IS_ENABLED(CONFIG_ARM64_SVE) && cpus_have_const_cap(ARM64_SVE); } -static inline bool system_supports_cnp(void) +static __always_inline bool system_supports_cnp(void) { return IS_ENABLED(CONFIG_ARM64_CNP) && cpus_have_const_cap(ARM64_HAS_CNP); diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 4e531f57147d..6facd1308e7c 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -34,7 +34,7 @@ static inline void __raw_writew(u16 val, volatile void __iomem *addr) } #define __raw_writel __raw_writel -static inline void __raw_writel(u32 val, volatile void __iomem *addr) +static __always_inline void __raw_writel(u32 val, volatile void __iomem *addr) { asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr)); } @@ -69,7 +69,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr) } #define __raw_readl __raw_readl -static inline u32 __raw_readl(const volatile void __iomem *addr) +static __always_inline u32 __raw_readl(const volatile void __iomem *addr) { u32 val; asm volatile(ALTERNATIVE("ldr %w0, [%1]", diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 688c63412cc2..a30b4eec7cb4 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -36,7 +36,7 @@ void kvm_inject_undef32(struct kvm_vcpu *vcpu); void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr); void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr); -static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu) +static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu) { return !(vcpu->arch.hcr_el2 & HCR_RW); } @@ -89,7 +89,8 @@ static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu) static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu) { vcpu->arch.hcr_el2 &= ~HCR_TWE; - if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count)) + if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) || + vcpu->kvm->arch.vgic.nassgireq) vcpu->arch.hcr_el2 &= ~HCR_TWI; else vcpu->arch.hcr_el2 |= HCR_TWI; @@ -127,7 +128,7 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr) vcpu->arch.vsesr_el2 = vsesr; } -static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) +static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) { return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc; } @@ -153,17 +154,17 @@ static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long *__vcpu_elr_el1(vcpu) = v; } -static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu) +static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu) { return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate; } -static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu) +static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu) { return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT); } -static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu) +static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu) { if (vcpu_mode_is_32bit(vcpu)) return kvm_condition_valid32(vcpu); @@ -181,13 +182,13 @@ static inline void 
vcpu_set_thumb(struct kvm_vcpu *vcpu) * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on * AArch32 with banked registers. */ -static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu, +static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu, u8 reg_num) { return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num]; } -static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num, +static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num, unsigned long val) { if (reg_num != 31) @@ -264,12 +265,12 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu) return mode != PSR_MODE_EL0t; } -static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu) +static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu) { return vcpu->arch.fault.esr_el2; } -static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) +static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) { u32 esr = kvm_vcpu_get_hsr(vcpu); @@ -279,12 +280,12 @@ static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) return -1; } -static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu) +static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu) { return vcpu->arch.fault.far_el2; } -static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu) +static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu) { return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8; } @@ -299,7 +300,7 @@ static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK; } -static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu) +static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV); } @@ -319,17 +320,17 @@ static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu) return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF); } -static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) +static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) { return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT; } -static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) +static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW); } -static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) +static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) || kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */ @@ -340,18 +341,18 @@ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu) return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM); } -static inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu) +static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu) { return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT); } /* This one is not specific to Data Abort */ -static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu) +static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL); } -static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu) +static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu) { 
return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu)); } @@ -361,17 +362,17 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW; } -static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) +static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC; } -static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu) +static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE; } -static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu) +static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu) { switch (kvm_vcpu_trap_get_fault(vcpu)) { case FSC_SEA: @@ -390,7 +391,7 @@ static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu) } } -static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu) +static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu) { u32 esr = kvm_vcpu_get_hsr(vcpu); return ESR_ELx_SYS64_ISS_RT(esr); @@ -504,7 +505,7 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, return data; /* Leave LE untouched */ } -static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) +static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) { if (vcpu_mode_is_32bit(vcpu)) kvm_skip_instr32(vcpu, is_wide_instr); @@ -519,7 +520,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) * Skip an instruction which has been emulated at hyp while most guest sysregs * are live. */ -static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu) +static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu) { *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR); vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR); diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index d87aa609d2b6..32c8a675e5a4 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -44,6 +44,7 @@ #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) #define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) #define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3) +#define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4) DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); @@ -626,38 +627,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {} static inline void kvm_clr_pmu_events(u32 clr) {} #endif -static inline void kvm_arm_vhe_guest_enter(void) -{ - local_daif_mask(); - - /* - * Having IRQs masked via PMR when entering the guest means the GIC - * will not signal the CPU of interrupts of lower priority, and the - * only way to get out will be via guest exceptions. - * Naturally, we want to avoid this. - * - * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a - * dsb to ensure the redistributor is forwards EL2 IRQs to the CPU. - */ - pmr_sync(); -} - -static inline void kvm_arm_vhe_guest_exit(void) -{ - /* - * local_daif_restore() takes care to properly restore PSTATE.DAIF - * and the GIC PMR if the host is using IRQ priorities. - */ - local_daif_restore(DAIF_PROCCTX_NOIRQ); - - /* - * When we exit from the guest we change a number of CPU configuration - * parameters, such as traps. Make sure these changes take effect - * before running the host or additional guests. 
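These two helpers do not simply vanish: the same enter/exit bracketing reappears further down as a wrapper around the renamed __kvm_vcpu_run_vhe() in switch.c. Condensed, the net sequence is:

	/* Condensed view of the new kvm_vcpu_run_vhe() wrapper below: */
	local_daif_mask();		/* mask DAIF; PMR blocks lower-priority IRQs */
	pmr_sync();			/* dsb so the redistributor honours the PMR */
	ret = __kvm_vcpu_run_vhe(vcpu);	/* the world switch proper */
	local_daif_restore(DAIF_PROCCTX_NOIRQ); /* restore PSTATE.DAIF and GIC PMR */
	isb();				/* make trap reconfiguration take effect */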
- */ - isb(); -} - #define KVM_BP_HARDEN_UNKNOWN -1 #define KVM_BP_HARDEN_WA_NEEDED 0 #define KVM_BP_HARDEN_NOT_REQUIRED 1 diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index a3a6a2ba9a63..fe57f60f06a8 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -47,6 +47,13 @@ #define read_sysreg_el2(r) read_sysreg_elx(r, _EL2, _EL1) #define write_sysreg_el2(v,r) write_sysreg_elx(v, r, _EL2, _EL1) +/* + * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the + * static inline can allow the compiler to out-of-line this. KVM always wants + * the macro version as it's always inlined. + */ +#define __kvm_swab32(x) ___constant_swab32(x) + int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu); void __vgic_v3_save_state(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 53d846f1bfe7..785762860c63 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -93,7 +93,7 @@ void kvm_update_va_mask(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst); void kvm_compute_layout(void); -static inline unsigned long __kern_hyp_va(unsigned long v) +static __always_inline unsigned long __kern_hyp_va(unsigned long v) { asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n" "ror %0, %0, #1\n" @@ -473,6 +473,7 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa, extern void *__kvm_bp_vect_base; extern int __kvm_harden_el2_vector_slot; +/* This is only called on a VHE system */ static inline void *kvm_get_hyp_vector(void) { struct bp_hardening_data *data = arm64_get_bp_hardening_data(); diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h index d429f7701c36..5d10051c3e62 100644 --- a/arch/arm64/include/asm/lse.h +++ b/arch/arm64/include/asm/lse.h @@ -6,7 +6,7 @@ #ifdef CONFIG_ARM64_LSE_ATOMICS -#define __LSE_PREAMBLE ".arch armv8-a+lse\n" +#define __LSE_PREAMBLE ".arch_extension lse\n" #include <linux/compiler_types.h> #include <linux/export.h> diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index a4f9ca5479b0..4d94676e5a8b 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -213,7 +213,7 @@ static inline unsigned long kaslr_offset(void) ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55)) #define untagged_addr(addr) ({ \ - u64 __addr = (__force u64)addr; \ + u64 __addr = (__force u64)(addr); \ __addr &= __untagged_addr(__addr); \ (__force __typeof__(addr))__addr; \ }) diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index 0958ed6191aa..61fd26752adc 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -83,7 +83,7 @@ static inline bool is_kernel_in_hyp_mode(void) return read_sysreg(CurrentEL) == CurrentEL_EL2; } -static inline bool has_vhe(void) +static __always_inline bool has_vhe(void) { if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN)) return true; diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index f3e0ab961565..600010cd6d1d 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -624,7 +624,7 @@ static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt) } /* Switch to the guest for VHE systems running in EL2 */ -int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) +static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *host_ctxt; struct kvm_cpu_context *guest_ctxt; @@ -677,7
+677,42 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) return exit_code; } -NOKPROBE_SYMBOL(kvm_vcpu_run_vhe); +NOKPROBE_SYMBOL(__kvm_vcpu_run_vhe); + +int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) +{ + int ret; + + local_daif_mask(); + + /* + * Having IRQs masked via PMR when entering the guest means the GIC + * will not signal the CPU of interrupts of lower priority, and the + * only way to get out will be via guest exceptions. + * Naturally, we want to avoid this. + * + * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a + * dsb to ensure the redistributor forwards EL2 IRQs to the CPU. + */ + pmr_sync(); + + ret = __kvm_vcpu_run_vhe(vcpu); + + /* + * local_daif_restore() takes care to properly restore PSTATE.DAIF + * and the GIC PMR if the host is using IRQ priorities. + */ + local_daif_restore(DAIF_PROCCTX_NOIRQ); + + /* + * When we exit from the guest we change a number of CPU configuration + * parameters, such as traps. Make sure these changes take effect + * before running the host or additional guests. + */ + isb(); + + return ret; +} /* Switch to the guest for legacy non-VHE systems */ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c index 29ee1feba4eb..4f3a087e36d5 100644 --- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c +++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c @@ -69,14 +69,14 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu) u32 data = vcpu_get_reg(vcpu, rd); if (__is_be(vcpu)) { /* guest pre-swabbed data, undo this for writel() */ - data = swab32(data); + data = __kvm_swab32(data); } writel_relaxed(data, addr); } else { u32 data = readl_relaxed(addr); if (__is_be(vcpu)) { /* guest expects swabbed data */ - data = swab32(data); + data = __kvm_swab32(data); } vcpu_set_reg(vcpu, rd, data); } diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index da09c884cc30..047427f71d83 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -9,7 +9,6 @@ config CSKY select ARCH_USE_QUEUED_RWLOCKS if NR_CPUS>2 select COMMON_CLK select CLKSRC_MMIO - select CLKSRC_OF select CSKY_MPINTC if CPU_CK860 select CSKY_MP_TIMER if CPU_CK860 select CSKY_APB_INTC @@ -37,6 +36,7 @@ config CSKY select GX6605S_TIMER if CPU_CK610 select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_AUDITSYSCALL + select HAVE_COPY_THREAD_TLS select HAVE_DYNAMIC_FTRACE select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER @@ -47,8 +47,8 @@ config CSKY select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP - select HAVE_DMA_API_DEBUG select HAVE_DMA_CONTIGUOUS + select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS select MAY_HAVE_SPARSE_IRQ select MODULES_USE_ELF_RELA if MODULES @@ -59,6 +59,11 @@ config CSKY select TIMER_OF select USB_ARCH_HAS_EHCI select USB_ARCH_HAS_OHCI + select GENERIC_PCI_IOMAP + select HAVE_PCI + select PCI_DOMAINS_GENERIC if PCI + select PCI_SYSCALL if PCI + select PCI_MSI if PCI config CPU_HAS_CACHEV2 bool @@ -75,7 +80,7 @@ config CPU_HAS_TLBI config CPU_HAS_LDSTEX bool help - For SMP, CPU needs "ldex&stex" instrcutions to atomic operations. + For SMP, CPU needs "ldex&stex" instructions for atomic operations. config CPU_NEED_TLBSYNC bool @@ -188,6 +193,40 @@ config CPU_PM_STOP bool "stop" endchoice +menuconfig HAVE_TCM + bool "Tightly-Coupled/Sram Memory" + select GENERIC_ALLOCATOR + help + The implementation is not only used by TCM (Tightly-Coupled Memory) + but also used by sram on SOC bus.
It follows the existing Linux TCM + software interface, so that old TCM application code can be + re-used directly. + +if HAVE_TCM +config ITCM_RAM_BASE + hex "ITCM ram base" + default 0xffffffff + +config ITCM_NR_PAGES + int "Page count of ITCM size: NR*4KB" + range 1 256 + default 32 + +config HAVE_DTCM + bool "DTCM Support" + +config DTCM_RAM_BASE + hex "DTCM ram base" + depends on HAVE_DTCM + default 0xffffffff + +config DTCM_NR_PAGES + int "Page count of DTCM size: NR*4KB" + depends on HAVE_DTCM + range 1 256 + default 32 +endif + config CPU_HAS_VDSP bool "CPU has VDSP coprocessor" depends on CPU_HAS_FPU && CPU_HAS_FPUV2 @@ -196,6 +235,10 @@ config CPU_HAS_FPU bool "CPU has FPU coprocessor" depends on CPU_CK807 || CPU_CK810 || CPU_CK860 +config CPU_HAS_ICACHE_INS + bool "CPU has Icache invalidate instructions" + depends on CPU_HAS_CACHEV2 + config CPU_HAS_TEE bool "CPU has Trusted Execution Environment" depends on CPU_CK810 @@ -235,4 +278,6 @@ config HOTPLUG_CPU Say N if you want to disable CPU hotplug. endmenu +source "arch/csky/Kconfig.platforms" + source "kernel/Kconfig.hz" diff --git a/arch/csky/Kconfig.platforms b/arch/csky/Kconfig.platforms new file mode 100644 index 000000000000..639e17f4eacb --- /dev/null +++ b/arch/csky/Kconfig.platforms @@ -0,0 +1,9 @@ +menu "Platform drivers selection" + +config ARCH_CSKY_DW_APB_ICTL + bool "Select dw-apb interrupt controller" + select DW_APB_ICTL + default y + help + This enables support for the snps dw-apb-ictl. +endmenu diff --git a/arch/csky/abiv1/inc/abi/cacheflush.h b/arch/csky/abiv1/inc/abi/cacheflush.h index 79ef9e8c1afd..d3e04208d53c 100644 --- a/arch/csky/abiv1/inc/abi/cacheflush.h +++ b/arch/csky/abiv1/inc/abi/cacheflush.h @@ -48,9 +48,8 @@ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, u #define flush_icache_page(vma, page) do {} while (0); #define flush_icache_range(start, end) cache_wbinv_range(start, end) - -#define flush_icache_user_range(vma,page,addr,len) \ - flush_dcache_page(page) +#define flush_icache_mm_range(mm, start, end) cache_wbinv_range(start, end) +#define flush_icache_deferred(mm) do {} while (0); #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ do { \ diff --git a/arch/csky/abiv1/inc/abi/entry.h b/arch/csky/abiv1/inc/abi/entry.h index 7ab78bd0f3b1..f35a9f3315ee 100644 --- a/arch/csky/abiv1/inc/abi/entry.h +++ b/arch/csky/abiv1/inc/abi/entry.h @@ -16,14 +16,16 @@ #define LSAVE_A4 40 #define LSAVE_A5 44 +#define usp ss1 + .macro USPTOKSP - mtcr sp, ss1 + mtcr sp, usp mfcr sp, ss0 .endm .macro KSPTOUSP mtcr sp, ss0 - mfcr sp, ss1 + mfcr sp, usp .endm .macro SAVE_ALL epc_inc @@ -45,7 +47,13 @@ add lr, r13 stw lr, (sp, 8) + mov lr, sp + addi lr, 32 + addi lr, 32 + addi lr, 16 + bt 2f mfcr lr, ss1 +2: stw lr, (sp, 16) stw a0, (sp, 20) @@ -79,9 +87,10 @@ ldw a0, (sp, 12) mtcr a0, epsr btsti a0, 31 + bt 1f ldw a0, (sp, 16) mtcr a0, ss1 - +1: ldw a0, (sp, 24) ldw a1, (sp, 28) ldw a2, (sp, 32) @@ -102,9 +111,9 @@ addi sp, 32 addi sp, 8 - bt 1f + bt 2f KSPTOUSP -1: +2: rte .endm diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c index 5bb887b275e1..790f1ebfba44 100644 --- a/arch/csky/abiv2/cacheflush.c +++ b/arch/csky/abiv2/cacheflush.c @@ -6,46 +6,80 @@ #include <linux/mm.h> #include <asm/cache.h> -void flush_icache_page(struct vm_area_struct *vma, struct page *page) +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, + pte_t *pte) { - unsigned long start; + unsigned long addr; + struct page *page; - start = (unsigned long)
kmap_atomic(page); + page = pfn_to_page(pte_pfn(*pte)); + if (page == ZERO_PAGE(0)) + return; - cache_wbinv_range(start, start + PAGE_SIZE); + if (test_and_set_bit(PG_dcache_clean, &page->flags)) + return; - kunmap_atomic((void *)start); -} + addr = (unsigned long) kmap_atomic(page); -void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, - unsigned long vaddr, int len) -{ - unsigned long kaddr; + dcache_wb_range(addr, addr + PAGE_SIZE); - kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK); + if (vma->vm_flags & VM_EXEC) + icache_inv_range(addr, addr + PAGE_SIZE); + + kunmap_atomic((void *) addr); +} - cache_wbinv_range(kaddr, kaddr + len); +void flush_icache_deferred(struct mm_struct *mm) +{ + unsigned int cpu = smp_processor_id(); + cpumask_t *mask = &mm->context.icache_stale_mask; - kunmap_atomic((void *)kaddr); + if (cpumask_test_cpu(cpu, mask)) { + cpumask_clear_cpu(cpu, mask); + /* + * Ensure the remote hart's writes are visible to this hart. + * This pairs with a barrier in flush_icache_mm. + */ + smp_mb(); + local_icache_inv_all(NULL); + } } -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, - pte_t *pte) +void flush_icache_mm_range(struct mm_struct *mm, + unsigned long start, unsigned long end) { - unsigned long addr, pfn; - struct page *page; + unsigned int cpu; + cpumask_t others, *mask; - pfn = pte_pfn(*pte); - if (unlikely(!pfn_valid(pfn))) - return; + preempt_disable(); - page = pfn_to_page(pfn); - if (page == ZERO_PAGE(0)) +#ifdef CONFIG_CPU_HAS_ICACHE_INS + if (mm == current->mm) { + icache_inv_range(start, end); + preempt_enable(); return; + } +#endif - addr = (unsigned long) kmap_atomic(page); + /* Mark every hart's icache as needing a flush for this MM. */ + mask = &mm->context.icache_stale_mask; + cpumask_setall(mask); - cache_wbinv_range(addr, addr + PAGE_SIZE); + /* Flush this hart's I$ now, and mark it as flushed. */ + cpu = smp_processor_id(); + cpumask_clear_cpu(cpu, mask); + local_icache_inv_all(NULL); - kunmap_atomic((void *) addr); + /* + * Flush the I$ of other harts concurrently executing, and mark them as + * flushed. 
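Taken together with flush_icache_deferred() above, this is a lazy shootdown protocol: the flushing side marks every CPU's icache stale, and each CPU pays for an invalidate only when it next switches to the affected mm. A condensed timeline, with the CPU labels purely illustrative:

	/*
	 * CPU A: flush_icache_mm_range(mm, start, end)
	 *          cpumask_setall(&mm->context.icache_stale_mask);
	 *          invalidate the local I$; IPI CPUs currently running mm
	 *
	 * CPU B: switch_mm(..., mm) -> flush_icache_deferred(mm)
	 *          if (cpumask_test_cpu(B, &mm->context.icache_stale_mask)) {
	 *                  cpumask_clear_cpu(B, &mm->context.icache_stale_mask);
	 *                  smp_mb();    pairs with the barrier on the flush side
	 *                  local_icache_inv_all(NULL);
	 *          }
	 */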
+ */ + cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu)); + + if (mm != current->active_mm || !cpumask_empty(&others)) { + on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1); + cpumask_clear(mask); + } + + preempt_enable(); } diff --git a/arch/csky/abiv2/inc/abi/cacheflush.h b/arch/csky/abiv2/inc/abi/cacheflush.h index b8db5e0b2fe3..a565e00c3f70 100644 --- a/arch/csky/abiv2/inc/abi/cacheflush.h +++ b/arch/csky/abiv2/inc/abi/cacheflush.h @@ -13,24 +13,27 @@ #define flush_cache_all() do { } while (0) #define flush_cache_mm(mm) do { } while (0) #define flush_cache_dup_mm(mm) do { } while (0) +#define flush_cache_range(vma, start, end) do { } while (0) +#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) -#define flush_cache_range(vma, start, end) \ - do { \ - if (vma->vm_flags & VM_EXEC) \ - icache_inv_all(); \ - } while (0) +#define PG_dcache_clean PG_arch_1 + +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 +static inline void flush_dcache_page(struct page *page) +{ + if (test_bit(PG_dcache_clean, &page->flags)) + clear_bit(PG_dcache_clean, &page->flags); +} -#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) -#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 -#define flush_dcache_page(page) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) +#define flush_icache_page(vma, page) do { } while (0) #define flush_icache_range(start, end) cache_wbinv_range(start, end) -void flush_icache_page(struct vm_area_struct *vma, struct page *page); -void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, - unsigned long vaddr, int len); +void flush_icache_mm_range(struct mm_struct *mm, + unsigned long start, unsigned long end); +void flush_icache_deferred(struct mm_struct *mm); #define flush_cache_vmap(start, end) do { } while (0) #define flush_cache_vunmap(start, end) do { } while (0) @@ -38,7 +41,13 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ do { \ memcpy(dst, src, len); \ - cache_wbinv_range((unsigned long)dst, (unsigned long)dst + len); \ + if (vma->vm_flags & VM_EXEC) { \ + dcache_wb_range((unsigned long)dst, \ + (unsigned long)dst + len); \ + flush_icache_mm_range(current->mm, \ + (unsigned long)dst, \ + (unsigned long)dst + len); \ + } \ } while (0) #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ memcpy(dst, src, len) diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h index 9897a16b45e5..94a7a58765df 100644 --- a/arch/csky/abiv2/inc/abi/entry.h +++ b/arch/csky/abiv2/inc/abi/entry.h @@ -31,7 +31,13 @@ mfcr lr, epsr stw lr, (sp, 12) + btsti lr, 31 + bf 1f + addi lr, sp, 152 + br 2f +1: mfcr lr, usp +2: stw lr, (sp, 16) stw a0, (sp, 20) @@ -64,8 +70,10 @@ mtcr a0, epc ldw a0, (sp, 12) mtcr a0, epsr + btsti a0, 31 ldw a0, (sp, 16) mtcr a0, usp + mtcr a0, ss0 #ifdef CONFIG_CPU_HAS_HILO ldw a0, (sp, 140) @@ -86,6 +94,9 @@ addi sp, 40 ldm r16-r30, (sp) addi sp, 72 + bf 1f + mfcr sp, ss0 +1: rte .endm diff --git a/arch/csky/configs/defconfig b/arch/csky/configs/defconfig index 7ef42895dfb0..af722e4dfb47 100644 --- a/arch/csky/configs/defconfig +++ b/arch/csky/configs/defconfig @@ -10,9 +10,6 @@ CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_DEFAULT_DEADLINE=y -CONFIG_CPU_CK807=y -CONFIG_CPU_HAS_FPU=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -27,10 +24,7 @@ CONFIG_SERIAL_NONSTANDARD=y 
CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y -CONFIG_TTY_PRINTK=y # CONFIG_VGA_CONSOLE is not set -CONFIG_CSKY_MPTIMER=y -CONFIG_GX6605S_TIMER=y CONFIG_PM_DEVFREQ=y CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y CONFIG_DEVFREQ_GOV_PERFORMANCE=y @@ -56,6 +50,4 @@ CONFIG_CRAMFS=y CONFIG_ROMFS_FS=y CONFIG_NFS_FS=y CONFIG_PRINTK_TIME=y -CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild index bc15a26c782f..4130e3eaa766 100644 --- a/arch/csky/include/asm/Kbuild +++ b/arch/csky/include/asm/Kbuild @@ -28,7 +28,6 @@ generic-y += local64.h generic-y += mm-arch-hooks.h generic-y += mmiowb.h generic-y += module.h -generic-y += pci.h generic-y += percpu.h generic-y += preempt.h generic-y += qrwlock.h diff --git a/arch/csky/include/asm/cache.h b/arch/csky/include/asm/cache.h index 1d5fc2f78fd7..4b5c09bf1d25 100644 --- a/arch/csky/include/asm/cache.h +++ b/arch/csky/include/asm/cache.h @@ -16,6 +16,7 @@ void dcache_wb_line(unsigned long start); void icache_inv_range(unsigned long start, unsigned long end); void icache_inv_all(void); +void local_icache_inv_all(void *priv); void dcache_wb_range(unsigned long start, unsigned long end); void dcache_wbinv_all(void); diff --git a/arch/csky/include/asm/cacheflush.h b/arch/csky/include/asm/cacheflush.h index a96da67261ae..f0b8f25429a2 100644 --- a/arch/csky/include/asm/cacheflush.h +++ b/arch/csky/include/asm/cacheflush.h @@ -4,6 +4,7 @@ #ifndef __ASM_CSKY_CACHEFLUSH_H #define __ASM_CSKY_CACHEFLUSH_H +#include <linux/mm.h> #include <abi/cacheflush.h> #endif /* __ASM_CSKY_CACHEFLUSH_H */ diff --git a/arch/csky/include/asm/fixmap.h b/arch/csky/include/asm/fixmap.h index 380ff0a307df..81f9477d5330 100644 --- a/arch/csky/include/asm/fixmap.h +++ b/arch/csky/include/asm/fixmap.h @@ -5,12 +5,16 @@ #define __ASM_CSKY_FIXMAP_H #include <asm/page.h> +#include <asm/memory.h> #ifdef CONFIG_HIGHMEM #include <linux/threads.h> #include <asm/kmap_types.h> #endif enum fixed_addresses { +#ifdef CONFIG_HAVE_TCM + FIX_TCM = TCM_NR_PAGES, +#endif #ifdef CONFIG_HIGHMEM FIX_KMAP_BEGIN, FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, @@ -18,10 +22,13 @@ enum fixed_addresses { __end_of_fixed_addresses }; -#define FIXADDR_TOP 0xffffc000 #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) #include <asm-generic/fixmap.h> +extern void fixrange_init(unsigned long start, unsigned long end, + pgd_t *pgd_base); +extern void __init fixaddr_init(void); + #endif /* __ASM_CSKY_FIXMAP_H */ diff --git a/arch/csky/include/asm/memory.h b/arch/csky/include/asm/memory.h new file mode 100644 index 000000000000..a65c6759f537 --- /dev/null +++ b/arch/csky/include/asm/memory.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_MEMORY_H +#define __ASM_CSKY_MEMORY_H + +#include <linux/compiler.h> +#include <linux/const.h> +#include <linux/types.h> +#include <linux/sizes.h> + +#define FIXADDR_TOP _AC(0xffffc000, UL) +#define PKMAP_BASE _AC(0xff800000, UL) +#define VMALLOC_START _AC(0xc0008000, UL) +#define VMALLOC_END (PKMAP_BASE - (PAGE_SIZE * 2)) + +#ifdef CONFIG_HAVE_TCM +#ifdef CONFIG_HAVE_DTCM +#define TCM_NR_PAGES (CONFIG_ITCM_NR_PAGES + CONFIG_DTCM_NR_PAGES) +#else +#define TCM_NR_PAGES (CONFIG_ITCM_NR_PAGES) +#endif +#define FIXADDR_TCM _AC(FIXADDR_TOP - (TCM_NR_PAGES * PAGE_SIZE), UL) +#endif + +#endif diff --git a/arch/csky/include/asm/mmu.h b/arch/csky/include/asm/mmu.h index 
b382a14ea4ec..26fbb1d15df0 100644 --- a/arch/csky/include/asm/mmu.h +++ b/arch/csky/include/asm/mmu.h @@ -7,6 +7,7 @@ typedef struct { atomic64_t asid; void *vdso; + cpumask_t icache_stale_mask; } mm_context_t; #endif /* __ASM_CSKY_MMU_H */ diff --git a/arch/csky/include/asm/mmu_context.h b/arch/csky/include/asm/mmu_context.h index 0285b0ad18b6..abdf1f1cb6ec 100644 --- a/arch/csky/include/asm/mmu_context.h +++ b/arch/csky/include/asm/mmu_context.h @@ -43,5 +43,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, TLBMISS_HANDLER_SETUP_PGD(next->pgd); write_mmu_entryhi(next->context.asid.counter); + + flush_icache_deferred(next); } #endif /* __ASM_CSKY_MMU_CONTEXT_H */ diff --git a/arch/csky/include/asm/pci.h b/arch/csky/include/asm/pci.h new file mode 100644 index 000000000000..ebc765b1f78b --- /dev/null +++ b/arch/csky/include/asm/pci.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_CSKY_PCI_H +#define __ASM_CSKY_PCI_H + +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> + +#include <asm/io.h> + +#define PCIBIOS_MIN_IO 0 +#define PCIBIOS_MIN_MEM 0 + +/* C-SKY shim does not initialize PCI bus */ +#define pcibios_assign_all_busses() 1 + +extern int isa_dma_bridge_buggy; + +#ifdef CONFIG_PCI +static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) +{ + /* no legacy IRQ on csky */ + return -ENODEV; +} + +static inline int pci_proc_domain(struct pci_bus *bus) +{ + /* always show the domain in /proc */ + return 1; +} +#endif /* CONFIG_PCI */ + +#endif /* __ASM_CSKY_PCI_H */ diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h index 4b2a41e15f2e..9b7764cb7645 100644 --- a/arch/csky/include/asm/pgtable.h +++ b/arch/csky/include/asm/pgtable.h @@ -5,6 +5,7 @@ #define __ASM_CSKY_PGTABLE_H #include <asm/fixmap.h> +#include <asm/memory.h> #include <asm/addrspace.h> #include <abi/pgtable-bits.h> #include <asm-generic/pgtable-nopmd.h> @@ -16,11 +17,6 @@ #define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) #define FIRST_USER_ADDRESS 0UL -#define PKMAP_BASE (0xff800000) - -#define VMALLOC_START (0xc0008000) -#define VMALLOC_END (PKMAP_BASE - 2*PAGE_SIZE) - /* * C-SKY is two-level paging structure: */ diff --git a/arch/csky/include/asm/stackprotector.h b/arch/csky/include/asm/stackprotector.h new file mode 100644 index 000000000000..d7cd4e51edd9 --- /dev/null +++ b/arch/csky/include/asm/stackprotector.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_STACKPROTECTOR_H +#define _ASM_STACKPROTECTOR_H 1 + +#include <linux/random.h> +#include <linux/version.h> + +extern unsigned long __stack_chk_guard; + +/* + * Initialize the stackprotector canary value. + * + * NOTE: this must only be called from functions that never return, + * and it must always be inlined. + */ +static __always_inline void boot_init_stack_canary(void) +{ + unsigned long canary; + + /* Try to get a semi random initial value. 
*/ + get_random_bytes(&canary, sizeof(canary)); + canary ^= LINUX_VERSION_CODE; + canary &= CANARY_MASK; + + current->stack_canary = canary; + __stack_chk_guard = current->stack_canary; +} + +#endif /* __ASM_SH_STACKPROTECTOR_H */ diff --git a/arch/csky/include/asm/tcm.h b/arch/csky/include/asm/tcm.h new file mode 100644 index 000000000000..2b135cefb73f --- /dev/null +++ b/arch/csky/include/asm/tcm.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_TCM_H +#define __ASM_CSKY_TCM_H + +#ifndef CONFIG_HAVE_TCM +#error "You should not be including tcm.h unless you have a TCM!" +#endif + +#include <linux/compiler.h> + +/* Tag variables with this */ +#define __tcmdata __section(.tcm.data) +/* Tag constants with this */ +#define __tcmconst __section(.tcm.rodata) +/* Tag functions inside TCM called from outside TCM with this */ +#define __tcmfunc __section(.tcm.text) noinline +/* Tag function inside TCM called from inside TCM with this */ +#define __tcmlocalfunc __section(.tcm.text) + +void *tcm_alloc(size_t len); +void tcm_free(void *addr, size_t len); + +#endif diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h index 211c983c7282..ba4018929733 100644 --- a/arch/csky/include/uapi/asm/unistd.h +++ b/arch/csky/include/uapi/asm/unistd.h @@ -1,7 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_CLONE3 #define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_TIME32_SYSCALLS #include <asm-generic/unistd.h> diff --git a/arch/csky/kernel/atomic.S b/arch/csky/kernel/atomic.S index 5b84f11485ae..3821ef9b7567 100644 --- a/arch/csky/kernel/atomic.S +++ b/arch/csky/kernel/atomic.S @@ -17,10 +17,12 @@ ENTRY(csky_cmpxchg) mfcr a3, epc addi a3, TRAP0_SIZE - subi sp, 8 + subi sp, 16 stw a3, (sp, 0) mfcr a3, epsr stw a3, (sp, 4) + mfcr a3, usp + stw a3, (sp, 8) psrset ee #ifdef CONFIG_CPU_HAS_LDSTEX @@ -47,7 +49,9 @@ ENTRY(csky_cmpxchg) mtcr a3, epc ldw a3, (sp, 4) mtcr a3, epsr - addi sp, 8 + ldw a3, (sp, 8) + mtcr a3, usp + addi sp, 16 KSPTOUSP rte END(csky_cmpxchg) diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c index f320d9248a22..f7b231ca269a 100644 --- a/arch/csky/kernel/process.c +++ b/arch/csky/kernel/process.c @@ -16,6 +16,12 @@ struct cpuinfo_csky cpu_data[NR_CPUS]; +#ifdef CONFIG_STACKPROTECTOR +#include <linux/stackprotector.h> +unsigned long __stack_chk_guard __read_mostly; +EXPORT_SYMBOL(__stack_chk_guard); +#endif + asmlinkage void ret_from_fork(void); asmlinkage void ret_from_kernel_thread(void); @@ -34,10 +40,11 @@ unsigned long thread_saved_pc(struct task_struct *tsk) return sw->r15; } -int copy_thread(unsigned long clone_flags, +int copy_thread_tls(unsigned long clone_flags, unsigned long usp, unsigned long kthread_arg, - struct task_struct *p) + struct task_struct *p, + unsigned long tls) { struct switch_stack *childstack; struct pt_regs *childregs = task_pt_regs(p); @@ -64,7 +71,7 @@ int copy_thread(unsigned long clone_flags, childregs->usp = usp; if (clone_flags & CLONE_SETTLS) task_thread_info(p)->tp_value = childregs->tls - = childregs->regs[0]; + = tls; childregs->a0 = 0; childstack->r15 = (unsigned long) ret_from_fork; diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c index 52eaf31ba27f..3821e55742f4 100644 --- a/arch/csky/kernel/setup.c +++ b/arch/csky/kernel/setup.c @@ -47,9 +47,6 @@ static void 
__init csky_memblock_init(void) signed long size; memblock_reserve(__pa(_stext), _end - _stext); -#ifdef CONFIG_BLK_DEV_INITRD - memblock_reserve(__pa(initrd_start), initrd_end - initrd_start); -#endif early_init_fdt_reserve_self(); early_init_fdt_scan_reserved_mem(); @@ -133,6 +130,8 @@ void __init setup_arch(char **cmdline_p) sparse_init(); + fixaddr_init(); + #ifdef CONFIG_HIGHMEM kmap_init(); #endif diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c index b753d382e4ce..0bb0954d5570 100644 --- a/arch/csky/kernel/smp.c +++ b/arch/csky/kernel/smp.c @@ -120,7 +120,7 @@ void __init setup_smp_ipi(void) int rc; if (ipi_irq == 0) - panic("%s IRQ mapping failed\n", __func__); + return; rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt", &ipi_dummy_dev); diff --git a/arch/csky/kernel/time.c b/arch/csky/kernel/time.c index b5fc9447d93f..52379d866fe4 100644 --- a/arch/csky/kernel/time.c +++ b/arch/csky/kernel/time.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. -#include <linux/clk-provider.h> #include <linux/clocksource.h> +#include <linux/of_clk.h> void __init time_init(void) { diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S index 2ff37beaf2bf..f05b413df328 100644 --- a/arch/csky/kernel/vmlinux.lds.S +++ b/arch/csky/kernel/vmlinux.lds.S @@ -2,6 +2,7 @@ #include <asm/vmlinux.lds.h> #include <asm/page.h> +#include <asm/memory.h> OUTPUT_ARCH(csky) ENTRY(_start) @@ -53,6 +54,54 @@ SECTIONS RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; +#ifdef CONFIG_HAVE_TCM + .tcm_start : { + . = ALIGN(PAGE_SIZE); + __tcm_start = .; + } + + .text_data_tcm FIXADDR_TCM : AT(__tcm_start) + { + . = ALIGN(4); + __stcm_text_data = .; + *(.tcm.text) + *(.tcm.rodata) +#ifndef CONFIG_HAVE_DTCM + *(.tcm.data) +#endif + . = ALIGN(4); + __etcm_text_data = .; + } + + . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_data_tcm); + +#ifdef CONFIG_HAVE_DTCM + #define ITCM_SIZE CONFIG_ITCM_NR_PAGES * PAGE_SIZE + + .dtcm_start : { + __dtcm_start = .; + } + + .data_tcm FIXADDR_TCM + ITCM_SIZE : AT(__dtcm_start) + { + . = ALIGN(4); + __stcm_data = .; + *(.tcm.data) + . = ALIGN(4); + __etcm_data = .; + } + + . = ADDR(.dtcm_start) + SIZEOF(.data_tcm); + + .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_tcm)) { +#else + .tcm_end : AT(ADDR(.tcm_start) + SIZEOF(.text_data_tcm)) { +#endif + . 
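In the linker-script additions here, AT(__tcm_start) separates each TCM section's load address (where it is stored in the flat kernel image) from its link address at FIXADDR_TCM (where it executes). A sketch of the boot-time consequence, using the symbols the script defines; the copy happens for real in tcm_mapping_init() further down, once the fixmap mapping is in place:

#include <linux/init.h>
#include <linux/string.h>

extern char __tcm_start[], __stcm_text_data[], __etcm_text_data[];

static void __init copy_tcm_sections(void)
{
	/* destination: link-time (execution) address inside the TCM
	 * fixmap; source: load address inside the kernel image */
	memcpy(__stcm_text_data, __tcm_start,
	       __etcm_text_data - __stcm_text_data);
}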
= ALIGN(PAGE_SIZE); + __tcm_end = .; + } +#endif + EXCEPTION_TABLE(L1_CACHE_BYTES) BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES) VBR_BASE diff --git a/arch/csky/mm/Makefile b/arch/csky/mm/Makefile index c94ef6481098..6e7696e55f71 100644 --- a/arch/csky/mm/Makefile +++ b/arch/csky/mm/Makefile @@ -1,8 +1,10 @@ # SPDX-License-Identifier: GPL-2.0-only ifeq ($(CONFIG_CPU_HAS_CACHEV2),y) obj-y += cachev2.o +CFLAGS_REMOVE_cachev2.o = $(CC_FLAGS_FTRACE) else obj-y += cachev1.o +CFLAGS_REMOVE_cachev1.o = $(CC_FLAGS_FTRACE) endif obj-y += dma-mapping.o @@ -14,3 +16,4 @@ obj-y += syscache.o obj-y += tlb.o obj-y += asid.o obj-y += context.o +obj-$(CONFIG_HAVE_TCM) += tcm.o diff --git a/arch/csky/mm/cachev1.c b/arch/csky/mm/cachev1.c index 494ec912abff..5a5a9804a0e3 100644 --- a/arch/csky/mm/cachev1.c +++ b/arch/csky/mm/cachev1.c @@ -94,6 +94,11 @@ void icache_inv_all(void) cache_op_all(INS_CACHE|CACHE_INV, 0); } +void local_icache_inv_all(void *priv) +{ + cache_op_all(INS_CACHE|CACHE_INV, 0); +} + void dcache_wb_range(unsigned long start, unsigned long end) { cache_op_range(start, end, DATA_CACHE|CACHE_CLR, 0); diff --git a/arch/csky/mm/cachev2.c b/arch/csky/mm/cachev2.c index b61be6518e21..bc419f8039d3 100644 --- a/arch/csky/mm/cachev2.c +++ b/arch/csky/mm/cachev2.c @@ -3,15 +3,25 @@ #include <linux/spinlock.h> #include <linux/smp.h> +#include <linux/mm.h> #include <asm/cache.h> #include <asm/barrier.h> -inline void dcache_wb_line(unsigned long start) +#define INS_CACHE (1 << 0) +#define CACHE_INV (1 << 4) + +void local_icache_inv_all(void *priv) { - asm volatile("dcache.cval1 %0\n"::"r"(start):"memory"); + mtcr("cr17", INS_CACHE|CACHE_INV); sync_is(); } +void icache_inv_all(void) +{ + on_each_cpu(local_icache_inv_all, NULL, 1); +} + +#ifdef CONFIG_CPU_HAS_ICACHE_INS void icache_inv_range(unsigned long start, unsigned long end) { unsigned long i = start & ~(L1_CACHE_BYTES - 1); @@ -20,43 +30,32 @@ void icache_inv_range(unsigned long start, unsigned long end) asm volatile("icache.iva %0\n"::"r"(i):"memory"); sync_is(); } - -void icache_inv_all(void) +#else +void icache_inv_range(unsigned long start, unsigned long end) { - asm volatile("icache.ialls\n":::"memory"); - sync_is(); + icache_inv_all(); } +#endif -void dcache_wb_range(unsigned long start, unsigned long end) +inline void dcache_wb_line(unsigned long start) { - unsigned long i = start & ~(L1_CACHE_BYTES - 1); - - for (; i < end; i += L1_CACHE_BYTES) - asm volatile("dcache.cval1 %0\n"::"r"(i):"memory"); + asm volatile("dcache.cval1 %0\n"::"r"(start):"memory"); sync_is(); } -void dcache_inv_range(unsigned long start, unsigned long end) +void dcache_wb_range(unsigned long start, unsigned long end) { unsigned long i = start & ~(L1_CACHE_BYTES - 1); for (; i < end; i += L1_CACHE_BYTES) - asm volatile("dcache.civa %0\n"::"r"(i):"memory"); + asm volatile("dcache.cval1 %0\n"::"r"(i):"memory"); sync_is(); } void cache_wbinv_range(unsigned long start, unsigned long end) { - unsigned long i = start & ~(L1_CACHE_BYTES - 1); - - for (; i < end; i += L1_CACHE_BYTES) - asm volatile("dcache.cval1 %0\n"::"r"(i):"memory"); - sync_is(); - - i = start & ~(L1_CACHE_BYTES - 1); - for (; i < end; i += L1_CACHE_BYTES) - asm volatile("icache.iva %0\n"::"r"(i):"memory"); - sync_is(); + dcache_wb_range(start, end); + icache_inv_range(start, end); } EXPORT_SYMBOL(cache_wbinv_range); diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c index 3317b774f6dc..813129145f3d 100644 --- a/arch/csky/mm/highmem.c +++ b/arch/csky/mm/highmem.c @@ -117,85 +117,29 @@ 
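The cachev1/cachev2 hunks above split icache invalidation into a per-CPU primitive plus a broadcast wrapper. The shape of that idiom, reduced to its essentials (the function names here are hypothetical):

#include <linux/smp.h>

static void local_inv(void *unused)
{
	/* per-CPU cache operation, e.g. mtcr("cr17", ...) on cachev2 */
}

static void global_inv(void)
{
	/* run local_inv() on every online CPU via IPI; the final 1
	 * makes the caller wait until all CPUs have finished */
	on_each_cpu(local_inv, NULL, 1);
}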
struct page *kmap_atomic_to_page(void *ptr) return pte_page(*pte); } -static void __init fixrange_init(unsigned long start, unsigned long end, - pgd_t *pgd_base) +static void __init kmap_pages_init(void) { -#ifdef CONFIG_HIGHMEM - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - int i, j, k; unsigned long vaddr; - - vaddr = start; - i = __pgd_offset(vaddr); - j = __pud_offset(vaddr); - k = __pmd_offset(vaddr); - pgd = pgd_base + i; - - for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { - pud = (pud_t *)pgd; - for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { - pmd = (pmd_t *)pud; - for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { - if (pmd_none(*pmd)) { - pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); - if (!pte) - panic("%s: Failed to allocate %lu bytes align=%lx\n", - __func__, PAGE_SIZE, - PAGE_SIZE); - - set_pmd(pmd, __pmd(__pa(pte))); - BUG_ON(pte != pte_offset_kernel(pmd, 0)); - } - vaddr += PMD_SIZE; - } - k = 0; - } - j = 0; - } -#endif -} - -void __init fixaddr_kmap_pages_init(void) -{ - unsigned long vaddr; - pgd_t *pgd_base; -#ifdef CONFIG_HIGHMEM pgd_t *pgd; pmd_t *pmd; pud_t *pud; pte_t *pte; -#endif - pgd_base = swapper_pg_dir; - - /* - * Fixed mappings: - */ - vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; - fixrange_init(vaddr, 0, pgd_base); - -#ifdef CONFIG_HIGHMEM - /* - * Permanent kmaps: - */ + vaddr = PKMAP_BASE; - fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); + fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir); pgd = swapper_pg_dir + __pgd_offset(vaddr); pud = (pud_t *)pgd; pmd = pmd_offset(pud, vaddr); pte = pte_offset_kernel(pmd, vaddr); pkmap_page_table = pte; -#endif } void __init kmap_init(void) { unsigned long vaddr; - fixaddr_kmap_pages_init(); + kmap_pages_init(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN); diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c index d4c2292ea46b..cb64d8647a78 100644 --- a/arch/csky/mm/init.c +++ b/arch/csky/mm/init.c @@ -19,6 +19,7 @@ #include <linux/swap.h> #include <linux/proc_fs.h> #include <linux/pfn.h> +#include <linux/initrd.h> #include <asm/setup.h> #include <asm/cachectl.h> @@ -31,10 +32,50 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss; +EXPORT_SYMBOL(invalid_pte_table); unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; EXPORT_SYMBOL(empty_zero_page); +#ifdef CONFIG_BLK_DEV_INITRD +static void __init setup_initrd(void) +{ + unsigned long size; + + if (initrd_start >= initrd_end) { + pr_err("initrd not found or empty"); + goto disable; + } + + if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { + pr_err("initrd extends beyond end of memory"); + goto disable; + } + + size = initrd_end - initrd_start; + + if (memblock_is_region_reserved(__pa(initrd_start), size)) { + pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region", + __pa(initrd_start), size); + goto disable; + } + + memblock_reserve(__pa(initrd_start), size); + + pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n", + (void *)(initrd_start), size); + + initrd_below_start_ok = 1; + + return; + +disable: + initrd_start = initrd_end = 0; + + pr_err(" - disabling initrd\n"); +} +#endif + void __init mem_init(void) { #ifdef CONFIG_HIGHMEM @@ -46,6 +87,10 @@ void __init mem_init(void) #endif high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); +#ifdef CONFIG_BLK_DEV_INITRD + setup_initrd(); +#endif + memblock_free_all(); #ifdef CONFIG_HIGHMEM @@ -101,3 +146,50 @@ void 
__init pre_mmu_init(void) /* Setup page mask to 4k */ write_mmu_pagemask(0); } + +void __init fixrange_init(unsigned long start, unsigned long end, + pgd_t *pgd_base) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + int i, j, k; + unsigned long vaddr; + + vaddr = start; + i = __pgd_offset(vaddr); + j = __pud_offset(vaddr); + k = __pmd_offset(vaddr); + pgd = pgd_base + i; + + for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { + pud = (pud_t *)pgd; + for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { + pmd = (pmd_t *)pud; + for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { + if (pmd_none(*pmd)) { + pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + if (!pte) + panic("%s: Failed to allocate %lu bytes align=%lx\n", + __func__, PAGE_SIZE, + PAGE_SIZE); + + set_pmd(pmd, __pmd(__pa(pte))); + BUG_ON(pte != pte_offset_kernel(pmd, 0)); + } + vaddr += PMD_SIZE; + } + k = 0; + } + j = 0; + } +} + +void __init fixaddr_init(void) +{ + unsigned long vaddr; + + vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; + fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir); +} diff --git a/arch/csky/mm/syscache.c b/arch/csky/mm/syscache.c index c4645e4e97f4..ffade2f9a4c8 100644 --- a/arch/csky/mm/syscache.c +++ b/arch/csky/mm/syscache.c @@ -3,7 +3,7 @@ #include <linux/syscalls.h> #include <asm/page.h> -#include <asm/cache.h> +#include <asm/cacheflush.h> #include <asm/cachectl.h> SYSCALL_DEFINE3(cacheflush, @@ -13,17 +13,14 @@ SYSCALL_DEFINE3(cacheflush, { switch (cache) { case ICACHE: - icache_inv_range((unsigned long)addr, - (unsigned long)addr + bytes); - break; + case BCACHE: + flush_icache_mm_range(current->mm, + (unsigned long)addr, + (unsigned long)addr + bytes); case DCACHE: dcache_wb_range((unsigned long)addr, (unsigned long)addr + bytes); break; - case BCACHE: - cache_wbinv_range((unsigned long)addr, - (unsigned long)addr + bytes); - break; default: return -EINVAL; } diff --git a/arch/csky/mm/tcm.c b/arch/csky/mm/tcm.c new file mode 100644 index 000000000000..ddeb36328819 --- /dev/null +++ b/arch/csky/mm/tcm.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/highmem.h> +#include <linux/genalloc.h> +#include <asm/tlbflush.h> +#include <asm/fixmap.h> + +#if (CONFIG_ITCM_RAM_BASE == 0xffffffff) +#error "You should define ITCM_RAM_BASE" +#endif + +#ifdef CONFIG_HAVE_DTCM +#if (CONFIG_DTCM_RAM_BASE == 0xffffffff) +#error "You should define DTCM_RAM_BASE" +#endif + +#if (CONFIG_DTCM_RAM_BASE == CONFIG_ITCM_RAM_BASE) +#error "You should define correct DTCM_RAM_BASE" +#endif +#endif + +extern char __tcm_start, __tcm_end, __dtcm_start; + +static struct gen_pool *tcm_pool; + +static void __init tcm_mapping_init(void) +{ + pte_t *tcm_pte; + unsigned long vaddr, paddr; + int i; + + paddr = CONFIG_ITCM_RAM_BASE; + + if (pfn_valid(PFN_DOWN(CONFIG_ITCM_RAM_BASE))) + goto panic; + +#ifndef CONFIG_HAVE_DTCM + for (i = 0; i < TCM_NR_PAGES; i++) { +#else + for (i = 0; i < CONFIG_ITCM_NR_PAGES; i++) { +#endif + vaddr = __fix_to_virt(FIX_TCM - i); + + tcm_pte = + pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr); + + set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL)); + + flush_tlb_one(vaddr); + + paddr = paddr + PAGE_SIZE; + } + +#ifdef CONFIG_HAVE_DTCM + if (pfn_valid(PFN_DOWN(CONFIG_DTCM_RAM_BASE))) + goto panic; + + paddr = CONFIG_DTCM_RAM_BASE; + + for (i = 0; i < CONFIG_DTCM_NR_PAGES; i++) { + vaddr = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES - i); + + tcm_pte = + pte_offset_kernel((pmd_t *) pgd_offset_k(vaddr), 
vaddr); + + set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL)); + + flush_tlb_one(vaddr); + + paddr = paddr + PAGE_SIZE; + } +#endif + +#ifndef CONFIG_HAVE_DTCM + memcpy((void *)__fix_to_virt(FIX_TCM), + &__tcm_start, &__tcm_end - &__tcm_start); + + pr_info("%s: mapping tcm va:0x%08lx to pa:0x%08x\n", + __func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE); + + pr_info("%s: __tcm_start va:0x%08lx size:%d\n", + __func__, (unsigned long)&__tcm_start, &__tcm_end - &__tcm_start); +#else + memcpy((void *)__fix_to_virt(FIX_TCM), + &__tcm_start, &__dtcm_start - &__tcm_start); + + pr_info("%s: mapping itcm va:0x%08lx to pa:0x%08x\n", + __func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE); + + pr_info("%s: __itcm_start va:0x%08lx size:%d\n", + __func__, (unsigned long)&__tcm_start, &__dtcm_start - &__tcm_start); + + memcpy((void *)__fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES), + &__dtcm_start, &__tcm_end - &__dtcm_start); + + pr_info("%s: mapping dtcm va:0x%08lx to pa:0x%08x\n", + __func__, __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES), + CONFIG_DTCM_RAM_BASE); + + pr_info("%s: __dtcm_start va:0x%08lx size:%d\n", + __func__, (unsigned long)&__dtcm_start, &__tcm_end - &__dtcm_start); + +#endif + return; +panic: + panic("TCM init error"); +} + +void *tcm_alloc(size_t len) +{ + unsigned long vaddr; + + if (!tcm_pool) + return NULL; + + vaddr = gen_pool_alloc(tcm_pool, len); + if (!vaddr) + return NULL; + + return (void *) vaddr; +} +EXPORT_SYMBOL(tcm_alloc); + +void tcm_free(void *addr, size_t len) +{ + gen_pool_free(tcm_pool, (unsigned long) addr, len); +} +EXPORT_SYMBOL(tcm_free); + +static int __init tcm_setup_pool(void) +{ +#ifndef CONFIG_HAVE_DTCM + u32 pool_size = (u32) (TCM_NR_PAGES * PAGE_SIZE) + - (u32) (&__tcm_end - &__tcm_start); + + u32 tcm_pool_start = __fix_to_virt(FIX_TCM) + + (u32) (&__tcm_end - &__tcm_start); +#else + u32 pool_size = (u32) (CONFIG_DTCM_NR_PAGES * PAGE_SIZE) + - (u32) (&__tcm_end - &__dtcm_start); + + u32 tcm_pool_start = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES) + + (u32) (&__tcm_end - &__dtcm_start); +#endif + int ret; + + tcm_pool = gen_pool_create(2, -1); + + ret = gen_pool_add(tcm_pool, tcm_pool_start, pool_size, -1); + if (ret) { + pr_err("%s: gen_pool add failed!\n", __func__); + return ret; + } + + pr_info("%s: Added %d bytes @ 0x%08x to memory pool\n", + __func__, pool_size, tcm_pool_start); + + return 0; +} + +static int __init tcm_init(void) +{ + tcm_mapping_init(); + + tcm_setup_pool(); + + return 0; +} +arch_initcall(tcm_init); diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi index 5accda2767be..a3301bab9231 100644 --- a/arch/mips/boot/dts/ingenic/jz4740.dtsi +++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <dt-bindings/clock/jz4740-cgu.h> +#include <dt-bindings/clock/ingenic,tcu.h> / { #address-cells = <1>; @@ -45,14 +46,6 @@ #clock-cells = <1>; }; - watchdog: watchdog@10002000 { - compatible = "ingenic,jz4740-watchdog"; - reg = <0x10002000 0x10>; - - clocks = <&cgu JZ4740_CLK_RTC>; - clock-names = "rtc"; - }; - tcu: timer@10002000 { compatible = "ingenic,jz4740-tcu", "simple-mfd"; reg = <0x10002000 0x1000>; @@ -73,6 +66,14 @@ interrupt-parent = <&intc>; interrupts = <23 22 21>; + + watchdog: watchdog@0 { + compatible = "ingenic,jz4740-watchdog"; + reg = <0x0 0xc>; + + clocks = <&tcu TCU_CLK_WDT>; + clock-names = "wdt"; + }; }; rtc_dev: rtc@10003000 { diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi 
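tcm_alloc()/tcm_free() in the tcm.c code above hand out whatever TCM space is left after the linked-in .tcm.* sections, backed by a gen_pool with 4-byte granularity; statically placed objects use the __tcmdata/__tcmfunc tags from tcm.h instead. A hypothetical consumer, to show the intended usage (scratch_init, scratch, and the 512-byte size are invented for the example):

#include <linux/init.h>
#include <asm/tcm.h>

static void *scratch;

static int __init scratch_init(void)
{
	scratch = tcm_alloc(512);	/* fast, fixed-latency memory */
	if (!scratch)
		return -ENOMEM;		/* pool exhausted or absent */
	return 0;
}

static void scratch_exit(void)
{
	tcm_free(scratch, 512);		/* caller remembers the size */
}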
b/arch/mips/boot/dts/ingenic/jz4780.dtsi index f928329b034b..bb89653d16a3 100644 --- a/arch/mips/boot/dts/ingenic/jz4780.dtsi +++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <dt-bindings/clock/jz4780-cgu.h> +#include <dt-bindings/clock/ingenic,tcu.h> #include <dt-bindings/dma/jz4780-dma.h> / { @@ -67,6 +68,14 @@ interrupt-parent = <&intc>; interrupts = <27 26 25>; + + watchdog: watchdog@0 { + compatible = "ingenic,jz4780-watchdog"; + reg = <0x0 0xc>; + + clocks = <&tcu TCU_CLK_WDT>; + clock-names = "wdt"; + }; }; rtc_dev: rtc@10003000 { @@ -348,14 +357,6 @@ status = "disabled"; }; - watchdog: watchdog@10002000 { - compatible = "ingenic,jz4780-watchdog"; - reg = <0x10002000 0x10>; - - clocks = <&cgu JZ4780_CLK_RTCLK>; - clock-names = "rtc"; - }; - nemc: nemc@13410000 { compatible = "ingenic,jz4780-nemc"; reg = <0x13410000 0x10000>; diff --git a/arch/mips/boot/dts/ingenic/x1000.dtsi b/arch/mips/boot/dts/ingenic/x1000.dtsi index 4994c695a1a7..147f7d5c243a 100644 --- a/arch/mips/boot/dts/ingenic/x1000.dtsi +++ b/arch/mips/boot/dts/ingenic/x1000.dtsi @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#include <dt-bindings/clock/ingenic,tcu.h> #include <dt-bindings/clock/x1000-cgu.h> #include <dt-bindings/dma/x1000-dma.h> @@ -72,7 +73,7 @@ compatible = "ingenic,x1000-watchdog", "ingenic,jz4780-watchdog"; reg = <0x0 0x10>; - clocks = <&cgu X1000_CLK_RTCLK>; + clocks = <&tcu TCU_CLK_WDT>; clock-names = "wdt"; }; }; @@ -158,7 +159,6 @@ i2c0: i2c-controller@10050000 { compatible = "ingenic,x1000-i2c"; reg = <0x10050000 0x1000>; - #address-cells = <1>; #size-cells = <0>; @@ -173,7 +173,6 @@ i2c1: i2c-controller@10051000 { compatible = "ingenic,x1000-i2c"; reg = <0x10051000 0x1000>; - #address-cells = <1>; #size-cells = <0>; @@ -188,7 +187,6 @@ i2c2: i2c-controller@10052000 { compatible = "ingenic,x1000-i2c"; reg = <0x10052000 0x1000>; - #address-cells = <1>; #size-cells = <0>; diff --git a/arch/mips/include/asm/sync.h b/arch/mips/include/asm/sync.h index 7c6a1095f556..aabd097933fe 100644 --- a/arch/mips/include/asm/sync.h +++ b/arch/mips/include/asm/sync.h @@ -155,9 +155,11 @@ * effective barrier as noted by commit 6b07d38aaa52 ("MIPS: Octeon: Use * optimized memory barrier primitives."). Here we specify that the affected * sync instructions should be emitted twice. + * Note that this expression is evaluated by the assembler (not the compiler), + * and that the assembler evaluates '==' as 0 or -1, not 0 or 1. 
*/ #ifdef CONFIG_CPU_CAVIUM_OCTEON -# define __SYNC_rpt(type) (1 + (type == __SYNC_wmb)) +# define __SYNC_rpt(type) (1 - (type == __SYNC_wmb)) #else # define __SYNC_rpt(type) 1 #endif diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 6176b9acba95..d0d832ab3d3b 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -134,7 +134,7 @@ void release_vpe(struct vpe *v) { list_del(&v->list); if (v->load_addr) - release_progmem(v); + release_progmem(v->load_addr); kfree(v); } diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index aa89a41dc5dd..d7fe8408603e 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -33,6 +33,7 @@ endif cflags-vdso := $(ccflags-vdso) \ $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \ -O3 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \ + -mrelax-pic-calls $(call cc-option, -mexplicit-relocs) \ -fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \ $(call cc-option, -fno-asynchronous-unwind-tables) \ $(call cc-option, -fno-stack-protector) @@ -51,6 +52,8 @@ endif CFLAGS_REMOVE_vgettimeofday.o = -pg +DISABLE_VDSO := n + # # For the pre-R6 code in arch/mips/vdso/vdso.h for locating # the base address of VDSO, the linker will emit a R_MIPS_PC32 @@ -64,11 +67,24 @@ CFLAGS_REMOVE_vgettimeofday.o = -pg ifndef CONFIG_CPU_MIPSR6 ifeq ($(call ld-ifversion, -lt, 225000000, y),y) $(warning MIPS VDSO requires binutils >= 2.25) - obj-vdso-y := $(filter-out vgettimeofday.o, $(obj-vdso-y)) - ccflags-vdso += -DDISABLE_MIPS_VDSO + DISABLE_VDSO := y endif endif +# +# GCC (at least up to version 9.2) appears to emit function calls that make use +# of the GOT when targeting microMIPS, which we can't use in the VDSO due to +# the lack of relocations. As such, we disable the VDSO for microMIPS builds. +# +ifdef CONFIG_CPU_MICROMIPS + DISABLE_VDSO := y +endif + +ifeq ($(DISABLE_VDSO),y) + obj-vdso-y := $(filter-out vgettimeofday.o, $(obj-vdso-y)) + ccflags-vdso += -DDISABLE_MIPS_VDSO +endif + # VDSO linker flags. VDSO_LDFLAGS := \ -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 \ @@ -81,12 +97,18 @@ GCOV_PROFILE := n UBSAN_SANITIZE := n KCOV_INSTRUMENT := n +# Check that we don't have PIC 'jalr t9' calls left +quiet_cmd_vdso_mips_check = VDSOCHK $@ + cmd_vdso_mips_check = if $(OBJDUMP) --disassemble $@ | egrep -h "jalr.*t9" > /dev/null; \ + then (echo >&2 "$@: PIC 'jalr t9' calls are not supported"; \ + rm -f $@; /bin/false); fi + # # Shared build commands. # quiet_cmd_vdsold_and_vdso_check = LD $@ - cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check) + cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check); $(cmd_vdso_mips_check) quiet_cmd_vdsold = VDSO $@ cmd_vdsold = $(CC) $(c_flags) $(VDSO_LDFLAGS) \ diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 86332080399a..080a0bf8e54b 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -295,8 +295,13 @@ static inline bool pfn_valid(unsigned long pfn) /* * Some number of bits at the level of the page table that points to * a hugepte are used to encode the size. This masks those bits. + * On 8xx, HW assistance requires 4k alignment for the hugepte. 
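The __SYNC_rpt() change above hinges on the comment it adds: GAS evaluates == to 0 or -1, so in Octeon's wmb case the repeat count must be 1 - (-1) = 2, while the previous 1 + (-1) collapsed to 0 and emitted no sync at all. A small compile-time demonstration of that assembler arithmetic (a MIPS target is assumed, since the instruction is sync):

void demo(void)
{
	/* the .rept count is evaluated by the assembler: X == Y is -1
	 * when true, so 1 - (X == Y) expands the body twice */
	asm volatile(
		".equ	X, 1\n\t"
		".equ	Y, 1\n\t"
		".rept	1 - (X == Y)\n\t"
		"sync\n\t"
		".endr");
}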
*/ +#ifdef CONFIG_PPC_8xx +#define HUGEPD_SHIFT_MASK 0xfff +#else #define HUGEPD_SHIFT_MASK 0x3f +#endif #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 8387698bd5b6..eedcbfb9a6ff 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -168,6 +168,10 @@ struct thread_struct { unsigned long srr1; unsigned long dar; unsigned long dsisr; +#ifdef CONFIG_PPC_BOOK3S_32 + unsigned long r0, r3, r4, r5, r6, r8, r9, r11; + unsigned long lr, ctr; +#endif #endif /* Debug Registers */ struct debug_reg debug; diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index c25e562f1cd9..fcf24a365fc0 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -132,6 +132,18 @@ int main(void) OFFSET(SRR1, thread_struct, srr1); OFFSET(DAR, thread_struct, dar); OFFSET(DSISR, thread_struct, dsisr); +#ifdef CONFIG_PPC_BOOK3S_32 + OFFSET(THR0, thread_struct, r0); + OFFSET(THR3, thread_struct, r3); + OFFSET(THR4, thread_struct, r4); + OFFSET(THR5, thread_struct, r5); + OFFSET(THR6, thread_struct, r6); + OFFSET(THR8, thread_struct, r8); + OFFSET(THR9, thread_struct, r9); + OFFSET(THR11, thread_struct, r11); + OFFSET(THLR, thread_struct, lr); + OFFSET(THCTR, thread_struct, ctr); +#endif #endif #ifdef CONFIG_SPE OFFSET(THREAD_EVR0, thread_struct, evr[0]); diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index a1eaffe868de..7b048cee767c 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -1184,6 +1184,17 @@ void eeh_handle_special_event(void) eeh_pe_state_mark(pe, EEH_PE_RECOVERING); eeh_handle_normal_event(pe); } else { + eeh_for_each_pe(pe, tmp_pe) + eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev) + edev->mode &= ~EEH_DEV_NO_HANDLER; + + /* Notify all devices to be down */ + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true); + eeh_set_channel_state(pe, pci_channel_io_perm_failure); + eeh_pe_report( + "error_detected(permanent failure)", pe, + eeh_report_failure, NULL); + pci_lock_rescan_remove(); list_for_each_entry(hose, &hose_list, list_node) { phb_pe = eeh_phb_pe_get(hose); @@ -1192,16 +1203,6 @@ void eeh_handle_special_event(void) (phb_pe->state & EEH_PE_RECOVERING)) continue; - eeh_for_each_pe(pe, tmp_pe) - eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev) - edev->mode &= ~EEH_DEV_NO_HANDLER; - - /* Notify all devices to be down */ - eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true); - eeh_set_channel_state(pe, pci_channel_io_perm_failure); - eeh_pe_report( - "error_detected(permanent failure)", pe, - eeh_report_failure, NULL); bus = eeh_pe_bus_get(phb_pe); if (!bus) { pr_err("%s: Cannot find PCI bus for " diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 0713daa651d9..16af0d8d90a8 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -783,7 +783,7 @@ fast_exception_return: 1: lis r3,exc_exit_restart_end@ha addi r3,r3,exc_exit_restart_end@l cmplw r12,r3 -#if CONFIG_PPC_BOOK3S_601 +#ifdef CONFIG_PPC_BOOK3S_601 bge 2b #else bge 3f @@ -791,7 +791,7 @@ fast_exception_return: lis r4,exc_exit_restart@ha addi r4,r4,exc_exit_restart@l cmplw r12,r4 -#if CONFIG_PPC_BOOK3S_601 +#ifdef CONFIG_PPC_BOOK3S_601 blt 2b #else blt 3f @@ -1354,12 +1354,17 @@ _GLOBAL(enter_rtas) mtspr SPRN_SRR0,r8 mtspr SPRN_SRR1,r9 RFI -1: tophys(r9,r1) +1: tophys_novmstack r9, r1 +#ifdef CONFIG_VMAP_STACK + li r0, MSR_KERNEL & ~MSR_IR /* can take DTLB miss */ + mtmsr r0 + 
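Among the entry_32.S fixes above, #if CONFIG_PPC_BOOK3S_601 becomes #ifdef. For a boolean Kconfig symbol the two happen to select the same code, because an unset option leaves the macro undefined and the preprocessor treats an undefined identifier in #if as 0; but the kernel builds with -Wundef, so the #if form warns on every configuration that leaves the option unset. A standalone illustration (CONFIG_FOO is a stand-in name):

#include <stdio.h>

/* CONFIG_FOO deliberately left undefined, as Kconfig does for =n */
#if CONFIG_FOO		/* silently becomes "#if 0"; -Wundef warns here */
#define PATH "foo enabled"
#else
#define PATH "foo disabled"
#endif

int main(void)
{
	puts(PATH);	/* prints "foo disabled" */
	return 0;
}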
isync +#endif lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */ lwz r9,8(r9) /* original msr value */ addi r1,r1,INT_FRAME_SIZE li r0,0 - tophys(r7, r2) + tophys_novmstack r7, r2 stw r0, THREAD + RTAS_SP(r7) mtspr SPRN_SRR0,r8 mtspr SPRN_SRR1,r9 diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 0493fcac6409..97c887950c3c 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -290,17 +290,55 @@ MachineCheck: 7: EXCEPTION_PROLOG_2 addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_PPC_CHRP - bne cr1,1f +#ifdef CONFIG_VMAP_STACK + mfspr r4, SPRN_SPRG_THREAD + tovirt(r4, r4) + lwz r4, RTAS_SP(r4) + cmpwi cr1, r4, 0 #endif - EXC_XFER_STD(0x200, machine_check_exception) -#ifdef CONFIG_PPC_CHRP -1: b machine_check_in_rtas + beq cr1, machine_check_tramp + b machine_check_in_rtas +#else + b machine_check_tramp #endif /* Data access exception. */ . = 0x300 DO_KVM 0x300 DataAccess: +#ifdef CONFIG_VMAP_STACK + mtspr SPRN_SPRG_SCRATCH0,r10 + mfspr r10, SPRN_SPRG_THREAD +BEGIN_MMU_FTR_SECTION + stw r11, THR11(r10) + mfspr r10, SPRN_DSISR + mfcr r11 +#ifdef CONFIG_PPC_KUAP + andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h +#else + andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h +#endif + mfspr r10, SPRN_SPRG_THREAD + beq hash_page_dsi +.Lhash_page_dsi_cont: + mtcr r11 + lwz r11, THR11(r10) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) + mtspr SPRN_SPRG_SCRATCH1,r11 + mfspr r11, SPRN_DAR + stw r11, DAR(r10) + mfspr r11, SPRN_DSISR + stw r11, DSISR(r10) + mfspr r11, SPRN_SRR0 + stw r11, SRR0(r10) + mfspr r11, SPRN_SRR1 /* check whether user or kernel */ + stw r11, SRR1(r10) + mfcr r10 + andi. r11, r11, MSR_PR + + EXCEPTION_PROLOG_1 + b handle_page_fault_tramp_1 +#else /* CONFIG_VMAP_STACK */ EXCEPTION_PROLOG handle_dar_dsisr=1 get_and_save_dar_dsisr_on_stack r4, r5, r11 BEGIN_MMU_FTR_SECTION @@ -316,11 +354,32 @@ BEGIN_MMU_FTR_SECTION FTR_SECTION_ELSE b handle_page_fault_tramp_2 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE) +#endif /* CONFIG_VMAP_STACK */ /* Instruction access exception. */ . = 0x400 DO_KVM 0x400 InstructionAccess: +#ifdef CONFIG_VMAP_STACK + mtspr SPRN_SPRG_SCRATCH0,r10 + mtspr SPRN_SPRG_SCRATCH1,r11 + mfspr r10, SPRN_SPRG_THREAD + mfspr r11, SPRN_SRR0 + stw r11, SRR0(r10) + mfspr r11, SPRN_SRR1 /* check whether user or kernel */ + stw r11, SRR1(r10) + mfcr r10 +BEGIN_MMU_FTR_SECTION + andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */ + bne hash_page_isi +.Lhash_page_isi_cont: + mfspr r11, SPRN_SRR1 /* check whether user or kernel */ +END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) + andi. r11, r11, MSR_PR + + EXCEPTION_PROLOG_1 + EXCEPTION_PROLOG_2 +#else /* CONFIG_VMAP_STACK */ EXCEPTION_PROLOG andis. r0,r9,SRR1_ISI_NOPT@h /* no pte found? */ beq 1f /* if so, try to put a PTE */ @@ -329,6 +388,7 @@ InstructionAccess: BEGIN_MMU_FTR_SECTION bl hash_page END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) +#endif /* CONFIG_VMAP_STACK */ 1: mr r4,r12 andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */ stw r4, _DAR(r11) @@ -344,7 +404,7 @@ Alignment: EXCEPTION_PROLOG handle_dar_dsisr=1 save_dar_dsisr_on_stack r4, r5, r11 addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_STD(0x600, alignment_exception) + b alignment_exception_tramp /* Program check exception */ EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD) @@ -645,15 +705,100 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU) . 
= 0x3000 +machine_check_tramp: + EXC_XFER_STD(0x200, machine_check_exception) + +alignment_exception_tramp: + EXC_XFER_STD(0x600, alignment_exception) + handle_page_fault_tramp_1: +#ifdef CONFIG_VMAP_STACK + EXCEPTION_PROLOG_2 handle_dar_dsisr=1 +#endif lwz r4, _DAR(r11) lwz r5, _DSISR(r11) /* fall through */ handle_page_fault_tramp_2: EXC_XFER_LITE(0x300, handle_page_fault) +#ifdef CONFIG_VMAP_STACK +.macro save_regs_thread thread + stw r0, THR0(\thread) + stw r3, THR3(\thread) + stw r4, THR4(\thread) + stw r5, THR5(\thread) + stw r6, THR6(\thread) + stw r8, THR8(\thread) + stw r9, THR9(\thread) + mflr r0 + stw r0, THLR(\thread) + mfctr r0 + stw r0, THCTR(\thread) +.endm + +.macro restore_regs_thread thread + lwz r0, THLR(\thread) + mtlr r0 + lwz r0, THCTR(\thread) + mtctr r0 + lwz r0, THR0(\thread) + lwz r3, THR3(\thread) + lwz r4, THR4(\thread) + lwz r5, THR5(\thread) + lwz r6, THR6(\thread) + lwz r8, THR8(\thread) + lwz r9, THR9(\thread) +.endm + +hash_page_dsi: + save_regs_thread r10 + mfdsisr r3 + mfdar r4 + mfsrr0 r5 + mfsrr1 r9 + rlwinm r3, r3, 32 - 15, _PAGE_RW /* DSISR_STORE -> _PAGE_RW */ + bl hash_page + mfspr r10, SPRN_SPRG_THREAD + restore_regs_thread r10 + b .Lhash_page_dsi_cont + +hash_page_isi: + mr r11, r10 + mfspr r10, SPRN_SPRG_THREAD + save_regs_thread r10 + li r3, 0 + lwz r4, SRR0(r10) + lwz r9, SRR1(r10) + bl hash_page + mfspr r10, SPRN_SPRG_THREAD + restore_regs_thread r10 + mr r10, r11 + b .Lhash_page_isi_cont + + .globl fast_hash_page_return +fast_hash_page_return: + andis. r10, r9, SRR1_ISI_NOPT@h /* Set on ISI, cleared on DSI */ + mfspr r10, SPRN_SPRG_THREAD + restore_regs_thread r10 + bne 1f + + /* DSI */ + mtcr r11 + lwz r11, THR11(r10) + mfspr r10, SPRN_SPRG_SCRATCH0 + SYNC + RFI + +1: /* ISI */ + mtcr r11 + mfspr r11, SPRN_SPRG_SCRATCH1 + mfspr r10, SPRN_SPRG_SCRATCH0 + SYNC + RFI + stack_overflow: vmap_stack_overflow_exception +#endif AltiVecUnavailable: EXCEPTION_PROLOG diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h index a6a5fbbf8504..9db162f79fe6 100644 --- a/arch/powerpc/kernel/head_32.h +++ b/arch/powerpc/kernel/head_32.h @@ -64,11 +64,25 @@ .endm .macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0 +#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S) +BEGIN_MMU_FTR_SECTION + mtcr r10 +FTR_SECTION_ELSE + stw r10, _CCR(r11) +ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE) +#else stw r10,_CCR(r11) /* save registers */ +#endif + mfspr r10, SPRN_SPRG_SCRATCH0 stw r12,GPR12(r11) stw r9,GPR9(r11) - mfspr r10,SPRN_SPRG_SCRATCH0 stw r10,GPR10(r11) +#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S) +BEGIN_MMU_FTR_SECTION + mfcr r10 + stw r10, _CCR(r11) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) +#endif mfspr r12,SPRN_SPRG_SCRATCH1 stw r12,GPR11(r11) mflr r10 @@ -83,6 +97,11 @@ stw r10, _DSISR(r11) .endif lwz r9, SRR1(r12) +#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S) +BEGIN_MMU_FTR_SECTION + andi. r10, r9, MSR_PR +END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) +#endif lwz r12, SRR0(r12) #else mfspr r12,SPRN_SRR0 diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 9922306ae512..073a651787df 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -256,7 +256,7 @@ InstructionTLBMiss: * set. All other Linux PTE bits control the behavior * of the MMU. 
*/ - rlwimi r10, r10, 0, 0x0f00 /* Clear bits 20-23 */ + rlwinm r10, r10, 0, ~0x0f00 /* Clear bits 20-23 */ rlwimi r10, r10, 4, 0x0400 /* Copy _PAGE_EXEC into bit 21 */ ori r10, r10, RPN_PATTERN | 0x200 /* Set 22 and 24-27 */ mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S index 0ffdd18b9f26..433d97bea1f3 100644 --- a/arch/powerpc/kernel/idle_6xx.S +++ b/arch/powerpc/kernel/idle_6xx.S @@ -166,7 +166,11 @@ BEGIN_FTR_SECTION mfspr r9,SPRN_HID0 andis. r9,r9,HID0_NAP@h beq 1f +#ifdef CONFIG_VMAP_STACK + addis r9, r11, nap_save_msscr0@ha +#else addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha +#endif lwz r9,nap_save_msscr0@l(r9) mtspr SPRN_MSSCR0, r9 sync @@ -174,7 +178,11 @@ BEGIN_FTR_SECTION 1: END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR) BEGIN_FTR_SECTION +#ifdef CONFIG_VMAP_STACK + addis r9, r11, nap_save_hid1@ha +#else addis r9,r11,(nap_save_hid1-KERNELBASE)@ha +#endif lwz r9,nap_save_hid1@l(r9) mtspr SPRN_HID1, r9 END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX) diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index e6c30cee6abf..d215f9554553 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -200,14 +200,27 @@ unsigned long get_tm_stackpointer(struct task_struct *tsk) * normal/non-checkpointed stack pointer. */ + unsigned long ret = tsk->thread.regs->gpr[1]; + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM BUG_ON(tsk != current); if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) { + preempt_disable(); tm_reclaim_current(TM_CAUSE_SIGNAL); if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr)) - return tsk->thread.ckpt_regs.gpr[1]; + ret = tsk->thread.ckpt_regs.gpr[1]; + + /* + * If we treclaim, we must clear the current thread's TM bits + * before re-enabling preemption. Otherwise we might be + * preempted and have the live MSR[TS] changed behind our back + * (tm_recheckpoint_new_task() would recheckpoint). Besides, we + * enter the signal handler in non-transactional state. + */ + tsk->thread.regs->msr &= ~MSR_TS_MASK; + preempt_enable(); } #endif - return tsk->thread.regs->gpr[1]; + return ret; } diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 98600b276f76..1b090a76b444 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -489,19 +489,11 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, */ static int save_tm_user_regs(struct pt_regs *regs, struct mcontext __user *frame, - struct mcontext __user *tm_frame, int sigret) + struct mcontext __user *tm_frame, int sigret, + unsigned long msr) { - unsigned long msr = regs->msr; - WARN_ON(tm_suspend_disabled); - /* Remove TM bits from thread's MSR. The MSR in the sigcontext - * just indicates to userland that we were doing a transaction, but we - * don't want to return in transactional state. This also ensures - * that flush_fp_to_thread won't set TIF_RESTORE_TM again. 
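The head_8xx.S ITLB fix above replaces rlwimi with rlwinm: rotating a register into itself by zero and inserting under a mask is a no-op, so the old instruction never cleared anything. In C terms (bits 20-23 in PowerPC's MSB-first numbering are mask 0x0f00):

static inline unsigned int clear_bits_20_23(unsigned int r10)
{
	/* what "rlwinm r10, r10, 0, ~0x0f00" computes; the old
	 * "rlwimi r10, r10, 0, 0x0f00" merged r10 into itself under
	 * the mask and left r10 unchanged */
	return r10 & ~0x0f00u;
}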
- */ - regs->msr &= ~MSR_TS_MASK; - /* Save both sets of general registers */ if (save_general_regs(¤t->thread.ckpt_regs, frame) || save_general_regs(regs, tm_frame)) @@ -912,6 +904,10 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset, int sigret; unsigned long tramp; struct pt_regs *regs = tsk->thread.regs; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* Save the thread's msr before get_tm_stackpointer() changes it */ + unsigned long msr = regs->msr; +#endif BUG_ON(tsk != current); @@ -944,13 +940,13 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset, #ifdef CONFIG_PPC_TRANSACTIONAL_MEM tm_frame = &rt_sf->uc_transact.uc_mcontext; - if (MSR_TM_ACTIVE(regs->msr)) { + if (MSR_TM_ACTIVE(msr)) { if (__put_user((unsigned long)&rt_sf->uc_transact, &rt_sf->uc.uc_link) || __put_user((unsigned long)tm_frame, &rt_sf->uc_transact.uc_regs)) goto badframe; - if (save_tm_user_regs(regs, frame, tm_frame, sigret)) + if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr)) goto badframe; } else @@ -1369,6 +1365,10 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset, int sigret; unsigned long tramp; struct pt_regs *regs = tsk->thread.regs; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* Save the thread's msr before get_tm_stackpointer() changes it */ + unsigned long msr = regs->msr; +#endif BUG_ON(tsk != current); @@ -1402,9 +1402,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset, #ifdef CONFIG_PPC_TRANSACTIONAL_MEM tm_mctx = &frame->mctx_transact; - if (MSR_TM_ACTIVE(regs->msr)) { + if (MSR_TM_ACTIVE(msr)) { if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact, - sigret)) + sigret, msr)) goto badframe; } else diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 117515564ec7..84ed2e77ef9c 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -192,7 +192,8 @@ static long setup_sigcontext(struct sigcontext __user *sc, static long setup_tm_sigcontexts(struct sigcontext __user *sc, struct sigcontext __user *tm_sc, struct task_struct *tsk, - int signr, sigset_t *set, unsigned long handler) + int signr, sigset_t *set, unsigned long handler, + unsigned long msr) { /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the * process never used altivec yet (MSR_VEC is zero in pt_regs of @@ -207,12 +208,11 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc, elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc); #endif struct pt_regs *regs = tsk->thread.regs; - unsigned long msr = tsk->thread.regs->msr; long err = 0; BUG_ON(tsk != current); - BUG_ON(!MSR_TM_ACTIVE(regs->msr)); + BUG_ON(!MSR_TM_ACTIVE(msr)); WARN_ON(tm_suspend_disabled); @@ -222,13 +222,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc, */ msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX); - /* Remove TM bits from thread's MSR. The MSR in the sigcontext - * just indicates to userland that we were doing a transaction, but we - * don't want to return in transactional state. This also ensures - * that flush_fp_to_thread won't set TIF_RESTORE_TM again. 
- */ - regs->msr &= ~MSR_TS_MASK; - #ifdef CONFIG_ALTIVEC err |= __put_user(v_regs, &sc->v_regs); err |= __put_user(tm_v_regs, &tm_sc->v_regs); @@ -824,6 +817,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, unsigned long newsp = 0; long err = 0; struct pt_regs *regs = tsk->thread.regs; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* Save the thread's msr before get_tm_stackpointer() changes it */ + unsigned long msr = regs->msr; +#endif BUG_ON(tsk != current); @@ -841,7 +838,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, err |= __put_user(0, &frame->uc.uc_flags); err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM - if (MSR_TM_ACTIVE(regs->msr)) { + if (MSR_TM_ACTIVE(msr)) { /* The ucontext_t passed to userland points to the second * ucontext_t (for transactional state) with its uc_link ptr. */ @@ -849,7 +846,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext, &frame->uc_transact.uc_mcontext, tsk, ksig->sig, NULL, - (unsigned long)ksig->ka.sa.sa_handler); + (unsigned long)ksig->ka.sa.sa_handler, + msr); } else #endif { diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S index c11b0a005196..2015c4f96238 100644 --- a/arch/powerpc/mm/book3s32/hash_low.S +++ b/arch/powerpc/mm/book3s32/hash_low.S @@ -25,12 +25,6 @@ #include <asm/feature-fixups.h> #include <asm/code-patching-asm.h> -#ifdef CONFIG_VMAP_STACK -#define ADDR_OFFSET 0 -#else -#define ADDR_OFFSET PAGE_OFFSET -#endif - #ifdef CONFIG_SMP .section .bss .align 2 @@ -53,8 +47,8 @@ mmu_hash_lock: .text _GLOBAL(hash_page) #ifdef CONFIG_SMP - lis r8, (mmu_hash_lock - ADDR_OFFSET)@h - ori r8, r8, (mmu_hash_lock - ADDR_OFFSET)@l + lis r8, (mmu_hash_lock - PAGE_OFFSET)@h + ori r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l lis r0,0x0fff b 10f 11: lwz r6,0(r8) @@ -72,12 +66,9 @@ _GLOBAL(hash_page) cmplw 0,r4,r0 ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */ mfspr r5, SPRN_SPRG_PGDIR /* phys page-table root */ -#ifdef CONFIG_VMAP_STACK - tovirt(r5, r5) -#endif blt+ 112f /* assume user more likely */ - lis r5, (swapper_pg_dir - ADDR_OFFSET)@ha /* if kernel address, use */ - addi r5 ,r5 ,(swapper_pg_dir - ADDR_OFFSET)@l /* kernel page table */ + lis r5, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ + addi r5 ,r5 ,(swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */ 112: #ifndef CONFIG_PTE_64BIT @@ -89,9 +80,6 @@ _GLOBAL(hash_page) lwzx r8,r8,r5 /* Get L1 entry */ rlwinm. 
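The signal_32.c/signal_64.c changes above all enforce one rule: sample regs->msr once, before get_tm_stackpointer() runs, because that function now clears MSR_TS_MASK under preempt_disable(). Condensed to the ordering that matters, as a fragment of handle_rt_signal64() from the hunks above (not standalone code):

	unsigned long msr = regs->msr;	/* snapshot while MSR[TS] is set */

	frame = get_sigframe(ksig, tsk, sizeof(*frame), 0);	/* ends up in
				get_tm_stackpointer(), which clears MSR[TS] */

	if (MSR_TM_ACTIVE(msr))	/* decide from the snapshot, not live MSR */
		err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
					    &frame->uc_transact.uc_mcontext,
					    tsk, ksig->sig, NULL,
					    (unsigned long)ksig->ka.sa.sa_handler,
					    msr);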
r8,r8,0,0,20 /* extract pt base address */ #endif -#ifdef CONFIG_VMAP_STACK - tovirt(r8, r8) -#endif #ifdef CONFIG_SMP beq- hash_page_out /* return if no mapping */ #else @@ -143,30 +131,36 @@ retry: bne- retry /* retry if someone got there first */ mfsrin r3,r4 /* get segment reg for segment */ +#ifndef CONFIG_VMAP_STACK mfctr r0 stw r0,_CTR(r11) +#endif bl create_hpte /* add the hash table entry */ #ifdef CONFIG_SMP eieio - lis r8, (mmu_hash_lock - ADDR_OFFSET)@ha + lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha li r0,0 - stw r0, (mmu_hash_lock - ADDR_OFFSET)@l(r8) + stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8) #endif +#ifdef CONFIG_VMAP_STACK + b fast_hash_page_return +#else /* Return from the exception */ lwz r5,_CTR(r11) mtctr r5 lwz r0,GPR0(r11) lwz r8,GPR8(r11) b fast_exception_return +#endif #ifdef CONFIG_SMP hash_page_out: eieio - lis r8, (mmu_hash_lock - ADDR_OFFSET)@ha + lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha li r0,0 - stw r0, (mmu_hash_lock - ADDR_OFFSET)@l(r8) + stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8) blr #endif /* CONFIG_SMP */ @@ -341,7 +335,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) patch_site 1f, patch__hash_page_A1 patch_site 2f, patch__hash_page_A2 /* Get the address of the primary PTE group in the hash table (r3) */ -0: lis r0, (Hash_base - ADDR_OFFSET)@h /* base address of hash table */ +0: lis r0, (Hash_base - PAGE_OFFSET)@h /* base address of hash table */ 1: rlwimi r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */ 2: rlwinm r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */ xor r3,r3,r0 /* make primary hash */ @@ -355,10 +349,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) beq+ 10f /* no PTE: go look for an empty slot */ tlbie r4 - lis r4, (htab_hash_searches - ADDR_OFFSET)@ha - lwz r6, (htab_hash_searches - ADDR_OFFSET)@l(r4) + lis r4, (htab_hash_searches - PAGE_OFFSET)@ha + lwz r6, (htab_hash_searches - PAGE_OFFSET)@l(r4) addi r6,r6,1 /* count how many searches we do */ - stw r6, (htab_hash_searches - ADDR_OFFSET)@l(r4) + stw r6, (htab_hash_searches - PAGE_OFFSET)@l(r4) /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */ mtctr r0 @@ -390,10 +384,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) beq+ found_empty /* update counter of times that the primary PTEG is full */ - lis r4, (primary_pteg_full - ADDR_OFFSET)@ha - lwz r6, (primary_pteg_full - ADDR_OFFSET)@l(r4) + lis r4, (primary_pteg_full - PAGE_OFFSET)@ha + lwz r6, (primary_pteg_full - PAGE_OFFSET)@l(r4) addi r6,r6,1 - stw r6, (primary_pteg_full - ADDR_OFFSET)@l(r4) + stw r6, (primary_pteg_full - PAGE_OFFSET)@l(r4) patch_site 0f, patch__hash_page_C /* Search the secondary PTEG for an empty slot */ @@ -427,8 +421,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) * lockup here but that shouldn't happen */ -1: lis r4, (next_slot - ADDR_OFFSET)@ha /* get next evict slot */ - lwz r6, (next_slot - ADDR_OFFSET)@l(r4) +1: lis r4, (next_slot - PAGE_OFFSET)@ha /* get next evict slot */ + lwz r6, (next_slot - PAGE_OFFSET)@l(r4) addi r6,r6,HPTE_SIZE /* search for candidate */ andi. 
r6,r6,7*HPTE_SIZE stw r6,next_slot@l(r4) diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index 0a1c65a2c565..f888cbb109b9 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -413,7 +413,7 @@ void __init MMU_init_hw(void) void __init MMU_init_hw_patch(void) { unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE); - unsigned int hash; + unsigned int hash = (unsigned int)Hash - PAGE_OFFSET; if (ppc_md.progress) ppc_md.progress("hash:patch", 0x345); @@ -425,11 +425,6 @@ void __init MMU_init_hw_patch(void) /* * Patch up the instructions in hashtable.S:create_hpte */ - if (IS_ENABLED(CONFIG_VMAP_STACK)) - hash = (unsigned int)Hash; - else - hash = (unsigned int)Hash - PAGE_OFFSET; - modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16); modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6); modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6); @@ -439,8 +434,7 @@ void __init MMU_init_hw_patch(void) /* * Patch up the instructions in hashtable.S:flush_hash_page */ - modify_instruction_site(&patch__flush_hash_A0, 0xffff, - ((unsigned int)Hash - PAGE_OFFSET) >> 16); + modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16); modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6); modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6); modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask); diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 73d4873fc7f8..33b3461d91e8 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -53,20 +53,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, if (pshift >= pdshift) { cachep = PGT_CACHE(PTE_T_ORDER); num_hugepd = 1 << (pshift - pdshift); + new = NULL; } else if (IS_ENABLED(CONFIG_PPC_8xx)) { - cachep = PGT_CACHE(PTE_INDEX_SIZE); + cachep = NULL; num_hugepd = 1; + new = pte_alloc_one(mm); } else { cachep = PGT_CACHE(pdshift - pshift); num_hugepd = 1; + new = NULL; } - if (!cachep) { + if (!cachep && !new) { WARN_ONCE(1, "No page table cache created for hugetlb tables"); return -ENOMEM; } - new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL)); + if (cachep) + new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL)); BUG_ON(pshift > HUGEPD_SHIFT_MASK); BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK); @@ -97,7 +101,10 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, if (i < num_hugepd) { for (i = i - 1 ; i >= 0; i--, hpdp--) *hpdp = __hugepd(0); - kmem_cache_free(cachep, new); + if (cachep) + kmem_cache_free(cachep, new); + else + pte_free(mm, new); } else { kmemleak_ignore(new); } @@ -324,8 +331,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif if (shift >= pdshift) hugepd_free(tlb, hugepte); else if (IS_ENABLED(CONFIG_PPC_8xx)) - pgtable_free_tlb(tlb, hugepte, - get_hugepd_cache_index(PTE_INDEX_SIZE)); + pgtable_free_tlb(tlb, hugepte, 0); else pgtable_free_tlb(tlb, hugepte, get_hugepd_cache_index(pdshift - shift)); @@ -639,12 +645,13 @@ static int __init hugetlbpage_init(void) * if we have pdshift and shift value same, we don't * use pgt cache for hugepd. 
*/ - if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx)) - pgtable_cache_add(PTE_INDEX_SIZE); - else if (pdshift > shift) - pgtable_cache_add(pdshift - shift); - else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx)) + if (pdshift > shift) { + if (!IS_ENABLED(CONFIG_PPC_8xx)) + pgtable_cache_add(pdshift - shift); + } else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || + IS_ENABLED(CONFIG_PPC_8xx)) { pgtable_cache_add(PTE_T_ORDER); + } configured = true; } diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c index 16dd95bd0749..db5664dde5ff 100644 --- a/arch/powerpc/mm/kasan/kasan_init_32.c +++ b/arch/powerpc/mm/kasan/kasan_init_32.c @@ -185,8 +185,7 @@ u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0}; static void __init kasan_early_hash_table(void) { - unsigned int hash = IS_ENABLED(CONFIG_VMAP_STACK) ? (unsigned int)early_hash : - __pa(early_hash); + unsigned int hash = __pa(early_hash); modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16); modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16); diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index e8c84d265602..0ec9640335bb 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -3435,6 +3435,11 @@ getstring(char *s, int size) int c; c = skipbl(); + if (c == '\n') { + *s = 0; + return; + } + do { if( size > 1 ){ *s++ = c; diff --git a/arch/riscv/boot/.gitignore b/arch/riscv/boot/.gitignore index 8dab0bb6ae66..8a45a37d2af4 100644 --- a/arch/riscv/boot/.gitignore +++ b/arch/riscv/boot/.gitignore @@ -1,2 +1,4 @@ Image Image.gz +loader +loader.lds diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index 435b65532e29..8e18d2c64399 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -72,6 +72,16 @@ #define EXC_LOAD_PAGE_FAULT 13 #define EXC_STORE_PAGE_FAULT 15 +/* PMP configuration */ +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_A_TOR 0x08 +#define PMP_A_NA4 0x10 +#define PMP_A_NAPOT 0x18 +#define PMP_L 0x80 + /* symbolic CSR names: */ #define CSR_CYCLE 0xc00 #define CSR_TIME 0xc01 @@ -100,6 +110,8 @@ #define CSR_MCAUSE 0x342 #define CSR_MTVAL 0x343 #define CSR_MIP 0x344 +#define CSR_PMPCFG0 0x3a0 +#define CSR_PMPADDR0 0x3b0 #define CSR_MHARTID 0xf14 #ifdef CONFIG_RISCV_M_MODE diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 271860fc2c3f..85f2073e7fe4 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -58,6 +58,12 @@ _start_kernel: /* Reset all registers except ra, a0, a1 */ call reset_regs + /* Setup a PMP to permit access to all of memory. */ + li a0, -1 + csrw CSR_PMPADDR0, a0 + li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X) + csrw CSR_PMPCFG0, a0 + /* * The hartid in a0 is expected later on, and we have no firmware * to hand it to us. 
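The head.S hunk above runs when the kernel itself boots in machine mode, where no firmware has programmed the PMP, and with no matching PMP entry all S/U-mode accesses fault. Writing all-ones to pmpaddr0 under the NAPOT encoding selects the largest power-of-two region, i.e. all of memory. The same two writes as a C sketch, using the kernel's csr_write() helper and the PMP_* constants added to csr.h above:

#include <linux/init.h>
#include <asm/csr.h>

static void __init pmp_allow_everything(void)
{
	csr_write(CSR_PMPADDR0, -1UL);	/* NAPOT: largest possible region */
	csr_write(CSR_PMPCFG0, PMP_A_NAPOT | PMP_R | PMP_W | PMP_X);
}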
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index f4cad5163bf2..ffb3d94bf0cc 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -156,6 +156,6 @@ void __init trap_init(void) csr_write(CSR_SCRATCH, 0); /* Set the exception vector address */ csr_write(CSR_TVEC, &handle_exception); - /* Enable all interrupts */ - csr_write(CSR_IE, -1); + /* Enable interrupts */ + csr_write(CSR_IE, IE_SIE | IE_EIE); } diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c index f0cc86040587..ec0ca90dd900 100644 --- a/arch/riscv/mm/kasan_init.c +++ b/arch/riscv/mm/kasan_init.c @@ -19,18 +19,20 @@ asmlinkage void __init kasan_early_init(void) for (i = 0; i < PTRS_PER_PTE; ++i) set_pte(kasan_early_shadow_pte + i, mk_pte(virt_to_page(kasan_early_shadow_page), - PAGE_KERNEL)); + PAGE_KERNEL)); for (i = 0; i < PTRS_PER_PMD; ++i) set_pmd(kasan_early_shadow_pmd + i, - pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)), - __pgprot(_PAGE_TABLE))); + pfn_pmd(PFN_DOWN + (__pa((uintptr_t) kasan_early_shadow_pte)), + __pgprot(_PAGE_TABLE))); for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END; i += PGDIR_SIZE, ++pgd) set_pgd(pgd, - pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))), - __pgprot(_PAGE_TABLE))); + pfn_pgd(PFN_DOWN + (__pa(((uintptr_t) kasan_early_shadow_pmd))), + __pgprot(_PAGE_TABLE))); /* init for swapper_pg_dir */ pgd = pgd_offset_k(KASAN_SHADOW_START); @@ -38,37 +40,43 @@ asmlinkage void __init kasan_early_init(void) for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END; i += PGDIR_SIZE, ++pgd) set_pgd(pgd, - pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))), - __pgprot(_PAGE_TABLE))); + pfn_pgd(PFN_DOWN + (__pa(((uintptr_t) kasan_early_shadow_pmd))), + __pgprot(_PAGE_TABLE))); flush_tlb_all(); } static void __init populate(void *start, void *end) { - unsigned long i; + unsigned long i, offset; unsigned long vaddr = (unsigned long)start & PAGE_MASK; unsigned long vend = PAGE_ALIGN((unsigned long)end); unsigned long n_pages = (vend - vaddr) / PAGE_SIZE; + unsigned long n_ptes = + ((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE; unsigned long n_pmds = - (n_pages % PTRS_PER_PTE) ? 
n_pages / PTRS_PER_PTE + 1 : - n_pages / PTRS_PER_PTE; + ((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD; + + pte_t *pte = + memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE); + pmd_t *pmd = + memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE); pgd_t *pgd = pgd_offset_k(vaddr); - pmd_t *pmd = memblock_alloc(n_pmds * sizeof(pmd_t), PAGE_SIZE); - pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE); for (i = 0; i < n_pages; i++) { phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); - - set_pte(pte + i, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL)); + set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL)); } - for (i = 0; i < n_pmds; ++pgd, i += PTRS_PER_PMD) - set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(((uintptr_t)(pmd + i)))), + for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE) + set_pmd(&pmd[i], + pfn_pmd(PFN_DOWN(__pa(&pte[offset])), __pgprot(_PAGE_TABLE))); - for (i = 0; i < n_pages; ++pmd, i += PTRS_PER_PTE) - set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa((uintptr_t)(pte + i))), + for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD) + set_pgd(&pgd[i], + pfn_pgd(PFN_DOWN(__pa(&pmd[offset])), __pgprot(_PAGE_TABLE))); flush_tlb_all(); @@ -81,7 +89,8 @@ void __init kasan_init(void) unsigned long i; kasan_populate_early_shadow((void *)KASAN_SHADOW_START, - (void *)kasan_mem_to_shadow((void *)VMALLOC_END)); + (void *)kasan_mem_to_shadow((void *) + VMALLOC_END)); for_each_memblock(memory, reg) { void *start = (void *)__va(reg->base); @@ -90,14 +99,14 @@ void __init kasan_init(void) if (start >= end) break; - populate(kasan_mem_to_shadow(start), - kasan_mem_to_shadow(end)); + populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end)); }; for (i = 0; i < PTRS_PER_PTE; i++) set_pte(&kasan_early_shadow_pte[i], mk_pte(virt_to_page(kasan_early_shadow_page), - __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED))); + __pgprot(_PAGE_PRESENT | _PAGE_READ | + _PAGE_ACCESSED))); memset(kasan_early_shadow_page, 0, PAGE_SIZE); init_task.kasan_depth = 0; diff --git a/arch/s390/Makefile b/arch/s390/Makefile index e0e3a465bbfd..8dfa2cf1f05c 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -146,7 +146,7 @@ all: bzImage #KBUILD_IMAGE is necessary for packaging targets like rpm-pkg, deb-pkg... 
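The table sizing in the kasan populate() rework above rounds with (n + k) & -k, which rounds up to a multiple of k but allocates one extra unit whenever n is already an exact multiple; the exact form of the idiom is (n + k - 1) & -k. That is harmless here (at worst a spare page of tables), but worth keeping straight. A small standalone check, with 512 standing in for PTRS_PER_PTE on 64-bit riscv:

#include <stdio.h>

#define round_up_pow2(n, k)	(((n) + (k) - 1) & -(k))	/* exact */

int main(void)
{
	unsigned long k = 512;

	printf("%lu\n", round_up_pow2(1024, k) / k);	/* 2 */
	printf("%lu\n", ((1024 + k) & -k) / k);		/* 3: one spare */
	return 0;
}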
KBUILD_IMAGE := $(boot)/bzImage -install: vmlinux +install: $(Q)$(MAKE) $(build)=$(boot) $@ bzImage: vmlinux diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile index 30f1811540c5..45b33b83de08 100644 --- a/arch/s390/boot/Makefile +++ b/arch/s390/boot/Makefile @@ -70,7 +70,7 @@ $(obj)/compressed/vmlinux: $(obj)/startup.a FORCE $(obj)/startup.a: $(OBJECTS) FORCE $(call if_changed,ar) -install: $(CONFIGURE) $(obj)/bzImage +install: sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \ System.map "$(INSTALL_PATH)" diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c index 5d12352545c5..5591243d673e 100644 --- a/arch/s390/boot/kaslr.c +++ b/arch/s390/boot/kaslr.c @@ -75,7 +75,7 @@ static unsigned long get_random(unsigned long limit) *(unsigned long *) prng.parm_block ^= seed; for (i = 0; i < 16; i++) { cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, - (char *) entropy, (char *) entropy, + (u8 *) entropy, (u8 *) entropy, sizeof(entropy)); memcpy(prng.parm_block, entropy, sizeof(entropy)); } diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 2e60c80395ab..0c86ba19fa2b 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -53,6 +53,7 @@ CONFIG_VFIO_AP=m CONFIG_CRASH_DUMP=y CONFIG_HIBERNATION=y CONFIG_PM_DEBUG=y +CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y CONFIG_CMM=m CONFIG_APPLDATA_BASE=y CONFIG_KVM=m @@ -474,7 +475,6 @@ CONFIG_NLMON=m # CONFIG_NET_VENDOR_EMULEX is not set # CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_GOOGLE is not set -# CONFIG_NET_VENDOR_HP is not set # CONFIG_NET_VENDOR_HUAWEI is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set @@ -684,7 +684,6 @@ CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_CRC32=m -CONFIG_CRYPTO_XXHASH=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m @@ -748,7 +747,6 @@ CONFIG_DEBUG_INFO_DWARF4=y CONFIG_GDB_SCRIPTS=y CONFIG_FRAME_WARN=1024 CONFIG_HEADERS_INSTALL=y -CONFIG_HEADERS_CHECK=y CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_PAGEALLOC=y @@ -772,9 +770,9 @@ CONFIG_DEBUG_MEMORY_INIT=y CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m CONFIG_DEBUG_PER_CPU_MAPS=y CONFIG_DEBUG_SHIRQ=y +CONFIG_PANIC_ON_OOPS=y CONFIG_DETECT_HUNG_TASK=y CONFIG_WQ_WATCHDOG=y -CONFIG_PANIC_ON_OOPS=y CONFIG_DEBUG_TIMEKEEPING=y CONFIG_PROVE_LOCKING=y CONFIG_LOCK_STAT=y @@ -783,9 +781,20 @@ CONFIG_DEBUG_ATOMIC_SLEEP=y CONFIG_DEBUG_LOCKING_API_SELFTESTS=y CONFIG_DEBUG_SG=y CONFIG_DEBUG_NOTIFIERS=y +CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_DEBUG_CREDENTIALS=y CONFIG_RCU_TORTURE_TEST=m CONFIG_RCU_CPU_STALL_TIMEOUT=300 +CONFIG_LATENCYTOP=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_STACK_TRACER=y +CONFIG_IRQSOFF_TRACER=y +CONFIG_PREEMPT_TRACER=y +CONFIG_SCHED_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_HIST_TRIGGERS=y +CONFIG_S390_PTDUMP=y CONFIG_NOTIFIER_ERROR_INJECTION=m CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m CONFIG_FAULT_INJECTION=y @@ -796,15 +805,6 @@ CONFIG_FAIL_IO_TIMEOUT=y CONFIG_FAIL_FUTEX=y CONFIG_FAULT_INJECTION_DEBUG_FS=y CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y -CONFIG_LATENCYTOP=y -CONFIG_IRQSOFF_TRACER=y -CONFIG_PREEMPT_TRACER=y -CONFIG_SCHED_TRACER=y -CONFIG_FTRACE_SYSCALLS=y -CONFIG_STACK_TRACER=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_FUNCTION_PROFILER=y -CONFIG_HIST_TRIGGERS=y CONFIG_LKDTM=m CONFIG_TEST_LIST_SORT=y CONFIG_TEST_SORT=y @@ -814,5 +814,3 @@ CONFIG_INTERVAL_TREE_TEST=m CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y 
 CONFIG_TEST_BPF=m
-CONFIG_BUG_ON_DATA_CORRUPTION=y
-CONFIG_S390_PTDUMP=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 25f799849582..6b27d861a9a3 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -53,6 +53,7 @@ CONFIG_VFIO_AP=m
 CONFIG_CRASH_DUMP=y
 CONFIG_HIBERNATION=y
 CONFIG_PM_DEBUG=y
+CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
 CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
 CONFIG_KVM=m
@@ -470,7 +471,6 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_EMULEX is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_GOOGLE is not set
-# CONFIG_NET_VENDOR_HP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
@@ -677,7 +677,6 @@ CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
-CONFIG_CRYPTO_XXHASH=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -739,18 +738,18 @@ CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_PANIC_ON_OOPS=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_STACK_TRACER=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
-CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_FUNCTION_PROFILER=y
 CONFIG_HIST_TRIGGERS=y
+CONFIG_S390_PTDUMP=y
 CONFIG_LKDTM=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
-CONFIG_BUG_ON_DATA_CORRUPTION=y
-CONFIG_S390_PTDUMP=y
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 4ebcf891ff3c..62440a82731a 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -42,7 +42,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end);
 static inline void storage_key_init_range(unsigned long start,
					   unsigned long end)
 {
-	if (PAGE_DEFAULT_KEY)
+	if (PAGE_DEFAULT_KEY != 0)
 		__storage_key_init_range(start, end);
 }
 
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 361ef5eda468..aadb3d0e2adc 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -84,7 +84,6 @@ void s390_update_cpu_mhz(void);
 void cpu_detect_mhz_feature(void);
 
 extern const struct seq_operations cpuinfo_op;
-extern int sysctl_ieee_emulation_warnings;
 extern void execve_tail(void);
 extern void __bpon(void);
 
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 71e3f0146cda..1e3517b0518b 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -201,7 +201,7 @@ struct slib {
  * @scount: SBAL count
  * @sflags: whole SBAL flags
  * @length: length
- * @addr: address
+ * @addr: absolute data address
  */
 struct qdio_buffer_element {
 	u8 eflags;
@@ -211,7 +211,7 @@ struct qdio_buffer_element {
 	u8 scount;
 	u8 sflags;
 	u32 length;
-	void *addr;
+	u64 addr;
 } __attribute__ ((packed, aligned(16)));
 
 /**
@@ -227,7 +227,7 @@ struct qdio_buffer {
  * @sbal: absolute SBAL address
  */
 struct sl_element {
-	unsigned long sbal;
+	u64 sbal;
 } __attribute__ ((packed));
 
 /**
diff --git a/arch/x86/boot/compressed/kaslr_64.c b/arch/x86/boot/compressed/kaslr_64.c
index 748456c365f4..9557c5a15b91 100644
--- a/arch/x86/boot/compressed/kaslr_64.c
+++ b/arch/x86/boot/compressed/kaslr_64.c
@@ -29,9 +29,6 @@
 #define __PAGE_OFFSET __PAGE_OFFSET_BASE
 #include "../../mm/ident_map.c"
 
-/* Used by pgtable.h asm code to force instruction serialization. */
-unsigned long __force_order;
-
 /* Used to track our page table allocation area. */
 struct alloc_pgt_data {
 	unsigned char *pgt_buf;
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index ebe1685e92dd..d5e517d1c3dd 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -512,6 +512,8 @@
 #define MSR_K7_HWCR			0xc0010015
 #define MSR_K7_HWCR_SMMLOCK_BIT		0
 #define MSR_K7_HWCR_SMMLOCK		BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
+#define MSR_K7_HWCR_IRPERF_EN_BIT	30
+#define MSR_K7_HWCR_IRPERF_EN		BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
 #define MSR_K7_FID_VID_CTL		0xc0010041
 #define MSR_K7_FID_VID_STATUS		0xc0010042
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index ac83a0fef628..1f875fbe1384 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -28,6 +28,7 @@
 
 static const int amd_erratum_383[];
 static const int amd_erratum_400[];
+static const int amd_erratum_1054[];
 static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
 
 /*
@@ -972,6 +973,15 @@ static void init_amd(struct cpuinfo_x86 *c)
 	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
 	if (!cpu_has(c, X86_FEATURE_XENPV))
 		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+
+	/*
+	 * Turn on the Instructions Retired free counter on machines not
+	 * susceptible to erratum #1054 "Instructions Retired Performance
+	 * Counter May Be Inaccurate".
+	 */
+	if (cpu_has(c, X86_FEATURE_IRPERF) &&
+	    !cpu_has_amd_erratum(c, amd_erratum_1054))
+		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
 }
 
 #ifdef CONFIG_X86_32
@@ -1099,6 +1109,10 @@ static const int amd_erratum_400[] =
 static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
 
+/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
+static const int amd_erratum_1054[] =
+	AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
+
 static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
 {
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index b3a50d962851..52de616a8065 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -1163,9 +1163,12 @@ static const struct sysfs_ops threshold_ops = {
 	.store			= store,
 };
 
+static void threshold_block_release(struct kobject *kobj);
+
 static struct kobj_type threshold_ktype = {
 	.sysfs_ops		= &threshold_ops,
 	.default_attrs		= default_attrs,
+	.release		= threshold_block_release,
 };
 
 static const char *get_name(unsigned int bank, struct threshold_block *b)
@@ -1198,8 +1201,9 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
 	return buf_mcatype;
 }
 
-static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
-				     unsigned int block, u32 address)
+static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
+				     unsigned int bank, unsigned int block,
+				     u32 address)
 {
 	struct threshold_block *b = NULL;
 	u32 low, high;
@@ -1243,16 +1247,12 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
 
 	INIT_LIST_HEAD(&b->miscj);
 
-	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
-		list_add(&b->miscj,
-			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
-	} else {
-		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
-	}
+	if (tb->blocks)
+		list_add(&b->miscj, &tb->blocks->miscj);
+	else
+		tb->blocks = b;
 
-	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
-				   per_cpu(threshold_banks, cpu)[bank]->kobj,
-				   get_name(bank, b));
+	err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b));
 	if (err)
 		goto out_free;
 recurse:
@@ -1260,7 +1260,7 @@ recurse:
 	if (!address)
 		return 0;
 
-	err = allocate_threshold_blocks(cpu, bank, block, address);
+	err = allocate_threshold_blocks(cpu, tb, bank, block, address);
 	if (err)
 		goto out_free;
@@ -1345,8 +1345,6 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		goto out_free;
 	}
 
-	per_cpu(threshold_banks, cpu)[bank] = b;
-
 	if (is_shared_bank(bank)) {
 		refcount_set(&b->cpus, 1);
 
@@ -1357,9 +1355,13 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		}
 	}
 
-	err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
-	if (!err)
-		goto out;
+	err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
+	if (err)
+		goto out_free;
+
+	per_cpu(threshold_banks, cpu)[bank] = b;
+
+	return 0;
 
 out_free:
 	kfree(b);
@@ -1368,8 +1370,12 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
 	return err;
 }
 
-static void deallocate_threshold_block(unsigned int cpu,
-				       unsigned int bank)
+static void threshold_block_release(struct kobject *kobj)
+{
+	kfree(to_block(kobj));
+}
+
+static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
 {
 	struct threshold_block *pos = NULL;
 	struct threshold_block *tmp = NULL;
@@ -1379,13 +1385,11 @@ static void deallocate_threshold_block(unsigned int cpu,
 		return;
 
 	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
-		kobject_put(&pos->kobj);
 		list_del(&pos->miscj);
-		kfree(pos);
+		kobject_put(&pos->kobj);
 	}
 
-	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
-	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
+	kobject_put(&head->blocks->kobj);
 }
 
 static void __threshold_remove_blocks(struct threshold_bank *b)
diff --git a/arch/x86/kernel/ima_arch.c b/arch/x86/kernel/ima_arch.c
index 4d4f5d9faac3..23054909c8dd 100644
--- a/arch/x86/kernel/ima_arch.c
+++ b/arch/x86/kernel/ima_arch.c
@@ -10,8 +10,6 @@ extern struct boot_params boot_params;
 
 static enum efi_secureboot_mode get_sb_mode(void)
 {
-	efi_char16_t efi_SecureBoot_name[] = L"SecureBoot";
-	efi_char16_t efi_SetupMode_name[] = L"SecureBoot";
 	efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
 	efi_status_t status;
 	unsigned long size;
@@ -25,7 +23,7 @@ static enum efi_secureboot_mode get_sb_mode(void)
 	}
 
 	/* Get variable contents into buffer */
-	status = efi.get_variable(efi_SecureBoot_name, &efi_variable_guid,
+	status = efi.get_variable(L"SecureBoot", &efi_variable_guid,
 				  NULL, &size, &secboot);
 	if (status == EFI_NOT_FOUND) {
 		pr_info("ima: secureboot mode disabled\n");
@@ -38,7 +36,7 @@ static enum efi_secureboot_mode get_sb_mode(void)
 	}
 
 	size = sizeof(setupmode);
-	status = efi.get_variable(efi_SetupMode_name, &efi_variable_guid,
+	status = efi.get_variable(L"SetupMode", &efi_variable_guid,
 				  NULL, &size, &setupmode);
 
 	if (status != EFI_SUCCESS)	/* ignore unknown SetupMode */
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index d817f255aed8..6efe0410fb72 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -425,7 +425,29 @@ static void __init sev_map_percpu_data(void)
 	}
 }
 
+static bool pv_tlb_flush_supported(void)
+{
+	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
+		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
+		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+}
+
+static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
+
 #ifdef CONFIG_SMP
+
+static bool pv_ipi_supported(void)
+{
+	return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
+}
+
+static bool pv_sched_yield_supported(void)
+{
+	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
+		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
+		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+}
+
 #define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)
 
 static void __send_ipi_mask(const struct cpumask *mask, int vector)
@@ -490,12 +512,12 @@ static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
 static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
 {
 	unsigned int this_cpu = smp_processor_id();
-	struct cpumask new_mask;
+	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
 	const struct cpumask *local_mask;
 
-	cpumask_copy(&new_mask, mask);
-	cpumask_clear_cpu(this_cpu, &new_mask);
-	local_mask = &new_mask;
+	cpumask_copy(new_mask, mask);
+	cpumask_clear_cpu(this_cpu, new_mask);
+	local_mask = new_mask;
 	__send_ipi_mask(local_mask, vector);
 }
 
@@ -575,7 +597,6 @@ static void __init kvm_apf_trap_init(void)
 	update_intr_gate(X86_TRAP_PF, async_page_fault);
 }
 
-static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);
 
 static void kvm_flush_tlb_others(const struct cpumask *cpumask,
			const struct flush_tlb_info *info)
@@ -583,7 +604,7 @@ static void kvm_flush_tlb_others(const struct cpumask *cpumask,
 	u8 state;
 	int cpu;
 	struct kvm_steal_time *src;
-	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);
+	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
 
 	cpumask_copy(flushmask, cpumask);
 	/*
@@ -619,11 +640,10 @@ static void __init kvm_guest_init(void)
 		pv_ops.time.steal_clock = kvm_steal_clock;
 	}
 
-	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
-	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+	if (pv_tlb_flush_supported()) {
 		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
 		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
+		pr_info("KVM setup pv remote TLB flush\n");
 	}
 
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -632,9 +652,7 @@ static void __init kvm_guest_init(void)
 #ifdef CONFIG_SMP
 	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
 	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
-	if (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
-	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
-	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+	if (pv_sched_yield_supported()) {
 		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
 		pr_info("KVM setup pv sched yield\n");
 	}
@@ -700,7 +718,7 @@ static uint32_t __init kvm_detect(void)
 static void __init kvm_apic_init(void)
 {
 #if defined(CONFIG_SMP)
-	if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
+	if (pv_ipi_supported())
 		kvm_setup_pv_ipi();
 #endif
 }
@@ -732,26 +750,31 @@ static __init int activate_jump_labels(void)
 }
 arch_initcall(activate_jump_labels);
 
-static __init int kvm_setup_pv_tlb_flush(void)
+static __init int kvm_alloc_cpumask(void)
 {
 	int cpu;
+	bool alloc = false;
 
 	if (!kvm_para_available() || nopv)
 		return 0;
 
-	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
-	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+	if (pv_tlb_flush_supported())
+		alloc = true;
+
+#if defined(CONFIG_SMP)
+	if (pv_ipi_supported())
+		alloc = true;
+#endif
+
+	if (alloc)
 		for_each_possible_cpu(cpu) {
-			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
+			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
 		}
-		pr_info("KVM setup pv remote TLB flush\n");
-	}
 
 	return 0;
 }
-arch_initcall(kvm_setup_pv_tlb_flush);
+arch_initcall(kvm_alloc_cpumask);
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 991019d5eee1..1bb4927030af 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -59,6 +59,19 @@ config KVM
 
	  If unsure, say N.
 
+config KVM_WERROR
+	bool "Compile KVM with -Werror"
+	# KASAN may cause the build to fail due to larger frames
+	default y if X86_64 && !KASAN
+	# The dependency on !COMPILE_TEST keeps this from being enabled
+	# blindly in allmodconfig or allyesconfig configurations
+	depends on (X86_64 && !KASAN) || !COMPILE_TEST
+	depends on EXPERT
+	help
+	  Add -Werror to the build flags for (and only for) the KVM modules.
+
+	  If in doubt, say "N".
+
 config KVM_INTEL
	tristate "KVM for Intel (and compatible) processors support"
	depends on KVM && IA32_FEAT_CTL
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 4654e97a05cc..e553f0fdd87d 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 
 ccflags-y += -Iarch/x86/kvm
-ccflags-y += -Werror
+ccflags-$(CONFIG_KVM_WERROR) += -Werror
 
 KVM := ../../../virt/kvm
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2125c6ae5951..05cb45bc0e08 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -57,11 +57,13 @@
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
+#ifdef MODULE
 static const struct x86_cpu_id svm_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_SVM),
	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
+#endif
 
 #define IOPM_ALLOC_ORDER 2
 #define MSRPM_ALLOC_ORDER 1
@@ -2230,8 +2232,9 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 static int avic_init_vcpu(struct vcpu_svm *svm)
 {
	int ret;
+	struct kvm_vcpu *vcpu = &svm->vcpu;
 
-	if (!kvm_vcpu_apicv_active(&svm->vcpu))
+	if (!avic || !irqchip_in_kernel(vcpu->kvm))
		return 0;
 
	ret = avic_init_backing_page(&svm->vcpu);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 3aba51d782e2..a7dd67859bd4 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -64,11 +64,13 @@
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
+#ifdef MODULE
 static const struct x86_cpu_id vmx_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_VMX),
	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
+#endif
 
 bool __read_mostly enable_vpid = 1;
 module_param_named(vpid, enable_vpid, bool, 0444);
@@ -7163,6 +7165,7 @@ static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
	else
		intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
 
+	/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
	return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
 }
 
@@ -7192,6 +7195,20 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
	case x86_intercept_outs:
		return vmx_check_intercept_io(vcpu, info);
 
+	case x86_intercept_lgdt:
+	case x86_intercept_lidt:
+	case x86_intercept_lldt:
+	case x86_intercept_ltr:
+	case x86_intercept_sgdt:
+	case x86_intercept_sidt:
+	case x86_intercept_sldt:
+	case x86_intercept_str:
+		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC))
+			return X86EMUL_CONTINUE;
+
+		/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
+		break;
+
	/* TODO: check more intercepts... */
	default:
		break;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6fa014ccd253..1b6d9ac9533c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7195,15 +7195,15 @@ static void kvm_timer_init(void)
 
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
 #ifdef CONFIG_CPU_FREQ
-		struct cpufreq_policy policy;
+		struct cpufreq_policy *policy;
		int cpu;
 
-		memset(&policy, 0, sizeof(policy));
		cpu = get_cpu();
-		cpufreq_get_policy(&policy, cpu);
-		if (policy.cpuinfo.max_freq)
-			max_tsc_khz = policy.cpuinfo.max_freq;
+		policy = cpufreq_cpu_get(cpu);
+		if (policy && policy->cpuinfo.max_freq)
+			max_tsc_khz = policy->cpuinfo.max_freq;
		put_cpu();
+		cpufreq_cpu_put(policy);
 #endif
		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);
@@ -7313,12 +7313,12 @@ int kvm_arch_init(void *opaque)
	}
 
	if (!ops->cpu_has_kvm_support()) {
-		printk(KERN_ERR "kvm: no hardware support\n");
+		pr_err_ratelimited("kvm: no hardware support\n");
		r = -EOPNOTSUPP;
		goto out;
	}
	if (ops->disabled_by_bios()) {
-		printk(KERN_ERR "kvm: disabled by bios\n");
+		pr_err_ratelimited("kvm: disabled by bios\n");
		r = -EOPNOTSUPP;
		goto out;
	}
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 1f756ffffe8b..79409120a603 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -896,14 +896,15 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
 static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
 {
	int ret;
+#ifdef CONFIG_X86_64
+	unsigned int which;
+	u64 base;
+#endif
 
	ret = 0;
 
	switch (msr) {
 #ifdef CONFIG_X86_64
-		unsigned which;
-		u64 base;
-
	case MSR_FS_BASE:	which = SEGBASE_FS; goto set;
	case MSR_KERNEL_GS_BASE: which = SEGBASE_GS_USER; goto set;
	case MSR_GS_BASE:	which = SEGBASE_GS_KERNEL; goto set;