From e978eaca4beefb789874864c0ed99603531c5425 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Tue, 27 Dec 2022 13:00:39 +0000
Subject: arm64/cpufeature: Remove 4 bit assumption in ARM64_FEATURE_MASK()

The ARM64_FEATURE_MASK(), used extensively by KVM, assumes that all ID
register fields are 4 bits wide, but this is no longer the case; for
example, there are several 1-bit fields in ID_AA64SMFR0_EL1. Fortunately
we now have generated mask constants for all the ID registers which can
be used instead.

Rather than create churn by updating existing users, update the macro to
reference the generated constants and replace the comment with a note
advising against adding new users. There are also users of
ARM64_FEATURE_FIELD_BITS in the pKVM code which will need to be fixed
separately; since no relevant feature is planned to be exposed to
protected guests in the immediate future, there is no immediate issue
with them assuming fields are 4 bits wide.

Signed-off-by: Mark Brown
Link: https://lore.kernel.org/r/20221222-arm64-arm64-feature-mask-v1-1-c34c1e177f90@kernel.org
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/sysreg.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 1312fb48f18b..1c2bd78f03d3 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -809,8 +809,8 @@

 #define ARM64_FEATURE_FIELD_BITS	4

-/* Create a mask for the feature bits of the specified feature. */
-#define ARM64_FEATURE_MASK(x)	(GENMASK_ULL(x##_SHIFT + ARM64_FEATURE_FIELD_BITS - 1, x##_SHIFT))
+/* Defined for compatibility only, do not add new users. */
+#define ARM64_FEATURE_MASK(x)	(x##_MASK)

 #ifdef __ASSEMBLY__
-- cgit v1.2.3

From e080477a050cce0471d3a84347f350ad7514a18b Mon Sep 17 00:00:00 2001
From: Rob Herring
Date: Mon, 9 Jan 2023 13:26:17 -0600
Subject: perf: arm_spe: Use feature numbering for PMSEVFR_EL1 defines

Similar to commit 121a8fc088f1 ("arm64/sysreg: Use feature numbering for
PMU and SPE revisions"), use feature numbering instead of architecture
versions for the PMSEVFR_EL1 Res0 defines.
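
As an illustrative aside (not part of the patch), the renamed defines are
consumed through arm_spe_pmsevfr_res0() in the hunk below; a caller that
wants to reject values touching RES0 bits for the detected SPE version
might look like this sketch, where the helper name pmsevfr_valid() is
invented for the example:

	/*
	 * Hypothetical validity check: a PMSEVFR_EL1 value is acceptable
	 * only if it leaves clear every bit that is RES0 for this SPE
	 * version.  arm_spe_pmsevfr_res0() is the driver helper updated
	 * below.
	 */
	static bool pmsevfr_valid(u64 val, u16 pmsver)
	{
		return !(val & arm_spe_pmsevfr_res0(pmsver));
	}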
Tested-by: James Clark Signed-off-by: Rob Herring Reviewed-by: Anshuman Khandual Link: https://lore.kernel.org/r/20220825-arm-spe-v8-7-v4-1-327f860daf28@kernel.org Signed-off-by: Will Deacon --- arch/arm64/include/asm/sysreg.h | 6 +++--- drivers/perf/arm_spe_pmu.c | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 1312fb48f18b..c4ce16333750 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -273,11 +273,11 @@ #define SYS_PMSFCR_EL1_ST_SHIFT 18 #define SYS_PMSEVFR_EL1 sys_reg(3, 0, 9, 9, 5) -#define SYS_PMSEVFR_EL1_RES0_8_2 \ +#define PMSEVFR_EL1_RES0_IMP \ (GENMASK_ULL(47, 32) | GENMASK_ULL(23, 16) | GENMASK_ULL(11, 8) |\ BIT_ULL(6) | BIT_ULL(4) | BIT_ULL(2) | BIT_ULL(0)) -#define SYS_PMSEVFR_EL1_RES0_8_3 \ - (SYS_PMSEVFR_EL1_RES0_8_2 & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11))) +#define PMSEVFR_EL1_RES0_V1P1 \ + (PMSEVFR_EL1_RES0_IMP & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11))) #define SYS_PMSLATFR_EL1 sys_reg(3, 0, 9, 9, 6) #define SYS_PMSLATFR_EL1_MINLAT_SHIFT 0 diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index 00e3a637f7b6..65cf93dcc8ee 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -677,11 +677,11 @@ static u64 arm_spe_pmsevfr_res0(u16 pmsver) { switch (pmsver) { case ID_AA64DFR0_EL1_PMSVer_IMP: - return SYS_PMSEVFR_EL1_RES0_8_2; + return PMSEVFR_EL1_RES0_IMP; case ID_AA64DFR0_EL1_PMSVer_V1P1: /* Return the highest version we support in default */ default: - return SYS_PMSEVFR_EL1_RES0_8_3; + return PMSEVFR_EL1_RES0_V1P1; } } -- cgit v1.2.3 From c759ec850df89f7235b08e468abb3190b6998d4e Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 9 Jan 2023 13:26:18 -0600 Subject: arm64: Drop SYS_ from SPE register defines We currently have a non-standard SYS_ prefix in the constants generated for the SPE register bitfields. Drop this in preparation for automatic register definition generation. The SPE mask defines were unshifted, and the SPE register field enumerations were shifted. The autogenerated defines are the opposite, so make the necessary adjustments. No functional changes. 
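
To make the adjustment concrete, a before/after sketch (illustrative
only; both forms extract the same PMSIDR_EL1.Interval field, as in the
hunks below):

	u64 reg = read_sysreg_s(SYS_PMSIDR_EL1);
	u64 fld;

	/* Old style: unshifted mask, shift the register down first */
	fld = reg >> SYS_PMSIDR_EL1_INTERVAL_SHIFT & SYS_PMSIDR_EL1_INTERVAL_MASK;

	/* New style: shifted mask, mask first and then shift down */
	fld = (reg & PMSIDR_EL1_INTERVAL_MASK) >> PMSIDR_EL1_INTERVAL_SHIFT;

	/* ... or equivalently via FIELD_GET() from <linux/bitfield.h> */
	fld = FIELD_GET(PMSIDR_EL1_INTERVAL_MASK, reg);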
Tested-by: James Clark Signed-off-by: Rob Herring Reviewed-by: Anshuman Khandual Link: https://lore.kernel.org/r/20220825-arm-spe-v8-7-v4-2-327f860daf28@kernel.org Signed-off-by: Will Deacon --- arch/arm64/include/asm/el2_setup.h | 6 +- arch/arm64/include/asm/sysreg.h | 112 ++++++++++++++++++------------------- arch/arm64/kvm/debug.c | 2 +- arch/arm64/kvm/hyp/nvhe/debug-sr.c | 2 +- drivers/perf/arm_spe_pmu.c | 85 ++++++++++++++-------------- 5 files changed, 103 insertions(+), 104 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 668569adf4d3..f9da43e53cdb 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -53,10 +53,10 @@ cbz x0, .Lskip_spe_\@ // Skip if SPE not present mrs_s x0, SYS_PMBIDR_EL1 // If SPE available at EL2, - and x0, x0, #(1 << SYS_PMBIDR_EL1_P_SHIFT) + and x0, x0, #(1 << PMBIDR_EL1_P_SHIFT) cbnz x0, .Lskip_spe_el2_\@ // then permit sampling of physical - mov x0, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \ - 1 << SYS_PMSCR_EL2_PA_SHIFT) + mov x0, #(1 << PMSCR_EL2_PCT_SHIFT | \ + 1 << PMSCR_EL2_PA_SHIFT) msr_s SYS_PMSCR_EL2, x0 // addresses and physical counter .Lskip_spe_el2_\@: mov x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index c4ce16333750..dbb0e8e22cf4 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -218,59 +218,59 @@ /*** Statistical Profiling Extension ***/ /* ID registers */ #define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7) -#define SYS_PMSIDR_EL1_FE_SHIFT 0 -#define SYS_PMSIDR_EL1_FT_SHIFT 1 -#define SYS_PMSIDR_EL1_FL_SHIFT 2 -#define SYS_PMSIDR_EL1_ARCHINST_SHIFT 3 -#define SYS_PMSIDR_EL1_LDS_SHIFT 4 -#define SYS_PMSIDR_EL1_ERND_SHIFT 5 -#define SYS_PMSIDR_EL1_INTERVAL_SHIFT 8 -#define SYS_PMSIDR_EL1_INTERVAL_MASK 0xfUL -#define SYS_PMSIDR_EL1_MAXSIZE_SHIFT 12 -#define SYS_PMSIDR_EL1_MAXSIZE_MASK 0xfUL -#define SYS_PMSIDR_EL1_COUNTSIZE_SHIFT 16 -#define SYS_PMSIDR_EL1_COUNTSIZE_MASK 0xfUL +#define PMSIDR_EL1_FE_SHIFT 0 +#define PMSIDR_EL1_FT_SHIFT 1 +#define PMSIDR_EL1_FL_SHIFT 2 +#define PMSIDR_EL1_ARCHINST_SHIFT 3 +#define PMSIDR_EL1_LDS_SHIFT 4 +#define PMSIDR_EL1_ERND_SHIFT 5 +#define PMSIDR_EL1_INTERVAL_SHIFT 8 +#define PMSIDR_EL1_INTERVAL_MASK GENMASK_ULL(11, 8) +#define PMSIDR_EL1_MAXSIZE_SHIFT 12 +#define PMSIDR_EL1_MAXSIZE_MASK GENMASK_ULL(15, 12) +#define PMSIDR_EL1_COUNTSIZE_SHIFT 16 +#define PMSIDR_EL1_COUNTSIZE_MASK GENMASK_ULL(19, 16) #define SYS_PMBIDR_EL1 sys_reg(3, 0, 9, 10, 7) -#define SYS_PMBIDR_EL1_ALIGN_SHIFT 0 -#define SYS_PMBIDR_EL1_ALIGN_MASK 0xfU -#define SYS_PMBIDR_EL1_P_SHIFT 4 -#define SYS_PMBIDR_EL1_F_SHIFT 5 +#define PMBIDR_EL1_ALIGN_SHIFT 0 +#define PMBIDR_EL1_ALIGN_MASK 0xfU +#define PMBIDR_EL1_P_SHIFT 4 +#define PMBIDR_EL1_F_SHIFT 5 /* Sampling controls */ #define SYS_PMSCR_EL1 sys_reg(3, 0, 9, 9, 0) -#define SYS_PMSCR_EL1_E0SPE_SHIFT 0 -#define SYS_PMSCR_EL1_E1SPE_SHIFT 1 -#define SYS_PMSCR_EL1_CX_SHIFT 3 -#define SYS_PMSCR_EL1_PA_SHIFT 4 -#define SYS_PMSCR_EL1_TS_SHIFT 5 -#define SYS_PMSCR_EL1_PCT_SHIFT 6 +#define PMSCR_EL1_E0SPE_SHIFT 0 +#define PMSCR_EL1_E1SPE_SHIFT 1 +#define PMSCR_EL1_CX_SHIFT 3 +#define PMSCR_EL1_PA_SHIFT 4 +#define PMSCR_EL1_TS_SHIFT 5 +#define PMSCR_EL1_PCT_SHIFT 6 #define SYS_PMSCR_EL2 sys_reg(3, 4, 9, 9, 0) -#define SYS_PMSCR_EL2_E0HSPE_SHIFT 0 -#define SYS_PMSCR_EL2_E2SPE_SHIFT 1 -#define SYS_PMSCR_EL2_CX_SHIFT 3 -#define SYS_PMSCR_EL2_PA_SHIFT 4 -#define 
SYS_PMSCR_EL2_TS_SHIFT 5 -#define SYS_PMSCR_EL2_PCT_SHIFT 6 +#define PMSCR_EL2_E0HSPE_SHIFT 0 +#define PMSCR_EL2_E2SPE_SHIFT 1 +#define PMSCR_EL2_CX_SHIFT 3 +#define PMSCR_EL2_PA_SHIFT 4 +#define PMSCR_EL2_TS_SHIFT 5 +#define PMSCR_EL2_PCT_SHIFT 6 #define SYS_PMSICR_EL1 sys_reg(3, 0, 9, 9, 2) #define SYS_PMSIRR_EL1 sys_reg(3, 0, 9, 9, 3) -#define SYS_PMSIRR_EL1_RND_SHIFT 0 -#define SYS_PMSIRR_EL1_INTERVAL_SHIFT 8 -#define SYS_PMSIRR_EL1_INTERVAL_MASK 0xffffffUL +#define PMSIRR_EL1_RND_SHIFT 0 +#define PMSIRR_EL1_INTERVAL_SHIFT 8 +#define PMSIRR_EL1_INTERVAL_MASK GENMASK_ULL(31, 8) /* Filtering controls */ #define SYS_PMSNEVFR_EL1 sys_reg(3, 0, 9, 9, 1) #define SYS_PMSFCR_EL1 sys_reg(3, 0, 9, 9, 4) -#define SYS_PMSFCR_EL1_FE_SHIFT 0 -#define SYS_PMSFCR_EL1_FT_SHIFT 1 -#define SYS_PMSFCR_EL1_FL_SHIFT 2 -#define SYS_PMSFCR_EL1_B_SHIFT 16 -#define SYS_PMSFCR_EL1_LD_SHIFT 17 -#define SYS_PMSFCR_EL1_ST_SHIFT 18 +#define PMSFCR_EL1_FE_SHIFT 0 +#define PMSFCR_EL1_FT_SHIFT 1 +#define PMSFCR_EL1_FL_SHIFT 2 +#define PMSFCR_EL1_B_SHIFT 16 +#define PMSFCR_EL1_LD_SHIFT 17 +#define PMSFCR_EL1_ST_SHIFT 18 #define SYS_PMSEVFR_EL1 sys_reg(3, 0, 9, 9, 5) #define PMSEVFR_EL1_RES0_IMP \ @@ -280,37 +280,37 @@ (PMSEVFR_EL1_RES0_IMP & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11))) #define SYS_PMSLATFR_EL1 sys_reg(3, 0, 9, 9, 6) -#define SYS_PMSLATFR_EL1_MINLAT_SHIFT 0 +#define PMSLATFR_EL1_MINLAT_SHIFT 0 /* Buffer controls */ #define SYS_PMBLIMITR_EL1 sys_reg(3, 0, 9, 10, 0) -#define SYS_PMBLIMITR_EL1_E_SHIFT 0 -#define SYS_PMBLIMITR_EL1_FM_SHIFT 1 -#define SYS_PMBLIMITR_EL1_FM_MASK 0x3UL -#define SYS_PMBLIMITR_EL1_FM_STOP_IRQ (0 << SYS_PMBLIMITR_EL1_FM_SHIFT) +#define PMBLIMITR_EL1_E_SHIFT 0 +#define PMBLIMITR_EL1_FM_SHIFT 1 +#define PMBLIMITR_EL1_FM_MASK GENMASK_ULL(2, 1) +#define PMBLIMITR_EL1_FM_STOP_IRQ 0 #define SYS_PMBPTR_EL1 sys_reg(3, 0, 9, 10, 1) /* Buffer error reporting */ #define SYS_PMBSR_EL1 sys_reg(3, 0, 9, 10, 3) -#define SYS_PMBSR_EL1_COLL_SHIFT 16 -#define SYS_PMBSR_EL1_S_SHIFT 17 -#define SYS_PMBSR_EL1_EA_SHIFT 18 -#define SYS_PMBSR_EL1_DL_SHIFT 19 -#define SYS_PMBSR_EL1_EC_SHIFT 26 -#define SYS_PMBSR_EL1_EC_MASK 0x3fUL +#define PMBSR_EL1_COLL_SHIFT 16 +#define PMBSR_EL1_S_SHIFT 17 +#define PMBSR_EL1_EA_SHIFT 18 +#define PMBSR_EL1_DL_SHIFT 19 +#define PMBSR_EL1_EC_SHIFT 26 +#define PMBSR_EL1_EC_MASK GENMASK_ULL(31, 26) -#define SYS_PMBSR_EL1_EC_BUF (0x0UL << SYS_PMBSR_EL1_EC_SHIFT) -#define SYS_PMBSR_EL1_EC_FAULT_S1 (0x24UL << SYS_PMBSR_EL1_EC_SHIFT) -#define SYS_PMBSR_EL1_EC_FAULT_S2 (0x25UL << SYS_PMBSR_EL1_EC_SHIFT) +#define PMBSR_EL1_EC_BUF 0x0UL +#define PMBSR_EL1_EC_FAULT_S1 0x24UL +#define PMBSR_EL1_EC_FAULT_S2 0x25UL -#define SYS_PMBSR_EL1_FAULT_FSC_SHIFT 0 -#define SYS_PMBSR_EL1_FAULT_FSC_MASK 0x3fUL +#define PMBSR_EL1_FAULT_FSC_SHIFT 0 +#define PMBSR_EL1_FAULT_FSC_MASK 0x3fUL -#define SYS_PMBSR_EL1_BUF_BSC_SHIFT 0 -#define SYS_PMBSR_EL1_BUF_BSC_MASK 0x3fUL +#define PMBSR_EL1_BUF_BSC_SHIFT 0 +#define PMBSR_EL1_BUF_BSC_MASK 0x3fUL -#define SYS_PMBSR_EL1_BUF_BSC_FULL (0x1UL << SYS_PMBSR_EL1_BUF_BSC_SHIFT) +#define PMBSR_EL1_BUF_BSC_FULL 0x1UL /*** End of Statistical Profiling Extension ***/ diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index fccf9ec01813..55f80fb93925 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -328,7 +328,7 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu) * we may need to check if the host state needs to be saved. 
*/ if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) && - !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT))) + !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(PMBIDR_EL1_P_SHIFT))) vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE); /* Check if we have TRBE implemented and available at the host */ diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c index e17455773b98..2673bde62fad 100644 --- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c +++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c @@ -27,7 +27,7 @@ static void __debug_save_spe(u64 *pmscr_el1) * Check if the host is actually using it ? */ reg = read_sysreg_s(SYS_PMBLIMITR_EL1); - if (!(reg & BIT(SYS_PMBLIMITR_EL1_E_SHIFT))) + if (!(reg & BIT(PMBLIMITR_EL1_E_SHIFT))) return; /* Yes; save the control register and disable data generation */ diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index 65cf93dcc8ee..814ed18346b6 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -12,6 +12,7 @@ #define DRVNAME PMUNAME "_pmu" #define pr_fmt(fmt) DRVNAME ": " fmt +#include #include #include #include @@ -282,18 +283,18 @@ static u64 arm_spe_event_to_pmscr(struct perf_event *event) struct perf_event_attr *attr = &event->attr; u64 reg = 0; - reg |= ATTR_CFG_GET_FLD(attr, ts_enable) << SYS_PMSCR_EL1_TS_SHIFT; - reg |= ATTR_CFG_GET_FLD(attr, pa_enable) << SYS_PMSCR_EL1_PA_SHIFT; - reg |= ATTR_CFG_GET_FLD(attr, pct_enable) << SYS_PMSCR_EL1_PCT_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, ts_enable) << PMSCR_EL1_TS_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, pa_enable) << PMSCR_EL1_PA_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, pct_enable) << PMSCR_EL1_PCT_SHIFT; if (!attr->exclude_user) - reg |= BIT(SYS_PMSCR_EL1_E0SPE_SHIFT); + reg |= BIT(PMSCR_EL1_E0SPE_SHIFT); if (!attr->exclude_kernel) - reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT); + reg |= BIT(PMSCR_EL1_E1SPE_SHIFT); if (get_spe_event_has_cx(event)) - reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT); + reg |= BIT(PMSCR_EL1_CX_SHIFT); return reg; } @@ -302,8 +303,7 @@ static void arm_spe_event_sanitise_period(struct perf_event *event) { struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); u64 period = event->hw.sample_period; - u64 max_period = SYS_PMSIRR_EL1_INTERVAL_MASK - << SYS_PMSIRR_EL1_INTERVAL_SHIFT; + u64 max_period = PMSIRR_EL1_INTERVAL_MASK; if (period < spe_pmu->min_period) period = spe_pmu->min_period; @@ -322,7 +322,7 @@ static u64 arm_spe_event_to_pmsirr(struct perf_event *event) arm_spe_event_sanitise_period(event); - reg |= ATTR_CFG_GET_FLD(attr, jitter) << SYS_PMSIRR_EL1_RND_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, jitter) << PMSIRR_EL1_RND_SHIFT; reg |= event->hw.sample_period; return reg; @@ -333,18 +333,18 @@ static u64 arm_spe_event_to_pmsfcr(struct perf_event *event) struct perf_event_attr *attr = &event->attr; u64 reg = 0; - reg |= ATTR_CFG_GET_FLD(attr, load_filter) << SYS_PMSFCR_EL1_LD_SHIFT; - reg |= ATTR_CFG_GET_FLD(attr, store_filter) << SYS_PMSFCR_EL1_ST_SHIFT; - reg |= ATTR_CFG_GET_FLD(attr, branch_filter) << SYS_PMSFCR_EL1_B_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, load_filter) << PMSFCR_EL1_LD_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, store_filter) << PMSFCR_EL1_ST_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, branch_filter) << PMSFCR_EL1_B_SHIFT; if (reg) - reg |= BIT(SYS_PMSFCR_EL1_FT_SHIFT); + reg |= BIT(PMSFCR_EL1_FT_SHIFT); if (ATTR_CFG_GET_FLD(attr, event_filter)) - reg |= BIT(SYS_PMSFCR_EL1_FE_SHIFT); + reg |= BIT(PMSFCR_EL1_FE_SHIFT); if (ATTR_CFG_GET_FLD(attr, min_latency)) - reg |= BIT(SYS_PMSFCR_EL1_FL_SHIFT); + reg |= 
BIT(PMSFCR_EL1_FL_SHIFT); return reg; } @@ -359,7 +359,7 @@ static u64 arm_spe_event_to_pmslatfr(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; return ATTR_CFG_GET_FLD(attr, min_latency) - << SYS_PMSLATFR_EL1_MINLAT_SHIFT; + << PMSLATFR_EL1_MINLAT_SHIFT; } static void arm_spe_pmu_pad_buf(struct perf_output_handle *handle, int len) @@ -511,7 +511,7 @@ static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle, limit = buf->snapshot ? arm_spe_pmu_next_snapshot_off(handle) : arm_spe_pmu_next_off(handle); if (limit) - limit |= BIT(SYS_PMBLIMITR_EL1_E_SHIFT); + limit |= BIT(PMBLIMITR_EL1_E_SHIFT); limit += (u64)buf->base; base = (u64)buf->base + PERF_IDX2OFF(handle->head, buf); @@ -570,28 +570,28 @@ arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle) /* Service required? */ pmbsr = read_sysreg_s(SYS_PMBSR_EL1); - if (!(pmbsr & BIT(SYS_PMBSR_EL1_S_SHIFT))) + if (!(pmbsr & BIT(PMBSR_EL1_S_SHIFT))) return SPE_PMU_BUF_FAULT_ACT_SPURIOUS; /* * If we've lost data, disable profiling and also set the PARTIAL * flag to indicate that the last record is corrupted. */ - if (pmbsr & BIT(SYS_PMBSR_EL1_DL_SHIFT)) + if (pmbsr & BIT(PMBSR_EL1_DL_SHIFT)) perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED | PERF_AUX_FLAG_PARTIAL); /* Report collisions to userspace so that it can up the period */ - if (pmbsr & BIT(SYS_PMBSR_EL1_COLL_SHIFT)) + if (pmbsr & BIT(PMBSR_EL1_COLL_SHIFT)) perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION); /* We only expect buffer management events */ - switch (pmbsr & (SYS_PMBSR_EL1_EC_MASK << SYS_PMBSR_EL1_EC_SHIFT)) { - case SYS_PMBSR_EL1_EC_BUF: + switch (FIELD_GET(PMBSR_EL1_EC_MASK, pmbsr)) { + case PMBSR_EL1_EC_BUF: /* Handled below */ break; - case SYS_PMBSR_EL1_EC_FAULT_S1: - case SYS_PMBSR_EL1_EC_FAULT_S2: + case PMBSR_EL1_EC_FAULT_S1: + case PMBSR_EL1_EC_FAULT_S2: err_str = "Unexpected buffer fault"; goto out_err; default: @@ -600,9 +600,8 @@ arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle) } /* Buffer management event */ - switch (pmbsr & - (SYS_PMBSR_EL1_BUF_BSC_MASK << SYS_PMBSR_EL1_BUF_BSC_SHIFT)) { - case SYS_PMBSR_EL1_BUF_BSC_FULL: + switch (FIELD_GET(PMBSR_EL1_BUF_BSC_MASK, pmbsr)) { + case PMBSR_EL1_BUF_BSC_FULL: ret = SPE_PMU_BUF_FAULT_ACT_OK; goto out_stop; default: @@ -717,23 +716,23 @@ static int arm_spe_pmu_event_init(struct perf_event *event) return -EINVAL; reg = arm_spe_event_to_pmsfcr(event); - if ((reg & BIT(SYS_PMSFCR_EL1_FE_SHIFT)) && + if ((reg & BIT(PMSFCR_EL1_FE_SHIFT)) && !(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT)) return -EOPNOTSUPP; - if ((reg & BIT(SYS_PMSFCR_EL1_FT_SHIFT)) && + if ((reg & BIT(PMSFCR_EL1_FT_SHIFT)) && !(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP)) return -EOPNOTSUPP; - if ((reg & BIT(SYS_PMSFCR_EL1_FL_SHIFT)) && + if ((reg & BIT(PMSFCR_EL1_FL_SHIFT)) && !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT)) return -EOPNOTSUPP; set_spe_event_has_cx(event); reg = arm_spe_event_to_pmscr(event); if (!perfmon_capable() && - (reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) | - BIT(SYS_PMSCR_EL1_PCT_SHIFT)))) + (reg & (BIT(PMSCR_EL1_PA_SHIFT) | + BIT(PMSCR_EL1_PCT_SHIFT)))) return -EACCES; return 0; @@ -971,14 +970,14 @@ static void __arm_spe_pmu_dev_probe(void *info) /* Read PMBIDR first to determine whether or not we have access */ reg = read_sysreg_s(SYS_PMBIDR_EL1); - if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT)) { + if (reg & BIT(PMBIDR_EL1_P_SHIFT)) { dev_err(dev, "profiling buffer owned by higher exception level\n"); return; } /* Minimum alignment. 
If it's out-of-range, then fail the probe */ - fld = reg >> SYS_PMBIDR_EL1_ALIGN_SHIFT & SYS_PMBIDR_EL1_ALIGN_MASK; + fld = (reg & PMBIDR_EL1_ALIGN_MASK) >> PMBIDR_EL1_ALIGN_SHIFT; spe_pmu->align = 1 << fld; if (spe_pmu->align > SZ_2K) { dev_err(dev, "unsupported PMBIDR.Align [%d] on CPU %d\n", @@ -988,26 +987,26 @@ static void __arm_spe_pmu_dev_probe(void *info) /* It's now safe to read PMSIDR and figure out what we've got */ reg = read_sysreg_s(SYS_PMSIDR_EL1); - if (reg & BIT(SYS_PMSIDR_EL1_FE_SHIFT)) + if (reg & BIT(PMSIDR_EL1_FE_SHIFT)) spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT; - if (reg & BIT(SYS_PMSIDR_EL1_FT_SHIFT)) + if (reg & BIT(PMSIDR_EL1_FT_SHIFT)) spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP; - if (reg & BIT(SYS_PMSIDR_EL1_FL_SHIFT)) + if (reg & BIT(PMSIDR_EL1_FL_SHIFT)) spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT; - if (reg & BIT(SYS_PMSIDR_EL1_ARCHINST_SHIFT)) + if (reg & BIT(PMSIDR_EL1_ARCHINST_SHIFT)) spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST; - if (reg & BIT(SYS_PMSIDR_EL1_LDS_SHIFT)) + if (reg & BIT(PMSIDR_EL1_LDS_SHIFT)) spe_pmu->features |= SPE_PMU_FEAT_LDS; - if (reg & BIT(SYS_PMSIDR_EL1_ERND_SHIFT)) + if (reg & BIT(PMSIDR_EL1_ERND_SHIFT)) spe_pmu->features |= SPE_PMU_FEAT_ERND; /* This field has a spaced out encoding, so just use a look-up */ - fld = reg >> SYS_PMSIDR_EL1_INTERVAL_SHIFT & SYS_PMSIDR_EL1_INTERVAL_MASK; + fld = (reg & PMSIDR_EL1_INTERVAL_MASK) >> PMSIDR_EL1_INTERVAL_SHIFT; switch (fld) { case 0: spe_pmu->min_period = 256; @@ -1039,7 +1038,7 @@ static void __arm_spe_pmu_dev_probe(void *info) } /* Maximum record size. If it's out-of-range, then fail the probe */ - fld = reg >> SYS_PMSIDR_EL1_MAXSIZE_SHIFT & SYS_PMSIDR_EL1_MAXSIZE_MASK; + fld = (reg & PMSIDR_EL1_MAXSIZE_MASK) >> PMSIDR_EL1_MAXSIZE_SHIFT; spe_pmu->max_record_sz = 1 << fld; if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) { dev_err(dev, "unsupported PMSIDR_EL1.MaxSize [%d] on CPU %d\n", @@ -1047,7 +1046,7 @@ static void __arm_spe_pmu_dev_probe(void *info) return; } - fld = reg >> SYS_PMSIDR_EL1_COUNTSIZE_SHIFT & SYS_PMSIDR_EL1_COUNTSIZE_MASK; + fld = (reg & PMSIDR_EL1_COUNTSIZE_MASK) >> PMSIDR_EL1_COUNTSIZE_SHIFT; switch (fld) { default: dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n", -- cgit v1.2.3 From 956936041a56eaebc012cf62a00fafd86958ffd5 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 9 Jan 2023 13:26:19 -0600 Subject: arm64/sysreg: Convert SPE registers to automatic generation Convert all the SPE register defines to automatic generation. No functional changes. New registers and fields for SPEv1.2 are added with the conversion. Some of the PMBSR MSS field defines are kept as the automatic generation has no way to create multiple names for the same register bits. The meaning of the MSS field depends on other bits. 
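
Because the generated file can only give bits [15:0] one name (MSS),
decoding still has to dispatch on EC by hand; a minimal sketch of that
pattern, mirroring what arm_spe_pmu_buf_get_fault_act() does:

	u64 pmbsr = read_sysreg_s(SYS_PMBSR_EL1);
	u64 mss;

	switch (FIELD_GET(PMBSR_EL1_EC_MASK, pmbsr)) {
	case PMBSR_EL1_EC_BUF:
		/* MSS holds a buffer status code */
		mss = FIELD_GET(PMBSR_EL1_BUF_BSC_MASK, pmbsr);
		break;
	case PMBSR_EL1_EC_FAULT_S1:
	case PMBSR_EL1_EC_FAULT_S2:
		/* MSS holds a fault status code */
		mss = FIELD_GET(PMBSR_EL1_FAULT_FSC_MASK, pmbsr);
		break;
	}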
Tested-by: James Clark Signed-off-by: Rob Herring Reviewed-by: Mark Brown Reviewed-by: Anshuman Khandual Link: https://lore.kernel.org/r/20220825-arm-spe-v8-7-v4-3-327f860daf28@kernel.org Signed-off-by: Will Deacon --- arch/arm64/include/asm/sysreg.h | 91 ++------------------------ arch/arm64/tools/sysreg | 139 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 144 insertions(+), 86 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index dbb0e8e22cf4..db269eda7c1c 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -216,99 +216,18 @@ #define SYS_PAR_EL1_FST GENMASK(6, 1) /*** Statistical Profiling Extension ***/ -/* ID registers */ -#define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7) -#define PMSIDR_EL1_FE_SHIFT 0 -#define PMSIDR_EL1_FT_SHIFT 1 -#define PMSIDR_EL1_FL_SHIFT 2 -#define PMSIDR_EL1_ARCHINST_SHIFT 3 -#define PMSIDR_EL1_LDS_SHIFT 4 -#define PMSIDR_EL1_ERND_SHIFT 5 -#define PMSIDR_EL1_INTERVAL_SHIFT 8 -#define PMSIDR_EL1_INTERVAL_MASK GENMASK_ULL(11, 8) -#define PMSIDR_EL1_MAXSIZE_SHIFT 12 -#define PMSIDR_EL1_MAXSIZE_MASK GENMASK_ULL(15, 12) -#define PMSIDR_EL1_COUNTSIZE_SHIFT 16 -#define PMSIDR_EL1_COUNTSIZE_MASK GENMASK_ULL(19, 16) - -#define SYS_PMBIDR_EL1 sys_reg(3, 0, 9, 10, 7) -#define PMBIDR_EL1_ALIGN_SHIFT 0 -#define PMBIDR_EL1_ALIGN_MASK 0xfU -#define PMBIDR_EL1_P_SHIFT 4 -#define PMBIDR_EL1_F_SHIFT 5 - -/* Sampling controls */ -#define SYS_PMSCR_EL1 sys_reg(3, 0, 9, 9, 0) -#define PMSCR_EL1_E0SPE_SHIFT 0 -#define PMSCR_EL1_E1SPE_SHIFT 1 -#define PMSCR_EL1_CX_SHIFT 3 -#define PMSCR_EL1_PA_SHIFT 4 -#define PMSCR_EL1_TS_SHIFT 5 -#define PMSCR_EL1_PCT_SHIFT 6 - -#define SYS_PMSCR_EL2 sys_reg(3, 4, 9, 9, 0) -#define PMSCR_EL2_E0HSPE_SHIFT 0 -#define PMSCR_EL2_E2SPE_SHIFT 1 -#define PMSCR_EL2_CX_SHIFT 3 -#define PMSCR_EL2_PA_SHIFT 4 -#define PMSCR_EL2_TS_SHIFT 5 -#define PMSCR_EL2_PCT_SHIFT 6 - -#define SYS_PMSICR_EL1 sys_reg(3, 0, 9, 9, 2) - -#define SYS_PMSIRR_EL1 sys_reg(3, 0, 9, 9, 3) -#define PMSIRR_EL1_RND_SHIFT 0 -#define PMSIRR_EL1_INTERVAL_SHIFT 8 -#define PMSIRR_EL1_INTERVAL_MASK GENMASK_ULL(31, 8) - -/* Filtering controls */ -#define SYS_PMSNEVFR_EL1 sys_reg(3, 0, 9, 9, 1) - -#define SYS_PMSFCR_EL1 sys_reg(3, 0, 9, 9, 4) -#define PMSFCR_EL1_FE_SHIFT 0 -#define PMSFCR_EL1_FT_SHIFT 1 -#define PMSFCR_EL1_FL_SHIFT 2 -#define PMSFCR_EL1_B_SHIFT 16 -#define PMSFCR_EL1_LD_SHIFT 17 -#define PMSFCR_EL1_ST_SHIFT 18 - -#define SYS_PMSEVFR_EL1 sys_reg(3, 0, 9, 9, 5) #define PMSEVFR_EL1_RES0_IMP \ (GENMASK_ULL(47, 32) | GENMASK_ULL(23, 16) | GENMASK_ULL(11, 8) |\ BIT_ULL(6) | BIT_ULL(4) | BIT_ULL(2) | BIT_ULL(0)) #define PMSEVFR_EL1_RES0_V1P1 \ (PMSEVFR_EL1_RES0_IMP & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11))) -#define SYS_PMSLATFR_EL1 sys_reg(3, 0, 9, 9, 6) -#define PMSLATFR_EL1_MINLAT_SHIFT 0 - -/* Buffer controls */ -#define SYS_PMBLIMITR_EL1 sys_reg(3, 0, 9, 10, 0) -#define PMBLIMITR_EL1_E_SHIFT 0 -#define PMBLIMITR_EL1_FM_SHIFT 1 -#define PMBLIMITR_EL1_FM_MASK GENMASK_ULL(2, 1) -#define PMBLIMITR_EL1_FM_STOP_IRQ 0 - -#define SYS_PMBPTR_EL1 sys_reg(3, 0, 9, 10, 1) - /* Buffer error reporting */ -#define SYS_PMBSR_EL1 sys_reg(3, 0, 9, 10, 3) -#define PMBSR_EL1_COLL_SHIFT 16 -#define PMBSR_EL1_S_SHIFT 17 -#define PMBSR_EL1_EA_SHIFT 18 -#define PMBSR_EL1_DL_SHIFT 19 -#define PMBSR_EL1_EC_SHIFT 26 -#define PMBSR_EL1_EC_MASK GENMASK_ULL(31, 26) - -#define PMBSR_EL1_EC_BUF 0x0UL -#define PMBSR_EL1_EC_FAULT_S1 0x24UL -#define PMBSR_EL1_EC_FAULT_S2 0x25UL - -#define 
PMBSR_EL1_FAULT_FSC_SHIFT 0 -#define PMBSR_EL1_FAULT_FSC_MASK 0x3fUL - -#define PMBSR_EL1_BUF_BSC_SHIFT 0 -#define PMBSR_EL1_BUF_BSC_MASK 0x3fUL +#define PMBSR_EL1_FAULT_FSC_SHIFT PMBSR_EL1_MSS_SHIFT +#define PMBSR_EL1_FAULT_FSC_MASK PMBSR_EL1_MSS_MASK + +#define PMBSR_EL1_BUF_BSC_SHIFT PMBSR_EL1_MSS_SHIFT +#define PMBSR_EL1_BUF_BSC_MASK PMBSR_EL1_MSS_MASK #define PMBSR_EL1_BUF_BSC_FULL 0x1UL diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 184e58fd5631..c323833cf235 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -1618,6 +1618,130 @@ Sysreg FAR_EL1 3 0 6 0 0 Field 63:0 ADDR EndSysreg +Sysreg PMSCR_EL1 3 0 9 9 0 +Res0 63:8 +Field 7:6 PCT +Field 5 TS +Field 4 PA +Field 3 CX +Res0 2 +Field 1 E1SPE +Field 0 E0SPE +EndSysreg + +Sysreg PMSNEVFR_EL1 3 0 9 9 1 +Field 63:0 E +EndSysreg + +Sysreg PMSICR_EL1 3 0 9 9 2 +Field 63:56 ECOUNT +Res0 55:32 +Field 31:0 COUNT +EndSysreg + +Sysreg PMSIRR_EL1 3 0 9 9 3 +Res0 63:32 +Field 31:8 INTERVAL +Res0 7:1 +Field 0 RND +EndSysreg + +Sysreg PMSFCR_EL1 3 0 9 9 4 +Res0 63:19 +Field 18 ST +Field 17 LD +Field 16 B +Res0 15:4 +Field 3 FnE +Field 2 FL +Field 1 FT +Field 0 FE +EndSysreg + +Sysreg PMSEVFR_EL1 3 0 9 9 5 +Field 63:0 E +EndSysreg + +Sysreg PMSLATFR_EL1 3 0 9 9 6 +Res0 63:16 +Field 15:0 MINLAT +EndSysreg + +Sysreg PMSIDR_EL1 3 0 9 9 7 +Res0 63:25 +Field 24 PBT +Field 23:20 FORMAT +Enum 19:16 COUNTSIZE + 0b0010 12_BIT_SAT + 0b0011 16_BIT_SAT +EndEnum +Field 15:12 MAXSIZE +Enum 11:8 INTERVAL + 0b0000 256 + 0b0010 512 + 0b0011 768 + 0b0100 1024 + 0b0101 1536 + 0b0110 2048 + 0b0111 3072 + 0b1000 4096 +EndEnum +Res0 7 +Field 6 FnE +Field 5 ERND +Field 4 LDS +Field 3 ARCHINST +Field 2 FL +Field 1 FT +Field 0 FE +EndSysreg + +Sysreg PMBLIMITR_EL1 3 0 9 10 0 +Field 63:12 LIMIT +Res0 11:6 +Field 5 PMFZ +Res0 4:3 +Enum 2:1 FM + 0b00 FILL + 0b10 DISCARD +EndEnum +Field 0 E +EndSysreg + +Sysreg PMBPTR_EL1 3 0 9 10 1 +Field 63:0 PTR +EndSysreg + +Sysreg PMBSR_EL1 3 0 9 10 3 +Res0 63:32 +Enum 31:26 EC + 0b000000 BUF + 0b100100 FAULT_S1 + 0b100101 FAULT_S2 + 0b011110 FAULT_GPC + 0b011111 IMP_DEF +EndEnum +Res0 25:20 +Field 19 DL +Field 18 EA +Field 17 S +Field 16 COLL +Field 15:0 MSS +EndSysreg + +Sysreg PMBIDR_EL1 3 0 9 10 7 +Res0 63:12 +Enum 11:8 EA + 0b0000 NotDescribed + 0b0001 Ignored + 0b0010 SError +EndEnum +Res0 7:6 +Field 5 F +Field 4 P +Field 3:0 ALIGN +EndSysreg + SysregFields CONTEXTIDR_ELx Res0 63:32 Field 31:0 PROCID @@ -1772,6 +1896,21 @@ Sysreg FAR_EL2 3 4 6 0 0 Field 63:0 ADDR EndSysreg +Sysreg PMSCR_EL2 3 4 9 9 0 +Res0 63:8 +Enum 7:6 PCT + 0b00 VIRT + 0b01 PHYS + 0b11 GUEST +EndEnum +Field 5 TS +Field 4 PA +Field 3 CX +Res0 2 +Field 1 E2SPE +Field 0 E0HSPE +EndSysreg + Sysreg CONTEXTIDR_EL2 3 4 13 0 1 Fields CONTEXTIDR_ELx EndSysreg -- cgit v1.2.3 From 4998897b1e96d624bf094e5785b27023e17ba570 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 9 Jan 2023 13:26:22 -0600 Subject: perf: arm_spe: Support new SPEv1.2/v8.7 'not taken' event Arm SPEv1.2 (Armv8.7/v9.2) adds a new event, 'not taken', in bit 6 of the PMSEVFR_EL1 register. Update arm_spe_pmsevfr_res0() to support the additional event. 
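
For illustration only (the wrapper name is invented for this sketch), a
caller could test whether the 'not taken' filter is usable by checking
that bit 6 is no longer RES0 for the detected SPE version:

	/* Hypothetical convenience wrapper around the driver helper */
	static bool spe_can_filter_not_taken(u16 pmsver)
	{
		return !(arm_spe_pmsevfr_res0(pmsver) & BIT_ULL(6));
	}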
Tested-by: James Clark Signed-off-by: Rob Herring Reviewed-by: Anshuman Khandual Link: https://lore.kernel.org/r/20220825-arm-spe-v8-7-v4-6-327f860daf28@kernel.org Signed-off-by: Will Deacon --- arch/arm64/include/asm/sysreg.h | 2 ++ drivers/perf/arm_spe_pmu.c | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index db269eda7c1c..fc8787727792 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -221,6 +221,8 @@ BIT_ULL(6) | BIT_ULL(4) | BIT_ULL(2) | BIT_ULL(0)) #define PMSEVFR_EL1_RES0_V1P1 \ (PMSEVFR_EL1_RES0_IMP & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11))) +#define PMSEVFR_EL1_RES0_V1P2 \ + (PMSEVFR_EL1_RES0_V1P1 & ~BIT_ULL(6)) /* Buffer error reporting */ #define PMBSR_EL1_FAULT_FSC_SHIFT PMBSR_EL1_MSS_SHIFT diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index af6d3867c3e7..82f67e941bc4 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -677,9 +677,11 @@ static u64 arm_spe_pmsevfr_res0(u16 pmsver) case ID_AA64DFR0_EL1_PMSVer_IMP: return PMSEVFR_EL1_RES0_IMP; case ID_AA64DFR0_EL1_PMSVer_V1P1: + return PMSEVFR_EL1_RES0_V1P1; + case ID_AA64DFR0_EL1_PMSVer_V1P2: /* Return the highest version we support in default */ default: - return PMSEVFR_EL1_RES0_V1P1; + return PMSEVFR_EL1_RES0_V1P2; } } -- cgit v1.2.3 From ce514000da4f4b5f850f3339f805471e5c5c1caf Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:36 +0000 Subject: arm64/sme: Rename za_state to sme_state In preparation for adding support for storage for ZT0 to the thread_struct rename za_state to sme_state. Since ZT0 is accessible when PSTATE.ZA is set just like ZA itself we will extend the allocation done for ZA to cover it, avoiding the need to further expand task_struct for non-SME tasks. No functional changes. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-1-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimd.h | 15 +++++++++------ arch/arm64/include/asm/processor.h | 2 +- arch/arm64/kernel/fpsimd.c | 34 +++++++++++++++++----------------- arch/arm64/kernel/process.c | 13 +++++++------ arch/arm64/kernel/ptrace.c | 6 +++--- arch/arm64/kernel/signal.c | 8 ++++---- arch/arm64/kvm/fpsimd.c | 2 +- 7 files changed, 42 insertions(+), 38 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index e6fa1e2982c8..2d3fa80cd95d 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -61,7 +61,7 @@ extern void fpsimd_kvm_prepare(void); struct cpu_fp_state { struct user_fpsimd_state *st; void *sve_state; - void *za_state; + void *sme_state; u64 *svcr; unsigned int sve_vl; unsigned int sme_vl; @@ -355,14 +355,17 @@ extern int sme_get_current_vl(void); /* * Return how many bytes of memory are required to store the full SME - * specific state (currently just ZA) for task, given task's currently - * configured vector length. + * specific state for task, given task's currently configured vector + * length. 
*/ -static inline size_t za_state_size(struct task_struct const *task) +static inline size_t sme_state_size(struct task_struct const *task) { unsigned int vl = task_get_sme_vl(task); + size_t size; - return ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl)); + size = ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl)); + + return size; } #else @@ -382,7 +385,7 @@ static inline int sme_max_virtualisable_vl(void) { return 0; } static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; } static inline int sme_get_current_vl(void) { return -EINVAL; } -static inline size_t za_state_size(struct task_struct const *task) +static inline size_t sme_state_size(struct task_struct const *task) { return 0; } diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index d51b32a69309..3918f2a67970 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -161,7 +161,7 @@ struct thread_struct { enum fp_type fp_type; /* registers FPSIMD or SVE? */ unsigned int fpsimd_cpu; void *sve_state; /* SVE registers, if any */ - void *za_state; /* ZA register, if any */ + void *sme_state; /* ZA and ZT state, if any */ unsigned int vl[ARM64_VEC_MAX]; /* vector length */ unsigned int vl_onexec[ARM64_VEC_MAX]; /* vl after next exec */ unsigned long fault_address; /* fault info */ diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index dcc81e7200d4..9e168a9eb615 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -299,7 +299,7 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type, /* * TIF_SME controls whether a task can use SME without trapping while * in userspace, when TIF_SME is set then we must have storage - * alocated in sve_state and za_state to store the contents of both ZA + * alocated in sve_state and sme_state to store the contents of both ZA * and the SVE registers for both streaming and non-streaming modes. * * If both SVCR.ZA and SVCR.SM are disabled then at any point we @@ -429,7 +429,7 @@ static void task_fpsimd_load(void) write_sysreg_s(current->thread.svcr, SYS_SVCR); if (thread_za_enabled(¤t->thread)) - za_load_state(current->thread.za_state); + za_load_state(current->thread.sme_state); if (thread_sm_enabled(¤t->thread)) restore_ffr = system_supports_fa64(); @@ -490,7 +490,7 @@ static void fpsimd_save(void) *svcr = read_sysreg_s(SYS_SVCR); if (*svcr & SVCR_ZA_MASK) - za_save_state(last->za_state); + za_save_state(last->sme_state); /* If we are in streaming mode override regular SVE. */ if (*svcr & SVCR_SM_MASK) { @@ -1257,30 +1257,30 @@ void fpsimd_release_task(struct task_struct *dead_task) #ifdef CONFIG_ARM64_SME /* - * Ensure that task->thread.za_state is allocated and sufficiently large. + * Ensure that task->thread.sme_state is allocated and sufficiently large. * * This function should be used only in preparation for replacing - * task->thread.za_state with new data. The memory is always zeroed + * task->thread.sme_state with new data. The memory is always zeroed * here to prevent stale data from showing through: this is done in * the interest of testability and predictability, the architecture * guarantees that when ZA is enabled it will be zeroed. */ void sme_alloc(struct task_struct *task) { - if (task->thread.za_state) { - memset(task->thread.za_state, 0, za_state_size(task)); + if (task->thread.sme_state) { + memset(task->thread.sme_state, 0, sme_state_size(task)); return; } /* This could potentially be up to 64K. 
*/ - task->thread.za_state = - kzalloc(za_state_size(task), GFP_KERNEL); + task->thread.sme_state = + kzalloc(sme_state_size(task), GFP_KERNEL); } static void sme_free(struct task_struct *task) { - kfree(task->thread.za_state); - task->thread.za_state = NULL; + kfree(task->thread.sme_state); + task->thread.sme_state = NULL; } void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) @@ -1488,7 +1488,7 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs) sve_alloc(current, false); sme_alloc(current); - if (!current->thread.sve_state || !current->thread.za_state) { + if (!current->thread.sve_state || !current->thread.sme_state) { force_sig(SIGKILL); return; } @@ -1609,7 +1609,7 @@ static void fpsimd_flush_thread_vl(enum vec_type type) void fpsimd_flush_thread(void) { void *sve_state = NULL; - void *za_state = NULL; + void *sme_state = NULL; if (!system_supports_fpsimd()) return; @@ -1634,8 +1634,8 @@ void fpsimd_flush_thread(void) clear_thread_flag(TIF_SME); /* Defer kfree() while in atomic context */ - za_state = current->thread.za_state; - current->thread.za_state = NULL; + sme_state = current->thread.sme_state; + current->thread.sme_state = NULL; fpsimd_flush_thread_vl(ARM64_VEC_SME); current->thread.svcr = 0; @@ -1645,7 +1645,7 @@ void fpsimd_flush_thread(void) put_cpu_fpsimd_context(); kfree(sve_state); - kfree(za_state); + kfree(sme_state); } /* @@ -1711,7 +1711,7 @@ static void fpsimd_bind_task_to_cpu(void) WARN_ON(!system_supports_fpsimd()); last->st = ¤t->thread.uw.fpsimd_state; last->sve_state = current->thread.sve_state; - last->za_state = current->thread.za_state; + last->sme_state = current->thread.sme_state; last->sve_vl = task_get_sve_vl(current); last->sme_vl = task_get_sme_vl(current); last->svcr = ¤t->thread.svcr; diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 269ac1c25ae2..4ce0c4313ec6 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -311,23 +311,24 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) * This may be shortly freed if we exec() or if CLONE_SETTLS * but it's simpler to do it here. To avoid confusing the rest * of the code ensure that we have a sve_state allocated - * whenever za_state is allocated. + * whenever sme_state is allocated. 
*/ if (thread_za_enabled(&src->thread)) { dst->thread.sve_state = kzalloc(sve_state_size(src), GFP_KERNEL); if (!dst->thread.sve_state) return -ENOMEM; - dst->thread.za_state = kmemdup(src->thread.za_state, - za_state_size(src), - GFP_KERNEL); - if (!dst->thread.za_state) { + + dst->thread.sme_state = kmemdup(src->thread.sme_state, + sme_state_size(src), + GFP_KERNEL); + if (!dst->thread.sme_state) { kfree(dst->thread.sve_state); dst->thread.sve_state = NULL; return -ENOMEM; } } else { - dst->thread.za_state = NULL; + dst->thread.sme_state = NULL; clear_tsk_thread_flag(dst, TIF_SME); } diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 2686ab157601..67256e7772e4 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -1045,7 +1045,7 @@ static int za_get(struct task_struct *target, if (thread_za_enabled(&target->thread)) { start = end; end = ZA_PT_SIZE(vq); - membuf_write(&to, target->thread.za_state, end - start); + membuf_write(&to, target->thread.sme_state, end - start); } /* Zero any trailing padding */ @@ -1099,7 +1099,7 @@ static int za_set(struct task_struct *target, /* Allocate/reinit ZA storage */ sme_alloc(target); - if (!target->thread.za_state) { + if (!target->thread.sme_state) { ret = -ENOMEM; goto out; } @@ -1124,7 +1124,7 @@ static int za_set(struct task_struct *target, start = ZA_PT_ZA_OFFSET; end = ZA_PT_SIZE(vq); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - target->thread.za_state, + target->thread.sme_state, start, end); if (ret) goto out; diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index e0d09bf5b01b..27768809dd3e 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -389,7 +389,7 @@ static int preserve_za_context(struct za_context __user *ctx) * fpsimd_signal_preserve_current_state(). */ err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET, - current->thread.za_state, + current->thread.sme_state, ZA_SIG_REGS_SIZE(vq)); } @@ -420,7 +420,7 @@ static int restore_za_context(struct user_ctxs *user) /* * Careful: we are about __copy_from_user() directly into - * thread.za_state with preemption enabled, so protection is + * thread.sme_state with preemption enabled, so protection is * needed to prevent a racing context switch from writing stale * registers back over the new data. 
*/ @@ -429,13 +429,13 @@ static int restore_za_context(struct user_ctxs *user) /* From now, fpsimd_thread_switch() won't touch thread.sve_state */ sme_alloc(current); - if (!current->thread.za_state) { + if (!current->thread.sme_state) { current->thread.svcr &= ~SVCR_ZA_MASK; clear_thread_flag(TIF_SME); return -ENOMEM; } - err = __copy_from_user(current->thread.za_state, + err = __copy_from_user(current->thread.sme_state, (char __user const *)user->za + ZA_SIG_REGS_OFFSET, ZA_SIG_REGS_SIZE(vq)); diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 02dd7e9ebd39..235775d0c825 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -143,7 +143,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) fp_state.st = &vcpu->arch.ctxt.fp_regs; fp_state.sve_state = vcpu->arch.sve_state; fp_state.sve_vl = vcpu->arch.sve_max_vl; - fp_state.za_state = NULL; + fp_state.sme_state = NULL; fp_state.svcr = &vcpu->arch.svcr; fp_state.fp_type = &vcpu->arch.fp_type; -- cgit v1.2.3 From 8ef55603b8eaf9362464b3197eb139baa4bd18b9 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:40 +0000 Subject: arm64/esr: Document ISS for ZT0 being disabled SME2 defines a new ISS code for use when trapping acesses to ZT0, add a definition for it. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-5-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/esr.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 15b34fbfca66..5f3271e9d5df 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -341,6 +341,7 @@ #define ESR_ELx_SME_ISS_ILL 1 #define ESR_ELx_SME_ISS_SM_DISABLED 2 #define ESR_ELx_SME_ISS_ZA_DISABLED 3 +#define ESR_ELx_SME_ISS_ZT_DISABLED 4 #ifndef __ASSEMBLY__ #include -- cgit v1.2.3 From 2cdeecdb95134cc0abc76626a59c94073cda8a47 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:41 +0000 Subject: arm64/sme: Manually encode ZT0 load and store instructions In order to avoid unrealistic toolchain requirements we manually encode the instructions for loading and storing ZT0. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-6-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimdmacros.h | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h index 5e0910cf4832..cd03819a3b68 100644 --- a/arch/arm64/include/asm/fpsimdmacros.h +++ b/arch/arm64/include/asm/fpsimdmacros.h @@ -220,6 +220,28 @@ | ((\offset) & 7) .endm +/* + * LDR (ZT0) + * + * LDR ZT0, nx + */ +.macro _ldr_zt nx + _check_general_reg \nx + .inst 0xe11f8000 \ + | (\nx << 5) +.endm + +/* + * STR (ZT0) + * + * STR ZT0, nx + */ +.macro _str_zt nx + _check_general_reg \nx + .inst 0xe13f8000 \ + | (\nx << 5) +.endm + /* * Zero the entire ZA array * ZERO ZA -- cgit v1.2.3 From d4913eee152d3ffaf9a1e70073bc15e773625685 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:43 +0000 Subject: arm64/sme: Add basic enumeration for SME2 Add basic feature detection for SME2, detecting that the feature is present and disabling traps for ZT0. 
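
Once the capability is registered, other code gates ZT0 handling on it
via system_supports_sme2(); for example, a later patch in this series
sizes the SME state storage along these lines:

	static inline size_t sme_state_size(struct task_struct const *task)
	{
		unsigned int vl = task_get_sme_vl(task);
		size_t size = ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl));

		/* ZT0 storage is appended to ZA when SME2 is present */
		if (system_supports_sme2())
			size += ZT_SIG_REG_SIZE;

		return size;
	}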
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-8-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 6 ++++++ arch/arm64/include/asm/fpsimd.h | 1 + arch/arm64/kernel/cpufeature.c | 14 ++++++++++++++ arch/arm64/kernel/fpsimd.c | 11 +++++++++++ arch/arm64/tools/cpucaps | 1 + 5 files changed, 33 insertions(+) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 03d1c9d7af82..fc2c739f48f1 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -769,6 +769,12 @@ static __always_inline bool system_supports_sme(void) cpus_have_const_cap(ARM64_SME); } +static __always_inline bool system_supports_sme2(void) +{ + return IS_ENABLED(CONFIG_ARM64_SME) && + cpus_have_const_cap(ARM64_SME2); +} + static __always_inline bool system_supports_fa64(void) { return IS_ENABLED(CONFIG_ARM64_SME) && diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 2d3fa80cd95d..2a66e3b94553 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -118,6 +118,7 @@ extern void za_load_state(void const *state); struct arm64_cpu_capabilities; extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused); extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused); +extern void sme2_kernel_enable(const struct arm64_cpu_capabilities *__unused); extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused); extern u64 read_zcr_features(void); diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index a77315b338e6..fd90905bc2e6 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -282,6 +282,8 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = { static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = { ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), @@ -2649,6 +2651,18 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_cpuid_feature, .cpu_enable = fa64_kernel_enable, }, + { + .desc = "SME2", + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .capability = ARM64_SME2, + .sys_reg = SYS_ID_AA64PFR1_EL1, + .sign = FTR_UNSIGNED, + .field_pos = ID_AA64PFR1_EL1_SME_SHIFT, + .field_width = ID_AA64PFR1_EL1_SME_WIDTH, + .min_field_value = ID_AA64PFR1_EL1_SME_SME2, + .matches = has_cpuid_feature, + .cpu_enable = sme2_kernel_enable, + }, #endif /* CONFIG_ARM64_SME */ { .desc = "WFx with timeout", diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 9e168a9eb615..717ae4aaa021 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1298,6 +1298,17 @@ void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) isb(); } +/* + * This must be called after sme_kernel_enable(), we rely on the + * feature table being sorted to ensure this. 
+ */ +void sme2_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) +{ + /* Allow use of ZT0 */ + write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_EZT0_MASK, + SYS_SMCR_EL1); +} + /* * This must be called after sme_kernel_enable(), we rely on the * feature table being sorted to ensure this. diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index a86ee376920a..1222b0ed63a2 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -50,6 +50,7 @@ MTE MTE_ASYMM SME SME_FA64 +SME2 SPECTRE_V2 SPECTRE_V3A SPECTRE_V4 -- cgit v1.2.3 From d6138b4adc70729404efeb0133bc9f6e9ad78d03 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:44 +0000 Subject: arm64/sme: Provide storage for ZT0 When the system supports SME2 there is an additional register ZT0 which we must store when the task is using SME. Since ZT0 is accessible only when PSTATE.ZA is set just like ZA we allocate storage for it along with ZA, increasing the allocation size for the memory region where we store ZA and storing the data for ZT after that for ZA. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-9-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimd.h | 13 +++++++++++++ arch/arm64/kernel/process.c | 10 +++++----- 2 files changed, 18 insertions(+), 5 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 2a66e3b94553..8df769c20677 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -105,6 +105,13 @@ static inline void *sve_pffr(struct thread_struct *thread) return (char *)thread->sve_state + sve_ffr_offset(vl); } +static inline void *thread_zt_state(struct thread_struct *thread) +{ + /* The ZT register state is stored immediately after the ZA state */ + unsigned int sme_vq = sve_vq_from_vl(thread_get_sme_vl(thread)); + return thread->sme_state + ZA_SIG_REGS_SIZE(sme_vq); +} + extern void sve_save_state(void *state, u32 *pfpsr, int save_ffr); extern void sve_load_state(void const *state, u32 const *pfpsr, int restore_ffr); @@ -354,6 +361,9 @@ extern unsigned int sme_get_vl(void); extern int sme_set_current_vl(unsigned long arg); extern int sme_get_current_vl(void); +/* Will move with signal support */ +#define ZT_SIG_REG_SIZE 512 + /* * Return how many bytes of memory are required to store the full SME * specific state for task, given task's currently configured vector @@ -366,6 +376,9 @@ static inline size_t sme_state_size(struct task_struct const *task) size = ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl)); + if (system_supports_sme2()) + size += ZT_SIG_REG_SIZE; + return size; } diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 4ce0c4313ec6..71d59b5abede 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -307,11 +307,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) /* * In the unlikely event that we create a new thread with ZA - * enabled we should retain the ZA state so duplicate it here. - * This may be shortly freed if we exec() or if CLONE_SETTLS - * but it's simpler to do it here. To avoid confusing the rest - * of the code ensure that we have a sve_state allocated - * whenever sme_state is allocated. + * enabled we should retain the ZA and ZT state so duplicate + * it here. This may be shortly freed if we exec() or if + * CLONE_SETTLS but it's simpler to do it here. 
To avoid + * confusing the rest of the code ensure that we have a + * sve_state allocated whenever sme_state is allocated. */ if (thread_za_enabled(&src->thread)) { dst->thread.sve_state = kzalloc(sve_state_size(src), -- cgit v1.2.3 From 95fcec713259d440626526ed96ff5d3bac6179ea Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:45 +0000 Subject: arm64/sme: Implement context switching for ZT0 When the system supports SME2 the ZT0 register must be context switched as part of the floating point state. This register is stored immediately after ZA in memory and is only accessible when PSTATE.ZA is set so we handle it in the same functions we use to save and restore ZA. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-10-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimd.h | 4 ++-- arch/arm64/kernel/entry-fpsimd.S | 30 ++++++++++++++++++++---------- arch/arm64/kernel/fpsimd.c | 6 ++++-- 3 files changed, 26 insertions(+), 14 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 8df769c20677..0ce4465644fc 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -119,8 +119,8 @@ extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1); extern unsigned int sve_get_vl(void); extern void sve_set_vq(unsigned long vq_minus_1); extern void sme_set_vq(unsigned long vq_minus_1); -extern void za_save_state(void *state); -extern void za_load_state(void const *state); +extern void sme_save_state(void *state, int zt); +extern void sme_load_state(void const *state, int zt); struct arm64_cpu_capabilities; extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused); diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S index 229436f33df5..6325db1a2179 100644 --- a/arch/arm64/kernel/entry-fpsimd.S +++ b/arch/arm64/kernel/entry-fpsimd.S @@ -100,25 +100,35 @@ SYM_FUNC_START(sme_set_vq) SYM_FUNC_END(sme_set_vq) /* - * Save the SME state + * Save the ZA and ZT state * * x0 - pointer to buffer for state + * x1 - number of ZT registers to save */ -SYM_FUNC_START(za_save_state) - _sme_rdsvl 1, 1 // x1 = VL/8 - sme_save_za 0, x1, 12 +SYM_FUNC_START(sme_save_state) + _sme_rdsvl 2, 1 // x2 = VL/8 + sme_save_za 0, x2, 12 // Leaves x0 pointing to the end of ZA + + cbz x1, 1f + _str_zt 0 +1: ret -SYM_FUNC_END(za_save_state) +SYM_FUNC_END(sme_save_state) /* - * Load the SME state + * Load the ZA and ZT state * * x0 - pointer to buffer for state + * x1 - number of ZT registers to save */ -SYM_FUNC_START(za_load_state) - _sme_rdsvl 1, 1 // x1 = VL/8 - sme_load_za 0, x1, 12 +SYM_FUNC_START(sme_load_state) + _sme_rdsvl 2, 1 // x2 = VL/8 + sme_load_za 0, x2, 12 // Leaves x0 pointing to the end of ZA + + cbz x1, 1f + _ldr_zt 0 +1: ret -SYM_FUNC_END(za_load_state) +SYM_FUNC_END(sme_load_state) #endif /* CONFIG_ARM64_SME */ diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 717ae4aaa021..cec8b43e7888 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -429,7 +429,8 @@ static void task_fpsimd_load(void) write_sysreg_s(current->thread.svcr, SYS_SVCR); if (thread_za_enabled(¤t->thread)) - za_load_state(current->thread.sme_state); + sme_load_state(current->thread.sme_state, + system_supports_sme2()); if (thread_sm_enabled(¤t->thread)) restore_ffr = system_supports_fa64(); @@ -490,7 +491,8 @@ static void fpsimd_save(void) *svcr = 
read_sysreg_s(SYS_SVCR); if (*svcr & SVCR_ZA_MASK) - za_save_state(last->sme_state); + sme_save_state(last->sme_state, + system_supports_sme2()); /* If we are in streaming mode override regular SVE. */ if (*svcr & SVCR_SM_MASK) { -- cgit v1.2.3 From ee072cf708048c0d718a88159ae7985dd96d316f Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:46 +0000 Subject: arm64/sme: Implement signal handling for ZT Add a new signal context type for ZT which is present in the signal frame when ZA is enabled and ZT is supported by the system. In order to account for the possible addition of further ZT registers in the future we make the number of registers variable in the ABI, though currently the only possible number is 1. We could just use a bare list head for the context since the number of registers can be inferred from the size of the context but for usability and future extensibility we define a header with the number of registers and some reserved fields in it. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-11-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimd.h | 3 - arch/arm64/include/uapi/asm/sigcontext.h | 19 ++++++ arch/arm64/kernel/signal.c | 105 +++++++++++++++++++++++++++++++ 3 files changed, 124 insertions(+), 3 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 0ce4465644fc..67f2fb781f59 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -361,9 +361,6 @@ extern unsigned int sme_get_vl(void); extern int sme_set_current_vl(unsigned long arg); extern int sme_get_current_vl(void); -/* Will move with signal support */ -#define ZT_SIG_REG_SIZE 512 - /* * Return how many bytes of memory are required to store the full SME * specific state for task, given task's currently configured vector diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h index 9525041e4a14..46e9072985a5 100644 --- a/arch/arm64/include/uapi/asm/sigcontext.h +++ b/arch/arm64/include/uapi/asm/sigcontext.h @@ -152,6 +152,14 @@ struct za_context { __u16 __reserved[3]; }; +#define ZT_MAGIC 0x5a544e01 + +struct zt_context { + struct _aarch64_ctx head; + __u16 nregs; + __u16 __reserved[3]; +}; + #endif /* !__ASSEMBLY__ */ #include @@ -304,4 +312,15 @@ struct za_context { #define ZA_SIG_CONTEXT_SIZE(vq) \ (ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq)) +#define ZT_SIG_REG_SIZE 512 + +#define ZT_SIG_REG_BYTES (ZT_SIG_REG_SIZE / 8) + +#define ZT_SIG_REGS_OFFSET sizeof(struct zt_context) + +#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * n) + +#define ZT_SIG_CONTEXT_SIZE(n) \ + (sizeof(struct zt_context) + ZT_SIG_REGS_SIZE(n)) + #endif /* _UAPI__ASM_SIGCONTEXT_H */ diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 27768809dd3e..1c5e557a3617 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -57,6 +57,7 @@ struct rt_sigframe_user_layout { unsigned long esr_offset; unsigned long sve_offset; unsigned long za_offset; + unsigned long zt_offset; unsigned long extra_offset; unsigned long end_offset; }; @@ -221,6 +222,7 @@ struct user_ctxs { struct fpsimd_context __user *fpsimd; struct sve_context __user *sve; struct za_context __user *za; + struct zt_context __user *zt; }; #ifdef CONFIG_ARM64_SVE @@ -447,11 +449,81 @@ static int restore_za_context(struct user_ctxs *user) return 0; } + +static int preserve_zt_context(struct zt_context __user *ctx) +{ 
+ int err = 0; + u16 reserved[ARRAY_SIZE(ctx->__reserved)]; + + if (WARN_ON(!thread_za_enabled(¤t->thread))) + return -EINVAL; + + memset(reserved, 0, sizeof(reserved)); + + __put_user_error(ZT_MAGIC, &ctx->head.magic, err); + __put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16), + &ctx->head.size, err); + __put_user_error(1, &ctx->nregs, err); + BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved)); + err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved)); + + /* + * This assumes that the ZT state has already been saved to + * the task struct by calling the function + * fpsimd_signal_preserve_current_state(). + */ + err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET, + thread_zt_state(¤t->thread), + ZT_SIG_REGS_SIZE(1)); + + return err ? -EFAULT : 0; +} + +static int restore_zt_context(struct user_ctxs *user) +{ + int err; + struct zt_context zt; + + /* ZA must be restored first for this check to be valid */ + if (!thread_za_enabled(¤t->thread)) + return -EINVAL; + + if (__copy_from_user(&zt, user->zt, sizeof(zt))) + return -EFAULT; + + if (zt.nregs != 1) + return -EINVAL; + + if (zt.head.size != ZT_SIG_CONTEXT_SIZE(zt.nregs)) + return -EINVAL; + + /* + * Careful: we are about __copy_from_user() directly into + * thread.zt_state with preemption enabled, so protection is + * needed to prevent a racing context switch from writing stale + * registers back over the new data. + */ + + fpsimd_flush_task_state(current); + /* From now, fpsimd_thread_switch() won't touch ZT in thread state */ + + err = __copy_from_user(thread_zt_state(¤t->thread), + (char __user const *)user->zt + + ZT_SIG_REGS_OFFSET, + ZT_SIG_REGS_SIZE(1)); + if (err) + return -EFAULT; + + return 0; +} + #else /* ! CONFIG_ARM64_SME */ /* Turn any non-optimised out attempts to use these into a link error: */ extern int preserve_za_context(void __user *ctx); extern int restore_za_context(struct user_ctxs *user); +extern int preserve_zt_context(void __user *ctx); +extern int restore_zt_context(struct user_ctxs *user); #endif /* ! 
CONFIG_ARM64_SME */

@@ -469,6 +541,7 @@ static int parse_user_sigframe(struct user_ctxs *user,
 	user->fpsimd = NULL;
 	user->sve = NULL;
 	user->za = NULL;
+	user->zt = NULL;

 	if (!IS_ALIGNED((unsigned long)base, 16))
 		goto invalid;
@@ -547,6 +620,19 @@ static int parse_user_sigframe(struct user_ctxs *user,
 			user->za = (struct za_context __user *)head;
 			break;

+		case ZT_MAGIC:
+			if (!system_supports_sme2())
+				goto invalid;
+
+			if (user->zt)
+				goto invalid;
+
+			if (size < sizeof(*user->zt))
+				goto invalid;
+
+			user->zt = (struct zt_context __user *)head;
+			break;
+
 		case EXTRA_MAGIC:
 			if (have_extra_context)
 				goto invalid;
@@ -669,6 +755,9 @@ static int restore_sigframe(struct pt_regs *regs,
 	if (err == 0 && system_supports_sme() && user.za)
 		err = restore_za_context(&user);

+	if (err == 0 && system_supports_sme2() && user.zt)
+		err = restore_zt_context(&user);
+
 	return err;
 }

@@ -769,6 +858,15 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
 			return err;
 	}

+	if (system_supports_sme2()) {
+		if (add_all || thread_za_enabled(&current->thread)) {
+			err = sigframe_alloc(user, &user->zt_offset,
+					     ZT_SIG_CONTEXT_SIZE(1));
+			if (err)
+				return err;
+		}
+	}
+
 	return sigframe_alloc_end(user);
 }

@@ -824,6 +922,13 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user,
 		err |= preserve_za_context(za_ctx);
 	}

+	/* ZT state if present */
+	if (system_supports_sme2() && err == 0 && user->zt_offset) {
+		struct zt_context __user *zt_ctx =
+			apply_user_offset(user, user->zt_offset);
+		err |= preserve_zt_context(zt_ctx);
+	}
+
 	if (err == 0 && user->extra_offset) {
 		char __user *sfp = (char __user *)user->sigframe;
 		char __user *userp =
-- 
cgit v1.2.3 


From 7d5d8601e4577c918fdb5fa922c634a73557ac0f Mon Sep 17 00:00:00 2001
From: Mark Brown 
Date: Mon, 16 Jan 2023 16:04:48 +0000
Subject: arm64/sme: Add hwcaps for SME 2 and 2.1 features

In order to allow userspace to discover the presence of the new SME
features, add hwcaps for them.

Signed-off-by: Mark Brown 
Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-13-f2fa0aef982f@kernel.org
Signed-off-by: Catalin Marinas 
---
 Documentation/arm64/elf_hwcaps.rst  | 18 ++++++++++++++++++
 arch/arm64/include/asm/hwcap.h      |  6 ++++++
 arch/arm64/include/uapi/asm/hwcap.h |  6 ++++++
 arch/arm64/kernel/cpufeature.c      | 14 ++++++++++++++
 arch/arm64/kernel/cpuinfo.c         |  6 ++++++
 5 files changed, 50 insertions(+)

(limited to 'arch/arm64/include/asm')

diff --git a/Documentation/arm64/elf_hwcaps.rst b/Documentation/arm64/elf_hwcaps.rst
index 6fed84f935df..8a9d4bf7daf4 100644
--- a/Documentation/arm64/elf_hwcaps.rst
+++ b/Documentation/arm64/elf_hwcaps.rst
@@ -284,6 +284,24 @@ HWCAP2_RPRFM
 HWCAP2_SVE2P1
     Functionality implied by ID_AA64ZFR0_EL1.SVEver == 0b0010.

+HWCAP2_SME2
+    Functionality implied by ID_AA64SMFR0_EL1.SMEver == 0b0001.
+
+HWCAP2_SME2P1
+    Functionality implied by ID_AA64SMFR0_EL1.SMEver == 0b0010.
+
+HWCAP2_SMEI16I32
+    Functionality implied by ID_AA64SMFR0_EL1.I16I32 == 0b0101.
+
+HWCAP2_SMEBI32I32
+    Functionality implied by ID_AA64SMFR0_EL1.BI32I32 == 0b1.
+
+HWCAP2_SMEB16B16
+    Functionality implied by ID_AA64SMFR0_EL1.B16B16 == 0b1.
+
+HWCAP2_SMEF16F16
+    Functionality implied by ID_AA64SMFR0_EL1.F16F16 == 0b1.
+
 4.
Unused AT_HWCAP bits ----------------------- diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index 06dd12c514e6..475c803ecf42 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -123,6 +123,12 @@ #define KERNEL_HWCAP_CSSC __khwcap2_feature(CSSC) #define KERNEL_HWCAP_RPRFM __khwcap2_feature(RPRFM) #define KERNEL_HWCAP_SVE2P1 __khwcap2_feature(SVE2P1) +#define KERNEL_HWCAP_SME2 __khwcap2_feature(SME2) +#define KERNEL_HWCAP_SME2P1 __khwcap2_feature(SME2P1) +#define KERNEL_HWCAP_SME_I16I32 __khwcap2_feature(SME_I16I32) +#define KERNEL_HWCAP_SME_BI32I32 __khwcap2_feature(SME_BI32I32) +#define KERNEL_HWCAP_SME_B16B16 __khwcap2_feature(SME_B16B16) +#define KERNEL_HWCAP_SME_F16F16 __khwcap2_feature(SME_F16F16) /* * This yields a mask that user programs can use to figure out what diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index b713d30544f1..69a4fb749c65 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -96,5 +96,11 @@ #define HWCAP2_CSSC (1UL << 34) #define HWCAP2_RPRFM (1UL << 35) #define HWCAP2_SVE2P1 (1UL << 36) +#define HWCAP2_SME2 (1UL << 37) +#define HWCAP2_SME2P1 (1UL << 38) +#define HWCAP2_SME_I16I32 (1UL << 39) +#define HWCAP2_SME_BI32I32 (1UL << 40) +#define HWCAP2_SME_B16B16 (1UL << 41) +#define HWCAP2_SME_F16F16 (1UL << 42) #endif /* _UAPI__ASM_HWCAP_H */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index fd90905bc2e6..5bd959bd9a1f 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -288,12 +288,20 @@ static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = { FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I32_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_B16B16_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F16_SHIFT, 1, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I8I32_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F32_SHIFT, 1, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_B16F32_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_BI32I32_SHIFT, 1, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F32F32_SHIFT, 1, 0), ARM64_FTR_END, @@ -2841,11 +2849,17 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { #ifdef CONFIG_ARM64_SME HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SME_IMP, CAP_HWCAP, KERNEL_HWCAP_SME), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_FA64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64), + HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_SMEver_SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1), + HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_SMEver_SME2, CAP_HWCAP, KERNEL_HWCAP_SME2), 
HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I16I64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F64F64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64), + HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I16I32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I16I32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32), + HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_B16B16_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_B16B16_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16), + HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F16F16_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F16F16_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I8I32_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I8I32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F16F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_B16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_B16F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32), + HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_BI32I32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_BI32I32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F32F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F32F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32), #endif /* CONFIG_ARM64_SME */ {}, diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 379695262b77..85e54417d141 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -119,6 +119,12 @@ static const char *const hwcap_str[] = { [KERNEL_HWCAP_CSSC] = "cssc", [KERNEL_HWCAP_RPRFM] = "rprfm", [KERNEL_HWCAP_SVE2P1] = "sve2p1", + [KERNEL_HWCAP_SME2] = "sme2", + [KERNEL_HWCAP_SME2P1] = "sme2p1", + [KERNEL_HWCAP_SME_I16I32] = "smei16i32", + [KERNEL_HWCAP_SME_BI32I32] = "smebi32i32", + [KERNEL_HWCAP_SME_B16B16] = "smeb16b16", + [KERNEL_HWCAP_SME_F16F16] = "smef16f16", }; #ifdef CONFIG_COMPAT -- cgit v1.2.3 From a89c6bcdac22bec1bfbe6e64060b4cf5838d4f47 Mon Sep 17 00:00:00 2001 From: Gabriel Krisman Bertazi Date: Mon, 9 Jan 2023 12:19:55 -0300 Subject: arm64: Avoid repeated AA64MMFR1_EL1 register read on pagefault path Accessing AA64MMFR1_EL1 is expensive in KVM guests, since it is emulated in the hypervisor. In fact, ARM documentation mentions some feature registers are not supposed to be accessed frequently by the OS, and therefore should be emulated for guests [1]. Commit 0388f9c74330 ("arm64: mm: Implement arch_wants_old_prefaulted_pte()") introduced a read of this register in the page fault path. But, even when the feature of setting faultaround pages with the old flag is disabled for a given cpu, we are still paying the cost of checking the register on every pagefault. This results in an explosion of vmexit events in KVM guests, which directly impacts the performance of virtualized workloads. For instance, running kernbench yields a 15% increase in system time solely due to the increased vmexit cycles. This patch avoids the extra cost by using the sanitized cached value. It should be safe to do so, since this register mustn't change for a given cpu. 
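As a rough sketch (not part of this patch; the helper name is hypothetical), the
cheap feature-check pattern relies on the kernel's sanitised copy of the ID
register rather than a raw MRS read that a hypervisor may trap:

	/* Illustrative only: mirrors the cpu_has_hw_af() change below. */
	static inline bool example_feature_check(void)
	{
		u64 mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

		return cpuid_feature_extract_unsigned_field(mmfr1,
					ID_AA64MMFR1_EL1_HAFDBS_SHIFT) > 0;
	}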
[1] https://developer.arm.com/-/media/Arm%20Developer%20Community/PDF/Learn%20the%20Architecture/Armv8-A%20virtualization.pdf?revision=a765a7df-1a00-434d-b241-357bfda2dd31 Signed-off-by: Gabriel Krisman Bertazi Acked-by: Will Deacon Reviewed-by: Anshuman Khandual Link: https://lore.kernel.org/r/20230109151955.8292-1-krisman@suse.de Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 03d1c9d7af82..3d1a5677321e 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -864,7 +864,11 @@ static inline bool cpu_has_hw_af(void) if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM)) return false; - mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); + /* + * Use cached version to avoid emulated msr operation on KVM + * guests. + */ + mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); return cpuid_feature_extract_unsigned_field(mmfr1, ID_AA64MMFR1_EL1_HAFDBS_SHIFT); } -- cgit v1.2.3 From 2198d07c509f1db4a1185d1f65aaada794c6ea59 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 9 Jan 2023 18:47:59 +0100 Subject: arm64: Always load shadow stack pointer directly from the task struct All occurrences of the scs_load macro load the value of the shadow call stack pointer from the task which is current at that point. So instead of taking a task struct register argument in the scs_load macro to specify the task struct to load from, let's always reference the current task directly. This should make it much harder to exploit any instruction sequences reloading the shadow call stack pointer register from memory. Signed-off-by: Ard Biesheuvel Acked-by: Mark Rutland Reviewed-by: Kees Cook Link: https://lore.kernel.org/r/20230109174800.3286265-2-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/scs.h | 7 ++++--- arch/arm64/kernel/entry.S | 4 ++-- arch/arm64/kernel/head.S | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h index ff7da1268a52..13df982a0808 100644 --- a/arch/arm64/include/asm/scs.h +++ b/arch/arm64/include/asm/scs.h @@ -10,15 +10,16 @@ #ifdef CONFIG_SHADOW_CALL_STACK scs_sp .req x18 - .macro scs_load tsk - ldr scs_sp, [\tsk, #TSK_TI_SCS_SP] + .macro scs_load_current + get_current_task scs_sp + ldr scs_sp, [scs_sp, #TSK_TI_SCS_SP] .endm .macro scs_save tsk str scs_sp, [\tsk, #TSK_TI_SCS_SP] .endm #else - .macro scs_load tsk + .macro scs_load_current .endm .macro scs_save tsk diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 11cb99c4d298..546f7773238e 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -275,7 +275,7 @@ alternative_if ARM64_HAS_ADDRESS_AUTH alternative_else_nop_endif 1: - scs_load tsk + scs_load_current .else add x21, sp, #PT_REGS_SIZE get_current_task tsk @@ -848,7 +848,7 @@ SYM_FUNC_START(cpu_switch_to) msr sp_el0, x1 ptrauth_keys_install_kernel x1, x8, x9, x10 scs_save x0 - scs_load x1 + scs_load_current ret SYM_FUNC_END(cpu_switch_to) NOKPROBE(cpu_switch_to) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 952e17bd1c0b..b9c1a506798e 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -404,7 +404,7 @@ SYM_FUNC_END(create_kernel_mapping) stp xzr, xzr, [sp, #S_STACKFRAME] add x29, sp, #S_STACKFRAME - scs_load \tsk + scs_load_current adr_l 
\tmp1, __per_cpu_offset
	ldr	w\tmp2, [\tsk, #TSK_TI_CPU]
-- 
cgit v1.2.3 


From 846b73a4a3d0bf289ee924f6101c4f842ecc5b81 Mon Sep 17 00:00:00 2001
From: Amit Daniel Kachhap 
Date: Wed, 11 Jan 2023 11:07:00 +0530
Subject: arm64: Add compat hwcap FPHP and ASIMDHP

These hwcaps were added earlier for 32-bit native arm kernel by commit
c00a19c8b143 ("ARM: 9268/1: vfp: Add hwcap FPHP and ASIMDHP for FEAT_FP16")
and hence the corresponding changes added in 32-bit compat arm64 kernel
for similar userspace interfaces.

Floating point half-precision (FPHP) and Advanced SIMD half-precision
(ASIMDHP) represent the Armv8 FP16 feature extension and are already
advertised in native arm64 kernel.

Signed-off-by: Amit Daniel Kachhap 
Reviewed-by: Mark Brown 
Link: https://lore.kernel.org/r/20230111053706.13994-2-amit.kachhap@arm.com
Signed-off-by: Catalin Marinas 
---
 arch/arm64/include/asm/hwcap.h | 2 ++
 arch/arm64/kernel/cpufeature.c | 6 ++++--
 arch/arm64/kernel/cpuinfo.c    | 2 ++
 3 files changed, 8 insertions(+), 2 deletions(-)

(limited to 'arch/arm64/include/asm')

diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 06dd12c514e6..8d0d412aba3c 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -31,6 +31,8 @@
 #define COMPAT_HWCAP_VFPD32	(1 << 19)
 #define COMPAT_HWCAP_LPAE	(1 << 20)
 #define COMPAT_HWCAP_EVTSTRM	(1 << 21)
+#define COMPAT_HWCAP_FPHP	(1 << 22)
+#define COMPAT_HWCAP_ASIMDHP	(1 << 23)

 #define COMPAT_HWCAP2_AES	(1 << 0)
 #define COMPAT_HWCAP2_PMULL	(1 << 1)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index a77315b338e6..4c589ff7e5ea 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -444,8 +444,8 @@ static const struct arm64_ftr_bits ftr_mvfr0[] = {

 static const struct arm64_ftr_bits ftr_mvfr1[] = {
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDFMAC_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPHP_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDHP_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPHP_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDHP_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDSP_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDInt_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDLS_SHIFT, 4, 0),
@@ -2866,6 +2866,8 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
 	/* Arm v8 mandates MVFR0.FPDP == {0, 2}.
So, piggy back on this for the presence of VFP support */ HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_EL1_FPDP_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP), HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_EL1_FPDP_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3), + HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_EL1_FPHP_SHIFT, 4, FTR_UNSIGNED, 3, CAP_COMPAT_HWCAP, COMPAT_HWCAP_FPHP), + HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_EL1_SIMDHP_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDHP), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1), diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 379695262b77..195b44a99f5d 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -146,6 +146,8 @@ static const char *const compat_hwcap_str[] = { [COMPAT_KERNEL_HWCAP(VFPD32)] = NULL, /* Not possible on arm64 */ [COMPAT_KERNEL_HWCAP(LPAE)] = "lpae", [COMPAT_KERNEL_HWCAP(EVTSTRM)] = "evtstrm", + [COMPAT_KERNEL_HWCAP(FPHP)] = "fphp", + [COMPAT_KERNEL_HWCAP(ASIMDHP)] = "asimdhp", }; #define COMPAT_KERNEL_HWCAP2(x) const_ilog2(COMPAT_HWCAP2_ ## x) -- cgit v1.2.3 From 27addd402a73cb97b1529ea4e56be7eb82d3c478 Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Wed, 11 Jan 2023 11:07:01 +0530 Subject: arm64: Add compat hwcap ASIMDDP This hwcap was added earlier for 32-bit native arm kernel by commit 62ea0d873af3 ("ARM: 9269/1: vfp: Add hwcap for FEAT_DotProd") and hence the corresponding changes added in 32-bit compat arm64 kernel for similar user interfaces. Advanced Dot product is a feature (FEAT_DotProd) present in both AArch32/AArch64 state for Armv8 and is already advertised in native arm64 kernel. 
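A rough userspace sketch (not part of this patch): a 32-bit program can probe
the new bit via the auxiliary vector. The fallback define below is only for
illustration and mirrors COMPAT_HWCAP_ASIMDDP above:

	#include <stdio.h>
	#include <sys/auxv.h>

	#ifndef HWCAP_ASIMDDP
	#define HWCAP_ASIMDDP	(1UL << 24)	/* mirrors COMPAT_HWCAP_ASIMDDP */
	#endif

	int main(void)
	{
		unsigned long hwcaps = getauxval(AT_HWCAP);

		printf("asimddp: %s\n", (hwcaps & HWCAP_ASIMDDP) ? "yes" : "no");
		return 0;
	}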
Signed-off-by: Amit Daniel Kachhap Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/20230111053706.13994-3-amit.kachhap@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/hwcap.h | 1 + arch/arm64/kernel/cpufeature.c | 3 ++- arch/arm64/kernel/cpuinfo.c | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index 8d0d412aba3c..d3d8a992ab05 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -33,6 +33,7 @@ #define COMPAT_HWCAP_EVTSTRM (1 << 21) #define COMPAT_HWCAP_FPHP (1 << 22) #define COMPAT_HWCAP_ASIMDHP (1 << 23) +#define COMPAT_HWCAP_ASIMDDP (1 << 24) #define COMPAT_HWCAP2_AES (1 << 0) #define COMPAT_HWCAP2_PMULL (1 << 1) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 4c589ff7e5ea..e213a3fcbb9b 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -534,7 +534,7 @@ static const struct arm64_ftr_bits ftr_id_isar6[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SPECRES_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_FHM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_DP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_DP_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_JSCVT_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -2873,6 +2873,7 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32), + HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDDP), #endif {}, }; diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 195b44a99f5d..e94eb1a4f1f4 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -148,6 +148,7 @@ static const char *const compat_hwcap_str[] = { [COMPAT_KERNEL_HWCAP(EVTSTRM)] = "evtstrm", [COMPAT_KERNEL_HWCAP(FPHP)] = "fphp", [COMPAT_KERNEL_HWCAP(ASIMDHP)] = "asimdhp", + [COMPAT_KERNEL_HWCAP(ASIMDDP)] = "asimddp", }; #define COMPAT_KERNEL_HWCAP2(x) const_ilog2(COMPAT_HWCAP2_ ## x) -- cgit v1.2.3 From 4a87be25b02b33948c293cae47a7f27d8b9bf20f Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Wed, 11 Jan 2023 11:07:02 +0530 Subject: arm64: Add compat hwcap ASIMDFHM This hwcap was added earlier for 32-bit native arm kernel by commit ce4835497c20 ("ARM: 9270/1: vfp: Add hwcap for FEAT_FHM") and hence the corresponding changes added in 32-bit compat arm64 kernel for similar user interfaces. Floating-point half-precision multiplication (FHM) is a feature present in AArch32/AArch64 state for Armv8. This hwcap is already advertised in native arm64 kernel. 
Signed-off-by: Amit Daniel Kachhap 
Reviewed-by: Mark Brown 
Link: https://lore.kernel.org/r/20230111053706.13994-4-amit.kachhap@arm.com
Signed-off-by: Catalin Marinas 
---
 arch/arm64/include/asm/hwcap.h | 1 +
 arch/arm64/kernel/cpufeature.c | 3 ++-
 arch/arm64/kernel/cpuinfo.c    | 1 +
 3 files changed, 4 insertions(+), 1 deletion(-)

(limited to 'arch/arm64/include/asm')

diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index d3d8a992ab05..5891e27b840b 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -34,6 +34,7 @@
 #define COMPAT_HWCAP_FPHP	(1 << 22)
 #define COMPAT_HWCAP_ASIMDHP	(1 << 23)
 #define COMPAT_HWCAP_ASIMDDP	(1 << 24)
+#define COMPAT_HWCAP_ASIMDFHM	(1 << 25)

 #define COMPAT_HWCAP2_AES	(1 << 0)
 #define COMPAT_HWCAP2_PMULL	(1 << 1)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index e213a3fcbb9b..d056b54dbe01 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -533,7 +533,7 @@ static const struct arm64_ftr_bits ftr_id_isar6[] = {
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_BF16_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SPECRES_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_FHM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_FHM_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_DP_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_JSCVT_SHIFT, 4, 0),
 	ARM64_FTR_END,
@@ -2874,6 +2874,7 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
 	HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDDP),
+	HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDFHM),
 #endif
 	{},
 };
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index e94eb1a4f1f4..a0fefb451bac 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -149,6 +149,7 @@ static const char *const compat_hwcap_str[] = {
 	[COMPAT_KERNEL_HWCAP(FPHP)]	= "fphp",
 	[COMPAT_KERNEL_HWCAP(ASIMDHP)]	= "asimdhp",
 	[COMPAT_KERNEL_HWCAP(ASIMDDP)]	= "asimddp",
+	[COMPAT_KERNEL_HWCAP(ASIMDFHM)]	= "asimdfhm",
 };

 #define COMPAT_KERNEL_HWCAP2(x)	const_ilog2(COMPAT_HWCAP2_ ## x)
-- 
cgit v1.2.3 


From f64234fa45f47cd757e5eb7a83d54d83480c4fce Mon Sep 17 00:00:00 2001
From: Amit Daniel Kachhap 
Date: Wed, 11 Jan 2023 11:07:03 +0530
Subject: arm64: Add compat hwcap ASIMDBF16

This hwcap was added earlier for 32-bit native arm kernel by commit
23b6d4ad6e7a ("ARM: 9271/1: vfp: Add hwcap for FEAT_AA32BF16") and hence
the corresponding changes added in 32-bit compat arm64 kernel.

Brain 16-bit floating-point storage format is a feature (FEAT_AA32BF16)
present in AArch32 state for Armv8 and is represented by the ISAR6.BF16
identification register. A similar feature (FEAT_BF16) exists for AArch64
state and is already advertised in native arm64 kernel.
Signed-off-by: Amit Daniel Kachhap 
Reviewed-by: Mark Brown 
Link: https://lore.kernel.org/r/20230111053706.13994-5-amit.kachhap@arm.com
Signed-off-by: Catalin Marinas 
---
 arch/arm64/include/asm/hwcap.h | 1 +
 arch/arm64/kernel/cpufeature.c | 3 ++-
 arch/arm64/kernel/cpuinfo.c    | 1 +
 3 files changed, 4 insertions(+), 1 deletion(-)

(limited to 'arch/arm64/include/asm')

diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 5891e27b840b..268aa0e5ec06 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -35,6 +35,7 @@
 #define COMPAT_HWCAP_ASIMDHP	(1 << 23)
 #define COMPAT_HWCAP_ASIMDDP	(1 << 24)
 #define COMPAT_HWCAP_ASIMDFHM	(1 << 25)
+#define COMPAT_HWCAP_ASIMDBF16	(1 << 26)

 #define COMPAT_HWCAP2_AES	(1 << 0)
 #define COMPAT_HWCAP2_PMULL	(1 << 1)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d056b54dbe01..1533107a7ad5 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -530,7 +530,7 @@ static const struct arm64_ftr_bits ftr_id_mmfr5[] = {

 static const struct arm64_ftr_bits ftr_id_isar6[] = {
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_I8MM_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_BF16_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_BF16_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SPECRES_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_FHM_SHIFT, 4, 0),
@@ -2875,6 +2875,7 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
 	HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDDP),
 	HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDFHM),
+	HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDBF16),
 #endif
 	{},
 };
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index a0fefb451bac..50cfd808b80c 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -150,6 +150,7 @@ static const char *const compat_hwcap_str[] = {
 	[COMPAT_KERNEL_HWCAP(ASIMDHP)]	= "asimdhp",
 	[COMPAT_KERNEL_HWCAP(ASIMDDP)]	= "asimddp",
 	[COMPAT_KERNEL_HWCAP(ASIMDFHM)]	= "asimdfhm",
+	[COMPAT_KERNEL_HWCAP(ASIMDBF16)] = "asimdbf16",
 };

 #define COMPAT_KERNEL_HWCAP2(x)	const_ilog2(COMPAT_HWCAP2_ ## x)
-- 
cgit v1.2.3 


From 0864d1e42959b8b1e32efa3fb672fe81ba1dbc6c Mon Sep 17 00:00:00 2001
From: Amit Daniel Kachhap 
Date: Wed, 11 Jan 2023 11:07:04 +0530
Subject: arm64: Add compat hwcap I8MM

This hwcap was added earlier for 32-bit native arm kernel by commit
956ca3a4eb81 ("ARM: 9272/1: vfp: Add hwcap for FEAT_AA32I8MM") and hence
the corresponding changes added in 32-bit compat arm64 kernel for similar
user interfaces.

Int8 matrix multiplication is a feature (FEAT_AA32I8MM) present in
AArch32 state of Armv8 and is identified by the ISAR6.I8MM register. A
similar feature (FEAT_I8MM) exists for AArch64 state and is already
advertised in arm64 kernel.
Signed-off-by: Amit Daniel Kachhap 
Reviewed-by: Mark Brown 
Link: https://lore.kernel.org/r/20230111053706.13994-6-amit.kachhap@arm.com
Signed-off-by: Catalin Marinas 
---
 arch/arm64/include/asm/hwcap.h | 1 +
 arch/arm64/kernel/cpufeature.c | 3 ++-
 arch/arm64/kernel/cpuinfo.c    | 1 +
 3 files changed, 4 insertions(+), 1 deletion(-)

(limited to 'arch/arm64/include/asm')

diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 268aa0e5ec06..da7687efd1ae 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -36,6 +36,7 @@
 #define COMPAT_HWCAP_ASIMDDP	(1 << 24)
 #define COMPAT_HWCAP_ASIMDFHM	(1 << 25)
 #define COMPAT_HWCAP_ASIMDBF16	(1 << 26)
+#define COMPAT_HWCAP_I8MM	(1 << 27)

 #define COMPAT_HWCAP2_AES	(1 << 0)
 #define COMPAT_HWCAP2_PMULL	(1 << 1)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 1533107a7ad5..46998bbba6ff 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -529,7 +529,7 @@ static const struct arm64_ftr_bits ftr_id_mmfr5[] = {
 };

 static const struct arm64_ftr_bits ftr_id_isar6[] = {
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_I8MM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_I8MM_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_BF16_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SPECRES_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0),
@@ -2876,6 +2876,7 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
 	HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDDP),
 	HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDFHM),
 	HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDBF16),
+	HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_I8MM),
 #endif
 	{},
 };
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 50cfd808b80c..2af73bc14775 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -151,6 +151,7 @@ static const char *const compat_hwcap_str[] = {
 	[COMPAT_KERNEL_HWCAP(ASIMDDP)]	= "asimddp",
 	[COMPAT_KERNEL_HWCAP(ASIMDFHM)]	= "asimdfhm",
 	[COMPAT_KERNEL_HWCAP(ASIMDBF16)] = "asimdbf16",
+	[COMPAT_KERNEL_HWCAP(I8MM)]	= "i8mm",
 };

 #define COMPAT_KERNEL_HWCAP2(x)	const_ilog2(COMPAT_HWCAP2_ ## x)
-- 
cgit v1.2.3 


From 2d602aa99abb84c34cc9602c8bd6852895314f74 Mon Sep 17 00:00:00 2001
From: Amit Daniel Kachhap 
Date: Wed, 11 Jan 2023 11:07:05 +0530
Subject: arm64: Add compat hwcap SB

This hwcap was added for 32-bit native arm kernel by commit 3bda6d884897
("ARM: 9273/1: Add hwcap for Speculation Barrier(SB)") and hence the
corresponding changes added in 32-bit compat arm64 kernel.

Speculation Barrier is a feature (FEAT_SB) present in both AArch32 and
AArch64 state. This hwcap is already advertised in native arm64 kernel.
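A hedged userspace sketch (not from the patch): second-page compat hwcaps such
as "sb" are exposed via AT_HWCAP2, so a 32-bit program would check:

	#include <stdio.h>
	#include <sys/auxv.h>

	#ifndef HWCAP2_SB
	#define HWCAP2_SB	(1UL << 5)	/* mirrors COMPAT_HWCAP2_SB below */
	#endif

	int main(void)
	{
		unsigned long hwcaps2 = getauxval(AT_HWCAP2);

		printf("sb: %s\n", (hwcaps2 & HWCAP2_SB) ? "yes" : "no");
		return 0;
	}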
Signed-off-by: Amit Daniel Kachhap 
Reviewed-by: Mark Brown 
Link: https://lore.kernel.org/r/20230111053706.13994-7-amit.kachhap@arm.com
Signed-off-by: Catalin Marinas 
---
 arch/arm64/include/asm/hwcap.h | 1 +
 arch/arm64/kernel/cpufeature.c | 3 ++-
 arch/arm64/kernel/cpuinfo.c    | 1 +
 3 files changed, 4 insertions(+), 1 deletion(-)

(limited to 'arch/arm64/include/asm')

diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index da7687efd1ae..f2bcf4255f97 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -43,6 +43,7 @@
 #define COMPAT_HWCAP2_SHA1	(1 << 2)
 #define COMPAT_HWCAP2_SHA2	(1 << 3)
 #define COMPAT_HWCAP2_CRC32	(1 << 4)
+#define COMPAT_HWCAP2_SB	(1 << 5)

 #ifndef __ASSEMBLY__
 #include 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 46998bbba6ff..8adbafbdf1da 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -532,7 +532,7 @@ static const struct arm64_ftr_bits ftr_id_isar6[] = {
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_I8MM_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_BF16_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SPECRES_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_FHM_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_DP_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_JSCVT_SHIFT, 4, 0),
@@ -2875,6 +2875,7 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
 	HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDDP),
 	HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDFHM),
+	HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_SB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SB),
 	HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDBF16),
 	HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_I8MM),
 #endif
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 2af73bc14775..d2b41f2544f5 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -161,6 +161,7 @@ static const char *const compat_hwcap2_str[] = {
 	[COMPAT_KERNEL_HWCAP2(SHA1)]	= "sha1",
 	[COMPAT_KERNEL_HWCAP2(SHA2)]	= "sha2",
 	[COMPAT_KERNEL_HWCAP2(CRC32)]	= "crc32",
+	[COMPAT_KERNEL_HWCAP2(SB)]	= "sb",
 };

 #endif /* CONFIG_COMPAT */
-- 
cgit v1.2.3 


From 4f2c9bf16a4bc209a674e7b76d8e829b917c7f84 Mon Sep 17 00:00:00 2001
From: Amit Daniel Kachhap 
Date: Wed, 11 Jan 2023 11:07:06 +0530
Subject: arm64: Add compat hwcap SSBS

This hwcap was added for 32-bit native arm kernel by commit fea53546be57
("ARM: 9274/1: Add hwcap for Speculative Store Bypassing Safe") and hence
the corresponding changes added in 32-bit compat arm64 for similar user
interfaces.

Speculative Store Bypass Safe is a feature (FEAT_SSBS) present in
AArch32/AArch64 state for Armv8 and can be identified by the PFR2.SSBS
identification register.
This hwcap is already advertised in native arm64 kernel.

Signed-off-by: Amit Daniel Kachhap 
Reviewed-by: Mark Brown 
Link: https://lore.kernel.org/r/20230111053706.13994-8-amit.kachhap@arm.com
Signed-off-by: Catalin Marinas 
---
 arch/arm64/include/asm/hwcap.h | 1 +
 arch/arm64/kernel/cpufeature.c | 3 ++-
 arch/arm64/kernel/cpuinfo.c    | 1 +
 3 files changed, 4 insertions(+), 1 deletion(-)

(limited to 'arch/arm64/include/asm')

diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index f2bcf4255f97..2f539a3e3d3a 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -44,6 +44,7 @@
 #define COMPAT_HWCAP2_SHA2	(1 << 3)
 #define COMPAT_HWCAP2_CRC32	(1 << 4)
 #define COMPAT_HWCAP2_SB	(1 << 5)
+#define COMPAT_HWCAP2_SSBS	(1 << 6)

 #ifndef __ASSEMBLY__
 #include 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 8adbafbdf1da..d54c2a003435 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -562,7 +562,7 @@ static const struct arm64_ftr_bits ftr_id_pfr1[] = {
 };

 static const struct arm64_ftr_bits ftr_id_pfr2[] = {
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_SSBS_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_SSBS_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_CSV3_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
@@ -2878,6 +2878,7 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
 	HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_SB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SB),
 	HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDBF16),
 	HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_I8MM),
+	HWCAP_CAP(SYS_ID_PFR2_EL1, ID_PFR2_EL1_SSBS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SSBS),
 #endif
 	{},
 };
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index d2b41f2544f5..876cd96c73ea 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -162,6 +162,7 @@ static const char *const compat_hwcap2_str[] = {
 	[COMPAT_KERNEL_HWCAP2(SHA2)]	= "sha2",
 	[COMPAT_KERNEL_HWCAP2(CRC32)]	= "crc32",
 	[COMPAT_KERNEL_HWCAP2(SB)]	= "sb",
+	[COMPAT_KERNEL_HWCAP2(SSBS)]	= "ssbs",
 };

 #endif /* CONFIG_COMPAT */
-- 
cgit v1.2.3 


From 47a15aa544279d34e14e17ca3b5855e39b946cec Mon Sep 17 00:00:00 2001
From: Mark Rutland 
Date: Mon, 23 Jan 2023 13:45:59 +0000
Subject: arm64: Extend support for CONFIG_FUNCTION_ALIGNMENT

On arm64 we don't align assembly functions in the same way as C
functions. This somewhat limits the utility of
CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B for testing, and adds noise when
testing that we're correctly aligning functions as will be necessary for
ftrace in subsequent patches.

Follow the example of x86, and align assembly functions in the same way
as C functions. Selecting FUNCTION_ALIGNMENT_4B ensures
CONFIG_FUNCTION_ALIGNMENT will be a minimum of 4 bytes, matching the
minimum alignment that __ALIGN and __ALIGN_STR provide prior to this
patch.
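For illustration (a sketch, not from the patch), with
CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B=y, so that CONFIG_FUNCTION_ALIGNMENT is
64, the updated linkage macros would effectively become:

	/* Effective expansion, assuming CONFIG_FUNCTION_ALIGNMENT == 64 */
	#define __ALIGN		.balign 64
	#define __ALIGN_STR	".balign 64"

so every assembly function entry is padded to a 64-byte boundary, matching
compiler-aligned C functions.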
I've tested this by selecting CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B=y, building and booting a kernel, and looking for misaligned text symbols: Before, v6.2-rc3: # uname -rm 6.2.0-rc3 aarch64 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | wc -l 5009 Before, v6.2-rc3 + fixed __cold: # uname -rm 6.2.0-rc3-00001-g2a2bedf8bfa9 aarch64 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | wc -l 919 Before, v6.2-rc3 + fixed __cold + fixed ACPICA: # uname -rm 6.2.0-rc3-00002-g267bddc38572 aarch64 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | wc -l 323 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | grep acpi | wc -l 0 After: # uname -rm 6.2.0-rc3-00003-g71db61ee3ea1 aarch64 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | wc -l 112 Considering the remaining 112 unaligned text symbols: * 20 are non-function KVM NVHE assembly symbols, which are never instrumented by ftrace: # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | grep __kvm_nvhe | wc -l 20 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | grep __kvm_nvhe ffffbe6483f73784 t __kvm_nvhe___invalid ffffbe6483f73788 t __kvm_nvhe___do_hyp_init ffffbe6483f73ab0 t __kvm_nvhe_reset ffffbe6483f73b8c T __kvm_nvhe___hyp_idmap_text_end ffffbe6483f73b8c T __kvm_nvhe___hyp_text_start ffffbe6483f77864 t __kvm_nvhe___host_enter_restore_full ffffbe6483f77874 t __kvm_nvhe___host_enter_for_panic ffffbe6483f778a4 t __kvm_nvhe___host_enter_without_restoring ffffbe6483f81178 T __kvm_nvhe___guest_exit_panic ffffbe6483f811c8 T __kvm_nvhe___guest_exit ffffbe6483f81354 t __kvm_nvhe_abort_guest_exit_start ffffbe6483f81358 t __kvm_nvhe_abort_guest_exit_end ffffbe6483f81830 t __kvm_nvhe_wa_epilogue ffffbe6483f81844 t __kvm_nvhe_el1_trap ffffbe6483f81864 t __kvm_nvhe_el1_fiq ffffbe6483f81864 t __kvm_nvhe_el1_irq ffffbe6483f81884 t __kvm_nvhe_el1_error ffffbe6483f818a4 t __kvm_nvhe_el2_sync ffffbe6483f81920 t __kvm_nvhe_el2_error ffffbe6483f865c8 T __kvm_nvhe___start___kvm_ex_table * 53 are position-independent functions only used during early boot, which are built with '-Os', but are never instrumented by ftrace: # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | grep __pi | wc -l 53 We *could* drop '-Os' when building these for consistency, but that is not necessary to ensure that ftrace works correctly. 
* The remaining 39 are non-function symbols, and 3 runtime BPF functions, which are never instrumented by ftrace: # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | grep -v __kvm_nvhe | grep -v __pi | wc -l 39 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | grep -v __kvm_nvhe | grep -v __pi ffffbe6482e1009c T __irqentry_text_end ffffbe6482e10358 T __softirqentry_text_end ffffbe6482e1435c T __entry_text_end ffffbe6482e825f8 T __guest_exit_panic ffffbe6482e82648 T __guest_exit ffffbe6482e827d4 t abort_guest_exit_start ffffbe6482e827d8 t abort_guest_exit_end ffffbe6482e83030 t wa_epilogue ffffbe6482e83044 t el1_trap ffffbe6482e83064 t el1_fiq ffffbe6482e83064 t el1_irq ffffbe6482e83084 t el1_error ffffbe6482e830a4 t el2_sync ffffbe6482e83120 t el2_error ffffbe6482e93550 T sha256_block_neon ffffbe64830f3ae0 t e843419@01cc_00002a0c_3104 ffffbe648378bd90 t e843419@09b3_0000d7cb_bc4 ffffbe6483bdab20 t e843419@0c66_000116e2_34c8 ffffbe6483f62c94 T __noinstr_text_end ffffbe6483f70a18 T __sched_text_end ffffbe6483f70b2c T __cpuidle_text_end ffffbe6483f722d4 T __lock_text_end ffffbe6483f73b8c T __hyp_idmap_text_end ffffbe6483f73b8c T __hyp_text_start ffffbe6483f865c8 T __start___kvm_ex_table ffffbe6483f870d0 t init_el1 ffffbe6483f870f8 t init_el2 ffffbe6483f87324 t pen ffffbe6483f87b48 T __idmap_text_end ffffbe64848eb010 T __hibernate_exit_text_start ffffbe64848eb124 T __hibernate_exit_text_end ffffbe64848eb124 T __relocate_new_kernel_start ffffbe64848eb260 T __relocate_new_kernel_end ffffbe648498a8e8 T _einittext ffffbe648498a8e8 T __exittext_begin ffffbe6484999d84 T __exittext_end ffff8000080756b4 t bpf_prog_6deef7357e7b4530 [bpf] ffff80000808dd78 t bpf_prog_6deef7357e7b4530 [bpf] ffff80000809d684 t bpf_prog_6deef7357e7b4530 [bpf] Signed-off-by: Mark Rutland Cc: Florent Revest Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Will Deacon Link: https://lore.kernel.org/r/20230123134603.1064407-5-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 1 + arch/arm64/include/asm/linkage.h | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 03934808b2ed..6914f6bf41e2 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -123,6 +123,7 @@ config ARM64 select DMA_DIRECT_REMAP select EDAC_SUPPORT select FRAME_POINTER + select FUNCTION_ALIGNMENT_4B select GENERIC_ALLOCATOR select GENERIC_ARCH_TOPOLOGY select GENERIC_CLOCKEVENTS_BROADCAST diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h index 1436fa1cde24..d3acd9c87509 100644 --- a/arch/arm64/include/asm/linkage.h +++ b/arch/arm64/include/asm/linkage.h @@ -5,8 +5,8 @@ #include #endif -#define __ALIGN .align 2 -#define __ALIGN_STR ".align 2" +#define __ALIGN .balign CONFIG_FUNCTION_ALIGNMENT +#define __ALIGN_STR ".balign " #CONFIG_FUNCTION_ALIGNMENT /* * When using in-kernel BTI we need to ensure that PCS-conformant -- cgit v1.2.3 From 2bbbb4015aa1af62f8002de86de019786ecf63a7 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 23 Jan 2023 13:46:00 +0000 Subject: arm64: insn: Add helpers for BTI In subsequent patches we'd like to check whether an instruction is a BTI. In preparation for this, add basic instruction helpers for BTI instructions. 
Per ARM DDI 0487H.a section C6.2.41, BTI is encoded in binary as follows,
MSB to LSB:

  1101 0101 0000 0011 0010 0100 xx01 1111

Where the `xx` bits encode J/C/JC:

  00 : (omitted)
  01 : C
  10 : J
  11 : JC

Signed-off-by: Mark Rutland 
Cc: Florent Revest 
Cc: Masami Hiramatsu 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Will Deacon 
Link: https://lore.kernel.org/r/20230123134603.1064407-6-mark.rutland@arm.com
Signed-off-by: Catalin Marinas 
---
 arch/arm64/include/asm/insn.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arm64/include/asm')

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index aaf1f52fbf3e..139a88e4e852 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -420,6 +420,7 @@ __AARCH64_INSN_FUNCS(sb,	0xFFFFFFFF, 0xD50330FF)
 __AARCH64_INSN_FUNCS(clrex,	0xFFFFF0FF, 0xD503305F)
 __AARCH64_INSN_FUNCS(ssbb,	0xFFFFFFFF, 0xD503309F)
 __AARCH64_INSN_FUNCS(pssbb,	0xFFFFFFFF, 0xD503349F)
+__AARCH64_INSN_FUNCS(bti,	0xFFFFFF3F, 0xD503241f)

 #undef	__AARCH64_INSN_FUNCS
-- 
cgit v1.2.3 


From e4ecbe83fd1a5428d5458de04a3404f1b5444429 Mon Sep 17 00:00:00 2001
From: Mark Rutland 
Date: Mon, 23 Jan 2023 13:46:01 +0000
Subject: arm64: patching: Add aarch64_insn_write_literal_u64()

In subsequent patches we'll need to atomically write to a
naturally-aligned 64-bit literal embedded within the kernel text. Add a
helper for this.

For consistency with other text patching code we use
copy_to_kernel_nofault(), which is atomic for naturally-aligned accesses
up to 64 bits.

Signed-off-by: Mark Rutland 
Cc: Florent Revest 
Cc: Masami Hiramatsu 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Will Deacon 
Link: https://lore.kernel.org/r/20230123134603.1064407-7-mark.rutland@arm.com
Signed-off-by: Catalin Marinas 
---
 arch/arm64/include/asm/patching.h |  2 ++
 arch/arm64/kernel/patching.c      | 17 +++++++++++++++++
 2 files changed, 19 insertions(+)

(limited to 'arch/arm64/include/asm')

diff --git a/arch/arm64/include/asm/patching.h b/arch/arm64/include/asm/patching.h
index 6bf5adc56295..68908b82b168 100644
--- a/arch/arm64/include/asm/patching.h
+++ b/arch/arm64/include/asm/patching.h
@@ -7,6 +7,8 @@
 int aarch64_insn_read(void *addr, u32 *insnp);
 int aarch64_insn_write(void *addr, u32 insn);

+int aarch64_insn_write_literal_u64(void *addr, u64 val);
+
 int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
 int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);

diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c
index 33e0fabc0b79..b4835f6d594b 100644
--- a/arch/arm64/kernel/patching.c
+++ b/arch/arm64/kernel/patching.c
@@ -88,6 +88,23 @@ int __kprobes aarch64_insn_write(void *addr, u32 insn)
 	return __aarch64_insn_write(addr, cpu_to_le32(insn));
 }

+noinstr int aarch64_insn_write_literal_u64(void *addr, u64 val)
+{
+	u64 *waddr;
+	unsigned long flags;
+	int ret;
+
+	raw_spin_lock_irqsave(&patch_lock, flags);
+	waddr = patch_map(addr, FIX_TEXT_POKE0);
+
+	ret = copy_to_kernel_nofault(waddr, &val, sizeof(val));
+
+	patch_unmap(FIX_TEXT_POKE0);
+	raw_spin_unlock_irqrestore(&patch_lock, flags);
+
+	return ret;
+}
+
 int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
 {
 	u32 *tp = addr;
-- 
cgit v1.2.3 


From baaf553d3bc330697c68a00f96cf11f4edfeac7e Mon Sep 17 00:00:00 2001
From: Mark Rutland 
Date: Mon, 23 Jan 2023 13:46:03 +0000
Subject: arm64: Implement HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS

This patch enables support for DYNAMIC_FTRACE_WITH_CALL_OPS on arm64.
This allows each ftrace callsite to provide an ftrace_ops to the common
ftrace trampoline, allowing each callsite to invoke distinct tracer
functions without the need to fall back to list processing or to
allocate custom trampolines for each callsite. This significantly speeds
up cases where multiple distinct trace functions are used and callsites
are mostly traced by a single tracer.

The main idea is to place a pointer to the ftrace_ops as a literal at a
fixed offset from the function entry point, which can be recovered by
the common ftrace trampoline. Using a 64-bit literal avoids branch range
limitations, and permits the ops to be swapped atomically without
special considerations that apply to code-patching.

In future this will also allow for the implementation of
DYNAMIC_FTRACE_WITH_DIRECT_CALLS without branch range limitations by
using additional fields in struct ftrace_ops.

As noted in the core patch adding support for
DYNAMIC_FTRACE_WITH_CALL_OPS, this approach allows for directly invoking
ftrace_ops::func even for ftrace_ops which are dynamically-allocated (or
part of a module), without going via ftrace_ops_list_func.

Currently, this approach is not compatible with CLANG_CFI, as the
presence/absence of pre-function NOPs changes the offset of the
pre-function type hash, and there's no existing mechanism to ensure a
consistent offset for instrumented and uninstrumented functions. When
CLANG_CFI is enabled, the existing scheme with a global ops->func
pointer is used, and there should be no functional change. I am
currently working with others to allow the two to work together in
future (though this will likely require updated compiler support).

I've benchmarked this with the ftrace_ops sample module [1], which is
not currently upstream, but available at:

  https://lore.kernel.org/lkml/20230103124912.2948963-1-mark.rutland@arm.com
  git://git.kernel.org/pub/scm/linux/kernel/git/mark/linux.git ftrace-ops-sample-20230109

Using that module I measured the total time taken for 100,000 calls to a
trivial instrumented function, with a number of tracers enabled with
relevant filters (which would apply to the instrumented function) and a
number of tracers enabled with irrelevant filters (which would not apply
to the instrumented function).

I tested on an M1 MacBook Pro, running under a HVF-accelerated QEMU VM
(i.e. on real hardware).

Before this patch:

  Number of tracers     || Total time  | Per-call average time (ns)
  Relevant | Irrelevant ||     (ns)    | Total    | Overhead
  =========+============++=============+==========+==========
         0 |          0 ||      94,583 |     0.95 |        -
         0 |          1 ||      93,709 |     0.94 |        -
         0 |          2 ||      93,666 |     0.94 |        -
         0 |         10 ||      93,709 |     0.94 |        -
         0 |        100 ||      93,792 |     0.94 |        -
  ---------+------------++-------------+----------+----------
         1 |          1 ||   6,467,833 |    64.68 |    63.73
         1 |          2 ||   7,509,708 |    75.10 |    74.15
         1 |         10 ||  23,786,792 |   237.87 |   236.92
         1 |        100 || 106,432,500 | 1,064.43 |  1063.38
  ---------+------------++-------------+----------+----------
         1 |          0 ||   1,431,875 |    14.32 |    13.37
         2 |          0 ||   6,456,334 |    64.56 |    63.62
        10 |          0 ||  22,717,000 |   227.17 |   226.22
       100 |          0 || 103,293,667 |  1032.94 |  1031.99
  ---------+------------++-------------+----------+----------

Note: per-call overhead is estimated relative to the baseline case with
0 relevant tracers and 0 irrelevant tracers.
After this patch:

  Number of tracers     || Total time  | Per-call average time (ns)
  Relevant | Irrelevant ||     (ns)    | Total    | Overhead
  =========+============++=============+==========+==========
         0 |          0 ||      94,541 |     0.95 |        -
         0 |          1 ||      93,666 |     0.94 |        -
         0 |          2 ||      93,709 |     0.94 |        -
         0 |         10 ||      93,667 |     0.94 |        -
         0 |        100 ||      93,792 |     0.94 |        -
  ---------+------------++-------------+----------+----------
         1 |          1 ||     281,000 |     2.81 |     1.86
         1 |          2 ||     281,042 |     2.81 |     1.87
         1 |         10 ||     280,958 |     2.81 |     1.86
         1 |        100 ||     281,250 |     2.81 |     1.87
  ---------+------------++-------------+----------+----------
         1 |          0 ||     280,959 |     2.81 |     1.86
         2 |          0 ||   6,502,708 |    65.03 |    64.08
        10 |          0 ||  18,681,209 |   186.81 |   185.87
       100 |          0 || 103,550,458 | 1,035.50 |  1034.56
  ---------+------------++-------------+----------+----------

Note: per-call overhead is estimated relative to the baseline case with
0 relevant tracers and 0 irrelevant tracers.

As can be seen from the above:

a) Whenever there is a single relevant tracer function associated with a
   tracee, the overhead of invoking the tracer is constant, and does not
   scale with the number of tracers which are *not* associated with that
   tracee.

b) The overhead for a single relevant tracer has dropped to ~1/7 of the
   overhead prior to this series (from 13.37ns to 1.86ns). This is
   largely due to permitting calls to dynamically-allocated ftrace_ops
   without going through ftrace_ops_list_func.

I've run the ftrace selftests from v6.2-rc3, which reports:

| # of passed:  110
| # of failed:  0
| # of unresolved:  3
| # of untested:  0
| # of unsupported:  0
| # of xfailed:  1
| # of undefined(test bug):  0

... where the unresolved entries were the tests for DIRECT functions
(which are not supported), and the checkbashisms selftest (which is
irrelevant here):

| [8] Test ftrace direct functions against tracers	[UNRESOLVED]
| [9] Test ftrace direct functions against kprobes	[UNRESOLVED]
| [62] Meta-selftest: Checkbashisms	[UNRESOLVED]

... with all other tests passing (or failing as expected).
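To make the ops-literal recovery concrete, here is a hedged C rendering
(illustrative only; recover_ops() is a hypothetical name, and the trampoline
does this in two instructions, as the entry-ftrace.S comment below explains):

	/*
	 * The callsite LR is either literal + 16 (no BTI) or literal + 20
	 * (with BTI); rounding down to an 8-byte boundary and subtracting
	 * 16 yields the literal address in both cases, i.e. the asm
	 * sequence "bic x2, x30, 0x7; ldr x2, [x2, #-16]".
	 */
	static const struct ftrace_ops *recover_ops(unsigned long lr)
	{
		unsigned long literal = (lr & ~0x7UL) - 16;

		return *(const struct ftrace_ops **)literal;
	}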
Signed-off-by: Mark Rutland Cc: Florent Revest Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Will Deacon Link: https://lore.kernel.org/r/20230123134603.1064407-9-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 3 + arch/arm64/Makefile | 5 +- arch/arm64/include/asm/ftrace.h | 15 +--- arch/arm64/kernel/asm-offsets.c | 4 + arch/arm64/kernel/entry-ftrace.S | 32 ++++++-- arch/arm64/kernel/ftrace.c | 156 +++++++++++++++++++++++++++++++++++++++ 6 files changed, 195 insertions(+), 20 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 6914f6bf41e2..6f6f37161cf6 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -124,6 +124,7 @@ config ARM64 select EDAC_SUPPORT select FRAME_POINTER select FUNCTION_ALIGNMENT_4B + select FUNCTION_ALIGNMENT_8B if DYNAMIC_FTRACE_WITH_CALL_OPS select GENERIC_ALLOCATOR select GENERIC_ARCH_TOPOLOGY select GENERIC_CLOCKEVENTS_BROADCAST @@ -187,6 +188,8 @@ config ARM64 select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_ARGS \ if $(cc-option,-fpatchable-function-entry=2) + select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS \ + if (DYNAMIC_FTRACE_WITH_ARGS && !CFI_CLANG) select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \ if DYNAMIC_FTRACE_WITH_ARGS select HAVE_EFFICIENT_UNALIGNED_ACCESS diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index d62bd221828f..4c3be442fbb3 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -139,7 +139,10 @@ endif CHECKFLAGS += -D__aarch64__ -ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_ARGS),y) +ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS),y) + KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY + CC_FLAGS_FTRACE := -fpatchable-function-entry=4,2 +else ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_ARGS),y) KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY CC_FLAGS_FTRACE := -fpatchable-function-entry=2 endif diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h index 5664729800ae..1c2672bbbf37 100644 --- a/arch/arm64/include/asm/ftrace.h +++ b/arch/arm64/include/asm/ftrace.h @@ -62,20 +62,7 @@ extern unsigned long ftrace_graph_call; extern void return_to_handler(void); -static inline unsigned long ftrace_call_adjust(unsigned long addr) -{ - /* - * Adjust addr to point at the BL in the callsite. - * See ftrace_init_nop() for the callsite sequence. - */ - if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS)) - return addr + AARCH64_INSN_SIZE; - /* - * addr is the address of the mcount call instruction. - * recordmcount does the necessary offset calculation. 
- */
-	return addr;
-}
+unsigned long ftrace_call_adjust(unsigned long addr);

 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
 struct dyn_ftrace;
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 2234624536d9..ae345b06e9f7 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -9,6 +9,7 @@

 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -193,6 +194,9 @@ int main(void)
   DEFINE(KIMAGE_HEAD,		offsetof(struct kimage, head));
   DEFINE(KIMAGE_START,		offsetof(struct kimage, start));
   BLANK();
+#endif
+#ifdef CONFIG_FUNCTION_TRACER
+  DEFINE(FTRACE_OPS_FUNC,		offsetof(struct ftrace_ops, func));
 #endif
   return 0;
 }
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 3b625f76ffba..350ed81324ac 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -65,13 +65,35 @@ SYM_CODE_START(ftrace_caller)
 	stp	x29, x30, [sp, #FREGS_SIZE]
 	add	x29, sp, #FREGS_SIZE

-	sub	x0, x30, #AARCH64_INSN_SIZE	// ip (callsite's BL insn)
-	mov	x1, x9				// parent_ip (callsite's LR)
-	ldr_l	x2, function_trace_op		// op
-	mov	x3, sp				// regs
+	/* Prepare arguments for the tracer func */
+	sub	x0, x30, #AARCH64_INSN_SIZE		// ip (callsite's BL insn)
+	mov	x1, x9					// parent_ip (callsite's LR)
+	mov	x3, sp					// regs
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+	/*
+	 * The literal pointer to the ops is at an 8-byte aligned boundary
+	 * which is either 12 or 16 bytes before the BL instruction in the call
+	 * site. See ftrace_call_adjust() for details.
+	 *
+	 * Therefore here the LR points at `literal + 16` or `literal + 20`,
+	 * and we can find the address of the literal in either case by
+	 * aligning to an 8-byte boundary and subtracting 16. We do the
+	 * alignment first as this allows us to fold the subtraction into the
+	 * LDR.
+	 */
+	bic	x2, x30, 0x7
+	ldr	x2, [x2, #-16]				// op
+
+	ldr	x4, [x2, #FTRACE_OPS_FUNC]		// op->func
+	blr	x4					// op->func(ip, parent_ip, op, regs)
+
+#else
+	ldr_l	x2, function_trace_op			// op

 SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
-	bl	ftrace_stub
+	bl	ftrace_stub				// func(ip, parent_ip, op, regs)
+#endif

 	/*
	 * At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 38ebdf063255..5545fe1a9012 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -60,6 +60,89 @@ int ftrace_regs_query_register_offset(const char *name)
 }
 #endif

+unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	/*
+	 * When using mcount, addr is the address of the mcount call
+	 * instruction, and no adjustment is necessary.
+	 */
+	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
+		return addr;
+
+	/*
+	 * When using patchable-function-entry without pre-function NOPS, addr
+	 * is the address of the first NOP after the function entry point.
+	 *
+	 * The compiler has either generated:
+	 *
+	 * addr+00:	func:	NOP		// To be patched to MOV X9, LR
+	 * addr+04:		NOP		// To be patched to BL <caller>
+	 *
+	 * Or:
+	 *
+	 * addr-04:		BTI	C
+	 * addr+00:	func:	NOP		// To be patched to MOV X9, LR
+	 * addr+04:		NOP		// To be patched to BL <caller>
+	 *
+	 * We must adjust addr to the address of the NOP which will be patched
+	 * to `BL <caller>`, which is at `addr + 4` bytes in either case.
+	 *
+	 */
+	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
+		return addr + AARCH64_INSN_SIZE;
+
+	/*
+	 * When using patchable-function-entry with pre-function NOPs, addr is
+	 * the address of the first pre-function NOP.
+ * + * Starting from an 8-byte aligned base, the compiler has either + * generated: + * + * addr+00: NOP // Literal (first 32 bits) + * addr+04: NOP // Literal (last 32 bits) + * addr+08: func: NOP // To be patched to MOV X9, LR + * addr+12: NOP // To be patched to BL + * + * Or: + * + * addr+00: NOP // Literal (first 32 bits) + * addr+04: NOP // Literal (last 32 bits) + * addr+08: func: BTI C + * addr+12: NOP // To be patched to MOV X9, LR + * addr+16: NOP // To be patched to BL + * + * We must adjust addr to the address of the NOP which will be patched + * to `BL `, which is at either addr+12 or addr+16 depending on + * whether there is a BTI. + */ + + if (!IS_ALIGNED(addr, sizeof(unsigned long))) { + WARN_RATELIMIT(1, "Misaligned patch-site %pS\n", + (void *)(addr + 8)); + return 0; + } + + /* Skip the NOPs placed before the function entry point */ + addr += 2 * AARCH64_INSN_SIZE; + + /* Skip any BTI */ + if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)) { + u32 insn = le32_to_cpu(*(__le32 *)addr); + + if (aarch64_insn_is_bti(insn)) { + addr += AARCH64_INSN_SIZE; + } else if (insn != aarch64_insn_gen_nop()) { + WARN_RATELIMIT(1, "unexpected insn in patch-site %pS: 0x%08x\n", + (void *)addr, insn); + } + } + + /* Skip the first NOP after function entry */ + addr += AARCH64_INSN_SIZE; + + return addr; +} + /* * Replace a single instruction, which may be a branch or NOP. * If @validate == true, a replaced instruction is checked against 'old'. @@ -98,6 +181,13 @@ int ftrace_update_ftrace_func(ftrace_func_t func) unsigned long pc; u32 new; + /* + * When using CALL_OPS, the function to call is associated with the + * call site, and we don't have a global function pointer to update. + */ + if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)) + return 0; + pc = (unsigned long)ftrace_call; new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, AARCH64_INSN_BRANCH_LINK); @@ -176,6 +266,44 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, return true; } +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS +static const struct ftrace_ops *arm64_rec_get_ops(struct dyn_ftrace *rec) +{ + const struct ftrace_ops *ops = NULL; + + if (rec->flags & FTRACE_FL_CALL_OPS_EN) { + ops = ftrace_find_unique_ops(rec); + WARN_ON_ONCE(!ops); + } + + if (!ops) + ops = &ftrace_list_ops; + + return ops; +} + +static int ftrace_rec_set_ops(const struct dyn_ftrace *rec, + const struct ftrace_ops *ops) +{ + unsigned long literal = ALIGN_DOWN(rec->ip - 12, 8); + return aarch64_insn_write_literal_u64((void *)literal, + (unsigned long)ops); +} + +static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) +{ + return ftrace_rec_set_ops(rec, &ftrace_nop_ops); +} + +static int ftrace_rec_update_ops(struct dyn_ftrace *rec) +{ + return ftrace_rec_set_ops(rec, arm64_rec_get_ops(rec)); +} +#else +static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) { return 0; } +static int ftrace_rec_update_ops(struct dyn_ftrace *rec) { return 0; } +#endif + /* * Turn on the call to ftrace_caller() in instrumented function */ @@ -183,6 +311,11 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { unsigned long pc = rec->ip; u32 old, new; + int ret; + + ret = ftrace_rec_update_ops(rec); + if (ret) + return ret; if (!ftrace_find_callable_addr(rec, NULL, &addr)) return -EINVAL; @@ -193,6 +326,19 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return ftrace_modify_code(pc, old, new, true); } +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS +int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + 
unsigned long addr) +{ + if (WARN_ON_ONCE(old_addr != (unsigned long)ftrace_caller)) + return -EINVAL; + if (WARN_ON_ONCE(addr != (unsigned long)ftrace_caller)) + return -EINVAL; + + return ftrace_rec_update_ops(rec); +} +#endif + #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS /* * The compiler has inserted two NOPs before the regular function prologue. @@ -220,6 +366,11 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) { unsigned long pc = rec->ip - AARCH64_INSN_SIZE; u32 old, new; + int ret; + + ret = ftrace_rec_set_nop_ops(rec); + if (ret) + return ret; old = aarch64_insn_gen_nop(); new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9, @@ -237,9 +388,14 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, { unsigned long pc = rec->ip; u32 old = 0, new; + int ret; new = aarch64_insn_gen_nop(); + ret = ftrace_rec_set_nop_ops(rec); + if (ret) + return ret; + /* * When using mcount, callsites in modules may have been initialized to * call an arbitrary module PLT (which redirects to the _mcount stub) -- cgit v1.2.3 From 61786170383093908e9f5f8fd8c5c3ff0c3bbe03 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 11 Jan 2023 11:22:36 +0100 Subject: efi: arm64: enter with MMU and caches enabled Instead of cleaning the entire loaded kernel image to the PoC and disabling the MMU and caches before branching to the kernel's bare metal entry point, we can leave the MMU and caches enabled, and rely on EFI's cacheable 1:1 mapping of all of system RAM (which is mandated by the spec) to populate the initial page tables. This removes the need for managing coherency in software, which is tedious and error prone. Note that we still need to clean the executable region of the image to the PoU if this is required for I/D coherency, but only if we actually decided to move the image in memory, as otherwise, this will have been taken care of by the loader. This change affects both the builtin EFI stub as well as the zboot decompressor, which now carries the entire EFI stub along with the decompression code and the compressed image.
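As an illustrative aside (a sketch, not part of the patch itself), the resulting maintenance policy can be summarised roughly as below. Here `relocated` is a hypothetical flag standing in for the stub's decision to move the image; caches_clean_inval_pou(), image_addr and kernel_codesize are the names the series itself uses:

	/*
	 * Sketch only: cleaning to the PoU is needed for I/D coherency,
	 * and only when the stub itself moved the image; otherwise the
	 * loader has already made the image coherent.
	 */
	if (relocated)
		caches_clean_inval_pou(*image_addr, *image_addr + kernel_codesize);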
Signed-off-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230111102236.1430401-7-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/efi.h | 2 + arch/arm64/kernel/image-vars.h | 5 ++- arch/arm64/mm/cache.S | 1 + drivers/firmware/efi/libstub/Makefile | 4 +- drivers/firmware/efi/libstub/arm64-entry.S | 67 ------------------------------ drivers/firmware/efi/libstub/arm64-stub.c | 26 ++++++++---- drivers/firmware/efi/libstub/arm64.c | 41 +++++++++++++++--- 7 files changed, 61 insertions(+), 85 deletions(-) delete mode 100644 drivers/firmware/efi/libstub/arm64-entry.S (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 31d13a6001df..0f0e729b40ef 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -105,6 +105,8 @@ static inline unsigned long efi_get_kimg_min_align(void) #define EFI_ALLOC_ALIGN SZ_64K #define EFI_ALLOC_LIMIT ((1UL << 48) - 1) +extern unsigned long primary_entry_offset(void); + /* * On ARM systems, virtually remapped UEFI runtime services are set up in two * distinct stages: diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index d0e9bb5c91fc..73388b21d07d 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -10,7 +10,7 @@ #error This file should only be included in vmlinux.lds.S #endif -PROVIDE(__efistub_primary_entry_offset = primary_entry - _text); +PROVIDE(__efistub_primary_entry = primary_entry); /* * The EFI stub has its own symbol namespace prefixed by __efistub_, to @@ -21,10 +21,11 @@ PROVIDE(__efistub_primary_entry_offset = primary_entry - _text); * linked at. The routines below are all implemented in assembler in a * position independent manner */ -PROVIDE(__efistub_dcache_clean_poc = __pi_dcache_clean_poc); +PROVIDE(__efistub_caches_clean_inval_pou = __pi_caches_clean_inval_pou); PROVIDE(__efistub__text = _text); PROVIDE(__efistub__end = _end); +PROVIDE(__efistub___inittext_end = __inittext_end); PROVIDE(__efistub__edata = _edata); PROVIDE(__efistub_screen_info = screen_info); PROVIDE(__efistub__ctype = _ctype); diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 081058d4e436..503567c864fd 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -56,6 +56,7 @@ SYM_FUNC_START(caches_clean_inval_pou) caches_clean_inval_pou_macro ret SYM_FUNC_END(caches_clean_inval_pou) +SYM_FUNC_ALIAS(__pi_caches_clean_inval_pou, caches_clean_inval_pou) /* * caches_clean_inval_user_pou(start,end) diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index be8b8c6e8b40..80d85a5169fb 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -87,7 +87,7 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \ screen_info.o efi-stub-entry.o lib-$(CONFIG_ARM) += arm32-stub.o -lib-$(CONFIG_ARM64) += arm64.o arm64-stub.o arm64-entry.o smbios.o +lib-$(CONFIG_ARM64) += arm64.o arm64-stub.o smbios.o lib-$(CONFIG_X86) += x86-stub.o lib-$(CONFIG_RISCV) += riscv.o riscv-stub.o lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o @@ -141,7 +141,7 @@ STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS # STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \ --prefix-symbols=__efistub_ -STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS64 +STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS # For RISC-V, we don't need anything special other than arm64. 
Keep all the # symbols in .init section and make sure that no absolute symbols references diff --git a/drivers/firmware/efi/libstub/arm64-entry.S b/drivers/firmware/efi/libstub/arm64-entry.S deleted file mode 100644 index b5c17e89a4fc..000000000000 --- a/drivers/firmware/efi/libstub/arm64-entry.S +++ /dev/null @@ -1,67 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * EFI entry point. - * - * Copyright (C) 2013, 2014 Red Hat, Inc. - * Author: Mark Salter - */ -#include -#include - - /* - * The entrypoint of a arm64 bare metal image is at offset #0 of the - * image, so this is a reasonable default for primary_entry_offset. - * Only when the EFI stub is integrated into the core kernel, it is not - * guaranteed that the PE/COFF header has been copied to memory too, so - * in this case, primary_entry_offset should be overridden by the - * linker and point to primary_entry() directly. - */ - .weak primary_entry_offset - -SYM_CODE_START(efi_enter_kernel) - /* - * efi_pe_entry() will have copied the kernel image if necessary and we - * end up here with device tree address in x1 and the kernel entry - * point stored in x0. Save those values in registers which are - * callee preserved. - */ - ldr w2, =primary_entry_offset - add x19, x0, x2 // relocated Image entrypoint - - mov x0, x1 // DTB address - mov x1, xzr - mov x2, xzr - mov x3, xzr - - /* - * Clean the remainder of this routine to the PoC - * so that we can safely disable the MMU and caches. - */ - adr x4, 1f - dc civac, x4 - dsb sy - - /* Turn off Dcache and MMU */ - mrs x4, CurrentEL - cmp x4, #CurrentEL_EL2 - mrs x4, sctlr_el1 - b.ne 0f - mrs x4, sctlr_el2 -0: bic x4, x4, #SCTLR_ELx_M - bic x4, x4, #SCTLR_ELx_C - b.eq 1f - b 2f - - .balign 32 -1: pre_disable_mmu_workaround - msr sctlr_el2, x4 - isb - br x19 // jump to kernel entrypoint - -2: pre_disable_mmu_workaround - msr sctlr_el1, x4 - isb - br x19 // jump to kernel entrypoint - - .org 1b + 32 -SYM_CODE_END(efi_enter_kernel) diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index 7327b98d8e3f..d4a6b12a8741 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -58,7 +58,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, efi_handle_t image_handle) { efi_status_t status; - unsigned long kernel_size, kernel_memsize = 0; + unsigned long kernel_size, kernel_codesize, kernel_memsize; u32 phys_seed = 0; u64 min_kimg_align = efi_get_kimg_min_align(); @@ -93,6 +93,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, SEGMENT_ALIGN >> 10); kernel_size = _edata - _text; + kernel_codesize = __inittext_end - _text; kernel_memsize = kernel_size + (_end - _edata); *reserve_size = kernel_memsize; @@ -121,7 +122,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, */ *image_addr = (u64)_text; *reserve_size = 0; - goto clean_image_to_poc; + return EFI_SUCCESS; } status = efi_allocate_pages_aligned(*reserve_size, reserve_addr, @@ -137,14 +138,21 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, *image_addr = *reserve_addr; memcpy((void *)*image_addr, _text, kernel_size); + caches_clean_inval_pou(*image_addr, *image_addr + kernel_codesize); -clean_image_to_poc: + return EFI_SUCCESS; +} + +asmlinkage void primary_entry(void); + +unsigned long primary_entry_offset(void) +{ /* - * Clean the copied Image to the PoC, and ensure it is not shadowed by - * stale icache entries from before relocation. 
+ * When built as part of the kernel, the EFI stub cannot branch to the + * kernel proper via the image header, as the PE/COFF header is + * strictly not part of the in-memory presentation of the image, only + * of the file representation. So instead, we need to jump to the + * actual entrypoint in the .text region of the image. */ - dcache_clean_poc(*image_addr, *image_addr + kernel_size); - asm("ic ialluis"); - - return EFI_SUCCESS; + return (char *)primary_entry - _text; } diff --git a/drivers/firmware/efi/libstub/arm64.c b/drivers/firmware/efi/libstub/arm64.c index ff2d18c42ee7..f5da4fbccd86 100644 --- a/drivers/firmware/efi/libstub/arm64.c +++ b/drivers/firmware/efi/libstub/arm64.c @@ -56,6 +56,12 @@ efi_status_t check_platform_features(void) return EFI_SUCCESS; } +#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE +#define DCTYPE "civac" +#else +#define DCTYPE "cvau" +#endif + void efi_cache_sync_image(unsigned long image_base, unsigned long alloc_size, unsigned long code_size) @@ -64,13 +70,38 @@ void efi_cache_sync_image(unsigned long image_base, u64 lsize = 4 << cpuid_feature_extract_unsigned_field(ctr, CTR_EL0_DminLine_SHIFT); - do { - asm("dc civac, %0" :: "r"(image_base)); - image_base += lsize; - alloc_size -= lsize; - } while (alloc_size >= lsize); + /* only perform the cache maintenance if needed for I/D coherency */ + if (!(ctr & BIT(CTR_EL0_IDC_SHIFT))) { + do { + asm("dc " DCTYPE ", %0" :: "r"(image_base)); + image_base += lsize; + code_size -= lsize; + } while (code_size >= lsize); + } asm("ic ialluis"); dsb(ish); isb(); } + +unsigned long __weak primary_entry_offset(void) +{ + /* + * By default, we can invoke the kernel via the branch instruction in + * the image header, so offset #0. This will be overridden by the EFI + * stub build that is linked into the core kernel, as in that case, the + * image header may not have been loaded into memory, or may be mapped + * with non-executable permissions. + */ + return 0; +} + +void __noreturn efi_enter_kernel(unsigned long entrypoint, + unsigned long fdt_addr, + unsigned long fdt_size) +{ + void (* __noreturn enter_kernel)(u64, u64, u64, u64); + + enter_kernel = (void *)entrypoint + primary_entry_offset(); + enter_kernel(fdt_addr, 0, 0, 0); +} -- cgit v1.2.3 From 2ced0f30a426c7301350681f838344d5aea711e3 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 25 Jan 2023 19:59:10 +0100 Subject: arm64: head: Switch endianness before populating the ID map Ensure that the endianness used for populating the ID map matches the endianness that the running kernel will be using, as this is no longer guaranteed now that create_idmap() is invoked before init_kernel_el(). Note that doing so is only safe if the MMU is off, as switching the endianness with the MMU on results in the active ID map becoming invalid. So also clear the M bit when toggling the EE bit in SCTLR, and mark the MMU as disabled at boot. Note that the same issue has resulted in preserve_boot_args() recording the contents of registers X0 ... X3 in the wrong byte order, although this is arguably a very minor concern.
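Concretely, the safe toggle described above boils down to a sequence along these lines (a sketch of the EL1 case only, using the names from the hunk below; the EL2 path writes SCTLR_EL2 instead):

	eor	x19, x19, #SCTLR_ELx_EE		// flip the endianness bit
	bic	x19, x19, #SCTLR_ELx_M		// keep the MMU disabled
	pre_disable_mmu_workaround
	msr	sctlr_el1, x19
	isb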
Fixes: 32b135a7fafe ("arm64: head: avoid cache invalidation when entering with the MMU on") Reported-by: Nathan Chancellor Signed-off-by: Ard Biesheuvel Tested-by: Nathan Chancellor Link: https://lore.kernel.org/r/20230125185910.962733-1-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 3 ++- arch/arm64/kernel/head.S | 23 ++++++++++++++++++++++- 2 files changed, 24 insertions(+), 2 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 1312fb48f18b..2facd7933948 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -575,6 +575,7 @@ #define SCTLR_ELx_DSSBS (BIT(44)) #define SCTLR_ELx_ATA (BIT(43)) +#define SCTLR_ELx_EE_SHIFT 25 #define SCTLR_ELx_ENIA_SHIFT 31 #define SCTLR_ELx_ITFSB (BIT(37)) @@ -583,7 +584,7 @@ #define SCTLR_ELx_LSMAOE (BIT(29)) #define SCTLR_ELx_nTLSMD (BIT(28)) #define SCTLR_ELx_ENDA (BIT(27)) -#define SCTLR_ELx_EE (BIT(25)) +#define SCTLR_ELx_EE (BIT(SCTLR_ELx_EE_SHIFT)) #define SCTLR_ELx_EIS (BIT(22)) #define SCTLR_ELx_IESB (BIT(21)) #define SCTLR_ELx_TSCXT (BIT(20)) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index dc56e1d8f36e..107a2b87577c 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -129,10 +129,31 @@ SYM_CODE_START_LOCAL(record_mmu_state) mrs x19, sctlr_el1 b.ne 0f mrs x19, sctlr_el2 -0: tst x19, #SCTLR_ELx_C // Z := (C == 0) +0: +CPU_LE( tbnz x19, #SCTLR_ELx_EE_SHIFT, 1f ) +CPU_BE( tbz x19, #SCTLR_ELx_EE_SHIFT, 1f ) + tst x19, #SCTLR_ELx_C // Z := (C == 0) and x19, x19, #SCTLR_ELx_M // isolate M bit csel x19, xzr, x19, eq // clear x19 if Z ret + + /* + * Set the correct endianness early so all memory accesses issued + * before init_kernel_el() occur in the correct byte order. Note that + * this means the MMU must be disabled, or the active ID map will end + * up getting interpreted with the wrong byte order. + */ +1: eor x19, x19, #SCTLR_ELx_EE + bic x19, x19, #SCTLR_ELx_M + b.ne 2f + pre_disable_mmu_workaround + msr sctlr_el2, x19 + b 3f +2: pre_disable_mmu_workaround + msr sctlr_el1, x19 +3: isb + mov x19, xzr + ret SYM_CODE_END(record_mmu_state) /* -- cgit v1.2.3 From bb457bddee41077768759f6706fa641227877fa0 Mon Sep 17 00:00:00 2001 From: Prathu Baronia Date: Mon, 23 Jan 2023 16:36:38 +0530 Subject: arm64: el2_setup.h: fix spelling typo in comments - "evailable" -> "available" Signed-off-by: Prathu Baronia Reviewed-by: Mukesh Ojha Link: https://lore.kernel.org/r/20230123110639.10473-1-prathubaronia2011@gmail.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/el2_setup.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 668569adf4d3..40567b9bb019 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -177,7 +177,7 @@ /** * Initialize EL2 registers to sane values. This should be called early on all * cores that were booted in EL2. Note that everything gets initialised as - * if VHE was not evailable. The kernel context will be upgraded to VHE + * if VHE was not available. The kernel context will be upgraded to VHE * if possible later on in the boot process * * Regs: x0, x1 and x2 are clobbered.
-- cgit v1.2.3 From 004fc58f917cfea5d7190139e3ed1b7a13e39c25 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 30 Jan 2023 17:44:57 +0530 Subject: arm64/mm: Intercept pfn changes in set_pte_at() Changing the pfn of a user page table mapped entry without first going through the break-before-make (BBM) procedure is unsafe. This just updates set_pte_at() to intercept such changes, via an updated pgattr_change_is_safe(). This new check happens via __check_racy_pte_update(), which has now been renamed as __check_safe_pte_update(). Cc: Will Deacon Cc: Mark Rutland Cc: Andrew Morton Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Acked-by: Mark Rutland Signed-off-by: Anshuman Khandual Link: https://lore.kernel.org/r/20230130121457.1607675-1-anshuman.khandual@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/pgtable.h | 8 ++++++-- arch/arm64/mm/mmu.c | 8 ++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index b4bbeed80fb6..832c9c8fb58f 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -275,6 +275,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte) } extern void __sync_icache_dcache(pte_t pteval); +bool pgattr_change_is_safe(u64 old, u64 new); /* * PTE bits configuration in the presence of hardware Dirty Bit Management @@ -292,7 +293,7 @@ extern void __sync_icache_dcache(pte_t pteval); * PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY) */ -static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep, +static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep, pte_t pte) { pte_t old_pte; @@ -318,6 +319,9 @@ static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep, VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte), "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx", __func__, pte_val(old_pte), pte_val(pte)); + VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)), + "%s: unsafe attribute change: 0x%016llx -> 0x%016llx", + __func__, pte_val(old_pte), pte_val(pte)); } static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, @@ -346,7 +350,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, mte_sync_tags(old_pte, pte); } - __check_racy_pte_update(mm, ptep, pte); + __check_safe_pte_update(mm, ptep, pte); set_pte(ptep, pte); } diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 14c87e8d69d8..34d5f7c4c64e 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -133,7 +133,7 @@ static phys_addr_t __init early_pgtable_alloc(int shift) return phys; } -static bool pgattr_change_is_safe(u64 old, u64 new) +bool pgattr_change_is_safe(u64 old, u64 new) { /* * The following mapping attributes may be updated in live @@ -142,9 +142,13 @@ pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG; /* creating or taking down mappings is always safe */ - if (old == 0 || new == 0) + if (!pte_valid(__pte(old)) || !pte_valid(__pte(new))) return true; + /* A live entry's pfn should not change */ + if (pte_pfn(__pte(old)) != pte_pfn(__pte(new))) + return false; + /* live contiguous mappings may not be manipulated at all */ if ((old | new) & PTE_CONT) return false; -- cgit v1.2.3 From c888b7bd916c4d85e06a6c3e507d5d68b229518f Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 30 Jan 2023 14:54:26 +0000 Subject: arm64: rename
ARM64_HAS_IRQ_PRIO_MASKING to ARM64_HAS_GIC_PRIO_MASKING Subsequent patches will add more GIC-related cpucaps. When we do so, it would be nice to give them a consistent HAS_GIC_* prefix. In preparation for doing so, this patch renames the existing ARM64_HAS_IRQ_PRIO_MASKING cap to ARM64_HAS_GIC_PRIO_MASKING. The cpucaps file was hand-modified; all other changes were scripted with: find . -type f -name '*.[chS]' -print0 | \ xargs -0 sed -i 's/ARM64_HAS_IRQ_PRIO_MASKING/ARM64_HAS_GIC_PRIO_MASKING/' There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Reviewed-by: Marc Zyngier Cc: Mark Brown Cc: Will Deacon Link: https://lore.kernel.org/r/20230130145429.903791-3-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 2 +- arch/arm64/include/asm/irqflags.h | 10 +++++----- arch/arm64/include/asm/ptrace.h | 2 +- arch/arm64/kernel/cpufeature.c | 2 +- arch/arm64/kernel/entry.S | 4 ++-- arch/arm64/tools/cpucaps | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 03d1c9d7af82..c50928398e4b 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -806,7 +806,7 @@ static inline bool system_has_full_ptr_auth(void) static __always_inline bool system_uses_irq_prio_masking(void) { return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && - cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING); + cpus_have_const_cap(ARM64_HAS_GIC_PRIO_MASKING); } static inline bool system_supports_mte(void) diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index b57b9b1e4344..f51653fb90e4 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h @@ -35,7 +35,7 @@ static inline void arch_local_irq_enable(void) asm volatile(ALTERNATIVE( "msr daifclr, #3 // arch_local_irq_enable", __msr_s(SYS_ICC_PMR_EL1, "%0"), - ARM64_HAS_IRQ_PRIO_MASKING) + ARM64_HAS_GIC_PRIO_MASKING) : : "r" ((unsigned long) GIC_PRIO_IRQON) : "memory"); @@ -54,7 +54,7 @@ static inline void arch_local_irq_disable(void) asm volatile(ALTERNATIVE( "msr daifset, #3 // arch_local_irq_disable", __msr_s(SYS_ICC_PMR_EL1, "%0"), - ARM64_HAS_IRQ_PRIO_MASKING) + ARM64_HAS_GIC_PRIO_MASKING) : : "r" ((unsigned long) GIC_PRIO_IRQOFF) : "memory"); @@ -70,7 +70,7 @@ static inline unsigned long arch_local_save_flags(void) asm volatile(ALTERNATIVE( "mrs %0, daif", __mrs_s("%0", SYS_ICC_PMR_EL1), - ARM64_HAS_IRQ_PRIO_MASKING) + ARM64_HAS_GIC_PRIO_MASKING) : "=&r" (flags) : : "memory"); @@ -85,7 +85,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags) asm volatile(ALTERNATIVE( "and %w0, %w1, #" __stringify(PSR_I_BIT), "eor %w0, %w1, #" __stringify(GIC_PRIO_IRQON), - ARM64_HAS_IRQ_PRIO_MASKING) + ARM64_HAS_GIC_PRIO_MASKING) : "=&r" (res) : "r" ((int) flags) : "memory"); @@ -122,7 +122,7 @@ static inline void arch_local_irq_restore(unsigned long flags) asm volatile(ALTERNATIVE( "msr daif, %0", __msr_s(SYS_ICC_PMR_EL1, "%0"), - ARM64_HAS_IRQ_PRIO_MASKING) + ARM64_HAS_GIC_PRIO_MASKING) : : "r" (flags) : "memory"); diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 41b332c054ab..47ec58031f11 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -194,7 +194,7 @@ struct pt_regs { u32 unused2; #endif u64 sdei_ttbr1; - /* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. 
*/ + /* Only valid when ARM64_HAS_GIC_PRIO_MASKING is enabled. */ u64 pmr_save; u64 stackframe[2]; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index ad2a1f5503f3..afd547a5309c 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2534,7 +2534,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { * Depends on having GICv3 */ .desc = "IRQ priority masking", - .capability = ARM64_HAS_IRQ_PRIO_MASKING, + .capability = ARM64_HAS_GIC_PRIO_MASKING, .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, .matches = can_use_gic_priorities, .sys_reg = SYS_ID_AA64PFR0_EL1, diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 11cb99c4d298..e2d1d3d5de1d 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -312,7 +312,7 @@ alternative_else_nop_endif #ifdef CONFIG_ARM64_PSEUDO_NMI /* Save pmr */ -alternative_if ARM64_HAS_IRQ_PRIO_MASKING +alternative_if ARM64_HAS_GIC_PRIO_MASKING mrs_s x20, SYS_ICC_PMR_EL1 str x20, [sp, #S_PMR_SAVE] mov x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET @@ -337,7 +337,7 @@ alternative_else_nop_endif #ifdef CONFIG_ARM64_PSEUDO_NMI /* Restore pmr */ -alternative_if ARM64_HAS_IRQ_PRIO_MASKING +alternative_if ARM64_HAS_GIC_PRIO_MASKING ldr x20, [sp, #S_PMR_SAVE] msr_s SYS_ICC_PMR_EL1, x20 mrs_s x21, SYS_ICC_CTLR_EL1 diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index 373eb148498e..c993d43624b3 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -29,7 +29,7 @@ HAS_GENERIC_AUTH_ARCH_QARMA3 HAS_GENERIC_AUTH_ARCH_QARMA5 HAS_GENERIC_AUTH_IMP_DEF HAS_GIC_CPUIF_SYSREGS -HAS_IRQ_PRIO_MASKING +HAS_GIC_PRIO_MASKING HAS_LDAPR HAS_LSE_ATOMICS HAS_NO_FPSIMD -- cgit v1.2.3 From 8bf0a8048b155eebc05aa8896f4c378a4c538214 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 30 Jan 2023 14:54:28 +0000 Subject: arm64: add ARM64_HAS_GIC_PRIO_RELAXED_SYNC cpucap When Priority Mask Hint Enable (PMHE) == 0b1, the GIC may use the PMR value to determine whether to signal an IRQ to a PE, and consequently after a change to the PMR value, a DSB SY may be required to ensure that interrupts are signalled to a CPU in finite time. When PMHE == 0b0, interrupts are always signalled to the relevant PE, and all masking occurs locally, without requiring a DSB SY. Since commit: f226650494c6aa87 ("arm64: Relax ICC_PMR_EL1 accesses when ICC_CTLR_EL1.PMHE is clear") ... we handle this dynamically: in most cases a static key is used to determine whether to issue a DSB SY, but the entry code must read from ICC_CTLR_EL1 as static keys aren't accessible from plain assembly. It would be much nicer to use an alternative instruction sequence for the DSB, as this would avoid the need to read from ICC_CTLR_EL1 in the entry code, and for most other code this will result in simpler code generation with fewer instructions and fewer branches. This patch adds a new ARM64_HAS_GIC_PRIO_RELAXED_SYNC cpucap which is only set when ICC_CTLR_EL1.PMHE == 0b0 (and GIC priority masking is in use). This allows us to replace the existing users of the `gic_pmr_sync` static key with alternative sequences which default to a DSB SY and are relaxed to a NOP when PMHE is not in use. The entry assembly management of the PMR is slightly restructured to use a branch (rather than multiple NOPs) when priority masking is not in use. 
This is more in keeping with other alternatives in the entry assembly, and permits the use of a separate alternative for the PMHE-dependent DSB SY (and removal of the conditional branch this currently requires). For consistency I've adjusted both the save and restore paths. According to bloat-o-meter, when building defconfig + CONFIG_ARM64_PSEUDO_NMI=y this shrinks the kernel text by ~4KiB: | add/remove: 4/2 grow/shrink: 42/310 up/down: 332/-5032 (-4700) The resulting vmlinux is ~66KiB smaller, though the resulting Image size is unchanged due to padding and alignment: | [mark@lakrids:~/src/linux]% ls -al vmlinux-* | -rwxr-xr-x 1 mark mark 137508344 Jan 17 14:11 vmlinux-after | -rwxr-xr-x 1 mark mark 137575440 Jan 17 13:49 vmlinux-before | [mark@lakrids:~/src/linux]% ls -al Image-* | -rw-r--r-- 1 mark mark 38777344 Jan 17 14:11 Image-after | -rw-r--r-- 1 mark mark 38777344 Jan 17 13:49 Image-before Prior to this patch we did not verify the state of ICC_CTLR_EL1.PMHE on secondary CPUs. As of this patch this is verified by the cpufeature code when using GIC priority masking (i.e. when using pseudo-NMIs). Note that since commit: 7e3a57fa6ca831fa ("arm64: Document ICC_CTLR_EL3.PMHE setting requirements") ... Documentation/arm64/booting.rst specifies: | - ICC_CTLR_EL3.PMHE (bit 6) must be set to the same value across | all CPUs the kernel is executing on, and must stay constant | for the lifetime of the kernel. ... so that should not adversely affect any compliant systems, and as we'll only check for the absence of PMHE when using pseudo-NMIs, this will only fire when such a mismatch would adversely affect the system. Signed-off-by: Mark Rutland Reviewed-by: Marc Zyngier Cc: Mark Brown Cc: Will Deacon Link: https://lore.kernel.org/r/20230130145429.903791-5-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm/include/asm/arch_gicv3.h | 5 +++++ arch/arm64/include/asm/arch_gicv3.h | 5 +++++ arch/arm64/include/asm/barrier.h | 11 +++++++---- arch/arm64/kernel/cpufeature.c | 36 ++++++++++++++++++++++++++++++++++++ arch/arm64/kernel/entry.S | 25 ++++++++++++++++--------- arch/arm64/kernel/image-vars.h | 2 -- arch/arm64/tools/cpucaps | 1 + drivers/irqchip/irq-gic-v3.c | 19 +------------------ 8 files changed, 71 insertions(+), 33 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h index f82a819eb0db..311e83038bdb 100644 --- a/arch/arm/include/asm/arch_gicv3.h +++ b/arch/arm/include/asm/arch_gicv3.h @@ -252,5 +252,10 @@ static inline void gic_arch_enable_irqs(void) WARN_ON_ONCE(true); } +static inline bool gic_has_relaxed_pmr_sync(void) +{ + return false; +} + #endif /* !__ASSEMBLY__ */ #endif /* !__ASM_ARCH_GICV3_H */ diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 48d4473e8eee..01281a5336cf 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -190,5 +190,10 @@ static inline void gic_arch_enable_irqs(void) asm volatile ("msr daifclr, #3" : : : "memory"); } +static inline bool gic_has_relaxed_pmr_sync(void) +{ + return cpus_have_cap(ARM64_HAS_GIC_PRIO_RELAXED_SYNC); +} + #endif /* __ASSEMBLY__ */ #endif /* __ASM_ARCH_GICV3_H */ diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 2cfc4245d2e2..3dd8982a9ce3 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -11,6 +11,8 @@ #include +#include + #define __nops(n) ".rept " #n "\nnop\n.endr\n" #define nops(n) asm
volatile(__nops(n)) @@ -41,10 +43,11 @@ #ifdef CONFIG_ARM64_PSEUDO_NMI #define pmr_sync() \ do { \ - extern struct static_key_false gic_pmr_sync; \ - \ - if (static_branch_unlikely(&gic_pmr_sync)) \ - dsb(sy); \ + asm volatile( \ + ALTERNATIVE_CB("dsb sy", \ + ARM64_HAS_GIC_PRIO_RELAXED_SYNC, \ + alt_cb_patch_nops) \ + ); \ } while(0) #else #define pmr_sync() do {} while (0) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 515975f42d03..bdabfc98226a 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2056,6 +2056,34 @@ static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry, return enable_pseudo_nmi; } + +static bool has_gic_prio_relaxed_sync(const struct arm64_cpu_capabilities *entry, + int scope) +{ + /* + * If we're not using priority masking then we won't be poking PMR_EL1, + * and there's no need to relax synchronization of writes to it, and + * ICC_CTLR_EL1 might not be accessible and we must avoid reads from + * that. + * + * ARM64_HAS_GIC_PRIO_MASKING has a lower index, and is a boot CPU + * feature, so will be detected earlier. + */ + BUILD_BUG_ON(ARM64_HAS_GIC_PRIO_RELAXED_SYNC <= ARM64_HAS_GIC_PRIO_MASKING); + if (!cpus_have_cap(ARM64_HAS_GIC_PRIO_MASKING)) + return false; + + /* + * When Priority Mask Hint Enable (PMHE) == 0b0, PMR is not used as a + * hint for interrupt distribution, a DSB is not necessary when + * unmasking IRQs via PMR, and we can relax the barrier to a NOP. + * + * Linux itself doesn't use 1:N distribution, so has no need to + * set PMHE. The only reason to have it set is if EL3 requires it + * (and we can't change it). + */ + return (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK) == 0; +} #endif #ifdef CONFIG_ARM64_BTI @@ -2546,6 +2574,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, .matches = can_use_gic_priorities, }, + { + /* + * Depends on ARM64_HAS_GIC_PRIO_MASKING + */ + .capability = ARM64_HAS_GIC_PRIO_RELAXED_SYNC, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, + .matches = has_gic_prio_relaxed_sync, + }, #endif #ifdef CONFIG_ARM64_E0PD { diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index e2d1d3d5de1d..8427cdc0cfcb 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -311,13 +311,16 @@ alternative_else_nop_endif .endif #ifdef CONFIG_ARM64_PSEUDO_NMI - /* Save pmr */ -alternative_if ARM64_HAS_GIC_PRIO_MASKING +alternative_if_not ARM64_HAS_GIC_PRIO_MASKING + b .Lskip_pmr_save\@ +alternative_else_nop_endif + mrs_s x20, SYS_ICC_PMR_EL1 str x20, [sp, #S_PMR_SAVE] mov x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET msr_s SYS_ICC_PMR_EL1, x20 -alternative_else_nop_endif + +.Lskip_pmr_save\@: #endif /* @@ -336,15 +339,19 @@ alternative_else_nop_endif .endif #ifdef CONFIG_ARM64_PSEUDO_NMI - /* Restore pmr */ -alternative_if ARM64_HAS_GIC_PRIO_MASKING +alternative_if_not ARM64_HAS_GIC_PRIO_MASKING + b .Lskip_pmr_restore\@ +alternative_else_nop_endif + ldr x20, [sp, #S_PMR_SAVE] msr_s SYS_ICC_PMR_EL1, x20 - mrs_s x21, SYS_ICC_CTLR_EL1 - tbz x21, #6, .L__skip_pmr_sync\@ // Check for ICC_CTLR_EL1.PMHE - dsb sy // Ensure priority change is seen by redistributor -.L__skip_pmr_sync\@: + + /* Ensure priority change is seen by redistributor */ +alternative_if_not ARM64_HAS_GIC_PRIO_RELAXED_SYNC + dsb sy alternative_else_nop_endif + +.Lskip_pmr_restore\@: #endif ldp x21, x22, [sp, #S_PC] // load ELR, SPSR diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h 
index d0e9bb5c91fc..97e750a35f70 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -67,9 +67,7 @@ KVM_NVHE_ALIAS(__hyp_stub_vectors); KVM_NVHE_ALIAS(vgic_v2_cpuif_trap); KVM_NVHE_ALIAS(vgic_v3_cpuif_trap); -/* Static key checked in pmr_sync(). */ #ifdef CONFIG_ARM64_PSEUDO_NMI -KVM_NVHE_ALIAS(gic_pmr_sync); /* Static key checked in GIC_PRIO_IRQOFF. */ KVM_NVHE_ALIAS(gic_nonsecure_priorities); #endif diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index c993d43624b3..10ce8f88f86b 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -30,6 +30,7 @@ HAS_GENERIC_AUTH_ARCH_QARMA5 HAS_GENERIC_AUTH_IMP_DEF HAS_GIC_CPUIF_SYSREGS HAS_GIC_PRIO_MASKING +HAS_GIC_PRIO_RELAXED_SYNC HAS_LDAPR HAS_LSE_ATOMICS HAS_NO_FPSIMD diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 997104d4338e..3779836737c8 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -89,15 +89,6 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); */ static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis); -/* - * Global static key controlling whether an update to PMR allowing more - * interrupts requires to be propagated to the redistributor (DSB SY). - * And this needs to be exported for modules to be able to enable - * interrupts... - */ -DEFINE_STATIC_KEY_FALSE(gic_pmr_sync); -EXPORT_SYMBOL(gic_pmr_sync); - DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities); EXPORT_SYMBOL(gic_nonsecure_priorities); @@ -1768,16 +1759,8 @@ static void gic_enable_nmi_support(void) for (i = 0; i < gic_data.ppi_nr; i++) refcount_set(&ppi_nmi_refs[i], 0); - /* - * Linux itself doesn't use 1:N distribution, so has no need to - * set PMHE. The only reason to have it set is if EL3 requires it - * (and we can't change it). - */ - if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK) - static_branch_enable(&gic_pmr_sync); - pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n", - static_branch_unlikely(&gic_pmr_sync) ? "forced" : "relaxed"); + gic_has_relaxed_pmr_sync() ? "relaxed" : "forced"); /* * How priority values are used by the GIC depends on two things: -- cgit v1.2.3 From a5f61cc636f48bdf09450dba72c0bc914f9eed2f Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 30 Jan 2023 14:54:29 +0000 Subject: arm64: irqflags: use alternative branches for pseudo-NMI logic Due to the way we use alternatives in the irqflags code, even when CONFIG_ARM64_PSEUDO_NMI=n, we generate unused alternative code for pseudo-NMI management. This patch reworks the irqflags code to remove the redundant code when CONFIG_ARM64_PSEUDO_NMI=n, which benefits the more common case, and will permit further rework of our DAIF management (e.g. in preparation for ARMv8.8-A's NMI feature). Prior to this patch a defconfig kernel has hundreds of redundant instructions to access ICC_PMR_EL1 (which should only need to be manipulated in setup code), which this patch removes: | [mark@lakrids:~/src/linux]% usekorg 12.1.0 aarch64-linux-objdump -d vmlinux-before-defconfig | grep icc_pmr_el1 | wc -l | 885 | [mark@lakrids:~/src/linux]% usekorg 12.1.0 aarch64-linux-objdump -d vmlinux-after-defconfig | grep icc_pmr_el1 | wc -l | 5 Those instructions alone account for more than 3KiB of kernel text, and will be associated with additional alt_instr entries, padding and branches, etc. 
These redundant instructions exist because we use alternative sequences to choose between DAIF / PMR management in irqflags.h, and even when CONFIG_ARM64_PSEUDO_NMI=n, those alternative sequences will generate the code for PMR management, along with alt_instr entries. We use alternatives here as this was necessary to ensure that we never encounter a mismatched local_irq_save() ... local_irq_restore() sequence in the middle of patching, which could happen if we used static keys to choose between DAIF and PMR management. Since commit: 21fb26bfb01ffe0d ("arm64: alternatives: add alternative_has_feature_*()") ... we have a mechanism to use alternatives similarly to static keys, allowing us to write the bulk of the logic in C code while also being able to rely on all sites being patched in one go, and avoiding a mismatched local_irq_save() ... local_irq_restore() sequence during patching. This patch rewrites arm64's local_irq_*() functions to use alternative branches. This allows for the pseudo-NMI code to be entirely elided when CONFIG_ARM64_PSEUDO_NMI=n, making a defconfig Image 64KiB smaller, and not affecting the size of an Image with CONFIG_ARM64_PSEUDO_NMI=y: | [mark@lakrids:~/src/linux]% ls -al vmlinux-* | -rwxr-xr-x 1 mark mark 137473432 Jan 18 11:11 vmlinux-after-defconfig | -rwxr-xr-x 1 mark mark 137918776 Jan 18 11:15 vmlinux-after-pnmi | -rwxr-xr-x 1 mark mark 137380152 Jan 18 11:03 vmlinux-before-defconfig | -rwxr-xr-x 1 mark mark 137523704 Jan 18 11:08 vmlinux-before-pnmi | [mark@lakrids:~/src/linux]% ls -al Image-* | -rw-r--r-- 1 mark mark 38646272 Jan 18 11:11 Image-after-defconfig | -rw-r--r-- 1 mark mark 38777344 Jan 18 11:14 Image-after-pnmi | -rw-r--r-- 1 mark mark 38711808 Jan 18 11:03 Image-before-defconfig | -rw-r--r-- 1 mark mark 38777344 Jan 18 11:08 Image-before-pnmi Some sensitive code depends on being run with interrupts enabled or with interrupts disabled, and so when enabling or disabling interrupts we must ensure that the compiler does not move such code around the actual enable/disable. Before this patch, that was ensured by the combined asm volatile blocks having memory clobbers (and any sensitive code either being asm volatile, or touching memory). This patch consistently uses explicit barrier() operations before and after the enable/disable, which allows us to use the usual sysreg accessors (which are asm volatile) to manipulate the interrupt masks. The use of pmr_sync() is pulled within this critical section for consistency. Signed-off-by: Mark Rutland Reviewed-by: Marc Zyngier Cc: Mark Brown Cc: Will Deacon Link: https://lore.kernel.org/r/20230130145429.903791-6-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/irqflags.h | 191 ++++++++++++++++++++++++++------------ 1 file changed, 132 insertions(+), 59 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index f51653fb90e4..e0f5f6b73edd 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h @@ -21,43 +21,77 @@ * exceptions should be unmasked. */ -/* - * CPU interrupt mask handling.
- */ -static inline void arch_local_irq_enable(void) +static __always_inline bool __irqflags_uses_pmr(void) { - if (system_has_prio_mask_debugging()) { - u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1); + return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && + alternative_has_feature_unlikely(ARM64_HAS_GIC_PRIO_MASKING); +} +static __always_inline void __daif_local_irq_enable(void) +{ + barrier(); + asm volatile("msr daifclr, #3"); + barrier(); +} + +static __always_inline void __pmr_local_irq_enable(void) +{ + if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) { + u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1); WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF); } - asm volatile(ALTERNATIVE( - "msr daifclr, #3 // arch_local_irq_enable", - __msr_s(SYS_ICC_PMR_EL1, "%0"), - ARM64_HAS_GIC_PRIO_MASKING) - : - : "r" ((unsigned long) GIC_PRIO_IRQON) - : "memory"); - + barrier(); + write_sysreg_s(GIC_PRIO_IRQON, SYS_ICC_PMR_EL1); pmr_sync(); + barrier(); } -static inline void arch_local_irq_disable(void) +static inline void arch_local_irq_enable(void) { - if (system_has_prio_mask_debugging()) { - u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1); + if (__irqflags_uses_pmr()) { + __pmr_local_irq_enable(); + } else { + __daif_local_irq_enable(); + } +} +static __always_inline void __daif_local_irq_disable(void) +{ + barrier(); + asm volatile("msr daifset, #3"); + barrier(); +} + +static __always_inline void __pmr_local_irq_disable(void) +{ + if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) { + u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1); WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF); } - asm volatile(ALTERNATIVE( - "msr daifset, #3 // arch_local_irq_disable", - __msr_s(SYS_ICC_PMR_EL1, "%0"), - ARM64_HAS_GIC_PRIO_MASKING) - : - : "r" ((unsigned long) GIC_PRIO_IRQOFF) - : "memory"); + barrier(); + write_sysreg_s(GIC_PRIO_IRQOFF, SYS_ICC_PMR_EL1); + barrier(); +} + +static inline void arch_local_irq_disable(void) +{ + if (__irqflags_uses_pmr()) { + __pmr_local_irq_disable(); + } else { + __daif_local_irq_disable(); + } +} + +static __always_inline unsigned long __daif_local_save_flags(void) +{ + return read_sysreg(daif); +} + +static __always_inline unsigned long __pmr_local_save_flags(void) +{ + return read_sysreg_s(SYS_ICC_PMR_EL1); } /* @@ -65,69 +99,108 @@ static inline void arch_local_irq_disable(void) */ static inline unsigned long arch_local_save_flags(void) { - unsigned long flags; + if (__irqflags_uses_pmr()) { + return __pmr_local_save_flags(); + } else { + return __daif_local_save_flags(); + } +} - asm volatile(ALTERNATIVE( - "mrs %0, daif", - __mrs_s("%0", SYS_ICC_PMR_EL1), - ARM64_HAS_GIC_PRIO_MASKING) - : "=&r" (flags) - : - : "memory"); +static __always_inline bool __daif_irqs_disabled_flags(unsigned long flags) +{ + return flags & PSR_I_BIT; +} - return flags; +static __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags) +{ + return flags != GIC_PRIO_IRQON; } -static inline int arch_irqs_disabled_flags(unsigned long flags) +static inline bool arch_irqs_disabled_flags(unsigned long flags) { - int res; + if (__irqflags_uses_pmr()) { + return __pmr_irqs_disabled_flags(flags); + } else { + return __daif_irqs_disabled_flags(flags); + } +} - asm volatile(ALTERNATIVE( - "and %w0, %w1, #" __stringify(PSR_I_BIT), - "eor %w0, %w1, #" __stringify(GIC_PRIO_IRQON), - ARM64_HAS_GIC_PRIO_MASKING) - : "=&r" (res) - : "r" ((int) flags) - : "memory"); +static __always_inline bool __daif_irqs_disabled(void) +{ + return __daif_irqs_disabled_flags(__daif_local_save_flags()); +} - return 
res; +static __always_inline bool __pmr_irqs_disabled(void) +{ + return __pmr_irqs_disabled_flags(__pmr_local_save_flags()); } -static inline int arch_irqs_disabled(void) +static inline bool arch_irqs_disabled(void) { - return arch_irqs_disabled_flags(arch_local_save_flags()); + if (__irqflags_uses_pmr()) { + return __pmr_irqs_disabled(); + } else { + return __daif_irqs_disabled(); + } } -static inline unsigned long arch_local_irq_save(void) +static __always_inline unsigned long __daif_local_irq_save(void) { - unsigned long flags; + unsigned long flags = __daif_local_save_flags(); + + __daif_local_irq_disable(); + + return flags; +} - flags = arch_local_save_flags(); +static __always_inline unsigned long __pmr_local_irq_save(void) +{ + unsigned long flags = __pmr_local_save_flags(); /* * There are too many states with IRQs disabled, just keep the current * state if interrupts are already disabled/masked. */ - if (!arch_irqs_disabled_flags(flags)) - arch_local_irq_disable(); + if (!__pmr_irqs_disabled_flags(flags)) + __pmr_local_irq_disable(); return flags; } +static inline unsigned long arch_local_irq_save(void) +{ + if (__irqflags_uses_pmr()) { + return __pmr_local_irq_save(); + } else { + return __daif_local_irq_save(); + } +} + +static __always_inline void __daif_local_irq_restore(unsigned long flags) +{ + barrier(); + write_sysreg(flags, daif); + barrier(); +} + +static __always_inline void __pmr_local_irq_restore(unsigned long flags) +{ + barrier(); + write_sysreg_s(flags, SYS_ICC_PMR_EL1); + pmr_sync(); + barrier(); +} + /* * restore saved IRQ state */ static inline void arch_local_irq_restore(unsigned long flags) { - asm volatile(ALTERNATIVE( - "msr daif, %0", - __msr_s(SYS_ICC_PMR_EL1, "%0"), - ARM64_HAS_GIC_PRIO_MASKING) - : - : "r" (flags) - : "memory"); - - pmr_sync(); + if (__irqflags_uses_pmr()) { + __pmr_local_irq_restore(flags); + } else { + __daif_local_irq_restore(flags); + } } #endif /* __ASM_IRQFLAGS_H */ -- cgit v1.2.3
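As a closing usage sketch (illustrative only; callers of the irqflags API are unchanged, and the DAIF/PMR dispatch happens inside the helpers via the ARM64_HAS_GIC_PRIO_MASKING alternative):

	unsigned long flags;

	flags = arch_local_irq_save();	/* masks via DAIF or, with pseudo-NMIs, PMR */
	/* ... critical section ... */
	arch_local_irq_restore(flags);	/* the PMR path also issues pmr_sync() */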