From d798bc6f3c174c61837862cb9778d73cccd92a8e Mon Sep 17 00:00:00 2001
From: James Clark
Date: Fri, 22 Nov 2024 16:46:35 +0000
Subject: arm64: Fix usage of new shifted MDCR_EL2 values

Since the linked fixes commit, these masks are already shifted, so
remove the shifts. One issue that this fixes is SPE and TRBE not being
available anymore:

  arm_spe_pmu arm,spe-v1: profiling buffer owned by higher exception level

Fixes: 641630313e9c ("arm64: sysreg: Migrate MDCR_EL2 definition to table")
Signed-off-by: James Clark
Acked-by: Marc Zyngier
Link: https://lore.kernel.org/r/20241122164636.2944180-1-james.clark@linaro.org
Signed-off-by: Oliver Upton
---
 arch/arm64/include/asm/el2_setup.h | 4 ++--
 arch/arm64/kernel/hyp-stub.S       | 4 ++--
 arch/arm64/kvm/hyp/nvhe/pkvm.c     | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 4cd41464be3f..f134907d3c08 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -79,7 +79,7 @@
 		      1 << PMSCR_EL2_PA_SHIFT)
 	msr_s	SYS_PMSCR_EL2, x0		// addresses and physical counter
 .Lskip_spe_el2_\@:
-	mov	x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
+	mov	x0, #MDCR_EL2_E2PB_MASK
 	orr	x2, x2, x0			// If we don't have VHE, then
 						// use EL1&0 translation.
@@ -92,7 +92,7 @@
 	and	x0, x0, TRBIDR_EL1_P
 	cbnz	x0, .Lskip_trace_\@		// If TRBE is available at EL2
-	mov	x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
+	mov	x0, #MDCR_EL2_E2TB_MASK
 	orr	x2, x2, x0			// allow the EL1&0 translation
 						// to own it.

diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 65f76064c86b..ae990da1eae5 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -114,8 +114,8 @@ SYM_CODE_START_LOCAL(__finalise_el2)

 	// Use EL2 translations for SPE & TRBE and disable access from EL1
 	mrs	x0, mdcr_el2
-	bic	x0, x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
-	bic	x0, x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
+	bic	x0, x0, #MDCR_EL2_E2PB_MASK
+	bic	x0, x0, #MDCR_EL2_E2TB_MASK
 	msr	mdcr_el2, x0

 	// Transfer the MM state from EL1 to EL2

diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 01616c39a810..071993c16de8 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -126,7 +126,7 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
 	/* Trap SPE */
 	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
 		mdcr_set |= MDCR_EL2_TPMS;
-		mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
+		mdcr_clear |= MDCR_EL2_E2PB_MASK;
 	}

 	/* Trap Trace Filter */
@@ -143,7 +143,7 @@
 	/* Trap External Trace */
 	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
-		mdcr_clear |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;
+		mdcr_clear |= MDCR_EL2_E2TB_MASK;

 	vcpu->arch.mdcr_el2 |= mdcr_set;
 	vcpu->arch.mdcr_el2 &= ~mdcr_clear;
--
cgit v1.2.3
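The failure mode is easy to reproduce outside the kernel: once the
generated *_MASK macros are emitted pre-shifted into their field
position, applying the old *_SHIFT on top moves the bits out of the
field entirely, so E2PB/E2TB are never set and EL1 never gets ownership
of the SPE/TRBE buffers. A minimal standalone sketch of the double-shift
bug, assuming (for illustration only) a 2-bit field at bits [13:12]
rather than the real MDCR_EL2 layout:

  #include <stdint.h>
  #include <stdio.h>

  /* Assumed layout for illustration: a 2-bit E2PB field at bits [13:12]. */
  #define E2PB_SHIFT 12
  /* Post-migration style: the mask is generated already shifted in place. */
  #define E2PB_MASK  (UINT64_C(0x3) << E2PB_SHIFT)

  int main(void)
  {
          uint64_t good = E2PB_MASK;               /* 0x3000: hits the field */
          uint64_t bad  = E2PB_MASK << E2PB_SHIFT; /* 0x3000000: misses it   */

          printf("good=%#llx bad=%#llx\n",
                 (unsigned long long)good, (unsigned long long)bad);
          return 0;
  }

With the doubled shift, the orr/bic operations above touch bits outside
E2PB/E2TB, which is why SPE reported its profiling buffer as owned by a
higher exception level.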
From 6fc3a49f23856fdf155ab35f2244295f7870bf83 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Mon, 25 Nov 2024 09:47:56 +0000
Subject: KVM: arm64: Fix S1/S2 combination when FWB==1 and S2 has Device
 memory type

The G.a revision of the ARM ARM had it pretty clear that HCR_EL2.FWB
had no influence on "The way that stage 1 memory types and attributes
are combined with stage 2 Device type and attributes." (D5.5.5).
However, this wording was lost in further revisions of the architecture.

Restore the intended behaviour, which is to take the strongest memory
type of S1 and S2 in this case, as if FWB was 0. The specification is
being fixed accordingly.

Fixes: be04cebf3e788 ("KVM: arm64: nv: Add emulation of AT S12E{0,1}{R,W}")
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20241125094756.609590-1-maz@kernel.org
Signed-off-by: Oliver Upton
---
 arch/arm64/kvm/at.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
index 8c5d7990e5b3..3d7eb395e33d 100644
--- a/arch/arm64/kvm/at.c
+++ b/arch/arm64/kvm/at.c
@@ -739,8 +739,15 @@ static u64 compute_par_s12(struct kvm_vcpu *vcpu, u64 s1_par,
 			final_attr = s1_parattr;
 			break;
 		default:
-			/* MemAttr[2]=0, Device from S2 */
-			final_attr = s2_memattr & GENMASK(1,0) << 2;
+			/*
+			 * MemAttr[2]=0, Device from S2.
+			 *
+			 * FWB does not influence the way that stage 1
+			 * memory types and attributes are combined
+			 * with stage 2 Device type and attributes.
+			 */
+			final_attr = min(s2_memattr_to_attr(s2_memattr),
+					 s1_parattr);
 		}
 	} else {
 		/* Combination of R_HMNDG, R_TNHFM and R_GQFSF */
--
cgit v1.2.3
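Why min() implements "take the strongest memory type": in the MAIR-style
attribute encoding used by PAR_EL1, Device types occupy the numerically
lowest values and a smaller encoding is the more restrictive type, so the
smaller of the S1 and S2 attributes is the stronger one. A simplified
sketch of that combining rule, using the architectural attribute
encodings but not the kernel's s2_memattr_to_attr() helper:

  #include <stdint.h>
  #include <stdio.h>

  /* Architectural MAIR attribute encodings: smaller value = stronger type. */
  #define ATTR_DEVICE_nGnRnE 0x00 /* strongest */
  #define ATTR_DEVICE_nGnRE  0x04
  #define ATTR_DEVICE_GRE    0x0c
  #define ATTR_NORMAL_WB     0xff /* Normal write-back, weakest shown here */

  /* FWB==0 combining rule for Device at S2: keep the strongest attribute. */
  static uint8_t combine_device(uint8_t s1_attr, uint8_t s2_attr)
  {
          return s1_attr < s2_attr ? s1_attr : s2_attr;
  }

  int main(void)
  {
          /* S1 Normal-WB vs S2 Device-nGnRE: Device-nGnRE must win. */
          printf("%#x\n", combine_device(ATTR_NORMAL_WB, ATTR_DEVICE_nGnRE));
          /* S1 Device-nGnRnE vs S2 Device-GRE: the stronger nGnRnE wins. */
          printf("%#x\n", combine_device(ATTR_DEVICE_nGnRnE, ATTR_DEVICE_GRE));
          return 0;
  }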
From 03c7527e97f73081633d773f9f8c2373f9854b25 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 3 Dec 2024 19:02:36 +0000
Subject: KVM: arm64: Do not allow ID_AA64MMFR0_EL1.ASIDbits to be overridden

Catalin reports that a hypervisor lying to a guest about the size of
the ASID field may result in unexpected issues:

- if the underlying HW only supports 8-bit ASIDs, the ASID field in a
  TLBI VAE1* operation is only 8 bits, and the HW will ignore the other
  8 bits

- if, on the contrary, the HW is 16-bit capable, the ASID field in the
  same TLBI operation is always 16 bits, irrespective of the value of
  TCR_ELx.AS.

This could lead to missed invalidations if the guest was led to assume
that the HW had 8-bit ASIDs while they really are 16 bits wide.

In order to avoid any potential disaster that would be hard to debug,
prevent the migration from a host with 8-bit ASIDs to one with wider
ASIDs (the converse was obviously always forbidden). This is also
consistent with what we already do for VMIDs.

If it becomes absolutely mandatory to support such a migration path in
the future, we will have to trap and emulate all TLBIs, something that
nobody should look forward to.

Fixes: d5a32b60dc18 ("KVM: arm64: Allow userspace to change ID_AA64MMFR{0-2}_EL1")
Reported-by: Catalin Marinas
Signed-off-by: Marc Zyngier
Cc: stable@vger.kernel.org
Cc: Will Deacon
Cc: Mark Rutland
Cc: Marc Zyngier
Cc: James Morse
Cc: Oliver Upton
Acked-by: Catalin Marinas
Link: https://lore.kernel.org/r/20241203190236.505759-1-maz@kernel.org
Signed-off-by: Oliver Upton
---
 arch/arm64/kvm/sys_regs.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 83c6b4a07ef5..e2a5c2918d9e 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -2618,7 +2618,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	ID_WRITABLE(ID_AA64MMFR0_EL1, ~(ID_AA64MMFR0_EL1_RES0 |
 					ID_AA64MMFR0_EL1_TGRAN4_2 |
 					ID_AA64MMFR0_EL1_TGRAN64_2 |
-					ID_AA64MMFR0_EL1_TGRAN16_2)),
+					ID_AA64MMFR0_EL1_TGRAN16_2 |
+					ID_AA64MMFR0_EL1_ASIDBITS)),
 	ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
 					ID_AA64MMFR1_EL1_HCX |
 					ID_AA64MMFR1_EL1_TWED |
--
cgit v1.2.3

From be7e611274224b23776469d7f7ce50e25ac53142 Mon Sep 17 00:00:00 2001
From: Keisuke Nishimura
Date: Sat, 30 Nov 2024 15:49:53 +0100
Subject: KVM: arm64: vgic-its: Add error handling in vgic_its_cache_translation

The return value of xa_store() needs to be checked. This fix adds an
error-handling path that resolves the kref inconsistency on failure. As
suggested by Oliver Upton, this function does not return the error code
intentionally because the translation cache is best effort.

Fixes: 8201d1028caa ("KVM: arm64: vgic-its: Maintain a translation cache per ITS")
Signed-off-by: Keisuke Nishimura
Suggested-by: Oliver Upton
Acked-by: Marc Zyngier
Link: https://lore.kernel.org/r/20241130144952.23729-1-keisuke.nishimura@inria.fr
Signed-off-by: Oliver Upton
---
 arch/arm64/kvm/vgic/vgic-its.c | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index f4c4494645c3..fb96802799c6 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -608,12 +608,22 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
 	lockdep_assert_held(&its->its_lock);
 	vgic_get_irq_kref(irq);

+	old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT);
+
+	/*
+	 * Put the reference taken on @irq if the store fails. Intentionally do
+	 * not return the error as the translation cache is best effort.
+	 */
+	if (xa_is_err(old)) {
+		vgic_put_irq(kvm, irq);
+		return;
+	}
+
 	/*
 	 * We could have raced with another CPU caching the same
 	 * translation behind our back, ensure we don't leak a
 	 * reference if that is the case.
 	 */
-	old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT);
 	if (old)
 		vgic_put_irq(kvm, old);
 }
--
cgit v1.2.3
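The pattern in this last fix generalizes to any refcounted object
published through a store that can fail: take the reference before
publishing, put it back if the store fails, and put the displaced
entry's reference if the store succeeds. A minimal userspace sketch of
that refcounting discipline, with a hypothetical single-slot cache
standing in for the XArray:

  #include <stdio.h>

  /* Toy refcounted object standing in for struct vgic_irq. */
  struct obj { int refs; };

  static void get_ref(struct obj *o) { o->refs++; }
  static void put_ref(struct obj *o) { o->refs--; }

  /* Stand-in for xa_store(): returns the displaced entry via @old,
   * or fails as if an internal allocation returned -ENOMEM. */
  static int fake_store(struct obj **slot, struct obj *new,
                        struct obj **old, int simulate_enomem)
  {
          if (simulate_enomem)
                  return -1;
          *old = *slot;
          *slot = new;
          return 0;
  }

  static void cache_translation(struct obj **slot, struct obj *irq, int fail)
  {
          struct obj *old = NULL;

          get_ref(irq);  /* reference held by the cache's pointer */

          /* Best-effort cache: on failure, put the reference and move on. */
          if (fake_store(slot, irq, &old, fail)) {
                  put_ref(irq);
                  return;
          }

          /* A displaced entry loses its cache reference. */
          if (old)
                  put_ref(old);
  }

  int main(void)
  {
          struct obj a = { 1 }, b = { 1 };
          struct obj *slot = NULL;

          cache_translation(&slot, &a, 0); /* cached: a.refs 1 -> 2      */
          cache_translation(&slot, &b, 0); /* b displaces a: a.refs -> 1 */
          cache_translation(&slot, &a, 1); /* failed store: a.refs stays 1 */

          printf("a.refs=%d b.refs=%d\n", a.refs, b.refs); /* a=1 b=2 */
          return 0;
  }

Without the early-return path, a failed store would leave @irq with an
extra reference that nothing ever drops: a slow leak rather than a crash,
which is exactly the kind of inconsistency that is hard to bisect.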