Diffstat (limited to 'arch/arm64/include')
-rw-r--r--   arch/arm64/include/asm/cpu.h          |  1
-rw-r--r--   arch/arm64/include/asm/cpufeature.h   |  1
-rw-r--r--   arch/arm64/include/asm/kvm_arm.h      |  4
-rw-r--r--   arch/arm64/include/asm/kvm_emulate.h  |  3
-rw-r--r--   arch/arm64/include/asm/kvm_host.h     | 99
-rw-r--r--   arch/arm64/include/asm/kvm_hyp.h      |  2
-rw-r--r--   arch/arm64/include/asm/kvm_mmu.h      | 46
-rw-r--r--   arch/arm64/include/asm/kvm_nested.h   |  1
-rw-r--r--   arch/arm64/include/asm/kvm_pgtable.h  |  2
-rw-r--r--   arch/arm64/include/asm/memory.h       |  2
-rw-r--r--   arch/arm64/include/asm/sysreg.h       |  5
11 files changed, 132 insertions, 34 deletions
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index b1e43f56ee46..6c13fd47e170 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -56,6 +56,7 @@ struct cpuinfo_arm64 {
 	u64		reg_id_aa64mmfr1;
 	u64		reg_id_aa64mmfr2;
 	u64		reg_id_aa64mmfr3;
+	u64		reg_id_aa64mmfr4;
 	u64		reg_id_aa64pfr0;
 	u64		reg_id_aa64pfr1;
 	u64		reg_id_aa64zfr0;
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index bd8d4ca81a48..d7c6988188d9 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -363,6 +363,7 @@ struct arm64_cpu_capabilities {
 			u8 field_pos;
 			u8 field_width;
 			u8 min_field_value;
+			u8 max_field_value;
 			u8 hwcap_type;
 			bool sign;
 			unsigned long hwcap;
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 3c6f8ba1e479..a1769e415d72 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -102,9 +102,7 @@
 #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
 
-#define HCRX_GUEST_FLAGS \
-	(HCRX_EL2_SMPME | HCRX_EL2_TCR2En | \
-	 (cpus_have_final_cap(ARM64_HAS_MOPS) ? (HCRX_EL2_MSCEn | HCRX_EL2_MCE2) : 0))
+#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME | HCRX_EL2_TCR2En)
 #define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En)
 
 /* TCR_EL2 Registers bits */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index b804fe832184..debc3753d2ef 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -209,7 +209,8 @@ static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu)
 
 static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt)
 {
-	return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H;
+	return (!cpus_have_final_cap(ARM64_HAS_HCR_NV1) ||
+		(ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H));
 }
 
 static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 21c57b812569..6883963bbc3a 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -238,9 +238,32 @@ static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
 	return index;
 }
 
+struct kvm_sysreg_masks;
+
+enum fgt_group_id {
+	__NO_FGT_GROUP__,
+	HFGxTR_GROUP,
+	HDFGRTR_GROUP,
+	HDFGWTR_GROUP = HDFGRTR_GROUP,
+	HFGITR_GROUP,
+	HAFGRTR_GROUP,
+
+	/* Must be last */
+	__NR_FGT_GROUP_IDS__
+};
+
 struct kvm_arch {
 	struct kvm_s2_mmu mmu;
 
+	/*
+	 * Fine-Grained UNDEF, mimicking the FGT layout defined by the
+	 * architecture. We track them globally, as we present the
+	 * same feature-set to all vcpus.
+	 *
+	 * Index 0 is currently spare.
+	 */
+	u64 fgu[__NR_FGT_GROUP_IDS__];
+
 	/* Interrupt controller */
 	struct vgic_dist	vgic;
 
@@ -274,6 +297,8 @@ struct kvm_arch {
 #define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE	6
 	/* Initial ID reg values loaded */
 #define KVM_ARCH_FLAG_ID_REGS_INITIALIZED	7
+	/* Fine-Grained UNDEF initialised */
+#define KVM_ARCH_FLAG_FGU_INITIALIZED		8
 	unsigned long flags;
 
 	/* VM-wide vCPU feature set */
@@ -294,6 +319,9 @@ struct kvm_arch {
 	/* PMCR_EL0.N value for the guest */
 	u8 pmcr_n;
 
+	/* Iterator for idreg debugfs */
+	u8 idreg_debugfs_iter;
+
 	/* Hypercall features firmware registers' descriptor */
 	struct kvm_smccc_features smccc_feat;
 	struct maple_tree smccc_filter;
@@ -312,6 +340,9 @@ struct kvm_arch {
 #define KVM_ARM_ID_REG_NUM	(IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
 	u64 id_regs[KVM_ARM_ID_REG_NUM];
 
+	/* Masks for VNCR-backed sysregs */
+	struct kvm_sysreg_masks	*sysreg_masks;
+
 	/*
 	 * For an untrusted host VM, 'pkvm.handle' is used to lookup
 	 * the associated pKVM instance in the hypervisor.
@@ -474,6 +505,13 @@ enum vcpu_sysreg {
 	NR_SYS_REGS	/* Nothing after this line! */
 };
 
+struct kvm_sysreg_masks {
+	struct {
+		u64	res0;
+		u64	res1;
+	} mask[NR_SYS_REGS - __VNCR_START__];
+};
+
 struct kvm_cpu_context {
 	struct user_pt_regs regs;	/* sp = sp_el0 */
 
@@ -549,6 +587,7 @@ struct kvm_vcpu_arch {
 
 	/* Values of trap registers for the guest. */
 	u64 hcr_el2;
+	u64 hcrx_el2;
 	u64 mdcr_el2;
 	u64 cptr_el2;
 
@@ -868,7 +907,15 @@ static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
 
 #define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))
 
-#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))
+u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
+#define __vcpu_sys_reg(v,r)						\
+	(*({								\
+		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
+		u64 *__r = __ctxt_sys_reg(ctxt, (r));			\
+		if (vcpu_has_nv((v)) && (r) >= __VNCR_START__)		\
+			*__r = kvm_vcpu_sanitise_vncr_reg((v), (r));	\
+		__r;							\
+	}))
 
 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
@@ -1055,14 +1102,20 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);
 
+void kvm_sys_regs_create_debugfs(struct kvm *kvm);
 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
 
 int __init kvm_sys_reg_table_init(void);
+struct sys_reg_desc;
+int __init populate_sysreg_config(const struct sys_reg_desc *sr,
+				  unsigned int idx);
 int __init populate_nv_trap_config(void);
 
 bool lock_all_vcpus(struct kvm *kvm);
 void unlock_all_vcpus(struct kvm *kvm);
 
+void kvm_init_sysreg(struct kvm_vcpu *);
+
 /* MMIO helpers */
 void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
 unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
@@ -1233,4 +1286,48 @@ static inline void kvm_hyp_reserve(void) { }
 void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
 bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
 
+#define __expand_field_sign_unsigned(id, fld, val)			\
+	((u64)SYS_FIELD_VALUE(id, fld, val))
+
+#define __expand_field_sign_signed(id, fld, val)			\
+	({								\
+		u64 __val = SYS_FIELD_VALUE(id, fld, val);		\
+		sign_extend64(__val, id##_##fld##_WIDTH - 1);		\
+	})
+
+#define expand_field_sign(id, fld, val)					\
+	(id##_##fld##_SIGNED ?						\
+	 __expand_field_sign_signed(id, fld, val) :			\
+	 __expand_field_sign_unsigned(id, fld, val))
+
+#define get_idreg_field_unsigned(kvm, id, fld)				\
+	({								\
+		u64 __val = IDREG((kvm), SYS_##id);			\
+		FIELD_GET(id##_##fld##_MASK, __val);			\
+	})
+
+#define get_idreg_field_signed(kvm, id, fld)				\
+	({								\
+		u64 __val = get_idreg_field_unsigned(kvm, id, fld);	\
+		sign_extend64(__val, id##_##fld##_WIDTH - 1);		\
+	})
+
+#define get_idreg_field_enum(kvm, id, fld)				\
+	get_idreg_field_unsigned(kvm, id, fld)
+
+#define get_idreg_field(kvm, id, fld)					\
+	(id##_##fld##_SIGNED ?						\
+	 get_idreg_field_signed(kvm, id, fld) :				\
+	 get_idreg_field_unsigned(kvm, id, fld))
+
+#define kvm_has_feat(kvm, id, fld, limit)				\
+	(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, limit))
+
+#define kvm_has_feat_enum(kvm, id, fld, val)				\
+	(get_idreg_field_unsigned((kvm), id, fld) == __expand_field_sign_unsigned(id, fld, val))
+
+#define kvm_has_feat_range(kvm, id, fld, min, max)			\
+	(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, min) &&	\
+	 get_idreg_field((kvm), id, fld) <= expand_field_sign(id, fld, max))
+
 #endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 145ce73fc16c..3e2a1ac0c9bb 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -70,7 +70,7 @@ DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 /*
  * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
  * static inline can allow the compiler to out-of-line this. KVM always wants
- * the macro version as its always inlined.
+ * the macro version as it's always inlined.
  */
 #define __kvm_swab32(x) ___constant_swab32(x)
 
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index e3e793d0ec30..d5e48d870461 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -54,27 +54,6 @@
 #include <asm/alternative.h>
 
 /*
- * Convert a kernel VA into a HYP VA.
- * reg: VA to be converted.
- *
- * The actual code generation takes place in kvm_update_va_mask, and
- * the instructions below are only there to reserve the space and
- * perform the register allocation (kvm_update_va_mask uses the
- * specific registers encoded in the instructions).
- */
-.macro kern_hyp_va	reg
-#ifndef __KVM_VHE_HYPERVISOR__
-alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
-	and	\reg, \reg, #1		/* mask with va_mask */
-	ror	\reg, \reg, #1		/* rotate to the first tag bit */
-	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
-	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
-	ror	\reg, \reg, #63		/* rotate back */
-alternative_cb_end
-#endif
-.endm
-
-/*
  * Convert a hypervisor VA to a PA
  * reg: hypervisor address to be converted in place
  * tmp: temporary register
@@ -127,14 +106,29 @@ void kvm_apply_hyp_relocations(void);
 
 #define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)
 
+/*
+ * Convert a kernel VA into a HYP VA.
+ *
+ * Can be called from hyp or non-hyp context.
+ *
+ * The actual code generation takes place in kvm_update_va_mask(), and
+ * the instructions below are only there to reserve the space and
+ * perform the register allocation (kvm_update_va_mask() uses the
+ * specific registers encoded in the instructions).
+ */
 static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 {
+/*
+ * This #ifndef is an optimisation for when this is called from VHE hyp
+ * context.  When called from a VHE non-hyp context, kvm_update_va_mask() will
+ * replace the instructions with `nop`s.
+ */
 #ifndef __KVM_VHE_HYPERVISOR__
-	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
-				    "ror %0, %0, #1\n"
-				    "add %0, %0, #0\n"
-				    "add %0, %0, #0, lsl 12\n"
-				    "ror %0, %0, #63\n",
+	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"		/* mask with va_mask */
+				    "ror %0, %0, #1\n"		/* rotate to the first tag bit */
+				    "add %0, %0, #0\n"		/* insert the low 12 bits of the tag */
+				    "add %0, %0, #0, lsl 12\n"	/* insert the top 12 bits of the tag */
+				    "ror %0, %0, #63\n",	/* rotate back */
 				    ARM64_ALWAYS_SYSTEM,
 				    kvm_update_va_mask)
 		     : "+r" (v));
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 4882905357f4..c77d795556e1 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -60,7 +60,6 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
 	return ttbr0 & ~GENMASK_ULL(63, 48);
 }
 
-extern bool __check_nv_sr_forward(struct kvm_vcpu *vcpu);
 int kvm_init_nv_sysregs(struct kvm *kvm);
 
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index cfdf40f734b1..19278dfe7978 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -197,6 +197,7 @@ enum kvm_pgtable_stage2_flags {
  * @KVM_PGTABLE_PROT_W:		Write permission.
  * @KVM_PGTABLE_PROT_R:		Read permission.
  * @KVM_PGTABLE_PROT_DEVICE:	Device attributes.
+ * @KVM_PGTABLE_PROT_NORMAL_NC:	Normal noncacheable attributes.
  * @KVM_PGTABLE_PROT_SW0:	Software bit 0.
  * @KVM_PGTABLE_PROT_SW1:	Software bit 1.
  * @KVM_PGTABLE_PROT_SW2:	Software bit 2.
@@ -208,6 +209,7 @@ enum kvm_pgtable_prot {
 	KVM_PGTABLE_PROT_R			= BIT(2),
 
 	KVM_PGTABLE_PROT_DEVICE			= BIT(3),
+	KVM_PGTABLE_PROT_NORMAL_NC		= BIT(4),
 
 	KVM_PGTABLE_PROT_SW0			= BIT(55),
 	KVM_PGTABLE_PROT_SW1			= BIT(56),
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index d82305ab420f..449ca2ff1df6 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -173,6 +173,7 @@
  * Memory types for Stage-2 translation
  */
 #define MT_S2_NORMAL		0xf
+#define MT_S2_NORMAL_NC		0x5
 #define MT_S2_DEVICE_nGnRE	0x1
 
 /*
@@ -180,6 +181,7 @@
  * Stage-2 enforces Normal-WB and Device-nGnRE
  */
 #define MT_S2_FWB_NORMAL	6
+#define MT_S2_FWB_NORMAL_NC	5
 #define MT_S2_FWB_DEVICE_nGnRE	1
 
 #ifdef CONFIG_ARM64_4K_PAGES
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index c3b19b376c86..9e8999592f3a 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -1181,6 +1181,8 @@
 		par;							\
 	})
 
+#define SYS_FIELD_VALUE(reg, field, val)	reg##_##field##_##val
+
 #define SYS_FIELD_GET(reg, field, val)		\
 		 FIELD_GET(reg##_##field##_MASK, val)
 
@@ -1188,7 +1190,8 @@
 		 FIELD_PREP(reg##_##field##_MASK, val)
 
 #define SYS_FIELD_PREP_ENUM(reg, field, val)		\
-		 FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val)
+		 FIELD_PREP(reg##_##field##_MASK,	\
+			    SYS_FIELD_VALUE(reg, field, val))
 
 #endif
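
A note on the new __vcpu_sys_reg() wrapper in kvm_host.h: for VNCR-backed registers it lazily sanitises the stored value through kvm_vcpu_sanitise_vncr_reg() before handing back a pointer. A minimal sketch of the intended semantics, assuming the res0/res1 fields of struct kvm_sysreg_masks encode bits that must read as zero and as one respectively (the helper below is illustrative only, not the kernel's implementation):

static u64 sanitise_vncr_value(const struct kvm_sysreg_masks *masks,
			       enum vcpu_sysreg r, u64 v)
{
	v &= ~masks->mask[r - __VNCR_START__].res0;	/* clear RES0 bits */
	v |= masks->mask[r - __VNCR_START__].res1;	/* set RES1 bits */
	return v;
}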
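The kvm_has_feat() family added to kvm_host.h compares a VM's sanitised ID register fields against a named feature level, handling signed and unsigned fields transparently. A hedged usage sketch follows; the two wrapper functions are hypothetical, while the register and field names come from the generated sysreg definitions:

/* Unsigned field: true once FEAT_FGT is at least IMPlemented */
static bool vm_has_fgt(struct kvm *kvm)
{
	return kvm_has_feat(kvm, ID_AA64MMFR0_EL1, FGT, IMP);
}

/*
 * Signed field: an IMP_DEF PMU (0xf) sign-extends to -1 and therefore
 * correctly compares as less than IMP.
 */
static bool vm_has_pmuv3(struct kvm *kvm)
{
	return kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP);
}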
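Finally, the sysreg.h change factors the enum token-pasting out into SYS_FIELD_VALUE() so that SYS_FIELD_PREP_ENUM() and the new kvm_host.h macros share one definition. The expansion is purely mechanical; for example, using the generated ID_AA64MMFR0_EL1.FGT definitions:

	SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, FGT, IMP)

expands to

	FIELD_PREP(ID_AA64MMFR0_EL1_FGT_MASK,
		   SYS_FIELD_VALUE(ID_AA64MMFR0_EL1, FGT, IMP))

and finally to

	FIELD_PREP(ID_AA64MMFR0_EL1_FGT_MASK, ID_AA64MMFR0_EL1_FGT_IMP)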