summaryrefslogtreecommitdiff
path: root/tools/testing/selftests/kvm/include
diff options
context:
space:
mode:
Diffstat (limited to 'tools/testing/selftests/kvm/include')
-rw-r--r--tools/testing/selftests/kvm/include/arm64/arch_timer.h54
-rw-r--r--tools/testing/selftests/kvm/include/arm64/delay.h4
-rw-r--r--tools/testing/selftests/kvm/include/arm64/gic.h9
-rw-r--r--tools/testing/selftests/kvm/include/arm64/gic_v3_its.h8
-rw-r--r--tools/testing/selftests/kvm/include/arm64/gic_v5.h150
-rw-r--r--tools/testing/selftests/kvm/include/arm64/kvm_util_arch.h7
-rw-r--r--tools/testing/selftests/kvm/include/arm64/processor.h185
-rw-r--r--tools/testing/selftests/kvm/include/arm64/ucall.h4
-rw-r--r--tools/testing/selftests/kvm/include/arm64/vgic.h23
-rw-r--r--tools/testing/selftests/kvm/include/kvm_syscalls.h82
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h548
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util_types.h8
-rw-r--r--tools/testing/selftests/kvm/include/loongarch/arch_timer.h85
-rw-r--r--tools/testing/selftests/kvm/include/loongarch/kvm_util_arch.h8
-rw-r--r--tools/testing/selftests/kvm/include/loongarch/pmu.h66
-rw-r--r--tools/testing/selftests/kvm/include/loongarch/processor.h235
-rw-r--r--tools/testing/selftests/kvm/include/loongarch/ucall.h20
-rw-r--r--tools/testing/selftests/kvm/include/lru_gen_util.h51
-rw-r--r--tools/testing/selftests/kvm/include/memstress.h30
-rw-r--r--tools/testing/selftests/kvm/include/numaif.h110
-rw-r--r--tools/testing/selftests/kvm/include/riscv/arch_timer.h22
-rw-r--r--tools/testing/selftests/kvm/include/riscv/kvm_util_arch.h1
-rw-r--r--tools/testing/selftests/kvm/include/riscv/processor.h35
-rw-r--r--tools/testing/selftests/kvm/include/riscv/sbi.h37
-rw-r--r--tools/testing/selftests/kvm/include/riscv/ucall.h4
-rw-r--r--tools/testing/selftests/kvm/include/s390/diag318_test_handler.h2
-rw-r--r--tools/testing/selftests/kvm/include/s390/facility.h4
-rw-r--r--tools/testing/selftests/kvm/include/s390/kvm_util_arch.h1
-rw-r--r--tools/testing/selftests/kvm/include/s390/ucall.h4
-rw-r--r--tools/testing/selftests/kvm/include/sparsebit.h6
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h62
-rw-r--r--tools/testing/selftests/kvm/include/timer_test.h18
-rw-r--r--tools/testing/selftests/kvm/include/ucall_common.h22
-rw-r--r--tools/testing/selftests/kvm/include/userfaultfd_util.h6
-rw-r--r--tools/testing/selftests/kvm/include/x86/apic.h29
-rw-r--r--tools/testing/selftests/kvm/include/x86/evmcs.h22
-rw-r--r--tools/testing/selftests/kvm/include/x86/hyperv.h28
-rw-r--r--tools/testing/selftests/kvm/include/x86/kvm_util_arch.h34
-rw-r--r--tools/testing/selftests/kvm/include/x86/pmu.h33
-rw-r--r--tools/testing/selftests/kvm/include/x86/processor.h461
-rw-r--r--tools/testing/selftests/kvm/include/x86/sev.h65
-rw-r--r--tools/testing/selftests/kvm/include/x86/smm.h16
-rw-r--r--tools/testing/selftests/kvm/include/x86/svm.h17
-rw-r--r--tools/testing/selftests/kvm/include/x86/svm_util.h19
-rw-r--r--tools/testing/selftests/kvm/include/x86/ucall.h2
-rw-r--r--tools/testing/selftests/kvm/include/x86/vmx.h85
46 files changed, 2067 insertions, 655 deletions
diff --git a/tools/testing/selftests/kvm/include/arm64/arch_timer.h b/tools/testing/selftests/kvm/include/arm64/arch_timer.h
index bf461de34785..a5836d4ab7ee 100644
--- a/tools/testing/selftests/kvm/include/arm64/arch_timer.h
+++ b/tools/testing/selftests/kvm/include/arm64/arch_timer.h
@@ -18,20 +18,20 @@ enum arch_timer {
#define CTL_ISTATUS (1 << 2)
#define msec_to_cycles(msec) \
- (timer_get_cntfrq() * (uint64_t)(msec) / 1000)
+ (timer_get_cntfrq() * (u64)(msec) / 1000)
#define usec_to_cycles(usec) \
- (timer_get_cntfrq() * (uint64_t)(usec) / 1000000)
+ (timer_get_cntfrq() * (u64)(usec) / 1000000)
#define cycles_to_usec(cycles) \
- ((uint64_t)(cycles) * 1000000 / timer_get_cntfrq())
+ ((u64)(cycles) * 1000000 / timer_get_cntfrq())
-static inline uint32_t timer_get_cntfrq(void)
+static inline u32 timer_get_cntfrq(void)
{
return read_sysreg(cntfrq_el0);
}
-static inline uint64_t timer_get_cntct(enum arch_timer timer)
+static inline u64 timer_get_cntct(enum arch_timer timer)
{
isb();
@@ -48,7 +48,7 @@ static inline uint64_t timer_get_cntct(enum arch_timer timer)
return 0;
}
-static inline void timer_set_cval(enum arch_timer timer, uint64_t cval)
+static inline void timer_set_cval(enum arch_timer timer, u64 cval)
{
switch (timer) {
case VIRTUAL:
@@ -64,7 +64,7 @@ static inline void timer_set_cval(enum arch_timer timer, uint64_t cval)
isb();
}
-static inline uint64_t timer_get_cval(enum arch_timer timer)
+static inline u64 timer_get_cval(enum arch_timer timer)
{
switch (timer) {
case VIRTUAL:
@@ -79,7 +79,7 @@ static inline uint64_t timer_get_cval(enum arch_timer timer)
return 0;
}
-static inline void timer_set_tval(enum arch_timer timer, int32_t tval)
+static inline void timer_set_tval(enum arch_timer timer, s32 tval)
{
switch (timer) {
case VIRTUAL:
@@ -95,7 +95,7 @@ static inline void timer_set_tval(enum arch_timer timer, int32_t tval)
isb();
}
-static inline int32_t timer_get_tval(enum arch_timer timer)
+static inline s32 timer_get_tval(enum arch_timer timer)
{
isb();
switch (timer) {
@@ -111,7 +111,7 @@ static inline int32_t timer_get_tval(enum arch_timer timer)
return 0;
}
-static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl)
+static inline void timer_set_ctl(enum arch_timer timer, u32 ctl)
{
switch (timer) {
case VIRTUAL:
@@ -127,7 +127,7 @@ static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl)
isb();
}
-static inline uint32_t timer_get_ctl(enum arch_timer timer)
+static inline u32 timer_get_ctl(enum arch_timer timer)
{
switch (timer) {
case VIRTUAL:
@@ -142,17 +142,41 @@ static inline uint32_t timer_get_ctl(enum arch_timer timer)
return 0;
}
-static inline void timer_set_next_cval_ms(enum arch_timer timer, uint32_t msec)
+static inline void timer_set_next_cval_ms(enum arch_timer timer, u32 msec)
{
- uint64_t now_ct = timer_get_cntct(timer);
- uint64_t next_ct = now_ct + msec_to_cycles(msec);
+ u64 now_ct = timer_get_cntct(timer);
+ u64 next_ct = now_ct + msec_to_cycles(msec);
timer_set_cval(timer, next_ct);
}
-static inline void timer_set_next_tval_ms(enum arch_timer timer, uint32_t msec)
+static inline void timer_set_next_tval_ms(enum arch_timer timer, u32 msec)
{
timer_set_tval(timer, msec_to_cycles(msec));
}
+static inline u32 vcpu_get_vtimer_irq(struct kvm_vcpu *vcpu)
+{
+ u32 intid;
+ u64 attr;
+
+ attr = vcpu_has_el2(vcpu) ? KVM_ARM_VCPU_TIMER_IRQ_HVTIMER :
+ KVM_ARM_VCPU_TIMER_IRQ_VTIMER;
+ vcpu_device_attr_get(vcpu, KVM_ARM_VCPU_TIMER_CTRL, attr, &intid);
+
+ return intid;
+}
+
+static inline u32 vcpu_get_ptimer_irq(struct kvm_vcpu *vcpu)
+{
+ u32 intid;
+ u64 attr;
+
+ attr = vcpu_has_el2(vcpu) ? KVM_ARM_VCPU_TIMER_IRQ_HPTIMER :
+ KVM_ARM_VCPU_TIMER_IRQ_PTIMER;
+ vcpu_device_attr_get(vcpu, KVM_ARM_VCPU_TIMER_CTRL, attr, &intid);
+
+ return intid;
+}
+
#endif /* SELFTEST_KVM_ARCH_TIMER_H */
diff --git a/tools/testing/selftests/kvm/include/arm64/delay.h b/tools/testing/selftests/kvm/include/arm64/delay.h
index 329e4f5079ea..6a5d4634af2c 100644
--- a/tools/testing/selftests/kvm/include/arm64/delay.h
+++ b/tools/testing/selftests/kvm/include/arm64/delay.h
@@ -8,10 +8,10 @@
#include "arch_timer.h"
-static inline void __delay(uint64_t cycles)
+static inline void __delay(u64 cycles)
{
enum arch_timer timer = VIRTUAL;
- uint64_t start = timer_get_cntct(timer);
+ u64 start = timer_get_cntct(timer);
while ((timer_get_cntct(timer) - start) < cycles)
cpu_relax();
diff --git a/tools/testing/selftests/kvm/include/arm64/gic.h b/tools/testing/selftests/kvm/include/arm64/gic.h
index baeb3c859389..615745093c98 100644
--- a/tools/testing/selftests/kvm/include/arm64/gic.h
+++ b/tools/testing/selftests/kvm/include/arm64/gic.h
@@ -48,8 +48,8 @@ void gic_set_dir(unsigned int intid);
* split is true, EOI drops the priority and deactivates the interrupt.
*/
void gic_set_eoi_split(bool split);
-void gic_set_priority_mask(uint64_t mask);
-void gic_set_priority(uint32_t intid, uint32_t prio);
+void gic_set_priority_mask(u64 mask);
+void gic_set_priority(u32 intid, u32 prio);
void gic_irq_set_active(unsigned int intid);
void gic_irq_clear_active(unsigned int intid);
bool gic_irq_get_active(unsigned int intid);
@@ -57,8 +57,9 @@ void gic_irq_set_pending(unsigned int intid);
void gic_irq_clear_pending(unsigned int intid);
bool gic_irq_get_pending(unsigned int intid);
void gic_irq_set_config(unsigned int intid, bool is_edge);
+void gic_irq_set_group(unsigned int intid, bool group);
-void gic_rdist_enable_lpis(vm_paddr_t cfg_table, size_t cfg_table_size,
- vm_paddr_t pend_table);
+void gic_rdist_enable_lpis(gpa_t cfg_table, size_t cfg_table_size,
+ gpa_t pend_table);
#endif /* SELFTEST_KVM_GIC_H */
diff --git a/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h b/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h
index 3722ed9c8f96..a43a407e2d5c 100644
--- a/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h
+++ b/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h
@@ -5,15 +5,15 @@
#include <linux/sizes.h>
-void its_init(vm_paddr_t coll_tbl, size_t coll_tbl_sz,
- vm_paddr_t device_tbl, size_t device_tbl_sz,
- vm_paddr_t cmdq, size_t cmdq_size);
+void its_init(gpa_t coll_tbl, size_t coll_tbl_sz, gpa_t device_tbl,
+ size_t device_tbl_sz, gpa_t cmdq, size_t cmdq_size);
-void its_send_mapd_cmd(void *cmdq_base, u32 device_id, vm_paddr_t itt_base,
+void its_send_mapd_cmd(void *cmdq_base, u32 device_id, gpa_t itt_base,
size_t itt_size, bool valid);
void its_send_mapc_cmd(void *cmdq_base, u32 vcpu_id, u32 collection_id, bool valid);
void its_send_mapti_cmd(void *cmdq_base, u32 device_id, u32 event_id,
u32 collection_id, u32 intid);
void its_send_invall_cmd(void *cmdq_base, u32 collection_id);
+void its_send_sync_cmd(void *cmdq_base, u32 vcpu_id);
#endif // __SELFTESTS_GIC_V3_ITS_H__
diff --git a/tools/testing/selftests/kvm/include/arm64/gic_v5.h b/tools/testing/selftests/kvm/include/arm64/gic_v5.h
new file mode 100644
index 000000000000..eb523d9277cf
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/arm64/gic_v5.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SELFTESTS_GIC_V5_H
+#define __SELFTESTS_GIC_V5_H
+
+#include <asm/barrier.h>
+#include <asm/sysreg.h>
+
+#include <linux/bitfield.h>
+
+#include "processor.h"
+
+/*
+ * Definitions for GICv5 instructions for the Current Domain
+ */
+#define GICV5_OP_GIC_CDAFF sys_insn(1, 0, 12, 1, 3)
+#define GICV5_OP_GIC_CDDI sys_insn(1, 0, 12, 2, 0)
+#define GICV5_OP_GIC_CDDIS sys_insn(1, 0, 12, 1, 0)
+#define GICV5_OP_GIC_CDHM sys_insn(1, 0, 12, 2, 1)
+#define GICV5_OP_GIC_CDEN sys_insn(1, 0, 12, 1, 1)
+#define GICV5_OP_GIC_CDEOI sys_insn(1, 0, 12, 1, 7)
+#define GICV5_OP_GIC_CDPEND sys_insn(1, 0, 12, 1, 4)
+#define GICV5_OP_GIC_CDPRI sys_insn(1, 0, 12, 1, 2)
+#define GICV5_OP_GIC_CDRCFG sys_insn(1, 0, 12, 1, 5)
+#define GICV5_OP_GICR_CDIA sys_insn(1, 0, 12, 3, 0)
+#define GICV5_OP_GICR_CDNMIA sys_insn(1, 0, 12, 3, 1)
+
+/* Definitions for GIC CDAFF */
+#define GICV5_GIC_CDAFF_IAFFID_MASK GENMASK_ULL(47, 32)
+#define GICV5_GIC_CDAFF_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDAFF_IRM_MASK BIT_ULL(28)
+#define GICV5_GIC_CDAFF_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDDI */
+#define GICV5_GIC_CDDI_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDDI_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDDIS */
+#define GICV5_GIC_CDDIS_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDDIS_TYPE(r) FIELD_GET(GICV5_GIC_CDDIS_TYPE_MASK, r)
+#define GICV5_GIC_CDDIS_ID_MASK GENMASK_ULL(23, 0)
+#define GICV5_GIC_CDDIS_ID(r) FIELD_GET(GICV5_GIC_CDDIS_ID_MASK, r)
+
+/* Definitions for GIC CDEN */
+#define GICV5_GIC_CDEN_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDEN_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDHM */
+#define GICV5_GIC_CDHM_HM_MASK BIT_ULL(32)
+#define GICV5_GIC_CDHM_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDHM_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDPEND */
+#define GICV5_GIC_CDPEND_PENDING_MASK BIT_ULL(32)
+#define GICV5_GIC_CDPEND_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDPEND_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDPRI */
+#define GICV5_GIC_CDPRI_PRIORITY_MASK GENMASK_ULL(39, 35)
+#define GICV5_GIC_CDPRI_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDPRI_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDRCFG */
+#define GICV5_GIC_CDRCFG_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDRCFG_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GICR CDIA */
+#define GICV5_GICR_CDIA_VALID_MASK BIT_ULL(32)
+#define GICV5_GICR_CDIA_VALID(r) FIELD_GET(GICV5_GICR_CDIA_VALID_MASK, r)
+#define GICV5_GICR_CDIA_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GICR_CDIA_ID_MASK GENMASK_ULL(23, 0)
+#define GICV5_GICR_CDIA_INTID GENMASK_ULL(31, 0)
+
+/* Definitions for GICR CDNMIA */
+#define GICV5_GICR_CDNMIA_VALID_MASK BIT_ULL(32)
+#define GICV5_GICR_CDNMIA_VALID(r) FIELD_GET(GICV5_GICR_CDNMIA_VALID_MASK, r)
+#define GICV5_GICR_CDNMIA_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GICR_CDNMIA_ID_MASK GENMASK_ULL(23, 0)
+
+#define gicr_insn(insn) read_sysreg_s(GICV5_OP_GICR_##insn)
+#define gic_insn(v, insn) write_sysreg_s(v, GICV5_OP_GIC_##insn)
+
+#define __GIC_BARRIER_INSN(op0, op1, CRn, CRm, op2, Rt) \
+ __emit_inst(0xd5000000 | \
+ sys_insn((op0), (op1), (CRn), (CRm), (op2)) | \
+ ((Rt) & 0x1f))
+
+#define GSB_SYS_BARRIER_INSN __GIC_BARRIER_INSN(1, 0, 12, 0, 0, 31)
+#define GSB_ACK_BARRIER_INSN __GIC_BARRIER_INSN(1, 0, 12, 0, 1, 31)
+
+#define gsb_ack() asm volatile(GSB_ACK_BARRIER_INSN : : : "memory")
+#define gsb_sys() asm volatile(GSB_SYS_BARRIER_INSN : : : "memory")
+
+#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
+
+#define GICV5_IRQ_DEFAULT_PRI 0b10000
+
+#define GICV5_ARCH_PPI_SW_PPI 0x3
+
+void gicv5_ppi_priority_init(void)
+{
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR0_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR1_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR2_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR3_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR4_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR5_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR6_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR7_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR8_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR9_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR10_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR11_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR12_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR13_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR14_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR15_EL1);
+
+ /*
+ * Context synchronization is required to make sure the effects of the
+ * system register writes are synchronized.
+ */
+ isb();
+}
+
+void gicv5_cpu_disable_interrupts(void)
+{
+ u64 cr0;
+
+ cr0 = FIELD_PREP(ICC_CR0_EL1_EN, 0);
+ write_sysreg_s(cr0, SYS_ICC_CR0_EL1);
+}
+
+void gicv5_cpu_enable_interrupts(void)
+{
+ u64 cr0, pcr;
+
+ write_sysreg_s(0, SYS_ICC_PPI_ENABLER0_EL1);
+ write_sysreg_s(0, SYS_ICC_PPI_ENABLER1_EL1);
+
+ gicv5_ppi_priority_init();
+
+ pcr = FIELD_PREP(ICC_PCR_EL1_PRIORITY, GICV5_IRQ_DEFAULT_PRI);
+ write_sysreg_s(pcr, SYS_ICC_PCR_EL1);
+
+ cr0 = FIELD_PREP(ICC_CR0_EL1_EN, 1);
+ write_sysreg_s(cr0, SYS_ICC_CR0_EL1);
+}
+
+#endif
diff --git a/tools/testing/selftests/kvm/include/arm64/kvm_util_arch.h b/tools/testing/selftests/kvm/include/arm64/kvm_util_arch.h
index e43a57d99b56..4a2033708227 100644
--- a/tools/testing/selftests/kvm/include/arm64/kvm_util_arch.h
+++ b/tools/testing/selftests/kvm/include/arm64/kvm_util_arch.h
@@ -2,6 +2,11 @@
#ifndef SELFTEST_KVM_UTIL_ARCH_H
#define SELFTEST_KVM_UTIL_ARCH_H
-struct kvm_vm_arch {};
+struct kvm_mmu_arch {};
+
+struct kvm_vm_arch {
+ bool has_gic;
+ int gic_fd;
+};
#endif // SELFTEST_KVM_UTIL_ARCH_H
diff --git a/tools/testing/selftests/kvm/include/arm64/processor.h b/tools/testing/selftests/kvm/include/arm64/processor.h
index 1e8d0d531fbd..b8a902ba8573 100644
--- a/tools/testing/selftests/kvm/include/arm64/processor.h
+++ b/tools/testing/selftests/kvm/include/arm64/processor.h
@@ -62,8 +62,73 @@
MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT))
+/* TCR_EL1 specific flags */
+#define TCR_T0SZ_OFFSET 0
+#define TCR_T0SZ(x) ((UL(64) - (x)) << TCR_T0SZ_OFFSET)
+
+#define TCR_IRGN0_SHIFT 8
+#define TCR_IRGN0_MASK (UL(3) << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_NC (UL(0) << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_WBWA (UL(1) << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_WT (UL(2) << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_WBnWA (UL(3) << TCR_IRGN0_SHIFT)
+
+#define TCR_ORGN0_SHIFT 10
+#define TCR_ORGN0_MASK (UL(3) << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_NC (UL(0) << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_WBWA (UL(1) << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_WT (UL(2) << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_WBnWA (UL(3) << TCR_ORGN0_SHIFT)
+
+#define TCR_SH0_SHIFT 12
+#define TCR_SH0_MASK (UL(3) << TCR_SH0_SHIFT)
+#define TCR_SH0_INNER (UL(3) << TCR_SH0_SHIFT)
+
+#define TCR_TG0_SHIFT 14
+#define TCR_TG0_MASK (UL(3) << TCR_TG0_SHIFT)
+#define TCR_TG0_4K (UL(0) << TCR_TG0_SHIFT)
+#define TCR_TG0_64K (UL(1) << TCR_TG0_SHIFT)
+#define TCR_TG0_16K (UL(2) << TCR_TG0_SHIFT)
+
+#define TCR_EPD1_SHIFT 23
+#define TCR_EPD1_MASK (UL(1) << TCR_EPD1_SHIFT)
+
+#define TCR_IPS_SHIFT 32
+#define TCR_IPS_MASK (UL(7) << TCR_IPS_SHIFT)
+#define TCR_IPS_52_BITS (UL(6) << TCR_IPS_SHIFT)
+#define TCR_IPS_48_BITS (UL(5) << TCR_IPS_SHIFT)
+#define TCR_IPS_40_BITS (UL(2) << TCR_IPS_SHIFT)
+#define TCR_IPS_36_BITS (UL(1) << TCR_IPS_SHIFT)
+
+#define TCR_TBI1 (UL(1) << 38)
+#define TCR_HA (UL(1) << 39)
+#define TCR_DS (UL(1) << 59)
+
+/*
+ * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
+ */
+#define PTE_ATTRINDX(t) ((t) << 2)
+#define PTE_ATTRINDX_MASK GENMASK(4, 2)
+#define PTE_ATTRINDX_SHIFT 2
+
+#define PTE_VALID BIT(0)
+#define PGD_TYPE_TABLE BIT(1)
+#define PUD_TYPE_TABLE BIT(1)
+#define PMD_TYPE_TABLE BIT(1)
+#define PTE_TYPE_PAGE BIT(1)
+
+#define PTE_SHARED (UL(3) << 8) /* SH[1:0], inner shareable */
+#define PTE_AF BIT(10)
+
+#define PTE_ADDR_MASK(page_shift) GENMASK(47, (page_shift))
+#define PTE_ADDR_51_48 GENMASK(15, 12)
+#define PTE_ADDR_51_48_SHIFT 12
+#define PTE_ADDR_MASK_LPA2(page_shift) GENMASK(49, (page_shift))
+#define PTE_ADDR_51_50_LPA2 GENMASK(9, 8)
+#define PTE_ADDR_51_50_LPA2_SHIFT 8
+
void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init);
-struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id,
struct kvm_vcpu_init *init, void *guest_code);
struct ex_regs {
@@ -102,14 +167,8 @@ enum {
(v) == VECTOR_SYNC_LOWER_64 || \
(v) == VECTOR_SYNC_LOWER_32)
-/* Access flag */
-#define PTE_AF (1ULL << 10)
-
-/* Access flag update enable/disable */
-#define TCR_EL1_HA (1ULL << 39)
-
-void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
- uint32_t *ipa16k, uint32_t *ipa64k);
+void aarch64_get_supported_page_sizes(u32 ipa, u32 *ipa4k,
+ u32 *ipa16k, u32 *ipa64k);
void vm_init_descriptor_tables(struct kvm_vm *vm);
void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);
@@ -120,7 +179,8 @@ void vm_install_exception_handler(struct kvm_vm *vm,
void vm_install_sync_handler(struct kvm_vm *vm,
int vector, int ec, handler_fn handler);
-uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva);
+u64 *virt_get_pte_hva_at_level(struct kvm_vm *vm, gva_t gva, int level);
+u64 *virt_get_pte_hva(struct kvm_vm *vm, gva_t gva);
static inline void cpu_relax(void)
{
@@ -199,6 +259,16 @@ static inline void local_irq_disable(void)
asm volatile("msr daifset, #3" : : : "memory");
}
+static inline void local_serror_enable(void)
+{
+ asm volatile("msr daifclr, #4" : : : "memory");
+}
+
+static inline void local_serror_disable(void)
+{
+ asm volatile("msr daifset, #4" : : : "memory");
+}
+
/**
* struct arm_smccc_res - Result from SMC/HVC call
* @a0-a3 result values from registers 0 to 3
@@ -217,9 +287,9 @@ struct arm_smccc_res {
* @res: pointer to write the return values from registers x0-x3
*
*/
-void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
- uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
- uint64_t arg6, struct arm_smccc_res *res);
+void smccc_hvc(u32 function_id, u64 arg0, u64 arg1,
+ u64 arg2, u64 arg3, u64 arg4, u64 arg5,
+ u64 arg6, struct arm_smccc_res *res);
/**
* smccc_smc - Invoke a SMCCC function using the smc conduit
@@ -228,11 +298,94 @@ void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
* @res: pointer to write the return values from registers x0-x3
*
*/
-void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
- uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
- uint64_t arg6, struct arm_smccc_res *res);
+void smccc_smc(u32 function_id, u64 arg0, u64 arg1,
+ u64 arg2, u64 arg3, u64 arg4, u64 arg5,
+ u64 arg6, struct arm_smccc_res *res);
/* Execute a Wait For Interrupt instruction. */
void wfi(void);
+void test_wants_mte(void);
+void test_disable_default_vgic(void);
+
+bool vm_supports_el2(struct kvm_vm *vm);
+
+static inline bool test_supports_el2(void)
+{
+ struct kvm_vm *vm = vm_create(1);
+ bool supported = vm_supports_el2(vm);
+
+ kvm_vm_free(vm);
+ return supported;
+}
+
+static inline bool vcpu_has_el2(struct kvm_vcpu *vcpu)
+{
+ return vcpu->init.features[0] & BIT(KVM_ARM_VCPU_HAS_EL2);
+}
+
+#define MAPPED_EL2_SYSREG(el2, el1) \
+ case SYS_##el1: \
+ if (vcpu_has_el2(vcpu)) \
+ alias = SYS_##el2; \
+ break
+
+
+static __always_inline u64 ctxt_reg_alias(struct kvm_vcpu *vcpu, u32 encoding)
+{
+ u32 alias = encoding;
+
+ BUILD_BUG_ON(!__builtin_constant_p(encoding));
+
+ switch (encoding) {
+ MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1);
+ MAPPED_EL2_SYSREG(CPTR_EL2, CPACR_EL1);
+ MAPPED_EL2_SYSREG(TTBR0_EL2, TTBR0_EL1);
+ MAPPED_EL2_SYSREG(TTBR1_EL2, TTBR1_EL1);
+ MAPPED_EL2_SYSREG(TCR_EL2, TCR_EL1);
+ MAPPED_EL2_SYSREG(VBAR_EL2, VBAR_EL1);
+ MAPPED_EL2_SYSREG(AFSR0_EL2, AFSR0_EL1);
+ MAPPED_EL2_SYSREG(AFSR1_EL2, AFSR1_EL1);
+ MAPPED_EL2_SYSREG(ESR_EL2, ESR_EL1);
+ MAPPED_EL2_SYSREG(FAR_EL2, FAR_EL1);
+ MAPPED_EL2_SYSREG(MAIR_EL2, MAIR_EL1);
+ MAPPED_EL2_SYSREG(TCR2_EL2, TCR2_EL1);
+ MAPPED_EL2_SYSREG(PIR_EL2, PIR_EL1);
+ MAPPED_EL2_SYSREG(PIRE0_EL2, PIRE0_EL1);
+ MAPPED_EL2_SYSREG(POR_EL2, POR_EL1);
+ MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1);
+ MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1);
+ MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1);
+ MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1);
+ MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1);
+ MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1);
+ MAPPED_EL2_SYSREG(CNTHCTL_EL2, CNTKCTL_EL1);
+ case SYS_SP_EL1:
+ if (!vcpu_has_el2(vcpu))
+ return ARM64_CORE_REG(sp_el1);
+
+ alias = SYS_SP_EL2;
+ break;
+ default:
+ BUILD_BUG();
+ }
+
+ return KVM_ARM64_SYS_REG(alias);
+}
+
+void kvm_get_default_vcpu_target(struct kvm_vm *vm, struct kvm_vcpu_init *init);
+
+static inline unsigned int get_current_el(void)
+{
+ return (read_sysreg(CurrentEL) >> 2) & 0x3;
+}
+
+#define do_smccc(...) \
+do { \
+ if (get_current_el() == 2) \
+ smccc_smc(__VA_ARGS__); \
+ else \
+ smccc_hvc(__VA_ARGS__); \
+} while (0)
+
#endif /* SELFTEST_KVM_PROCESSOR_H */
diff --git a/tools/testing/selftests/kvm/include/arm64/ucall.h b/tools/testing/selftests/kvm/include/arm64/ucall.h
index 4ec801f37f00..2210d3d94c40 100644
--- a/tools/testing/selftests/kvm/include/arm64/ucall.h
+++ b/tools/testing/selftests/kvm/include/arm64/ucall.h
@@ -10,9 +10,9 @@
* ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each
* VM), it must not be accessed from host code.
*/
-extern vm_vaddr_t *ucall_exit_mmio_addr;
+extern gva_t *ucall_exit_mmio_addr;
-static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+static inline void ucall_arch_do_ucall(gva_t uc)
{
WRITE_ONCE(*ucall_exit_mmio_addr, uc);
}
diff --git a/tools/testing/selftests/kvm/include/arm64/vgic.h b/tools/testing/selftests/kvm/include/arm64/vgic.h
index c481d0c00a5d..1f8b04373987 100644
--- a/tools/testing/selftests/kvm/include/arm64/vgic.h
+++ b/tools/testing/selftests/kvm/include/arm64/vgic.h
@@ -11,24 +11,27 @@
#include "kvm_util.h"
#define REDIST_REGION_ATTR_ADDR(count, base, flags, index) \
- (((uint64_t)(count) << 52) | \
- ((uint64_t)((base) >> 16) << 16) | \
- ((uint64_t)(flags) << 12) | \
+ (((u64)(count) << 52) | \
+ ((u64)((base) >> 16) << 16) | \
+ ((u64)(flags) << 12) | \
index)
-int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs);
+bool kvm_supports_vgic_v3(void);
+int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs);
+void __vgic_v3_init(int fd);
+int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs);
#define VGIC_MAX_RESERVED 1023
-void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level);
-int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level);
+void kvm_irq_set_level_info(int gic_fd, u32 intid, int level);
+int _kvm_irq_set_level_info(int gic_fd, u32 intid, int level);
-void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
-int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
+void kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level);
+int _kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level);
/* The vcpu arg only applies to private interrupts. */
-void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu);
-void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu);
+void kvm_irq_write_ispendr(int gic_fd, u32 intid, struct kvm_vcpu *vcpu);
+void kvm_irq_write_isactiver(int gic_fd, u32 intid, struct kvm_vcpu *vcpu);
#define KVM_IRQCHIP_NUM_PINS (1020 - 32)
diff --git a/tools/testing/selftests/kvm/include/kvm_syscalls.h b/tools/testing/selftests/kvm/include/kvm_syscalls.h
new file mode 100644
index 000000000000..843c9904c46f
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/kvm_syscalls.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_SYSCALLS_H
+#define SELFTEST_KVM_SYSCALLS_H
+
+#include <sys/syscall.h>
+
+#define MAP_ARGS0(m,...)
+#define MAP_ARGS1(m,t,a,...) m(t,a)
+#define MAP_ARGS2(m,t,a,...) m(t,a), MAP_ARGS1(m,__VA_ARGS__)
+#define MAP_ARGS3(m,t,a,...) m(t,a), MAP_ARGS2(m,__VA_ARGS__)
+#define MAP_ARGS4(m,t,a,...) m(t,a), MAP_ARGS3(m,__VA_ARGS__)
+#define MAP_ARGS5(m,t,a,...) m(t,a), MAP_ARGS4(m,__VA_ARGS__)
+#define MAP_ARGS6(m,t,a,...) m(t,a), MAP_ARGS5(m,__VA_ARGS__)
+#define MAP_ARGS(n,...) MAP_ARGS##n(__VA_ARGS__)
+
+#define __DECLARE_ARGS(t, a) t a
+#define __UNPACK_ARGS(t, a) a
+
+#define DECLARE_ARGS(nr_args, args...) MAP_ARGS(nr_args, __DECLARE_ARGS, args)
+#define UNPACK_ARGS(nr_args, args...) MAP_ARGS(nr_args, __UNPACK_ARGS, args)
+
+#define __KVM_SYSCALL_ERROR(_name, _ret) \
+ "%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)
+
+/* Define a kvm_<syscall>() API to assert success. */
+#define __KVM_SYSCALL_DEFINE(name, nr_args, args...) \
+static inline void kvm_##name(DECLARE_ARGS(nr_args, args)) \
+{ \
+ int r; \
+ \
+ r = name(UNPACK_ARGS(nr_args, args)); \
+ TEST_ASSERT(!r, __KVM_SYSCALL_ERROR(#name, r)); \
+}
+
+/*
+ * Macro to define syscall APIs, either because KVM selftests doesn't link to
+ * the standard library, e.g. libnuma, or because there is no library that yet
+ * provides the syscall. These macros define both a raw <name>() wrapper and an asserting kvm_<name>() variant.
+ */
+#define KVM_SYSCALL_DEFINE(name, nr_args, args...) \
+static inline long name(DECLARE_ARGS(nr_args, args)) \
+{ \
+ return syscall(__NR_##name, UNPACK_ARGS(nr_args, args)); \
+} \
+__KVM_SYSCALL_DEFINE(name, nr_args, args)
+
+/*
+ * Special case mmap(), as KVM selftests rarely/never specify an address,
+ * rarely specify an offset, and because the unique return code requires
+ * special handling anyways.
+ */
+static inline void *__kvm_mmap(size_t size, int prot, int flags, int fd,
+ off_t offset)
+{
+ void *mem;
+
+ mem = mmap(NULL, size, prot, flags, fd, offset);
+ TEST_ASSERT(mem != MAP_FAILED, __KVM_SYSCALL_ERROR("mmap()",
+ (int)(unsigned long)MAP_FAILED));
+ return mem;
+}
+
+static inline void *kvm_mmap(size_t size, int prot, int flags, int fd)
+{
+ return __kvm_mmap(size, prot, flags, fd, 0);
+}
+
+static inline int kvm_dup(int fd)
+{
+ int new_fd = dup(fd);
+
+ TEST_ASSERT(new_fd >= 0, __KVM_SYSCALL_ERROR("dup()", new_fd));
+ return new_fd;
+}
+
+__KVM_SYSCALL_DEFINE(munmap, 2, void *, mem, size_t, size);
+__KVM_SYSCALL_DEFINE(close, 1, int, fd);
+__KVM_SYSCALL_DEFINE(fallocate, 4, int, fd, int, mode, loff_t, offset, loff_t, len);
+__KVM_SYSCALL_DEFINE(ftruncate, 2, unsigned int, fd, off_t, length);
+__KVM_SYSCALL_DEFINE(madvise, 3, void *, addr, size_t, length, int, advice);
+
+#endif /* SELFTEST_KVM_SYSCALLS_H */
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 4c4e5a847f67..2ecaaa0e9965 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -18,8 +18,12 @@
#include <asm/atomic.h>
#include <asm/kvm.h>
+#include <sys/eventfd.h>
#include <sys/ioctl.h>
+#include <pthread.h>
+
+#include "kvm_syscalls.h"
#include "kvm_util_arch.h"
#include "kvm_util_types.h"
#include "sparsebit.h"
@@ -46,18 +50,28 @@ struct userspace_mem_region {
struct hlist_node slot_node;
};
+struct kvm_binary_stats {
+ int fd;
+ struct kvm_stats_header header;
+ struct kvm_stats_desc *desc;
+};
+
struct kvm_vcpu {
struct list_head list;
- uint32_t id;
+ u32 id;
int fd;
struct kvm_vm *vm;
struct kvm_run *run;
#ifdef __x86_64__
struct kvm_cpuid2 *cpuid;
#endif
+#ifdef __aarch64__
+ struct kvm_vcpu_init init;
+#endif
+ struct kvm_binary_stats stats;
struct kvm_dirty_gfn *dirty_gfns;
- uint32_t fetch_index;
- uint32_t dirty_gfns_count;
+ u32 fetch_index;
+ u32 dirty_gfns_count;
};
struct userspace_mem_regions {
@@ -74,42 +88,51 @@ enum kvm_mem_region_type {
NR_MEM_REGIONS,
};
+struct kvm_mmu {
+ bool pgd_created;
+ u64 pgd;
+ int pgtable_levels;
+
+ struct kvm_mmu_arch arch;
+};
+
struct kvm_vm {
int mode;
unsigned long type;
int kvm_fd;
int fd;
- unsigned int pgtable_levels;
unsigned int page_size;
unsigned int page_shift;
unsigned int pa_bits;
unsigned int va_bits;
- uint64_t max_gfn;
+ u64 max_gfn;
struct list_head vcpus;
struct userspace_mem_regions regions;
struct sparsebit *vpages_valid;
struct sparsebit *vpages_mapped;
bool has_irqchip;
- bool pgd_created;
- vm_paddr_t ucall_mmio_addr;
- vm_paddr_t pgd;
- vm_vaddr_t handlers;
- uint32_t dirty_ring_size;
- uint64_t gpa_tag_mask;
+ gpa_t ucall_mmio_addr;
+ gva_t handlers;
+ u32 dirty_ring_size;
+ gpa_t gpa_tag_mask;
+
+ /*
+ * "mmu" is the guest's stage-1, with a short name because the vast
+ * majority of tests only care about the stage-1 MMU.
+ */
+ struct kvm_mmu mmu;
+ struct kvm_mmu stage2_mmu;
struct kvm_vm_arch arch;
- /* Cache of information for binary stats interface */
- int stats_fd;
- struct kvm_stats_header stats_header;
- struct kvm_stats_desc *stats_desc;
+ struct kvm_binary_stats stats;
/*
* KVM region slots. These are the default memslots used by page
* allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE]
* memslot.
*/
- uint32_t memslots[NR_MEM_REGIONS];
+ u32 memslots[NR_MEM_REGIONS];
};
struct vcpu_reg_sublist {
@@ -141,7 +164,7 @@ struct vcpu_reg_list {
else
struct userspace_mem_region *
-memslot2region(struct kvm_vm *vm, uint32_t memslot);
+memslot2region(struct kvm_vm *vm, u32 memslot);
static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
enum kvm_mem_region_type type)
@@ -167,24 +190,36 @@ enum vm_guest_mode {
VM_MODE_P40V48_4K,
VM_MODE_P40V48_16K,
VM_MODE_P40V48_64K,
- VM_MODE_PXXV48_4K, /* For 48bits VA but ANY bits PA */
+ VM_MODE_PXXVYY_4K, /* For 48-bit or 57-bit VA, depending on host support */
VM_MODE_P47V64_4K,
VM_MODE_P44V64_4K,
VM_MODE_P36V48_4K,
VM_MODE_P36V48_16K,
VM_MODE_P36V48_64K,
+ VM_MODE_P47V47_16K,
VM_MODE_P36V47_16K,
+
+ VM_MODE_P56V57_4K, /* For riscv64 */
+ VM_MODE_P56V48_4K,
+ VM_MODE_P56V39_4K,
+ VM_MODE_P50V57_4K,
+ VM_MODE_P50V48_4K,
+ VM_MODE_P50V39_4K,
+ VM_MODE_P41V57_4K,
+ VM_MODE_P41V48_4K,
+ VM_MODE_P41V39_4K,
+
NUM_VM_MODES,
};
struct vm_shape {
- uint32_t type;
- uint8_t mode;
- uint8_t pad0;
- uint16_t pad1;
+ u32 type;
+ u8 mode;
+ u8 pad0;
+ u16 pad1;
};
-kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t));
+kvm_static_assert(sizeof(struct vm_shape) == sizeof(u64));
#define VM_TYPE_DEFAULT 0
@@ -198,17 +233,17 @@ kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t));
shape; \
})
-#if defined(__aarch64__)
-
extern enum vm_guest_mode vm_mode_default;
+#if defined(__aarch64__)
+
#define VM_MODE_DEFAULT vm_mode_default
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 8)
#elif defined(__x86_64__)
-#define VM_MODE_DEFAULT VM_MODE_PXXV48_4K
+#define VM_MODE_DEFAULT VM_MODE_PXXVYY_4K
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 8)
@@ -224,7 +259,12 @@ extern enum vm_guest_mode vm_mode_default;
#error "RISC-V 32-bit kvm selftests not supported"
#endif
-#define VM_MODE_DEFAULT VM_MODE_P40V48_4K
+#define VM_MODE_DEFAULT vm_mode_default
+#define MIN_PAGE_SHIFT 12U
+#define ptes_per_page(page_size) ((page_size) / 8)
+
+#elif defined(__loongarch__)
+#define VM_MODE_DEFAULT VM_MODE_P47V47_16K
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 8)
@@ -243,16 +283,22 @@ struct vm_guest_mode_params {
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];
+int __open_path_or_exit(const char *path, int flags, const char *enoent_help);
int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);
-bool get_kvm_param_bool(const char *param);
-bool get_kvm_intel_param_bool(const char *param);
-bool get_kvm_amd_param_bool(const char *param);
+int kvm_get_module_param_integer(const char *module_name, const char *param);
+bool kvm_get_module_param_bool(const char *module_name, const char *param);
-int get_kvm_param_integer(const char *param);
-int get_kvm_intel_param_integer(const char *param);
-int get_kvm_amd_param_integer(const char *param);
+static inline bool get_kvm_param_bool(const char *param)
+{
+ return kvm_get_module_param_bool("kvm", param);
+}
+
+static inline int get_kvm_param_integer(const char *param)
+{
+ return kvm_get_module_param_integer("kvm", param);
+}
unsigned int kvm_check_cap(long cap);
@@ -261,9 +307,6 @@ static inline bool kvm_has_cap(long cap)
return kvm_check_cap(cap);
}
-#define __KVM_SYSCALL_ERROR(_name, _ret) \
- "%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)
-
/*
* Use the "inner", double-underscore macro when reporting errors from within
* other macros so that the name of ioctl() and not its literal numeric value
@@ -361,21 +404,22 @@ static inline int vm_check_cap(struct kvm_vm *vm, long cap)
return ret;
}
-static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
+static inline int __vm_enable_cap(struct kvm_vm *vm, u32 cap, u64 arg0)
{
struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
-static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
+
+static inline void vm_enable_cap(struct kvm_vm *vm, u32 cap, u64 arg0)
{
struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
-static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
- uint64_t size, uint64_t attributes)
+static inline void vm_set_memory_attributes(struct kvm_vm *vm, gpa_t gpa,
+ u64 size, u64 attributes)
{
struct kvm_memory_attributes attr = {
.attributes = attributes,
@@ -395,35 +439,35 @@ static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
}
-static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa,
- uint64_t size)
+static inline void vm_mem_set_private(struct kvm_vm *vm, gpa_t gpa,
+ u64 size)
{
vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
}
-static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa,
- uint64_t size)
+static inline void vm_mem_set_shared(struct kvm_vm *vm, gpa_t gpa,
+ u64 size)
{
vm_set_memory_attributes(vm, gpa, size, 0);
}
-void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
+void vm_guest_mem_fallocate(struct kvm_vm *vm, gpa_t gpa, u64 size,
bool punch_hole);
-static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa,
- uint64_t size)
+static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, gpa_t gpa,
+ u64 size)
{
vm_guest_mem_fallocate(vm, gpa, size, true);
}
-static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa,
- uint64_t size)
+static inline void vm_guest_mem_allocate(struct kvm_vm *vm, gpa_t gpa,
+ u64 size)
{
vm_guest_mem_fallocate(vm, gpa, size, false);
}
-void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
-const char *vm_guest_mode_string(uint32_t i);
+void vm_enable_dirty_ring(struct kvm_vm *vm, u32 ring_size);
+const char *vm_guest_mode_string(u32 i);
void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
@@ -431,7 +475,7 @@ void kvm_vm_release(struct kvm_vm *vmp);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);
-void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
+void vm_dump(FILE *stream, struct kvm_vm *vm, u8 indent);
static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
@@ -441,7 +485,7 @@ static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
}
static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
- uint64_t first_page, uint32_t num_pages)
+ u64 first_page, u32 num_pages)
{
struct kvm_clear_dirty_log args = {
.dirty_bitmap = log,
@@ -453,14 +497,14 @@ static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log
vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
}
-static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
+static inline u32 kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}
static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm,
- uint64_t address,
- uint64_t size, bool pio)
+ u64 address,
+ u64 size, bool pio)
{
struct kvm_coalesced_mmio_zone zone = {
.addr = address,
@@ -472,8 +516,8 @@ static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm,
}
static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm,
- uint64_t address,
- uint64_t size, bool pio)
+ u64 address,
+ u64 size, bool pio)
{
struct kvm_coalesced_mmio_zone zone = {
.addr = address,
@@ -492,6 +536,44 @@ static inline int vm_get_stats_fd(struct kvm_vm *vm)
return fd;
}
+static inline int __kvm_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd,
+ u32 flags)
+{
+ struct kvm_irqfd irqfd = {
+ .fd = eventfd,
+ .gsi = gsi,
+ .flags = flags,
+ .resamplefd = -1,
+ };
+
+ return __vm_ioctl(vm, KVM_IRQFD, &irqfd);
+}
+
+static inline void kvm_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd, u32 flags)
+{
+ int ret = __kvm_irqfd(vm, gsi, eventfd, flags);
+
+ TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_IRQFD, ret, vm);
+}
+
+static inline void kvm_assign_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd)
+{
+ kvm_irqfd(vm, gsi, eventfd, 0);
+}
+
+static inline void kvm_deassign_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd)
+{
+ kvm_irqfd(vm, gsi, eventfd, KVM_IRQFD_FLAG_DEASSIGN);
+}
+
+static inline int kvm_new_eventfd(void)
+{
+ int fd = eventfd(0, 0);
+
+ TEST_ASSERT(fd >= 0, __KVM_SYSCALL_ERROR("eventfd()", fd));
+ return fd;
+}
+
static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
{
ssize_t ret;
@@ -528,24 +610,62 @@ static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc
}
void read_stat_data(int stats_fd, struct kvm_stats_header *header,
- struct kvm_stats_desc *desc, uint64_t *data,
+ struct kvm_stats_desc *desc, u64 *data,
size_t max_elements);
-void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
- size_t max_elements);
+void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
+ u64 *data, size_t max_elements);
-static inline uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name)
+#define __get_stat(stats, stat) \
+({ \
+ u64 data; \
+ \
+ kvm_get_stat(stats, #stat, &data, 1); \
+ data; \
+})
+
+#define vm_get_stat(vm, stat) __get_stat(&(vm)->stats, stat)
+#define vcpu_get_stat(vcpu, stat) __get_stat(&(vcpu)->stats, stat)
+
+static inline bool read_smt_control(char *buf, size_t buf_size)
+{
+ FILE *f = fopen("/sys/devices/system/cpu/smt/control", "r");
+ bool ret;
+
+ if (!f)
+ return false;
+
+ ret = fread(buf, sizeof(*buf), buf_size, f) > 0;
+ fclose(f);
+
+ return ret;
+}
+
+static inline bool is_smt_possible(void)
+{
+ char buf[16];
+
+ if (read_smt_control(buf, sizeof(buf)) &&
+ (!strncmp(buf, "forceoff", 8) || !strncmp(buf, "notsupported", 12)))
+ return false;
+
+ return true;
+}
+
+static inline bool is_smt_on(void)
{
- uint64_t data;
+ char buf[16];
+
+ if (read_smt_control(buf, sizeof(buf)) && !strncmp(buf, "on", 2))
+ return true;
- __vm_get_stat(vm, stat_name, &data, 1);
- return data;
+ return false;
}
void vm_create_irqchip(struct kvm_vm *vm);
-static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
- uint64_t flags)
+static inline int __vm_create_guest_memfd(struct kvm_vm *vm, u64 size,
+ u64 flags)
{
struct kvm_create_guest_memfd guest_memfd = {
.size = size,
@@ -555,8 +675,8 @@ static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
}
-static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
- uint64_t flags)
+static inline int vm_create_guest_memfd(struct kvm_vm *vm, u64 size,
+ u64 flags)
{
int fd = __vm_create_guest_memfd(vm, size, flags);
@@ -564,24 +684,23 @@ static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
return fd;
}
-void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
- uint64_t gpa, uint64_t size, void *hva);
-int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
- uint64_t gpa, uint64_t size, void *hva);
-void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
- uint64_t gpa, uint64_t size, void *hva,
- uint32_t guest_memfd, uint64_t guest_memfd_offset);
-int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
- uint64_t gpa, uint64_t size, void *hva,
- uint32_t guest_memfd, uint64_t guest_memfd_offset);
+void vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
+ gpa_t gpa, u64 size, void *hva);
+int __vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
+ gpa_t gpa, u64 size, void *hva);
+void vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
+ gpa_t gpa, u64 size, void *hva,
+ u32 guest_memfd, u64 guest_memfd_offset);
+int __vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
+ gpa_t gpa, u64 size, void *hva,
+ u32 guest_memfd, u64 guest_memfd_offset);
void vm_userspace_mem_region_add(struct kvm_vm *vm,
- enum vm_mem_backing_src_type src_type,
- uint64_t guest_paddr, uint32_t slot, uint64_t npages,
- uint32_t flags);
+ enum vm_mem_backing_src_type src_type,
+ gpa_t gpa, u32 slot, u64 npages, u32 flags);
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
- uint64_t guest_paddr, uint32_t slot, uint64_t npages,
- uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset);
+ gpa_t gpa, u32 slot, u64 npages, u32 flags,
+ int guest_memfd_fd, u64 guest_memfd_offset);
#ifndef vm_arch_has_protected_memory
static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
@@ -590,35 +709,34 @@ static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
}
#endif
-void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
-void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
-void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
-struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
-void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
-vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
-vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
-vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
- enum kvm_mem_region_type type);
-vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
- vm_vaddr_t vaddr_min,
- enum kvm_mem_region_type type);
-vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
-vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
- enum kvm_mem_region_type type);
-vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
-
-void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+void vm_mem_region_set_flags(struct kvm_vm *vm, u32 slot, u32 flags);
+void vm_mem_region_reload(struct kvm_vm *vm, u32 slot);
+void vm_mem_region_move(struct kvm_vm *vm, u32 slot, u64 new_gpa);
+void vm_mem_region_delete(struct kvm_vm *vm, u32 slot);
+struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id);
+void vm_populate_gva_bitmap(struct kvm_vm *vm);
+gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t min_gva);
+gva_t vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva);
+gva_t __vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva,
+ enum kvm_mem_region_type type);
+gva_t vm_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t min_gva,
+ enum kvm_mem_region_type type);
+gva_t vm_alloc_pages(struct kvm_vm *vm, int nr_pages);
+gva_t __vm_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type);
+gva_t vm_alloc_page(struct kvm_vm *vm);
+
+void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
unsigned int npages);
-void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
-void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
-vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
-void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
+void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa);
+void *addr_gva2hva(struct kvm_vm *vm, gva_t gva);
+gpa_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
+void *addr_gpa2alias(struct kvm_vm *vm, gpa_t gpa);
#ifndef vcpu_arch_put_guest
#define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0)
#endif
-static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
+static inline gpa_t vm_untag_gpa(struct kvm_vm *vm, gpa_t gpa)
{
return gpa & ~vm->gpa_tag_mask;
}
@@ -634,8 +752,8 @@ static inline int __vcpu_run(struct kvm_vcpu *vcpu)
void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);
-static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
- uint64_t arg0)
+static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, u32 cap,
+ u64 arg0)
{
struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
@@ -690,31 +808,34 @@ static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
}
-static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
+static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, u64 id, void *addr)
{
- struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };
+ struct kvm_one_reg reg = { .id = id, .addr = (u64)addr };
return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
-static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+
+static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val)
{
- struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
+ struct kvm_one_reg reg = { .id = id, .addr = (u64)&val };
return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
-static inline uint64_t vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id)
+
+static inline u64 vcpu_get_reg(struct kvm_vcpu *vcpu, u64 id)
{
- uint64_t val;
- struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
+ u64 val;
+ struct kvm_one_reg reg = { .id = id, .addr = (u64)&val };
TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);
vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
return val;
}
-static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val)
{
- struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
+ struct kvm_one_reg reg = { .id = id, .addr = (u64)&val };
TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);
@@ -759,75 +880,75 @@ static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
return fd;
}
-int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);
+int __kvm_has_device_attr(int dev_fd, u32 group, u64 attr);
-static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
+static inline void kvm_has_device_attr(int dev_fd, u32 group, u64 attr)
{
int ret = __kvm_has_device_attr(dev_fd, group, attr);
TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
}
-int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);
+int __kvm_device_attr_get(int dev_fd, u32 group, u64 attr, void *val);
-static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
- uint64_t attr, void *val)
+static inline void kvm_device_attr_get(int dev_fd, u32 group,
+ u64 attr, void *val)
{
int ret = __kvm_device_attr_get(dev_fd, group, attr, val);
TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
}
-int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);
+int __kvm_device_attr_set(int dev_fd, u32 group, u64 attr, void *val);
-static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
- uint64_t attr, void *val)
+static inline void kvm_device_attr_set(int dev_fd, u32 group,
+ u64 attr, void *val)
{
int ret = __kvm_device_attr_set(dev_fd, group, attr, val);
TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
}
-static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
- uint64_t attr)
+static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, u32 group,
+ u64 attr)
{
return __kvm_has_device_attr(vcpu->fd, group, attr);
}
-static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
- uint64_t attr)
+static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, u32 group,
+ u64 attr)
{
kvm_has_device_attr(vcpu->fd, group, attr);
}
-static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
- uint64_t attr, void *val)
+static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, u32 group,
+ u64 attr, void *val)
{
return __kvm_device_attr_get(vcpu->fd, group, attr, val);
}
-static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
- uint64_t attr, void *val)
+static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, u32 group,
+ u64 attr, void *val)
{
kvm_device_attr_get(vcpu->fd, group, attr, val);
}
-static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
- uint64_t attr, void *val)
+static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, u32 group,
+ u64 attr, void *val)
{
return __kvm_device_attr_set(vcpu->fd, group, attr, val);
}
-static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
- uint64_t attr, void *val)
+static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, u32 group,
+ u64 attr, void *val)
{
kvm_device_attr_set(vcpu->fd, group, attr, val);
}
-int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
-int __kvm_create_device(struct kvm_vm *vm, uint64_t type);
+int __kvm_test_create_device(struct kvm_vm *vm, u64 type);
+int __kvm_create_device(struct kvm_vm *vm, u64 type);
-static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
+static inline int kvm_create_device(struct kvm_vm *vm, u64 type)
{
int fd = __kvm_create_device(vm, type);
@@ -841,9 +962,9 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);
* VM VCPU Args Set
*
* Input Args:
- * vm - Virtual Machine
+ * vcpu - vCPU
* num - number of arguments
- * ... - arguments, each of type uint64_t
+ * ... - arguments, each of type u64
*
* Output Args: None
*
@@ -851,40 +972,38 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);
*
* Sets the first @num input parameters for the function at @vcpu's entry point,
* per the C calling convention of the architecture, to the values given as
- * variable args. Each of the variable args is expected to be of type uint64_t.
+ * variable args. Each of the variable args is expected to be of type u64.
* The maximum @num can be is specific to the architecture.
*/
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
-void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
-int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
+void kvm_irq_line(struct kvm_vm *vm, u32 irq, int level);
+int _kvm_irq_line(struct kvm_vm *vm, u32 irq, int level);
#define KVM_MAX_IRQ_ROUTES 4096
struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
- uint32_t gsi, uint32_t pin);
+ u32 gsi, u32 pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
const char *exit_reason_str(unsigned int exit_reason);
-vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
- uint32_t memslot);
-vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
- vm_paddr_t paddr_min, uint32_t memslot,
- bool protected);
-vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
+gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot);
+gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, gpa_t min_gpa,
+ u32 memslot, bool protected);
+gpa_t vm_alloc_page_table(struct kvm_vm *vm);
-static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
- vm_paddr_t paddr_min, uint32_t memslot)
+static inline gpa_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+ gpa_t min_gpa, u32 memslot)
{
/*
* By default, allocate memory as protected for VMs that support
* protected memory, as the majority of memory for such VMs is
* protected, i.e. using shared memory is effectively opt-in.
*/
- return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
+ return __vm_phy_pages_alloc(vm, num, min_gpa, memslot,
vm_arch_has_protected_memory(vm));
}
@@ -895,8 +1014,8 @@ static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
* calculate the amount of memory needed for per-vCPU data, e.g. stacks.
*/
struct kvm_vm *____vm_create(struct vm_shape shape);
-struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
- uint64_t nr_extra_pages);
+struct kvm_vm *__vm_create(struct vm_shape shape, u32 nr_runnable_vcpus,
+ u64 nr_extra_pages);
static inline struct kvm_vm *vm_create_barebones(void)
{
@@ -913,16 +1032,16 @@ static inline struct kvm_vm *vm_create_barebones_type(unsigned long type)
return ____vm_create(shape);
}
-static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
+static inline struct kvm_vm *vm_create(u32 nr_runnable_vcpus)
{
return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0);
}
-struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
- uint64_t extra_mem_pages,
+struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, u32 nr_vcpus,
+ u64 extra_mem_pages,
void *guest_code, struct kvm_vcpu *vcpus[]);
-static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
+static inline struct kvm_vm *vm_create_with_vcpus(u32 nr_vcpus,
void *guest_code,
struct kvm_vcpu *vcpus[])
{
@@ -933,7 +1052,7 @@ static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
struct kvm_vcpu **vcpu,
- uint64_t extra_mem_pages,
+ u64 extra_mem_pages,
void *guest_code);
/*
@@ -941,7 +1060,7 @@ struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
* additional pages of guest memory. Returns the VM and vCPU (via out param).
*/
static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
- uint64_t extra_mem_pages,
+ u64 extra_mem_pages,
void *guest_code)
{
return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu,
@@ -963,9 +1082,38 @@ static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape
struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
-void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
+void kvm_set_files_rlimit(u32 nr_vcpus);
+
+int __pin_task_to_cpu(pthread_t task, int cpu);
+
+static inline void pin_task_to_cpu(pthread_t task, int cpu)
+{
+ int r;
+
+ r = __pin_task_to_cpu(task, cpu);
+ TEST_ASSERT(!r, "Failed to set thread affinity to pCPU '%u'", cpu);
+}
+
+static inline int pin_task_to_any_cpu(pthread_t task)
+{
+ int cpu = sched_getcpu();
+
+ pin_task_to_cpu(task, cpu);
+ return cpu;
+}
+
+static inline void pin_self_to_cpu(int cpu)
+{
+ pin_task_to_cpu(pthread_self(), cpu);
+}
+
+static inline int pin_self_to_any_cpu(void)
+{
+ return pin_task_to_any_cpu(pthread_self());
+}
+
void kvm_print_vcpu_pinning_help(void);
-void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
+void kvm_parse_vcpu_pinning(const char *pcpus_string, u32 vcpu_to_pcpu[],
int nr_vcpus);
unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
@@ -977,20 +1125,16 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
unsigned int n;
n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
-#ifdef __s390x__
- /* s390 requires 1M aligned guest sizes */
- n = (n + 255) & ~255;
-#endif
return n;
}
#define sync_global_to_guest(vm, g) ({ \
- typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
+ typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g)); \
memcpy(_p, &(g), sizeof(g)); \
})
#define sync_global_from_guest(vm, g) ({ \
- typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
+ typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g)); \
memcpy(&(g), _p, sizeof(g)); \
})
@@ -1001,7 +1145,7 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
* undesirable to change the host's copy of the global.
*/
#define write_guest_global(vm, g, val) ({ \
- typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
+ typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g)); \
typeof(g) _val = val; \
\
memcpy(_p, &(_val), sizeof(g)); \
@@ -1010,10 +1154,10 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);
void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
- uint8_t indent);
+ u8 indent);
static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
- uint8_t indent)
+ u8 indent)
{
vcpu_arch_dump(stream, vcpu, indent);
}
@@ -1025,10 +1169,10 @@ static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
* vm - Virtual Machine
* vcpu_id - The id of the VCPU to add to the VM.
*/
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id);
void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);
-static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id,
void *guest_code)
{
struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);
@@ -1039,10 +1183,10 @@ static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
}
/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
-struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);
+struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, u32 vcpu_id);
static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
- uint32_t vcpu_id)
+ u32 vcpu_id)
{
return vm_arch_vcpu_recreate(vm, vcpu_id);
}
@@ -1057,26 +1201,15 @@ static inline void virt_pgd_alloc(struct kvm_vm *vm)
}
/*
- * VM Virtual Page Map
- *
- * Input Args:
- * vm - Virtual Machine
- * vaddr - VM Virtual Address
- * paddr - VM Physical Address
- * memslot - Memory region slot for new virtual translation tables
- *
- * Output Args: None
- *
- * Return: None
- *
* Within @vm, creates a virtual translation for the page starting
- * at @vaddr to the page starting at @paddr.
+ * at @gva to the page starting at @gpa.
*/
-void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa);
-static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+static inline void virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
- virt_arch_pg_map(vm, vaddr, paddr);
+ virt_arch_pg_map(vm, gva, gpa);
+ sparsebit_set(vm->vpages_mapped, gva >> vm->page_shift);
}
@@ -1095,9 +1228,9 @@ static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr
* Returns the VM physical address of the translated VM virtual
* address given by @gva.
*/
-vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
+gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva);
-static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+static inline gpa_t addr_gva2gpa(struct kvm_vm *vm, gva_t gva)
{
return addr_arch_gva2gpa(vm, gva);
}
@@ -1117,9 +1250,9 @@ static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
* Dumps to the FILE stream given by @stream, the contents of all the
* virtual translation tables for the VM given by @vm.
*/
-void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent);
-static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+static inline void virt_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
{
virt_arch_dump(stream, vm, indent);
}
@@ -1130,17 +1263,26 @@ static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}
+static inline u64 vm_page_align(struct kvm_vm *vm, u64 v)
+{
+ return (v + vm->page_size - 1) & ~(vm->page_size - 1);
+}
+
/*
- * Arch hook that is invoked via a constructor, i.e. before exeucting main(),
+ * Arch hook that is invoked via a constructor, i.e. before executing main(),
* to allow for arch-specific setup that is common to all tests, e.g. computing
* the default guest "mode".
*/
void kvm_selftest_arch_init(void);
-void kvm_arch_vm_post_create(struct kvm_vm *vm);
+void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus);
+void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm);
+void kvm_arch_vm_release(struct kvm_vm *vm);
+
+bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa);
-bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);
+u32 guest_get_vcpuid(void);
-uint32_t guest_get_vcpuid(void);
+bool kvm_arch_has_default_irqchip(void);
#endif /* SELFTEST_KVM_UTIL_H */
diff --git a/tools/testing/selftests/kvm/include/kvm_util_types.h b/tools/testing/selftests/kvm/include/kvm_util_types.h
index ec787b97cf18..ed0087e31674 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_types.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_types.h
@@ -2,6 +2,8 @@
#ifndef SELFTEST_KVM_UTIL_TYPES_H
#define SELFTEST_KVM_UTIL_TYPES_H
+#include <linux/types.h>
+
/*
* Provide a version of static_assert() that is guaranteed to have an optional
* message param. _GNU_SOURCE is defined for all KVM selftests, _GNU_SOURCE
@@ -14,7 +16,9 @@
#define __kvm_static_assert(expr, msg, ...) _Static_assert(expr, msg)
#define kvm_static_assert(expr, ...) __kvm_static_assert(expr, ##__VA_ARGS__, #expr)
-typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
-typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
+typedef u64 gpa_t; /* Virtual Machine (Guest) physical address */
+typedef u64 gva_t; /* Virtual Machine (Guest) virtual address */
+
+#define INVALID_GPA (~(u64)0)
#endif /* SELFTEST_KVM_UTIL_TYPES_H */
diff --git a/tools/testing/selftests/kvm/include/loongarch/arch_timer.h b/tools/testing/selftests/kvm/include/loongarch/arch_timer.h
new file mode 100644
index 000000000000..3888aeeb3524
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/loongarch/arch_timer.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * LoongArch Constant Timer specific interface
+ */
+#ifndef SELFTEST_KVM_ARCH_TIMER_H
+#define SELFTEST_KVM_ARCH_TIMER_H
+
+#include "processor.h"
+
+/* LoongArch constant timer frequency: nominally 100 MHz (100 << 20 Hz here) */
+#define TIMER_FREQ (100UL << 20)
+#define msec_to_cycles(msec) (TIMER_FREQ * (unsigned long)(msec) / 1000)
+#define usec_to_cycles(usec) (TIMER_FREQ * (unsigned long)(usec) / 1000000)
+#define cycles_to_usec(cycles) ((unsigned long)(cycles) * 1000000 / TIMER_FREQ)
+
+static inline unsigned long timer_get_cycles(void)
+{
+ unsigned long val = 0;
+
+ __asm__ __volatile__(
+ "rdtime.d %0, $zero\n\t"
+ : "=r"(val)
+ :
+ );
+
+ return val;
+}
+
+static inline unsigned long timer_get_cfg(void)
+{
+ return csr_read(LOONGARCH_CSR_TCFG);
+}
+
+static inline unsigned long timer_get_val(void)
+{
+ return csr_read(LOONGARCH_CSR_TVAL);
+}
+
+static inline void disable_timer(void)
+{
+ csr_write(0, LOONGARCH_CSR_TCFG);
+}
+
+static inline void timer_irq_enable(void)
+{
+ unsigned long val;
+
+ val = csr_read(LOONGARCH_CSR_ECFG);
+ val |= ECFGF_TIMER;
+ csr_write(val, LOONGARCH_CSR_ECFG);
+}
+
+static inline void timer_irq_disable(void)
+{
+ unsigned long val;
+
+ val = csr_read(LOONGARCH_CSR_ECFG);
+ val &= ~ECFGF_TIMER;
+ csr_write(val, LOONGARCH_CSR_ECFG);
+}
+
+static inline void timer_set_next_cmp_ms(unsigned int msec, bool period)
+{
+ unsigned long val;
+
+ val = msec_to_cycles(msec) & CSR_TCFG_VAL;
+ val |= CSR_TCFG_EN;
+ if (period)
+ val |= CSR_TCFG_PERIOD;
+ csr_write(val, LOONGARCH_CSR_TCFG);
+}
+
+static inline void __delay(u64 cycles)
+{
+ u64 start = timer_get_cycles();
+
+ while ((timer_get_cycles() - start) < cycles)
+ cpu_relax();
+}
+
+static inline void udelay(unsigned long usec)
+{
+ __delay(usec_to_cycles(usec));
+}
+#endif /* SELFTEST_KVM_ARCH_TIMER_H */
diff --git a/tools/testing/selftests/kvm/include/loongarch/kvm_util_arch.h b/tools/testing/selftests/kvm/include/loongarch/kvm_util_arch.h
new file mode 100644
index 000000000000..d5095900e442
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/loongarch/kvm_util_arch.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UTIL_ARCH_H
+#define SELFTEST_KVM_UTIL_ARCH_H
+
+struct kvm_mmu_arch {};
+struct kvm_vm_arch {};
+
+#endif // SELFTEST_KVM_UTIL_ARCH_H
diff --git a/tools/testing/selftests/kvm/include/loongarch/pmu.h b/tools/testing/selftests/kvm/include/loongarch/pmu.h
new file mode 100644
index 000000000000..478e6a9bbb2b
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/loongarch/pmu.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * LoongArch PMU specific interface
+ */
+#ifndef SELFTEST_KVM_PMU_H
+#define SELFTEST_KVM_PMU_H
+
+#include "processor.h"
+
+#define LOONGARCH_CPUCFG6 0x6
+#define CPUCFG6_PMP BIT(0)
+#define CPUCFG6_PAMVER GENMASK(3, 1)
+#define CPUCFG6_PMNUM GENMASK(7, 4)
+#define CPUCFG6_PMNUM_SHIFT 4
+#define CPUCFG6_PMBITS GENMASK(13, 8)
+#define CPUCFG6_PMBITS_SHIFT 8
+#define CPUCFG6_UPM BIT(14)
+
+/* Performance Counter registers */
+#define LOONGARCH_CSR_PERFCTRL0 0x200 /* perf event 0 config */
+#define LOONGARCH_CSR_PERFCNTR0 0x201 /* perf event 0 count value */
+#define LOONGARCH_CSR_PERFCTRL1 0x202 /* perf event 1 config */
+#define LOONGARCH_CSR_PERFCNTR1 0x203 /* perf event 1 count value */
+#define LOONGARCH_CSR_PERFCTRL2 0x204 /* perf event 2 config */
+#define LOONGARCH_CSR_PERFCNTR2 0x205 /* perf event 2 count value */
+#define LOONGARCH_CSR_PERFCTRL3 0x206 /* perf event 3 config */
+#define LOONGARCH_CSR_PERFCNTR3 0x207 /* perf event 3 count value */
+#define CSR_PERFCTRL_PLV0 BIT(16)
+#define CSR_PERFCTRL_PLV1 BIT(17)
+#define CSR_PERFCTRL_PLV2 BIT(18)
+#define CSR_PERFCTRL_PLV3 BIT(19)
+#define CSR_PERFCTRL_PMIE BIT(20)
+#define PMU_ENVENT_ENABLED (CSR_PERFCTRL_PLV0 | CSR_PERFCTRL_PLV1 | CSR_PERFCTRL_PLV2 | CSR_PERFCTRL_PLV3)
+
+/* Hardware event codes (from LoongArch perf_event.c) */
+#define LOONGARCH_PMU_EVENT_CYCLES 0x00 /* CPU cycles */
+#define LOONGARCH_PMU_EVENT_INSTR_RETIRED 0x01 /* Instructions retired */
+#define PERF_COUNT_HW_BRANCH_INSTRUCTIONS 0x02 /* Branch instructions */
+#define PERF_COUNT_HW_BRANCH_MISSES 0x03 /* Branch misses */
+
+#define NUM_LOOPS 1000
+#define EXPECTED_INSTR_MIN (NUM_LOOPS + 10) /* Loop + overhead */
+#define EXPECTED_CYCLES_MIN NUM_LOOPS /* At least 1 cycle per iteration */
+#define UPPER_BOUND (10 * NUM_LOOPS)
+
+#define PMU_OVERFLOW (1ULL << 63)
+
+static inline void pmu_irq_enable(void)
+{
+ unsigned long val;
+
+ val = csr_read(LOONGARCH_CSR_ECFG);
+ val |= ECFGF_PMU;
+ csr_write(val, LOONGARCH_CSR_ECFG);
+}
+
+static inline void pmu_irq_disable(void)
+{
+ unsigned long val;
+
+ val = csr_read(LOONGARCH_CSR_ECFG);
+ val &= ~ECFGF_PMU;
+ csr_write(val, LOONGARCH_CSR_ECFG);
+}
+
+#endif
diff --git a/tools/testing/selftests/kvm/include/loongarch/processor.h b/tools/testing/selftests/kvm/include/loongarch/processor.h
new file mode 100644
index 000000000000..93dc1fbd2e79
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/loongarch/processor.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef SELFTEST_KVM_PROCESSOR_H
+#define SELFTEST_KVM_PROCESSOR_H
+
+#ifndef __ASSEMBLER__
+#include "ucall_common.h"
+
+#else
+/* general registers */
+#define zero $r0
+#define ra $r1
+#define tp $r2
+#define sp $r3
+#define a0 $r4
+#define a1 $r5
+#define a2 $r6
+#define a3 $r7
+#define a4 $r8
+#define a5 $r9
+#define a6 $r10
+#define a7 $r11
+#define t0 $r12
+#define t1 $r13
+#define t2 $r14
+#define t3 $r15
+#define t4 $r16
+#define t5 $r17
+#define t6 $r18
+#define t7 $r19
+#define t8 $r20
+#define u0 $r21
+#define fp $r22
+#define s0 $r23
+#define s1 $r24
+#define s2 $r25
+#define s3 $r26
+#define s4 $r27
+#define s5 $r28
+#define s6 $r29
+#define s7 $r30
+#define s8 $r31
+#endif
+
+/*
+ * LoongArch page table entry definition
+ * Original header file arch/loongarch/include/asm/loongarch.h
+ */
+#define _PAGE_VALID_SHIFT 0
+#define _PAGE_DIRTY_SHIFT 1
+#define _PAGE_PLV_SHIFT 2 /* 2~3, two bits */
+#define PLV_KERN 0
+#define PLV_USER 3
+#define PLV_MASK 0x3
+#define _CACHE_SHIFT 4 /* 4~5, two bits */
+#define _PAGE_PRESENT_SHIFT 7
+#define _PAGE_WRITE_SHIFT 8
+
+#define _PAGE_VALID BIT_ULL(_PAGE_VALID_SHIFT)
+#define _PAGE_PRESENT BIT_ULL(_PAGE_PRESENT_SHIFT)
+#define _PAGE_WRITE BIT_ULL(_PAGE_WRITE_SHIFT)
+#define _PAGE_DIRTY BIT_ULL(_PAGE_DIRTY_SHIFT)
+#define _PAGE_USER (PLV_USER << _PAGE_PLV_SHIFT)
+#define __READABLE (_PAGE_VALID)
+#define __WRITEABLE (_PAGE_DIRTY | _PAGE_WRITE)
+/* Coherent Cached */
+#define _CACHE_CC BIT_ULL(_CACHE_SHIFT)
+#define PS_4K 0x0000000c
+#define PS_16K 0x0000000e
+#define PS_64K 0x00000010
+#define PS_DEFAULT_SIZE PS_16K
+
+/* LoongArch Basic CSR registers */
+#define LOONGARCH_CSR_CRMD 0x0 /* Current mode info */
+#define CSR_CRMD_PG_SHIFT 4
+#define CSR_CRMD_PG BIT_ULL(CSR_CRMD_PG_SHIFT)
+#define CSR_CRMD_IE_SHIFT 2
+#define CSR_CRMD_IE BIT_ULL(CSR_CRMD_IE_SHIFT)
+#define CSR_CRMD_PLV_SHIFT 0
+#define CSR_CRMD_PLV_WIDTH 2
+#define CSR_CRMD_PLV (0x3UL << CSR_CRMD_PLV_SHIFT)
+#define PLV_MASK 0x3
+#define LOONGARCH_CSR_PRMD 0x1
+#define LOONGARCH_CSR_EUEN 0x2
+#define LOONGARCH_CSR_ECFG 0x4
+#define ECFGB_PMU 10
+#define ECFGF_PMU (BIT_ULL(ECFGB_PMU))
+#define ECFGB_TIMER 11
+#define ECFGF_TIMER (BIT_ULL(ECFGB_TIMER))
+#define LOONGARCH_CSR_ESTAT 0x5 /* Exception status */
+#define CSR_ESTAT_EXC_SHIFT 16
+#define CSR_ESTAT_EXC_WIDTH 6
+#define CSR_ESTAT_EXC (0x3f << CSR_ESTAT_EXC_SHIFT)
+#define EXCCODE_INT 0 /* Interrupt */
+#define INT_PMI 10 /* PMU interrupt */
+#define INT_TI			11	/* Timer interrupt */
+#define LOONGARCH_CSR_ERA 0x6 /* ERA */
+#define LOONGARCH_CSR_BADV 0x7 /* Bad virtual address */
+#define LOONGARCH_CSR_EENTRY 0xc
+#define LOONGARCH_CSR_TLBIDX 0x10 /* TLB Index, EHINV, PageSize */
+#define CSR_TLBIDX_PS_SHIFT 24
+#define CSR_TLBIDX_PS_WIDTH 6
+#define CSR_TLBIDX_PS (0x3fUL << CSR_TLBIDX_PS_SHIFT)
+#define CSR_TLBIDX_SIZEM 0x3f000000
+#define CSR_TLBIDX_SIZE CSR_TLBIDX_PS_SHIFT
+#define LOONGARCH_CSR_ASID 0x18 /* ASID */
+#define LOONGARCH_CSR_PGDL 0x19
+#define LOONGARCH_CSR_PGDH 0x1a
+/* Page table base */
+#define LOONGARCH_CSR_PGD 0x1b
+#define LOONGARCH_CSR_PWCTL0 0x1c
+#define LOONGARCH_CSR_PWCTL1 0x1d
+#define LOONGARCH_CSR_STLBPGSIZE 0x1e
+#define LOONGARCH_CSR_CPUID 0x20
+#define LOONGARCH_CSR_KS0 0x30
+#define LOONGARCH_CSR_KS1 0x31
+#define LOONGARCH_CSR_TMID 0x40
+#define LOONGARCH_CSR_TCFG 0x41
+#define CSR_TCFG_VAL (BIT_ULL(48) - BIT_ULL(2))
+#define CSR_TCFG_PERIOD_SHIFT 1
+#define CSR_TCFG_PERIOD (0x1UL << CSR_TCFG_PERIOD_SHIFT)
+#define CSR_TCFG_EN (0x1UL)
+#define LOONGARCH_CSR_TVAL 0x42
+#define LOONGARCH_CSR_TINTCLR 0x44 /* Timer interrupt clear */
+#define CSR_TINTCLR_TI_SHIFT 0
+#define CSR_TINTCLR_TI (1 << CSR_TINTCLR_TI_SHIFT)
+/* TLB refill exception entry */
+#define LOONGARCH_CSR_TLBRENTRY 0x88
+#define LOONGARCH_CSR_TLBRSAVE 0x8b
+#define LOONGARCH_CSR_TLBREHI 0x8e
+#define CSR_TLBREHI_PS_SHIFT 0
+#define CSR_TLBREHI_PS (0x3fUL << CSR_TLBREHI_PS_SHIFT)
+
+#define read_cpucfg(reg) \
+({ \
+ register unsigned long __v; \
+ __asm__ __volatile__( \
+ "cpucfg %0, %1\n\t" \
+ : "=r" (__v) \
+ : "r" (reg) \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_read(csr) \
+({ \
+ register unsigned long __v; \
+ __asm__ __volatile__( \
+ "csrrd %[val], %[reg]\n\t" \
+ : [val] "=r" (__v) \
+ : [reg] "i" (csr) \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_write(v, csr) \
+({ \
+ register unsigned long __v = v; \
+ __asm__ __volatile__ ( \
+ "csrwr %[val], %[reg]\n\t" \
+ : [val] "+r" (__v) \
+ : [reg] "i" (csr) \
+ : "memory"); \
+ __v; \
+})
+
+#define EXREGS_GPRS (32)
+
+#ifndef __ASSEMBLER__
+void handle_tlb_refill(void);
+void handle_exception(void);
+
+struct ex_regs {
+ unsigned long regs[EXREGS_GPRS];
+ unsigned long pc;
+ unsigned long estat;
+ unsigned long badv;
+ unsigned long prmd;
+};
+
+#define PC_OFFSET_EXREGS offsetof(struct ex_regs, pc)
+#define ESTAT_OFFSET_EXREGS offsetof(struct ex_regs, estat)
+#define BADV_OFFSET_EXREGS offsetof(struct ex_regs, badv)
+#define PRMD_OFFSET_EXREGS offsetof(struct ex_regs, prmd)
+#define EXREGS_SIZE sizeof(struct ex_regs)
+
+#define VECTOR_NUM 64
+
+typedef void(*handler_fn)(struct ex_regs *);
+
+struct handlers {
+ handler_fn exception_handlers[VECTOR_NUM];
+};
+
+void loongarch_vcpu_setup(struct kvm_vcpu *vcpu);
+void vm_init_descriptor_tables(struct kvm_vm *vm);
+void vm_install_exception_handler(struct kvm_vm *vm, int vector, handler_fn handler);
+
+static inline void cpu_relax(void)
+{
+ asm volatile("nop" ::: "memory");
+}
+
+static inline void local_irq_enable(void)
+{
+ unsigned int flags = CSR_CRMD_IE;
+ register unsigned int mask asm("$t0") = CSR_CRMD_IE;
+
+ __asm__ __volatile__(
+ "csrxchg %[val], %[mask], %[reg]\n\t"
+ : [val] "+r" (flags)
+ : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
+ : "memory");
+}
+
+static inline void local_irq_disable(void)
+{
+ unsigned int flags = 0;
+ register unsigned int mask asm("$t0") = CSR_CRMD_IE;
+
+ __asm__ __volatile__(
+ "csrxchg %[val], %[mask], %[reg]\n\t"
+ : [val] "+r" (flags)
+ : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
+ : "memory");
+}
+#else
+#define PC_OFFSET_EXREGS ((EXREGS_GPRS + 0) * 8)
+#define ESTAT_OFFSET_EXREGS ((EXREGS_GPRS + 1) * 8)
+#define BADV_OFFSET_EXREGS ((EXREGS_GPRS + 2) * 8)
+#define PRMD_OFFSET_EXREGS ((EXREGS_GPRS + 3) * 8)
+#define EXREGS_SIZE ((EXREGS_GPRS + 4) * 8)
+#endif
+
+#endif /* SELFTEST_KVM_PROCESSOR_H */
diff --git a/tools/testing/selftests/kvm/include/loongarch/ucall.h b/tools/testing/selftests/kvm/include/loongarch/ucall.h
new file mode 100644
index 000000000000..2210d3d94c40
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/loongarch/ucall.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UCALL_H
+#define SELFTEST_KVM_UCALL_H
+
+#include "kvm_util.h"
+
+#define UCALL_EXIT_REASON KVM_EXIT_MMIO
+
+/*
+ * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each
+ * VM); it must not be accessed from host code.
+ */
+extern gva_t *ucall_exit_mmio_addr;
+
+static inline void ucall_arch_do_ucall(gva_t uc)
+{
+ WRITE_ONCE(*ucall_exit_mmio_addr, uc);
+}
+
+#endif
diff --git a/tools/testing/selftests/kvm/include/lru_gen_util.h b/tools/testing/selftests/kvm/include/lru_gen_util.h
new file mode 100644
index 000000000000..d32ff5d8ffd0
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/lru_gen_util.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Tools for integrating with lru_gen, like parsing the lru_gen debugfs output.
+ *
+ * Copyright (C) 2025, Google LLC.
+ */
+#ifndef SELFTEST_KVM_LRU_GEN_UTIL_H
+#define SELFTEST_KVM_LRU_GEN_UTIL_H
+
+#include <inttypes.h>
+#include <limits.h>
+#include <stdlib.h>
+
+#include "test_util.h"
+
+#define MAX_NR_GENS 16 /* MAX_NR_GENS in include/linux/mmzone.h */
+#define MAX_NR_NODES 4 /* Maximum number of nodes supported by the test */
+
+#define LRU_GEN_DEBUGFS "/sys/kernel/debug/lru_gen"
+#define LRU_GEN_ENABLED_PATH "/sys/kernel/mm/lru_gen/enabled"
+#define LRU_GEN_ENABLED 1
+#define LRU_GEN_MM_WALK 2
+
+struct generation_stats {
+ int gen;
+ long age_ms;
+ long nr_anon;
+ long nr_file;
+};
+
+struct node_stats {
+ int node;
+ int nr_gens; /* Number of populated gens entries. */
+ struct generation_stats gens[MAX_NR_GENS];
+};
+
+struct memcg_stats {
+ unsigned long memcg_id;
+ int nr_nodes; /* Number of populated nodes entries. */
+ struct node_stats nodes[MAX_NR_NODES];
+};
+
+void lru_gen_read_memcg_stats(struct memcg_stats *stats, const char *memcg);
+long lru_gen_sum_memcg_stats(const struct memcg_stats *stats);
+long lru_gen_sum_memcg_stats_for_gen(int gen, const struct memcg_stats *stats);
+void lru_gen_do_aging(struct memcg_stats *stats, const char *memcg);
+int lru_gen_find_generation(const struct memcg_stats *stats,
+ unsigned long total_pages);
+bool lru_gen_usable(void);
+
+#endif /* SELFTEST_KVM_LRU_GEN_UTIL_H */
diff --git a/tools/testing/selftests/kvm/include/memstress.h b/tools/testing/selftests/kvm/include/memstress.h
index 9071eb6dea60..0d1d6230cc05 100644
--- a/tools/testing/selftests/kvm/include/memstress.h
+++ b/tools/testing/selftests/kvm/include/memstress.h
@@ -20,9 +20,9 @@
#define MEMSTRESS_MEM_SLOT_INDEX 1
struct memstress_vcpu_args {
- uint64_t gpa;
- uint64_t gva;
- uint64_t pages;
+ gpa_t gpa;
+ gva_t gva;
+ u64 pages;
/* Only used by the host userspace part of the vCPU thread */
struct kvm_vcpu *vcpu;
@@ -32,11 +32,11 @@ struct memstress_vcpu_args {
struct memstress_args {
struct kvm_vm *vm;
/* The starting address and size of the guest test region. */
- uint64_t gpa;
- uint64_t size;
- uint64_t guest_page_size;
- uint32_t random_seed;
- uint32_t write_percent;
+ gpa_t gpa;
+ u64 size;
+ u64 guest_page_size;
+ u32 random_seed;
+ u32 write_percent;
/* Run vCPUs in L2 instead of L1, if the architecture supports it. */
bool nested;
@@ -45,7 +45,7 @@ struct memstress_args {
/* True if all vCPUs are pinned to pCPUs */
bool pin_vcpus;
/* The vCPU=>pCPU pinning map. Only valid if pin_vcpus is true. */
- uint32_t vcpu_to_pcpu[KVM_MAX_VCPUS];
+ u32 vcpu_to_pcpu[KVM_MAX_VCPUS];
/* Test is done, stop running vCPUs. */
bool stop_vcpus;
@@ -56,27 +56,27 @@ struct memstress_args {
extern struct memstress_args memstress_args;
struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
- uint64_t vcpu_memory_bytes, int slots,
+ u64 vcpu_memory_bytes, int slots,
enum vm_mem_backing_src_type backing_src,
bool partition_vcpu_memory_access);
void memstress_destroy_vm(struct kvm_vm *vm);
-void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent);
+void memstress_set_write_percent(struct kvm_vm *vm, u32 write_percent);
void memstress_set_random_access(struct kvm_vm *vm, bool random_access);
void memstress_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct memstress_vcpu_args *));
void memstress_join_vcpu_threads(int vcpus);
-void memstress_guest_code(uint32_t vcpu_id);
+void memstress_guest_code(u32 vcpu_id);
-uint64_t memstress_nested_pages(int nr_vcpus);
+u64 memstress_nested_pages(int nr_vcpus);
void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
void memstress_enable_dirty_logging(struct kvm_vm *vm, int slots);
void memstress_disable_dirty_logging(struct kvm_vm *vm, int slots);
void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots);
void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
- int slots, uint64_t pages_per_slot);
-unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot);
+ int slots, u64 pages_per_slot);
+unsigned long **memstress_alloc_bitmaps(int slots, u64 pages_per_slot);
void memstress_free_bitmaps(unsigned long *bitmaps[], int slots);
#endif /* SELFTEST_KVM_MEMSTRESS_H */
diff --git a/tools/testing/selftests/kvm/include/numaif.h b/tools/testing/selftests/kvm/include/numaif.h
index b020547403fd..29572a6d789c 100644
--- a/tools/testing/selftests/kvm/include/numaif.h
+++ b/tools/testing/selftests/kvm/include/numaif.h
@@ -1,55 +1,83 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * tools/testing/selftests/kvm/include/numaif.h
- *
- * Copyright (C) 2020, Google LLC.
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
- * Header file that provides access to NUMA API functions not explicitly
- * exported to user space.
- */
+/* Copyright (C) 2020, Google LLC. */
#ifndef SELFTEST_KVM_NUMAIF_H
#define SELFTEST_KVM_NUMAIF_H
-#define __NR_get_mempolicy 239
-#define __NR_migrate_pages 256
+#include <dirent.h>
-/* System calls */
-long get_mempolicy(int *policy, const unsigned long *nmask,
- unsigned long maxnode, void *addr, int flags)
+#include <linux/mempolicy.h>
+
+#include "kvm_syscalls.h"
+
+KVM_SYSCALL_DEFINE(get_mempolicy, 5, int *, policy, const unsigned long *, nmask,
+ unsigned long, maxnode, void *, addr, int, flags);
+
+KVM_SYSCALL_DEFINE(set_mempolicy, 3, int, mode, const unsigned long *, nmask,
+ unsigned long, maxnode);
+
+KVM_SYSCALL_DEFINE(set_mempolicy_home_node, 4, unsigned long, start,
+ unsigned long, len, unsigned long, home_node,
+ unsigned long, flags);
+
+KVM_SYSCALL_DEFINE(migrate_pages, 4, int, pid, unsigned long, maxnode,
+ const unsigned long *, frommask, const unsigned long *, tomask);
+
+KVM_SYSCALL_DEFINE(move_pages, 6, int, pid, unsigned long, count, void *, pages,
+ const int *, nodes, int *, status, int, flags);
+
+KVM_SYSCALL_DEFINE(mbind, 6, void *, addr, unsigned long, size, int, mode,
+ const unsigned long *, nodemask, unsigned long, maxnode,
+ unsigned int, flags);
+
+static inline int get_max_numa_node(void)
{
- return syscall(__NR_get_mempolicy, policy, nmask,
- maxnode, addr, flags);
+ struct dirent *de;
+ int max_node = 0;
+ DIR *d;
+
+ /*
+ * Assume there's a single node if the kernel doesn't support NUMA,
+ * or if no nodes are found.
+ */
+ d = opendir("/sys/devices/system/node");
+ if (!d)
+ return 0;
+
+ while ((de = readdir(d)) != NULL) {
+ int node_id;
+ char *endptr;
+
+ if (strncmp(de->d_name, "node", 4) != 0)
+ continue;
+
+ node_id = strtol(de->d_name + 4, &endptr, 10);
+ if (*endptr != '\0')
+ continue;
+
+ if (node_id > max_node)
+ max_node = node_id;
+ }
+ closedir(d);
+
+ return max_node;
}
-long migrate_pages(int pid, unsigned long maxnode,
- const unsigned long *frommask,
- const unsigned long *tomask)
+static bool is_numa_available(void)
{
- return syscall(__NR_migrate_pages, pid, maxnode, frommask, tomask);
+ /*
+ * Probe for NUMA by doing a dummy get_mempolicy(). If the syscall
+ * fails with ENOSYS, then the kernel was built without NUMA support.
+	 * If the syscall fails with EPERM, then the process/user lacks the
+ * necessary capabilities (CAP_SYS_NICE).
+ */
+ return !get_mempolicy(NULL, NULL, 0, NULL, 0) ||
+ (errno != ENOSYS && errno != EPERM);
}
-/* Policies */
-#define MPOL_DEFAULT 0
-#define MPOL_PREFERRED 1
-#define MPOL_BIND 2
-#define MPOL_INTERLEAVE 3
-
-#define MPOL_MAX MPOL_INTERLEAVE
-
-/* Flags for get_mem_policy */
-#define MPOL_F_NODE (1<<0) /* return next il node or node of address */
- /* Warning: MPOL_F_NODE is unsupported and
- * subject to change. Don't use.
- */
-#define MPOL_F_ADDR (1<<1) /* look up vma using address */
-#define MPOL_F_MEMS_ALLOWED (1<<2) /* query nodes allowed in cpuset */
-
-/* Flags for mbind */
-#define MPOL_MF_STRICT (1<<0) /* Verify existing pages in the mapping */
-#define MPOL_MF_MOVE (1<<1) /* Move pages owned by this process to conform to mapping */
-#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */
+static inline bool is_multi_numa_node_system(void)
+{
+ return is_numa_available() && get_max_numa_node() >= 1;
+}
#endif /* SELFTEST_KVM_NUMAIF_H */
diff --git a/tools/testing/selftests/kvm/include/riscv/arch_timer.h b/tools/testing/selftests/kvm/include/riscv/arch_timer.h
index 225d81dad064..28ffc014da2a 100644
--- a/tools/testing/selftests/kvm/include/riscv/arch_timer.h
+++ b/tools/testing/selftests/kvm/include/riscv/arch_timer.h
@@ -14,25 +14,25 @@
static unsigned long timer_freq;
#define msec_to_cycles(msec) \
- ((timer_freq) * (uint64_t)(msec) / 1000)
+ ((timer_freq) * (u64)(msec) / 1000)
#define usec_to_cycles(usec) \
- ((timer_freq) * (uint64_t)(usec) / 1000000)
+ ((timer_freq) * (u64)(usec) / 1000000)
#define cycles_to_usec(cycles) \
- ((uint64_t)(cycles) * 1000000 / (timer_freq))
+ ((u64)(cycles) * 1000000 / (timer_freq))
-static inline uint64_t timer_get_cycles(void)
+static inline u64 timer_get_cycles(void)
{
return csr_read(CSR_TIME);
}
-static inline void timer_set_cmp(uint64_t cval)
+static inline void timer_set_cmp(u64 cval)
{
csr_write(CSR_STIMECMP, cval);
}
-static inline uint64_t timer_get_cmp(void)
+static inline u64 timer_get_cmp(void)
{
return csr_read(CSR_STIMECMP);
}
@@ -47,17 +47,17 @@ static inline void timer_irq_disable(void)
csr_clear(CSR_SIE, IE_TIE);
}
-static inline void timer_set_next_cmp_ms(uint32_t msec)
+static inline void timer_set_next_cmp_ms(u32 msec)
{
- uint64_t now_ct = timer_get_cycles();
- uint64_t next_ct = now_ct + msec_to_cycles(msec);
+ u64 now_ct = timer_get_cycles();
+ u64 next_ct = now_ct + msec_to_cycles(msec);
timer_set_cmp(next_ct);
}
-static inline void __delay(uint64_t cycles)
+static inline void __delay(u64 cycles)
{
- uint64_t start = timer_get_cycles();
+ u64 start = timer_get_cycles();
while ((timer_get_cycles() - start) < cycles)
cpu_relax();
diff --git a/tools/testing/selftests/kvm/include/riscv/kvm_util_arch.h b/tools/testing/selftests/kvm/include/riscv/kvm_util_arch.h
index e43a57d99b56..d5095900e442 100644
--- a/tools/testing/selftests/kvm/include/riscv/kvm_util_arch.h
+++ b/tools/testing/selftests/kvm/include/riscv/kvm_util_arch.h
@@ -2,6 +2,7 @@
#ifndef SELFTEST_KVM_UTIL_ARCH_H
#define SELFTEST_KVM_UTIL_ARCH_H
+struct kvm_mmu_arch {};
struct kvm_vm_arch {};
#endif // SELFTEST_KVM_UTIL_ARCH_H
diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h
index 5f389166338c..e3acf2ae9881 100644
--- a/tools/testing/selftests/kvm/include/riscv/processor.h
+++ b/tools/testing/selftests/kvm/include/riscv/processor.h
@@ -9,10 +9,23 @@
#include <linux/stringify.h>
#include <asm/csr.h>
+#include <asm/vdso/processor.h>
#include "kvm_util.h"
-static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype,
- uint64_t idx, uint64_t size)
+#define INSN_OPCODE_MASK 0x007c
+#define INSN_OPCODE_SHIFT 2
+#define INSN_OPCODE_SYSTEM 28
+
+#define INSN_MASK_FUNCT3 0x7000
+#define INSN_SHIFT_FUNCT3 12
+
+#define INSN_CSR_MASK 0xfff00000
+#define INSN_CSR_SHIFT 20
+
+#define GET_RM(insn) (((insn) & INSN_MASK_FUNCT3) >> INSN_SHIFT_FUNCT3)
+#define GET_CSR_NUM(insn) (((insn) & INSN_CSR_MASK) >> INSN_CSR_SHIFT)
+
+static inline u64 __kvm_reg_id(u64 type, u64 subtype, u64 idx, u64 size)
{
return KVM_REG_RISCV | type | subtype | idx | size;
}
@@ -48,19 +61,20 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype,
KVM_REG_RISCV_SBI_SINGLE, \
idx, KVM_REG_SIZE_ULONG)
-bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext);
+bool __vcpu_has_ext(struct kvm_vcpu *vcpu, u64 ext);
-static inline bool __vcpu_has_isa_ext(struct kvm_vcpu *vcpu, uint64_t isa_ext)
+static inline bool __vcpu_has_isa_ext(struct kvm_vcpu *vcpu, u64 isa_ext)
{
return __vcpu_has_ext(vcpu, RISCV_ISA_EXT_REG(isa_ext));
}
-static inline bool __vcpu_has_sbi_ext(struct kvm_vcpu *vcpu, uint64_t sbi_ext)
+static inline bool __vcpu_has_sbi_ext(struct kvm_vcpu *vcpu, u64 sbi_ext)
{
return __vcpu_has_ext(vcpu, RISCV_SBI_EXT_REG(sbi_ext));
}
-struct ex_regs {
+struct pt_regs {
+ unsigned long epc;
unsigned long ra;
unsigned long sp;
unsigned long gp;
@@ -92,16 +106,19 @@ struct ex_regs {
unsigned long t4;
unsigned long t5;
unsigned long t6;
- unsigned long epc;
+ /* Supervisor/Machine CSRs */
unsigned long status;
+ unsigned long badaddr;
unsigned long cause;
+ /* a0 value before the syscall */
+ unsigned long orig_a0;
};
#define NR_VECTORS 2
#define NR_EXCEPTIONS 32
#define EC_MASK (NR_EXCEPTIONS - 1)
-typedef void(*exception_handler_fn)(struct ex_regs *);
+typedef void(*exception_handler_fn)(struct pt_regs *);
void vm_init_vector_tables(struct kvm_vm *vm);
void vcpu_init_vector_tables(struct kvm_vcpu *vcpu);
@@ -174,4 +191,6 @@ static inline void local_irq_disable(void)
csr_clear(CSR_SSTATUS, SR_SIE);
}
+unsigned long riscv64_get_satp_mode(void);
+
#endif /* SELFTEST_KVM_PROCESSOR_H */
diff --git a/tools/testing/selftests/kvm/include/riscv/sbi.h b/tools/testing/selftests/kvm/include/riscv/sbi.h
index 046b432ae896..16f1815ac48f 100644
--- a/tools/testing/selftests/kvm/include/riscv/sbi.h
+++ b/tools/testing/selftests/kvm/include/riscv/sbi.h
@@ -97,6 +97,43 @@ enum sbi_pmu_hw_generic_events_t {
SBI_PMU_HW_GENERAL_MAX,
};
+enum sbi_pmu_fw_generic_events_t {
+ SBI_PMU_FW_MISALIGNED_LOAD = 0,
+ SBI_PMU_FW_MISALIGNED_STORE = 1,
+ SBI_PMU_FW_ACCESS_LOAD = 2,
+ SBI_PMU_FW_ACCESS_STORE = 3,
+ SBI_PMU_FW_ILLEGAL_INSN = 4,
+ SBI_PMU_FW_SET_TIMER = 5,
+ SBI_PMU_FW_IPI_SENT = 6,
+ SBI_PMU_FW_IPI_RCVD = 7,
+ SBI_PMU_FW_FENCE_I_SENT = 8,
+ SBI_PMU_FW_FENCE_I_RCVD = 9,
+ SBI_PMU_FW_SFENCE_VMA_SENT = 10,
+ SBI_PMU_FW_SFENCE_VMA_RCVD = 11,
+ SBI_PMU_FW_SFENCE_VMA_ASID_SENT = 12,
+ SBI_PMU_FW_SFENCE_VMA_ASID_RCVD = 13,
+
+ SBI_PMU_FW_HFENCE_GVMA_SENT = 14,
+ SBI_PMU_FW_HFENCE_GVMA_RCVD = 15,
+ SBI_PMU_FW_HFENCE_GVMA_VMID_SENT = 16,
+ SBI_PMU_FW_HFENCE_GVMA_VMID_RCVD = 17,
+
+ SBI_PMU_FW_HFENCE_VVMA_SENT = 18,
+ SBI_PMU_FW_HFENCE_VVMA_RCVD = 19,
+ SBI_PMU_FW_HFENCE_VVMA_ASID_SENT = 20,
+ SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD = 21,
+ SBI_PMU_FW_MAX,
+};
+
+/* SBI PMU event types */
+enum sbi_pmu_event_type {
+ SBI_PMU_EVENT_TYPE_HW = 0x0,
+ SBI_PMU_EVENT_TYPE_CACHE = 0x1,
+ SBI_PMU_EVENT_TYPE_RAW = 0x2,
+ SBI_PMU_EVENT_TYPE_RAW_V2 = 0x3,
+ SBI_PMU_EVENT_TYPE_FW = 0xf,
+};
+
/* SBI PMU counter types */
enum sbi_pmu_ctr_type {
SBI_PMU_CTR_TYPE_HW = 0x0,
diff --git a/tools/testing/selftests/kvm/include/riscv/ucall.h b/tools/testing/selftests/kvm/include/riscv/ucall.h
index a695ae36f3e0..2de7c6a36096 100644
--- a/tools/testing/selftests/kvm/include/riscv/ucall.h
+++ b/tools/testing/selftests/kvm/include/riscv/ucall.h
@@ -7,11 +7,11 @@
#define UCALL_EXIT_REASON KVM_EXIT_RISCV_SBI
-static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa)
{
}
-static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+static inline void ucall_arch_do_ucall(gva_t uc)
{
sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT,
KVM_RISCV_SELFTESTS_SBI_UCALL,
diff --git a/tools/testing/selftests/kvm/include/s390/diag318_test_handler.h b/tools/testing/selftests/kvm/include/s390/diag318_test_handler.h
index b0ed71302722..6deaf18fec22 100644
--- a/tools/testing/selftests/kvm/include/s390/diag318_test_handler.h
+++ b/tools/testing/selftests/kvm/include/s390/diag318_test_handler.h
@@ -8,6 +8,6 @@
#ifndef SELFTEST_KVM_DIAG318_TEST_HANDLER
#define SELFTEST_KVM_DIAG318_TEST_HANDLER
-uint64_t get_diag318_info(void);
+u64 get_diag318_info(void);
#endif
diff --git a/tools/testing/selftests/kvm/include/s390/facility.h b/tools/testing/selftests/kvm/include/s390/facility.h
index 00a1ced6538b..41a265742666 100644
--- a/tools/testing/selftests/kvm/include/s390/facility.h
+++ b/tools/testing/selftests/kvm/include/s390/facility.h
@@ -16,7 +16,7 @@
/* alt_stfle_fac_list[16] + stfle_fac_list[16] */
#define NB_STFL_DOUBLEWORDS 32
-extern uint64_t stfl_doublewords[NB_STFL_DOUBLEWORDS];
+extern u64 stfl_doublewords[NB_STFL_DOUBLEWORDS];
extern bool stfle_flag;
static inline bool test_bit_inv(unsigned long nr, const unsigned long *ptr)
@@ -24,7 +24,7 @@ static inline bool test_bit_inv(unsigned long nr, const unsigned long *ptr)
return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
-static inline void stfle(uint64_t *fac, unsigned int nb_doublewords)
+static inline void stfle(u64 *fac, unsigned int nb_doublewords)
{
register unsigned long r0 asm("0") = nb_doublewords - 1;
diff --git a/tools/testing/selftests/kvm/include/s390/kvm_util_arch.h b/tools/testing/selftests/kvm/include/s390/kvm_util_arch.h
index e43a57d99b56..d5095900e442 100644
--- a/tools/testing/selftests/kvm/include/s390/kvm_util_arch.h
+++ b/tools/testing/selftests/kvm/include/s390/kvm_util_arch.h
@@ -2,6 +2,7 @@
#ifndef SELFTEST_KVM_UTIL_ARCH_H
#define SELFTEST_KVM_UTIL_ARCH_H
+struct kvm_mmu_arch {};
struct kvm_vm_arch {};
#endif // SELFTEST_KVM_UTIL_ARCH_H
diff --git a/tools/testing/selftests/kvm/include/s390/ucall.h b/tools/testing/selftests/kvm/include/s390/ucall.h
index 8035a872a351..3907d629304f 100644
--- a/tools/testing/selftests/kvm/include/s390/ucall.h
+++ b/tools/testing/selftests/kvm/include/s390/ucall.h
@@ -6,11 +6,11 @@
#define UCALL_EXIT_REASON KVM_EXIT_S390_SIEIC
-static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa)
{
}
-static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+static inline void ucall_arch_do_ucall(gva_t uc)
{
/* Exit via DIAGNOSE 0x501 (normally used for breakpoints) */
asm volatile ("diag 0,%0,0x501" : : "a"(uc) : "memory");
diff --git a/tools/testing/selftests/kvm/include/sparsebit.h b/tools/testing/selftests/kvm/include/sparsebit.h
index bc760761e1a3..e027e5790946 100644
--- a/tools/testing/selftests/kvm/include/sparsebit.h
+++ b/tools/testing/selftests/kvm/include/sparsebit.h
@@ -6,7 +6,7 @@
*
* Header file that describes API to the sparsebit library.
* This library provides a memory efficient means of storing
- * the settings of bits indexed via a uint64_t. Memory usage
+ * the settings of bits indexed via a u64. Memory usage
* is reasonable, significantly less than (2^64 / 8) bytes, as
* long as bits that are mostly set or mostly cleared are close
* to each other. This library is efficient in memory usage
@@ -25,8 +25,8 @@ extern "C" {
#endif
struct sparsebit;
-typedef uint64_t sparsebit_idx_t;
-typedef uint64_t sparsebit_num_t;
+typedef u64 sparsebit_idx_t;
+typedef u64 sparsebit_num_t;
struct sparsebit *sparsebit_alloc(void);
void sparsebit_free(struct sparsebit **sbitp);
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index 3e473058849f..d9b433b834f1 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -8,6 +8,8 @@
#ifndef SELFTEST_KVM_TEST_UTIL_H
#define SELFTEST_KVM_TEST_UTIL_H
+#include <setjmp.h>
+#include <signal.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
@@ -20,9 +22,11 @@
#include <sys/mman.h>
#include "kselftest.h"
+#include <linux/types.h>
+
#define msecs_to_usecs(msec) ((msec) * 1000ULL)
-static inline int _no_printf(const char *format, ...) { return 0; }
+static inline __printf(1, 2) int _no_printf(const char *format, ...) { return 0; }
#ifdef DEBUG
#define pr_debug(...) printf(__VA_ARGS__)
@@ -78,27 +82,44 @@ do { \
__builtin_unreachable(); \
} while (0)
+extern sigjmp_buf expect_sigbus_jmpbuf;
+void expect_sigbus_handler(int signum);
+
+#define TEST_EXPECT_SIGBUS(action) \
+do { \
+ struct sigaction sa_old, sa_new = { \
+ .sa_handler = expect_sigbus_handler, \
+ }; \
+ \
+ sigaction(SIGBUS, &sa_new, &sa_old); \
+ if (sigsetjmp(expect_sigbus_jmpbuf, 1) == 0) { \
+ action; \
+ TEST_FAIL("'%s' should have triggered SIGBUS", #action); \
+ } \
+ sigaction(SIGBUS, &sa_old, NULL); \
+} while (0)
+
size_t parse_size(const char *size);
-int64_t timespec_to_ns(struct timespec ts);
-struct timespec timespec_add_ns(struct timespec ts, int64_t ns);
+s64 timespec_to_ns(struct timespec ts);
+struct timespec timespec_add_ns(struct timespec ts, s64 ns);
struct timespec timespec_add(struct timespec ts1, struct timespec ts2);
struct timespec timespec_sub(struct timespec ts1, struct timespec ts2);
struct timespec timespec_elapsed(struct timespec start);
struct timespec timespec_div(struct timespec ts, int divisor);
struct guest_random_state {
- uint32_t seed;
+ u32 seed;
};
-extern uint32_t guest_random_seed;
+extern u32 guest_random_seed;
extern struct guest_random_state guest_rng;
-struct guest_random_state new_guest_random_state(uint32_t seed);
-uint32_t guest_random_u32(struct guest_random_state *state);
+struct guest_random_state new_guest_random_state(u32 seed);
+u32 guest_random_u32(struct guest_random_state *state);
static inline bool __guest_random_bool(struct guest_random_state *state,
- uint8_t percent)
+ u8 percent)
{
return (guest_random_u32(state) % 100) < percent;
}
@@ -108,9 +129,9 @@ static inline bool guest_random_bool(struct guest_random_state *state)
return __guest_random_bool(state, 50);
}
-static inline uint64_t guest_random_u64(struct guest_random_state *state)
+static inline u64 guest_random_u64(struct guest_random_state *state)
{
- return ((uint64_t)guest_random_u32(state) << 32) | guest_random_u32(state);
+ return ((u64)guest_random_u32(state) << 32) | guest_random_u32(state);
}
enum vm_mem_backing_src_type {
@@ -139,7 +160,7 @@ enum vm_mem_backing_src_type {
struct vm_mem_backing_src_alias {
const char *name;
- uint32_t flag;
+ u32 flag;
};
#define MIN_RUN_DELAY_NS 200000UL
@@ -147,12 +168,13 @@ struct vm_mem_backing_src_alias {
bool thp_configured(void);
size_t get_trans_hugepagesz(void);
size_t get_def_hugetlb_pagesz(void);
-const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i);
-size_t get_backing_src_pagesz(uint32_t i);
-bool is_backing_src_hugetlb(uint32_t i);
+const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(u32 i);
+size_t get_backing_src_pagesz(u32 i);
+bool is_backing_src_hugetlb(u32 i);
void backing_src_help(const char *flag);
enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);
long get_run_delay(void);
+bool is_numa_balancing_enabled(void);
/*
* Whether or not the given source type is shared memory (as opposed to
@@ -169,18 +191,18 @@ static inline bool backing_src_can_be_huge(enum vm_mem_backing_src_type t)
}
/* Aligns x up to the next multiple of size. Size must be a power of 2. */
-static inline uint64_t align_up(uint64_t x, uint64_t size)
+static inline u64 align_up(u64 x, u64 size)
{
- uint64_t mask = size - 1;
+ u64 mask = size - 1;
TEST_ASSERT(size != 0 && !(size & (size - 1)),
"size not a power of 2: %lu", size);
return ((x + mask) & ~mask);
}
-static inline uint64_t align_down(uint64_t x, uint64_t size)
+static inline u64 align_down(u64 x, u64 size)
{
- uint64_t x_aligned_up = align_up(x, size);
+ u64 x_aligned_up = align_up(x, size);
if (x == x_aligned_up)
return x;
@@ -195,7 +217,7 @@ static inline void *align_ptr_up(void *x, size_t size)
int atoi_paranoid(const char *num_str);
-static inline uint32_t atoi_positive(const char *name, const char *num_str)
+static inline u32 atoi_positive(const char *name, const char *num_str)
{
int num = atoi_paranoid(num_str);
@@ -203,7 +225,7 @@ static inline uint32_t atoi_positive(const char *name, const char *num_str)
return num;
}
-static inline uint32_t atoi_non_negative(const char *name, const char *num_str)
+static inline u32 atoi_non_negative(const char *name, const char *num_str)
{
int num = atoi_paranoid(num_str);
diff --git a/tools/testing/selftests/kvm/include/timer_test.h b/tools/testing/selftests/kvm/include/timer_test.h
index 9b6edaafe6d4..b7d5d2c84701 100644
--- a/tools/testing/selftests/kvm/include/timer_test.h
+++ b/tools/testing/selftests/kvm/include/timer_test.h
@@ -18,21 +18,21 @@
/* Timer test cmdline parameters */
struct test_args {
- uint32_t nr_vcpus;
- uint32_t nr_iter;
- uint32_t timer_period_ms;
- uint32_t migration_freq_ms;
- uint32_t timer_err_margin_us;
+ u32 nr_vcpus;
+ u32 nr_iter;
+ u32 timer_period_ms;
+ u32 migration_freq_ms;
+ u32 timer_err_margin_us;
/* Members of struct kvm_arm_counter_offset */
- uint64_t counter_offset;
- uint64_t reserved;
+ u64 counter_offset;
+ u64 reserved;
};
/* Shared variables between host and guest */
struct test_vcpu_shared_data {
- uint32_t nr_iter;
+ u32 nr_iter;
int guest_stage;
- uint64_t xcnt;
+ u64 xcnt;
};
extern struct test_args test_args;
diff --git a/tools/testing/selftests/kvm/include/ucall_common.h b/tools/testing/selftests/kvm/include/ucall_common.h
index d9d6581b8d4f..cbdcb0a50c4f 100644
--- a/tools/testing/selftests/kvm/include/ucall_common.h
+++ b/tools/testing/selftests/kvm/include/ucall_common.h
@@ -21,26 +21,26 @@ enum {
#define UCALL_BUFFER_LEN 1024
struct ucall {
- uint64_t cmd;
- uint64_t args[UCALL_MAX_ARGS];
+ u64 cmd;
+ u64 args[UCALL_MAX_ARGS];
char buffer[UCALL_BUFFER_LEN];
/* Host virtual address of this struct. */
struct ucall *hva;
};
-void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
-void ucall_arch_do_ucall(vm_vaddr_t uc);
+void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa);
+void ucall_arch_do_ucall(gva_t uc);
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu);
-void ucall(uint64_t cmd, int nargs, ...);
-__printf(2, 3) void ucall_fmt(uint64_t cmd, const char *fmt, ...);
-__printf(5, 6) void ucall_assert(uint64_t cmd, const char *exp,
+void ucall(u64 cmd, int nargs, ...);
+__printf(2, 3) void ucall_fmt(u64 cmd, const char *fmt, ...);
+__printf(5, 6) void ucall_assert(u64 cmd, const char *exp,
const char *file, unsigned int line,
const char *fmt, ...);
-uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
-void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
-int ucall_nr_pages_required(uint64_t page_size);
+u64 get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
+void ucall_init(struct kvm_vm *vm, gpa_t mmio_gpa);
+int ucall_nr_pages_required(u64 page_size);
/*
* Perform userspace call without any associated data. This bare call avoids
@@ -48,7 +48,7 @@ int ucall_nr_pages_required(uint64_t page_size);
* the full ucall() are problematic and/or unwanted. Note, this will come out
* as UCALL_NONE on the backend.
*/
-#define GUEST_UCALL_NONE() ucall_arch_do_ucall((vm_vaddr_t)NULL)
+#define GUEST_UCALL_NONE() ucall_arch_do_ucall((gva_t)NULL)
#define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \
ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
diff --git a/tools/testing/selftests/kvm/include/userfaultfd_util.h b/tools/testing/selftests/kvm/include/userfaultfd_util.h
index 60f7f9d435dc..0bc1dc16600e 100644
--- a/tools/testing/selftests/kvm/include/userfaultfd_util.h
+++ b/tools/testing/selftests/kvm/include/userfaultfd_util.h
@@ -25,7 +25,7 @@ struct uffd_reader_args {
struct uffd_desc {
int uffd;
- uint64_t num_readers;
+ u64 num_readers;
/* Holds the write ends of the pipes for killing the readers. */
int *pipefds;
pthread_t *readers;
@@ -33,8 +33,8 @@ struct uffd_desc {
};
struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,
- void *hva, uint64_t len,
- uint64_t num_readers,
+ void *hva, u64 len,
+ u64 num_readers,
uffd_handler_t handler);
void uffd_stop_demand_paging(struct uffd_desc *uffd);
diff --git a/tools/testing/selftests/kvm/include/x86/apic.h b/tools/testing/selftests/kvm/include/x86/apic.h
index 80fe9f69b38d..31887bdc3d6c 100644
--- a/tools/testing/selftests/kvm/include/x86/apic.h
+++ b/tools/testing/selftests/kvm/include/x86/apic.h
@@ -28,10 +28,13 @@
#define GET_APIC_ID_FIELD(x) (((x) >> 24) & 0xFF)
#define APIC_TASKPRI 0x80
#define APIC_PROCPRI 0xA0
+#define GET_APIC_PRI(x) (((x) & GENMASK(7, 4)) >> 4)
+#define SET_APIC_PRI(x, y) (((x) & ~GENMASK(7, 4)) | (y << 4))
#define APIC_EOI 0xB0
#define APIC_SPIV 0xF0
#define APIC_SPIV_FOCUS_DISABLED (1 << 9)
#define APIC_SPIV_APIC_ENABLED (1 << 8)
+#define APIC_ISR 0x100
#define APIC_IRR 0x200
#define APIC_ICR 0x300
#define APIC_LVTCMCI 0x2f0
@@ -67,47 +70,51 @@
#define APIC_TMICT 0x380
#define APIC_TMCCT 0x390
#define APIC_TDCR 0x3E0
+#define APIC_SELF_IPI 0x3F0
+
+#define APIC_VECTOR_TO_BIT_NUMBER(v) ((unsigned int)(v) % 32)
+#define APIC_VECTOR_TO_REG_OFFSET(v) ((unsigned int)(v) / 32 * 0x10)
void apic_disable(void);
void xapic_enable(void);
void x2apic_enable(void);
-static inline uint32_t get_bsp_flag(void)
+static inline u32 get_bsp_flag(void)
{
return rdmsr(MSR_IA32_APICBASE) & MSR_IA32_APICBASE_BSP;
}
-static inline uint32_t xapic_read_reg(unsigned int reg)
+static inline u32 xapic_read_reg(unsigned int reg)
{
- return ((volatile uint32_t *)APIC_DEFAULT_GPA)[reg >> 2];
+ return ((volatile u32 *)APIC_DEFAULT_GPA)[reg >> 2];
}
-static inline void xapic_write_reg(unsigned int reg, uint32_t val)
+static inline void xapic_write_reg(unsigned int reg, u32 val)
{
- ((volatile uint32_t *)APIC_DEFAULT_GPA)[reg >> 2] = val;
+ ((volatile u32 *)APIC_DEFAULT_GPA)[reg >> 2] = val;
}
-static inline uint64_t x2apic_read_reg(unsigned int reg)
+static inline u64 x2apic_read_reg(unsigned int reg)
{
return rdmsr(APIC_BASE_MSR + (reg >> 4));
}
-static inline uint8_t x2apic_write_reg_safe(unsigned int reg, uint64_t value)
+static inline u8 x2apic_write_reg_safe(unsigned int reg, u64 value)
{
return wrmsr_safe(APIC_BASE_MSR + (reg >> 4), value);
}
-static inline void x2apic_write_reg(unsigned int reg, uint64_t value)
+static inline void x2apic_write_reg(unsigned int reg, u64 value)
{
- uint8_t fault = x2apic_write_reg_safe(reg, value);
+ u8 fault = x2apic_write_reg_safe(reg, value);
__GUEST_ASSERT(!fault, "Unexpected fault 0x%x on WRMSR(%x) = %lx\n",
fault, APIC_BASE_MSR + (reg >> 4), value);
}
-static inline void x2apic_write_reg_fault(unsigned int reg, uint64_t value)
+static inline void x2apic_write_reg_fault(unsigned int reg, u64 value)
{
- uint8_t fault = x2apic_write_reg_safe(reg, value);
+ u8 fault = x2apic_write_reg_safe(reg, value);
__GUEST_ASSERT(fault == GP_VECTOR,
"Wanted #GP on WRMSR(%x) = %lx, got 0x%x\n",
diff --git a/tools/testing/selftests/kvm/include/x86/evmcs.h b/tools/testing/selftests/kvm/include/x86/evmcs.h
index 5a74bb30e2f8..be79bda024bf 100644
--- a/tools/testing/selftests/kvm/include/x86/evmcs.h
+++ b/tools/testing/selftests/kvm/include/x86/evmcs.h
@@ -10,9 +10,9 @@
#include "hyperv.h"
#include "vmx.h"
-#define u16 uint16_t
-#define u32 uint32_t
-#define u64 uint64_t
+#define u16 u16
+#define u32 u32
+#define u64 u64
#define EVMCS_VERSION 1
@@ -245,7 +245,7 @@ static inline void evmcs_enable(void)
enable_evmcs = true;
}
-static inline int evmcs_vmptrld(uint64_t vmcs_pa, void *vmcs)
+static inline int evmcs_vmptrld(u64 vmcs_pa, void *vmcs)
{
current_vp_assist->current_nested_vmcs = vmcs_pa;
current_vp_assist->enlighten_vmentry = 1;
@@ -265,7 +265,7 @@ static inline bool load_evmcs(struct hyperv_test_pages *hv)
return true;
}
-static inline int evmcs_vmptrst(uint64_t *value)
+static inline int evmcs_vmptrst(u64 *value)
{
*value = current_vp_assist->current_nested_vmcs &
~HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
@@ -273,7 +273,7 @@ static inline int evmcs_vmptrst(uint64_t *value)
return 0;
}
-static inline int evmcs_vmread(uint64_t encoding, uint64_t *value)
+static inline int evmcs_vmread(u64 encoding, u64 *value)
{
switch (encoding) {
case GUEST_RIP:
@@ -672,7 +672,7 @@ static inline int evmcs_vmread(uint64_t encoding, uint64_t *value)
return 0;
}
-static inline int evmcs_vmwrite(uint64_t encoding, uint64_t value)
+static inline int evmcs_vmwrite(u64 encoding, u64 value)
{
switch (encoding) {
case GUEST_RIP:
@@ -1226,9 +1226,9 @@ static inline int evmcs_vmlaunch(void)
"pop %%rbp;"
: [ret]"=&a"(ret)
: [host_rsp]"r"
- ((uint64_t)&current_evmcs->host_rsp),
+ ((u64)&current_evmcs->host_rsp),
[host_rip]"r"
- ((uint64_t)&current_evmcs->host_rip)
+ ((u64)&current_evmcs->host_rip)
: "memory", "cc", "rbx", "r8", "r9", "r10",
"r11", "r12", "r13", "r14", "r15");
return ret;
@@ -1265,9 +1265,9 @@ static inline int evmcs_vmresume(void)
"pop %%rbp;"
: [ret]"=&a"(ret)
: [host_rsp]"r"
- ((uint64_t)&current_evmcs->host_rsp),
+ ((u64)&current_evmcs->host_rsp),
[host_rip]"r"
- ((uint64_t)&current_evmcs->host_rip)
+ ((u64)&current_evmcs->host_rip)
: "memory", "cc", "rbx", "r8", "r9", "r10",
"r11", "r12", "r13", "r14", "r15");
return ret;
diff --git a/tools/testing/selftests/kvm/include/x86/hyperv.h b/tools/testing/selftests/kvm/include/x86/hyperv.h
index f13e532be240..78003f5a22f3 100644
--- a/tools/testing/selftests/kvm/include/x86/hyperv.h
+++ b/tools/testing/selftests/kvm/include/x86/hyperv.h
@@ -254,12 +254,12 @@
* Issue a Hyper-V hypercall. Returns exception vector raised or 0, 'hv_status'
* is set to the hypercall status (if no exception occurred).
*/
-static inline uint8_t __hyperv_hypercall(u64 control, vm_vaddr_t input_address,
- vm_vaddr_t output_address,
- uint64_t *hv_status)
+static inline u8 __hyperv_hypercall(u64 control, gva_t input_address,
+ gva_t output_address,
+ u64 *hv_status)
{
- uint64_t error_code;
- uint8_t vector;
+ u64 error_code;
+ u8 vector;
/* Note both the hypercall and the "asm safe" clobber r9-r11. */
asm volatile("mov %[output_address], %%r8\n\t"
@@ -274,11 +274,11 @@ static inline uint8_t __hyperv_hypercall(u64 control, vm_vaddr_t input_address,
}
/* Issue a Hyper-V hypercall and assert that it succeeded. */
-static inline void hyperv_hypercall(u64 control, vm_vaddr_t input_address,
- vm_vaddr_t output_address)
+static inline void hyperv_hypercall(u64 control, gva_t input_address,
+ gva_t output_address)
{
- uint64_t hv_status;
- uint8_t vector;
+ u64 hv_status;
+ u8 vector;
vector = __hyperv_hypercall(control, input_address, output_address, &hv_status);
@@ -327,27 +327,27 @@ struct hv_vp_assist_page {
extern struct hv_vp_assist_page *current_vp_assist;
-int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist);
+int enable_vp_assist(u64 vp_assist_pa, void *vp_assist);
struct hyperv_test_pages {
/* VP assist page */
void *vp_assist_hva;
- uint64_t vp_assist_gpa;
+ u64 vp_assist_gpa;
void *vp_assist;
/* Partition assist page */
void *partition_assist_hva;
- uint64_t partition_assist_gpa;
+ u64 partition_assist_gpa;
void *partition_assist;
/* Enlightened VMCS */
void *enlightened_vmcs_hva;
- uint64_t enlightened_vmcs_gpa;
+ u64 enlightened_vmcs_gpa;
void *enlightened_vmcs;
};
struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
- vm_vaddr_t *p_hv_pages_gva);
+ gva_t *p_hv_pages_gva);
/* HV_X64_MSR_TSC_INVARIANT_CONTROL bits */
#define HV_INVARIANT_TSC_EXPOSED BIT_ULL(0)
diff --git a/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h b/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h
index 972bb1c4ab4c..c33ab6e04171 100644
--- a/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h
+++ b/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h
@@ -10,13 +10,35 @@
extern bool is_forced_emulation_enabled;
+struct pte_masks {
+ u64 present;
+ u64 writable;
+ u64 user;
+ u64 readable;
+ u64 executable;
+ u64 accessed;
+ u64 dirty;
+ u64 huge;
+ u64 nx;
+ u64 c;
+ u64 s;
+
+ u64 always_set;
+};
+
+struct kvm_mmu_arch {
+ struct pte_masks pte_masks;
+};
+
+struct kvm_mmu;
+
struct kvm_vm_arch {
- vm_vaddr_t gdt;
- vm_vaddr_t tss;
- vm_vaddr_t idt;
+ gva_t gdt;
+ gva_t tss;
+ gva_t idt;
- uint64_t c_bit;
- uint64_t s_bit;
+ u64 c_bit;
+ u64 s_bit;
int sev_fd;
bool is_pt_protected;
};
@@ -40,7 +62,7 @@ do { \
: "+m" (mem) \
: "r" (val) : "memory"); \
} else { \
- uint64_t __old = READ_ONCE(mem); \
+ u64 __old = READ_ONCE(mem); \
\
__asm__ __volatile__(KVM_FEP LOCK_PREFIX "cmpxchg %[new], %[ptr]" \
: [ptr] "+m" (mem), [old] "+a" (__old) \
diff --git a/tools/testing/selftests/kvm/include/x86/pmu.h b/tools/testing/selftests/kvm/include/x86/pmu.h
index 3c10c4dc0ae8..98537cc8840d 100644
--- a/tools/testing/selftests/kvm/include/x86/pmu.h
+++ b/tools/testing/selftests/kvm/include/x86/pmu.h
@@ -5,7 +5,10 @@
#ifndef SELFTEST_KVM_PMU_H
#define SELFTEST_KVM_PMU_H
-#include <stdint.h>
+#include <stdbool.h>
+
+#include <linux/types.h>
+#include <linux/bits.h>
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
@@ -61,6 +64,11 @@
#define INTEL_ARCH_BRANCHES_RETIRED RAW_EVENT(0xc4, 0x00)
#define INTEL_ARCH_BRANCHES_MISPREDICTED RAW_EVENT(0xc5, 0x00)
#define INTEL_ARCH_TOPDOWN_SLOTS RAW_EVENT(0xa4, 0x01)
+#define INTEL_ARCH_TOPDOWN_BE_BOUND RAW_EVENT(0xa4, 0x02)
+#define INTEL_ARCH_TOPDOWN_BAD_SPEC RAW_EVENT(0x73, 0x00)
+#define INTEL_ARCH_TOPDOWN_FE_BOUND RAW_EVENT(0x9c, 0x01)
+#define INTEL_ARCH_TOPDOWN_RETIRING RAW_EVENT(0xc2, 0x02)
+#define INTEL_ARCH_LBR_INSERTS RAW_EVENT(0xe4, 0x01)
#define AMD_ZEN_CORE_CYCLES RAW_EVENT(0x76, 0x00)
#define AMD_ZEN_INSTRUCTIONS_RETIRED RAW_EVENT(0xc0, 0x00)
@@ -80,6 +88,11 @@ enum intel_pmu_architectural_events {
INTEL_ARCH_BRANCHES_RETIRED_INDEX,
INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX,
INTEL_ARCH_TOPDOWN_SLOTS_INDEX,
+ INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX,
+ INTEL_ARCH_TOPDOWN_BAD_SPEC_INDEX,
+ INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX,
+ INTEL_ARCH_TOPDOWN_RETIRING_INDEX,
+ INTEL_ARCH_LBR_INSERTS_INDEX,
NR_INTEL_ARCH_EVENTS,
};
@@ -91,7 +104,21 @@ enum amd_pmu_zen_events {
NR_AMD_ZEN_EVENTS,
};
-extern const uint64_t intel_pmu_arch_events[];
-extern const uint64_t amd_pmu_zen_events[];
+extern const u64 intel_pmu_arch_events[];
+extern const u64 amd_pmu_zen_events[];
+
+enum pmu_errata {
+ INSTRUCTIONS_RETIRED_OVERCOUNT,
+ BRANCHES_RETIRED_OVERCOUNT,
+};
+
+extern u64 pmu_errata_mask;
+
+void kvm_init_pmu_errata(void);
+
+static inline bool this_pmu_has_errata(enum pmu_errata errata)
+{
+ return pmu_errata_mask & BIT_ULL(errata);
+}
#endif /* SELFTEST_KVM_PMU_H */
diff --git a/tools/testing/selftests/kvm/include/x86/processor.h b/tools/testing/selftests/kvm/include/x86/processor.h
index d60da8966772..77f576ee7789 100644
--- a/tools/testing/selftests/kvm/include/x86/processor.h
+++ b/tools/testing/selftests/kvm/include/x86/processor.h
@@ -21,7 +21,9 @@
extern bool host_cpu_is_intel;
extern bool host_cpu_is_amd;
-extern uint64_t guest_tsc_khz;
+extern bool host_cpu_is_hygon;
+extern bool host_cpu_is_amd_compatible;
+extern u64 guest_tsc_khz;
#ifndef MAX_NR_CPUID_ENTRIES
#define MAX_NR_CPUID_ENTRIES 100
@@ -34,6 +36,8 @@ extern uint64_t guest_tsc_khz;
#define NMI_VECTOR 0x02
+const char *ex_str(int vector);
+
#define X86_EFLAGS_FIXED (1u << 1)
#define X86_CR4_VME (1ul << 0)
@@ -183,6 +187,9 @@ struct kvm_x86_cpu_feature {
* Extended Leafs, a.k.a. AMD defined
*/
#define X86_FEATURE_SVM KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 2)
+#define X86_FEATURE_PERFCTR_CORE KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 23)
+#define X86_FEATURE_PERFCTR_NB KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 24)
+#define X86_FEATURE_PERFCTR_LLC KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 28)
#define X86_FEATURE_NX KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 20)
#define X86_FEATURE_GBPAGES KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 26)
#define X86_FEATURE_RDTSCP KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 27)
@@ -196,9 +203,14 @@ struct kvm_x86_cpu_feature {
#define X86_FEATURE_TSCRATEMSR KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 4)
#define X86_FEATURE_PAUSEFILTER KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 10)
#define X86_FEATURE_PFTHRESHOLD KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 12)
+#define X86_FEATURE_V_VMSAVE_VMLOAD KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 15)
#define X86_FEATURE_VGIF KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 16)
+#define X86_FEATURE_IDLE_HLT KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 30)
#define X86_FEATURE_SEV KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 1)
#define X86_FEATURE_SEV_ES KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 3)
+#define X86_FEATURE_SEV_SNP KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 4)
+#define X86_FEATURE_PERFMON_V2 KVM_X86_CPU_FEATURE(0x80000022, 0, EAX, 0)
+#define X86_FEATURE_LBR_PMC_FREEZE KVM_X86_CPU_FEATURE(0x80000022, 0, EAX, 2)
/*
* KVM defined paravirt features.
@@ -258,7 +270,7 @@ struct kvm_x86_cpu_property {
#define X86_PROPERTY_PMU_NR_GP_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
#define X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 16, 23)
#define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)
-#define X86_PROPERTY_PMU_EVENTS_MASK KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 7)
+#define X86_PROPERTY_PMU_EVENTS_MASK KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 12)
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK KVM_X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31)
#define X86_PROPERTY_PMU_NR_FIXED_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 0, 4)
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BIT_WIDTH KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 5, 12)
@@ -285,6 +297,8 @@ struct kvm_x86_cpu_property {
#define X86_PROPERTY_GUEST_MAX_PHY_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 16, 23)
#define X86_PROPERTY_SEV_C_BIT KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 0, 5)
#define X86_PROPERTY_PHYS_ADDR_REDUCTION KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)
+#define X86_PROPERTY_NR_PERFCTR_CORE KVM_X86_CPU_PROPERTY(0x80000022, 0, EBX, 0, 3)
+#define X86_PROPERTY_NR_PERFCTR_NB KVM_X86_CPU_PROPERTY(0x80000022, 0, EBX, 10, 15)
#define X86_PROPERTY_MAX_CENTAUR_LEAF KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31)
@@ -323,6 +337,11 @@ struct kvm_x86_pmu_feature {
#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED KVM_X86_PMU_FEATURE(EBX, 5)
#define X86_PMU_FEATURE_BRANCHES_MISPREDICTED KVM_X86_PMU_FEATURE(EBX, 6)
#define X86_PMU_FEATURE_TOPDOWN_SLOTS KVM_X86_PMU_FEATURE(EBX, 7)
+#define X86_PMU_FEATURE_TOPDOWN_BE_BOUND KVM_X86_PMU_FEATURE(EBX, 8)
+#define X86_PMU_FEATURE_TOPDOWN_BAD_SPEC KVM_X86_PMU_FEATURE(EBX, 9)
+#define X86_PMU_FEATURE_TOPDOWN_FE_BOUND KVM_X86_PMU_FEATURE(EBX, 10)
+#define X86_PMU_FEATURE_TOPDOWN_RETIRING KVM_X86_PMU_FEATURE(EBX, 11)
+#define X86_PMU_FEATURE_LBR_INSERTS KVM_X86_PMU_FEATURE(EBX, 12)
#define X86_PMU_FEATURE_INSNS_RETIRED_FIXED KVM_X86_PMU_FEATURE(ECX, 0)
#define X86_PMU_FEATURE_CPU_CYCLES_FIXED KVM_X86_PMU_FEATURE(ECX, 1)
@@ -346,16 +365,6 @@ static inline unsigned int x86_model(unsigned int eax)
return ((eax >> 12) & 0xf0) | ((eax >> 4) & 0x0f);
}
-/* Page table bitfield declarations */
-#define PTE_PRESENT_MASK BIT_ULL(0)
-#define PTE_WRITABLE_MASK BIT_ULL(1)
-#define PTE_USER_MASK BIT_ULL(2)
-#define PTE_ACCESSED_MASK BIT_ULL(5)
-#define PTE_DIRTY_MASK BIT_ULL(6)
-#define PTE_LARGE_MASK BIT_ULL(7)
-#define PTE_GLOBAL_MASK BIT_ULL(8)
-#define PTE_NX_MASK BIT_ULL(63)
-
#define PHYSICAL_PAGE_MASK GENMASK_ULL(51, 12)
#define PAGE_SHIFT 12
@@ -390,17 +399,17 @@ struct gpr64_regs {
};
struct desc64 {
- uint16_t limit0;
- uint16_t base0;
+ u16 limit0;
+ u16 base0;
unsigned base1:8, type:4, s:1, dpl:2, p:1;
unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8;
- uint32_t base3;
- uint32_t zero1;
+ u32 base3;
+ u32 zero1;
} __attribute__((packed));
struct desc_ptr {
- uint16_t size;
- uint64_t address;
+ u16 size;
+ u64 address;
} __attribute__((packed));
struct kvm_x86_state {
@@ -418,16 +427,18 @@ struct kvm_x86_state {
struct kvm_msrs msrs;
};
-static inline uint64_t get_desc64_base(const struct desc64 *desc)
+static inline u64 get_desc64_base(const struct desc64 *desc)
{
- return ((uint64_t)desc->base3 << 32) |
- (desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
+ return (u64)desc->base3 << 32 |
+ (u64)desc->base2 << 24 |
+ (u64)desc->base1 << 16 |
+ (u64)desc->base0;
}
-static inline uint64_t rdtsc(void)
+static inline u64 rdtsc(void)
{
- uint32_t eax, edx;
- uint64_t tsc_val;
+ u32 eax, edx;
+ u64 tsc_val;
/*
* The lfence is to wait (on Intel CPUs) until all previous
* instructions have been executed. If software requires RDTSC to be
@@ -435,39 +446,39 @@ static inline uint64_t rdtsc(void)
* execute LFENCE immediately after RDTSC
*/
__asm__ __volatile__("lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
- tsc_val = ((uint64_t)edx) << 32 | eax;
+ tsc_val = ((u64)edx) << 32 | eax;
return tsc_val;
}
-static inline uint64_t rdtscp(uint32_t *aux)
+static inline u64 rdtscp(u32 *aux)
{
- uint32_t eax, edx;
+ u32 eax, edx;
__asm__ __volatile__("rdtscp" : "=a"(eax), "=d"(edx), "=c"(*aux));
- return ((uint64_t)edx) << 32 | eax;
+ return ((u64)edx) << 32 | eax;
}
-static inline uint64_t rdmsr(uint32_t msr)
+static inline u64 rdmsr(u32 msr)
{
- uint32_t a, d;
+ u32 a, d;
__asm__ __volatile__("rdmsr" : "=a"(a), "=d"(d) : "c"(msr) : "memory");
- return a | ((uint64_t) d << 32);
+ return a | ((u64)d << 32);
}
-static inline void wrmsr(uint32_t msr, uint64_t value)
+static inline void wrmsr(u32 msr, u64 value)
{
- uint32_t a = value;
- uint32_t d = value >> 32;
+ u32 a = value;
+ u32 d = value >> 32;
__asm__ __volatile__("wrmsr" :: "a"(a), "d"(d), "c"(msr) : "memory");
}
-static inline uint16_t inw(uint16_t port)
+static inline u16 inw(u16 port)
{
- uint16_t tmp;
+ u16 tmp;
__asm__ __volatile__("in %%dx, %%ax"
: /* output */ "=a" (tmp)
@@ -476,101 +487,124 @@ static inline uint16_t inw(uint16_t port)
return tmp;
}
-static inline uint16_t get_es(void)
+static inline u16 get_es(void)
{
- uint16_t es;
+ u16 es;
__asm__ __volatile__("mov %%es, %[es]"
: /* output */ [es]"=rm"(es));
return es;
}
-static inline uint16_t get_cs(void)
+static inline u16 get_cs(void)
{
- uint16_t cs;
+ u16 cs;
__asm__ __volatile__("mov %%cs, %[cs]"
: /* output */ [cs]"=rm"(cs));
return cs;
}
-static inline uint16_t get_ss(void)
+static inline u16 get_ss(void)
{
- uint16_t ss;
+ u16 ss;
__asm__ __volatile__("mov %%ss, %[ss]"
: /* output */ [ss]"=rm"(ss));
return ss;
}
-static inline uint16_t get_ds(void)
+static inline u16 get_ds(void)
{
- uint16_t ds;
+ u16 ds;
__asm__ __volatile__("mov %%ds, %[ds]"
: /* output */ [ds]"=rm"(ds));
return ds;
}
-static inline uint16_t get_fs(void)
+static inline u16 get_fs(void)
{
- uint16_t fs;
+ u16 fs;
__asm__ __volatile__("mov %%fs, %[fs]"
: /* output */ [fs]"=rm"(fs));
return fs;
}
-static inline uint16_t get_gs(void)
+static inline u16 get_gs(void)
{
- uint16_t gs;
+ u16 gs;
__asm__ __volatile__("mov %%gs, %[gs]"
: /* output */ [gs]"=rm"(gs));
return gs;
}
-static inline uint16_t get_tr(void)
+static inline u16 get_tr(void)
{
- uint16_t tr;
+ u16 tr;
__asm__ __volatile__("str %[tr]"
: /* output */ [tr]"=rm"(tr));
return tr;
}
-static inline uint64_t get_cr0(void)
+static inline u64 get_cr0(void)
{
- uint64_t cr0;
+ u64 cr0;
__asm__ __volatile__("mov %%cr0, %[cr0]"
: /* output */ [cr0]"=r"(cr0));
return cr0;
}
-static inline uint64_t get_cr3(void)
+static inline void set_cr0(u64 val)
{
- uint64_t cr3;
+ __asm__ __volatile__("mov %0, %%cr0" : : "r" (val) : "memory");
+}
+
+static inline u64 get_cr3(void)
+{
+ u64 cr3;
__asm__ __volatile__("mov %%cr3, %[cr3]"
: /* output */ [cr3]"=r"(cr3));
return cr3;
}
-static inline uint64_t get_cr4(void)
+static inline void set_cr3(u64 val)
+{
+ __asm__ __volatile__("mov %0, %%cr3" : : "r" (val) : "memory");
+}
+
+static inline u64 get_cr4(void)
{
- uint64_t cr4;
+ u64 cr4;
__asm__ __volatile__("mov %%cr4, %[cr4]"
: /* output */ [cr4]"=r"(cr4));
return cr4;
}
-static inline void set_cr4(uint64_t val)
+static inline void set_cr4(u64 val)
{
__asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
}
+static inline u64 get_cr8(void)
+{
+ u64 cr8;
+
+ __asm__ __volatile__("mov %%cr8, %[cr8]" : [cr8]"=r"(cr8));
+ return cr8;
+}
+
+static inline void set_cr8(u64 val)
+{
+ __asm__ __volatile__("mov %0, %%cr8" : : "r" (val) : "memory");
+}
+
static inline void set_idt(const struct desc_ptr *idt_desc)
{
__asm__ __volatile__("lidt %0"::"m"(*idt_desc));
@@ -617,14 +651,14 @@ static inline struct desc_ptr get_idt(void)
return idt;
}
-static inline void outl(uint16_t port, uint32_t value)
+static inline void outl(u16 port, u32 value)
{
__asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value));
}
-static inline void __cpuid(uint32_t function, uint32_t index,
- uint32_t *eax, uint32_t *ebx,
- uint32_t *ecx, uint32_t *edx)
+static inline void __cpuid(u32 function, u32 index,
+ u32 *eax, u32 *ebx,
+ u32 *ecx, u32 *edx)
{
*eax = function;
*ecx = index;
@@ -638,35 +672,35 @@ static inline void __cpuid(uint32_t function, uint32_t index,
: "memory");
}
-static inline void cpuid(uint32_t function,
- uint32_t *eax, uint32_t *ebx,
- uint32_t *ecx, uint32_t *edx)
+static inline void cpuid(u32 function,
+ u32 *eax, u32 *ebx,
+ u32 *ecx, u32 *edx)
{
return __cpuid(function, 0, eax, ebx, ecx, edx);
}
-static inline uint32_t this_cpu_fms(void)
+static inline u32 this_cpu_fms(void)
{
- uint32_t eax, ebx, ecx, edx;
+ u32 eax, ebx, ecx, edx;
cpuid(1, &eax, &ebx, &ecx, &edx);
return eax;
}
-static inline uint32_t this_cpu_family(void)
+static inline u32 this_cpu_family(void)
{
return x86_family(this_cpu_fms());
}
-static inline uint32_t this_cpu_model(void)
+static inline u32 this_cpu_model(void)
{
return x86_model(this_cpu_fms());
}
static inline bool this_cpu_vendor_string_is(const char *vendor)
{
- const uint32_t *chunk = (const uint32_t *)vendor;
- uint32_t eax, ebx, ecx, edx;
+ const u32 *chunk = (const u32 *)vendor;
+ u32 eax, ebx, ecx, edx;
cpuid(0, &eax, &ebx, &ecx, &edx);
return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
@@ -685,10 +719,14 @@ static inline bool this_cpu_is_amd(void)
return this_cpu_vendor_string_is("AuthenticAMD");
}
-static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index,
- uint8_t reg, uint8_t lo, uint8_t hi)
+static inline bool this_cpu_is_hygon(void)
{
- uint32_t gprs[4];
+ return this_cpu_vendor_string_is("HygonGenuine");
+}
+
+static inline u32 __this_cpu_has(u32 function, u32 index, u8 reg, u8 lo, u8 hi)
+{
+ u32 gprs[4];
__cpuid(function, index,
&gprs[KVM_CPUID_EAX], &gprs[KVM_CPUID_EBX],
@@ -703,7 +741,7 @@ static inline bool this_cpu_has(struct kvm_x86_cpu_feature feature)
feature.reg, feature.bit, feature.bit);
}
-static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property)
+static inline u32 this_cpu_property(struct kvm_x86_cpu_property property)
{
return __this_cpu_has(property.function, property.index,
property.reg, property.lo_bit, property.hi_bit);
@@ -711,7 +749,7 @@ static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property)
static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
{
- uint32_t max_leaf;
+ u32 max_leaf;
switch (property.function & 0xc0000000) {
case 0:
@@ -731,7 +769,7 @@ static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
{
- uint32_t nr_bits;
+ u32 nr_bits;
if (feature.f.reg == KVM_CPUID_EBX) {
nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
@@ -743,13 +781,13 @@ static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
return nr_bits > feature.f.bit || this_cpu_has(feature.f);
}
-static __always_inline uint64_t this_cpu_supported_xcr0(void)
+static __always_inline u64 this_cpu_supported_xcr0(void)
{
if (!this_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
return 0;
return this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
- ((uint64_t)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
+ ((u64)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
}
typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -828,7 +866,7 @@ static inline void cpu_relax(void)
static inline void udelay(unsigned long usec)
{
- uint64_t start, now, cycles;
+ u64 start, now, cycles;
GUEST_ASSERT(guest_tsc_khz);
cycles = guest_tsc_khz / 1000 * usec;
@@ -859,8 +897,8 @@ void kvm_x86_state_cleanup(struct kvm_x86_state *state);
const struct kvm_msr_list *kvm_get_msr_index_list(void);
const struct kvm_msr_list *kvm_get_feature_msr_index_list(void);
-bool kvm_msr_is_in_save_restore_list(uint32_t msr_index);
-uint64_t kvm_get_feature_msr(uint64_t msr_index);
+bool kvm_msr_is_in_save_restore_list(u32 msr_index);
+u64 kvm_get_feature_msr(u64 msr_index);
static inline void vcpu_msrs_get(struct kvm_vcpu *vcpu,
struct kvm_msrs *msrs)
@@ -915,20 +953,20 @@ static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs)
}
const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
- uint32_t function, uint32_t index);
+ u32 function, u32 index);
const struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
-static inline uint32_t kvm_cpu_fms(void)
+static inline u32 kvm_cpu_fms(void)
{
return get_cpuid_entry(kvm_get_supported_cpuid(), 0x1, 0)->eax;
}
-static inline uint32_t kvm_cpu_family(void)
+static inline u32 kvm_cpu_family(void)
{
return x86_family(kvm_cpu_fms());
}
-static inline uint32_t kvm_cpu_model(void)
+static inline u32 kvm_cpu_model(void)
{
return x86_model(kvm_cpu_fms());
}
@@ -941,17 +979,17 @@ static inline bool kvm_cpu_has(struct kvm_x86_cpu_feature feature)
return kvm_cpuid_has(kvm_get_supported_cpuid(), feature);
}
-uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
- struct kvm_x86_cpu_property property);
+u32 kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
+ struct kvm_x86_cpu_property property);
-static inline uint32_t kvm_cpu_property(struct kvm_x86_cpu_property property)
+static inline u32 kvm_cpu_property(struct kvm_x86_cpu_property property)
{
return kvm_cpuid_property(kvm_get_supported_cpuid(), property);
}
static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
{
- uint32_t max_leaf;
+ u32 max_leaf;
switch (property.function & 0xc0000000) {
case 0:
@@ -971,7 +1009,7 @@ static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
{
- uint32_t nr_bits;
+ u32 nr_bits;
if (feature.f.reg == KVM_CPUID_EBX) {
nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
@@ -983,13 +1021,13 @@ static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
return nr_bits > feature.f.bit || kvm_cpu_has(feature.f);
}
-static __always_inline uint64_t kvm_cpu_supported_xcr0(void)
+static __always_inline u64 kvm_cpu_supported_xcr0(void)
{
if (!kvm_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
return 0;
return kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
- ((uint64_t)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
+ ((u64)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
}
static inline size_t kvm_cpuid2_size(int nr_entries)
@@ -1023,8 +1061,8 @@ static inline void vcpu_get_cpuid(struct kvm_vcpu *vcpu)
}
static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
- uint32_t function,
- uint32_t index)
+ u32 function,
+ u32 index)
{
TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first (or equivalent)");
@@ -1035,7 +1073,7 @@ static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *v
}
static inline struct kvm_cpuid_entry2 *vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
- uint32_t function)
+ u32 function)
{
return __vcpu_get_cpuid_entry(vcpu, function, 0);
}
@@ -1065,10 +1103,10 @@ static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu)
void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
struct kvm_x86_cpu_property property,
- uint32_t value);
-void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr);
+ u32 value);
+void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, u8 maxphyaddr);
-void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function);
+void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, u32 function);
static inline bool vcpu_cpuid_has(struct kvm_vcpu *vcpu,
struct kvm_x86_cpu_feature feature)
@@ -1096,8 +1134,8 @@ static inline void vcpu_clear_cpuid_feature(struct kvm_vcpu *vcpu,
vcpu_set_or_clear_cpuid_feature(vcpu, feature, false);
}
-uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index);
-int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value);
+u64 vcpu_get_msr(struct kvm_vcpu *vcpu, u64 msr_index);
+int _vcpu_set_msr(struct kvm_vcpu *vcpu, u64 msr_index, u64 msr_value);
/*
* Assert on an MSR access(es) and pretty print the MSR name when possible.
@@ -1122,14 +1160,14 @@ do { \
* is changing, etc. This is NOT an exhaustive list! The intent is to filter
* out MSRs that are not durable _and_ that a selftest wants to write.
*/
-static inline bool is_durable_msr(uint32_t msr)
+static inline bool is_durable_msr(u32 msr)
{
return msr != MSR_IA32_TSC;
}
#define vcpu_set_msr(vcpu, msr, val) \
do { \
- uint64_t r, v = val; \
+ u64 r, v = val; \
\
TEST_ASSERT_MSR(_vcpu_set_msr(vcpu, msr, v) == 1, \
"KVM_SET_MSRS failed on %s, value = 0x%lx", msr, #msr, v); \
@@ -1141,36 +1179,41 @@ do { \
void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
void kvm_init_vm_address_properties(struct kvm_vm *vm);
-bool vm_is_unrestricted_guest(struct kvm_vm *vm);
struct ex_regs {
- uint64_t rax, rcx, rdx, rbx;
- uint64_t rbp, rsi, rdi;
- uint64_t r8, r9, r10, r11;
- uint64_t r12, r13, r14, r15;
- uint64_t vector;
- uint64_t error_code;
- uint64_t rip;
- uint64_t cs;
- uint64_t rflags;
+ u64 rax, rcx, rdx, rbx;
+ u64 rbp, rsi, rdi;
+ u64 r8, r9, r10, r11;
+ u64 r12, r13, r14, r15;
+ u64 vector;
+ u64 error_code;
+ u64 rip;
+ u64 cs;
+ u64 rflags;
};
struct idt_entry {
- uint16_t offset0;
- uint16_t selector;
- uint16_t ist : 3;
- uint16_t : 5;
- uint16_t type : 4;
- uint16_t : 1;
- uint16_t dpl : 2;
- uint16_t p : 1;
- uint16_t offset1;
- uint32_t offset2; uint32_t reserved;
+ u16 offset0;
+ u16 selector;
+ u16 ist : 3;
+ u16 : 5;
+ u16 type : 4;
+ u16 : 1;
+ u16 dpl : 2;
+ u16 p : 1;
+ u16 offset1;
+ u32 offset2; u32 reserved;
};
void vm_install_exception_handler(struct kvm_vm *vm, int vector,
void (*handler)(struct ex_regs *));
+/*
+ * Exception fixup morphs #DE to an arbitrary magic vector so that '0' can be
+ * used to signal "no exception".
+ */
+#define KVM_MAGIC_DE_VECTOR 0xff
+
/* If a toddler were to say "abracadabra". */
#define KVM_EXCEPTION_MAGIC 0xabacadabaULL
@@ -1218,8 +1261,8 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
#define kvm_asm_safe(insn, inputs...) \
({ \
- uint64_t ign_error_code; \
- uint8_t vector; \
+ u64 ign_error_code; \
+ u8 vector; \
\
asm volatile(KVM_ASM_SAFE(insn) \
: KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \
@@ -1230,7 +1273,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
#define kvm_asm_safe_ec(insn, error_code, inputs...) \
({ \
- uint8_t vector; \
+ u8 vector; \
\
asm volatile(KVM_ASM_SAFE(insn) \
: KVM_ASM_SAFE_OUTPUTS(vector, error_code) \
@@ -1241,10 +1284,10 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
#define kvm_asm_safe_fep(insn, inputs...) \
({ \
- uint64_t ign_error_code; \
- uint8_t vector; \
+ u64 ign_error_code; \
+ u8 vector; \
\
- asm volatile(KVM_ASM_SAFE(insn) \
+ asm volatile(KVM_ASM_SAFE_FEP(insn) \
: KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \
: inputs \
: KVM_ASM_SAFE_CLOBBERS); \
@@ -1253,7 +1296,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
#define kvm_asm_safe_ec_fep(insn, error_code, inputs...) \
({ \
- uint8_t vector; \
+ u8 vector; \
\
asm volatile(KVM_ASM_SAFE_FEP(insn) \
: KVM_ASM_SAFE_OUTPUTS(vector, error_code) \
@@ -1263,11 +1306,11 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
})
#define BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP) \
-static inline uint8_t insn##_safe ##_fep(uint32_t idx, uint64_t *val) \
+static inline u8 insn##_safe ##_fep(u32 idx, u64 *val) \
{ \
- uint64_t error_code; \
- uint8_t vector; \
- uint32_t a, d; \
+ u64 error_code; \
+ u8 vector; \
+ u32 a, d; \
\
asm volatile(KVM_ASM_SAFE##_FEP(#insn) \
: "=a"(a), "=d"(d), \
@@ -1275,7 +1318,7 @@ static inline uint8_t insn##_safe ##_fep(uint32_t idx, uint64_t *val) \
: "c"(idx) \
: KVM_ASM_SAFE_CLOBBERS); \
\
- *val = (uint64_t)a | ((uint64_t)d << 32); \
+ *val = (u64)a | ((u64)d << 32); \
return vector; \
}
@@ -1291,12 +1334,12 @@ BUILD_READ_U64_SAFE_HELPERS(rdmsr)
BUILD_READ_U64_SAFE_HELPERS(rdpmc)
BUILD_READ_U64_SAFE_HELPERS(xgetbv)
-static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
+static inline u8 wrmsr_safe(u32 msr, u64 val)
{
return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr));
}
-static inline uint8_t xsetbv_safe(uint32_t index, uint64_t value)
+static inline u8 xsetbv_safe(u32 index, u64 value)
{
u32 eax = value;
u32 edx = value >> 32;
@@ -1306,6 +1349,26 @@ static inline uint8_t xsetbv_safe(uint32_t index, uint64_t value)
bool kvm_is_tdp_enabled(void);
+static inline bool get_kvm_intel_param_bool(const char *param)
+{
+ return kvm_get_module_param_bool("kvm_intel", param);
+}
+
+static inline bool get_kvm_amd_param_bool(const char *param)
+{
+ return kvm_get_module_param_bool("kvm_amd", param);
+}
+
+static inline int get_kvm_intel_param_integer(const char *param)
+{
+ return kvm_get_module_param_integer("kvm_intel", param);
+}
+
+static inline int get_kvm_amd_param_integer(const char *param)
+{
+ return kvm_get_module_param_integer("kvm_amd", param);
+}
+
static inline bool kvm_is_pmu_enabled(void)
{
return get_kvm_param_bool("enable_pmu");
@@ -1316,30 +1379,80 @@ static inline bool kvm_is_forced_emulation_enabled(void)
return !!get_kvm_param_integer("force_emulation_prefix");
}
-uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
- int *level);
-uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr);
+static inline bool kvm_is_unrestricted_guest_enabled(void)
+{
+ return get_kvm_intel_param_bool("unrestricted_guest");
+}
+
+static inline bool kvm_is_ignore_msrs(void)
+{
+ return get_kvm_param_bool("ignore_msrs");
+}
+
+static inline bool kvm_is_lbrv_enabled(void)
+{
+ return !!get_kvm_amd_param_integer("lbrv");
+}
+
+u64 *vm_get_pte(struct kvm_vm *vm, gva_t gva);
-uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
- uint64_t a3);
-uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
-void xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
+u64 kvm_hypercall(u64 nr, u64 a0, u64 a1, u64 a2, u64 a3);
+u64 __xen_hypercall(u64 nr, u64 a0, void *a1);
+void xen_hypercall(u64 nr, u64 a0, void *a1);
-static inline uint64_t __kvm_hypercall_map_gpa_range(uint64_t gpa,
- uint64_t size, uint64_t flags)
+static inline u64 __kvm_hypercall_map_gpa_range(gpa_t gpa, u64 size, u64 flags)
{
return kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, size >> PAGE_SHIFT, flags, 0);
}
-static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
- uint64_t flags)
+static inline void kvm_hypercall_map_gpa_range(gpa_t gpa, u64 size, u64 flags)
{
- uint64_t ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);
+ u64 ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);
GUEST_ASSERT(!ret);
}
-void __vm_xsave_require_permission(uint64_t xfeature, const char *name);
+/*
+ * Execute HLT in an STI interrupt shadow to ensure that a pending IRQ that's
+ * intended to be a wake event arrives *after* HLT is executed. Modern CPUs,
+ * except for a few oddballs that KVM is unlikely to run on, block IRQs for one
+ * instruction after STI, *if* RFLAGS.IF=0 before STI. Note, Intel CPUs may
+ * block other events beyond regular IRQs, e.g. may block NMIs and SMIs too.
+ */
+static inline void safe_halt(void)
+{
+ asm volatile("sti; hlt");
+}
+
+/*
+ * Enable interrupts and ensure that interrupts are evaluated upon return from
+ * this function, i.e. execute a nop to consume the STI interrupt shadow.
+ */
+static inline void sti_nop(void)
+{
+ asm volatile ("sti; nop");
+}
+
+/*
+ * Enable interrupts for one instruction (nop), to allow the CPU to process all
+ * interrupts that are already pending.
+ */
+static inline void sti_nop_cli(void)
+{
+ asm volatile ("sti; nop; cli");
+}
+
+static inline void sti(void)
+{
+ asm volatile("sti");
+}
+
+static inline void cli(void)
+{
+ asm volatile ("cli");
+}
+
+void __vm_xsave_require_permission(u64 xfeature, const char *name);
#define vm_xsave_require_permission(xfeature) \
__vm_xsave_require_permission(xfeature, #xfeature)
@@ -1350,7 +1463,7 @@ enum pg_level {
PG_LEVEL_2M,
PG_LEVEL_1G,
PG_LEVEL_512G,
- PG_LEVEL_NUM
+ PG_LEVEL_256T
};
#define PG_LEVEL_SHIFT(_level) ((_level - 1) * 9 + 12)
@@ -1360,9 +1473,51 @@ enum pg_level {
#define PG_SIZE_2M PG_LEVEL_SIZE(PG_LEVEL_2M)
#define PG_SIZE_1G PG_LEVEL_SIZE(PG_LEVEL_1G)
-void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level);
-void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- uint64_t nr_bytes, int level);
+#define PTE_PRESENT_MASK(mmu) ((mmu)->arch.pte_masks.present)
+#define PTE_WRITABLE_MASK(mmu) ((mmu)->arch.pte_masks.writable)
+#define PTE_USER_MASK(mmu) ((mmu)->arch.pte_masks.user)
+#define PTE_READABLE_MASK(mmu) ((mmu)->arch.pte_masks.readable)
+#define PTE_EXECUTABLE_MASK(mmu) ((mmu)->arch.pte_masks.executable)
+#define PTE_ACCESSED_MASK(mmu) ((mmu)->arch.pte_masks.accessed)
+#define PTE_DIRTY_MASK(mmu) ((mmu)->arch.pte_masks.dirty)
+#define PTE_HUGE_MASK(mmu) ((mmu)->arch.pte_masks.huge)
+#define PTE_NX_MASK(mmu) ((mmu)->arch.pte_masks.nx)
+#define PTE_C_BIT_MASK(mmu) ((mmu)->arch.pte_masks.c)
+#define PTE_S_BIT_MASK(mmu) ((mmu)->arch.pte_masks.s)
+#define PTE_ALWAYS_SET_MASK(mmu) ((mmu)->arch.pte_masks.always_set)
+
+/*
+ * For PTEs without a PRESENT bit (i.e. EPT entries), treat the PTE as present
+ * if it's executable or readable, as EPT supports execute-only PTEs, but not
+ * write-only PTEs.
+ */
+#define is_present_pte(mmu, pte) \
+ (PTE_PRESENT_MASK(mmu) ? \
+ !!(*(pte) & PTE_PRESENT_MASK(mmu)) : \
+ !!(*(pte) & (PTE_READABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu))))
+#define is_executable_pte(mmu, pte) \
+ ((*(pte) & (PTE_EXECUTABLE_MASK(mmu) | PTE_NX_MASK(mmu))) == PTE_EXECUTABLE_MASK(mmu))
+#define is_writable_pte(mmu, pte) (!!(*(pte) & PTE_WRITABLE_MASK(mmu)))
+#define is_user_pte(mmu, pte) (!!(*(pte) & PTE_USER_MASK(mmu)))
+#define is_accessed_pte(mmu, pte) (!!(*(pte) & PTE_ACCESSED_MASK(mmu)))
+#define is_dirty_pte(mmu, pte) (!!(*(pte) & PTE_DIRTY_MASK(mmu)))
+#define is_huge_pte(mmu, pte) (!!(*(pte) & PTE_HUGE_MASK(mmu)))
+#define is_nx_pte(mmu, pte) (!is_executable_pte(mmu, pte))
+
+void tdp_mmu_init(struct kvm_vm *vm, int pgtable_levels,
+ struct pte_masks *pte_masks);
+
+void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva,
+ gpa_t gpa, int level);
+void virt_map_level(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
+ u64 nr_bytes, int level);
+
+void vm_enable_tdp(struct kvm_vm *vm);
+bool kvm_cpu_has_tdp(void);
+void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size);
+void tdp_identity_map_default_memslots(struct kvm_vm *vm);
+void tdp_identity_map_1g(struct kvm_vm *vm, u64 addr, u64 size);
+u64 *tdp_get_pte(struct kvm_vm *vm, u64 l2_gpa);
/*
* Basic CPU control in CR0
diff --git a/tools/testing/selftests/kvm/include/x86/sev.h b/tools/testing/selftests/kvm/include/x86/sev.h
index 82c11c81a956..1af44c151d60 100644
--- a/tools/testing/selftests/kvm/include/x86/sev.h
+++ b/tools/testing/selftests/kvm/include/x86/sev.h
@@ -25,19 +25,51 @@ enum sev_guest_state {
#define SEV_POLICY_NO_DBG (1UL << 0)
#define SEV_POLICY_ES (1UL << 2)
+#define SNP_POLICY_SMT (1ULL << 16)
+#define SNP_POLICY_RSVD_MBO (1ULL << 17)
+#define SNP_POLICY_DBG (1ULL << 19)
+
#define GHCB_MSR_TERM_REQ 0x100
-void sev_vm_launch(struct kvm_vm *vm, uint32_t policy);
-void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement);
+static inline bool is_sev_snp_vm(struct kvm_vm *vm)
+{
+ return vm->type == KVM_X86_SNP_VM;
+}
+
+static inline bool is_sev_es_vm(struct kvm_vm *vm)
+{
+ return is_sev_snp_vm(vm) || vm->type == KVM_X86_SEV_ES_VM;
+}
+
+static inline bool is_sev_vm(struct kvm_vm *vm)
+{
+ return is_sev_es_vm(vm) || vm->type == KVM_X86_SEV_VM;
+}
+
+void sev_vm_launch(struct kvm_vm *vm, u32 policy);
+void sev_vm_launch_measure(struct kvm_vm *vm, u8 *measurement);
void sev_vm_launch_finish(struct kvm_vm *vm);
+void snp_vm_launch_start(struct kvm_vm *vm, u64 policy);
+void snp_vm_launch_update(struct kvm_vm *vm);
+void snp_vm_launch_finish(struct kvm_vm *vm);
-struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
+struct kvm_vm *vm_sev_create_with_one_vcpu(u32 type, void *guest_code,
struct kvm_vcpu **cpu);
-void vm_sev_launch(struct kvm_vm *vm, uint32_t policy, uint8_t *measurement);
+void vm_sev_launch(struct kvm_vm *vm, u64 policy, u8 *measurement);
kvm_static_assert(SEV_RET_SUCCESS == 0);
/*
+ * A SEV-SNP VM requires the policy reserved bit to always be set.
+ * The SMT policy bit is also required to be set based on SMT being
+ * available and active on the system.
+ */
+static inline u64 snp_default_policy(void)
+{
+ return SNP_POLICY_RSVD_MBO | (is_smt_on() ? SNP_POLICY_SMT : 0);
+}
+
+/*
* The KVM_MEMORY_ENCRYPT_OP uAPI is utter garbage and takes an "unsigned long"
* instead of a proper struct. The size of the parameter is embedded in the
* ioctl number, i.e. is ABI and thus immutable. Hack around the mess by
@@ -53,7 +85,7 @@ kvm_static_assert(SEV_RET_SUCCESS == 0);
unsigned long raw; \
} sev_cmd = { .c = { \
.id = (cmd), \
- .data = (uint64_t)(arg), \
+ .data = (u64)(arg), \
.sev_fd = (vm)->arch.sev_fd, \
} }; \
\
@@ -70,6 +102,12 @@ kvm_static_assert(SEV_RET_SUCCESS == 0);
void sev_vm_init(struct kvm_vm *vm);
void sev_es_vm_init(struct kvm_vm *vm);
+void snp_vm_init(struct kvm_vm *vm);
+
+static inline void vmgexit(void)
+{
+ __asm__ __volatile__("rep; vmmcall");
+}
static inline void sev_register_encrypted_memory(struct kvm_vm *vm,
struct userspace_mem_region *region)
@@ -82,8 +120,8 @@ static inline void sev_register_encrypted_memory(struct kvm_vm *vm,
vm_ioctl(vm, KVM_MEMORY_ENCRYPT_REG_REGION, &range);
}
-static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
- uint64_t size)
+static inline void sev_launch_update_data(struct kvm_vm *vm, gpa_t gpa,
+ u64 size)
{
struct kvm_sev_launch_update_data update_data = {
.uaddr = (unsigned long)addr_gpa2hva(vm, gpa),
@@ -93,4 +131,17 @@ static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_DATA, &update_data);
}
+static inline void snp_launch_update_data(struct kvm_vm *vm, gpa_t gpa,
+ u64 hva, u64 size, u8 type)
+{
+ struct kvm_sev_snp_launch_update update_data = {
+ .uaddr = hva,
+ .gfn_start = gpa >> PAGE_SHIFT,
+ .len = size,
+ .type = type,
+ };
+
+ vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_UPDATE, &update_data);
+}
+
#endif /* SELFTEST_KVM_SEV_H */
diff --git a/tools/testing/selftests/kvm/include/x86/smm.h b/tools/testing/selftests/kvm/include/x86/smm.h
new file mode 100644
index 000000000000..2d1afa09819b
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/x86/smm.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef SELFTEST_KVM_SMM_H
+#define SELFTEST_KVM_SMM_H
+
+#include "kvm_util.h"
+
+#define SMRAM_SIZE 65536
+#define SMRAM_MEMSLOT ((1 << 16) | 1)
+#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
+
+void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, u64 smram_gpa,
+ const void *smi_handler, size_t handler_size);
+
+void inject_smi(struct kvm_vcpu *vcpu);
+
+#endif /* SELFTEST_KVM_SMM_H */
diff --git a/tools/testing/selftests/kvm/include/x86/svm.h b/tools/testing/selftests/kvm/include/x86/svm.h
index 29cffd0a9181..c8539166270e 100644
--- a/tools/testing/selftests/kvm/include/x86/svm.h
+++ b/tools/testing/selftests/kvm/include/x86/svm.h
@@ -92,19 +92,18 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u32 int_vector;
u32 int_state;
u8 reserved_3[4];
- u32 exit_code;
- u32 exit_code_hi;
+ u64 exit_code;
u64 exit_info_1;
u64 exit_info_2;
u32 exit_int_info;
u32 exit_int_info_err;
- u64 nested_ctl;
+ u64 misc_ctl;
u64 avic_vapic_bar;
u8 reserved_4[8];
u32 event_inj;
u32 event_inj_err;
u64 nested_cr3;
- u64 virt_ext;
+ u64 misc_ctl2;
u32 clean;
u32 reserved_5;
u64 next_rip;
@@ -156,9 +155,6 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define AVIC_ENABLE_SHIFT 31
#define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT)
-#define LBR_CTL_ENABLE_MASK BIT_ULL(0)
-#define VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK BIT_ULL(1)
-
#define SVM_INTERRUPT_SHADOW_MASK 1
#define SVM_IOIO_STR_SHIFT 2
@@ -176,8 +172,11 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
#define SVM_VM_CR_SVM_DIS_MASK 0x0010ULL
-#define SVM_NESTED_CTL_NP_ENABLE BIT(0)
-#define SVM_NESTED_CTL_SEV_ENABLE BIT(1)
+#define SVM_MISC_ENABLE_NP BIT(0)
+#define SVM_MISC_ENABLE_SEV BIT(1)
+
+#define SVM_MISC2_ENABLE_V_LBR BIT_ULL(0)
+#define SVM_MISC2_ENABLE_V_VMLOAD_VMSAVE BIT_ULL(1)
struct __attribute__ ((__packed__)) vmcb_seg {
u16 selector;
diff --git a/tools/testing/selftests/kvm/include/x86/svm_util.h b/tools/testing/selftests/kvm/include/x86/svm_util.h
index b74c6dcddcbd..6c013eb838be 100644
--- a/tools/testing/selftests/kvm/include/x86/svm_util.h
+++ b/tools/testing/selftests/kvm/include/x86/svm_util.h
@@ -16,17 +16,20 @@ struct svm_test_data {
/* VMCB */
struct vmcb *vmcb; /* gva */
void *vmcb_hva;
- uint64_t vmcb_gpa;
+ u64 vmcb_gpa;
/* host state-save area */
struct vmcb_save_area *save_area; /* gva */
void *save_area_hva;
- uint64_t save_area_gpa;
+ u64 save_area_gpa;
/* MSR-Bitmap */
void *msr; /* gva */
void *msr_hva;
- uint64_t msr_gpa;
+ u64 msr_gpa;
+
+ /* NPT */
+ u64 ncr3_gpa;
};
static inline void vmmcall(void)
@@ -53,9 +56,15 @@ static inline void vmmcall(void)
"clgi\n" \
)
-struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva);
+struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, gva_t *p_svm_gva);
void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp);
-void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
+void run_guest(struct vmcb *vmcb, u64 vmcb_gpa);
+
+static inline bool kvm_cpu_has_npt(void)
+{
+ return kvm_cpu_has(X86_FEATURE_NPT);
+}
+void vm_enable_npt(struct kvm_vm *vm);
int open_sev_dev_path_or_exit(void);
diff --git a/tools/testing/selftests/kvm/include/x86/ucall.h b/tools/testing/selftests/kvm/include/x86/ucall.h
index d3825dcc3cd9..0e4950041e3e 100644
--- a/tools/testing/selftests/kvm/include/x86/ucall.h
+++ b/tools/testing/selftests/kvm/include/x86/ucall.h
@@ -6,7 +6,7 @@
#define UCALL_EXIT_REASON KVM_EXIT_IO
-static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa)
{
}
diff --git a/tools/testing/selftests/kvm/include/x86/vmx.h b/tools/testing/selftests/kvm/include/x86/vmx.h
index edb3c391b982..90fffaf91595 100644
--- a/tools/testing/selftests/kvm/include/x86/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86/vmx.h
@@ -285,16 +285,16 @@ enum vmcs_field {
};
struct vmx_msr_entry {
- uint32_t index;
- uint32_t reserved;
- uint64_t value;
+ u32 index;
+ u32 reserved;
+ u64 value;
} __attribute__ ((aligned(16)));
#include "evmcs.h"
-static inline int vmxon(uint64_t phys)
+static inline int vmxon(u64 phys)
{
- uint8_t ret;
+ u8 ret;
__asm__ __volatile__ ("vmxon %[pa]; setna %[ret]"
: [ret]"=rm"(ret)
@@ -309,9 +309,9 @@ static inline void vmxoff(void)
__asm__ __volatile__("vmxoff");
}
-static inline int vmclear(uint64_t vmcs_pa)
+static inline int vmclear(u64 vmcs_pa)
{
- uint8_t ret;
+ u8 ret;
__asm__ __volatile__ ("vmclear %[pa]; setna %[ret]"
: [ret]"=rm"(ret)
@@ -321,9 +321,9 @@ static inline int vmclear(uint64_t vmcs_pa)
return ret;
}
-static inline int vmptrld(uint64_t vmcs_pa)
+static inline int vmptrld(u64 vmcs_pa)
{
- uint8_t ret;
+ u8 ret;
if (enable_evmcs)
return -1;
@@ -336,10 +336,10 @@ static inline int vmptrld(uint64_t vmcs_pa)
return ret;
}
-static inline int vmptrst(uint64_t *value)
+static inline int vmptrst(u64 *value)
{
- uint64_t tmp;
- uint8_t ret;
+ u64 tmp;
+ u8 ret;
if (enable_evmcs)
return evmcs_vmptrst(value);
@@ -356,9 +356,9 @@ static inline int vmptrst(uint64_t *value)
* A wrapper around vmptrst that ignores errors and returns zero if the
* vmptrst instruction fails.
*/
-static inline uint64_t vmptrstz(void)
+static inline u64 vmptrstz(void)
{
- uint64_t value = 0;
+ u64 value = 0;
vmptrst(&value);
return value;
}
@@ -391,8 +391,8 @@ static inline int vmlaunch(void)
"pop %%rcx;"
"pop %%rbp;"
: [ret]"=&a"(ret)
- : [host_rsp]"r"((uint64_t)HOST_RSP),
- [host_rip]"r"((uint64_t)HOST_RIP)
+ : [host_rsp]"r"((u64)HOST_RSP),
+ [host_rip]"r"((u64)HOST_RIP)
: "memory", "cc", "rbx", "r8", "r9", "r10",
"r11", "r12", "r13", "r14", "r15");
return ret;
@@ -426,8 +426,8 @@ static inline int vmresume(void)
"pop %%rcx;"
"pop %%rbp;"
: [ret]"=&a"(ret)
- : [host_rsp]"r"((uint64_t)HOST_RSP),
- [host_rip]"r"((uint64_t)HOST_RIP)
+ : [host_rsp]"r"((u64)HOST_RSP),
+ [host_rip]"r"((u64)HOST_RIP)
: "memory", "cc", "rbx", "r8", "r9", "r10",
"r11", "r12", "r13", "r14", "r15");
return ret;
@@ -447,10 +447,10 @@ static inline void vmcall(void)
"r10", "r11", "r12", "r13", "r14", "r15");
}
-static inline int vmread(uint64_t encoding, uint64_t *value)
+static inline int vmread(u64 encoding, u64 *value)
{
- uint64_t tmp;
- uint8_t ret;
+ u64 tmp;
+ u8 ret;
if (enable_evmcs)
return evmcs_vmread(encoding, value);
@@ -468,16 +468,16 @@ static inline int vmread(uint64_t encoding, uint64_t *value)
* A wrapper around vmread that ignores errors and returns zero if the
* vmread instruction fails.
*/
-static inline uint64_t vmreadz(uint64_t encoding)
+static inline u64 vmreadz(u64 encoding)
{
- uint64_t value = 0;
+ u64 value = 0;
vmread(encoding, &value);
return value;
}
-static inline int vmwrite(uint64_t encoding, uint64_t value)
+static inline int vmwrite(u64 encoding, u64 value)
{
- uint8_t ret;
+ u8 ret;
if (enable_evmcs)
return evmcs_vmwrite(encoding, value);
@@ -490,43 +490,41 @@ static inline int vmwrite(uint64_t encoding, uint64_t value)
return ret;
}
-static inline uint32_t vmcs_revision(void)
+static inline u32 vmcs_revision(void)
{
return rdmsr(MSR_IA32_VMX_BASIC);
}
struct vmx_pages {
void *vmxon_hva;
- uint64_t vmxon_gpa;
+ u64 vmxon_gpa;
void *vmxon;
void *vmcs_hva;
- uint64_t vmcs_gpa;
+ u64 vmcs_gpa;
void *vmcs;
void *msr_hva;
- uint64_t msr_gpa;
+ u64 msr_gpa;
void *msr;
void *shadow_vmcs_hva;
- uint64_t shadow_vmcs_gpa;
+ u64 shadow_vmcs_gpa;
void *shadow_vmcs;
void *vmread_hva;
- uint64_t vmread_gpa;
+ u64 vmread_gpa;
void *vmread;
void *vmwrite_hva;
- uint64_t vmwrite_gpa;
+ u64 vmwrite_gpa;
void *vmwrite;
- void *eptp_hva;
- uint64_t eptp_gpa;
- void *eptp;
-
void *apic_access_hva;
- uint64_t apic_access_gpa;
+ u64 apic_access_gpa;
void *apic_access;
+
+ u64 eptp_gpa;
};
union vmx_basic {
@@ -552,24 +550,15 @@ union vmx_ctrl_msr {
};
};
-struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
+struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, gva_t *p_vmx_gva);
bool prepare_for_vmx_operation(struct vmx_pages *vmx);
void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
bool load_vmcs(struct vmx_pages *vmx);
bool ept_1g_pages_supported(void);
-void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint64_t nested_paddr, uint64_t paddr);
-void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint64_t nested_paddr, uint64_t paddr, uint64_t size);
-void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint32_t memslot);
-void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint64_t addr, uint64_t size);
bool kvm_cpu_has_ept(void);
-void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint32_t eptp_memslot);
+void vm_enable_ept(struct kvm_vm *vm);
void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
#endif /* SELFTEST_KVM_VMX_H */