Diffstat (limited to 'arch/arm64/kvm/hyp/nvhe/hyp-main.c')
-rw-r--r-- | arch/arm64/kvm/hyp/nvhe/hyp-main.c | 45
1 file changed, 39 insertions, 6 deletions
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 82df7fc24760..bde658d51404 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -12,9 +12,11 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 
-#define cpu_reg(ctxt, r)	(ctxt)->regs.regs[r]
-#define DECLARE_REG(type, name, ctxt, reg)	\
-				type name = (type)cpu_reg(ctxt, (reg))
+#include <nvhe/trap_handler.h>
+
+DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
+
+void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
 
 static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 {
@@ -150,12 +152,43 @@ inval:
 	cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
 }
 
+static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
+{
+	__kvm_hyp_host_forward_smc(host_ctxt);
+}
+
+static void skip_host_instruction(void)
+{
+	write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
+}
+
+static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
+{
+	bool handled;
+
+	handled = kvm_host_psci_handler(host_ctxt);
+	if (!handled)
+		default_host_smc_handler(host_ctxt);
+
+	/*
+	 * Unlike HVC, the return address of an SMC is the instruction's PC.
+	 * Move the return address past the instruction.
+	 */
+	skip_host_instruction();
+}
+
 void handle_trap(struct kvm_cpu_context *host_ctxt)
 {
 	u64 esr = read_sysreg_el2(SYS_ESR);
 
-	if (unlikely(ESR_ELx_EC(esr) != ESR_ELx_EC_HVC64))
+	switch (ESR_ELx_EC(esr)) {
+	case ESR_ELx_EC_HVC64:
+		handle_host_hcall(host_ctxt);
+		break;
+	case ESR_ELx_EC_SMC64:
+		handle_host_smc(host_ctxt);
+		break;
+	default:
 		hyp_panic();
-
-	handle_host_hcall(host_ctxt);
+	}
 }
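Not part of the commit above: a minimal standalone C sketch of the dispatch pattern this patch introduces in handle_trap(). The exception-class values 0x16/0x17 match ESR_ELx_EC_HVC64/ESR_ELx_EC_SMC64, but esr_ec(), handle_hcall(), handle_smc() and dispatch_trap() are illustrative placeholders, not the hypervisor code.

#include <stdint.h>
#include <stdio.h>

/* ESR_ELx exception-class values for 64-bit HVC and SMC traps. */
#define EC_HVC64	0x16
#define EC_SMC64	0x17

/* ESR_ELx.EC lives in bits [31:26] of the syndrome register. */
static unsigned int esr_ec(uint64_t esr)
{
	return (esr >> 26) & 0x3f;
}

static void handle_hcall(void)
{
	puts("HVC from host");
}

static void handle_smc(void)
{
	puts("SMC from host");
	/*
	 * In the patch, this path also advances ELR_EL2 by 4
	 * (skip_host_instruction), because the saved return address of a
	 * trapped SMC points at the SMC instruction itself, unlike HVC.
	 */
}

static void dispatch_trap(uint64_t esr)
{
	switch (esr_ec(esr)) {
	case EC_HVC64:
		handle_hcall();
		break;
	case EC_SMC64:
		handle_smc();
		break;
	default:
		/* The hypervisor panics on any other exception class. */
		puts("unexpected trap");
	}
}

int main(void)
{
	dispatch_trap((uint64_t)EC_SMC64 << 26);	/* prints "SMC from host" */
	return 0;
}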