Diffstat (limited to 'arch/powerpc/kvm/powerpc.c')
-rw-r--r--  arch/powerpc/kvm/powerpc.c  200
1 file changed, 179 insertions(+), 21 deletions(-)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 0a7c88786ec0..403e642c78f5 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -638,8 +638,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		r = 1;
 		break;
 	case KVM_CAP_SPAPR_RESIZE_HPT:
-		/* Disable this on POWER9 until code handles new HPTE format */
-		r = !!hv_enabled && !cpu_has_feature(CPU_FTR_ARCH_300);
+		r = !!hv_enabled;
 		break;
 #endif
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -763,7 +762,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
 	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
-	vcpu->arch.dec_expires = ~(u64)0;
+	vcpu->arch.dec_expires = get_tb();
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	mutex_init(&vcpu->arch.exit_timing_lock);
@@ -930,6 +929,34 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
 }
 #endif /* CONFIG_VSX */
 
+#ifdef CONFIG_ALTIVEC
+static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
+		u64 gpr)
+{
+	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+	u32 hi, lo;
+	u32 di;
+
+#ifdef __BIG_ENDIAN
+	hi = gpr >> 32;
+	lo = gpr & 0xffffffff;
+#else
+	lo = gpr >> 32;
+	hi = gpr & 0xffffffff;
+#endif
+
+	di = 2 - vcpu->arch.mmio_vmx_copy_nums;		/* doubleword index */
+	if (di > 1)
+		return;
+
+	if (vcpu->arch.mmio_host_swabbed)
+		di = 1 - di;
+
+	VCPU_VSX_VR(vcpu, index).u[di * 2] = hi;
+	VCPU_VSX_VR(vcpu, index).u[di * 2 + 1] = lo;
+}
+#endif /* CONFIG_ALTIVEC */
+
 #ifdef CONFIG_PPC_FPU
 static inline u64 sp_to_dp(u32 fprs)
 {
@@ -1033,6 +1060,11 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 		kvmppc_set_vsr_dword_dump(vcpu, gpr);
 		break;
 #endif
+#ifdef CONFIG_ALTIVEC
+	case KVM_MMIO_REG_VMX:
+		kvmppc_set_vmx_dword(vcpu, gpr);
+		break;
+#endif
 	default:
 		BUG();
 	}
@@ -1106,11 +1138,9 @@ int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 {
 	enum emulation_result emulated = EMULATE_DONE;
 
-	/* Currently, mmio_vsx_copy_nums only allowed to be less than 4 */
-	if ( (vcpu->arch.mmio_vsx_copy_nums > 4) ||
-		(vcpu->arch.mmio_vsx_copy_nums < 0) ) {
+	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
+	if (vcpu->arch.mmio_vsx_copy_nums > 4)
 		return EMULATE_FAIL;
-	}
 
 	while (vcpu->arch.mmio_vsx_copy_nums) {
 		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
@@ -1252,11 +1282,9 @@ int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	vcpu->arch.io_gpr = rs;
 
-	/* Currently, mmio_vsx_copy_nums only allowed to be less than 4 */
-	if ( (vcpu->arch.mmio_vsx_copy_nums > 4) ||
-		(vcpu->arch.mmio_vsx_copy_nums < 0) ) {
+	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
+	if (vcpu->arch.mmio_vsx_copy_nums > 4)
 		return EMULATE_FAIL;
-	}
 
 	while (vcpu->arch.mmio_vsx_copy_nums) {
 		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
@@ -1312,6 +1340,111 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
 }
 #endif /* CONFIG_VSX */
 
+#ifdef CONFIG_ALTIVEC
+/* handle quadword load access in two halves */
+int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
+		unsigned int rt, int is_default_endian)
+{
+	enum emulation_result emulated;
+
+	while (vcpu->arch.mmio_vmx_copy_nums) {
+		emulated = __kvmppc_handle_load(run, vcpu, rt, 8,
+				is_default_endian, 0);
+
+		if (emulated != EMULATE_DONE)
+			break;
+
+		vcpu->arch.paddr_accessed += run->mmio.len;
+		vcpu->arch.mmio_vmx_copy_nums--;
+	}
+
+	return emulated;
+}
+
+static inline int kvmppc_get_vmx_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
+{
+	vector128 vrs = VCPU_VSX_VR(vcpu, rs);
+	u32 di;
+	u64 w0, w1;
+
+	di = 2 - vcpu->arch.mmio_vmx_copy_nums;		/* doubleword index */
+	if (di > 1)
+		return -1;
+
+	if (vcpu->arch.mmio_host_swabbed)
+		di = 1 - di;
+
+	w0 = vrs.u[di * 2];
+	w1 = vrs.u[di * 2 + 1];
+
+#ifdef __BIG_ENDIAN
+	*val = (w0 << 32) | w1;
+#else
+	*val = (w1 << 32) | w0;
+#endif
+	return 0;
+}
+
+/* handle quadword store in two halves */
+int kvmppc_handle_store128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
+		unsigned int rs, int is_default_endian)
+{
+	u64 val = 0;
+	enum emulation_result emulated = EMULATE_DONE;
+
+	vcpu->arch.io_gpr = rs;
+
+	while (vcpu->arch.mmio_vmx_copy_nums) {
+		if (kvmppc_get_vmx_data(vcpu, rs, &val) == -1)
+			return EMULATE_FAIL;
+
+		emulated = kvmppc_handle_store(run, vcpu, val, 8,
+				is_default_endian);
+		if (emulated != EMULATE_DONE)
+			break;
+
+		vcpu->arch.paddr_accessed += run->mmio.len;
+		vcpu->arch.mmio_vmx_copy_nums--;
+	}
+
+	return emulated;
+}
+
+static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
+		struct kvm_run *run)
+{
+	enum emulation_result emulated = EMULATE_FAIL;
+	int r;
+
+	vcpu->arch.paddr_accessed += run->mmio.len;
+
+	if (!vcpu->mmio_is_write) {
+		emulated = kvmppc_handle_load128_by2x64(run, vcpu,
+				vcpu->arch.io_gpr, 1);
+	} else {
+		emulated = kvmppc_handle_store128_by2x64(run, vcpu,
+				vcpu->arch.io_gpr, 1);
+	}
+
+	switch (emulated) {
+	case EMULATE_DO_MMIO:
+		run->exit_reason = KVM_EXIT_MMIO;
+		r = RESUME_HOST;
+		break;
+	case EMULATE_FAIL:
+		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+		r = RESUME_HOST;
+		break;
+	default:
+		r = RESUME_GUEST;
+		break;
+	}
+	return r;
+}
+#endif /* CONFIG_ALTIVEC */
+
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 {
 	int r = 0;
@@ -1413,6 +1546,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	int r;
 
+	vcpu_load(vcpu);
+
 	if (vcpu->mmio_needed) {
 		vcpu->mmio_needed = 0;
 		if (!vcpu->mmio_is_write)
@@ -1427,7 +1562,19 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
 			if (r == RESUME_HOST) {
 				vcpu->mmio_needed = 1;
-				return r;
+				goto out;
+			}
+		}
+#endif
+#ifdef CONFIG_ALTIVEC
+		if (vcpu->arch.mmio_vmx_copy_nums > 0)
+			vcpu->arch.mmio_vmx_copy_nums--;
+
+		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
+			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
+			if (r == RESUME_HOST) {
+				vcpu->mmio_needed = 1;
+				goto out;
 			}
 		}
 #endif
@@ -1461,6 +1608,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 	kvm_sigset_deactivate(vcpu);
 
+out:
+	vcpu_put(vcpu);
 	return r;
 }
 
@@ -1608,23 +1757,31 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	return -EINVAL;
 }
 
-long kvm_arch_vcpu_ioctl(struct file *filp,
-                         unsigned int ioctl, unsigned long arg)
+long kvm_arch_vcpu_async_ioctl(struct file *filp,
+			       unsigned int ioctl, unsigned long arg)
 {
 	struct kvm_vcpu *vcpu = filp->private_data;
 	void __user *argp = (void __user *)arg;
-	long r;
 
-	switch (ioctl) {
-	case KVM_INTERRUPT: {
+	if (ioctl == KVM_INTERRUPT) {
 		struct kvm_interrupt irq;
-		r = -EFAULT;
 		if (copy_from_user(&irq, argp, sizeof(irq)))
-			goto out;
-		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
-		goto out;
+			return -EFAULT;
+		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
 	}
 
+	return -ENOIOCTLCMD;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+                         unsigned int ioctl, unsigned long arg)
+{
+	struct kvm_vcpu *vcpu = filp->private_data;
+	void __user *argp = (void __user *)arg;
+	long r;
+
+	vcpu_load(vcpu);
+	switch (ioctl) {
 	case KVM_ENABLE_CAP:
 	{
 		struct kvm_enable_cap cap;
@@ -1664,6 +1821,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	}
 
 out:
+	vcpu_put(vcpu);
 	return r;
 }
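
A note on the new CONFIG_ALTIVEC paths above: a 128-bit VMX MMIO access is completed as two 64-bit halves, with vcpu->arch.mmio_vmx_copy_nums counting down from 2 and di = 2 - mmio_vmx_copy_nums selecting which doubleword kvmppc_set_vmx_dword() and kvmppc_get_vmx_data() touch. The standalone C sketch below (hypothetical helper and types, not part of the patch; it assumes a little-endian host, i.e. the #else arm above) mirrors that indexing so the two-half sequence can be traced in userspace.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's vector128: four 32-bit words of a VMX register. */
typedef union {
	uint32_t u[4];
} vector128;

/*
 * Mirrors kvmppc_set_vmx_dword(): copy_nums counts down 2 -> 1 as the two
 * 8-byte halves of the quadword arrive, so di is 0 for the first half and
 * 1 for the second.  host_swabbed reverses the doubleword order, like the
 * "di = 1 - di" flip in the patch.
 */
static void set_vmx_dword(vector128 *vr, int copy_nums, int host_swabbed,
			  uint64_t gpr)
{
	int di = 2 - copy_nums;		/* doubleword index */
	uint32_t hi, lo;

	if (di < 0 || di > 1)		/* the kernel's di is unsigned, so "di > 1" suffices there */
		return;

	if (host_swabbed)
		di = 1 - di;

	/* Little-endian word split, matching the #else arm of the patch. */
	lo = (uint32_t)(gpr >> 32);
	hi = (uint32_t)(gpr & 0xffffffff);

	vr->u[di * 2] = hi;
	vr->u[di * 2 + 1] = lo;
}

int main(void)
{
	vector128 vr = { .u = { 0, 0, 0, 0 } };

	/* A 16-byte guest load completes as two 8-byte MMIO reads. */
	set_vmx_dword(&vr, 2, 0, 0x1111222233334444ULL);	/* first half  */
	set_vmx_dword(&vr, 1, 0, 0x5555666677778888ULL);	/* second half */

	printf("%08x %08x %08x %08x\n", vr.u[0], vr.u[1], vr.u[2], vr.u[3]);
	return 0;
}

With host_swabbed set, the same two calls would fill u[2..3] before u[0..1], which is the ordering the mmio_host_swabbed check in the patch accounts for.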