 arch/x86/include/asm/kvm_emulate.h |  2 ++
 arch/x86/include/asm/kvm_host.h    | 17 ++++-------------
 arch/x86/kvm/mmu.c                 |  5 +++--
 arch/x86/kvm/paging_tmpl.h         |  6 +++---
 arch/x86/kvm/svm.c                 |  7 ++++---
 arch/x86/kvm/x86.c                 | 44 ++++++++++++++++++++++--------------------
 6 files changed, 40 insertions(+), 41 deletions(-)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 87d017e276f4..bf70ecea3974 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -19,6 +19,8 @@ struct x86_exception {
 	u8 vector;
 	bool error_code_valid;
 	u16 error_code;
+	bool nested_page_fault;
+	u64 address; /* cr2 or nested page fault gpa */
 };
 
 /*
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9980a2484624..0c0941db31c4 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -242,7 +242,8 @@ struct kvm_mmu {
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
 	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
 	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err, bool no_apf);
-	void (*inject_page_fault)(struct kvm_vcpu *vcpu);
+	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
+				  struct x86_exception *fault);
 	void (*free)(struct kvm_vcpu *vcpu);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
 			    struct x86_exception *exception);
@@ -318,16 +319,6 @@ struct kvm_vcpu_arch {
 	 */
 	struct kvm_mmu *walk_mmu;
 
-	/*
-	 * This struct is filled with the necessary information to propagate a
-	 * page fault into the guest
-	 */
-	struct {
-		u64 address;
-		unsigned error_code;
-		bool nested;
-	} fault;
-
 	/* only needed in kvm_pv_mmu_op() path, but it's hot so
 	 * put it here to avoid allocation */
 	struct kvm_pv_mmu_op_buffer mmu_op_buffer;
@@ -686,11 +677,11 @@ void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
-void kvm_inject_page_fault(struct kvm_vcpu *vcpu);
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			    gfn_t gfn, void *data, int offset, int len,
 			    u32 access);
-void kvm_propagate_fault(struct kvm_vcpu *vcpu);
+void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
 
 int kvm_pic_set_irq(void *opaque, int irq, int level);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9ce041469a8e..d35950087e3d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2736,9 +2736,10 @@ static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 	return vcpu->arch.cr3;
 }
 
-static void inject_page_fault(struct kvm_vcpu *vcpu)
+static void inject_page_fault(struct kvm_vcpu *vcpu,
+			      struct x86_exception *fault)
 {
-	vcpu->arch.mmu.inject_page_fault(vcpu);
+	vcpu->arch.mmu.inject_page_fault(vcpu, fault);
 }
 
 static void paging_free(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index ad5a5a28b96e..d5a0a11d33a1 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -279,8 +279,8 @@ error:
 	if (rsvd_fault)
 		walker->fault.error_code |= PFERR_RSVD_MASK;
 
-	vcpu->arch.fault.address = addr;
-	vcpu->arch.fault.error_code = walker->fault.error_code;
+	walker->fault.address = addr;
+	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
 
 	trace_kvm_mmu_walker_error(walker->fault.error_code);
 	return 0;
@@ -568,7 +568,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	 */
 	if (!r) {
 		pgprintk("%s: guest page fault\n", __func__);
-		inject_page_fault(vcpu);
+		inject_page_fault(vcpu, &walker.fault);
 		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
 		return 0;
 	}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 28274cf307ba..b985cb81a573 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1647,14 +1647,15 @@ static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
 	force_new_asid(vcpu);
 }
 
-static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu)
+static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
+				       struct x86_exception *fault)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->control.exit_code = SVM_EXIT_NPF;
 	svm->vmcb->control.exit_code_hi = 0;
-	svm->vmcb->control.exit_info_1 = vcpu->arch.fault.error_code;
-	svm->vmcb->control.exit_info_2 = vcpu->arch.fault.address;
+	svm->vmcb->control.exit_info_1 = fault->error_code;
+	svm->vmcb->control.exit_info_2 = fault->address;
 
 	nested_svm_vmexit(svm);
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a7a7decba43f..47e5a41cc40e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -334,23 +334,19 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
 
-void kvm_inject_page_fault(struct kvm_vcpu *vcpu)
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 {
-	unsigned error_code = vcpu->arch.fault.error_code;
-
 	++vcpu->stat.pf_guest;
-	vcpu->arch.cr2 = vcpu->arch.fault.address;
-	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
+	vcpu->arch.cr2 = fault->address;
+	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
 }
 
-void kvm_propagate_fault(struct kvm_vcpu *vcpu)
+void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 {
-	if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested)
-		vcpu->arch.nested_mmu.inject_page_fault(vcpu);
+	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
+		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
 	else
-		vcpu->arch.mmu.inject_page_fault(vcpu);
-
-	vcpu->arch.fault.nested = false;
+		vcpu->arch.mmu.inject_page_fault(vcpu, fault);
 }
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
@@ -3610,8 +3606,6 @@ static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 	/* NPT walks are always user-walks */
 	access |= PFERR_USER_MASK;
 	t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
-	if (t_gpa == UNMAPPED_GVA)
-		vcpu->arch.fault.nested = true;
 
 	return t_gpa;
 }
@@ -4259,7 +4253,7 @@ static void inject_emulated_exception(struct kvm_vcpu *vcpu)
 {
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
 	if (ctxt->exception.vector == PF_VECTOR)
-		kvm_propagate_fault(vcpu);
+		kvm_propagate_fault(vcpu, &ctxt->exception);
 	else if (ctxt->exception.error_code_valid)
 		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
 				      ctxt->exception.error_code);
@@ -6264,6 +6258,8 @@ static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
 
 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 				     struct kvm_async_pf *work)
 {
+	struct x86_exception fault;
+
 	trace_kvm_async_pf_not_present(work->arch.token, work->gva);
 	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
@@ -6272,15 +6268,20 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 	      kvm_x86_ops->get_cpl(vcpu) == 0))
 		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
 	else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
-		vcpu->arch.fault.error_code = 0;
-		vcpu->arch.fault.address = work->arch.token;
-		kvm_inject_page_fault(vcpu);
+		fault.vector = PF_VECTOR;
+		fault.error_code_valid = true;
+		fault.error_code = 0;
+		fault.nested_page_fault = false;
+		fault.address = work->arch.token;
+		kvm_inject_page_fault(vcpu, &fault);
 	}
 }
 
 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 				 struct kvm_async_pf *work)
 {
+	struct x86_exception fault;
+
 	trace_kvm_async_pf_ready(work->arch.token, work->gva);
 	if (is_error_page(work->page))
 		work->arch.token = ~0; /* broadcast wakeup */
@@ -6289,9 +6290,12 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 
 	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
 	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
-		vcpu->arch.fault.error_code = 0;
-		vcpu->arch.fault.address = work->arch.token;
-		kvm_inject_page_fault(vcpu);
+		fault.vector = PF_VECTOR;
+		fault.error_code_valid = true;
+		fault.error_code = 0;
+		fault.nested_page_fault = false;
+		fault.address = work->arch.token;
+		kvm_inject_page_fault(vcpu, &fault);
 	}
 	vcpu->arch.apf.halted = false;
 }
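
The net effect of the patch is that a page fault is now described entirely by a struct x86_exception built at the fault site and handed down the call chain, instead of by a shared vcpu->arch.fault scratch area that had to be written before injection and cleaned up afterwards. The standalone sketch below models that calling convention; the demo_* names and stub types are hypothetical stand-ins for illustration only, not kernel code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PF_VECTOR 14

/* Same layout as the patched struct in kvm_emulate.h. */
struct x86_exception {
	uint8_t vector;
	bool error_code_valid;
	uint16_t error_code;
	bool nested_page_fault;
	uint64_t address;	/* cr2 or nested page fault gpa */
};

/* Stub vcpu carrying only the fields this sketch touches. */
struct demo_vcpu {
	uint64_t cr2;
	unsigned long pf_guest;	/* stands in for vcpu->stat.pf_guest */
};

/* Stand-in for kvm_queue_exception_e(). */
static void demo_queue_exception_e(struct demo_vcpu *vcpu, int nr,
				   unsigned int error_code)
{
	printf("queue exception %d, error_code=%u, cr2=%#llx\n",
	       nr, error_code, (unsigned long long)vcpu->cr2);
}

/*
 * Mirrors the reworked kvm_inject_page_fault(): the fault description
 * arrives as an argument instead of being read from vcpu->arch.fault.
 */
static void demo_inject_page_fault(struct demo_vcpu *vcpu,
				   struct x86_exception *fault)
{
	++vcpu->pf_guest;
	vcpu->cr2 = fault->address;
	demo_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
}

int main(void)
{
	struct demo_vcpu vcpu = { 0 };
	struct x86_exception fault;

	/*
	 * The caller fills the exception on its own stack, as the
	 * async-pf paths in x86.c now do, rather than writing into a
	 * shared vcpu->arch.fault scratch area.
	 */
	fault.vector = PF_VECTOR;
	fault.error_code_valid = true;
	fault.error_code = 0;
	fault.nested_page_fault = false;
	fault.address = 0x1234;	/* e.g. work->arch.token */

	demo_inject_page_fault(&vcpu, &fault);
	return 0;
}

One practical consequence visible in the diff: kvm_propagate_fault() no longer has to reset vcpu->arch.fault.nested after injecting, and translate_nested_gpa() no longer sets it as a side effect; the guest page-table walker records nested_page_fault in its own walker->fault instead, so no stale state can leak into the next fault.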