author		Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2011-09-22 17:02:48 +0800
committer	Avi Kivity <avi@redhat.com>			2011-12-27 11:16:50 +0200
commit		1cb3f3ae5a3855ba430430706da4201ace1d6ec4 (patch)
tree		1ff844904958cf669c86650da4601eb392f14a91 /arch/x86/kvm/mmu.c
parent		d5ae7ce835cc89556dc18e2070e754f026402efa (diff)
KVM: x86: retry non-page-table writing instructions
If the emulation is caused by a #PF and the faulting instruction is not a
page-table writing instruction, then the VM-EXIT was caused by shadow-page
write protection; we can zap the shadow page and retry the instruction
directly.

The idea is from Avi.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
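
The diffstat below is limited to mmu.c; the emulator-side half of this commit
lives in arch/x86/kvm/x86.c and is not shown here. As a hedged sketch of how
that side decides to retry rather than emulate (helper names such as
x86_page_table_writing_insn() and the last_retry_eip/last_retry_addr
bookkeeping are assumptions based on this series, not confirmed by the hunks
below):

	/*
	 * Hedged sketch of the emulator-side retry decision; the real
	 * code is in arch/x86/kvm/x86.c, outside this diffstat.
	 */
	static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
				      unsigned long cr2, int emulation_type)
	{
		struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
		unsigned long gpa = cr2;

		/* The caller must have opted in via EMULTYPE_RETRY. */
		if (!(emulation_type & EMULTYPE_RETRY))
			return false;

		/*
		 * A page-table writing instruction would just re-fault after
		 * the shadow page is zapped and rebuilt, so emulate it.
		 */
		if (x86_page_table_writing_insn(ctxt))
			return false;

		/* Don't retry the same instruction on the same address twice. */
		if (ctxt->eip == vcpu->arch.last_retry_eip &&
		    cr2 == vcpu->arch.last_retry_addr)
			return false;

		vcpu->arch.last_retry_eip = ctxt->eip;
		vcpu->arch.last_retry_addr = cr2;

		/* With shadow paging, cr2 is a guest virtual address. */
		if (!vcpu->arch.mmu.direct_map)
			gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);

		/* Zap the shadow page so the guest can redo the write natively. */
		kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);

		return true;
	}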
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c | 25
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 232c5a30ddc8..7a22eb81b4ca 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1998,7 +1998,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
 }
 
-static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
+int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node;
@@ -2007,7 +2007,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 
 	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
 	r = 0;
-
+	spin_lock(&kvm->mmu_lock);
 	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
 		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
 			 sp->role.word);
@@ -2015,8 +2015,11 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
 	}
 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
+	spin_unlock(&kvm->mmu_lock);
+
 	return r;
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
 
 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 {
@@ -3698,9 +3701,8 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 
 	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
 
-	spin_lock(&vcpu->kvm->mmu_lock);
 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-	spin_unlock(&vcpu->kvm->mmu_lock);
+
 	return r;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
@@ -3721,10 +3723,18 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 }
 
+static bool is_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t addr)
+{
+	if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu))
+		return vcpu_match_mmio_gpa(vcpu, addr);
+
+	return vcpu_match_mmio_gva(vcpu, addr);
+}
+
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
 		       void *insn, int insn_len)
 {
-	int r;
+	int r, emulation_type = EMULTYPE_RETRY;
 	enum emulation_result er;
 
 	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
@@ -3736,7 +3746,10 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
 		goto out;
 	}
 
-	er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
+	if (is_mmio_page_fault(vcpu, cr2))
+		emulation_type = 0;
+
+	er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
 
 	switch (er) {
 	case EMULATE_DONE:
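
Two notes on the design. kvm_mmu_page_fault() clears EMULTYPE_RETRY for faults
that match a cached MMIO access, since zapping a shadow page can never resolve
an MMIO exit; those must always be emulated. And moving the mmu_lock
acquisition into kvm_mmu_unprotect_page() lets the newly exported function be
called directly from the emulator path without the caller managing the lock.
On that path, x86_emulate_instruction() is expected to consume the flag
roughly as follows (a sketch, not the verbatim x86.c change, which is outside
this diffstat):

	/*
	 * Hedged sketch: early exit from x86_emulate_instruction() once
	 * retry_instruction() (sketched above) has zapped the shadow page,
	 * so the guest re-executes the instruction natively instead of
	 * going through the emulator.
	 */
	if (retry_instruction(ctxt, cr2, emulation_type))
		return EMULATE_DONE;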