author     Sean Christopherson <sean.j.christopherson@intel.com>  2020-03-20 14:28:27 -0700
committer  Paolo Bonzini <pbonzini@redhat.com>                     2020-04-21 09:12:56 -0400
commit     4a632ac6ca66fb29b94a16495624c58f4d313f2f (patch)
tree       53e2d7f264074c7b09fa6b0393cb40d33dd4d636 /arch/x86/kvm/mmu/mmu.c
parent     b869855badd1387bd12415e4d5571e931825b546 (diff)
KVM: x86/mmu: Add separate override for MMU sync during fast CR3 switch
Add a separate "skip" override for MMU sync, as a future change to avoid
TLB flushes on nested VMX transitions may need to sync the MMU even when
the TLB flush itself is unnecessary.
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200320212833.3507-32-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu/mmu.c  13
1 file changed, 7 insertions, 6 deletions
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 6a939204d467..97d1e9b80b8e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4303,7 +4303,7 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 
 static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 			      union kvm_mmu_page_role new_role,
-			      bool skip_tlb_flush)
+			      bool skip_tlb_flush, bool skip_mmu_sync)
 {
 	if (!fast_cr3_switch(vcpu, new_cr3, new_role)) {
 		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
@@ -4318,10 +4318,10 @@ static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 	 */
 	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
 
-	if (!skip_tlb_flush) {
+	if (!skip_mmu_sync)
 		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+	if (!skip_tlb_flush)
 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
-	}
 
 	/*
 	 * The last MMIO access's GVA and GPA are cached in the VCPU. When
@@ -4334,10 +4334,11 @@ static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 	__clear_sp_write_flooding_count(page_header(vcpu->arch.mmu->root_hpa));
 }
 
-void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush)
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush,
+		     bool skip_mmu_sync)
 {
 	__kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu),
-			  skip_tlb_flush);
+			  skip_tlb_flush, skip_mmu_sync);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_new_cr3);
 
@@ -5030,7 +5031,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
 						   execonly, level);
 
-	__kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false);
+	__kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false, false);
 
 	if (new_role.as_u64 == context->mmu_role.as_u64)
 		return;
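For illustration, a minimal sketch of how a future caller might use the now-decoupled overrides. The helper example_nested_cr3_load is hypothetical and not part of this patch; only the kvm_mmu_new_cr3() signature introduced above is taken from the diff:

/*
 * Hypothetical caller, for illustration only: a nested transition whose
 * TLB flush is handled elsewhere (or is architecturally unnecessary) can
 * now skip the flush while still requesting the MMU sync, a combination
 * the old single skip_tlb_flush override could not express.
 */
static void example_nested_cr3_load(struct kvm_vcpu *vcpu, gpa_t new_cr3)
{
	/* skip_tlb_flush = true, skip_mmu_sync = false */
	kvm_mmu_new_cr3(vcpu, new_cr3, true, false);
}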