author     Wanpeng Li <wanpengli@tencent.com>    2020-04-10 10:47:03 -0700
committer  Paolo Bonzini <pbonzini@redhat.com>   2020-04-21 09:13:10 -0400
commit     a9ab13ff6e844ad5b3ed39339e6db9a76bb539ad
tree       baeae7a8f3432482d785309797a7124a3c6da7aa   /arch/x86/kvm/svm/svm.c
parent     873e1da16918a83cc7440e299dda0dc7ab6db34c
KVM: X86: Improve latency for single target IPI fastpath
From our observations in cloud environments, IPIs and timers cause the bulk
of MSR-write vmexits; let's optimize virtual IPI latency more aggressively
by injecting the target IPI as soon as possible.
Running the kvm-unit-tests/vmexit.flat IPI test on an SKX server, with the
adaptive advance lapic timer and adaptive halt-polling disabled to avoid
interference, this patch gives another 7% improvement:
w/o fastpath   -> x86.c fastpath    4238 -> 3543    16.4%
x86.c fastpath -> vmx.c fastpath    3543 -> 3293     7%
w/o fastpath   -> vmx.c fastpath    4238 -> 3293    22.3%
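(The percentages follow directly from the reported numbers:
(4238 - 3543) / 4238 ~= 16.4%, (3543 - 3293) / 3543 ~= 7.1%, and
(4238 - 3293) / 4238 ~= 22.3%.)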
Cc: Haiwei Li <lihaiwei@tencent.com>
Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200410174703.1138-3-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/svm/svm.c')
-rw-r--r--   arch/x86/kvm/svm/svm.c | 24
1 file changed, 16 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 747e60c65c98..a6f4e1bdb045 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3299,10 +3299,21 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
 	svm_complete_interrupts(svm);
 }
 
+static enum exit_fastpath_completion svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
+{
+	if (!is_guest_mode(vcpu) &&
+	    to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
+	    to_svm(vcpu)->vmcb->control.exit_info_1)
+		return handle_fastpath_set_msr_irqoff(vcpu);
+
+	return EXIT_FASTPATH_NONE;
+}
+
 void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
 
-static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+static enum exit_fastpath_completion svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
+	enum exit_fastpath_completion exit_fastpath;
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
@@ -3314,7 +3325,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * again.
 	 */
 	if (unlikely(svm->nested.exit_required))
-		return;
+		return EXIT_FASTPATH_NONE;
 
 	/*
 	 * Disable singlestep if we're injecting an interrupt/exception.
@@ -3398,6 +3409,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	stgi();
 
 	/* Any pending NMI will happen here */
+	exit_fastpath = svm_exit_handlers_fastpath(vcpu);
 
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
 		kvm_after_interrupt(&svm->vcpu);
@@ -3426,6 +3438,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 		svm_handle_mce(svm);
 
 	mark_all_clean(svm->vmcb);
+	return exit_fastpath;
 }
 
 static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root)
@@ -3727,13 +3740,8 @@ out:
 	return ret;
 }
 
-static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu,
-	enum exit_fastpath_completion *exit_fastpath)
+static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
 {
-	if (!is_guest_mode(vcpu) &&
-	    to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
-	    to_svm(vcpu)->vmcb->control.exit_info_1)
-		*exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
 }
 
 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
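To make the control flow easier to see outside of kernel context, here is a
minimal, self-contained C sketch of the pattern the patch applies: the
MSR-write fastpath check moves out of the later handle_exit_irqoff()
callback and into the vcpu-run path itself, right after the vmexit, so a
single-target IPI can be completed before the generic exit handler ever
runs. All names below (struct vcpu, EXIT_MSR, EXIT_FASTPATH_HANDLED, the
printf placeholders) are simplified stand-ins, not the kernel's
definitions.

	/* Minimal sketch of the patch's pattern; not kernel code. */
	#include <stdbool.h>
	#include <stdio.h>

	enum exit_fastpath_completion {
		EXIT_FASTPATH_NONE,
		EXIT_FASTPATH_HANDLED,	/* stand-in for "exit fully handled here" */
	};

	struct vcpu {
		bool guest_mode;	/* nested guest active? */
		int  exit_code;		/* why the guest exited */
		int  exit_info_1;	/* for MSR exits: 0 = RDMSR, 1 = WRMSR */
	};

	#define EXIT_MSR 0x7c		/* stand-in for SVM_EXIT_MSR */

	/* Stand-in for handle_fastpath_set_msr_irqoff(): pretend the WRMSR
	 * was a single-target ICR write that we can complete immediately. */
	static enum exit_fastpath_completion fastpath_set_msr(struct vcpu *vcpu)
	{
		printf("fastpath: IPI delivered right after vmexit\n");
		return EXIT_FASTPATH_HANDLED;
	}

	/* Mirrors the new svm_exit_handlers_fastpath(): only non-nested
	 * WRMSR exits qualify for fast handling. */
	static enum exit_fastpath_completion exit_handlers_fastpath(struct vcpu *vcpu)
	{
		if (!vcpu->guest_mode && vcpu->exit_code == EXIT_MSR &&
		    vcpu->exit_info_1)
			return fastpath_set_msr(vcpu);
		return EXIT_FASTPATH_NONE;
	}

	/* Mirrors svm_vcpu_run() after the patch: enter the guest, then try
	 * the fastpath immediately and hand the result to the run loop. */
	static enum exit_fastpath_completion vcpu_run(struct vcpu *vcpu)
	{
		/* ... guest entry; the vmexit fills exit_code/exit_info_1 ... */
		return exit_handlers_fastpath(vcpu);
	}

	int main(void)
	{
		struct vcpu vcpu = {
			.guest_mode = false, .exit_code = EXIT_MSR,
			.exit_info_1 = 1,
		};

		/* The run loop can now skip slow-path exit handling entirely. */
		if (vcpu_run(&vcpu) == EXIT_FASTPATH_HANDLED)
			printf("generic exit handler skipped\n");
		return 0;
	}

The design point is the same as in the real patch: returning the fastpath
result from the run function lets the check execute as early as possible
after the vmexit instead of waiting for the separate irqoff callback, which
is why svm_handle_exit_irqoff() is left empty in the diff above.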