author     Paolo Bonzini <pbonzini@redhat.com>  2016-06-15 15:23:11 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>  2016-07-01 11:03:38 +0200
commit     f2485b3e0c6c0aa3a9546babc2fad3739e964ebb (patch)
tree       07e778e8128e70ad5b38458686ed0cc0f3b87d98 /arch/x86/kvm/x86.c
parent     91fa0f8e9e2937fd9360f326ad60d51908347afd (diff)
download   lwn-f2485b3e0c6c0aa3a9546babc2fad3739e964ebb.tar.gz
           lwn-f2485b3e0c6c0aa3a9546babc2fad3739e964ebb.zip
KVM: x86: use guest_exit_irqoff
This gains a few clock cycles per vmexit.  On Intel there is no need
anymore to enable the interrupts in vmx_handle_external_intr, since
we are using the "acknowledge interrupt on exit" feature.  AMD needs
to do that, and must be careful to avoid the interrupt shadow.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
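For the AMD side mentioned above, the usual way to open a brief interrupt window while stepping past the STI interrupt shadow looks roughly like the sketch below. The function name and its exact body are illustrative assumptions here, not a quote of the companion svm.c change:

/*
 * Illustrative sketch (assumed helper, kernel context): let a pending
 * interrupt in while IRQs are otherwise kept disabled around the
 * vmexit path.
 */
static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
{
	local_irq_enable();
	/*
	 * At least one instruction must execute between local_irq_enable()
	 * and local_irq_disable(); otherwise the CLI sits in the STI
	 * interrupt shadow and a pending interrupt (e.g. the timer) is
	 * never delivered here.  A NOP is enough to step past the shadow.
	 */
	asm("nop");
	local_irq_disable();
}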
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--  arch/x86/kvm/x86.c  11
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 618463abeec5..0cc6cf834cdd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6709,16 +6709,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	++vcpu->stat.exits;
-	/*
-	 * We must have an instruction between local_irq_enable() and
-	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
-	 * the interrupt shadow. The stat.exits increment will do nicely.
-	 * But we need to prevent reordering, hence this barrier():
-	 */
-	barrier();
-
-	guest_exit();
+	guest_exit_irqoff();
+	local_irq_enable();
 	preempt_enable();
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
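For context on the cycle savings: guest_exit() is, roughly speaking, guest_exit_irqoff() bracketed by an IRQ save/restore, so calling the _irqoff variant from a path that already runs with interrupts disabled drops the redundant flag save/restore and removes the need for the barrier()/stat.exits trick deleted above. A simplified sketch of that relationship, written to match the <linux/context_tracking.h> helpers of this era rather than quoting them verbatim:

/* Simplified sketch, not the verbatim <linux/context_tracking.h> code. */
static inline void guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* extra work the _irqoff variant avoids */
	guest_exit_irqoff();		/* leave guest context, account guest time */
	local_irq_restore(flags);
}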