author | Paolo Bonzini <pbonzini@redhat.com> | 2020-04-23 14:08:58 -0400
---|---|---
committer | Paolo Bonzini <pbonzini@redhat.com> | 2020-05-13 12:14:49 -0400
commit | c300ab9f08df9e4b9f39d53a0691e234330df124 (patch) |
tree | 33c488983f3ba30112bca45e589103df4b00c1b6 /arch/x86/kvm/x86.c |
parent | 7ab0abdb553e62704be5b7879a0971acf9f36a06 (diff) |
download | lwn-c300ab9f08df9e4b9f39d53a0691e234330df124.tar.gz lwn-c300ab9f08df9e4b9f39d53a0691e234330df124.zip |
KVM: x86: Replace late check_nested_events() hack with more precise fix
Add an argument to interrupt_allowed and nmi_allowed to check whether
interrupt injection is blocked. Use the hook to handle the case where
an interrupt arrives between check_nested_events() and the injection
logic. Drop the retry of check_nested_events() that hack-a-fixed the
same condition.
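
For reference, on the VMX side the series gives the hook roughly the
shape sketched below; vmx_interrupt_blocked() and nested_exit_on_intr()
are assumed helpers that do not appear in this diff.

/*
 * Illustrative sketch only, not part of this diff: how a vendor hook
 * can consume the new for_injection argument.
 */
static bool vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	if (to_vmx(vcpu)->nested.nested_run_pending)
		return false;

	/*
	 * When the check is on behalf of injection, an IRQ that is
	 * supposed to cause a VM-Exit from L2 must not be injected
	 * directly, e.g. if it arrived after check_nested_events()
	 * already ran.
	 */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
		return false;

	return !vmx_interrupt_blocked(vcpu);
}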
Blocking injection is also a bit of a hack, e.g. KVM should do exiting
and non-exiting interrupt processing in a single pass, but it's a more
precise hack. The old comment is also misleading, e.g. KVM_REQ_EVENT is
purely an optimization; setting it on every run loop (which KVM doesn't
do) would not affect functionality, only performance.
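
A heavily simplified sketch of the KVM_REQ_EVENT consumer in
vcpu_enter_guest() illustrates the point; req_int_win and
req_immediate_exit are locals of the real function and the body is
abridged here.

/*
 * Simplified sketch, not part of this diff: the request bit only
 * decides *when* pending events are re-evaluated, so setting it
 * superfluously costs an extra pass through inject_pending_event(),
 * never correctness.
 */
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
	if (inject_pending_event(vcpu) != 0)
		req_immediate_exit = true;
}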
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200423022550.15113-13-sean.j.christopherson@intel.com>
[Extend to SVM, add SMI and NMI. Even though NMI and SMI cannot come
asynchronously right now, making the fix generic is easy and removes a
special case. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
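
The SVM extension gives its hooks the same shape; a sketch of the NMI
case is below, where nested_exit_on_nmi() and svm_nmi_blocked() are
assumed helpers that are not shown in this diff.

/*
 * Illustrative sketch only: the SVM-side NMI hook with the new
 * for_injection argument.
 */
static bool svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->nested.nested_run_pending)
		return false;

	/* An NMI that should cause a nested VM-Exit must not be injected. */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
		return false;

	return !svm_nmi_blocked(vcpu);
}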
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r-- | arch/x86/kvm/x86.c | 36
1 file changed, 12 insertions, 24 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cb250f16bdf5..ec20368ac025 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7766,32 +7766,20 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
 	if (kvm_event_needs_reinjection(vcpu))
 		return 0;
 
-	if (vcpu->arch.smi_pending && kvm_x86_ops.smi_allowed(vcpu)) {
+	if (vcpu->arch.smi_pending &&
+	    kvm_x86_ops.smi_allowed(vcpu, true)) {
 		vcpu->arch.smi_pending = false;
 		++vcpu->arch.smi_count;
 		enter_smm(vcpu);
-	} else if (vcpu->arch.nmi_pending && kvm_x86_ops.nmi_allowed(vcpu)) {
+	} else if (vcpu->arch.nmi_pending &&
+		   kvm_x86_ops.nmi_allowed(vcpu, true)) {
 		--vcpu->arch.nmi_pending;
 		vcpu->arch.nmi_injected = true;
 		kvm_x86_ops.set_nmi(vcpu);
-	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
-		/*
-		 * Because interrupts can be injected asynchronously, we are
-		 * calling check_nested_events again here to avoid a race condition.
-		 * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
-		 * proposal and current concerns. Perhaps we should be setting
-		 * KVM_REQ_EVENT only on certain events and not unconditionally?
-		 */
-		if (is_guest_mode(vcpu)) {
-			r = kvm_x86_ops.nested_ops->check_events(vcpu);
-			if (r != 0)
-				return r;
-		}
-		if (kvm_x86_ops.interrupt_allowed(vcpu)) {
-			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
-					    false);
-			kvm_x86_ops.set_irq(vcpu);
-		}
+	} else if (kvm_cpu_has_injectable_intr(vcpu) &&
+		   kvm_x86_ops.interrupt_allowed(vcpu, true)) {
+		kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false);
+		kvm_x86_ops.set_irq(vcpu);
 	}
 
 	return 0;
@@ -10203,12 +10191,12 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
 
 	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
 	    (vcpu->arch.nmi_pending &&
-	     kvm_x86_ops.nmi_allowed(vcpu)))
+	     kvm_x86_ops.nmi_allowed(vcpu, false)))
 		return true;
 
 	if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
 	    (vcpu->arch.smi_pending &&
-	     kvm_x86_ops.smi_allowed(vcpu)))
+	     kvm_x86_ops.smi_allowed(vcpu, false)))
 		return true;
 
 	if (kvm_arch_interrupt_allowed(vcpu) &&
@@ -10260,7 +10248,7 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 
 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
-	return kvm_x86_ops.interrupt_allowed(vcpu);
+	return kvm_x86_ops.interrupt_allowed(vcpu, false);
 }
 
 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
@@ -10425,7 +10413,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
 	 * If interrupts are off we cannot even use an artificial
 	 * halt state.
 	 */
-	return kvm_x86_ops.interrupt_allowed(vcpu);
+	return kvm_arch_interrupt_allowed(vcpu);
 }
 
 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,