author     Cathy Avery <cavery@redhat.com>       2020-04-14 16:11:06 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>   2020-05-13 12:14:24 -0400
commit     9c3d370a8efaac49b0520617c9ab4e552aefbf1e
tree       0f01cd29e4cc55cf31e8bbf016b5f435a4aa37bf
parent     6e085cbfb0f0c7de4ca0f370adb572b0e07a818c
KVM: SVM: Implement check_nested_events for NMI
Migrate nested guest NMI intercept processing to the new check_nested_events().
Signed-off-by: Cathy Avery <cavery@redhat.com>
Message-Id: <20200414201107.22952-2-cavery@redhat.com>
[Reorder clauses as NMIs have higher priority than IRQs; inject an
immediate vmexit as is now done for IRQ vmexits. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--  arch/x86/kvm/svm/nested.c | 21
-rw-r--r--  arch/x86/kvm/svm/svm.c    |  6
-rw-r--r--  arch/x86/kvm/svm/svm.h    | 15
3 files changed, 23 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 4654668798b7..3f268a3041a3 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -799,6 +799,20 @@ int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
 	return vmexit;
 }
 
+static bool nested_exit_on_nmi(struct vcpu_svm *svm)
+{
+	return (svm->nested.intercept & (1ULL << INTERCEPT_NMI));
+}
+
+static void nested_svm_nmi(struct vcpu_svm *svm)
+{
+	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
+	svm->vmcb->control.exit_info_1 = 0;
+	svm->vmcb->control.exit_info_2 = 0;
+
+	nested_svm_vmexit(svm);
+}
+
 static void nested_svm_intr(struct vcpu_svm *svm)
 {
 	trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
@@ -822,6 +836,13 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
 		kvm_event_needs_reinjection(vcpu) || svm->nested.exit_required ||
 		svm->nested.nested_run_pending;
 
+	if (vcpu->arch.nmi_pending && nested_exit_on_nmi(svm)) {
+		if (block_nested_events)
+			return -EBUSY;
+		nested_svm_nmi(svm);
+		return 0;
+	}
+
 	if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(svm)) {
 		if (block_nested_events)
 			return -EBUSY;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index c2a4e2d21676..f97f29e1eb49 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3070,9 +3070,10 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *vmcb = svm->vmcb;
 	int ret;
+
 	ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
 	      !(svm->vcpu.arch.hflags & HF_NMI_MASK);
-	ret = ret && gif_set(svm) && nested_svm_nmi(svm);
+	ret = ret && gif_set(svm);
 
 	return ret;
 }
@@ -3150,9 +3151,6 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
 		return; /* STGI will cause a vm exit */
 	}
 
-	if (svm->nested.exit_required)
-		return; /* we're not going to run the guest yet */
-
 	/*
 	 * Something prevents NMI from been injected. Single step over possible
 	 * problem (IRET or exception injection or interrupt shadow)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 435f3328c99c..a2bc33aadb67 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -373,21 +373,6 @@ void disable_nmi_singlestep(struct vcpu_svm *svm);
 #define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
 #define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */
 
-/* This function returns true if it is save to enable the nmi window */
-static inline bool nested_svm_nmi(struct vcpu_svm *svm)
-{
-	if (!is_guest_mode(&svm->vcpu))
-		return true;
-
-	if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
-		return true;
-
-	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
-	svm->nested.exit_required = true;
-
-	return false;
-}
-
 static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
 {
 	return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
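
As a reading aid, below is a small standalone model of the ordering this patch establishes in svm_check_nested_events(): a pending NMI is checked before a pending interrupt, and either check defers with -EBUSY while nested event injection must be blocked. This is not kernel code; the struct and helper names in it are stand-ins invented for illustration only.

/* Standalone model of the check ordering in svm_check_nested_events();
 * the struct and helpers are stand-ins, not the kernel's definitions. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct model_vcpu {
	bool nmi_pending;          /* models vcpu->arch.nmi_pending            */
	bool irq_pending;          /* models kvm_cpu_has_interrupt()           */
	bool exits_on_nmi;         /* models the INTERCEPT_NMI bit             */
	bool exits_on_intr;        /* models nested_exit_on_intr()             */
	bool block_nested_events;  /* reinjection / exit_required / run_pending */
};

/* NMIs have higher priority than IRQs, so the NMI clause comes first. */
static int check_nested_events(struct model_vcpu *v, const char **action)
{
	if (v->nmi_pending && v->exits_on_nmi) {
		if (v->block_nested_events)
			return -EBUSY;
		*action = "SVM_EXIT_NMI vmexit";   /* like nested_svm_nmi()  */
		return 0;
	}
	if (v->irq_pending && v->exits_on_intr) {
		if (v->block_nested_events)
			return -EBUSY;
		*action = "SVM_EXIT_INTR vmexit";  /* like nested_svm_intr() */
		return 0;
	}
	*action = "no nested vmexit";
	return 0;
}

int main(void)
{
	struct model_vcpu v = {
		.nmi_pending = true, .irq_pending = true,
		.exits_on_nmi = true, .exits_on_intr = true,
	};
	const char *action;
	int ret = check_nested_events(&v, &action);

	/* With both events pending, the NMI wins: prints "SVM_EXIT_NMI vmexit". */
	printf("ret=%d, action=%s\n", ret, action);
	return 0;
}

With both an NMI and an interrupt pending, the model takes the SVM_EXIT_NMI path first, mirroring the clause order noted in the bracketed remark above.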