path: root/arch/x86/kvm/vmx.c
author     Gleb Natapov <gleb@redhat.com>        2013-09-25 12:51:33 +0300
committer  Paolo Bonzini <pbonzini@redhat.com>   2013-09-30 09:14:23 +0200
commit     e0b890d35cdff506b72129b55a166d22fc9bfa64 (patch)
tree       103b203c928e87b11c163a202e0441edda2b5e8a /arch/x86/kvm/vmx.c
parent     6a3f95a6b04cd5efaa45269e453ef30b8680062e (diff)
KVM: nVMX: Amend nested_run_pending logic
An EXIT_REASON_VMLAUNCH/EXIT_REASON_VMRESUME exit does not mean that the nested VM
will actually run during the next entry. Move the setting of nested_run_pending
closer to the vmentry emulation code, and move its clearing closer to vmexit, to
minimize the amount of code that erroneously runs with nested_run_pending set.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--  arch/x86/kvm/vmx.c | 26 ++++++++++++--------------
1 file changed, 12 insertions(+), 14 deletions(-)
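As a reading aid, here is a minimal, self-contained toy model of the invariant the
patch establishes: nested_run_pending is set only by the VMLAUNCH/VMRESUME emulation
path that actually commits to entering L2, and it is cleared immediately after the
hardware vmexit, so no other exit-handling code runs with the flag set. This is not
kernel code; apart from the nested_run_pending flag and the one-shot KVM_REQ_EVENT
re-arm idea, every name below is illustrative.

/* Toy model only; the real logic is in the hunks below. */
#include <stdbool.h>
#include <stdio.h>

struct nested_state {
	bool nested_run_pending;	/* models vmx->nested.nested_run_pending */
	bool event_request;		/* stands in for KVM_REQ_EVENT */
};

/* Models nested_vmx_run(): after this patch, the only place the flag is set. */
static void emulate_vmlaunch(struct nested_state *n)
{
	/* ...guest-mode entry checks would go here... */
	n->nested_run_pending = true;
}

/* Models the tail of vmx_vcpu_run(): runs right after a hardware vmexit. */
static void after_hardware_vmexit(struct nested_state *n)
{
	/* re-arm the one-shot event request if it was held back */
	if (n->nested_run_pending)
		n->event_request = true;
	/* the pending nested entry has now happened; clear the flag */
	n->nested_run_pending = false;
}

int main(void)
{
	struct nested_state n = { false, false };

	emulate_vmlaunch(&n);		/* L1 executed VMLAUNCH */
	after_hardware_vmexit(&n);	/* next hardware vmexit */

	printf("pending=%d event_request=%d\n",
	       n.nested_run_pending, n.event_request);
	return 0;
}

Compiled with any C compiler, the model prints pending=0 event_request=1, mirroring
the state right after the code added to vmx_vcpu_run() in the second hunk.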
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e6e8fbc02d5e..7eb05126b44c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6742,20 +6742,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 	if (vmx->emulation_required)
 		return handle_invalid_guest_state(vcpu);
 
-	/*
-	 * the KVM_REQ_EVENT optimization bit is only on for one entry, and if
-	 * we did not inject a still-pending event to L1 now because of
-	 * nested_run_pending, we need to re-enable this bit.
-	 */
-	if (vmx->nested.nested_run_pending)
-		kvm_make_request(KVM_REQ_EVENT, vcpu);
-
-	if (!is_guest_mode(vcpu) && (exit_reason == EXIT_REASON_VMLAUNCH ||
-	    exit_reason == EXIT_REASON_VMRESUME))
-		vmx->nested.nested_run_pending = 1;
-	else
-		vmx->nested.nested_run_pending = 0;
-
 	if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
 		nested_vmx_vmexit(vcpu);
 		return 1;
@@ -7290,6 +7276,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
 	trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
 
+	/*
+	 * the KVM_REQ_EVENT optimization bit is only on for one entry, and if
+	 * we did not inject a still-pending event to L1 now because of
+	 * nested_run_pending, we need to re-enable this bit.
+	 */
+	if (vmx->nested.nested_run_pending)
+		kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+	vmx->nested.nested_run_pending = 0;
+
 	vmx_complete_atomic_exit(vmx);
 	vmx_recover_nmi_blocking(vmx);
 	vmx_complete_interrupts(vmx);
@@ -7948,6 +7944,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	enter_guest_mode(vcpu);
 
+	vmx->nested.nested_run_pending = 1;
+
 	vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
 
 	cpu = get_cpu();