author		Liran Alon <liran.alon@oracle.com>	2018-05-22 17:16:15 +0300
committer	Radim Krčmář <rkrcmar@redhat.com>	2018-05-24 19:45:45 +0200
commit		cd9a491f6ef731cdee07dd0a2fe003389424a393 (patch)
tree		728c12364ae108f31672ba632cc5a4bbae026e8c
parent		6f1e03bcabcdbb199940dab0a60b1371cf95f6f9 (diff)
KVM: nVMX: Emulate L1 individual-address invvpid by L0 individual-address invvpid
When vmcs12 uses VPID, all TLB entries populated by L2 are tagged with
vmx->nested.vpid02. Currently, an INVVPID executed by L1 is emulated by
L0 with a single-context or global-context INVVPID that flushes all TLB
entries tagged with vmx->nested.vpid02, regardless of which INVVPID type
L1 executed.

However, the case of an L1 individual-address INVVPID is easy to
optimize: simply issue an individual-address INVVPID for the given
address, tagged with vmx->nested.vpid02.
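
For reference, an individual-address INVVPID takes a 128-bit descriptor
pairing the 16-bit VPID with the guest-linear address to invalidate. The
following is only a minimal sketch of issuing such an invalidation,
loosely modeled on the __invvpid() helper in arch/x86/kvm/vmx.c; the
kernel's exception-fixup plumbing is omitted and the helper name here is
illustrative, not a kernel API:

/*
 * Sketch only: issue an individual-address INVVPID.  Descriptor
 * layout per the Intel SDM: VPID in bits 15:0, reserved bits
 * 63:16 zero, guest-linear address in bits 127:64.
 */
#include <stdint.h>

struct invvpid_operand {
	uint64_t vpid : 16;
	uint64_t rsvd : 48;
	uint64_t gla;
};

static inline void invvpid_individual_addr(uint16_t vpid, uint64_t gla)
{
	struct invvpid_operand operand = { vpid, 0, gla };
	uint64_t type = 0;	/* INVVPID type 0 = individual address */

	asm volatile ("invvpid %0, %1"
		      : : "m"(operand), "r"(type) : "cc", "memory");
}

On hardware that does not advertise the individual-address INVVPID type,
the patch below falls back to __vmx_flush_tlb(), which flushes every
entry tagged with vmx->nested.vpid02.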
Reviewed-by: Liam Merwick <liam.merwick@oracle.com>
Signed-off-by: Liran Alon <liran.alon@oracle.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
[Squashed with a preparatory patch that added the !operand.vpid line.]
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
-rw-r--r--	arch/x86/kvm/vmx.c	19
1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 98f05e2b0ecc..e50beb76d846 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1572,6 +1572,11 @@ static inline bool cpu_has_vmx_invept_global(void)
 	return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
 }
 
+static inline bool cpu_has_vmx_invvpid_individual_addr(void)
+{
+	return vmx_capability.vpid & VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT;
+}
+
 static inline bool cpu_has_vmx_invvpid_single(void)
 {
 	return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
@@ -8513,12 +8518,19 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 
 	switch (type) {
 	case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
-		if (is_noncanonical_address(operand.gla, vcpu)) {
+		if (!operand.vpid ||
+		    is_noncanonical_address(operand.gla, vcpu)) {
 			nested_vmx_failValid(vcpu,
 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 			return kvm_skip_emulated_instruction(vcpu);
 		}
-		/* fall through */
+		if (cpu_has_vmx_invvpid_individual_addr() &&
+		    vmx->nested.vpid02) {
+			__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
+				vmx->nested.vpid02, operand.gla);
+		} else
+			__vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
+		break;
 	case VMX_VPID_EXTENT_SINGLE_CONTEXT:
 	case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
 		if (!operand.vpid) {
@@ -8526,15 +8538,16 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 			return kvm_skip_emulated_instruction(vcpu);
 		}
+		__vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
 		break;
 	case VMX_VPID_EXTENT_ALL_CONTEXT:
+		__vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
 		break;
 	default:
 		WARN_ON_ONCE(1);
 		return kvm_skip_emulated_instruction(vcpu);
 	}
 
-	__vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
 	nested_vmx_succeed(vcpu);
 	return kvm_skip_emulated_instruction(vcpu);
 }
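
The flush-scope fallback the patch relies on amounts to: use the
narrowest INVVPID type the CPU advertises, widening only as needed. A
simplified sketch of that ordering follows; the helper names here
(has_invvpid_*, do_invvpid, flush_l2_mapping) are hypothetical, not
kernel APIs, and in the real code the non-individual cases are routed
through __vmx_flush_tlb():

/*
 * Hypothetical sketch of the flush-scope fallback: prefer the
 * narrowest invalidation the CPU supports.
 */
#include <stdbool.h>
#include <stdint.h>

enum invvpid_type {
	INVVPID_INDIVIDUAL_ADDR = 0,
	INVVPID_SINGLE_CONTEXT  = 1,
	INVVPID_ALL_CONTEXT     = 2,
};

/* Assumed capability probes and low-level INVVPID issuer. */
extern bool has_invvpid_individual_addr(void);
extern bool has_invvpid_single(void);
extern void do_invvpid(enum invvpid_type type, uint16_t vpid, uint64_t gla);

static void flush_l2_mapping(uint16_t vpid02, uint64_t gla)
{
	if (has_invvpid_individual_addr())
		/* Drop only the one translation L1 asked about. */
		do_invvpid(INVVPID_INDIVIDUAL_ADDR, vpid02, gla);
	else if (has_invvpid_single())
		/* Flush every entry tagged with vpid02. */
		do_invvpid(INVVPID_SINGLE_CONTEXT, vpid02, 0);
	else
		/* Last resort: flush all VPID-tagged entries. */
		do_invvpid(INVVPID_ALL_CONTEXT, 0, 0);
}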