summaryrefslogtreecommitdiff
path: root/arch/x86/kvm/svm/svm.c
diff options
context:
space:
mode:
author: Paolo Bonzini <pbonzini@redhat.com> 2020-11-17 05:15:41 -0500
committer: Paolo Bonzini <pbonzini@redhat.com> 2021-03-15 04:42:31 -0400
commit: 9e8f0fbfff1a7787658ce1add0625f59c4faf0ef (patch)
tree: 94af561fb14bf8480c39c19903f984d676f06a7a /arch/x86/kvm/svm/svm.c
parent: 193015adf40d0465c240d4e9a7b6e4b84b531f8b (diff)
download: lwn-9e8f0fbfff1a7787658ce1add0625f59c4faf0ef.tar.gz
download: lwn-9e8f0fbfff1a7787658ce1add0625f59c4faf0ef.zip
KVM: nSVM: rename functions and variables according to vmcbXY nomenclature
Now that SVM is using a separate vmcb01 and vmcb02 (and also uses the vmcb12 naming) we can give clearer names to functions that write to and read from those VMCBs. Likewise, variables and parameters can be renamed from nested_vmcb to vmcb12. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/svm/svm.c')
-rw-r--r--  arch/x86/kvm/svm/svm.c | 14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 8779b7bdca1a..a3c24b46610f 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2124,7 +2124,7 @@ static int vmmcall_interception(struct vcpu_svm *svm)
static int vmload_interception(struct vcpu_svm *svm)
{
- struct vmcb *nested_vmcb;
+ struct vmcb *vmcb12;
struct kvm_host_map map;
int ret;
@@ -2138,11 +2138,11 @@ static int vmload_interception(struct vcpu_svm *svm)
return 1;
}
- nested_vmcb = map.hva;
+ vmcb12 = map.hva;
ret = kvm_skip_emulated_instruction(&svm->vcpu);
- nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
+ nested_svm_vmloadsave(vmcb12, svm->vmcb);
kvm_vcpu_unmap(&svm->vcpu, &map, true);
return ret;
@@ -2150,7 +2150,7 @@ static int vmload_interception(struct vcpu_svm *svm)
static int vmsave_interception(struct vcpu_svm *svm)
{
- struct vmcb *nested_vmcb;
+ struct vmcb *vmcb12;
struct kvm_host_map map;
int ret;
@@ -2164,11 +2164,11 @@ static int vmsave_interception(struct vcpu_svm *svm)
return 1;
}
- nested_vmcb = map.hva;
+ vmcb12 = map.hva;
ret = kvm_skip_emulated_instruction(&svm->vcpu);
- nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
+ nested_svm_vmloadsave(svm->vmcb, vmcb12);
kvm_vcpu_unmap(&svm->vcpu, &map, true);
return ret;
@@ -3949,7 +3949,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
svm->next_rip = 0;
if (is_guest_mode(&svm->vcpu)) {
- sync_nested_vmcb_control(svm);
+ nested_sync_control_from_vmcb02(svm);
svm->nested.nested_run_pending = 0;
}