author		Paolo Bonzini <pbonzini@redhat.com>	2022-02-10 07:30:31 -0500
committer	Paolo Bonzini <pbonzini@redhat.com>	2022-04-29 12:49:17 -0400
commit		39e7e2bf321ac7784823e61abcbdf1459aeaee94 (patch)
tree		198eaf1a553d7ce15712a2b59b8d2ef19aa99c26 /arch/x86/kvm/mmu/mmu.c
parent		82ffa13f7933cdc9aa3d22c9bc5c8d9c9f610af1 (diff)
KVM: x86/mmu: pull computation of kvm_mmu_role_regs to kvm_init_mmu
The init_kvm_*mmu functions, with the exception of shadow NPT, do not
need to know the full values of CR0/CR4/EFER; they only need to know the
bits that make up the "role".  This cleanup however will take quite a
few incremental steps.  As a start, pull the common computation of the
struct kvm_mmu_role_regs into their caller: all of them extract the
struct from the vcpu as the very first step.

Reviewed-by: David Matlack <dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
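For context (not part of this patch): each init_kvm_*mmu variant opened by
calling vcpu_to_role_regs() to snapshot the registers that feed the role
computation. The helper looks roughly like this at this point in the series;
this is a sketch reconstructed from the surrounding code, not quoted from the
tree, and the exact field qualifiers may differ:

/*
 * Sketch of the helper this patch hoists into kvm_init_mmu(); it
 * snapshots the guest registers that determine the MMU role.
 */
struct kvm_mmu_role_regs {
	const unsigned long cr0;
	const unsigned long cr4;
	const u64 efer;
};

static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_role_regs regs = {
		.cr0  = kvm_read_cr0(vcpu),	/* guest CR0 */
		.cr4  = kvm_read_cr4(vcpu),	/* guest CR4 */
		.efer = vcpu->arch.efer,	/* guest EFER */
	};

	return regs;
}

After this patch the snapshot is taken once in kvm_init_mmu() and handed down
as a const pointer, so the callees all see the same, immutable register state.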
Diffstat (limited to 'arch/x86/kvm/mmu/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu/mmu.c | 28 +++++++++++++++-------------
1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 5d5e5b062be0..3e49902561f9 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4821,12 +4821,12 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
 	return role;
 }
 
-static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
+static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
+			     const struct kvm_mmu_role_regs *regs)
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
-	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
 	union kvm_mmu_role new_role =
-		kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, false);
+		kvm_calc_tdp_mmu_root_page_role(vcpu, regs, false);
 
 	if (new_role.as_u64 == context->mmu_role.as_u64)
 		return;
@@ -4840,7 +4840,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
 	context->inject_page_fault = kvm_inject_page_fault;
-	context->root_level = role_regs_to_root_level(&regs);
+	context->root_level = role_regs_to_root_level(regs);
 
 	if (!is_cr0_pg(context))
 		context->gva_to_gpa = nonpaging_gva_to_gpa;
@@ -5009,12 +5009,12 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
 
-static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
+static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
+			     const struct kvm_mmu_role_regs *regs)
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
-	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
 
-	kvm_init_shadow_mmu(vcpu, &regs);
+	kvm_init_shadow_mmu(vcpu, regs);
 
 	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
@@ -5038,10 +5038,10 @@ kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *
 	return role;
 }
 
-static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
+static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
+				const struct kvm_mmu_role_regs *regs)
 {
-	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
-	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, &regs);
+	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, regs);
 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
 
 	if (new_role.as_u64 == g_context->mmu_role.as_u64)
@@ -5081,12 +5081,14 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 
 void kvm_init_mmu(struct kvm_vcpu *vcpu)
 {
+	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
+
 	if (mmu_is_nested(vcpu))
-		init_kvm_nested_mmu(vcpu);
+		init_kvm_nested_mmu(vcpu, &regs);
 	else if (tdp_enabled)
-		init_kvm_tdp_mmu(vcpu);
+		init_kvm_tdp_mmu(vcpu, &regs);
 	else
-		init_kvm_softmmu(vcpu);
+		init_kvm_softmmu(vcpu, &regs);
 }
 EXPORT_SYMBOL_GPL(kvm_init_mmu);
 
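The shape of the change is the generic "hoist a common prologue into the
caller" refactor. A minimal standalone illustration of the same pattern, with
hypothetical names that are not KVM code:

#include <stdio.h>

/* Hypothetical stand-in for kvm_mmu_role_regs. */
struct role_regs {
	unsigned long cr0;
	unsigned long cr4;
};

/* Stand-in for vcpu_to_role_regs(): gather the state once. */
static struct role_regs snapshot_regs(unsigned long cr0, unsigned long cr4)
{
	struct role_regs r = { .cr0 = cr0, .cr4 = cr4 };
	return r;
}

/* Callees take a const pointer instead of re-gathering the state. */
static void init_a(const struct role_regs *regs)
{
	printf("a: cr0=%lx\n", regs->cr0);
}

static void init_b(const struct role_regs *regs)
{
	printf("b: cr4=%lx\n", regs->cr4);
}

int main(void)
{
	/* The caller computes the snapshot once... */
	struct role_regs regs = snapshot_regs(0x80000011UL, 0x20UL);

	/* ...and every branch passes the same, immutable view down. */
	init_a(&regs);
	init_b(&regs);
	return 0;
}

Beyond removing the duplicated prologue, passing const struct
kvm_mmu_role_regs * guarantees that all three paths compute the role from one
register snapshot, which matters once the later incremental steps the commit
message promises start trimming which bits each callee actually reads.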