author		David Matlack <dmatlack@google.com>	2022-01-19 23:07:35 +0000
committer	Paolo Bonzini <pbonzini@redhat.com>	2022-02-10 13:50:41 -0500
commit		a82070b6e71a6642f87ef9e483ddc062c3571678 (patch)
tree		e53002d3fcce98c92802661c5366b9842fb1b647
parent		a3aca4de0da99699c5b94fc3fc4e1817e756edd1 (diff)
KVM: x86/mmu: Separate TDP MMU shadow page allocation and initialization
Separate the allocation of shadow pages from their initialization.  This
is in preparation for splitting huge pages outside of the vCPU fault
context, which requires a different allocation mechanism.

No functional change intended.

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20220119230739.2234394-15-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--	arch/x86/kvm/mmu/tdp_mmu.c	26
1 file changed, 17 insertions(+), 9 deletions(-)
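As a reader's aside (not part of the patch), the allocate-then-initialize split introduced by the diff below can be illustrated with a small, self-contained C sketch.  The types and function names here are simplified stand-ins for struct kvm_mmu_page, tdp_mmu_alloc_sp() and tdp_mmu_init_sp(), not the real KVM code; the point is only that allocation returns uninitialized backing memory, while a separate init step fills in the metadata and can be reused by callers that obtain the memory some other way.

/*
 * Hypothetical stand-alone sketch of the alloc/init split; it does not
 * use real KVM types or APIs.
 */
#include <stdio.h>
#include <stdlib.h>

struct sp {
	void *spt;		/* stand-in for the shadow page table page */
	int role;		/* stand-in for union kvm_mmu_page_role */
	unsigned long gfn;
};

/* Allocation only: grab memory, do not fill in any metadata. */
static struct sp *alloc_sp(void)
{
	struct sp *sp = calloc(1, sizeof(*sp));

	if (sp)
		sp->spt = calloc(1, 4096);
	return sp;
}

/* Initialization only: usable with any allocation scheme. */
static void init_sp(struct sp *sp, unsigned long gfn, int role)
{
	sp->gfn = gfn;
	sp->role = role;
}

int main(void)
{
	/* Callers now perform the two steps explicitly, as in the diff below. */
	struct sp *root = alloc_sp();

	if (!root || !root->spt)
		return 1;
	init_sp(root, 0, 0);
	printf("gfn=%lu role=%d\n", root->gfn, root->role);
	free(root->spt);
	free(root);
	return 0;
}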
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 472ea9994e3f..4c9a98a28e1d 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -171,13 +171,19 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
if (kvm_mmu_page_as_id(_root) != _as_id) { \
} else
-static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu, gfn_t gfn,
- union kvm_mmu_page_role role)
+static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
{
struct kvm_mmu_page *sp;
sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
+
+ return sp;
+}
+
+static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, gfn_t gfn,
+ union kvm_mmu_page_role role)
+{
set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
sp->role = role;
@@ -185,12 +191,10 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu, gfn_t gfn,
sp->tdp_mmu_page = true;
trace_kvm_mmu_get_page(sp, true);
-
- return sp;
}
-static struct kvm_mmu_page *tdp_mmu_alloc_child_sp(struct kvm_vcpu *vcpu,
- struct tdp_iter *iter)
+static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
+ struct tdp_iter *iter)
{
struct kvm_mmu_page *parent_sp;
union kvm_mmu_page_role role;
@@ -200,7 +204,7 @@ static struct kvm_mmu_page *tdp_mmu_alloc_child_sp(struct kvm_vcpu *vcpu,
role = parent_sp->role;
role.level--;
- return tdp_mmu_alloc_sp(vcpu, iter->gfn, role);
+ tdp_mmu_init_sp(child_sp, iter->gfn, role);
}
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
@@ -221,7 +225,9 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
goto out;
}
- root = tdp_mmu_alloc_sp(vcpu, 0, role);
+ root = tdp_mmu_alloc_sp(vcpu);
+ tdp_mmu_init_sp(root, 0, role);
+
refcount_set(&root->tdp_mmu_root_count, 1);
spin_lock(&kvm->arch.tdp_mmu_pages_lock);
@@ -1042,7 +1048,9 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
if (is_removed_spte(iter.old_spte))
break;
- sp = tdp_mmu_alloc_child_sp(vcpu, &iter);
+ sp = tdp_mmu_alloc_sp(vcpu);
+ tdp_mmu_init_child_sp(sp, &iter);
+
if (tdp_mmu_link_sp_atomic(vcpu->kvm, &iter, sp, account_nx)) {
tdp_mmu_free_sp(sp);
break;