author     Sean Christopherson <seanjc@google.com>  2022-07-12 02:07:22 +0000
committer  Paolo Bonzini <pbonzini@redhat.com>      2022-07-14 11:31:23 -0400
commit     79e48cec6cba4eee0bd3a13f31320e33a1729931
tree       6fbaf53d21b12bde36b5a011c3403c0a181e47b2 /arch/x86/kvm/mmu/mmu.c
parent     b184b35d06b2a3de65ff2ef4303f83535572266c
KVM: x86/mmu: Add optimized helper to retrieve an SPTE's index
Add spte_index() to dedup all the code that calculates a SPTE's index into its parent's page table and/or spt array. Opportunistically tweak the calculation to avoid pointer arithmetic, which is subtle (subtract in 8-byte chunks) and less performant (requires the compiler to generate the subtraction).

Suggested-by: David Matlack <dmatlack@google.com>
Reviewed-by: David Matlack <dmatlack@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220712020724.1262121-2-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
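[Editor's note: the spte_index() helper itself is added to arch/x86/kvm/mmu/spte.h and therefore does not appear in this mmu.c-limited diffstat. Based on the changelog above, a minimal sketch of such a helper, assuming a page-aligned, page-sized spt array of 8-byte entries; the body and the SPTE_ENT_PER_PAGE constant are reconstructions, not quoted from the patch:

/*
 * Sketch only: because sp->spt is one page-aligned page of u64
 * entries, an entry's index is fully encoded in the low bits of its
 * own address; masking recovers it without subtracting the array base.
 */
static inline int spte_index(u64 *sptep)
{
	return ((unsigned long)sptep / sizeof(*sptep)) & (SPTE_ENT_PER_PAGE - 1);
}

The division by sizeof(*sptep) reduces to a shift, and page alignment makes loading and subtracting sp->spt unnecessary, which is exactly the pointer arithmetic the changelog calls subtle and less performant.]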
Diffstat (limited to 'arch/x86/kvm/mmu/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu/mmu.c  22
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f7fa4c31b7c5..864a32f96082 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1038,7 +1038,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
struct kvm_rmap_head *rmap_head;
sp = sptep_to_sp(spte);
- gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
+ gfn = kvm_mmu_page_get_gfn(sp, spte_index(spte));
/*
* Unlike rmap_add, rmap_remove does not run in the context of a vCPU
@@ -1589,7 +1589,7 @@ static void __rmap_add(struct kvm *kvm,
int rmap_count;
sp = sptep_to_sp(spte);
- kvm_mmu_page_set_translation(sp, spte - sp->spt, gfn, access);
+ kvm_mmu_page_set_translation(sp, spte_index(spte), gfn, access);
kvm_update_page_stats(kvm, sp->role.level, 1);
rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
@@ -1716,11 +1716,9 @@ static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
static void mark_unsync(u64 *spte)
{
struct kvm_mmu_page *sp;
- unsigned int index;
sp = sptep_to_sp(spte);
- index = spte - sp->spt;
- if (__test_and_set_bit(index, sp->unsync_child_bitmap))
+ if (__test_and_set_bit(spte_index(spte), sp->unsync_child_bitmap))
return;
if (sp->unsync_children++)
return;
@@ -2203,7 +2201,7 @@ static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct, unsig
*/
if (role.has_4_byte_gpte) {
WARN_ON_ONCE(role.level != PG_LEVEL_4K);
- role.quadrant = (sptep - parent_sp->spt) % 2;
+ role.quadrant = spte_index(sptep) & 1;
}
return role;
@@ -2828,7 +2826,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
rmap_add(vcpu, slot, sptep, gfn, pte_access);
} else {
/* Already rmapped but the pte_access bits may have changed. */
- kvm_mmu_page_set_access(sp, sptep - sp->spt, pte_access);
+ kvm_mmu_page_set_access(sp, spte_index(sptep), pte_access);
}
return ret;
@@ -2844,7 +2842,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
int i, ret;
gfn_t gfn;
- gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
+ gfn = kvm_mmu_page_get_gfn(sp, spte_index(start));
slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
if (!slot)
return -1;
@@ -2870,7 +2868,7 @@ static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
WARN_ON(!sp->role.direct);
- i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
+ i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1);
spte = sp->spt + i;
for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
@@ -6156,8 +6154,8 @@ static struct kvm_mmu_page *shadow_mmu_get_sp_for_split(struct kvm *kvm, u64 *hu
unsigned int access;
gfn_t gfn;
- gfn = kvm_mmu_page_get_gfn(huge_sp, huge_sptep - huge_sp->spt);
- access = kvm_mmu_page_get_access(huge_sp, huge_sptep - huge_sp->spt);
+ gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep));
+ access = kvm_mmu_page_get_access(huge_sp, spte_index(huge_sptep));
/*
* Note, huge page splitting always uses direct shadow pages, regardless
@@ -6231,7 +6229,7 @@ static int shadow_mmu_try_split_huge_page(struct kvm *kvm,
u64 spte;
/* Grab information for the tracepoint before dropping the MMU lock. */
- gfn = kvm_mmu_page_get_gfn(huge_sp, huge_sptep - huge_sp->spt);
+ gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep));
level = huge_sp->role.level;
spte = *huge_sptep;
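[Editor's note on the "subtract in 8-byte chunks" remark: subtracting two u64 pointers in C yields an element count, i.e. the byte difference divided by sizeof(u64), and the compiler must emit that subtraction and scaling. A self-contained userspace sketch (hypothetical names, not kernel code) showing that masking a page-aligned address computes the same index:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define ENTRIES 512	/* u64 entries in one 4KiB page table */

int main(void)
{
	/* Page-aligned stand-in for sp->spt. */
	uint64_t *spt = aligned_alloc(4096, ENTRIES * sizeof(uint64_t));
	uint64_t *sptep = &spt[137];

	/* Pointer subtraction: (byte difference) / sizeof(u64). */
	int idx_sub = sptep - spt;

	/* Masking: alignment makes the base subtraction unnecessary. */
	int idx_mask = ((uintptr_t)sptep / sizeof(*sptep)) & (ENTRIES - 1);

	assert(idx_sub == 137 && idx_mask == 137);
	free(spt);
	return 0;
}]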