author	Lai Jiangshan <laijs@linux.alibaba.com>	2021-09-06 20:25:47 +0800
committer	Paolo Bonzini <pbonzini@redhat.com>	2021-10-01 03:44:46 -0400
commit	3e44dce4d0aeb3582b87c7bb8a79d69fa406eee5
tree	f2e1087b2285439ffec11a3ecb136080065a9f0f /arch/x86/kvm/mmu/mmu.c
parent	5228eb96a4875f8cf5d61d486e3795ac14df8904
KVM: X86: Move PTE present check from loop body to __shadow_walk_next()
So far, the loop bodies already ensure the PTE is present before calling __shadow_walk_next(): some loop bodies simply break out of the walk on a !PRESENT SPTE directly, while the others, i.e. FNAME(fetch) and __direct_map(), do not currently guard their walks with is_shadow_present_pte(), but only because they install present non-leaf SPTEs in the loop itself.

But checking that the PTE is present in __shadow_walk_next() (which is called from shadow_walk_okay()) is more prudent; walking past a !PRESENT SPTE would lead to attempting to read the next-level SPTE from a garbage iter->shadow_addr. It also allows the is_shadow_present_pte() checks to be removed from the loop bodies.

Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Message-Id: <20210906122547.263316-2-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
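[Editor's illustration] Conceptually, the patch hoists the walk's termination condition into the iterator's advance step. The minimal, self-contained C sketch below illustrates that pattern only; walk_iter, SPTE_PRESENT, SPTE_LEAF, walk_okay() and walk_next() are simplified stand-ins invented for this example, not the kernel's kvm_shadow_walk_iterator machinery:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the real SPTE predicates. */
#define SPTE_PRESENT	(1ULL << 0)	/* stands in for is_shadow_present_pte() */
#define SPTE_LEAF	(1ULL << 1)	/* stands in for is_last_spte() */

struct walk_iter {
	int level;		/* level 0 terminates the walk */
	uint64_t spte;		/* entry read at the current level */
};

/* Analogue of shadow_walk_okay(): keep walking while level > 0. */
static bool walk_okay(const struct walk_iter *it)
{
	return it->level > 0;
}

/*
 * Analogue of __shadow_walk_next() after this patch: terminate on a
 * !PRESENT *or* leaf entry.  Descending through a !PRESENT entry would
 * mean deriving the next-level table address from garbage.
 */
static void walk_next(struct walk_iter *it, const uint64_t table[])
{
	if (!(it->spte & SPTE_PRESENT) || (it->spte & SPTE_LEAF)) {
		it->level = 0;
		return;
	}
	it->level--;
	it->spte = table[it->level];	/* read the next-level entry */
}

int main(void)
{
	/* Toy "page table": one entry per level; level 1 is a present leaf. */
	uint64_t table[4] = { 0, SPTE_PRESENT | SPTE_LEAF,
			      SPTE_PRESENT, SPTE_PRESENT };
	struct walk_iter it = { .level = 3, .spte = table[3] };

	/* The loop body needs no presence check of its own. */
	for (; walk_okay(&it); walk_next(&it, table))
		printf("level %d: spte %#llx\n", it.level,
		       (unsigned long long)it.spte);
	return 0;
}

With the advance step refusing to descend through a !PRESENT entry, the caller's loop is safe by construction, which is why the diff below can delete the per-body is_shadow_present_pte() checks.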
Diffstat (limited to 'arch/x86/kvm/mmu/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu/mmu.c	13
1 file changed, 2 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 73aa15e89311..7ef9c001d1b6 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2220,7 +2220,7 @@ static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
			       u64 spte)
{
-	if (is_last_spte(spte, iterator->level)) {
+	if (!is_shadow_present_pte(spte) || is_last_spte(spte, iterator->level)) {
		iterator->level = 0;
		return;
	}
@@ -3189,9 +3189,6 @@ static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
	for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) {
		sptep = iterator.sptep;
		*spte = old_spte;
-
-		if (!is_shadow_present_pte(old_spte))
-			break;
	}

	return sptep;
@@ -3759,9 +3756,6 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
		spte = mmu_spte_get_lockless(iterator.sptep);

		sptes[leaf] = spte;
-
-		if (!is_shadow_present_pte(spte))
-			break;
	}

	return leaf;
@@ -3877,11 +3871,8 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
	struct kvm_shadow_walk_iterator iterator;
	u64 spte;

	walk_shadow_page_lockless_begin(vcpu);
-	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
+	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
		clear_sp_write_flooding_count(iterator.sptep);
-		if (!is_shadow_present_pte(spte))
-			break;
-	}
	walk_shadow_page_lockless_end(vcpu);
}