Diffstat (limited to 'arch/x86/kvm/mmu/mmu.c')
 arch/x86/kvm/mmu/mmu.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 4f0443a097a6..443c1b35fc7a 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1228,8 +1228,8 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect)
 	return mmu_spte_update(sptep, spte);
 }
 
-static bool __rmap_write_protect(struct kvm_rmap_head *rmap_head,
-				 bool pt_protect)
+static bool rmap_write_protect(struct kvm_rmap_head *rmap_head,
+			       bool pt_protect)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
@@ -1309,7 +1309,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 	while (mask) {
 		rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
 					PG_LEVEL_4K, slot);
-		__rmap_write_protect(rmap_head, false);
+		rmap_write_protect(rmap_head, false);
 
 		/* clear the first set bit */
 		mask &= mask - 1;
@@ -1408,7 +1408,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 	if (kvm_memslots_have_rmaps(kvm)) {
 		for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
 			rmap_head = gfn_to_rmap(gfn, i, slot);
-			write_protected |= __rmap_write_protect(rmap_head, true);
+			write_protected |= rmap_write_protect(rmap_head, true);
 		}
 	}
 
@@ -5798,7 +5798,7 @@ static bool slot_rmap_write_protect(struct kvm *kvm,
 				    struct kvm_rmap_head *rmap_head,
 				    const struct kvm_memory_slot *slot)
 {
-	return __rmap_write_protect(rmap_head, false);
+	return rmap_write_protect(rmap_head, false);
 }
 
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
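
For context (not part of this patch): the hunks above only show the rename from __rmap_write_protect() to rmap_write_protect(), not the helper's body. A minimal sketch of what the renamed function does, reconstructed from the declarations visible in the first hunk and the spte_write_protect() helper it wraps, is below. The loop body is an assumption based on the surrounding mmu.c code, not something this diff changes.

/*
 * Illustrative sketch only: walk every SPTE chained off the rmap head,
 * write-protect each one via spte_write_protect(), and report whether
 * any SPTE was actually modified (so callers know a TLB flush is needed).
 */
static bool rmap_write_protect(struct kvm_rmap_head *rmap_head,
			       bool pt_protect)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		flush |= spte_write_protect(sptep, pt_protect);

	return flush;
}

Since the leading underscores conventionally mark an "inner" helper with a locked/unlocked wrapper pair, and no plain rmap_write_protect() wrapper exists here, dropping the prefix makes the call sites in the hunks above read more naturally without changing behavior.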