author    | Sean Christopherson <seanjc@google.com> | 2022-04-23 03:47:46 +0000
committer | Paolo Bonzini <pbonzini@redhat.com>     | 2022-05-12 09:51:42 -0400
commit    | 5276c616abf149a4dfbc708df3f22da642ec3dde (patch)
tree      | ffe66fc3428e53f45f6158cafcad7134b08a059a /arch/x86/kvm/mmu/mmu.c
parent    | 5c64aba5179f6fc6a367f76342c6290a5e06cfc4 (diff)
KVM: x86/mmu: Add RET_PF_CONTINUE to eliminate bool+int* "returns"
Add RET_PF_CONTINUE and use it in handle_abnormal_pfn() and
kvm_faultin_pfn() to signal that the page fault handler should continue
doing its thing. Aside from being gross and inefficient, using a boolean
return to signal continue vs. stop makes it extremely difficult to add
more helpers and/or move existing code to a helper.
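As a rough, self-contained sketch of the two conventions (not the kernel
code: the struct and helpers below are invented for illustration, and only
the RET_PF_* names mirror KVM's), the difference looks like this:

  /* Simplified stand-ins for KVM's types and return codes. */
  #include <stdbool.h>

  enum { RET_PF_CONTINUE = 0, RET_PF_RETRY, RET_PF_EMULATE };

  struct fault { bool needs_emulation; };

  /* Old convention: "true" means stop; the real code comes back via *ret_val. */
  static bool old_helper(struct fault *f, int *ret_val)
  {
          if (f->needs_emulation) {
                  *ret_val = RET_PF_EMULATE;
                  return true;
          }
          return false;
  }

  /* New convention: one return value; RET_PF_CONTINUE means "keep going". */
  static int new_helper(struct fault *f)
  {
          if (f->needs_emulation)
                  return RET_PF_EMULATE;
          return RET_PF_CONTINUE;
  }

With the single return value, every caller can use the same
"r = helper(); if (r != RET_PF_CONTINUE) return r;" pattern, as the
direct_page_fault() hunk in the diff below shows.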
E.g. hypothetically, if nested MMUs were to gain a separate page fault
handler in the future, everything up to the "is self-modifying PTE" check
can be shared by all shadow MMUs, but communicating up the stack whether
to continue on or stop becomes a nightmare.
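For instance (hypothetical function names, reusing the sketch above), a
shared prologue can forward the status code through any number of call
levels without threading an int * through each one:

  /* Hypothetical shared front end for several shadow-MMU fault handlers. */
  static int shared_fault_prologue(struct fault *f)
  {
          int r = new_helper(f);

          if (r != RET_PF_CONTINUE)
                  return r;       /* bubble the terminal code up as-is */

          /* ... more shared setup ... */
          return RET_PF_CONTINUE;
  }

  /* Hypothetical handler built on top of the shared prologue. */
  static int hypothetical_nested_mmu_fault(struct fault *f)
  {
          int r = shared_fault_prologue(f);

          if (r != RET_PF_CONTINUE)
                  return r;

          /* handler-specific work would go here */
          return RET_PF_RETRY;
  }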
More concretely, proposed support for private guest memory ran into a
similar issue, where it'll be forced to forego a helper in order to yield
sane code: https://lore.kernel.org/all/YkJbxiL%2FAz7olWlq@google.com.
No functional change intended.
Cc: David Matlack <dmatlack@google.com>
Cc: Chao Peng <chao.p.peng@linux.intel.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220423034752.1161007-7-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu/mmu.c')
-rw-r--r-- | arch/x86/kvm/mmu/mmu.c | 51
1 file changed, 22 insertions(+), 29 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index add486220dae..8313c1d8e814 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3007,14 +3007,12 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
         return -EFAULT;
 }
 
-static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
-                                unsigned int access, int *ret_val)
+static int handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
+                               unsigned int access)
 {
         /* The pfn is invalid, report the error! */
-        if (unlikely(is_error_pfn(fault->pfn))) {
-                *ret_val = kvm_handle_bad_page(vcpu, fault->gfn, fault->pfn);
-                return true;
-        }
+        if (unlikely(is_error_pfn(fault->pfn)))
+                return kvm_handle_bad_page(vcpu, fault->gfn, fault->pfn);
 
         if (unlikely(!fault->slot)) {
                 gva_t gva = fault->is_tdp ? 0 : fault->addr;
@@ -3032,13 +3030,11 @@ static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fa
                  * the hardware's).
                  */
                 if (unlikely(!enable_mmio_caching) ||
-                    unlikely(fault->gfn > kvm_mmu_max_gfn())) {
-                        *ret_val = RET_PF_EMULATE;
-                        return true;
-                }
+                    unlikely(fault->gfn > kvm_mmu_max_gfn()))
+                        return RET_PF_EMULATE;
         }
 
-        return false;
+        return RET_PF_CONTINUE;
 }
 
 static bool page_fault_can_be_fast(struct kvm_page_fault *fault)
@@ -3946,7 +3942,7 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                                   kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
 
-static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, int *r)
+static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
         struct kvm_memory_slot *slot = fault->slot;
         bool async;
@@ -3957,7 +3953,7 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
          * be zapped before KVM inserts a new MMIO SPTE for the gfn.
          */
         if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
-                goto out_retry;
+                return RET_PF_RETRY;
 
         if (!kvm_is_visible_memslot(slot)) {
                 /* Don't expose private memslots to L2. */
@@ -3965,7 +3961,7 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
                         fault->slot = NULL;
                         fault->pfn = KVM_PFN_NOSLOT;
                         fault->map_writable = false;
-                        return false;
+                        return RET_PF_CONTINUE;
                 }
                 /*
                  * If the APIC access page exists but is disabled, go directly
@@ -3974,10 +3970,8 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
                  * when the AVIC is re-enabled.
                  */
                 if (slot && slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT &&
-                    !kvm_apicv_activated(vcpu->kvm)) {
-                        *r = RET_PF_EMULATE;
-                        return true;
-                }
+                    !kvm_apicv_activated(vcpu->kvm))
+                        return RET_PF_EMULATE;
         }
 
         async = false;
@@ -3985,26 +3979,23 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
                                           fault->write, &fault->map_writable,
                                           &fault->hva);
         if (!async)
-                return false; /* *pfn has correct page already */
+                return RET_PF_CONTINUE; /* *pfn has correct page already */
 
         if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) {
                 trace_kvm_try_async_get_page(fault->addr, fault->gfn);
                 if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
                         trace_kvm_async_pf_doublefault(fault->addr, fault->gfn);
                         kvm_make_request(KVM_REQ_APF_HALT, vcpu);
-                        goto out_retry;
-                } else if (kvm_arch_setup_async_pf(vcpu, fault->addr, fault->gfn))
-                        goto out_retry;
+                        return RET_PF_RETRY;
+                } else if (kvm_arch_setup_async_pf(vcpu, fault->addr, fault->gfn)) {
+                        return RET_PF_RETRY;
+                }
         }
 
         fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, NULL,
                                           fault->write, &fault->map_writable,
                                           &fault->hva);
-        return false;
-
-out_retry:
-        *r = RET_PF_RETRY;
-        return true;
+        return RET_PF_CONTINUE;
 }
 
 /*
@@ -4059,10 +4050,12 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
         mmu_seq = vcpu->kvm->mmu_notifier_seq;
         smp_rmb();
 
-        if (kvm_faultin_pfn(vcpu, fault, &r))
+        r = kvm_faultin_pfn(vcpu, fault);
+        if (r != RET_PF_CONTINUE)
                 return r;
 
-        if (handle_abnormal_pfn(vcpu, fault, ACC_ALL, &r))
+        r = handle_abnormal_pfn(vcpu, fault, ACC_ALL);
+        if (r != RET_PF_CONTINUE)
                 return r;
 
         r = RET_PF_RETRY;