author     Paolo Bonzini <pbonzini@redhat.com>    2024-05-12 03:16:47 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>    2024-05-12 03:16:47 -0400
commit     f4bc1373d5a6687e08e51d6d21c5c95033ca169f (patch)
tree       d9442dbe7c54f756370be31f5dae808f80379a9e /virt
parent     e5f62e27b16601f08b6b04dc964691d48d0a6a91 (diff)
parent     2098acaf24455698c149b27f0347eb4ddc6d2058 (diff)
Merge tag 'kvm-x86-generic-6.10' of https://github.com/kvm-x86/linux into HEAD
KVM cleanups for 6.10:

 - Misc cleanups extracted from the "exit on missing userspace mapping"
   series, which has been put on hold in anticipation of a "KVM Userfault"
   approach, which should provide a superset of functionality.

 - Remove kvm_make_all_cpus_request_except(), which got added to hack
   around an AVIC bug, and then became dead code when a more robust fix
   came along.

 - Fix a goof in the KVM_CREATE_GUEST_MEMFD documentation.
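As a minimal sketch of the caller-visible effect of dropping kvm_make_all_cpus_request_except(), not code from this merge; the request constant and the wrapper function below are illustrative only:

/*
 * Illustrative only: KVM_REQ_TLB_FLUSH stands in for any broadcast request,
 * and example_kick_all_vcpus() is a hypothetical caller, not part of this
 * merge.  Assumes <linux/kvm_host.h> for struct kvm and the request helpers.
 */
static void example_kick_all_vcpus(struct kvm *kvm)
{
        /* Previously: kvm_make_all_cpus_request_except(kvm, KVM_REQ_TLB_FLUSH, NULL); */
        kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH);
}

With the last caller that passed a non-NULL "except" vCPU gone, the NULL-forwarding wrapper and the vcpu == except check in the vCPU loop can be deleted outright, which is what the first two hunks below do.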
Diffstat (limited to 'virt')
-rw-r--r--    virt/kvm/kvm_main.c    27
1 file changed, 8 insertions, 19 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0583ce03dace..fb86ec20ebc4 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -311,8 +311,7 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
         return called;
 }

-bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
-                                      struct kvm_vcpu *except)
+bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
 {
         struct kvm_vcpu *vcpu;
         struct cpumask *cpus;
@@ -325,22 +324,14 @@ bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
         cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
         cpumask_clear(cpus);

-        kvm_for_each_vcpu(i, vcpu, kvm) {
-                if (vcpu == except)
-                        continue;
+        kvm_for_each_vcpu(i, vcpu, kvm)
                 kvm_make_vcpu_request(vcpu, req, cpus, me);
-        }

         called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
         put_cpu();

         return called;
 }
-
-bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
-{
-        return kvm_make_all_cpus_request_except(kvm, req, NULL);
-}
 EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);

 void kvm_flush_remote_tlbs(struct kvm *kvm)
@@ -2932,7 +2923,7 @@ out:
 /*
  * Pin guest page in memory and return its pfn.
  * @addr: host virtual address which maps memory to the guest
- * @atomic: whether this function can sleep
+ * @atomic: whether this function is forbidden from sleeping
  * @interruptible: whether the process can be interrupted by non-fatal signals
  * @async: whether this function need to wait IO complete if the
  *         host page is not in the memory
@@ -3004,16 +2995,12 @@ kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
         if (hva)
                 *hva = addr;

-        if (addr == KVM_HVA_ERR_RO_BAD) {
-                if (writable)
-                        *writable = false;
-                return KVM_PFN_ERR_RO_FAULT;
-        }
-
         if (kvm_is_error_hva(addr)) {
                 if (writable)
                         *writable = false;
-                return KVM_PFN_NOSLOT;
+
+                return addr == KVM_HVA_ERR_RO_BAD ? KVM_PFN_ERR_RO_FAULT :
+                                                    KVM_PFN_NOSLOT;
         }

         /* Do not map writable pfn in the readonly memslot. */
@@ -3277,6 +3264,7 @@ static int next_segment(unsigned long len, int offset)
                 return len;
 }

+/* Copy @len bytes from guest memory at '(@gfn * PAGE_SIZE) + @offset' to @data */
 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
                                  void *data, int offset, int len)
 {
@@ -3378,6 +3366,7 @@ int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);

+/* Copy @len bytes from @data into guest memory at '(@gfn * PAGE_SIZE) + @offset' */
 static int __kvm_write_guest_page(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot, gfn_t gfn,
                                   const void *data, int offset, int len)