author     Sean Christopherson <seanjc@google.com>    2024-08-29 12:14:13 -0700
committer  Sean Christopherson <seanjc@google.com>    2024-09-09 20:15:34 -0700
commit     025dde582bbf31e7618f9283594ef5e2408e384b (patch)
tree       d722a6ec8fd040e4029ec6b43336d31131b88799 /virt/kvm
parent     ec495f2ab12290b008a691e826b39b895f458945 (diff)
KVM: Harden guest memory APIs against out-of-bounds accesses
When reading or writing a guest page, WARN and bail if offset+len would result in an access to a different page, so that KVM bugs are more likely to be detected, and so that any such bugs are less likely to escalate to an out-of-bounds access, e.g. if userspace isn't using guard pages and the target page is at the end of a memslot.

Note, KVM already hardens itself in similar APIs, e.g. the "cached" variants; it's just the vanilla APIs that are playing with fire.

Link: https://lore.kernel.org/r/20240829191413.900740-3-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
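For readers who want the check in isolation, here is a minimal user-space sketch of the same idea; page_read(), the local PAGE_SIZE definition, and the calls in main() are illustrative assumptions, not KVM code:

/*
 * Stand-alone analogue of the hardening added by this patch: refuse any
 * access where offset + len would run past the end of a 4 KiB page,
 * rather than letting the copy spill into whatever memory follows it.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static int page_read(const unsigned char *page, void *data, int offset, int len)
{
	/* Counterpart of the WARN_ON_ONCE() + -EFAULT bail-out in the patch. */
	if (offset < 0 || len < 0 || offset + len > PAGE_SIZE) {
		fprintf(stderr, "page_read: rejected out-of-bounds access\n");
		return -EFAULT;
	}

	memcpy(data, page + offset, len);
	return 0;
}

int main(void)
{
	unsigned char page[PAGE_SIZE] = { 0 };
	unsigned char buf[64];

	/* Entirely within the page: succeeds, returns 0. */
	printf("in bounds:     %d\n", page_read(page, buf, PAGE_SIZE - 64, 64));
	/* Straddles the page boundary: now fails instead of over-reading. */
	printf("out of bounds: %d\n", page_read(page, buf, PAGE_SIZE - 32, 64));
	return 0;
}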
Diffstat (limited to 'virt/kvm')
-rw-r--r--   virt/kvm/kvm_main.c   9
1 file changed, 9 insertions, 0 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 04011b94edec..d51357fd28d7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3275,6 +3275,9 @@ static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
 	int r;
 	unsigned long addr;
 
+	if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
+		return -EFAULT;
+
 	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
@@ -3348,6 +3351,9 @@ static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
 	int r;
 	unsigned long addr;
 
+	if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
+		return -EFAULT;
+
 	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
@@ -3378,6 +3384,9 @@ static int __kvm_write_guest_page(struct kvm *kvm,
 	int r;
 	unsigned long addr;
 
+	if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
+		return -EFAULT;
+
 	addr = gfn_to_hva_memslot(memslot, gfn);
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
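With these checks in place, a buggy caller such as the hypothetical one below (the offset and buffer are made up for illustration; only kvm_read_guest_page() itself is a real KVM API) triggers a one-time WARN and gets -EFAULT back instead of reading past the end of the guest page:

	u8 buf[8];
	int r;

	/*
	 * offset + len = PAGE_SIZE + 4, so the access would cross into the
	 * next page; after this patch the call WARNs once and fails cleanly.
	 */
	r = kvm_read_guest_page(kvm, gfn, buf, PAGE_SIZE - 4, sizeof(buf));
	if (r)
		return r;	/* -EFAULT */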