author    Alexander Potapenko <glider@google.com>    2023-04-13 15:12:20 +0200
committer Andrew Morton <akpm@linux-foundation.org> 2023-04-18 14:22:13 -0700
commit    47ebd0310e89c087f56e58c103c44b72a2f6b216
tree      1296475fde0380d30eb2e5d97305cfbb1d605f02 /mm/kmsan
parent    a101482421a318369eef2d0e03f2fcb40a47abad
mm: kmsan: handle alloc failures in kmsan_vmap_pages_range_noflush()
As reported by Dipanjan Das, when KMSAN is used together with kernel fault injection (or, generally, even without the latter), calls to kcalloc() or __vmap_pages_range_noflush() may fail, leaving the metadata mappings for the virtual mapping in an inconsistent state. When these metadata mappings are accessed later, the kernel crashes.

To address the problem, we return a non-zero error code from kmsan_vmap_pages_range_noflush() in the case of any allocation/mapping failure inside it, and make vmap_pages_range_noflush() return an error if KMSAN fails to allocate the metadata. This patch also removes KMSAN_WARN_ON() from kmsan_vmap_pages_range_noflush(), as these allocation failures are not fatal anymore.

Link: https://lkml.kernel.org/r/20230413131223.4135168-1-glider@google.com
Fixes: b073d7f8aee4 ("mm: kmsan: maintain KMSAN metadata for page operations")
Signed-off-by: Alexander Potapenko <glider@google.com>
Reported-by: Dipanjan Das <mail.dipanjan.das@gmail.com>
Link: https://lore.kernel.org/linux-mm/CANX2M5ZRrRA64k0hOif02TjmY9kbbO2aCBPyq79es34RXZ=cAw@mail.gmail.com/
Reviewed-by: Marco Elver <elver@google.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
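Note that the diffstat below is limited to mm/kmsan, so the companion caller-side change in mm/vmalloc.c is not shown here. The following is a minimal sketch of how vmap_pages_range_noflush() might propagate the new error code, based on the commit message above; it is an illustration, not the verbatim mm/vmalloc.c hunk:

int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
                             pgprot_t prot, struct page **pages,
                             unsigned int page_shift)
{
        /*
         * Sketch: set up the KMSAN shadow/origin mappings first. If
         * that fails, fail the whole vmap rather than leave the
         * metadata mappings in an inconsistent state.
         */
        int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
                                                 page_shift);

        if (ret)
                return ret;
        return __vmap_pages_range_noflush(addr, end, prot, pages,
                                          page_shift);
}

With the error propagated this way, a metadata allocation failure makes the whole vmap operation fail cleanly instead of crashing later on an access to half-initialized shadow or origin pages.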
Diffstat (limited to 'mm/kmsan')
-rw-r--r--  mm/kmsan/shadow.c  27 ++++++++++++++++++---------
1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/mm/kmsan/shadow.c b/mm/kmsan/shadow.c
index a787c04e9583..b8bb95eea5e3 100644
--- a/mm/kmsan/shadow.c
+++ b/mm/kmsan/shadow.c
@@ -216,27 +216,29 @@ void kmsan_free_page(struct page *page, unsigned int order)
 	kmsan_leave_runtime();
 }
 
-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
-				    pgprot_t prot, struct page **pages,
-				    unsigned int page_shift)
+int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+				   pgprot_t prot, struct page **pages,
+				   unsigned int page_shift)
 {
 	unsigned long shadow_start, origin_start, shadow_end, origin_end;
 	struct page **s_pages, **o_pages;
-	int nr, mapped;
+	int nr, mapped, err = 0;
 
 	if (!kmsan_enabled)
-		return;
+		return 0;
 
 	shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
 	shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
 	if (!shadow_start)
-		return;
+		return 0;
 
 	nr = (end - start) / PAGE_SIZE;
 	s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
 	o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
-	if (!s_pages || !o_pages)
+	if (!s_pages || !o_pages) {
+		err = -ENOMEM;
 		goto ret;
+	}
 	for (int i = 0; i < nr; i++) {
 		s_pages[i] = shadow_page_for(pages[i]);
 		o_pages[i] = origin_page_for(pages[i]);
@@ -249,10 +251,16 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
 	kmsan_enter_runtime();
 	mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
 					    s_pages, page_shift);
-	KMSAN_WARN_ON(mapped);
+	if (mapped) {
+		err = mapped;
+		goto ret;
+	}
 	mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
 					    o_pages, page_shift);
-	KMSAN_WARN_ON(mapped);
+	if (mapped) {
+		err = mapped;
+		goto ret;
+	}
 	kmsan_leave_runtime();
 	flush_tlb_kernel_range(shadow_start, shadow_end);
 	flush_tlb_kernel_range(origin_start, origin_end);
@@ -262,6 +270,7 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
 ret:
 	kfree(s_pages);
 	kfree(o_pages);
+	return err;
 }
 
 /* Allocate metadata for pages allocated at boot time. */