summaryrefslogtreecommitdiff
path: root/mm
diff options
context:
space:
mode:
authorLorenzo Stoakes (Oracle) <ljs@kernel.org>2026-03-20 22:39:46 +0000
committerAndrew Morton <akpm@linux-foundation.org>2026-04-05 13:53:45 -0700
commit62c65fd740e979a3967db08971b93aefcec510d4 (patch)
treedeb5dbbef8940fa653ba076ae6775a1b5e1dfcb3 /mm
parent933f05f58ac6014eaac387d22a76ace8606891d1 (diff)
downloadlwn-62c65fd740e979a3967db08971b93aefcec510d4.tar.gz
lwn-62c65fd740e979a3967db08971b93aefcec510d4.zip
mm: add mmap_action_map_kernel_pages[_full]()
A user can invoke mmap_action_map_kernel_pages() to specify that the mapping should map kernel pages starting from desc->start of a specified number of pages specified in an array. In order to implement this, adjust mmap_action_prepare() to be able to return an error code, as it makes sense to assert that the specified parameters are valid as quickly as possible as well as updating the VMA flags to include VMA_MIXEDMAP_BIT as necessary. This provides an mmap_prepare equivalent of vm_insert_pages(). We additionally update the existing vm_insert_pages() code to use range_in_vma() and add a new range_in_vma_desc() helper function for the mmap_prepare case, sharing the code between the two in range_is_subset(). We add both mmap_action_map_kernel_pages() and mmap_action_map_kernel_pages_full() to allow for both partial and full VMA mappings. We update the documentation to reflect the new features. Finally, we update the VMA tests accordingly to reflect the changes. Link: https://lkml.kernel.org/r/926ac961690d856e67ec847bee2370ab3c6b9046.1774045440.git.ljs@kernel.org Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org> Reviewed-by: Suren Baghdasaryan <surenb@google.com> Acked-by: Vlastimil Babka (SUSE) <vbabka@kernel.org> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Alexandre Torgue <alexandre.torgue@foss.st.com> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Bodo Stroesser <bostroesser@gmail.com> Cc: Christian Brauner <brauner@kernel.org> Cc: Clemens Ladisch <clemens@ladisch.de> Cc: David Hildenbrand <david@kernel.org> Cc: David Howells <dhowells@redhat.com> Cc: Dexuan Cui <decui@microsoft.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haiyang Zhang <haiyangz@microsoft.com> Cc: Jan Kara <jack@suse.cz> Cc: Jann Horn <jannh@google.com> Cc: Jonathan Corbet <corbet@lwn.net> Cc: K. Y. 
Srinivasan <kys@microsoft.com> Cc: Liam Howlett <liam.howlett@oracle.com> Cc: Long Li <longli@microsoft.com> Cc: Marc Dionne <marc.dionne@auristor.com> Cc: "Martin K. Petersen" <martin.petersen@oracle.com> Cc: Maxime Coquelin <mcoquelin.stm32@gmail.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Mike Rapoport <rppt@kernel.org> Cc: Miquel Raynal <miquel.raynal@bootlin.com> Cc: Pedro Falcato <pfalcato@suse.de> Cc: Richard Weinberger <richard@nod.at> Cc: Ryan Roberts <ryan.roberts@arm.com> Cc: Vignesh Raghavendra <vigneshr@ti.com> Cc: Wei Liu <wei.liu@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c  | 42
-rw-r--r--  mm/util.c    |  7
2 files changed, 45 insertions(+), 4 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index c1c323512939..5d032b5293c6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2484,13 +2484,14 @@ out:
int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
struct page **pages, unsigned long *num)
{
- const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
+ const unsigned long nr_pages = *num;
+ const unsigned long end = addr + PAGE_SIZE * nr_pages;
- if (addr < vma->vm_start || end_addr >= vma->vm_end)
+ if (!range_in_vma(vma, addr, end))
return -EFAULT;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
- BUG_ON(mmap_read_trylock(vma->vm_mm));
- BUG_ON(vma->vm_flags & VM_PFNMAP);
+ VM_WARN_ON_ONCE(mmap_read_trylock(vma->vm_mm));
+ VM_WARN_ON_ONCE(vma->vm_flags & VM_PFNMAP);
vm_flags_set(vma, VM_MIXEDMAP);
}
/* Defer page refcount checking till we're about to map that page. */
@@ -2498,6 +2499,39 @@ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
}
EXPORT_SYMBOL(vm_insert_pages);
+int map_kernel_pages_prepare(struct vm_area_desc *desc)
+{
+ const struct mmap_action *action = &desc->action;
+ const unsigned long addr = action->map_kernel.start;
+ unsigned long nr_pages, end;
+
+ if (!vma_desc_test(desc, VMA_MIXEDMAP_BIT)) {
+ VM_WARN_ON_ONCE(mmap_read_trylock(desc->mm));
+ VM_WARN_ON_ONCE(vma_desc_test(desc, VMA_PFNMAP_BIT));
+ vma_desc_set_flags(desc, VMA_MIXEDMAP_BIT);
+ }
+
+ nr_pages = action->map_kernel.nr_pages;
+ end = addr + PAGE_SIZE * nr_pages;
+ if (!range_in_vma_desc(desc, addr, end))
+ return -EFAULT;
+
+ return 0;
+}
+EXPORT_SYMBOL(map_kernel_pages_prepare);
+
+int map_kernel_pages_complete(struct vm_area_struct *vma,
+ struct mmap_action *action)
+{
+ unsigned long nr_pages;
+
+ nr_pages = action->map_kernel.nr_pages;
+ return insert_pages(vma, action->map_kernel.start,
+ action->map_kernel.pages,
+ &nr_pages, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(map_kernel_pages_complete);
+
/**
* vm_insert_page - insert single page into user vma
* @vma: user vma to map to
diff --git a/mm/util.c b/mm/util.c
index 5ae20876ef2c..f063fd4de1e8 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -1448,6 +1448,8 @@ int mmap_action_prepare(struct vm_area_desc *desc)
return io_remap_pfn_range_prepare(desc);
case MMAP_SIMPLE_IO_REMAP:
return simple_ioremap_prepare(desc);
+ case MMAP_MAP_KERNEL_PAGES:
+ return map_kernel_pages_prepare(desc);
}
WARN_ON_ONCE(1);
@@ -1475,6 +1477,9 @@ int mmap_action_complete(struct vm_area_struct *vma,
case MMAP_REMAP_PFN:
err = remap_pfn_range_complete(vma, action);
break;
+ case MMAP_MAP_KERNEL_PAGES:
+ err = map_kernel_pages_complete(vma, action);
+ break;
case MMAP_IO_REMAP_PFN:
case MMAP_SIMPLE_IO_REMAP:
/* Should have been delegated. */
@@ -1495,6 +1500,7 @@ int mmap_action_prepare(struct vm_area_desc *desc)
case MMAP_REMAP_PFN:
case MMAP_IO_REMAP_PFN:
case MMAP_SIMPLE_IO_REMAP:
+ case MMAP_MAP_KERNEL_PAGES:
WARN_ON_ONCE(1); /* nommu cannot handle these. */
break;
}
@@ -1514,6 +1520,7 @@ int mmap_action_complete(struct vm_area_struct *vma,
case MMAP_REMAP_PFN:
case MMAP_IO_REMAP_PFN:
case MMAP_SIMPLE_IO_REMAP:
+ case MMAP_MAP_KERNEL_PAGES:
WARN_ON_ONCE(1); /* nommu cannot handle this. */
err = -EINVAL;