summary refs log tree commit diff
path: root/mm/memory.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 85
1 file changed, 60 insertions(+), 25 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 10a61dd81f97..c1c323512939 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3170,6 +3170,58 @@ int remap_pfn_range_complete(struct vm_area_struct *vma,
return do_remap_pfn_range(vma, start, pfn, size, prot);
}
+static int __simple_ioremap_prep(unsigned long vm_len, pgoff_t vm_pgoff,
+ phys_addr_t start_phys, unsigned long size,
+ unsigned long *pfnp)
+{
+ unsigned long pfn, pages;
+
+ /* Check that the physical memory area passed in looks valid */
+ if (start_phys + size < start_phys)
+ return -EINVAL;
+ /*
+ * You *really* shouldn't map things that aren't page-aligned,
+ * but we've historically allowed it because IO memory might
+ * just have smaller alignment.
+ */
+ size += start_phys & ~PAGE_MASK;
+ pfn = start_phys >> PAGE_SHIFT;
+ pages = (size + ~PAGE_MASK) >> PAGE_SHIFT;
+ if (pfn + pages < pfn)
+ return -EINVAL;
+
+ /* We start the mapping 'vm_pgoff' pages into the area */
+ if (vm_pgoff > pages)
+ return -EINVAL;
+ pfn += vm_pgoff;
+ pages -= vm_pgoff;
+
+ /* Can we fit all of the mapping? */
+ if ((vm_len >> PAGE_SHIFT) > pages)
+ return -EINVAL;
+
+ *pfnp = pfn;
+ return 0;
+}
+
+int simple_ioremap_prepare(struct vm_area_desc *desc)
+{
+ struct mmap_action *action = &desc->action;
+ const phys_addr_t start = action->simple_ioremap.start_phys_addr;
+ const unsigned long size = action->simple_ioremap.size;
+ unsigned long pfn;
+ int err;
+
+ err = __simple_ioremap_prep(vma_desc_size(desc), desc->pgoff,
+ start, size, &pfn);
+ if (err)
+ return err;
+
+ /* The I/O remap logic does the heavy lifting. */
+ mmap_action_ioremap_full(desc, pfn);
+ return io_remap_pfn_range_prepare(desc);
+}
+
/**
* vm_iomap_memory - remap memory to userspace
* @vma: user vma to map to
@@ -3187,32 +3239,15 @@ int remap_pfn_range_complete(struct vm_area_struct *vma,
*/
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
{
- unsigned long vm_len, pfn, pages;
-
- /* Check that the physical memory area passed in looks valid */
- if (start + len < start)
- return -EINVAL;
- /*
- * You *really* shouldn't map things that aren't page-aligned,
- * but we've historically allowed it because IO memory might
- * just have smaller alignment.
- */
- len += start & ~PAGE_MASK;
- pfn = start >> PAGE_SHIFT;
- pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
- if (pfn + pages < pfn)
- return -EINVAL;
-
- /* We start the mapping 'vm_pgoff' pages into the area */
- if (vma->vm_pgoff > pages)
- return -EINVAL;
- pfn += vma->vm_pgoff;
- pages -= vma->vm_pgoff;
+ const unsigned long vm_start = vma->vm_start;
+ const unsigned long vm_end = vma->vm_end;
+ const unsigned long vm_len = vm_end - vm_start;
+ unsigned long pfn;
+ int err;
- /* Can we fit all of the mapping? */
- vm_len = vma->vm_end - vma->vm_start;
- if (vm_len >> PAGE_SHIFT > pages)
- return -EINVAL;
+ err = __simple_ioremap_prep(vm_len, vma->vm_pgoff, start, len, &pfn);
+ if (err)
+ return err;
/* Ok, let it rip */
return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);