| author | Abdiel Janulgue <abdiel.janulgue@linux.intel.com> | 2019-12-31 20:03:56 +0000 |
|---|---|---|
| committer | Chris Wilson <chris@chris-wilson.co.uk> | 2019-12-31 20:41:50 +0000 |
| commit | 1764b992be0f1c4295d2d9572ec46d568bd3fc14 | |
| tree | 788f80935ebd15df9676dc6a66339062f71e382d | drivers/gpu/drm/i915/i915_mm.c |
| parent | cc39da0e4c5d084b96d44c56aba3d712a83f7567 | |
drm/i915: Introduce remap_io_sg() to prefault discontiguous objects
Provide a way to set the PTE within apply_to_page_range() for
discontiguous objects, in addition to the existing method of just
incrementing the pfn for a contiguous page range.
Fixes: cc662126b413 ("drm/i915: Introduce DRM_I915_GEM_MMAP_OFFSET")
Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191231200356.409475-1-chris@chris-wilson.co.uk
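For context, the new helper is intended to be called from the CPU fault handler added by the DRM_I915_GEM_MMAP_OFFSET work (the Fixes: target above), so a single call can prefault the whole discontiguous object. The diff below is limited to i915_mm.c, so the call site is not shown here; the following is a minimal sketch of such a caller, in which the handler name, object accessor, and error translation are illustrative assumptions rather than part of this patch:

	/* Hypothetical caller sketch: prefault a discontiguous object by
	 * inserting PTEs for the whole vma in one remap_io_sg_page() call.
	 * vm_fault_cpu() and the obj lookup are assumptions for
	 * illustration; the real call site is not part of this diff. */
	static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
	{
		struct vm_area_struct *area = vmf->vma;
		struct drm_i915_gem_object *obj = area->vm_private_data;
		int err;

		/* Walk the object's sg_table and insert one PTE per page,
		 * instead of incrementing a single base pfn as
		 * remap_io_mapping() does for contiguous ranges. */
		err = remap_io_sg_page(area, area->vm_start,
				       area->vm_end - area->vm_start,
				       obj->mm.pages->sgl);

		/* Simplified error handling for the sketch */
		return err ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
	}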
Diffstat (limited to 'drivers/gpu/drm/i915/i915_mm.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_mm.c | 59 |
1 file changed, 59 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 318562ce64c0..2998689e6d42 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -33,6 +33,8 @@ struct remap_pfn {
 	struct mm_struct *mm;
 	unsigned long pfn;
 	pgprot_t prot;
+
+	struct sgt_iter sgt;
 };
 
 static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
@@ -46,6 +48,30 @@ static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
 	return 0;
 }
 
+static inline unsigned long sgt_pfn(const struct sgt_iter *sgt)
+{
+	return sgt->pfn + (sgt->curr >> PAGE_SHIFT);
+}
+
+static int remap_sg_page(pte_t *pte, unsigned long addr, void *data)
+{
+	struct remap_pfn *r = data;
+
+	if (GEM_WARN_ON(!r->sgt.pfn))
+		return -EINVAL;
+
+	/* Special PTE are not associated with any struct page */
+	set_pte_at(r->mm, addr, pte,
+		   pte_mkspecial(pfn_pte(sgt_pfn(&r->sgt), r->prot)));
+	r->pfn++; /* track insertions in case we need to unwind later */
+
+	r->sgt.curr += PAGE_SIZE;
+	if (r->sgt.curr >= r->sgt.max)
+		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), false);
+
+	return 0;
+}
+
 /**
  * remap_io_mapping - remap an IO mapping to userspace
  * @vma: user vma to map to
@@ -80,3 +106,36 @@ int remap_io_mapping(struct vm_area_struct *vma,
 
 	return 0;
 }
+
+/**
+ * remap_io_sg_page - remap an IO mapping to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @size: size of map area
+ * @sgl: Start sg entry
+ *
+ * Note: this is only safe if the mm semaphore is held when called.
+ */
+int remap_io_sg_page(struct vm_area_struct *vma,
+		     unsigned long addr, unsigned long size,
+		     struct scatterlist *sgl)
+{
+	struct remap_pfn r = {
+		.mm = vma->vm_mm,
+		.prot = vma->vm_page_prot,
+		.sgt = __sgt_iter(sgl, false),
+	};
+	int err;
+
+	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
+
+	flush_cache_range(vma, addr, size);
+	err = apply_to_page_range(r.mm, addr, size, remap_sg_page, &r);
+	if (unlikely(err)) {
+		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
+		return err;
+	}
+
+	return 0;
+}
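The core of remap_sg_page() is the iterator step: consume the current scatterlist segment one page at a time, then hop to the next segment once it is exhausted. That walking pattern can be seen in isolation in the standalone sketch below — plain userspace C with simplified stand-ins for struct scatterlist and struct sgt_iter; all names in it are illustrative, not the kernel's:

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	/* Simplified stand-ins for struct scatterlist / struct sgt_iter */
	struct sg_entry {
		unsigned long pfn;	/* first page frame of this segment */
		unsigned long len;	/* segment length in bytes */
	};

	struct iter {
		const struct sg_entry *sg;
		unsigned long curr;	/* byte offset within current segment */
	};

	/* Mirror of sgt_pfn(): base pfn plus pages already consumed */
	static unsigned long iter_pfn(const struct iter *it)
	{
		return it->sg->pfn + (it->curr >> PAGE_SHIFT);
	}

	int main(void)
	{
		/* Two discontiguous segments: pfns 100..101 and 500..502 */
		const struct sg_entry sg[] = {
			{ .pfn = 100, .len = 2 * PAGE_SIZE },
			{ .pfn = 500, .len = 3 * PAGE_SIZE },
			{ 0 },
		};
		struct iter it = { .sg = sg, .curr = 0 };

		/* Same stepping as remap_sg_page(): one pfn per PTE,
		 * advancing to the next segment when the current one
		 * runs out of bytes. */
		while (it.sg->len) {
			printf("insert pte for pfn %lu\n", iter_pfn(&it));
			it.curr += PAGE_SIZE;
			if (it.curr >= it.sg->len) {
				it.sg++;
				it.curr = 0;
			}
		}
		return 0;
	}

Run against the two segments above, this prints pfns 100, 101, 500, 501, 502 — the per-PTE order in which remap_sg_page() would insert them, which is exactly what the pfn-increment scheme of remap_pfn() cannot express for discontiguous backing storage.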