From 0adec22702d497385dbdc52abb165f379a00efba Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 13 Jun 2023 16:51:33 +0200 Subject: drm: Remove struct drm_driver.gem_prime_mmap All drivers initialize this field with drm_gem_prime_mmap(). Call the function directly and remove the field. Simplifies the code and resolves a long-standing TODO item. Signed-off-by: Thomas Zimmermann Reviewed-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20230613150441.17720-3-tzimmermann@suse.de --- include/drm/drm_drv.h | 14 -------------- include/drm/drm_gem_dma_helper.h | 6 ++---- include/drm/drm_gem_shmem_helper.h | 1 - include/drm/drm_gem_vram_helper.h | 1 - 4 files changed, 2 insertions(+), 20 deletions(-) (limited to 'include/drm') diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 89e2706cac56..870278ecd8ba 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -343,20 +343,6 @@ struct drm_driver { struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); - /** - * @gem_prime_mmap: - * - * mmap hook for GEM drivers, used to implement dma-buf mmap in the - * PRIME helpers. - * - * This hook only exists for historical reasons. Drivers must use - * drm_gem_prime_mmap() to implement it. - * - * FIXME: Convert all drivers to implement mmap in struct - * &drm_gem_object_funcs and inline drm_gem_prime_mmap() into - * its callers. This hook should be removed afterwards. - */ - int (*gem_prime_mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma); /** * @dumb_create: diff --git a/include/drm/drm_gem_dma_helper.h b/include/drm/drm_gem_dma_helper.h index 8a043235dad8..61da596780b6 100644 --- a/include/drm/drm_gem_dma_helper.h +++ b/include/drm/drm_gem_dma_helper.h @@ -169,8 +169,7 @@ drm_gem_dma_prime_import_sg_table(struct drm_device *dev, .dumb_create = (dumb_create_func), \ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ - .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table, \ - .gem_prime_mmap = drm_gem_prime_mmap + .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table /** * DRM_GEM_DMA_DRIVER_OPS - DMA GEM driver operations @@ -207,8 +206,7 @@ drm_gem_dma_prime_import_sg_table(struct drm_device *dev, .dumb_create = dumb_create_func, \ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ - .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap, \ - .gem_prime_mmap = drm_gem_prime_mmap + .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap /** * DRM_GEM_DMA_DRIVER_OPS_VMAP - DMA GEM driver operations ensuring a virtual diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index 5994fed5e327..46eb46e69063 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -293,7 +293,6 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \ - .gem_prime_mmap = drm_gem_prime_mmap, \ .dumb_create = drm_gem_shmem_dumb_create #endif /* __DRM_GEM_SHMEM_HELPER_H__ */ diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h index f4aab64411d8..6b265cb9f45a 100644 --- a/include/drm/drm_gem_vram_helper.h +++ b/include/drm/drm_gem_vram_helper.h @@ -160,7 +160,6 @@ void 
drm_gem_vram_simple_display_pipe_cleanup_fb( .debugfs_init = drm_vram_mm_debugfs_init, \ .dumb_create = drm_gem_vram_driver_dumb_create, \ .dumb_map_offset = drm_gem_ttm_dumb_map_offset, \ - .gem_prime_mmap = drm_gem_prime_mmap, \ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ .prime_fd_to_handle = drm_gem_prime_fd_to_handle -- cgit v1.2.3 From 21aa27ddc58269349597c6d243212bcc4065d277 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Tue, 30 May 2023 01:39:35 +0300 Subject: drm/shmem-helper: Switch to reservation lock Replace all drm-shmem locks with a GEM reservation lock. This makes locks consistent with dma-buf locking convention where importers are responsible for holding reservation lock for all operations performed over dma-bufs, preventing deadlock between dma-buf importers and exporters. Suggested-by: Daniel Vetter Acked-by: Thomas Zimmermann Reviewed-by: Emil Velikov Signed-off-by: Dmitry Osipenko Link: https://patchwork.freedesktop.org/patch/msgid/20230529223935.2672495-7-dmitry.osipenko@collabora.com --- drivers/gpu/drm/drm_gem_shmem_helper.c | 210 ++++++++++------------- drivers/gpu/drm/lima/lima_gem.c | 8 +- drivers/gpu/drm/panfrost/panfrost_drv.c | 7 +- drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c | 6 +- drivers/gpu/drm/panfrost/panfrost_mmu.c | 19 +- include/drm/drm_gem_shmem_helper.h | 14 +- 6 files changed, 116 insertions(+), 148 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 4ea6507a77e5..a783d2245599 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -88,8 +88,6 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private) if (ret) goto err_release; - mutex_init(&shmem->pages_lock); - mutex_init(&shmem->vmap_lock); INIT_LIST_HEAD(&shmem->madv_list); if (!private) { @@ -141,11 +139,13 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; - drm_WARN_ON(obj->dev, shmem->vmap_use_count); - if (obj->import_attach) { drm_prime_gem_destroy(obj, shmem->sgt); } else { + dma_resv_lock(shmem->base.resv, NULL); + + drm_WARN_ON(obj->dev, shmem->vmap_use_count); + if (shmem->sgt) { dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0); @@ -154,22 +154,24 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) } if (shmem->pages) drm_gem_shmem_put_pages(shmem); - } - drm_WARN_ON(obj->dev, shmem->pages_use_count); + drm_WARN_ON(obj->dev, shmem->pages_use_count); + + dma_resv_unlock(shmem->base.resv); + } drm_gem_object_release(obj); - mutex_destroy(&shmem->pages_lock); - mutex_destroy(&shmem->vmap_lock); kfree(shmem); } EXPORT_SYMBOL_GPL(drm_gem_shmem_free); -static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) +static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; struct page **pages; + dma_resv_assert_held(shmem->base.resv); + if (shmem->pages_use_count++ > 0) return 0; @@ -197,35 +199,16 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) } /* - * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object + * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object * @shmem: shmem GEM object * - * This function makes sure that backing pages exists for the shmem GEM object - * and increases the use count. - * - * Returns: - * 0 on success or a negative error code on failure. 
+ * This function decreases the use count and puts the backing pages when use drops to zero. */ -int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem) +void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; - int ret; - drm_WARN_ON(obj->dev, obj->import_attach); - - ret = mutex_lock_interruptible(&shmem->pages_lock); - if (ret) - return ret; - ret = drm_gem_shmem_get_pages_locked(shmem); - mutex_unlock(&shmem->pages_lock); - - return ret; -} -EXPORT_SYMBOL(drm_gem_shmem_get_pages); - -static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem) -{ - struct drm_gem_object *obj = &shmem->base; + dma_resv_assert_held(shmem->base.resv); if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count)) return; @@ -243,20 +226,25 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem) shmem->pages_mark_accessed_on_put); shmem->pages = NULL; } +EXPORT_SYMBOL(drm_gem_shmem_put_pages); -/* - * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object - * @shmem: shmem GEM object - * - * This function decreases the use count and puts the backing pages when use drops to zero. - */ -void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem) +static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem) { - mutex_lock(&shmem->pages_lock); - drm_gem_shmem_put_pages_locked(shmem); - mutex_unlock(&shmem->pages_lock); + int ret; + + dma_resv_assert_held(shmem->base.resv); + + ret = drm_gem_shmem_get_pages(shmem); + + return ret; +} + +static void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem) +{ + dma_resv_assert_held(shmem->base.resv); + + drm_gem_shmem_put_pages(shmem); } -EXPORT_SYMBOL(drm_gem_shmem_put_pages); /** * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object @@ -271,10 +259,17 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages); int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; + int ret; drm_WARN_ON(obj->dev, obj->import_attach); - return drm_gem_shmem_get_pages(shmem); + ret = dma_resv_lock_interruptible(shmem->base.resv, NULL); + if (ret) + return ret; + ret = drm_gem_shmem_pin_locked(shmem); + dma_resv_unlock(shmem->base.resv); + + return ret; } EXPORT_SYMBOL(drm_gem_shmem_pin); @@ -291,12 +286,29 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem) drm_WARN_ON(obj->dev, obj->import_attach); - drm_gem_shmem_put_pages(shmem); + dma_resv_lock(shmem->base.resv, NULL); + drm_gem_shmem_unpin_locked(shmem); + dma_resv_unlock(shmem->base.resv); } EXPORT_SYMBOL(drm_gem_shmem_unpin); -static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, - struct iosys_map *map) +/* + * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object + * @shmem: shmem GEM object + * @map: Returns the kernel virtual address of the SHMEM GEM object's backing + * store. + * + * This function makes sure that a contiguous kernel virtual address mapping + * exists for the buffer backing the shmem GEM object. It hides the differences + * between dma-buf imported and natively allocated objects. + * + * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap(). + * + * Returns: + * 0 on success or a negative error code on failure. 
+ */ +int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, + struct iosys_map *map) { struct drm_gem_object *obj = &shmem->base; int ret = 0; @@ -312,6 +324,8 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, } else { pgprot_t prot = PAGE_KERNEL; + dma_resv_assert_held(shmem->base.resv); + if (shmem->vmap_use_count++ > 0) { iosys_map_set_vaddr(map, shmem->vaddr); return 0; @@ -346,45 +360,30 @@ err_zero_use: return ret; } +EXPORT_SYMBOL(drm_gem_shmem_vmap); /* - * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object + * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object * @shmem: shmem GEM object - * @map: Returns the kernel virtual address of the SHMEM GEM object's backing - * store. - * - * This function makes sure that a contiguous kernel virtual address mapping - * exists for the buffer backing the shmem GEM object. It hides the differences - * between dma-buf imported and natively allocated objects. + * @map: Kernel virtual address where the SHMEM GEM object was mapped * - * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap(). + * This function cleans up a kernel virtual address mapping acquired by + * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to + * zero. * - * Returns: - * 0 on success or a negative error code on failure. + * This function hides the differences between dma-buf imported and natively + * allocated objects. */ -int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, - struct iosys_map *map) -{ - int ret; - - ret = mutex_lock_interruptible(&shmem->vmap_lock); - if (ret) - return ret; - ret = drm_gem_shmem_vmap_locked(shmem, map); - mutex_unlock(&shmem->vmap_lock); - - return ret; -} -EXPORT_SYMBOL(drm_gem_shmem_vmap); - -static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, - struct iosys_map *map) +void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, + struct iosys_map *map) { struct drm_gem_object *obj = &shmem->base; if (obj->import_attach) { dma_buf_vunmap(obj->import_attach->dmabuf, map); } else { + dma_resv_assert_held(shmem->base.resv); + if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count)) return; @@ -397,26 +396,6 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, shmem->vaddr = NULL; } - -/* - * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object - * @shmem: shmem GEM object - * @map: Kernel virtual address where the SHMEM GEM object was mapped - * - * This function cleans up a kernel virtual address mapping acquired by - * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to - * zero. - * - * This function hides the differences between dma-buf imported and natively - * allocated objects. 
- */ -void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, - struct iosys_map *map) -{ - mutex_lock(&shmem->vmap_lock); - drm_gem_shmem_vunmap_locked(shmem, map); - mutex_unlock(&shmem->vmap_lock); -} EXPORT_SYMBOL(drm_gem_shmem_vunmap); static int @@ -447,24 +426,24 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv, */ int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv) { - mutex_lock(&shmem->pages_lock); + dma_resv_assert_held(shmem->base.resv); if (shmem->madv >= 0) shmem->madv = madv; madv = shmem->madv; - mutex_unlock(&shmem->pages_lock); - return (madv >= 0); } EXPORT_SYMBOL(drm_gem_shmem_madvise); -void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem) +void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; struct drm_device *dev = obj->dev; + dma_resv_assert_held(shmem->base.resv); + drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem)); dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0); @@ -472,7 +451,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem) kfree(shmem->sgt); shmem->sgt = NULL; - drm_gem_shmem_put_pages_locked(shmem); + drm_gem_shmem_put_pages(shmem); shmem->madv = -1; @@ -488,17 +467,6 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem) invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1); } -EXPORT_SYMBOL(drm_gem_shmem_purge_locked); - -bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem) -{ - if (!mutex_trylock(&shmem->pages_lock)) - return false; - drm_gem_shmem_purge_locked(shmem); - mutex_unlock(&shmem->pages_lock); - - return true; -} EXPORT_SYMBOL(drm_gem_shmem_purge); /** @@ -551,7 +519,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) /* We don't use vmf->pgoff since that has the fake offset */ page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; - mutex_lock(&shmem->pages_lock); + dma_resv_lock(shmem->base.resv, NULL); if (page_offset >= num_pages || drm_WARN_ON_ONCE(obj->dev, !shmem->pages) || @@ -563,7 +531,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page)); } - mutex_unlock(&shmem->pages_lock); + dma_resv_unlock(shmem->base.resv); return ret; } @@ -575,7 +543,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) drm_WARN_ON(obj->dev, obj->import_attach); - mutex_lock(&shmem->pages_lock); + dma_resv_lock(shmem->base.resv, NULL); /* * We should have already pinned the pages when the buffer was first @@ -585,7 +553,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count)) shmem->pages_use_count++; - mutex_unlock(&shmem->pages_lock); + dma_resv_unlock(shmem->base.resv); drm_gem_vm_open(vma); } @@ -595,7 +563,10 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma) struct drm_gem_object *obj = vma->vm_private_data; struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + dma_resv_lock(shmem->base.resv, NULL); drm_gem_shmem_put_pages(shmem); + dma_resv_unlock(shmem->base.resv); + drm_gem_vm_close(vma); } @@ -633,7 +604,10 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct return ret; } + dma_resv_lock(shmem->base.resv, NULL); ret = drm_gem_shmem_get_pages(shmem); + dma_resv_unlock(shmem->base.resv); + if (ret) return ret; @@ -699,7 +673,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_ drm_WARN_ON(obj->dev, 
obj->import_attach); - ret = drm_gem_shmem_get_pages_locked(shmem); + ret = drm_gem_shmem_get_pages(shmem); if (ret) return ERR_PTR(ret); @@ -721,7 +695,7 @@ err_free_sgt: sg_free_table(sgt); kfree(sgt); err_put_pages: - drm_gem_shmem_put_pages_locked(shmem); + drm_gem_shmem_put_pages(shmem); return ERR_PTR(ret); } @@ -746,11 +720,11 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem) int ret; struct sg_table *sgt; - ret = mutex_lock_interruptible(&shmem->pages_lock); + ret = dma_resv_lock_interruptible(shmem->base.resv, NULL); if (ret) return ERR_PTR(ret); sgt = drm_gem_shmem_get_pages_sgt_locked(shmem); - mutex_unlock(&shmem->pages_lock); + dma_resv_unlock(shmem->base.resv); return sgt; } diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 10252dc11a22..4f9736e5f929 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -34,7 +34,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) new_size = min(new_size, bo->base.base.size); - mutex_lock(&bo->base.pages_lock); + dma_resv_lock(bo->base.base.resv, NULL); if (bo->base.pages) { pages = bo->base.pages; @@ -42,7 +42,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL | __GFP_ZERO); if (!pages) { - mutex_unlock(&bo->base.pages_lock); + dma_resv_unlock(bo->base.base.resv); return -ENOMEM; } @@ -56,13 +56,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) struct page *page = shmem_read_mapping_page(mapping, i); if (IS_ERR(page)) { - mutex_unlock(&bo->base.pages_lock); + dma_resv_unlock(bo->base.base.resv); return PTR_ERR(page); } pages[i] = page; } - mutex_unlock(&bo->base.pages_lock); + dma_resv_unlock(bo->base.base.resv); ret = sg_alloc_table_from_pages(&sgt, pages, i, 0, new_size, GFP_KERNEL); diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index d2916bf43547..598ad1dbe6e1 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -407,6 +407,10 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, bo = to_panfrost_bo(gem_obj); + ret = dma_resv_lock_interruptible(bo->base.base.resv, NULL); + if (ret) + goto out_put_object; + mutex_lock(&pfdev->shrinker_lock); mutex_lock(&bo->mappings.lock); if (args->madv == PANFROST_MADV_DONTNEED) { @@ -444,7 +448,8 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, out_unlock_mappings: mutex_unlock(&bo->mappings.lock); mutex_unlock(&pfdev->shrinker_lock); - + dma_resv_unlock(bo->base.base.resv); +out_put_object: drm_gem_object_put(gem_obj); return ret; } diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c index bf0170782f25..6a71a2555f85 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c @@ -48,14 +48,14 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj) if (!mutex_trylock(&bo->mappings.lock)) return false; - if (!mutex_trylock(&shmem->pages_lock)) + if (!dma_resv_trylock(shmem->base.resv)) goto unlock_mappings; panfrost_gem_teardown_mappings_locked(bo); - drm_gem_shmem_purge_locked(&bo->base); + drm_gem_shmem_purge(&bo->base); ret = true; - mutex_unlock(&shmem->pages_lock); + dma_resv_unlock(shmem->base.resv); unlock_mappings: mutex_unlock(&bo->mappings.lock); diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
b/drivers/gpu/drm/panfrost/panfrost_mmu.c index e961fa27702c..c0123d09f699 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -443,6 +443,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, struct panfrost_gem_mapping *bomapping; struct panfrost_gem_object *bo; struct address_space *mapping; + struct drm_gem_object *obj; pgoff_t page_offset; struct sg_table *sgt; struct page **pages; @@ -465,15 +466,16 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, page_offset = addr >> PAGE_SHIFT; page_offset -= bomapping->mmnode.start; - mutex_lock(&bo->base.pages_lock); + obj = &bo->base.base; + + dma_resv_lock(obj->resv, NULL); if (!bo->base.pages) { bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M, sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO); if (!bo->sgts) { - mutex_unlock(&bo->base.pages_lock); ret = -ENOMEM; - goto err_bo; + goto err_unlock; } pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT, @@ -481,9 +483,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, if (!pages) { kvfree(bo->sgts); bo->sgts = NULL; - mutex_unlock(&bo->base.pages_lock); ret = -ENOMEM; - goto err_bo; + goto err_unlock; } bo->base.pages = pages; bo->base.pages_use_count = 1; @@ -491,7 +492,6 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, pages = bo->base.pages; if (pages[page_offset]) { /* Pages are already mapped, bail out. */ - mutex_unlock(&bo->base.pages_lock); goto out; } } @@ -502,15 +502,12 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) { pages[i] = shmem_read_mapping_page(mapping, i); if (IS_ERR(pages[i])) { - mutex_unlock(&bo->base.pages_lock); ret = PTR_ERR(pages[i]); pages[i] = NULL; goto err_pages; } } - mutex_unlock(&bo->base.pages_lock); - sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)]; ret = sg_alloc_table_from_pages(sgt, pages + page_offset, NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL); @@ -529,6 +526,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr); out: + dma_resv_unlock(obj->resv); + panfrost_gem_mapping_put(bomapping); return 0; @@ -537,6 +536,8 @@ err_map: sg_free_table(sgt); err_pages: drm_gem_shmem_put_pages(&bo->base); +err_unlock: + dma_resv_unlock(obj->resv); err_bo: panfrost_gem_mapping_put(bomapping); return ret; diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index 46eb46e69063..2867d2aba88b 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -26,11 +26,6 @@ struct drm_gem_shmem_object { */ struct drm_gem_object base; - /** - * @pages_lock: Protects the page table and use count - */ - struct mutex pages_lock; - /** * @pages: Page table */ @@ -65,11 +60,6 @@ struct drm_gem_shmem_object { */ struct sg_table *sgt; - /** - * @vmap_lock: Protects the vmap address and use count - */ - struct mutex vmap_lock; - /** * @vaddr: Kernel virtual address of the backing memory */ @@ -109,7 +99,6 @@ struct drm_gem_shmem_object { struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size); void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem); -int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem); int drm_gem_shmem_pin(struct drm_gem_shmem_object 
*shmem); void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem); @@ -128,8 +117,7 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem !shmem->base.dma_buf && !shmem->base.import_attach; } -void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem); -bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem); +void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem); struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem); struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem); -- cgit v1.2.3 From 0cf8d292ba5ed90c7873ea84270deaecc1988f05 Mon Sep 17 00:00:00 2001 From: Simon Ser Date: Tue, 20 Jun 2023 17:42:42 +0000 Subject: drm/sysfs: rename drm_sysfs_connector_status_event() Rename drm_sysfs_connector_status_event() to drm_sysfs_connector_property_event(). Indeed, "status" is a bit vague: it can easily be confused with the connected/disconnected status of the connector. This function has nothing to do with connected/disconnected: it merely sends a notification that a connector's property has changed (e.g. HDCP, privacy screen, etc). Signed-off-by: Simon Ser Cc: Manasi Navare Cc: Sam Ravnborg Cc: Harry Wentland Cc: Daniel Vetter Reviewed-by: Sam Ravnborg Link: https://patchwork.freedesktop.org/patch/msgid/20230620174231.260335-1-contact@emersion.fr --- drivers/gpu/drm/display/drm_hdcp_helper.c | 4 ++-- drivers/gpu/drm/drm_connector.c | 8 ++++---- drivers/gpu/drm/drm_sysfs.c | 23 +++++++++++++---------- include/drm/drm_sysfs.h | 4 ++-- 4 files changed, 21 insertions(+), 18 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/display/drm_hdcp_helper.c b/drivers/gpu/drm/display/drm_hdcp_helper.c index e78999c72bd7..a3f0e6d96105 100644 --- a/drivers/gpu/drm/display/drm_hdcp_helper.c +++ b/drivers/gpu/drm/display/drm_hdcp_helper.c @@ -415,7 +415,7 @@ void drm_hdcp_update_content_protection(struct drm_connector *connector, return; state->content_protection = val; - drm_sysfs_connector_status_event(connector, - dev->mode_config.content_protection_property); + drm_sysfs_connector_property_event(connector, + dev->mode_config.content_protection_property); } EXPORT_SYMBOL(drm_hdcp_update_content_protection); diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 3ed4cfcb350c..bf8371dc2a61 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -2730,10 +2730,10 @@ static int drm_connector_privacy_screen_notifier( drm_connector_update_privacy_screen_properties(connector, true); drm_modeset_unlock(&dev->mode_config.connection_mutex); - drm_sysfs_connector_status_event(connector, - connector->privacy_screen_sw_state_property); - drm_sysfs_connector_status_event(connector, - connector->privacy_screen_hw_state_property); + drm_sysfs_connector_property_event(connector, + connector->privacy_screen_sw_state_property); + drm_sysfs_connector_property_event(connector, + connector->privacy_screen_hw_state_property); return NOTIFY_DONE; } diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index f62767ff34b2..b169b3e44a92 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -487,17 +487,17 @@ void drm_sysfs_connector_hotplug_event(struct drm_connector *connector) EXPORT_SYMBOL(drm_sysfs_connector_hotplug_event); /** - * drm_sysfs_connector_status_event - generate a DRM uevent for connector - * property status change - * @connector: connector on which property status changed - * @property: 
connector property whose status changed. + * drm_sysfs_connector_property_event - generate a DRM uevent for connector + * property change + * @connector: connector on which property changed + * @property: connector property which has changed. * - * Send a uevent for the DRM device specified by @dev. Currently we + * Send a uevent for the specified DRM connector and property. Currently we * set HOTPLUG=1 and connector id along with the attached property id - * related to the status change. + * related to the change. */ -void drm_sysfs_connector_status_event(struct drm_connector *connector, - struct drm_property *property) +void drm_sysfs_connector_property_event(struct drm_connector *connector, + struct drm_property *property) { struct drm_device *dev = connector->dev; char hotplug_str[] = "HOTPLUG=1", conn_id[21], prop_id[21]; @@ -511,11 +511,14 @@ void drm_sysfs_connector_status_event(struct drm_connector *connector, snprintf(prop_id, ARRAY_SIZE(prop_id), "PROPERTY=%u", property->base.id); - DRM_DEBUG("generating connector status event\n"); + drm_dbg_kms(connector->dev, + "[CONNECTOR:%d:%s] generating connector property event for [PROP:%d:%s]\n", + connector->base.id, connector->name, + property->base.id, property->name); kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp); } -EXPORT_SYMBOL(drm_sysfs_connector_status_event); +EXPORT_SYMBOL(drm_sysfs_connector_property_event); struct device *drm_sysfs_minor_alloc(struct drm_minor *minor) { diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h index 6273cac44e47..96a5d858404b 100644 --- a/include/drm/drm_sysfs.h +++ b/include/drm/drm_sysfs.h @@ -12,6 +12,6 @@ void drm_class_device_unregister(struct device *dev); void drm_sysfs_hotplug_event(struct drm_device *dev); void drm_sysfs_connector_hotplug_event(struct drm_connector *connector); -void drm_sysfs_connector_status_event(struct drm_connector *connector, - struct drm_property *property); +void drm_sysfs_connector_property_event(struct drm_connector *connector, + struct drm_property *property); #endif -- cgit v1.2.3 From 6b85aa68d9d5a27556b8b1015e7e515a371e77de Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 20 Jun 2023 09:59:57 +0200 Subject: drm: Enable PRIME import/export for all drivers Call drm_gem_prime_handle_to_fd() and drm_gem_prime_fd_to_handle() by default if no PRIME import/export helpers have been set. Both functions are the default for almost all drivers. DRM drivers implement struct drm_driver.gem_prime_import_sg_table to import dma-buf objects from other drivers. Having drm_gem_prime_fd_to_handle() set by default allows each driver to import dma-buf objects to itself, even without support for other drivers. For drm_gem_prime_handle_to_fd() it is similar: using it by default allows each driver to export to itself, even without support for other drivers. This functionality enables userspace to share per-driver buffers across process boundaries via PRIME (e.g., wlroots requires this functionality). The patch generalizes a pattern that has previously been implemented by the GEM VRAM helpers [1] to work with any driver. For example, gma500 can now run the wlroots-based sway compositor.
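As an illustration (a sketch, not part of the patch; the "foo" driver name and fops are hypothetical), a GEM driver definition can now be as minimal as this and still support PRIME self-import/export:

	static const struct drm_driver foo_drm_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
		/* importing from *other* drivers still needs this hook */
		.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
		.dumb_create = drm_gem_shmem_dumb_create,
		.fops = &foo_fops,
		.name = "foo",
		/*
		 * No .prime_handle_to_fd/.prime_fd_to_handle: the PRIME
		 * ioctls now fall back to drm_gem_prime_handle_to_fd() and
		 * drm_gem_prime_fd_to_handle() by default.
		 */
	};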
v2: * clean up docs and TODO comments (Simon, Zack) * clean up style in drm_getcap() Signed-off-by: Thomas Zimmermann Link: https://lore.kernel.org/dri-devel/20230302143502.500661-1-contact@emersion.fr/ # 1 Reviewed-by: Simon Ser Acked-by: Alex Deucher Reviewed-by: Jeffrey Hugo Link: https://patchwork.freedesktop.org/patch/msgid/20230620080252.16368-2-tzimmermann@suse.de --- drivers/gpu/drm/drm_ioctl.c | 3 +-- drivers/gpu/drm/drm_prime.c | 21 ++++++++++++--------- include/drm/drm_drv.h | 12 ++---------- 3 files changed, 15 insertions(+), 21 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 7c9d66ee917d..8e9afe7af19c 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -245,8 +245,7 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_ req->value = 1; return 0; case DRM_CAP_PRIME: - req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0; - req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0; + req->value = DRM_PRIME_CAP_IMPORT | DRM_PRIME_CAP_EXPORT; return 0; case DRM_CAP_SYNCOBJ: req->value = drm_core_check_feature(dev, DRIVER_SYNCOBJ); diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index f9d4e228b09e..7b48e0b0284a 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -372,11 +372,12 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, { struct drm_prime_handle *args = data; - if (!dev->driver->prime_fd_to_handle) - return -ENOSYS; + if (dev->driver->prime_fd_to_handle) { + return dev->driver->prime_fd_to_handle(dev, file_priv, args->fd, + &args->handle); + } - return dev->driver->prime_fd_to_handle(dev, file_priv, - args->fd, &args->handle); + return drm_gem_prime_fd_to_handle(dev, file_priv, args->fd, &args->handle); } static struct dma_buf *export_and_register_object(struct drm_device *dev, @@ -518,15 +519,17 @@ int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, { struct drm_prime_handle *args = data; - if (!dev->driver->prime_handle_to_fd) - return -ENOSYS; - /* check flags are valid */ if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR)) return -EINVAL; - return dev->driver->prime_handle_to_fd(dev, file_priv, - args->handle, args->flags, &args->fd); + if (dev->driver->prime_handle_to_fd) { + return dev->driver->prime_handle_to_fd(dev, file_priv, + args->handle, args->flags, + &args->fd); + } + return drm_gem_prime_handle_to_fd(dev, file_priv, args->handle, + args->flags, &args->fd); } /** diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 870278ecd8ba..b77f2c7275b7 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -304,22 +304,14 @@ struct drm_driver { /** * @prime_handle_to_fd: * - * Main PRIME export function. Should be implemented with - * drm_gem_prime_handle_to_fd() for GEM based drivers. - * - * For an in-depth discussion see :ref:`PRIME buffer sharing - * documentation `. + * PRIME export function. Only used by vmwgfx. */ int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd); /** * @prime_fd_to_handle: * - * Main PRIME import function. Should be implemented with - * drm_gem_prime_fd_to_handle() for GEM based drivers. - * - * For an in-depth discussion see :ref:`PRIME buffer sharing - * documentation `. + * PRIME import function. Only used by vmwgfx. 
*/ int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv, int prime_fd, uint32_t *handle); -- cgit v1.2.3 From 71e801b9b44f86ce8c816b06960c705f901c50e5 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 20 Jun 2023 09:59:58 +0200 Subject: drm: Clear fd/handle callbacks in struct drm_driver Clear all assignments of struct drm_driver's fd/handle callbacks to drm_gem_prime_fd_to_handle() and drm_gem_prime_handle_to_fd(). These functions are called by default. Add a TODO item to convert vmwgfx to the defaults as well. v2: * remove TODO item (Zack) * also update amdgpu's amdgpu_partition_driver Signed-off-by: Thomas Zimmermann Reviewed-by: Simon Ser Acked-by: Alex Deucher Acked-by: Jeffrey Hugo # qaic Link: https://patchwork.freedesktop.org/patch/msgid/20230620080252.16368-3-tzimmermann@suse.de --- drivers/accel/ivpu/ivpu_drv.c | 2 -- drivers/accel/qaic/qaic_drv.c | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 ---- drivers/gpu/drm/armada/armada_drv.c | 2 -- drivers/gpu/drm/drm_prime.c | 13 ++++--------- drivers/gpu/drm/etnaviv/etnaviv_drv.c | 2 -- drivers/gpu/drm/exynos/exynos_drm_drv.c | 2 -- drivers/gpu/drm/i915/i915_driver.c | 2 -- drivers/gpu/drm/lima/lima_drv.c | 2 -- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 2 -- drivers/gpu/drm/msm/msm_drv.c | 2 -- drivers/gpu/drm/nouveau/nouveau_drm.c | 2 -- drivers/gpu/drm/omapdrm/omap_drv.c | 2 -- drivers/gpu/drm/panfrost/panfrost_drv.c | 2 -- drivers/gpu/drm/pl111/pl111_drv.c | 2 -- drivers/gpu/drm/qxl/qxl_drv.c | 2 -- drivers/gpu/drm/radeon/radeon_drv.c | 2 -- drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c | 2 -- drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 2 -- drivers/gpu/drm/tegra/drm.c | 2 -- drivers/gpu/drm/v3d/v3d_drv.c | 2 -- drivers/gpu/drm/virtio/virtgpu_drv.c | 2 -- drivers/gpu/drm/xen/xen_drm_front.c | 2 -- include/drm/drm_gem_dma_helper.h | 8 ++------ include/drm/drm_gem_shmem_helper.h | 4 +--- include/drm/drm_gem_vram_helper.h | 8 +++----- 26 files changed, 10 insertions(+), 68 deletions(-) (limited to 'include/drm') diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index 9f2b9fdcc549..5167a65cf7bb 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -373,8 +373,6 @@ static const struct drm_driver driver = { .open = ivpu_open, .postclose = ivpu_postclose, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = ivpu_gem_prime_import, .ioctls = ivpu_drm_ioctls, diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c index b5ba550a0c04..b5de82e6eb4d 100644 --- a/drivers/accel/qaic/qaic_drv.c +++ b/drivers/accel/qaic/qaic_drv.c @@ -165,7 +165,6 @@ static const struct drm_driver qaic_accel_driver = { .ioctls = qaic_drm_ioctls, .num_ioctls = ARRAY_SIZE(qaic_drm_ioctls), - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = qaic_gem_prime_import, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 07e16ad465d0..56dc69bc8b89 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2850,8 +2850,6 @@ static const struct drm_driver amdgpu_kms_driver = { .show_fdinfo = amdgpu_show_fdinfo, #endif - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = amdgpu_gem_prime_import, .name = DRIVER_NAME, @@ -2876,8 +2874,6 @@ const struct drm_driver amdgpu_partition_driver = { .fops = 
&amdgpu_driver_kms_fops, .release = &amdgpu_driver_release_kms, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = amdgpu_gem_prime_import, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index e120144d4b47..e8d2fe955909 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -37,8 +37,6 @@ static const struct drm_ioctl_desc armada_ioctls[] = { DEFINE_DRM_GEM_FOPS(armada_drm_fops); static const struct drm_driver armada_drm_driver = { - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = armada_gem_prime_import, .dumb_create = armada_gem_dumb_create, .major = 1, diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 7b48e0b0284a..834a5e28abbe 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -51,15 +51,10 @@ MODULE_IMPORT_NS(DMA_BUF); * between applications, they can't be guessed like the globally unique GEM * names. * - * Drivers that support the PRIME API implement the - * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle operations. - * GEM based drivers must use drm_gem_prime_handle_to_fd() and - * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the - * actual driver interfaces is provided through the &drm_gem_object_funcs.export - * and &drm_driver.gem_prime_import hooks. - * - * &dma_buf_ops implementations for GEM drivers are all individually exported - * for drivers which need to overwrite or reimplement some of them. + * Drivers that support the PRIME API implement the drm_gem_object_funcs.export + * and &drm_driver.gem_prime_import hooks. &dma_buf_ops implementations for + * drivers are all individually exported for drivers which need to overwrite + * or reimplement some of them. 
* * Reference Counting for GEM Drivers * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 00223a874909..ea55f6b7b744 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -481,8 +481,6 @@ static const struct drm_driver etnaviv_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_RENDER, .open = etnaviv_open, .postclose = etnaviv_postclose, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table, #ifdef CONFIG_DEBUG_FS .debugfs_init = etnaviv_debugfs_init, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index c9e3c88fb329..8399256cb5c9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -109,8 +109,6 @@ static const struct drm_driver exynos_drm_driver = { .open = exynos_drm_open, .postclose = exynos_drm_postclose, .dumb_create = exynos_drm_gem_dumb_create, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = exynos_drm_gem_prime_import, .gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table, .ioctls = exynos_ioctls, diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 75cbc43b326d..171f4db9e5e3 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -1818,8 +1818,6 @@ static const struct drm_driver i915_drm_driver = { .postclose = i915_driver_postclose, .show_fdinfo = i915_drm_client_fdinfo, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = i915_gem_prime_import, .dumb_create = i915_gem_dumb_create, diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c index 65c31dc38049..3dd078f443bb 100644 --- a/drivers/gpu/drm/lima/lima_drv.c +++ b/drivers/gpu/drm/lima/lima_drv.c @@ -276,9 +276,7 @@ static const struct drm_driver lima_drm_driver = { .patchlevel = 0, .gem_create_object = lima_gem_create_object, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, }; struct lima_block_reader { diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 5693bb8d29ce..7fb65eb95c55 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -556,8 +556,6 @@ static const struct drm_driver mtk_drm_driver = { .dumb_create = mtk_drm_gem_dumb_create, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = mtk_drm_gem_prime_import, .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table, .fops = &mtk_drm_fops, diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 47efa3c4492c..2a0e3529598b 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -1086,8 +1086,6 @@ static const struct drm_driver msm_driver = { .postclose = msm_postclose, .dumb_create = msm_gem_dumb_create, .dumb_map_offset = msm_gem_dumb_map_offset, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = msm_gem_prime_import_sg_table, #ifdef CONFIG_DEBUG_FS .debugfs_init = 
msm_debugfs_init, diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 51f1918b44d3..ca3bb8075357 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -1240,8 +1240,6 @@ driver_stub = { .num_ioctls = ARRAY_SIZE(nouveau_ioctls), .fops = &nouveau_driver_fops, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table, .dumb_create = nouveau_display_dumb_create, diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 671d26b9d339..e2697fe80e62 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -655,8 +655,6 @@ static const struct drm_driver omap_drm_driver = { #ifdef CONFIG_DEBUG_FS .debugfs_init = omap_debugfs_init, #endif - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = omap_gem_prime_import, .dumb_create = omap_gem_dumb_create, .dumb_map_offset = omap_gem_dumb_map_offset, diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 598ad1dbe6e1..49b51f0db9b4 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -544,8 +544,6 @@ static const struct drm_driver panfrost_drm_driver = { .minor = 2, .gem_create_object = panfrost_gem_create_object, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table, }; diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index c4b8357ea999..ba3b5b5f0cdf 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -224,8 +224,6 @@ static const struct drm_driver pl111_drm_driver = { .minor = 0, .patchlevel = 0, .dumb_create = drm_gem_dma_dumb_create, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = pl111_gem_import_sg_table, #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index a3b83f89e061..b30ede1cf62d 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -290,8 +290,6 @@ static struct drm_driver qxl_driver = { #if defined(CONFIG_DEBUG_FS) .debugfs_init = qxl_debugfs_init, #endif - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table, .fops = &qxl_fops, .ioctls = qxl_ioctls, diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index cf1b960c4200..39cdede460b5 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -604,8 +604,6 @@ static const struct drm_driver kms_driver = { .dumb_map_offset = radeon_mode_dumb_mmap, .fops = &radeon_driver_kms_fops, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c index 4280ff5fa91f..a4f3615f3291 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c @@ -605,8 +605,6 @@ 
DEFINE_DRM_GEM_DMA_FOPS(rcar_du_fops); static const struct drm_driver rcar_du_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .dumb_create = rcar_du_dumb_create, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = rcar_du_gem_prime_import_sg_table, .fops = &rcar_du_fops, .name = "rcar-du", diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index b8cf89f0cc56..e35be6ea2849 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -224,8 +224,6 @@ DEFINE_DRM_GEM_FOPS(rockchip_drm_driver_fops); static const struct drm_driver rockchip_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .dumb_create = rockchip_gem_dumb_create, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = rockchip_gem_prime_import_sg_table, .fops = &rockchip_drm_driver_fops, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 35ff303c6674..ff36171c8fb7 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -887,8 +887,6 @@ static const struct drm_driver tegra_drm_driver = { .debugfs_init = tegra_debugfs_init, #endif - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = tegra_gem_prime_import, .dumb_create = tegra_bo_dumb_create, diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 845a36e36450..ffbbe9d527d3 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -171,8 +171,6 @@ static const struct drm_driver v3d_drm_driver = { #endif .gem_create_object = v3d_create_object, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = v3d_prime_import_sg_table, .ioctls = v3d_drm_ioctls, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 91ace7a44f2a..a7ec5a3770da 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -186,8 +186,6 @@ static const struct drm_driver driver = { #if defined(CONFIG_DEBUG_FS) .debugfs_init = virtio_gpu_debugfs_init, #endif - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = virtgpu_gem_prime_import, .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table, diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 62c3c13b3a17..7e9431c50c5a 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -474,8 +474,6 @@ DEFINE_DRM_GEM_FOPS(xen_drm_dev_fops); static const struct drm_driver xen_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .release = xen_drm_drv_release, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table, .dumb_create = xen_drm_drv_dumb_create, .fops = &xen_drm_dev_fops, diff --git a/include/drm/drm_gem_dma_helper.h b/include/drm/drm_gem_dma_helper.h index 61da596780b6..a827bde494f6 100644 --- a/include/drm/drm_gem_dma_helper.h +++ b/include/drm/drm_gem_dma_helper.h @@ -166,9 +166,7 @@ drm_gem_dma_prime_import_sg_table(struct drm_device 
*dev, * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. */ #define DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \ - .dumb_create = (dumb_create_func), \ - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ + .dumb_create = (dumb_create_func), \ .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table /** @@ -203,9 +201,7 @@ drm_gem_dma_prime_import_sg_table(struct drm_device *dev, * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. */ #define DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \ - .dumb_create = dumb_create_func, \ - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ + .dumb_create = (dumb_create_func), \ .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap /** diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index 2867d2aba88b..bf0c31aa8fbe 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -278,9 +278,7 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, * the &drm_driver structure. */ #define DRM_GEM_SHMEM_DRIVER_OPS \ - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \ - .dumb_create = drm_gem_shmem_dumb_create + .dumb_create = drm_gem_shmem_dumb_create #endif /* __DRM_GEM_SHMEM_HELPER_H__ */ diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h index 6b265cb9f45a..e18429f09e53 100644 --- a/include/drm/drm_gem_vram_helper.h +++ b/include/drm/drm_gem_vram_helper.h @@ -157,11 +157,9 @@ void drm_gem_vram_simple_display_pipe_cleanup_fb( * &struct drm_driver with default functions. */ #define DRM_GEM_VRAM_DRIVER \ - .debugfs_init = drm_vram_mm_debugfs_init, \ - .dumb_create = drm_gem_vram_driver_dumb_create, \ - .dumb_map_offset = drm_gem_ttm_dumb_map_offset, \ - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ - .prime_fd_to_handle = drm_gem_prime_fd_to_handle + .debugfs_init = drm_vram_mm_debugfs_init, \ + .dumb_create = drm_gem_vram_driver_dumb_create, \ + .dumb_map_offset = drm_gem_ttm_dumb_map_offset /* * VRAM memory manager -- cgit v1.2.3 From 71a7974ac7019afeec105a54447ae1dc7216cbb3 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 20 Jun 2023 09:59:59 +0200 Subject: drm/prime: Unexport helpers for fd/handle conversion Unexport drm_gem_prime_fd_to_handle() and drm_gem_prime_handle_to_fd(). Both are only used internally within the PRIME code. 
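For reference (a sketch mirroring the dispatch introduced in the previous patch, not new code), the conversion helpers are now reachable only through the PRIME ioctls:

	/* in drm_prime_fd_to_handle_ioctl() */
	if (dev->driver->prime_fd_to_handle) {	/* today: vmwgfx only */
		return dev->driver->prime_fd_to_handle(dev, file_priv, args->fd,
						       &args->handle);
	}
	return drm_gem_prime_fd_to_handle(dev, file_priv, args->fd, &args->handle);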
v2: * reword docs as functions are now unexported (Simon) Signed-off-by: Thomas Zimmermann Reviewed-by: Simon Ser Acked-by: Alex Deucher Reviewed-by: Jeffrey Hugo Link: https://patchwork.freedesktop.org/patch/msgid/20230620080252.16368-4-tzimmermann@suse.de --- drivers/gpu/drm/drm_prime.c | 33 +++++++++++++++------------------ include/drm/drm_prime.h | 7 ------- 2 files changed, 15 insertions(+), 25 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 834a5e28abbe..63b709a67471 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -278,7 +278,7 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf) } EXPORT_SYMBOL(drm_gem_dmabuf_release); -/** +/* * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers * @dev: drm_device to import into * @file_priv: drm file-private structure @@ -292,9 +292,9 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release); * * Returns 0 on success or a negative error code on failure. */ -int drm_gem_prime_fd_to_handle(struct drm_device *dev, - struct drm_file *file_priv, int prime_fd, - uint32_t *handle) +static int drm_gem_prime_fd_to_handle(struct drm_device *dev, + struct drm_file *file_priv, int prime_fd, + uint32_t *handle) { struct dma_buf *dma_buf; struct drm_gem_object *obj; @@ -360,7 +360,6 @@ out_put: dma_buf_put(dma_buf); return ret; } -EXPORT_SYMBOL(drm_gem_prime_fd_to_handle); int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -409,7 +408,7 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev, return dmabuf; } -/** +/* * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers * @dev: dev to export the buffer from * @file_priv: drm file-private structure @@ -422,10 +421,10 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev, * The actual exporting from GEM object to a dma-buf is done through the * &drm_gem_object_funcs.export callback. */ -int drm_gem_prime_handle_to_fd(struct drm_device *dev, - struct drm_file *file_priv, uint32_t handle, - uint32_t flags, - int *prime_fd) +static int drm_gem_prime_handle_to_fd(struct drm_device *dev, + struct drm_file *file_priv, uint32_t handle, + uint32_t flags, + int *prime_fd) { struct drm_gem_object *obj; int ret = 0; @@ -507,7 +506,6 @@ out_unlock: return ret; } -EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -866,9 +864,9 @@ EXPORT_SYMBOL(drm_prime_get_contiguous_size); * @obj: GEM object to export * @flags: flags like DRM_CLOEXEC and DRM_RDWR * - * This is the implementation of the &drm_gem_object_funcs.export functions for GEM drivers - * using the PRIME helpers. It is used as the default in - * drm_gem_prime_handle_to_fd(). + * This is the implementation of the &drm_gem_object_funcs.export functions + * for GEM drivers using the PRIME helpers. It is used as the default for + * drivers that do not set their own. */ struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj, int flags) @@ -964,10 +962,9 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev); * @dev: drm_device to import into * @dma_buf: dma-buf object to import * - * This is the implementation of the gem_prime_import functions for GEM drivers - * using the PRIME helpers. Drivers can use this as their - * &drm_driver.gem_prime_import implementation. It is used as the default - * implementation in drm_gem_prime_fd_to_handle(). 
+ * This is the implementation of the gem_prime_import functions for GEM + * drivers using the PRIME helpers. It is the default for drivers that do + * not set their own &drm_driver.gem_prime_import. * * Drivers must arrange to call drm_prime_gem_destroy() from their * &drm_gem_object_funcs.free hook when using this function. diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h index 2a1d01e5b56b..a7abf9f3e697 100644 --- a/include/drm/drm_prime.h +++ b/include/drm/drm_prime.h @@ -60,19 +60,12 @@ enum dma_data_direction; struct drm_device; struct drm_gem_object; -struct drm_file; /* core prime functions */ struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev, struct dma_buf_export_info *exp_info); void drm_gem_dmabuf_release(struct dma_buf *dma_buf); -int drm_gem_prime_fd_to_handle(struct drm_device *dev, - struct drm_file *file_priv, int prime_fd, uint32_t *handle); -int drm_gem_prime_handle_to_fd(struct drm_device *dev, - struct drm_file *file_priv, uint32_t handle, uint32_t flags, - int *prime_fd); - /* helper functions for exporting */ int drm_gem_map_attach(struct dma_buf *dma_buf, struct dma_buf_attachment *attach); -- cgit v1.2.3 From 7ed40ff1d134bf3a4aef706eed478b926f35b404 Mon Sep 17 00:00:00 2001 From: Adrián Larumbe Date: Sun, 25 Jun 2023 15:17:15 +0100 Subject: drm/bridge: dw-hdmi: change YUV420 selection logic at clock setup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Right now the clock value selection code prioritises RGB and YUV444 modes over YUV420 for HDMI2 sinks. However, because of the bus format selection procedure in dw-hdmi, YUV420 is the format that will always be picked for HDMI2 sinks during the drm bridge chain check stage. Later on, dw_hdmi_setup() will configure a colour space based on the bus format, which does not match the pixel clock value we had calculated as described above. Fix it by bringing back the dw-hdmi bus format check when picking the right pixel clock. Signed-off-by: Adrián Larumbe Acked-by: Neil Armstrong Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/6230bfae2cd97cf6527fc62ba5c850464919ccf8.1687702042.git.adrian.larumbe@collabora.com --- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 6 ++++++ drivers/gpu/drm/meson/meson_dw_hdmi.c | 4 ++-- include/drm/bridge/dw_hdmi.h | 2 ++ 3 files changed, 10 insertions(+), 2 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 9d6dcaf317a1..8e1a9854ebc0 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -3346,6 +3346,12 @@ static int dw_hdmi_parse_dt(struct dw_hdmi *hdmi) return 0; } +bool dw_hdmi_bus_fmt_is_420(struct dw_hdmi *hdmi) +{ + return hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format); +} +EXPORT_SYMBOL_GPL(dw_hdmi_bus_fmt_is_420); + struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, const struct dw_hdmi_plat_data *plat_data) { diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 411219d53b14..18a0f4f4638c 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -379,8 +379,8 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, mode->clock > 340000 ?
40 : 10); if (drm_mode_is_420_only(display, mode) || - (!is_hdmi2_sink && - drm_mode_is_420_also(display, mode))) + (!is_hdmi2_sink && drm_mode_is_420_also(display, mode)) || + dw_hdmi_bus_fmt_is_420(hdmi)) mode_is_420 = true; /* Enable clocks */ diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index f668e75fbabe..6a46baa0737c 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h @@ -206,4 +206,6 @@ void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data, bool force, bool disabled, bool rxsense); void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data); +bool dw_hdmi_bus_fmt_is_420(struct dw_hdmi *hdmi); + #endif /* __IMX_HDMI_H__ */ -- cgit v1.2.3 From 09593216bff15866f95c8ad406cb7fdcec1ee40a Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 7 Apr 2022 08:17:51 +0200 Subject: drm: execution context for GEM buffers v7 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds the infrastructure for an execution context for GEM buffers which is similar to TTM's existing execbuf util and intended to replace it in the long term. The basic functionality is that we abstract the necessary loop to lock many different GEM buffers with automated deadlock and duplicate handling. v2: drop xarray and use a dynamically resized array instead; the locking overhead is unnecessary and measurable. v3: drop duplicate tracking, radeon is really the only one needing that. v4: fix issues pointed out by Danilo, some typos in comments, and add a helper for locking arrays of GEM objects. v5: some suggestions by Boris Brezillon, especially: just use one retry macro, drop the loop in prepare_array, use flags instead of bool. v6: minor changes suggested by Thomas, Boris and Danilo. v7: minor typos pointed out by checkpatch.pl fixed. Signed-off-by: Christian König Reviewed-by: Boris Brezillon Reviewed-by: Danilo Krummrich Tested-by: Danilo Krummrich Acked-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20230711133122.3710-2-christian.koenig@amd.com --- Documentation/gpu/drm-mm.rst | 12 ++ drivers/gpu/drm/Kconfig | 6 + drivers/gpu/drm/Makefile | 2 + drivers/gpu/drm/drm_exec.c | 333 +++++++++++++++++++++++++++++++++++++++++++ include/drm/drm_exec.h | 123 ++++++++++++++++ 5 files changed, 476 insertions(+) create mode 100644 drivers/gpu/drm/drm_exec.c create mode 100644 include/drm/drm_exec.h (limited to 'include/drm') diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst index a79fd3549ff8..a52e6f4117d6 100644 --- a/Documentation/gpu/drm-mm.rst +++ b/Documentation/gpu/drm-mm.rst @@ -493,6 +493,18 @@ DRM Sync Objects .. kernel-doc:: drivers/gpu/drm/drm_syncobj.c :export: +DRM Execution context +===================== + +.. kernel-doc:: drivers/gpu/drm/drm_exec.c + :doc: Overview + +.. kernel-doc:: include/drm/drm_exec.h + :internal: + +.. kernel-doc:: drivers/gpu/drm/drm_exec.c + :export: + GPU Scheduler ============= diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index cc9c9947fdef..c0b4063a3ee6 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -194,6 +194,12 @@ config DRM_TTM GPU memory types. Will be enabled automatically if a device driver uses it.
+config DRM_EXEC + tristate + depends on DRM + help + Execution context for command submissions + config DRM_BUDDY tristate depends on DRM diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 1c0f5204e47b..021b3f0ac152 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -78,6 +78,8 @@ obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o # # Memory-management helpers # +# +obj-$(CONFIG_DRM_EXEC) += drm_exec.o obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c new file mode 100644 index 000000000000..ff69cf0fb42a --- /dev/null +++ b/drivers/gpu/drm/drm_exec.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT + +#include <drm/drm_exec.h> +#include <drm/drm_gem.h> +#include <linux/dma-resv.h> + +/** + * DOC: Overview + * + * This component mainly abstracts the retry loop necessary for locking + * multiple GEM objects while preparing hardware operations (e.g. command + * submissions, page table updates, etc.). + * + * If a contention is detected while locking a GEM object, the cleanup procedure + * unlocks all previously locked GEM objects and locks the contended one first + * before locking any further objects. + * + * After an object is locked, fence slots can optionally be reserved on the + * dma_resv object inside the GEM object. + * + * A typical usage pattern should look like this:: + * + * struct drm_gem_object *obj; + * struct drm_exec exec; + * unsigned long index; + * int ret; + * + * drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT); + * drm_exec_until_all_locked(&exec) { + * ret = drm_exec_prepare_obj(&exec, boA, 1); + * drm_exec_retry_on_contention(&exec); + * if (ret) + * goto error; + * + * ret = drm_exec_prepare_obj(&exec, boB, 1); + * drm_exec_retry_on_contention(&exec); + * if (ret) + * goto error; + * } + * + * drm_exec_for_each_locked_object(&exec, index, obj) { + * dma_resv_add_fence(obj->resv, fence, DMA_RESV_USAGE_READ); + * ... + * } + * drm_exec_fini(&exec); + * + * See struct drm_exec for more details. + */ + +/* Dummy value used to initially enter the retry loop */ +#define DRM_EXEC_DUMMY ((void *)~0) + +/* Unlock all objects and drop references */ +static void drm_exec_unlock_all(struct drm_exec *exec) +{ + struct drm_gem_object *obj; + unsigned long index; + + drm_exec_for_each_locked_object(exec, index, obj) { + dma_resv_unlock(obj->resv); + drm_gem_object_put(obj); + } + + drm_gem_object_put(exec->prelocked); + exec->prelocked = NULL; +} + +/** + * drm_exec_init - initialize a drm_exec object + * @exec: the drm_exec object to initialize + * @flags: controls locking behavior, see DRM_EXEC_* defines + * + * Initialize the object and make sure that we can track locked objects. + */ +void drm_exec_init(struct drm_exec *exec, uint32_t flags) +{ + exec->flags = flags; + exec->objects = kmalloc(PAGE_SIZE, GFP_KERNEL); + + /* If allocation here fails, just delay that till the first use */ + exec->max_objects = exec->objects ? PAGE_SIZE / sizeof(void *) : 0; + exec->num_objects = 0; + exec->contended = DRM_EXEC_DUMMY; + exec->prelocked = NULL; +} +EXPORT_SYMBOL(drm_exec_init); + +/** + * drm_exec_fini - finalize a drm_exec object + * @exec: the drm_exec object to finalize + * + * Unlock all locked objects, drop the references to objects and free all memory + * used for tracking the state.
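+ * + * Pairs with drm_exec_init(). A minimal lifecycle sketch (illustrative only; the locking loop from the overview above is elided):: + * + * struct drm_exec exec; + * + * drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT); + * // ... drm_exec_until_all_locked() loop, fence bookkeeping ... + * drm_exec_fini(&exec);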
+ */ +void drm_exec_fini(struct drm_exec *exec) +{ + drm_exec_unlock_all(exec); + kvfree(exec->objects); + if (exec->contended != DRM_EXEC_DUMMY) { + drm_gem_object_put(exec->contended); + ww_acquire_fini(&exec->ticket); + } +} +EXPORT_SYMBOL(drm_exec_fini); + +/** + * drm_exec_cleanup - cleanup when contention is detected + * @exec: the drm_exec object to cleanup + * + * Cleanup the current state and return true if we should stay inside the retry + * loop, false if there wasn't any contention detected and we can keep the + * objects locked. + */ +bool drm_exec_cleanup(struct drm_exec *exec) +{ + if (likely(!exec->contended)) { + ww_acquire_done(&exec->ticket); + return false; + } + + if (likely(exec->contended == DRM_EXEC_DUMMY)) { + exec->contended = NULL; + ww_acquire_init(&exec->ticket, &reservation_ww_class); + return true; + } + + drm_exec_unlock_all(exec); + exec->num_objects = 0; + return true; +} +EXPORT_SYMBOL(drm_exec_cleanup); + +/* Track the locked object in the array */ +static int drm_exec_obj_locked(struct drm_exec *exec, + struct drm_gem_object *obj) +{ + if (unlikely(exec->num_objects == exec->max_objects)) { + size_t size = exec->max_objects * sizeof(void *); + void *tmp; + + tmp = kvrealloc(exec->objects, size, size + PAGE_SIZE, + GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + exec->objects = tmp; + exec->max_objects += PAGE_SIZE / sizeof(void *); + } + drm_gem_object_get(obj); + exec->objects[exec->num_objects++] = obj; + + return 0; +} + +/* Make sure the contended object is locked first */ +static int drm_exec_lock_contended(struct drm_exec *exec) +{ + struct drm_gem_object *obj = exec->contended; + int ret; + + if (likely(!obj)) + return 0; + + /* Always cleanup the contention so that error handling can kick in */ + exec->contended = NULL; + if (exec->flags & DRM_EXEC_INTERRUPTIBLE_WAIT) { + ret = dma_resv_lock_slow_interruptible(obj->resv, + &exec->ticket); + if (unlikely(ret)) + goto error_dropref; + } else { + dma_resv_lock_slow(obj->resv, &exec->ticket); + } + + ret = drm_exec_obj_locked(exec, obj); + if (unlikely(ret)) + goto error_unlock; + + exec->prelocked = obj; + return 0; + +error_unlock: + dma_resv_unlock(obj->resv); + +error_dropref: + drm_gem_object_put(obj); + return ret; +} + +/** + * drm_exec_lock_obj - lock a GEM object for use + * @exec: the drm_exec object with the state + * @obj: the GEM object to lock + * + * Lock a GEM object for use and grab a reference to it. + * + * Returns: -EDEADLK if a contention is detected, -EALREADY when object is + * already locked (can be suppressed by setting the DRM_EXEC_IGNORE_DUPLICATES + * flag), -ENOMEM when memory allocation failed and zero for success. 
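+ * + * A sketch of typical usage inside the retry loop; 'bo' stands for an assumed driver-provided GEM object:: + * + * drm_exec_until_all_locked(&exec) { + * ret = drm_exec_lock_obj(&exec, bo); + * drm_exec_retry_on_contention(&exec); + * if (ret) + * break; + * }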
+ */ +int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj) +{ + int ret; + + ret = drm_exec_lock_contended(exec); + if (unlikely(ret)) + return ret; + + if (exec->prelocked == obj) { + drm_gem_object_put(exec->prelocked); + exec->prelocked = NULL; + return 0; + } + + if (exec->flags & DRM_EXEC_INTERRUPTIBLE_WAIT) + ret = dma_resv_lock_interruptible(obj->resv, &exec->ticket); + else + ret = dma_resv_lock(obj->resv, &exec->ticket); + + if (unlikely(ret == -EDEADLK)) { + drm_gem_object_get(obj); + exec->contended = obj; + return -EDEADLK; + } + + if (unlikely(ret == -EALREADY) && + exec->flags & DRM_EXEC_IGNORE_DUPLICATES) + return 0; + + if (unlikely(ret)) + return ret; + + ret = drm_exec_obj_locked(exec, obj); + if (ret) + goto error_unlock; + + return 0; + +error_unlock: + dma_resv_unlock(obj->resv); + return ret; +} +EXPORT_SYMBOL(drm_exec_lock_obj); + +/** + * drm_exec_unlock_obj - unlock a GEM object in this exec context + * @exec: the drm_exec object with the state + * @obj: the GEM object to unlock + * + * Unlock the GEM object and remove it from the collection of locked objects. + * Should only be used to unlock the most recently locked objects; it is not + * time efficient to unlock objects locked long ago. + */ +void drm_exec_unlock_obj(struct drm_exec *exec, struct drm_gem_object *obj) +{ + unsigned int i; + + for (i = exec->num_objects; i--;) { + if (exec->objects[i] == obj) { + dma_resv_unlock(obj->resv); + for (++i; i < exec->num_objects; ++i) + exec->objects[i - 1] = exec->objects[i]; + --exec->num_objects; + drm_gem_object_put(obj); + return; + } + } +} +EXPORT_SYMBOL(drm_exec_unlock_obj); + +/** + * drm_exec_prepare_obj - prepare a GEM object for use + * @exec: the drm_exec object with the state + * @obj: the GEM object to prepare + * @num_fences: how many fences to reserve + * + * Prepare a GEM object for use by locking it and reserving fence slots. + * + * Returns: -EDEADLK if a contention is detected, -EALREADY when object is + * already locked, -ENOMEM when memory allocation failed and zero for success. + */ +int drm_exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj, + unsigned int num_fences) +{ + int ret; + + ret = drm_exec_lock_obj(exec, obj); + if (ret) + return ret; + + ret = dma_resv_reserve_fences(obj->resv, num_fences); + if (ret) { + drm_exec_unlock_obj(exec, obj); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(drm_exec_prepare_obj); + +/** + * drm_exec_prepare_array - helper to prepare an array of objects + * @exec: the drm_exec object with the state + * @objects: array of GEM objects to prepare + * @num_objects: number of GEM objects in the array + * @num_fences: number of fences to reserve on each GEM object + * + * Prepares all GEM objects in an array, aborts on the first error. + * Reserves @num_fences on each GEM object after locking it. + * + * Returns: -EDEADLK on contention, -EALREADY when object is already locked, + * -ENOMEM when memory allocation failed and zero for success.
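+ * + * A usage sketch, mirroring the pattern above; 'bos' and 'count' are assumed driver-provided values:: + * + * drm_exec_until_all_locked(&exec) { + * ret = drm_exec_prepare_array(&exec, bos, count, 1); + * drm_exec_retry_on_contention(&exec); + * if (ret) + * break; + * }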
+ */ +int drm_exec_prepare_array(struct drm_exec *exec, + struct drm_gem_object **objects, + unsigned int num_objects, + unsigned int num_fences) +{ + int ret; + + for (unsigned int i = 0; i < num_objects; ++i) { + ret = drm_exec_prepare_obj(exec, objects[i], num_fences); + if (unlikely(ret)) + return ret; + } + + return 0; +} +EXPORT_SYMBOL(drm_exec_prepare_array); + +MODULE_DESCRIPTION("DRM execution context"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/include/drm/drm_exec.h b/include/drm/drm_exec.h new file mode 100644 index 000000000000..73205afec162 --- /dev/null +++ b/include/drm/drm_exec.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ + +#ifndef __DRM_EXEC_H__ +#define __DRM_EXEC_H__ + +#include <linux/ww_mutex.h> + +#define DRM_EXEC_INTERRUPTIBLE_WAIT BIT(0) +#define DRM_EXEC_IGNORE_DUPLICATES BIT(1) + +struct drm_gem_object; + +/** + * struct drm_exec - Execution context + */ +struct drm_exec { + /** + * @flags: Flags to control locking behavior + */ + uint32_t flags; + + /** + * @ticket: WW ticket used for acquiring locks + */ + struct ww_acquire_ctx ticket; + + /** + * @num_objects: number of objects locked + */ + unsigned int num_objects; + + /** + * @max_objects: maximum objects in array + */ + unsigned int max_objects; + + /** + * @objects: array of the locked objects + */ + struct drm_gem_object **objects; + + /** + * @contended: contended GEM object we backed off for + */ + struct drm_gem_object *contended; + + /** + * @prelocked: already locked GEM object due to contention + */ + struct drm_gem_object *prelocked; +}; + +/** + * drm_exec_for_each_locked_object - iterate over all the locked objects + * @exec: drm_exec object + * @index: unsigned long index for the iteration + * @obj: the current GEM object + * + * Iterate over all the locked GEM objects inside the drm_exec object. + */ +#define drm_exec_for_each_locked_object(exec, index, obj) \ + for (index = 0, obj = (exec)->objects[0]; \ + index < (exec)->num_objects; \ + ++index, obj = (exec)->objects[index]) + +/** + * drm_exec_until_all_locked - loop until all GEM objects are locked + * @exec: drm_exec object + * + * Core functionality of the drm_exec object. Loops until all GEM objects are + * locked and no more contention exists. At the beginning of the loop it is + * guaranteed that no GEM object is locked. + * + * Since labels can't be defined local to the loop's body we use a jump pointer + * to make sure that the retry is only used from within the loop's body. + */ +#define drm_exec_until_all_locked(exec) \ + for (void *__drm_exec_retry_ptr; ({ \ + __label__ __drm_exec_retry; \ +__drm_exec_retry: \ + __drm_exec_retry_ptr = &&__drm_exec_retry; \ + (void)__drm_exec_retry_ptr; \ + drm_exec_cleanup(exec); \ + });) + +/** + * drm_exec_retry_on_contention - restart the loop to grab all locks + * @exec: drm_exec object + * + * Control flow helper to continue when a contention was detected and we need to + * clean up and re-start the loop to prepare all GEM objects. + */ +#define drm_exec_retry_on_contention(exec) \ + do { \ + if (unlikely(drm_exec_is_contended(exec))) \ + goto *__drm_exec_retry_ptr; \ + } while (0) + +/** + * drm_exec_is_contended - check for contention + * @exec: drm_exec object + * + * Returns true if the drm_exec object has run into some contention while + * locking a GEM object and needs to clean up.
+ */ +static inline bool drm_exec_is_contended(struct drm_exec *exec) +{ + return !!exec->contended; +} + +void drm_exec_init(struct drm_exec *exec, uint32_t flags); +void drm_exec_fini(struct drm_exec *exec); +bool drm_exec_cleanup(struct drm_exec *exec); +int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj); +void drm_exec_unlock_obj(struct drm_exec *exec, struct drm_gem_object *obj); +int drm_exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj, + unsigned int num_fences); +int drm_exec_prepare_array(struct drm_exec *exec, + struct drm_gem_object **objects, + unsigned int num_objects, + unsigned int num_fences); + +#endif -- cgit v1.2.3 From 03fad56ab746c23c9bf93bbfe3b0941dd298180e Mon Sep 17 00:00:00 2001 From: Simon Ser Date: Fri, 14 Jul 2023 10:46:04 +0000 Subject: drm/file: use explicit values for enum drm_minor_type MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes it clearer that the values cannot be changed because they are ABI. Signed-off-by: Simon Ser Reviewed-by: James Zhu Reviewed-by: Thomas Zimmermann Cc: Christian König Cc: Marek Olšák Cc: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20230714104557.518457-2-contact@emersion.fr --- include/drm/drm_file.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include/drm') diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index 966912053cb0..010239392adf 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -50,16 +50,16 @@ struct file; * header include loops we need it here for now. */ -/* Note that the order of this enum is ABI (it determines +/* Note that the values of this enum are ABI (it determines * /dev/dri/renderD* numbers). * * Setting DRM_MINOR_ACCEL to 32 gives enough space for more drm minors to * be implemented before we hit any future */ enum drm_minor_type { - DRM_MINOR_PRIMARY, - DRM_MINOR_CONTROL, - DRM_MINOR_RENDER, + DRM_MINOR_PRIMARY = 0, + DRM_MINOR_CONTROL = 1, + DRM_MINOR_RENDER = 2, DRM_MINOR_ACCEL = 32, }; -- cgit v1.2.3 From e6303f323b1ad9c02ae813fc3dedeaa9dadfd3b0 Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Thu, 20 Jul 2023 02:14:22 +0200 Subject: drm: manager to keep track of GPUs VA mappings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add infrastructure to keep track of GPU virtual address (VA) mappings with a dedicated VA space manager implementation. New UAPIs, motivated by the Vulkan sparse memory bindings that graphics drivers are starting to implement, allow userspace applications to request multiple and arbitrary GPU VA mappings of buffer objects. The DRM GPU VA manager is intended to serve the following purposes in this context. 1) Provide infrastructure to track GPU VA allocations and mappings, using an interval tree (RB-tree). 2) Generically connect GPU VA mappings to their backing buffers, in particular DRM GEM objects. 3) Provide a common implementation to perform more complex mapping operations on the GPU VA space. In particular splitting and merging of GPU VA mappings, e.g. for intersecting mapping requests or partial unmap requests.
Acked-by: Thomas Hellström Acked-by: Matthew Brost Reviewed-by: Boris Brezillon Tested-by: Matthew Brost Tested-by: Donald Robson Suggested-by: Dave Airlie Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20230720001443.2380-2-dakr@redhat.com --- Documentation/gpu/drm-mm.rst | 36 + drivers/gpu/drm/Makefile | 1 + drivers/gpu/drm/drm_gem.c | 3 + drivers/gpu/drm/drm_gpuva_mgr.c | 1725 +++++++++++++++++++++++++++++++++++++++ include/drm/drm_drv.h | 6 + include/drm/drm_gem.h | 79 ++ include/drm/drm_gpuva_mgr.h | 706 ++++++++++++++++ 7 files changed, 2556 insertions(+) create mode 100644 drivers/gpu/drm/drm_gpuva_mgr.c create mode 100644 include/drm/drm_gpuva_mgr.h (limited to 'include/drm') diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst index a52e6f4117d6..3d5dc9dc1bfe 100644 --- a/Documentation/gpu/drm-mm.rst +++ b/Documentation/gpu/drm-mm.rst @@ -466,6 +466,42 @@ DRM MM Range Allocator Function References .. kernel-doc:: drivers/gpu/drm/drm_mm.c :export: +DRM GPU VA Manager +================== + +Overview +-------- + +.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c + :doc: Overview + +Split and Merge +--------------- + +.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c + :doc: Split and Merge + +Locking +------- + +.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c + :doc: Locking + +Examples +-------- + +.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c + :doc: Examples + +DRM GPU VA Manager Function References +-------------------------------------- + +.. kernel-doc:: include/drm/drm_gpuva_mgr.h + :internal: + +.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c + :export: + DRM Buddy Allocator =================== diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 021b3f0ac152..215e78e79125 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -45,6 +45,7 @@ drm-y := \ drm_vblank.o \ drm_vblank_work.o \ drm_vma_manager.o \ + drm_gpuva_mgr.o \ drm_writeback.o drm-$(CONFIG_DRM_LEGACY) += \ drm_agpsupport.o \ diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index c18686f434d4..dfe76c70bcb3 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -164,6 +164,9 @@ void drm_gem_private_object_init(struct drm_device *dev, if (!obj->resv) obj->resv = &obj->_resv; + if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA)) + drm_gem_gpuva_init(obj); + drm_vma_node_reset(&obj->vma_node); INIT_LIST_HEAD(&obj->lru_node); } diff --git a/drivers/gpu/drm/drm_gpuva_mgr.c b/drivers/gpu/drm/drm_gpuva_mgr.c new file mode 100644 index 000000000000..0b80177592a6 --- /dev/null +++ b/drivers/gpu/drm/drm_gpuva_mgr.c @@ -0,0 +1,1725 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Red Hat. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Danilo Krummrich + * + */ + +#include <drm/drm_gpuva_mgr.h> + +#include <linux/interval_tree_generic.h> +#include <linux/mm.h> + +/** + * DOC: Overview + * + * The DRM GPU VA Manager, represented by struct drm_gpuva_manager, keeps track + * of a GPU's virtual address (VA) space and manages the corresponding virtual + * mappings represented by &drm_gpuva objects. It also keeps track of the + * mapping's backing &drm_gem_object buffers. + * + * &drm_gem_object buffers maintain a list of &drm_gpuva objects representing + * all existent GPU VA mappings using this &drm_gem_object as backing buffer. + * + * GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also + * keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'. + * + * The GPU VA manager internally uses an rb-tree to manage the + * &drm_gpuva mappings within a GPU's virtual address space. + * + * The &drm_gpuva_manager contains a special &drm_gpuva representing the + * portion of VA space reserved by the kernel. This node is initialized together + * with the GPU VA manager instance and removed when the GPU VA manager is + * destroyed. + * + * In a typical application, drivers would embed struct drm_gpuva_manager and + * struct drm_gpuva within their own driver specific structures; this way the + * manager performs no memory allocations of its own, nor any allocations for + * &drm_gpuva entries. + * + * The data structures needed to store &drm_gpuvas within the &drm_gpuva_manager + * are contained within struct drm_gpuva already. Hence, for inserting + * &drm_gpuva entries from within dma-fence signalling critical sections it is + * enough to pre-allocate the &drm_gpuva structures. + */ + +/** + * DOC: Split and Merge + * + * Besides its capability to manage and represent a GPU VA space, the + * &drm_gpuva_manager also provides functions to calculate a sequence of + * operations to satisfy a given map or unmap request. + * + * Therefore the DRM GPU VA manager provides an algorithm implementing splitting + * and merging of existent GPU VA mappings with the ones that are requested to + * be mapped or unmapped. This feature is required by the Vulkan API to + * implement Vulkan 'Sparse Memory Bindings' - driver UAPIs often refer to this + * as VM BIND. + * + * Drivers can call drm_gpuva_sm_map() to receive a sequence of callbacks + * containing map, unmap and remap operations for a given newly requested + * mapping. The sequence of callbacks represents the set of operations to + * execute in order to integrate the new mapping cleanly into the current state + * of the GPU VA space. + * + * Depending on how the new GPU VA mapping intersects with the existent mappings + * of the GPU VA space, the &drm_gpuva_fn_ops callbacks contain an arbitrary + * amount of unmap operations, a maximum of two remap operations and a single + * map operation. The caller might receive no callback at all if no operation is + * required, e.g. if the requested mapping already exists in the exact same way.
+ * + * The single map operation represents the original map operation requested by + * the caller. + * + * &drm_gpuva_op_unmap contains a 'keep' field, which indicates whether the + * &drm_gpuva to unmap is physically contiguous with the original mapping + * request. Optionally, if 'keep' is set, drivers may keep the actual page table + * entries for this &drm_gpuva, adding the missing page table entries only and + * updating the &drm_gpuva_manager's view of things accordingly. + * + * Drivers may do the same optimization, namely delta page table updates, also + * for remap operations. This is possible since &drm_gpuva_op_remap consists of + * one unmap operation and one or two map operations, such that drivers can + * derive the page table update delta accordingly. + * + * Note that there can't be more than two existent mappings to split up, one at + * the beginning and one at the end of the new mapping, hence there is a + * maximum of two remap operations. + * + * Analogous to drm_gpuva_sm_map(), drm_gpuva_sm_unmap() uses &drm_gpuva_fn_ops + * to call back into the driver in order to unmap a range of GPU VA space. The + * logic behind this function is way simpler though: for all existent mappings + * enclosed by the given range, unmap operations are created. For mappings which + * are only partially located within the given range, remap operations are + * created such that those mappings are split up and partially re-mapped. + * + * As an alternative to drm_gpuva_sm_map() and drm_gpuva_sm_unmap(), + * drm_gpuva_sm_map_ops_create() and drm_gpuva_sm_unmap_ops_create() can be used + * to directly obtain an instance of struct drm_gpuva_ops containing a list of + * &drm_gpuva_op, which can be iterated with drm_gpuva_for_each_op(). This list + * contains the &drm_gpuva_ops analogous to the callbacks one would receive when + * calling drm_gpuva_sm_map() or drm_gpuva_sm_unmap(). While this approach + * requires more memory (to allocate the &drm_gpuva_ops), it provides drivers a + * way to iterate the &drm_gpuva_op multiple times, e.g. once in a context where + * memory allocations are possible (e.g. to allocate GPU page tables) and once + * in the dma-fence signalling critical path. + * + * To update the &drm_gpuva_manager's view of the GPU VA space, + * drm_gpuva_insert() and drm_gpuva_remove() may be used. These functions can + * safely be used from &drm_gpuva_fn_ops callbacks originating from + * drm_gpuva_sm_map() or drm_gpuva_sm_unmap(). However, it might be more + * convenient to use the provided helper functions drm_gpuva_map(), + * drm_gpuva_remap() and drm_gpuva_unmap() instead. + * + * The following diagram depicts the basic relationships of existent GPU VA + * mappings, a newly requested mapping and the resulting mappings as implemented + * by drm_gpuva_sm_map() - it doesn't cover any arbitrary combinations of these. + * + * 1) Requested mapping is identical. Replace it, but indicate the backing PTEs + * could be kept. + * + * :: + * + * 0 a 1 + * old: |-----------| (bo_offset=n) + * + * 0 a 1 + * req: |-----------| (bo_offset=n) + * + * 0 a 1 + * new: |-----------| (bo_offset=n) + * + * + * 2) Requested mapping is identical, except for the BO offset, hence replace + * the mapping. + * + * :: + * + * 0 a 1 + * old: |-----------| (bo_offset=n) + * + * 0 a 1 + * req: |-----------| (bo_offset=m) + * + * 0 a 1 + * new: |-----------| (bo_offset=m) + * + * + * 3) Requested mapping is identical, except for the backing BO, hence replace + * the mapping.
+ * + * :: + * + * 0 a 1 + * old: |-----------| (bo_offset=n) + * + * 0 b 1 + * req: |-----------| (bo_offset=n) + * + * 0 b 1 + * new: |-----------| (bo_offset=n) + * + * + * 4) Existent mapping is a left aligned subset of the requested one, hence + * replace the existent one. + * + * :: + * + * 0 a 1 + * old: |-----| (bo_offset=n) + * + * 0 a 2 + * req: |-----------| (bo_offset=n) + * + * 0 a 2 + * new: |-----------| (bo_offset=n) + * + * .. note:: + * We expect to see the same result for a request with a different BO + * and/or non-contiguous BO offset. + * + * + * 5) Requested mapping's range is a left aligned subset of the existent one, + * but backed by a different BO. Hence, map the requested mapping and split + * the existent one adjusting its BO offset. + * + * :: + * + * 0 a 2 + * old: |-----------| (bo_offset=n) + * + * 0 b 1 + * req: |-----| (bo_offset=n) + * + * 0 b 1 a' 2 + * new: |-----|-----| (b.bo_offset=n, a.bo_offset=n+1) + * + * .. note:: + * We expect to see the same result for a request with a different BO + * and/or non-contiguous BO offset. + * + * + * 6) Existent mapping is a superset of the requested mapping. Split it up, but + * indicate that the backing PTEs could be kept. + * + * :: + * + * 0 a 2 + * old: |-----------| (bo_offset=n) + * + * 0 a 1 + * req: |-----| (bo_offset=n) + * + * 0 a 1 a' 2 + * new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1) + * + * + * 7) Requested mapping's range is a right aligned subset of the existent one, + * but backed by a different BO. Hence, map the requested mapping and split + * the existent one, without adjusting the BO offset. + * + * :: + * + * 0 a 2 + * old: |-----------| (bo_offset=n) + * + * 1 b 2 + * req: |-----| (bo_offset=m) + * + * 0 a 1 b 2 + * new: |-----|-----| (a.bo_offset=n,b.bo_offset=m) + * + * + * 8) Existent mapping is a superset of the requested mapping. Split it up, but + * indicate that the backing PTEs could be kept. + * + * :: + * + * 0 a 2 + * old: |-----------| (bo_offset=n) + * + * 1 a 2 + * req: |-----| (bo_offset=n+1) + * + * 0 a' 1 a 2 + * new: |-----|-----| (a'.bo_offset=n, a.bo_offset=n+1) + * + * + * 9) Existent mapping is overlapped at the end by the requested mapping backed + * by a different BO. Hence, map the requested mapping and split up the + * existent one, without adjusting the BO offset. + * + * :: + * + * 0 a 2 + * old: |-----------| (bo_offset=n) + * + * 1 b 3 + * req: |-----------| (bo_offset=m) + * + * 0 a 1 b 3 + * new: |-----|-----------| (a.bo_offset=n,b.bo_offset=m) + * + * + * 10) Existent mapping is overlapped by the requested mapping, both having the + * same backing BO with a contiguous offset. Indicate the backing PTEs of + * the old mapping could be kept. + * + * :: + * + * 0 a 2 + * old: |-----------| (bo_offset=n) + * + * 1 a 3 + * req: |-----------| (bo_offset=n+1) + * + * 0 a' 1 a 3 + * new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1) + * + * + * 11) Requested mapping's range is a centered subset of the existent one + * having a different backing BO. Hence, map the requested mapping and split + * up the existent one in two mappings, adjusting the BO offset of the right + * one accordingly. + * + * :: + * + * 0 a 3 + * old: |-----------------| (bo_offset=n) + * + * 1 b 2 + * req: |-----| (bo_offset=m) + * + * 0 a 1 b 2 a' 3 + * new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2) + * + * + * 12) Requested mapping is a contiguous subset of the existent one. Split it + * up, but indicate that the backing PTEs could be kept. 
+ * + * :: + * + * 0 a 3 + * old: |-----------------| (bo_offset=n) + * + * 1 a 2 + * req: |-----| (bo_offset=n+1) + * + * 0 a' 1 a 2 a'' 3 + * new: |-----|-----|-----| (a'.bo_offset=n, a.bo_offset=n+1, a''.bo_offset=n+2) + * + * + * 13) Existent mapping is a right aligned subset of the requested one, hence + * replace the existent one. + * + * :: + * + * 1 a 2 + * old: |-----| (bo_offset=n+1) + * + * 0 a 2 + * req: |-----------| (bo_offset=n) + * + * 0 a 2 + * new: |-----------| (bo_offset=n) + * + * .. note:: + * We expect to see the same result for a request with a different bo + * and/or non-contiguous bo_offset. + * + * + * 14) Existent mapping is a centered subset of the requested one, hence + * replace the existent one. + * + * :: + * + * 1 a 2 + * old: |-----| (bo_offset=n+1) + * + * 0 a 3 + * req: |----------------| (bo_offset=n) + * + * 0 a 3 + * new: |----------------| (bo_offset=n) + * + * .. note:: + * We expect to see the same result for a request with a different bo + * and/or non-contiguous bo_offset. + * + * + * 15) Existent mapping is overlapped at the beginning by the requested mapping + * backed by a different BO. Hence, map the requested mapping and split up + * the existent one, adjusting its BO offset accordingly. + * + * :: + * + * 1 a 3 + * old: |-----------| (bo_offset=n) + * + * 0 b 2 + * req: |-----------| (bo_offset=m) + * + * 0 b 2 a' 3 + * new: |-----------|-----| (b.bo_offset=m,a'.bo_offset=n+2) + */ + +/** + * DOC: Locking + * + * Generally, the GPU VA manager does not take care of locking itself; it is + * the driver's responsibility to take care of locking. Drivers might want to + * protect the following operations: inserting, removing and iterating + * &drm_gpuva objects as well as generating all kinds of operations, such as + * split / merge or prefetch. + * + * The GPU VA manager also does not take care of the locking of the backing + * &drm_gem_object buffers' GPU VA lists by itself; drivers are responsible for + * enforcing mutual exclusion using either the GEM's dma_resv lock or + * alternatively a driver specific external lock. For the latter see also + * drm_gem_gpuva_set_lock(). + * + * However, the GPU VA manager contains lockdep checks to ensure callers of its + * API hold the corresponding lock whenever the &drm_gem_object's GPU VA list is + * accessed by functions such as drm_gpuva_link() or drm_gpuva_unlink(). + */ + +/** + * DOC: Examples + * + * This section gives two examples of how to let the DRM GPUVA Manager generate + * &drm_gpuva_op in order to satisfy a given map or unmap request and how to + * make use of them. + * + * The code below is strictly limited to illustrating the generic usage pattern. + * To maintain simplicity, it doesn't make use of any abstractions for common + * code, different (asynchronous) stages with fence signalling critical paths, + * any other helpers, or error handling in terms of freeing memory and dropping + * previously taken locks. + * + * 1) Obtain a list of &drm_gpuva_op to create a new mapping:: + * + * // Allocates a new &drm_gpuva. + * struct drm_gpuva * driver_gpuva_alloc(void); + * + * // Typically drivers would embed the &drm_gpuva_manager and &drm_gpuva + * // structure in individual driver structures and lock the dma-resv with + * // drm_exec or similar helpers.
+ * int driver_mapping_create(struct drm_gpuva_manager *mgr, + * u64 addr, u64 range, + * struct drm_gem_object *obj, u64 offset) + * { + * struct drm_gpuva_ops *ops; + * struct drm_gpuva_op *op; + * + * driver_lock_va_space(); + * ops = drm_gpuva_sm_map_ops_create(mgr, addr, range, + * obj, offset); + * if (IS_ERR(ops)) + * return PTR_ERR(ops); + * + * drm_gpuva_for_each_op(op, ops) { + * struct drm_gpuva *va; + * + * switch (op->op) { + * case DRM_GPUVA_OP_MAP: + * va = driver_gpuva_alloc(); + * if (!va) + * ; // unwind previous VA space updates, + * // free memory and unlock + * + * driver_vm_map(); + * drm_gpuva_map(mgr, va, &op->map); + * drm_gpuva_link(va); + * + * break; + * case DRM_GPUVA_OP_REMAP: { + * struct drm_gpuva *prev = NULL, *next = NULL; + * + * va = op->remap.unmap->va; + * + * if (op->remap.prev) { + * prev = driver_gpuva_alloc(); + * if (!prev) + * ; // unwind previous VA space + * // updates, free memory and + * // unlock + * } + * + * if (op->remap.next) { + * next = driver_gpuva_alloc(); + * if (!next) + * ; // unwind previous VA space + * // updates, free memory and + * // unlock + * } + * + * driver_vm_remap(); + * drm_gpuva_remap(prev, next, &op->remap); + * + * drm_gpuva_unlink(va); + * if (prev) + * drm_gpuva_link(prev); + * if (next) + * drm_gpuva_link(next); + * + * break; + * } + * case DRM_GPUVA_OP_UNMAP: + * va = op->unmap.va; + * + * driver_vm_unmap(); + * drm_gpuva_unlink(va); + * drm_gpuva_unmap(&op->unmap); + * + * break; + * default: + * break; + * } + * } + * driver_unlock_va_space(); + * + * return 0; + * } + * + * 2) Receive a callback for each &drm_gpuva_op to create a new mapping:: + * + * struct driver_context { + * struct drm_gpuva_manager *mgr; + * struct drm_gpuva *new_va; + * struct drm_gpuva *prev_va; + * struct drm_gpuva *next_va; + * }; + * + * // ops to pass to drm_gpuva_manager_init() + * static const struct drm_gpuva_fn_ops driver_gpuva_ops = { + * .sm_step_map = driver_gpuva_map, + * .sm_step_remap = driver_gpuva_remap, + * .sm_step_unmap = driver_gpuva_unmap, + * }; + * + * // Typically drivers would embed the &drm_gpuva_manager and &drm_gpuva + * // structure in individual driver structures and lock the dma-resv with + * // drm_exec or similar helpers.
+ * int driver_mapping_create(struct drm_gpuva_manager *mgr, + * u64 addr, u64 range, + * struct drm_gem_object *obj, u64 offset) + * { + * struct driver_context ctx; + * struct drm_gpuva_ops *ops; + * struct drm_gpuva_op *op; + * int ret = 0; + * + * ctx.mgr = mgr; + * + * ctx.new_va = kzalloc(sizeof(*ctx.new_va), GFP_KERNEL); + * ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL); + * ctx.next_va = kzalloc(sizeof(*ctx.next_va), GFP_KERNEL); + * if (!ctx.new_va || !ctx.prev_va || !ctx.next_va) { + * ret = -ENOMEM; + * goto out; + * } + * + * driver_lock_va_space(); + * ret = drm_gpuva_sm_map(mgr, &ctx, addr, range, obj, offset); + * driver_unlock_va_space(); + * + * out: + * kfree(ctx.new_va); + * kfree(ctx.prev_va); + * kfree(ctx.next_va); + * return ret; + * } + * + * int driver_gpuva_map(struct drm_gpuva_op *op, void *__ctx) + * { + * struct driver_context *ctx = __ctx; + * + * drm_gpuva_map(ctx->mgr, ctx->new_va, &op->map); + * + * drm_gpuva_link(ctx->new_va); + * + * // prevent the new GPUVA from being freed in + * // driver_mapping_create() + * ctx->new_va = NULL; + * + * return 0; + * } + * + * int driver_gpuva_remap(struct drm_gpuva_op *op, void *__ctx) + * { + * struct driver_context *ctx = __ctx; + * + * drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap); + * + * drm_gpuva_unlink(op->remap.unmap->va); + * kfree(op->remap.unmap->va); + * + * if (op->remap.prev) { + * drm_gpuva_link(ctx->prev_va); + * ctx->prev_va = NULL; + * } + * + * if (op->remap.next) { + * drm_gpuva_link(ctx->next_va); + * ctx->next_va = NULL; + * } + * + * return 0; + * } + * + * int driver_gpuva_unmap(struct drm_gpuva_op *op, void *__ctx) + * { + * drm_gpuva_unlink(op->unmap.va); + * drm_gpuva_unmap(&op->unmap); + * kfree(op->unmap.va); + * + * return 0; + * } + */ + +#define to_drm_gpuva(__node) container_of((__node), struct drm_gpuva, rb.node) + +#define GPUVA_START(node) ((node)->va.addr) +#define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1) + +/* We do not actually use drm_gpuva_it_next(), tell the compiler to not complain + * about this. 
+ */ +INTERVAL_TREE_DEFINE(struct drm_gpuva, rb.node, u64, rb.__subtree_last, + GPUVA_START, GPUVA_LAST, static __maybe_unused, + drm_gpuva_it) + +static int __drm_gpuva_insert(struct drm_gpuva_manager *mgr, + struct drm_gpuva *va); +static void __drm_gpuva_remove(struct drm_gpuva *va); + +static bool +drm_gpuva_check_overflow(u64 addr, u64 range) +{ + u64 end; + + return WARN(check_add_overflow(addr, range, &end), + "GPUVA address limited to %lu bytes.\n", sizeof(end)); +} + +static bool +drm_gpuva_in_mm_range(struct drm_gpuva_manager *mgr, u64 addr, u64 range) +{ + u64 end = addr + range; + u64 mm_start = mgr->mm_start; + u64 mm_end = mm_start + mgr->mm_range; + + return addr >= mm_start && end <= mm_end; +} + +static bool +drm_gpuva_in_kernel_node(struct drm_gpuva_manager *mgr, u64 addr, u64 range) +{ + u64 end = addr + range; + u64 kstart = mgr->kernel_alloc_node.va.addr; + u64 krange = mgr->kernel_alloc_node.va.range; + u64 kend = kstart + krange; + + return krange && addr < kend && kstart < end; +} + +static bool +drm_gpuva_range_valid(struct drm_gpuva_manager *mgr, + u64 addr, u64 range) +{ + return !drm_gpuva_check_overflow(addr, range) && + drm_gpuva_in_mm_range(mgr, addr, range) && + !drm_gpuva_in_kernel_node(mgr, addr, range); +} + +/** + * drm_gpuva_manager_init() - initialize a &drm_gpuva_manager + * @mgr: pointer to the &drm_gpuva_manager to initialize + * @name: the name of the GPU VA space + * @start_offset: the start offset of the GPU VA space + * @range: the size of the GPU VA space + * @reserve_offset: the start of the kernel reserved GPU VA area + * @reserve_range: the size of the kernel reserved GPU VA area + * @ops: &drm_gpuva_fn_ops called on &drm_gpuva_sm_map / &drm_gpuva_sm_unmap + * + * The &drm_gpuva_manager must be initialized with this function before use. + * + * Note that @mgr must be cleared to 0 before calling this function. The given + * &name is expected to be managed by the surrounding driver structures. + */ +void +drm_gpuva_manager_init(struct drm_gpuva_manager *mgr, + const char *name, + u64 start_offset, u64 range, + u64 reserve_offset, u64 reserve_range, + const struct drm_gpuva_fn_ops *ops) +{ + mgr->rb.tree = RB_ROOT_CACHED; + INIT_LIST_HEAD(&mgr->rb.list); + + drm_gpuva_check_overflow(start_offset, range); + mgr->mm_start = start_offset; + mgr->mm_range = range; + + mgr->name = name ? name : "unknown"; + mgr->ops = ops; + + memset(&mgr->kernel_alloc_node, 0, sizeof(struct drm_gpuva)); + + if (reserve_range) { + mgr->kernel_alloc_node.va.addr = reserve_offset; + mgr->kernel_alloc_node.va.range = reserve_range; + + if (likely(!drm_gpuva_check_overflow(reserve_offset, + reserve_range))) + __drm_gpuva_insert(mgr, &mgr->kernel_alloc_node); + } +} +EXPORT_SYMBOL_GPL(drm_gpuva_manager_init); + +/** + * drm_gpuva_manager_destroy() - cleanup a &drm_gpuva_manager + * @mgr: pointer to the &drm_gpuva_manager to clean up + * + * Note that it is a bug to call this function on a manager that still + * holds GPU VA mappings. 
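+ * + * A minimal init/destroy sketch; the VA space bounds, the reserved kernel node and the ops table are illustrative assumptions only:: + * + * struct drm_gpuva_manager mgr = {}; // must be cleared to 0 before init + * + * drm_gpuva_manager_init(&mgr, "example", 0, 1ULL << 47, + * 0, PAGE_SIZE, &driver_gpuva_ops); + * // ... insert and remove mappings ... + * drm_gpuva_manager_destroy(&mgr);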
+ */ +void +drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr) +{ + mgr->name = NULL; + + if (mgr->kernel_alloc_node.va.range) + __drm_gpuva_remove(&mgr->kernel_alloc_node); + + WARN(!RB_EMPTY_ROOT(&mgr->rb.tree.rb_root), + "GPUVA tree is not empty, potentially leaking memory."); +} +EXPORT_SYMBOL_GPL(drm_gpuva_manager_destroy); + +static int +__drm_gpuva_insert(struct drm_gpuva_manager *mgr, + struct drm_gpuva *va) +{ + struct rb_node *node; + struct list_head *head; + + if (drm_gpuva_it_iter_first(&mgr->rb.tree, + GPUVA_START(va), + GPUVA_LAST(va))) + return -EEXIST; + + va->mgr = mgr; + + drm_gpuva_it_insert(va, &mgr->rb.tree); + + node = rb_prev(&va->rb.node); + if (node) + head = &(to_drm_gpuva(node))->rb.entry; + else + head = &mgr->rb.list; + + list_add(&va->rb.entry, head); + + return 0; +} + +/** + * drm_gpuva_insert() - insert a &drm_gpuva + * @mgr: the &drm_gpuva_manager to insert the &drm_gpuva in + * @va: the &drm_gpuva to insert + * + * Insert a &drm_gpuva with a given address and range into a + * &drm_gpuva_manager. + * + * It is safe to use this function together with the safe versions of iterating + * the GPU VA space, such as drm_gpuva_for_each_va_safe() and + * drm_gpuva_for_each_va_range_safe(). + * + * Returns: 0 on success, negative error code on failure. + */ +int +drm_gpuva_insert(struct drm_gpuva_manager *mgr, + struct drm_gpuva *va) +{ + u64 addr = va->va.addr; + u64 range = va->va.range; + + if (unlikely(!drm_gpuva_range_valid(mgr, addr, range))) + return -EINVAL; + + return __drm_gpuva_insert(mgr, va); +} +EXPORT_SYMBOL_GPL(drm_gpuva_insert); + +static void +__drm_gpuva_remove(struct drm_gpuva *va) +{ + drm_gpuva_it_remove(va, &va->mgr->rb.tree); + list_del_init(&va->rb.entry); +} + +/** + * drm_gpuva_remove() - remove a &drm_gpuva + * @va: the &drm_gpuva to remove + * + * This removes the given &va from the underlying tree. + * + * It is safe to use this function together with the safe versions of iterating + * the GPU VA space, such as drm_gpuva_for_each_va_safe() and + * drm_gpuva_for_each_va_range_safe(). + */ +void +drm_gpuva_remove(struct drm_gpuva *va) +{ + struct drm_gpuva_manager *mgr = va->mgr; + + if (unlikely(va == &mgr->kernel_alloc_node)) { + WARN(1, "Can't destroy kernel reserved node.\n"); + return; + } + + __drm_gpuva_remove(va); +} +EXPORT_SYMBOL_GPL(drm_gpuva_remove); + +/** + * drm_gpuva_link() - link a &drm_gpuva + * @va: the &drm_gpuva to link + * + * This adds the given &va to the GPU VA list of the &drm_gem_object it is + * associated with. + * + * This function expects the caller to protect the GEM's GPUVA list against + * concurrent access using the GEM's dma_resv lock. + */ +void +drm_gpuva_link(struct drm_gpuva *va) +{ + struct drm_gem_object *obj = va->gem.obj; + + if (unlikely(!obj)) + return; + + drm_gem_gpuva_assert_lock_held(obj); + + list_add_tail(&va->gem.entry, &obj->gpuva.list); +} +EXPORT_SYMBOL_GPL(drm_gpuva_link); + +/** + * drm_gpuva_unlink() - unlink a &drm_gpuva + * @va: the &drm_gpuva to unlink + * + * This removes the given &va from the GPU VA list of the &drm_gem_object it is + * associated with. + * + * This function expects the caller to protect the GEM's GPUVA list against + * concurrent access using the GEM's dma_resv lock.
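+ * + * A sketch of the expected pattern, assuming the GEM's dma_resv lock is the protecting lock (see DOC: Locking):: + * + * dma_resv_lock(obj->resv, NULL); + * drm_gpuva_unlink(va); + * dma_resv_unlock(obj->resv);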
+ */ +void +drm_gpuva_unlink(struct drm_gpuva *va) +{ + struct drm_gem_object *obj = va->gem.obj; + + if (unlikely(!obj)) + return; + + drm_gem_gpuva_assert_lock_held(obj); + + list_del_init(&va->gem.entry); +} +EXPORT_SYMBOL_GPL(drm_gpuva_unlink); + +/** + * drm_gpuva_find_first() - find the first &drm_gpuva in the given range + * @mgr: the &drm_gpuva_manager to search in + * @addr: the &drm_gpuvas address + * @range: the &drm_gpuvas range + * + * Returns: the first &drm_gpuva within the given range + */ +struct drm_gpuva * +drm_gpuva_find_first(struct drm_gpuva_manager *mgr, + u64 addr, u64 range) +{ + u64 last = addr + range - 1; + + return drm_gpuva_it_iter_first(&mgr->rb.tree, addr, last); +} +EXPORT_SYMBOL_GPL(drm_gpuva_find_first); + +/** + * drm_gpuva_find() - find a &drm_gpuva + * @mgr: the &drm_gpuva_manager to search in + * @addr: the &drm_gpuvas address + * @range: the &drm_gpuvas range + * + * Returns: the &drm_gpuva at a given &addr and with a given &range + */ +struct drm_gpuva * +drm_gpuva_find(struct drm_gpuva_manager *mgr, + u64 addr, u64 range) +{ + struct drm_gpuva *va; + + va = drm_gpuva_find_first(mgr, addr, range); + if (!va) + goto out; + + if (va->va.addr != addr || + va->va.range != range) + goto out; + + return va; + +out: + return NULL; +} +EXPORT_SYMBOL_GPL(drm_gpuva_find); + +/** + * drm_gpuva_find_prev() - find the &drm_gpuva before the given address + * @mgr: the &drm_gpuva_manager to search in + * @start: the given GPU VA's start address + * + * Find the adjacent &drm_gpuva before the GPU VA with given &start address. + * + * Note that if there is any free space between the GPU VA mappings no mapping + * is returned. + * + * Returns: a pointer to the found &drm_gpuva or NULL if none was found + */ +struct drm_gpuva * +drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start) +{ + if (!drm_gpuva_range_valid(mgr, start - 1, 1)) + return NULL; + + return drm_gpuva_it_iter_first(&mgr->rb.tree, start - 1, start); +} +EXPORT_SYMBOL_GPL(drm_gpuva_find_prev); + +/** + * drm_gpuva_find_next() - find the &drm_gpuva after the given address + * @mgr: the &drm_gpuva_manager to search in + * @end: the given GPU VA's end address + * + * Find the adjacent &drm_gpuva after the GPU VA with given &end address. + * + * Note that if there is any free space between the GPU VA mappings no mapping + * is returned. + * + * Returns: a pointer to the found &drm_gpuva or NULL if none was found + */ +struct drm_gpuva * +drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end) +{ + if (!drm_gpuva_range_valid(mgr, end, 1)) + return NULL; + + return drm_gpuva_it_iter_first(&mgr->rb.tree, end, end + 1); +} +EXPORT_SYMBOL_GPL(drm_gpuva_find_next); + +/** + * drm_gpuva_interval_empty() - indicate whether a given interval of the VA space + * is empty + * @mgr: the &drm_gpuva_manager to check the range for + * @addr: the start address of the range + * @range: the range of the interval + * + * Returns: true if the interval is empty, false otherwise + */ +bool +drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range) +{ + return !drm_gpuva_find_first(mgr, addr, range); +} +EXPORT_SYMBOL_GPL(drm_gpuva_interval_empty); + +/** + * drm_gpuva_map() - helper to insert a &drm_gpuva according to a + * &drm_gpuva_op_map + * @mgr: the &drm_gpuva_manager + * @va: the &drm_gpuva to insert + * @op: the &drm_gpuva_op_map to initialize @va with + * + * Initializes the @va from the @op and inserts it into the given @mgr. 
+ */ +void +drm_gpuva_map(struct drm_gpuva_manager *mgr, + struct drm_gpuva *va, + struct drm_gpuva_op_map *op) +{ + drm_gpuva_init_from_op(va, op); + drm_gpuva_insert(mgr, va); +} +EXPORT_SYMBOL_GPL(drm_gpuva_map); + +/** + * drm_gpuva_remap() - helper to remap a &drm_gpuva according to a + * &drm_gpuva_op_remap + * @prev: the &drm_gpuva to remap when keeping the start of a mapping + * @next: the &drm_gpuva to remap when keeping the end of a mapping + * @op: the &drm_gpuva_op_remap to initialize @prev and @next with + * + * Removes the currently mapped &drm_gpuva and remaps it using @prev and/or + * @next. + */ +void +drm_gpuva_remap(struct drm_gpuva *prev, + struct drm_gpuva *next, + struct drm_gpuva_op_remap *op) +{ + struct drm_gpuva *curr = op->unmap->va; + struct drm_gpuva_manager *mgr = curr->mgr; + + drm_gpuva_remove(curr); + + if (op->prev) { + drm_gpuva_init_from_op(prev, op->prev); + drm_gpuva_insert(mgr, prev); + } + + if (op->next) { + drm_gpuva_init_from_op(next, op->next); + drm_gpuva_insert(mgr, next); + } +} +EXPORT_SYMBOL_GPL(drm_gpuva_remap); + +/** + * drm_gpuva_unmap() - helper to remove a &drm_gpuva according to a + * &drm_gpuva_op_unmap + * @op: the &drm_gpuva_op_unmap specifying the &drm_gpuva to remove + * + * Removes the &drm_gpuva associated with the &drm_gpuva_op_unmap. + */ +void +drm_gpuva_unmap(struct drm_gpuva_op_unmap *op) +{ + drm_gpuva_remove(op->va); +} +EXPORT_SYMBOL_GPL(drm_gpuva_unmap); + +static int +op_map_cb(const struct drm_gpuva_fn_ops *fn, void *priv, + u64 addr, u64 range, + struct drm_gem_object *obj, u64 offset) +{ + struct drm_gpuva_op op = {}; + + op.op = DRM_GPUVA_OP_MAP; + op.map.va.addr = addr; + op.map.va.range = range; + op.map.gem.obj = obj; + op.map.gem.offset = offset; + + return fn->sm_step_map(&op, priv); +} + +static int +op_remap_cb(const struct drm_gpuva_fn_ops *fn, void *priv, + struct drm_gpuva_op_map *prev, + struct drm_gpuva_op_map *next, + struct drm_gpuva_op_unmap *unmap) +{ + struct drm_gpuva_op op = {}; + struct drm_gpuva_op_remap *r; + + op.op = DRM_GPUVA_OP_REMAP; + r = &op.remap; + r->prev = prev; + r->next = next; + r->unmap = unmap; + + return fn->sm_step_remap(&op, priv); +} + +static int +op_unmap_cb(const struct drm_gpuva_fn_ops *fn, void *priv, + struct drm_gpuva *va, bool merge) +{ + struct drm_gpuva_op op = {}; + + op.op = DRM_GPUVA_OP_UNMAP; + op.unmap.va = va; + op.unmap.keep = merge; + + return fn->sm_step_unmap(&op, priv); +} + +static int +__drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, + const struct drm_gpuva_fn_ops *ops, void *priv, + u64 req_addr, u64 req_range, + struct drm_gem_object *req_obj, u64 req_offset) +{ + struct drm_gpuva *va, *next, *prev = NULL; + u64 req_end = req_addr + req_range; + int ret; + + if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range))) + return -EINVAL; + + drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) { + struct drm_gem_object *obj = va->gem.obj; + u64 offset = va->gem.offset; + u64 addr = va->va.addr; + u64 range = va->va.range; + u64 end = addr + range; + bool merge = !!va->gem.obj; + + if (addr == req_addr) { + merge &= obj == req_obj && + offset == req_offset; + + if (end == req_end) { + ret = op_unmap_cb(ops, priv, va, merge); + if (ret) + return ret; + break; + } + + if (end < req_end) { + ret = op_unmap_cb(ops, priv, va, merge); + if (ret) + return ret; + goto next; + } + + if (end > req_end) { + struct drm_gpuva_op_map n = { + .va.addr = req_end, + .va.range = range - req_range, + .gem.obj = obj, + .gem.offset = offset + req_range, 
+ }; + struct drm_gpuva_op_unmap u = { + .va = va, + .keep = merge, + }; + + ret = op_remap_cb(ops, priv, NULL, &n, &u); + if (ret) + return ret; + break; + } + } else if (addr < req_addr) { + u64 ls_range = req_addr - addr; + struct drm_gpuva_op_map p = { + .va.addr = addr, + .va.range = ls_range, + .gem.obj = obj, + .gem.offset = offset, + }; + struct drm_gpuva_op_unmap u = { .va = va }; + + merge &= obj == req_obj && + offset + ls_range == req_offset; + u.keep = merge; + + if (end == req_end) { + ret = op_remap_cb(ops, priv, &p, NULL, &u); + if (ret) + return ret; + break; + } + + if (end < req_end) { + ret = op_remap_cb(ops, priv, &p, NULL, &u); + if (ret) + return ret; + goto next; + } + + if (end > req_end) { + struct drm_gpuva_op_map n = { + .va.addr = req_end, + .va.range = end - req_end, + .gem.obj = obj, + .gem.offset = offset + ls_range + + req_range, + }; + + ret = op_remap_cb(ops, priv, &p, &n, &u); + if (ret) + return ret; + break; + } + } else if (addr > req_addr) { + merge &= obj == req_obj && + offset == req_offset + + (addr - req_addr); + + if (end == req_end) { + ret = op_unmap_cb(ops, priv, va, merge); + if (ret) + return ret; + break; + } + + if (end < req_end) { + ret = op_unmap_cb(ops, priv, va, merge); + if (ret) + return ret; + goto next; + } + + if (end > req_end) { + struct drm_gpuva_op_map n = { + .va.addr = req_end, + .va.range = end - req_end, + .gem.obj = obj, + .gem.offset = offset + req_end - addr, + }; + struct drm_gpuva_op_unmap u = { + .va = va, + .keep = merge, + }; + + ret = op_remap_cb(ops, priv, NULL, &n, &u); + if (ret) + return ret; + break; + } + } +next: + prev = va; + } + + return op_map_cb(ops, priv, + req_addr, req_range, + req_obj, req_offset); +} + +static int +__drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, + const struct drm_gpuva_fn_ops *ops, void *priv, + u64 req_addr, u64 req_range) +{ + struct drm_gpuva *va, *next; + u64 req_end = req_addr + req_range; + int ret; + + if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range))) + return -EINVAL; + + drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) { + struct drm_gpuva_op_map prev = {}, next = {}; + bool prev_split = false, next_split = false; + struct drm_gem_object *obj = va->gem.obj; + u64 offset = va->gem.offset; + u64 addr = va->va.addr; + u64 range = va->va.range; + u64 end = addr + range; + + if (addr < req_addr) { + prev.va.addr = addr; + prev.va.range = req_addr - addr; + prev.gem.obj = obj; + prev.gem.offset = offset; + + prev_split = true; + } + + if (end > req_end) { + next.va.addr = req_end; + next.va.range = end - req_end; + next.gem.obj = obj; + next.gem.offset = offset + (req_end - addr); + + next_split = true; + } + + if (prev_split || next_split) { + struct drm_gpuva_op_unmap unmap = { .va = va }; + + ret = op_remap_cb(ops, priv, + prev_split ? &prev : NULL, + next_split ? &next : NULL, + &unmap); + if (ret) + return ret; + } else { + ret = op_unmap_cb(ops, priv, va, false); + if (ret) + return ret; + } + } + + return 0; +} + +/** + * drm_gpuva_sm_map() - creates the &drm_gpuva_op split/merge steps + * @mgr: the &drm_gpuva_manager representing the GPU VA space + * @req_addr: the start address of the new mapping + * @req_range: the range of the new mapping + * @req_obj: the &drm_gem_object to map + * @req_offset: the offset within the &drm_gem_object + * @priv: pointer to a driver private data structure + * + * This function iterates the given range of the GPU VA space. 
It utilizes the
+ * &drm_gpuva_fn_ops to call back into the driver providing the split and merge
+ * steps.
+ *
+ * Drivers may use these callbacks to update the GPU VA space right away within
+ * the callback. In case the driver decides to copy and store the operations for
+ * later processing, neither this function nor &drm_gpuva_sm_unmap is allowed to
+ * be called before the &drm_gpuva_manager's view of the GPU VA space has been
+ * updated with the previous set of operations. To update the
+ * &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
+ * drm_gpuva_remove() and/or drm_gpuva_unlink() should be used.
+ *
+ * A sequence of callbacks can contain map, unmap and remap operations, but
+ * the sequence might also be empty if no operation is required, e.g. if the
+ * requested mapping already exists in the exact same way.
+ *
+ * There can be an arbitrary number of unmap operations, a maximum of two remap
+ * operations and a single map operation. The latter represents the original
+ * map operation requested by the caller.
+ *
+ * Returns: 0 on success or a negative error code
+ */
+int
+drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv,
+		 u64 req_addr, u64 req_range,
+		 struct drm_gem_object *req_obj, u64 req_offset)
+{
+	const struct drm_gpuva_fn_ops *ops = mgr->ops;
+
+	if (unlikely(!(ops && ops->sm_step_map &&
+		       ops->sm_step_remap &&
+		       ops->sm_step_unmap)))
+		return -EINVAL;
+
+	return __drm_gpuva_sm_map(mgr, ops, priv,
+				  req_addr, req_range,
+				  req_obj, req_offset);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_sm_map);
+
+/**
+ * drm_gpuva_sm_unmap() - creates the &drm_gpuva_ops to split on unmap
+ * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * @priv: pointer to a driver private data structure
+ * @req_addr: the start address of the range to unmap
+ * @req_range: the range of the mappings to unmap
+ *
+ * This function iterates the given range of the GPU VA space. It utilizes the
+ * &drm_gpuva_fn_ops to call back into the driver providing the operations to
+ * unmap and, if required, split existing mappings.
+ *
+ * Drivers may use these callbacks to update the GPU VA space right away within
+ * the callback. In case the driver decides to copy and store the operations for
+ * later processing, neither this function nor &drm_gpuva_sm_map is allowed to be
+ * called before the &drm_gpuva_manager's view of the GPU VA space has been
+ * updated with the previous set of operations. To update the
+ * &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
+ * drm_gpuva_remove() and/or drm_gpuva_unlink() should be used.
+ *
+ * A sequence of callbacks can contain unmap and remap operations, depending on
+ * whether there are actual overlapping mappings to split.
+ *
+ * There can be an arbitrary number of unmap operations and a maximum of two
+ * remap operations.
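+ *
+ * As an illustrative sketch (not part of this patch), a driver-side unmap
+ * step could look as follows, assuming a hypothetical my_pt_unmap()
+ * page-table helper::
+ *
+ *	static int my_sm_step_unmap(struct drm_gpuva_op *op, void *priv)
+ *	{
+ *		struct my_gpu *gpu = priv;
+ *		struct drm_gpuva *va = op->unmap.va;
+ *
+ *		// Zap the backing page-table entries, then update the
+ *		// manager's view and release the driver's va object.
+ *		my_pt_unmap(gpu, va->va.addr, va->va.range);
+ *		drm_gpuva_unmap(op);
+ *		kfree(va);
+ *
+ *		return 0;
+ *	}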
+ *
+ * Returns: 0 on success or a negative error code
+ */
+int
+drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv,
+		   u64 req_addr, u64 req_range)
+{
+	const struct drm_gpuva_fn_ops *ops = mgr->ops;
+
+	if (unlikely(!(ops && ops->sm_step_remap &&
+		       ops->sm_step_unmap)))
+		return -EINVAL;
+
+	return __drm_gpuva_sm_unmap(mgr, ops, priv,
+				    req_addr, req_range);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_sm_unmap);
+
+static struct drm_gpuva_op *
+gpuva_op_alloc(struct drm_gpuva_manager *mgr)
+{
+	const struct drm_gpuva_fn_ops *fn = mgr->ops;
+	struct drm_gpuva_op *op;
+
+	if (fn && fn->op_alloc)
+		op = fn->op_alloc();
+	else
+		op = kzalloc(sizeof(*op), GFP_KERNEL);
+
+	if (unlikely(!op))
+		return NULL;
+
+	return op;
+}
+
+static void
+gpuva_op_free(struct drm_gpuva_manager *mgr,
+	      struct drm_gpuva_op *op)
+{
+	const struct drm_gpuva_fn_ops *fn = mgr->ops;
+
+	if (fn && fn->op_free)
+		fn->op_free(op);
+	else
+		kfree(op);
+}
+
+static int
+drm_gpuva_sm_step(struct drm_gpuva_op *__op,
+		  void *priv)
+{
+	struct {
+		struct drm_gpuva_manager *mgr;
+		struct drm_gpuva_ops *ops;
+	} *args = priv;
+	struct drm_gpuva_manager *mgr = args->mgr;
+	struct drm_gpuva_ops *ops = args->ops;
+	struct drm_gpuva_op *op;
+
+	op = gpuva_op_alloc(mgr);
+	if (unlikely(!op))
+		goto err;
+
+	memcpy(op, __op, sizeof(*op));
+
+	if (op->op == DRM_GPUVA_OP_REMAP) {
+		struct drm_gpuva_op_remap *__r = &__op->remap;
+		struct drm_gpuva_op_remap *r = &op->remap;
+
+		r->unmap = kmemdup(__r->unmap, sizeof(*r->unmap),
+				   GFP_KERNEL);
+		if (unlikely(!r->unmap))
+			goto err_free_op;
+
+		if (__r->prev) {
+			r->prev = kmemdup(__r->prev, sizeof(*r->prev),
+					  GFP_KERNEL);
+			if (unlikely(!r->prev))
+				goto err_free_unmap;
+		}
+
+		if (__r->next) {
+			r->next = kmemdup(__r->next, sizeof(*r->next),
+					  GFP_KERNEL);
+			if (unlikely(!r->next))
+				goto err_free_prev;
+		}
+	}
+
+	list_add_tail(&op->entry, &ops->list);
+
+	return 0;
+
+	/* Unwind in reverse allocation order; each label falls through. */
+err_free_prev:
+	kfree(op->remap.prev);
+err_free_unmap:
+	kfree(op->remap.unmap);
+err_free_op:
+	gpuva_op_free(mgr, op);
+err:
+	return -ENOMEM;
+}
+
+static const struct drm_gpuva_fn_ops gpuva_list_ops = {
+	.sm_step_map = drm_gpuva_sm_step,
+	.sm_step_remap = drm_gpuva_sm_step,
+	.sm_step_unmap = drm_gpuva_sm_step,
+};
+
+/**
+ * drm_gpuva_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
+ * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * @req_addr: the start address of the new mapping
+ * @req_range: the range of the new mapping
+ * @req_obj: the &drm_gem_object to map
+ * @req_offset: the offset within the &drm_gem_object
+ *
+ * This function creates a list of operations to perform splitting and merging
+ * of existing mapping(s) with the newly requested one.
+ *
+ * The list can be iterated with &drm_gpuva_for_each_op and must be processed
+ * in the given order. It can contain map, unmap and remap operations, but it
+ * also can be empty if no operation is required, e.g. if the requested mapping
+ * already exists in the exact same way.
+ *
+ * There can be an arbitrary number of unmap operations, a maximum of two remap
+ * operations and a single map operation. The latter represents the original
+ * map operation requested by the caller.
+ *
+ * Note that before calling this function again with another mapping request it
+ * is necessary to update the &drm_gpuva_manager's view of the GPU VA space. The
+ * previously obtained operations must be either processed or abandoned. To
+ * update the &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
+ * drm_gpuva_remove() and/or drm_gpuva_unlink() should be used.
+ *
+ * After the caller has finished processing the returned &drm_gpuva_ops, they
+ * must be freed with &drm_gpuva_ops_free.
+ *
+ * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
+ */
+struct drm_gpuva_ops *
+drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr,
+			    u64 req_addr, u64 req_range,
+			    struct drm_gem_object *req_obj, u64 req_offset)
+{
+	struct drm_gpuva_ops *ops;
+	struct {
+		struct drm_gpuva_manager *mgr;
+		struct drm_gpuva_ops *ops;
+	} args;
+	int ret;
+
+	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+	if (unlikely(!ops))
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&ops->list);
+
+	args.mgr = mgr;
+	args.ops = ops;
+
+	ret = __drm_gpuva_sm_map(mgr, &gpuva_list_ops, &args,
+				 req_addr, req_range,
+				 req_obj, req_offset);
+	if (ret)
+		goto err_free_ops;
+
+	return ops;
+
+err_free_ops:
+	drm_gpuva_ops_free(mgr, ops);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_sm_map_ops_create);
+
+/**
+ * drm_gpuva_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
+ * unmap
+ * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * @req_addr: the start address of the range to unmap
+ * @req_range: the range of the mappings to unmap
+ *
+ * This function creates a list of operations to perform unmapping and, if
+ * required, splitting of the mappings overlapping the unmap range.
+ *
+ * The list can be iterated with &drm_gpuva_for_each_op and must be processed
+ * in the given order. It can contain unmap and remap operations, depending on
+ * whether there are actual overlapping mappings to split.
+ *
+ * There can be an arbitrary number of unmap operations and a maximum of two
+ * remap operations.
+ *
+ * Note that before calling this function again with another range to unmap it
+ * is necessary to update the &drm_gpuva_manager's view of the GPU VA space. The
+ * previously obtained operations must be processed or abandoned. To update the
+ * &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
+ * drm_gpuva_remove() and/or drm_gpuva_unlink() should be used.
+ *
+ * After the caller has finished processing the returned &drm_gpuva_ops, they
+ * must be freed with &drm_gpuva_ops_free.
+ *
+ * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
+ */
+struct drm_gpuva_ops *
+drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr,
+			      u64 req_addr, u64 req_range)
+{
+	struct drm_gpuva_ops *ops;
+	struct {
+		struct drm_gpuva_manager *mgr;
+		struct drm_gpuva_ops *ops;
+	} args;
+	int ret;
+
+	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+	if (unlikely(!ops))
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&ops->list);
+
+	args.mgr = mgr;
+	args.ops = ops;
+
+	ret = __drm_gpuva_sm_unmap(mgr, &gpuva_list_ops, &args,
+				   req_addr, req_range);
+	if (ret)
+		goto err_free_ops;
+
+	return ops;
+
+err_free_ops:
+	drm_gpuva_ops_free(mgr, ops);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_sm_unmap_ops_create);
+
+/**
+ * drm_gpuva_prefetch_ops_create() - creates the &drm_gpuva_ops to prefetch
+ * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * @addr: the start address of the range to prefetch
+ * @range: the range of the mappings to prefetch
+ *
+ * This function creates a list of operations to perform prefetching.
+ *
+ * The list can be iterated with &drm_gpuva_for_each_op and must be processed
+ * in the given order. It can contain prefetch operations.
+ *
+ * There can be an arbitrary number of prefetch operations.
+ *
+ * After the caller has finished processing the returned &drm_gpuva_ops, they
+ * must be freed with &drm_gpuva_ops_free.
+ *
+ * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
+ */
+struct drm_gpuva_ops *
+drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr,
+			      u64 addr, u64 range)
+{
+	struct drm_gpuva_ops *ops;
+	struct drm_gpuva_op *op;
+	struct drm_gpuva *va;
+	u64 end = addr + range;
+	int ret;
+
+	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+	if (!ops)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&ops->list);
+
+	drm_gpuva_for_each_va_range(va, mgr, addr, end) {
+		op = gpuva_op_alloc(mgr);
+		if (!op) {
+			ret = -ENOMEM;
+			goto err_free_ops;
+		}
+
+		op->op = DRM_GPUVA_OP_PREFETCH;
+		op->prefetch.va = va;
+		list_add_tail(&op->entry, &ops->list);
+	}
+
+	return ops;
+
+err_free_ops:
+	drm_gpuva_ops_free(mgr, ops);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_prefetch_ops_create);
+
+/**
+ * drm_gpuva_gem_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
+ * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * @obj: the &drm_gem_object to unmap
+ *
+ * This function creates a list of operations to perform unmapping for every
+ * GPUVA attached to a GEM.
+ *
+ * The list can be iterated with &drm_gpuva_for_each_op and consists of an
+ * arbitrary number of unmap operations.
+ *
+ * After the caller has finished processing the returned &drm_gpuva_ops, they
+ * must be freed with &drm_gpuva_ops_free.
+ *
+ * It is the caller's responsibility to protect the GEM's gpuva list against
+ * concurrent access using the GEM's dma_resv lock.
+ *
+ * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
+ */
+struct drm_gpuva_ops *
+drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr,
+			       struct drm_gem_object *obj)
+{
+	struct drm_gpuva_ops *ops;
+	struct drm_gpuva_op *op;
+	struct drm_gpuva *va;
+	int ret;
+
+	drm_gem_gpuva_assert_lock_held(obj);
+
+	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+	if (!ops)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&ops->list);
+
+	drm_gem_for_each_gpuva(va, obj) {
+		op = gpuva_op_alloc(mgr);
+		if (!op) {
+			ret = -ENOMEM;
+			goto err_free_ops;
+		}
+
+		op->op = DRM_GPUVA_OP_UNMAP;
+		op->unmap.va = va;
+		list_add_tail(&op->entry, &ops->list);
+	}
+
+	return ops;
+
+err_free_ops:
+	drm_gpuva_ops_free(mgr, ops);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_gem_unmap_ops_create);
+
+/**
+ * drm_gpuva_ops_free() - free the given &drm_gpuva_ops
+ * @mgr: the &drm_gpuva_manager the ops were created for
+ * @ops: the &drm_gpuva_ops to free
+ *
+ * Frees the given &drm_gpuva_ops structure including all the ops associated
+ * with it.
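+ *
+ * A typical create/process/free cycle might look like this (sketch only;
+ * driver_process_op() is a hypothetical helper)::
+ *
+ *	struct drm_gpuva_ops *ops;
+ *	struct drm_gpuva_op *op;
+ *	int ret = 0;
+ *
+ *	ops = drm_gpuva_sm_map_ops_create(mgr, addr, range, obj, offset);
+ *	if (IS_ERR(ops))
+ *		return PTR_ERR(ops);
+ *
+ *	drm_gpuva_for_each_op(op, ops) {
+ *		ret = driver_process_op(op);
+ *		if (ret)
+ *			break;
+ *	}
+ *
+ *	drm_gpuva_ops_free(mgr, ops);
+ *	return ret;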
+ */
+void
+drm_gpuva_ops_free(struct drm_gpuva_manager *mgr,
+		   struct drm_gpuva_ops *ops)
+{
+	struct drm_gpuva_op *op, *next;
+
+	drm_gpuva_for_each_op_safe(op, next, ops) {
+		list_del(&op->entry);
+
+		if (op->op == DRM_GPUVA_OP_REMAP) {
+			kfree(op->remap.prev);
+			kfree(op->remap.next);
+			kfree(op->remap.unmap);
+		}
+
+		gpuva_op_free(mgr, op);
+	}
+
+	kfree(ops);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_ops_free);
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index b77f2c7275b7..9813fa759b75 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -104,6 +104,12 @@ enum drm_driver_feature {
 	 * acceleration should be handled by two drivers that are connected using auxiliary bus.
 	 */
 	DRIVER_COMPUTE_ACCEL		= BIT(7),
+	/**
+	 * @DRIVER_GEM_GPUVA:
+	 *
+	 * Driver supports user defined GPU VA bindings for GEM objects.
+	 */
+	DRIVER_GEM_GPUVA		= BIT(8),
 
 	/* IMPORTANT: Below are all the legacy flags, add new ones above. */
 
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index bbc721870c13..c0b13c43b459 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -36,6 +36,8 @@
 
 #include 
 #include 
+#include 
+#include 
 
 #include 
 
@@ -379,6 +381,22 @@ struct drm_gem_object {
 	 */
 	struct dma_resv _resv;
 
+	/**
+	 * @gpuva:
+	 *
+	 * Provides the list of GPU VAs attached to this GEM object.
+	 *
+	 * Drivers should lock list accesses with the GEM's &dma_resv lock
+	 * (&drm_gem_object.resv) or a custom lock if one is provided.
+	 */
+	struct {
+		struct list_head list;
+
+#ifdef CONFIG_LOCKDEP
+		struct lockdep_map *lock_dep_map;
+#endif
+	} gpuva;
+
 	/**
 	 * @funcs:
 	 *
@@ -526,4 +544,65 @@ unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
 
 int drm_gem_evict(struct drm_gem_object *obj);
 
+#ifdef CONFIG_LOCKDEP
+/**
+ * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
+ * @obj: the &drm_gem_object
+ * @lock: the lock used to protect the gpuva list. The locking primitive
+ * must contain a dep_map field.
+ *
+ * Call this if you're not protecting access to the gpuva list
+ * with the dma-resv lock, otherwise, drm_gem_gpuva_init() takes care
+ * of initializing lock_dep_map for you.
+ */
+#define drm_gem_gpuva_set_lock(obj, lock) \
+	if (!(obj)->gpuva.lock_dep_map) \
+		(obj)->gpuva.lock_dep_map = &(lock)->dep_map
+#define drm_gem_gpuva_assert_lock_held(obj) \
+	lockdep_assert(lock_is_held((obj)->gpuva.lock_dep_map))
+#else
+#define drm_gem_gpuva_set_lock(obj, lock) do {} while (0)
+#define drm_gem_gpuva_assert_lock_held(obj) do {} while (0)
+#endif
+
+/**
+ * drm_gem_gpuva_init() - initialize the gpuva list of a GEM object
+ * @obj: the &drm_gem_object
+ *
+ * This initializes the &drm_gem_object's &drm_gpuva list.
+ *
+ * Calling this function is only necessary for drivers intending to support the
+ * &drm_driver_feature DRIVER_GEM_GPUVA.
+ */
+static inline void drm_gem_gpuva_init(struct drm_gem_object *obj)
+{
+	INIT_LIST_HEAD(&obj->gpuva.list);
+	drm_gem_gpuva_set_lock(obj, &obj->resv->lock.base);
+}
+
+/**
+ * drm_gem_for_each_gpuva() - iterator to walk over a list of gpuvas
+ * @entry__: &drm_gpuva structure to assign to in each iteration step
+ * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the
+ * &drm_gem_object.
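+ *
+ * An illustrative sketch (not part of this patch), e.g. from an eviction
+ * path, assuming a hypothetical my_invalidate_mapping() helper::
+ *
+ *	struct drm_gpuva *va;
+ *
+ *	drm_gem_gpuva_assert_lock_held(obj);
+ *	drm_gem_for_each_gpuva(va, obj) {
+ *		my_invalidate_mapping(va);
+ *		drm_gpuva_invalidate(va, true);
+ *	}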
+ */
+#define drm_gem_for_each_gpuva(entry__, obj__) \
+	list_for_each_entry(entry__, &(obj__)->gpuva.list, gem.entry)
+
+/**
+ * drm_gem_for_each_gpuva_safe() - iterator to safely walk over a list of
+ * gpuvas
+ * @entry__: &drm_gpuva structure to assign to in each iteration step
+ * @next__: another &drm_gpuva to use as temporary storage
+ * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the
+ * &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence
+ * it is safe against removal of elements.
+ */
+#define drm_gem_for_each_gpuva_safe(entry__, next__, obj__) \
+	list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, gem.entry)
+
 #endif /* __DRM_GEM_H__ */
diff --git a/include/drm/drm_gpuva_mgr.h b/include/drm/drm_gpuva_mgr.h
new file mode 100644
index 000000000000..ed8d50200cc3
--- /dev/null
+++ b/include/drm/drm_gpuva_mgr.h
@@ -0,0 +1,706 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __DRM_GPUVA_MGR_H__
+#define __DRM_GPUVA_MGR_H__
+
+/*
+ * Copyright (c) 2022 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include 
+#include 
+#include 
+
+#include 
+
+struct drm_gpuva_manager;
+struct drm_gpuva_fn_ops;
+
+/**
+ * enum drm_gpuva_flags - flags for struct drm_gpuva
+ */
+enum drm_gpuva_flags {
+	/**
+	 * @DRM_GPUVA_INVALIDATED:
+	 *
+	 * Flag indicating that the &drm_gpuva's backing GEM is invalidated.
+	 */
+	DRM_GPUVA_INVALIDATED = (1 << 0),
+
+	/**
+	 * @DRM_GPUVA_SPARSE:
+	 *
+	 * Flag indicating that the &drm_gpuva is a sparse mapping.
+	 */
+	DRM_GPUVA_SPARSE = (1 << 1),
+
+	/**
+	 * @DRM_GPUVA_USERBITS: user defined bits
+	 */
+	DRM_GPUVA_USERBITS = (1 << 2),
+};
+
+/**
+ * struct drm_gpuva - structure to track a GPU VA mapping
+ *
+ * This structure represents a GPU VA mapping and is associated with a
+ * &drm_gpuva_manager.
+ *
+ * Typically, this structure is embedded in bigger driver structures.
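+ *
+ * For instance (sketch only, not taken from this patch), a driver might
+ * wrap it like::
+ *
+ *	struct my_gpuva {
+ *		struct drm_gpuva base;
+ *
+ *		// driver specific per-mapping state, e.g. page-table
+ *		// bookkeeping, goes here
+ *	};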
+ */
+struct drm_gpuva {
+	/**
+	 * @mgr: the &drm_gpuva_manager this object is associated with
+	 */
+	struct drm_gpuva_manager *mgr;
+
+	/**
+	 * @flags: the &drm_gpuva_flags for this mapping
+	 */
+	enum drm_gpuva_flags flags;
+
+	/**
+	 * @va: structure containing the address and range of the &drm_gpuva
+	 */
+	struct {
+		/**
+		 * @addr: the start address
+		 */
+		u64 addr;
+
+		/**
+		 * @range: the range
+		 */
+		u64 range;
+	} va;
+
+	/**
+	 * @gem: structure containing the &drm_gem_object and its offset
+	 */
+	struct {
+		/**
+		 * @offset: the offset within the &drm_gem_object
+		 */
+		u64 offset;
+
+		/**
+		 * @obj: the mapped &drm_gem_object
+		 */
+		struct drm_gem_object *obj;
+
+		/**
+		 * @entry: the &list_head to attach this object to a &drm_gem_object
+		 */
+		struct list_head entry;
+	} gem;
+
+	/**
+	 * @rb: structure containing data to store &drm_gpuvas in an rb-tree
+	 */
+	struct {
+		/**
+		 * @rb: the rb-tree node
+		 */
+		struct rb_node node;
+
+		/**
+		 * @entry: The &list_head to additionally connect &drm_gpuvas
+		 * in the same order they appear in the interval tree. This is
+		 * useful to keep iterating &drm_gpuvas from a start node found
+		 * through the rb-tree while doing modifications on the rb-tree
+		 * itself.
+		 */
+		struct list_head entry;
+
+		/**
+		 * @__subtree_last: needed by the interval tree, holding last-in-subtree
+		 */
+		u64 __subtree_last;
+	} rb;
+};
+
+int drm_gpuva_insert(struct drm_gpuva_manager *mgr, struct drm_gpuva *va);
+void drm_gpuva_remove(struct drm_gpuva *va);
+
+void drm_gpuva_link(struct drm_gpuva *va);
+void drm_gpuva_unlink(struct drm_gpuva *va);
+
+struct drm_gpuva *drm_gpuva_find(struct drm_gpuva_manager *mgr,
+				 u64 addr, u64 range);
+struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuva_manager *mgr,
+				       u64 addr, u64 range);
+struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start);
+struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end);
+
+bool drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range);
+
+static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
+				  struct drm_gem_object *obj, u64 offset)
+{
+	va->va.addr = addr;
+	va->va.range = range;
+	va->gem.obj = obj;
+	va->gem.offset = offset;
+}
+
+/**
+ * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
+ * invalidated
+ * @va: the &drm_gpuva to set the invalidate flag for
+ * @invalidate: indicates whether the &drm_gpuva is invalidated
+ */
+static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
+{
+	if (invalidate)
+		va->flags |= DRM_GPUVA_INVALIDATED;
+	else
+		va->flags &= ~DRM_GPUVA_INVALIDATED;
+}
+
+/**
+ * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva
+ * is invalidated
+ * @va: the &drm_gpuva to check
+ */
+static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
+{
+	return va->flags & DRM_GPUVA_INVALIDATED;
+}
+
+/**
+ * struct drm_gpuva_manager - DRM GPU VA Manager
+ *
+ * The DRM GPU VA Manager keeps track of a GPU's virtual address space by using
+ * an interval tree (rb-tree based) of &drm_gpuva structures. Typically, this
+ * structure is embedded in bigger driver structures.
+ *
+ * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or
+ * pages.
+ *
+ * There should be one manager instance per GPU virtual address space.
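+ *
+ * A minimal setup sketch (the values and the my_gpuva_fn_ops name are made
+ * up for illustration)::
+ *
+ *	struct drm_gpuva_manager mgr;
+ *
+ *	// Manage a 48-bit VA space, reserving the first 4 KiB for the
+ *	// kernel and wiring up the driver's split/merge step callbacks.
+ *	drm_gpuva_manager_init(&mgr, "example", 0, 1ull << 48,
+ *			       0, SZ_4K, &my_gpuva_fn_ops);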
+ */ +struct drm_gpuva_manager { + /** + * @name: the name of the DRM GPU VA space + */ + const char *name; + + /** + * @mm_start: start of the VA space + */ + u64 mm_start; + + /** + * @mm_range: length of the VA space + */ + u64 mm_range; + + /** + * @rb: structures to track &drm_gpuva entries + */ + struct { + /** + * @tree: the rb-tree to track GPU VA mappings + */ + struct rb_root_cached tree; + + /** + * @list: the &list_head to track GPU VA mappings + */ + struct list_head list; + } rb; + + /** + * @kernel_alloc_node: + * + * &drm_gpuva representing the address space cutout reserved for + * the kernel + */ + struct drm_gpuva kernel_alloc_node; + + /** + * @ops: &drm_gpuva_fn_ops providing the split/merge steps to drivers + */ + const struct drm_gpuva_fn_ops *ops; +}; + +void drm_gpuva_manager_init(struct drm_gpuva_manager *mgr, + const char *name, + u64 start_offset, u64 range, + u64 reserve_offset, u64 reserve_range, + const struct drm_gpuva_fn_ops *ops); +void drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr); + +static inline struct drm_gpuva * +__drm_gpuva_next(struct drm_gpuva *va) +{ + if (va && !list_is_last(&va->rb.entry, &va->mgr->rb.list)) + return list_next_entry(va, rb.entry); + + return NULL; +} + +/** + * drm_gpuva_for_each_va_range() - iterate over a range of &drm_gpuvas + * @va__: &drm_gpuva structure to assign to in each iteration step + * @mgr__: &drm_gpuva_manager to walk over + * @start__: starting offset, the first gpuva will overlap this + * @end__: ending offset, the last gpuva will start before this (but may + * overlap) + * + * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie + * between @start__ and @end__. It is implemented similarly to list_for_each(), + * but is using the &drm_gpuva_manager's internal interval tree to accelerate + * the search for the starting &drm_gpuva, and hence isn't safe against removal + * of elements. It assumes that @end__ is within (or is the upper limit of) the + * &drm_gpuva_manager. This iterator does not skip over the &drm_gpuva_manager's + * @kernel_alloc_node. + */ +#define drm_gpuva_for_each_va_range(va__, mgr__, start__, end__) \ + for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)); \ + va__ && (va__->va.addr < (end__)); \ + va__ = __drm_gpuva_next(va__)) + +/** + * drm_gpuva_for_each_va_range_safe() - safely iterate over a range of + * &drm_gpuvas + * @va__: &drm_gpuva to assign to in each iteration step + * @next__: another &drm_gpuva to use as temporary storage + * @mgr__: &drm_gpuva_manager to walk over + * @start__: starting offset, the first gpuva will overlap this + * @end__: ending offset, the last gpuva will start before this (but may + * overlap) + * + * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie + * between @start__ and @end__. It is implemented similarly to + * list_for_each_safe(), but is using the &drm_gpuva_manager's internal interval + * tree to accelerate the search for the starting &drm_gpuva, and hence is safe + * against removal of elements. It assumes that @end__ is within (or is the + * upper limit of) the &drm_gpuva_manager. This iterator does not skip over the + * &drm_gpuva_manager's @kernel_alloc_node. 
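+ *
+ * An illustrative sketch (not from this patch) of tearing down a range
+ * while walking it, with a hypothetical my_teardown_va() helper::
+ *
+ *	struct drm_gpuva *va, *next;
+ *
+ *	drm_gpuva_for_each_va_range_safe(va, next, mgr, addr, addr + range) {
+ *		drm_gpuva_remove(va);
+ *		my_teardown_va(va);
+ *	}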
+ */
+#define drm_gpuva_for_each_va_range_safe(va__, next__, mgr__, start__, end__) \
+	for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)), \
+	     next__ = __drm_gpuva_next(va__); \
+	     va__ && (va__->va.addr < (end__)); \
+	     va__ = next__, next__ = __drm_gpuva_next(va__))
+
+/**
+ * drm_gpuva_for_each_va() - iterate over all &drm_gpuvas
+ * @va__: &drm_gpuva to assign to in each iteration step
+ * @mgr__: &drm_gpuva_manager to walk over
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the given
+ * &drm_gpuva_manager.
+ */
+#define drm_gpuva_for_each_va(va__, mgr__) \
+	list_for_each_entry(va__, &(mgr__)->rb.list, rb.entry)
+
+/**
+ * drm_gpuva_for_each_va_safe() - safely iterate over all &drm_gpuvas
+ * @va__: &drm_gpuva to assign to in each iteration step
+ * @next__: another &drm_gpuva to use as temporary storage
+ * @mgr__: &drm_gpuva_manager to walk over
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the given
+ * &drm_gpuva_manager. It is implemented with list_for_each_entry_safe(), and
+ * hence safe against the removal of elements.
+ */
+#define drm_gpuva_for_each_va_safe(va__, next__, mgr__) \
+	list_for_each_entry_safe(va__, next__, &(mgr__)->rb.list, rb.entry)
+
+/**
+ * enum drm_gpuva_op_type - GPU VA operation type
+ *
+ * Operations to alter the GPU VA mappings tracked by the &drm_gpuva_manager.
+ */
+enum drm_gpuva_op_type {
+	/**
+	 * @DRM_GPUVA_OP_MAP: the map op type
+	 */
+	DRM_GPUVA_OP_MAP,
+
+	/**
+	 * @DRM_GPUVA_OP_REMAP: the remap op type
+	 */
+	DRM_GPUVA_OP_REMAP,
+
+	/**
+	 * @DRM_GPUVA_OP_UNMAP: the unmap op type
+	 */
+	DRM_GPUVA_OP_UNMAP,
+
+	/**
+	 * @DRM_GPUVA_OP_PREFETCH: the prefetch op type
+	 */
+	DRM_GPUVA_OP_PREFETCH,
+};
+
+/**
+ * struct drm_gpuva_op_map - GPU VA map operation
+ *
+ * This structure represents a single map operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_map {
+	/**
+	 * @va: structure containing address and range of a map
+	 * operation
+	 */
+	struct {
+		/**
+		 * @addr: the base address of the new mapping
+		 */
+		u64 addr;
+
+		/**
+		 * @range: the range of the new mapping
+		 */
+		u64 range;
+	} va;
+
+	/**
+	 * @gem: structure containing the &drm_gem_object and its offset
+	 */
+	struct {
+		/**
+		 * @offset: the offset within the &drm_gem_object
+		 */
+		u64 offset;
+
+		/**
+		 * @obj: the &drm_gem_object to map
+		 */
+		struct drm_gem_object *obj;
+	} gem;
+};
+
+/**
+ * struct drm_gpuva_op_unmap - GPU VA unmap operation
+ *
+ * This structure represents a single unmap operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_unmap {
+	/**
+	 * @va: the &drm_gpuva to unmap
+	 */
+	struct drm_gpuva *va;
+
+	/**
+	 * @keep:
+	 *
+	 * Indicates whether this &drm_gpuva is physically contiguous with the
+	 * original mapping request.
+	 *
+	 * Optionally, if &keep is set, drivers may keep the actual page table
+	 * mappings for this &drm_gpuva, adding the missing page table entries
+	 * only and update the &drm_gpuva_manager accordingly.
+	 */
+	bool keep;
+};
+
+/**
+ * struct drm_gpuva_op_remap - GPU VA remap operation
+ *
+ * This represents a single remap operation generated by the DRM GPU VA manager.
+ *
+ * A remap operation is generated when an existing GPU VA mapping is split up
+ * by inserting a new GPU VA mapping or by partially unmapping existing
+ * mapping(s), hence it consists of a maximum of two map and one unmap
+ * operation.
+ *
+ * The @unmap operation takes care of removing the original existing mapping.
+ * @prev is used to remap the preceding part, @next the subsequent part.
+ *
+ * If either a new mapping's start address is aligned with the start address
+ * of the old mapping or the new mapping's end address is aligned with the
+ * end address of the old mapping, either @prev or @next is NULL.
+ *
+ * Note, the reason for a dedicated remap operation, rather than arbitrary
+ * unmap and map operations, is to give drivers the chance of extracting driver
+ * specific data for creating the new mappings from the unmap operation's
+ * &drm_gpuva structure which typically is embedded in larger driver specific
+ * structures.
+ */
+struct drm_gpuva_op_remap {
+	/**
+	 * @prev: the preceding part of a split mapping
+	 */
+	struct drm_gpuva_op_map *prev;
+
+	/**
+	 * @next: the subsequent part of a split mapping
+	 */
+	struct drm_gpuva_op_map *next;
+
+	/**
+	 * @unmap: the unmap operation for the original existing mapping
+	 */
+	struct drm_gpuva_op_unmap *unmap;
+};
+
+/**
+ * struct drm_gpuva_op_prefetch - GPU VA prefetch operation
+ *
+ * This structure represents a single prefetch operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_prefetch {
+	/**
+	 * @va: the &drm_gpuva to prefetch
+	 */
+	struct drm_gpuva *va;
+};
+
+/**
+ * struct drm_gpuva_op - GPU VA operation
+ *
+ * This structure represents a single generic operation.
+ *
+ * The particular type of the operation is defined by @op.
+ */
+struct drm_gpuva_op {
+	/**
+	 * @entry:
+	 *
+	 * The &list_head used to distribute instances of this struct within
+	 * &drm_gpuva_ops.
+	 */
+	struct list_head entry;
+
+	/**
+	 * @op: the type of the operation
+	 */
+	enum drm_gpuva_op_type op;
+
+	union {
+		/**
+		 * @map: the map operation
+		 */
+		struct drm_gpuva_op_map map;
+
+		/**
+		 * @remap: the remap operation
+		 */
+		struct drm_gpuva_op_remap remap;
+
+		/**
+		 * @unmap: the unmap operation
+		 */
+		struct drm_gpuva_op_unmap unmap;
+
+		/**
+		 * @prefetch: the prefetch operation
+		 */
+		struct drm_gpuva_op_prefetch prefetch;
+	};
+};
+
+/**
+ * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op
+ */
+struct drm_gpuva_ops {
+	/**
+	 * @list: the &list_head
+	 */
+	struct list_head list;
+};
+
+/**
+ * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations.
+ */
+#define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @next: another &drm_gpuva_op to use as temporary storage
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations. It is
+ * implemented with list_for_each_entry_safe(), hence it is safe against the
+ * removal of elements.
+ */
+#define drm_gpuva_for_each_op_safe(op, next, ops) \
+	list_for_each_entry_safe(op, next, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations beginning
+ * from the given operation in reverse order.
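+ *
+ * This is useful, for instance, to unwind after a failed forward walk. A
+ * sketch (not from this patch), assuming hypothetical my_apply_op() and
+ * my_revert_op() helpers, where my_revert_op() tolerates an op that was
+ * never applied::
+ *
+ *	drm_gpuva_for_each_op(op, ops) {
+ *		ret = my_apply_op(op, priv);
+ *		if (ret) {
+ *			// revert in reverse order, starting at the failed op
+ *			drm_gpuva_for_each_op_from_reverse(op, ops)
+ *				my_revert_op(op, priv);
+ *			break;
+ *		}
+ *	}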
+ */
+#define drm_gpuva_for_each_op_from_reverse(op, ops) \
+	list_for_each_entry_from_reverse(op, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
+ * @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from
+ */
+#define drm_gpuva_first_op(ops) \
+	list_first_entry(&(ops)->list, struct drm_gpuva_op, entry)
+
+/**
+ * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops
+ * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from
+ */
+#define drm_gpuva_last_op(ops) \
+	list_last_entry(&(ops)->list, struct drm_gpuva_op, entry)
+
+/**
+ * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list
+ * @op: the current &drm_gpuva_op
+ */
+#define drm_gpuva_prev_op(op) list_prev_entry(op, entry)
+
+/**
+ * drm_gpuva_next_op() - next &drm_gpuva_op in the list
+ * @op: the current &drm_gpuva_op
+ */
+#define drm_gpuva_next_op(op) list_next_entry(op, entry)
+
+struct drm_gpuva_ops *
+drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr,
+			    u64 addr, u64 range,
+			    struct drm_gem_object *obj, u64 offset);
+struct drm_gpuva_ops *
+drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr,
+			      u64 addr, u64 range);
+
+struct drm_gpuva_ops *
+drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr,
+			      u64 addr, u64 range);
+
+struct drm_gpuva_ops *
+drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr,
+			       struct drm_gem_object *obj);
+
+void drm_gpuva_ops_free(struct drm_gpuva_manager *mgr,
+			struct drm_gpuva_ops *ops);
+
+static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
+					  struct drm_gpuva_op_map *op)
+{
+	drm_gpuva_init(va, op->va.addr, op->va.range,
+		       op->gem.obj, op->gem.offset);
+}
+
+/**
+ * struct drm_gpuva_fn_ops - callbacks for split/merge steps
+ *
+ * This structure defines the callbacks used by &drm_gpuva_sm_map and
+ * &drm_gpuva_sm_unmap to provide the split/merge steps for map and unmap
+ * operations to drivers.
+ */
+struct drm_gpuva_fn_ops {
+	/**
+	 * @op_alloc: called when the &drm_gpuva_manager allocates
+	 * a struct drm_gpuva_op
+	 *
+	 * Some drivers may want to embed struct drm_gpuva_op into driver
+	 * specific structures. By implementing this callback drivers can
+	 * allocate memory accordingly.
+	 *
+	 * This callback is optional.
+	 */
+	struct drm_gpuva_op *(*op_alloc)(void);
+
+	/**
+	 * @op_free: called when the &drm_gpuva_manager frees a
+	 * struct drm_gpuva_op
+	 *
+	 * Some drivers may want to embed struct drm_gpuva_op into driver
+	 * specific structures. By implementing this callback drivers can
+	 * free the previously allocated memory accordingly.
+	 *
+	 * This callback is optional.
+	 */
+	void (*op_free)(struct drm_gpuva_op *op);
+
+	/**
+	 * @sm_step_map: called from &drm_gpuva_sm_map to finally insert the
+	 * mapping once all previous steps were completed
+	 *
+	 * The &priv pointer matches the one the driver passed to
+	 * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
+	 *
+	 * Can be NULL if &drm_gpuva_sm_map is not used.
+	 */
+	int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);
+
+	/**
+	 * @sm_step_remap: called from &drm_gpuva_sm_map and
+	 * &drm_gpuva_sm_unmap to split up an existing mapping
+	 *
+	 * This callback is called when an existing mapping needs to be split
+	 * up. This is the case when either a newly requested mapping overlaps
+	 * or is enclosed by an existing mapping, or a partial unmap of an
+	 * existing mapping is requested.
+	 *
+	 * The &priv pointer matches the one the driver passed to
+	 * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
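	 *
	 * A driver-side sketch (not from this patch), assuming a
	 * hypothetical my_pt_unmap_hole() helper and pre-allocated
	 * prev/next &drm_gpuva objects carried in the priv context::
	 *
	 *	static int my_sm_step_remap(struct drm_gpuva_op *op, void *priv)
	 *	{
	 *		struct my_ctx *ctx = priv;
	 *
	 *		// Zap only the punched hole; drm_gpuva_remap()
	 *		// re-inserts the kept parts into the manager.
	 *		my_pt_unmap_hole(ctx, &op->remap);
	 *		drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
	 *
	 *		return 0;
	 *	}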
+	 *
+	 * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is
+	 * used.
+	 */
+	int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);
+
+	/**
+	 * @sm_step_unmap: called from &drm_gpuva_sm_map and
+	 * &drm_gpuva_sm_unmap to unmap an existing mapping
+	 *
+	 * This callback is called when an existing mapping needs to be
+	 * unmapped. This is the case when either a newly requested mapping
+	 * encloses an existing mapping or an unmap of an existing mapping is
+	 * requested.
+	 *
+	 * The &priv pointer matches the one the driver passed to
+	 * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
+	 *
+	 * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is
+	 * used.
+	 */
+	int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
+};
+
+int drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv,
+		     u64 addr, u64 range,
+		     struct drm_gem_object *obj, u64 offset);
+
+int drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv,
+		       u64 addr, u64 range);
+
+void drm_gpuva_map(struct drm_gpuva_manager *mgr,
+		   struct drm_gpuva *va,
+		   struct drm_gpuva_op_map *op);
+
+void drm_gpuva_remap(struct drm_gpuva *prev,
+		     struct drm_gpuva *next,
+		     struct drm_gpuva_op_remap *op);
+
+void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);
+
+#endif /* __DRM_GPUVA_MGR_H__ */
--
cgit v1.2.3


From 4f66feeab173bd73e71028b8c2e1dcea07e32dd5 Mon Sep 17 00:00:00 2001
From: Danilo Krummrich
Date: Thu, 20 Jul 2023 02:14:23 +0200
Subject: drm: debugfs: provide infrastructure to dump a DRM GPU VA space

This commit adds a function to dump a DRM GPU VA space and a macro for
drivers to register the struct drm_info_list 'gpuvas' entry.

Most drivers will likely maintain one DRM GPU VA space per struct
drm_file, but there might also be drivers without a fixed relation
between DRM GPU VA spaces and a DRM core infrastructure; hence the
indirection via the driver iterating its maintained DRM GPU VA spaces.

Reviewed-by: Boris Brezillon
Signed-off-by: Danilo Krummrich
Link: https://patchwork.freedesktop.org/patch/msgid/20230720001443.2380-3-dakr@redhat.com
---
 drivers/gpu/drm/drm_debugfs.c | 40 ++++++++++++++++++++++++++++++++++++++++
 include/drm/drm_debugfs.h     | 25 +++++++++++++++++++++++++
 2 files changed, 65 insertions(+)

(limited to 'include/drm')

diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 4855230ba2c6..c90dbcffa0dc 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -39,6 +39,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "drm_crtc_internal.h"
 #include "drm_internal.h"
@@ -175,6 +176,45 @@ static const struct file_operations drm_debugfs_fops = {
 	.release = single_release,
 };
 
+/**
+ * drm_debugfs_gpuva_info - dump the given DRM GPU VA space
+ * @m: pointer to the &seq_file to write
+ * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ *
+ * Dumps the GPU VA mappings of a given DRM GPU VA manager.
+ *
+ * For each DRM GPU VA space drivers should call this function from their
+ * &drm_info_list's show callback.
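+ *
+ * A minimal show callback could look as follows (sketch only; my_device and
+ * its va_mgr member are hypothetical)::
+ *
+ *	static int my_gpuvas_show(struct seq_file *m, void *data)
+ *	{
+ *		struct drm_info_node *node = m->private;
+ *		struct my_device *mydev = to_my_device(node->minor->dev);
+ *
+ *		return drm_debugfs_gpuva_info(m, &mydev->va_mgr);
+ *	}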
+ * + * Returns: 0 on success, -ENODEV if the &mgr is not initialized + */ +int drm_debugfs_gpuva_info(struct seq_file *m, + struct drm_gpuva_manager *mgr) +{ + struct drm_gpuva *va, *kva = &mgr->kernel_alloc_node; + + if (!mgr->name) + return -ENODEV; + + seq_printf(m, "DRM GPU VA space (%s) [0x%016llx;0x%016llx]\n", + mgr->name, mgr->mm_start, mgr->mm_start + mgr->mm_range); + seq_printf(m, "Kernel reserved node [0x%016llx;0x%016llx]\n", + kva->va.addr, kva->va.addr + kva->va.range); + seq_puts(m, "\n"); + seq_puts(m, " VAs | start | range | end | object | object offset\n"); + seq_puts(m, "-------------------------------------------------------------------------------------------------------------\n"); + drm_gpuva_for_each_va(va, mgr) { + if (unlikely(va == kva)) + continue; + + seq_printf(m, " | 0x%016llx | 0x%016llx | 0x%016llx | 0x%016llx | 0x%016llx\n", + va->va.addr, va->va.range, va->va.addr + va->va.range, + (u64)va->gem.obj, va->gem.offset); + } + + return 0; +} +EXPORT_SYMBOL(drm_debugfs_gpuva_info); /** * drm_debugfs_create_files - Initialize a given set of debugfs files for DRM diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h index 7616f457ce70..cb2c1956a214 100644 --- a/include/drm/drm_debugfs.h +++ b/include/drm/drm_debugfs.h @@ -34,6 +34,22 @@ #include #include + +#include + +/** + * DRM_DEBUGFS_GPUVA_INFO - &drm_info_list entry to dump a GPU VA space + * @show: the &drm_info_list's show callback + * @data: driver private data + * + * Drivers should use this macro to define a &drm_info_list entry to provide a + * debugfs file for dumping the GPU VA space regions and mappings. + * + * For each DRM GPU VA space drivers should call drm_debugfs_gpuva_info() from + * their @show callback. + */ +#define DRM_DEBUGFS_GPUVA_INFO(show, data) {"gpuvas", show, DRIVER_GEM_GPUVA, data} + /** * struct drm_info_list - debugfs info list entry * @@ -134,6 +150,9 @@ void drm_debugfs_add_file(struct drm_device *dev, const char *name, void drm_debugfs_add_files(struct drm_device *dev, const struct drm_debugfs_info *files, int count); + +int drm_debugfs_gpuva_info(struct seq_file *m, + struct drm_gpuva_manager *mgr); #else static inline void drm_debugfs_create_files(const struct drm_info_list *files, int count, struct dentry *root, @@ -155,6 +174,12 @@ static inline void drm_debugfs_add_files(struct drm_device *dev, const struct drm_debugfs_info *files, int count) {} + +static inline int drm_debugfs_gpuva_info(struct seq_file *m, + struct drm_gpuva_manager *mgr) +{ + return 0; +} #endif #endif /* _DRM_DEBUGFS_H_ */ -- cgit v1.2.3 From c7a472297169156252a50d76965eb36b081186e2 Mon Sep 17 00:00:00 2001 From: Simon Ser Date: Fri, 14 Jul 2023 11:13:03 +0000 Subject: drm/syncobj: add IOCTL to register an eventfd MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce a new DRM_IOCTL_SYNCOBJ_EVENTFD IOCTL which signals an eventfd from a syncobj. This is useful for Wayland compositors to handle wait-before-submit. Wayland clients can send a timeline point to the compositor before the point has materialized yet, then compositors can wait for the point to materialize via this new IOCTL. The existing DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT IOCTL is not suitable because it blocks. Compositors want to integrate the wait with their poll(2)-based event loop. 
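For instance (an illustrative sketch, not part of this patch), a
compositor could register an eventfd for a timeline point roughly like
this, then poll() the fd alongside its other event sources:

    struct drm_syncobj_eventfd args = {
        .handle = syncobj_handle,
        .flags = 0, /* wait for the point to be signalled */
        .point = point,
        .fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK),
    };
    drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &args);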
Requirements for new uAPI: - User-space patch: https://gitlab.freedesktop.org/wlroots/wlroots/-/merge_requests/4262 - IGT: https://lists.freedesktop.org/archives/igt-dev/2023-July/057893.html v2: - Wait for fence when flags is zero - Improve documentation (Pekka) - Rename IOCTL (Christian) - Fix typo in drm_syncobj_add_eventfd() (Christian) v3: - Link user-space + IGT patches - Add reference from overview docs v4: fix IOCTL number conflict with GETFB2 (Nicholas Choi, Vitaly Prosyak) Signed-off-by: Simon Ser Reviewed-by: Christian König Acked-by: Pekka Paalanen Cc: Jason Ekstrand Cc: Daniel Vetter Cc: Bas Nieuwenhuizen Cc: Daniel Stone Cc: James Jones Cc: Austin Shafer Cc: Vitaly Prosyak Link: https://patchwork.freedesktop.org/patch/msgid/20230714111257.11940-1-contact@emersion.fr --- drivers/gpu/drm/drm_internal.h | 2 + drivers/gpu/drm/drm_ioctl.c | 2 + drivers/gpu/drm/drm_syncobj.c | 148 +++++++++++++++++++++++++++++++++++++++-- include/drm/drm_syncobj.h | 6 +- include/uapi/drm/drm.h | 23 +++++++ 5 files changed, 174 insertions(+), 7 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index d7e023bbb0d5..ba12acd55139 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -245,6 +245,8 @@ int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); int drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); +int drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 8e9afe7af19c..f03ffbacfe9b 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -701,6 +701,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, drm_syncobj_timeline_wait_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_EVENTFD, drm_syncobj_eventfd_ioctl, + DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl, diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 0c2be8360525..be3e8787d207 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -136,6 +136,10 @@ * requirement is inherited from the wait-before-signal behavior required by * the Vulkan timeline semaphore API. * + * Alternatively, &DRM_IOCTL_SYNCOBJ_EVENTFD can be used to wait without + * blocking: an eventfd will be signaled when the syncobj is. This is useful to + * integrate the wait in an event loop. 
+ * * * Import/export of syncobjs * ------------------------- @@ -185,6 +189,7 @@ #include #include +#include #include #include #include @@ -212,6 +217,20 @@ struct syncobj_wait_entry { static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj, struct syncobj_wait_entry *wait); +struct syncobj_eventfd_entry { + struct list_head node; + struct dma_fence *fence; + struct dma_fence_cb fence_cb; + struct drm_syncobj *syncobj; + struct eventfd_ctx *ev_fd_ctx; + u64 point; + u32 flags; +}; + +static void +syncobj_eventfd_entry_func(struct drm_syncobj *syncobj, + struct syncobj_eventfd_entry *entry); + /** * drm_syncobj_find - lookup and reference a sync object. * @file_private: drm file private pointer @@ -274,6 +293,28 @@ static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj, spin_unlock(&syncobj->lock); } +static void +syncobj_eventfd_entry_free(struct syncobj_eventfd_entry *entry) +{ + eventfd_ctx_put(entry->ev_fd_ctx); + dma_fence_put(entry->fence); + /* This happens either inside the syncobj lock, or after the node has + * already been removed from the list. + */ + list_del(&entry->node); + kfree(entry); +} + +static void +drm_syncobj_add_eventfd(struct drm_syncobj *syncobj, + struct syncobj_eventfd_entry *entry) +{ + spin_lock(&syncobj->lock); + list_add_tail(&entry->node, &syncobj->ev_fd_list); + syncobj_eventfd_entry_func(syncobj, entry); + spin_unlock(&syncobj->lock); +} + /** * drm_syncobj_add_point - add new timeline point to the syncobj * @syncobj: sync object to add timeline point do @@ -288,7 +329,8 @@ void drm_syncobj_add_point(struct drm_syncobj *syncobj, struct dma_fence *fence, uint64_t point) { - struct syncobj_wait_entry *cur, *tmp; + struct syncobj_wait_entry *wait_cur, *wait_tmp; + struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp; struct dma_fence *prev; dma_fence_get(fence); @@ -302,8 +344,10 @@ void drm_syncobj_add_point(struct drm_syncobj *syncobj, dma_fence_chain_init(chain, prev, fence, point); rcu_assign_pointer(syncobj->fence, &chain->base); - list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) - syncobj_wait_syncobj_func(syncobj, cur); + list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node) + syncobj_wait_syncobj_func(syncobj, wait_cur); + list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node) + syncobj_eventfd_entry_func(syncobj, ev_fd_cur); spin_unlock(&syncobj->lock); /* Walk the chain once to trigger garbage collection */ @@ -323,7 +367,8 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, struct dma_fence *fence) { struct dma_fence *old_fence; - struct syncobj_wait_entry *cur, *tmp; + struct syncobj_wait_entry *wait_cur, *wait_tmp; + struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp; if (fence) dma_fence_get(fence); @@ -335,8 +380,10 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, rcu_assign_pointer(syncobj->fence, fence); if (fence != old_fence) { - list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) - syncobj_wait_syncobj_func(syncobj, cur); + list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node) + syncobj_wait_syncobj_func(syncobj, wait_cur); + list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node) + syncobj_eventfd_entry_func(syncobj, ev_fd_cur); } spin_unlock(&syncobj->lock); @@ -472,7 +519,13 @@ void drm_syncobj_free(struct kref *kref) struct drm_syncobj *syncobj = container_of(kref, struct drm_syncobj, refcount); + struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp; + drm_syncobj_replace_fence(syncobj, NULL); + + 
list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node) + syncobj_eventfd_entry_free(ev_fd_cur); + kfree(syncobj); } EXPORT_SYMBOL(drm_syncobj_free); @@ -501,6 +554,7 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags, kref_init(&syncobj->refcount); INIT_LIST_HEAD(&syncobj->cb_list); + INIT_LIST_HEAD(&syncobj->ev_fd_list); spin_lock_init(&syncobj->lock); if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) { @@ -1304,6 +1358,88 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data, return ret; } +static void syncobj_eventfd_entry_fence_func(struct dma_fence *fence, + struct dma_fence_cb *cb) +{ + struct syncobj_eventfd_entry *entry = + container_of(cb, struct syncobj_eventfd_entry, fence_cb); + + eventfd_signal(entry->ev_fd_ctx, 1); + syncobj_eventfd_entry_free(entry); +} + +static void +syncobj_eventfd_entry_func(struct drm_syncobj *syncobj, + struct syncobj_eventfd_entry *entry) +{ + int ret; + struct dma_fence *fence; + + /* This happens inside the syncobj lock */ + fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1)); + ret = dma_fence_chain_find_seqno(&fence, entry->point); + if (ret != 0 || !fence) { + dma_fence_put(fence); + return; + } + + list_del_init(&entry->node); + entry->fence = fence; + + if (entry->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) { + eventfd_signal(entry->ev_fd_ctx, 1); + syncobj_eventfd_entry_free(entry); + } else { + ret = dma_fence_add_callback(fence, &entry->fence_cb, + syncobj_eventfd_entry_fence_func); + if (ret == -ENOENT) { + eventfd_signal(entry->ev_fd_ctx, 1); + syncobj_eventfd_entry_free(entry); + } + } +} + +int +drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private) +{ + struct drm_syncobj_eventfd *args = data; + struct drm_syncobj *syncobj; + struct eventfd_ctx *ev_fd_ctx; + struct syncobj_eventfd_entry *entry; + + if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) + return -EOPNOTSUPP; + + if (args->flags & ~DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) + return -EINVAL; + + if (args->pad) + return -EINVAL; + + syncobj = drm_syncobj_find(file_private, args->handle); + if (!syncobj) + return -ENOENT; + + ev_fd_ctx = eventfd_ctx_fdget(args->fd); + if (IS_ERR(ev_fd_ctx)) + return PTR_ERR(ev_fd_ctx); + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + eventfd_ctx_put(ev_fd_ctx); + return -ENOMEM; + } + entry->syncobj = syncobj; + entry->ev_fd_ctx = ev_fd_ctx; + entry->point = args->point; + entry->flags = args->flags; + + drm_syncobj_add_eventfd(syncobj, entry); + drm_syncobj_put(syncobj); + + return 0; +} int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data, diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h index 6cf7243a1dc5..b40052132e52 100644 --- a/include/drm/drm_syncobj.h +++ b/include/drm/drm_syncobj.h @@ -54,7 +54,11 @@ struct drm_syncobj { */ struct list_head cb_list; /** - * @lock: Protects &cb_list and write-locks &fence. + * @ev_fd_list: List of registered eventfd. + */ + struct list_head ev_fd_list; + /** + * @lock: Protects &cb_list and &ev_fd_list, and write-locks &fence. */ spinlock_t lock; /** diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index a87bbbbca2d4..863e47200911 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -909,6 +909,27 @@ struct drm_syncobj_timeline_wait { __u32 pad; }; +/** + * struct drm_syncobj_eventfd + * @handle: syncobj handle. 
+ * @flags: Zero to wait for the point to be signalled, or
+ * &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
+ * available for the point.
+ * @point: syncobj timeline point (set to zero for binary syncobjs).
+ * @fd: Existing eventfd to send events to.
+ * @pad: Must be zero.
+ *
+ * Register an eventfd to be signalled by a syncobj. The eventfd counter will
+ * be incremented by one.
+ */
+struct drm_syncobj_eventfd {
+	__u32 handle;
+	__u32 flags;
+	__u64 point;
+	__s32 fd;
+	__u32 pad;
+};
+
 
 struct drm_syncobj_array {
 	__u64 handles;
@@ -1169,6 +1190,8 @@ extern "C" {
  */
 #define DRM_IOCTL_MODE_GETFB2		DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
 
+#define DRM_IOCTL_SYNCOBJ_EVENTFD	DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
+
 /*
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x9f.
--
cgit v1.2.3


From 9bba6b192663f375e35dd7a72eedc10845c30727 Mon Sep 17 00:00:00 2001
From: Geert Uytterhoeven
Date: Mon, 17 Jul 2023 15:23:20 +0200
Subject: drm: Spelling s/sempahore/semaphore/

Fix misspellings of "semaphore".

Signed-off-by: Geert Uytterhoeven
Reviewed-by: Hamza Mahfooz
Acked-by: Rodrigo Vivi
Signed-off-by: Javier Martinez Canillas
Link: https://patchwork.freedesktop.org/patch/msgid/8b0542c12a2427f34a792c41ac2d2a2922874bfa.1689600102.git.geert+renesas@glider.be
---
 drivers/gpu/drm/i915/i915_request.c | 2 +-
 drivers/gpu/drm/radeon/cik.c        | 2 +-
 drivers/gpu/drm/radeon/r600.c       | 2 +-
 include/drm/task_barrier.h          | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

(limited to 'include/drm')

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 894068bb37b6..32323bb801a1 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1220,7 +1220,7 @@ emit_semaphore_wait(struct i915_request *to,
 	/*
 	 * If this or its dependents are waiting on an external fence
 	 * that may fail catastrophically, then we want to avoid using
-	 * sempahores as they bypass the fence signaling metadata, and we
+	 * semaphores as they bypass the fence signaling metadata, and we
 	 * lose the fence->error propagation.
 	 */
 	if (from->sched.flags & I915_SCHED_HAS_EXTERNAL_CHAIN)
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 5819737c21c6..5d6b81a6578e 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3603,7 +3603,7 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
  * @rdev: radeon_device pointer
  * @ring: radeon ring buffer object
  * @semaphore: radeon semaphore object
- * @emit_wait: Is this a sempahore wait?
+ * @emit_wait: Is this a semaphore wait?
  *
  * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
  * from running ahead of semaphore waits.
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 382795a8b3c0..a17b95eec65f 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2918,7 +2918,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
  * @rdev: radeon_device pointer
  * @ring: radeon ring buffer object
 * @semaphore: radeon semaphore object
- * @emit_wait: Is this a sempahore wait?
+ * @emit_wait: Is this a semaphore wait?
 *
 * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
 * from running ahead of semaphore waits.
diff --git a/include/drm/task_barrier.h b/include/drm/task_barrier.h index 087e3f649c52..217c1cf21c1a 100644 --- a/include/drm/task_barrier.h +++ b/include/drm/task_barrier.h @@ -25,7 +25,7 @@ /* * Reusable 2 PHASE task barrier (randevouz point) implementation for N tasks. - * Based on the Little book of sempahores - https://greenteapress.com/wp/semaphores/ + * Based on the Little book of semaphores - https://greenteapress.com/wp/semaphores/ */ -- cgit v1.2.3 From c9155a3c31400afacd294c3f6c2458d4150c4fa0 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 17 Jul 2023 15:23:58 +0200 Subject: drm: Spelling s/randevouz/rendez-vous/ Fix a misspelling of "rendez-vous". Signed-off-by: Geert Uytterhoeven Reviewed-by: Hamza Mahfooz Signed-off-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/084bf178dd676a4f07933eb9fcd04d3e30a779ba.1689600209.git.geert+renesas@glider.be --- include/drm/task_barrier.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/drm') diff --git a/include/drm/task_barrier.h b/include/drm/task_barrier.h index 217c1cf21c1a..f6e6ed529681 100644 --- a/include/drm/task_barrier.h +++ b/include/drm/task_barrier.h @@ -24,7 +24,7 @@ #include /* - * Reusable 2 PHASE task barrier (randevouz point) implementation for N tasks. + * Reusable 2 PHASE task barrier (rendez-vous point) implementation for N tasks. * Based on the Little book of semaphores - https://greenteapress.com/wp/semaphores/ */ -- cgit v1.2.3 From 81ed7d737621ff91571daea8fbf277e187e3a207 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 19 Jul 2023 17:23:36 +0200 Subject: drm: Remove references to removed transitional helpers The transitional helpers were removed a long time ago, but some references stuck. Remove them. Fixes: 21ebe615c16994f3 ("drm: Remove transitional helpers") Signed-off-by: Geert Uytterhoeven Reviewed-by: Laurent Pinchart Signed-off-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/ad4a2f1f9fa7da083132f6c35469c77a3f9e2f0e.1689779916.git.geert+renesas@glider.be --- drivers/gpu/drm/drm_plane_helper.c | 12 ++------ include/drm/drm_crtc.h | 5 ---- include/drm/drm_modeset_helper_vtables.h | 48 ++++++++++++++------------------ 3 files changed, 23 insertions(+), 42 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index c91e454eba09..5e95089676ff 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -40,8 +40,8 @@ /** * DOC: overview * - * This helper library has two parts. The first part has support to implement - * primary plane support on top of the normal CRTC configuration interface. + * This helper library contains helpers to implement primary plane support on + * top of the normal CRTC configuration interface. * Since the legacy &drm_mode_config_funcs.set_config interface ties the primary * plane together with the CRTC state this does not allow userspace to disable * the primary plane itself. The default primary plane only expose XRBG8888 and @@ -51,14 +51,6 @@ * planes, and newly merged drivers must not rely upon these transitional * helpers. * - * The second part also implements transitional helpers which allow drivers to - * gradually switch to the atomic helper infrastructure for plane updates. Once - * that switch is complete drivers shouldn't use these any longer, instead using - * the proper legacy implementations for update and disable plane hooks provided - * by the atomic helpers. 
- * - * Again drivers are strongly urged to switch to the new interfaces. - * * The plane helpers share the function table structures with other helpers, * specifically also the atomic helpers. See &struct drm_plane_helper_funcs for * the details. diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 8e1cbc75143e..8b48a1974da3 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -77,11 +77,6 @@ struct drm_plane_helper_funcs; * intended to indicate whether a full modeset is needed, rather than strictly * describing what has changed in a commit. See also: * drm_atomic_crtc_needs_modeset() - * - * WARNING: Transitional helpers (like drm_helper_crtc_mode_set() or - * drm_helper_crtc_mode_set_base()) do not maintain many of the derived control - * state like @plane_mask so drivers not converted over to atomic helpers should - * not rely on these being accurate! */ struct drm_crtc_state { /** @crtc: backpointer to the CRTC */ diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 965faf082a6d..e3c3ac615909 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -59,8 +59,8 @@ enum mode_set_atomic { /** * struct drm_crtc_helper_funcs - helper operations for CRTCs * - * These hooks are used by the legacy CRTC helpers, the transitional plane - * helpers and the new atomic modesetting helpers. + * These hooks are used by the legacy CRTC helpers and the new atomic + * modesetting helpers. */ struct drm_crtc_helper_funcs { /** @@ -216,9 +216,7 @@ struct drm_crtc_helper_funcs { * * This callback is used to update the display mode of a CRTC without * changing anything of the primary plane configuration. This fits the - * requirement of atomic and hence is used by the atomic helpers. It is - * also used by the transitional plane helpers to implement a - * @mode_set hook in drm_helper_crtc_mode_set(). + * requirement of atomic and hence is used by the atomic helpers. * * Note that the display pipe is completely off when this function is * called. Atomic drivers which need hardware to be running before they @@ -333,8 +331,8 @@ struct drm_crtc_helper_funcs { * all updated. Again the recommendation is to just call check helpers * until a maximal configuration is reached. * - * This callback is used by the atomic modeset helpers and by the - * transitional plane helpers, but it is optional. + * This callback is used by the atomic modeset helpers, but it is + * optional. * * NOTE: * @@ -373,8 +371,8 @@ struct drm_crtc_helper_funcs { * has picked. See drm_atomic_helper_commit_planes() for a discussion of * the tradeoffs and variants of plane commit helpers. * - * This callback is used by the atomic modeset helpers and by the - * transitional plane helpers, but it is optional. + * This callback is used by the atomic modeset helpers, but it is + * optional. */ void (*atomic_begin)(struct drm_crtc *crtc, struct drm_atomic_state *state); @@ -397,8 +395,8 @@ struct drm_crtc_helper_funcs { * has picked. See drm_atomic_helper_commit_planes() for a discussion of * the tradeoffs and variants of plane commit helpers. * - * This callback is used by the atomic modeset helpers and by the - * transitional plane helpers, but it is optional. + * This callback is used by the atomic modeset helpers, but it is + * optional. 
*/ void (*atomic_flush)(struct drm_crtc *crtc, struct drm_atomic_state *state); @@ -507,8 +505,8 @@ static inline void drm_crtc_helper_add(struct drm_crtc *crtc, /** * struct drm_encoder_helper_funcs - helper operations for encoders * - * These hooks are used by the legacy CRTC helpers, the transitional plane - * helpers and the new atomic modesetting helpers. + * These hooks are used by the legacy CRTC helpers and the new atomic + * modesetting helpers. */ struct drm_encoder_helper_funcs { /** @@ -1185,8 +1183,7 @@ static inline void drm_connector_helper_add(struct drm_connector *connector, /** * struct drm_plane_helper_funcs - helper operations for planes * - * These functions are used by the atomic helpers and by the transitional plane - * helpers. + * These functions are used by the atomic helpers. */ struct drm_plane_helper_funcs { /** @@ -1221,9 +1218,8 @@ struct drm_plane_helper_funcs { * The helpers will call @cleanup_fb with matching arguments for every * successful call to this hook. * - * This callback is used by the atomic modeset helpers and by the - * transitional plane helpers, but it is optional. See @begin_fb_access - * for preparing per-commit resources. + * This callback is used by the atomic modeset helpers, but it is + * optional. See @begin_fb_access for preparing per-commit resources. * * RETURNS: * @@ -1240,8 +1236,8 @@ struct drm_plane_helper_funcs { * This hook is called to clean up any resources allocated for the given * framebuffer and plane configuration in @prepare_fb. * - * This callback is used by the atomic modeset helpers and by the - * transitional plane helpers, but it is optional. + * This callback is used by the atomic modeset helpers, but it is + * optional. */ void (*cleanup_fb)(struct drm_plane *plane, struct drm_plane_state *old_state); @@ -1295,8 +1291,8 @@ struct drm_plane_helper_funcs { * all updated. Again the recommendation is to just call check helpers * until a maximal configuration is reached. * - * This callback is used by the atomic modeset helpers and by the - * transitional plane helpers, but it is optional. + * This callback is used by the atomic modeset helpers, but it is + * optional. * * NOTE: * @@ -1326,8 +1322,7 @@ struct drm_plane_helper_funcs { * has picked. See drm_atomic_helper_commit_planes() for a discussion of * the tradeoffs and variants of plane commit helpers. * - * This callback is used by the atomic modeset helpers and by the - * transitional plane helpers, but it is optional. + * This callback is used by the atomic modeset helpers, but it is optional. */ void (*atomic_update)(struct drm_plane *plane, struct drm_atomic_state *state); @@ -1376,9 +1371,8 @@ struct drm_plane_helper_funcs { * has picked. See drm_atomic_helper_commit_planes() for a discussion of * the tradeoffs and variants of plane commit helpers. * - * This callback is used by the atomic modeset helpers and by the - * transitional plane helpers, but it is optional. It's intended to - * reverse the effects of @atomic_enable. + * This callback is used by the atomic modeset helpers, but it is + * optional. It's intended to reverse the effects of @atomic_enable. 
*/ void (*atomic_disable)(struct drm_plane *plane, struct drm_atomic_state *state); -- cgit v1.2.3 From 6e193f9fbbb02e1bde88510a71823e5bf83c2010 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Fri, 28 Jul 2023 11:06:18 +0200 Subject: drm/tests: helpers: Create a helper to allocate a locking ctx MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As we get more and more tests, the locking context initialisation creates more and more boilerplate, both at creation and destruction. Let's create a helper that will allocate, initialise a context, and register kunit actions to clean up once the test is done. Reviewed-by: Javier Martinez Canillas Reviewed-by: Maíra Canal Link: https://lore.kernel.org/r/20230728-kms-kunit-actions-rework-v3-5-952565ccccfe@kernel.org Signed-off-by: Maxime Ripard --- drivers/gpu/drm/tests/drm_kunit_helpers.c | 41 +++++++++++++++++++++++++++++++ include/drm/drm_kunit_helpers.h | 2 ++ 2 files changed, 43 insertions(+) (limited to 'include/drm') diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c index cdf35421e641..35ea4a34909d 100644 --- a/drivers/gpu/drm/tests/drm_kunit_helpers.c +++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c @@ -146,5 +146,46 @@ __drm_kunit_helper_alloc_drm_device_with_driver(struct kunit *test, } EXPORT_SYMBOL_GPL(__drm_kunit_helper_alloc_drm_device_with_driver); +static void action_drm_release_context(void *ptr) +{ + struct drm_modeset_acquire_ctx *ctx = ptr; + + drm_modeset_drop_locks(ctx); + drm_modeset_acquire_fini(ctx); +} + +/** + * drm_kunit_helper_acquire_ctx_alloc - Allocates an acquire context + * @test: The test context object + * + * Allocates and initializes a modeset acquire context. + * + * The context is tied to the kunit test context, so we must not call + * drm_modeset_acquire_fini() on it; this is done automatically. + * + * Returns: + * An ERR_PTR on error, a pointer to the newly allocated context otherwise + */ +struct drm_modeset_acquire_ctx * +drm_kunit_helper_acquire_ctx_alloc(struct kunit *test) +{ + struct drm_modeset_acquire_ctx *ctx; + int ret; + + ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, ctx); + + drm_modeset_acquire_init(ctx, 0); + + ret = kunit_add_action_or_reset(test, + action_drm_release_context, + ctx); + if (ret) + return ERR_PTR(ret); + + return ctx; +} +EXPORT_SYMBOL_GPL(drm_kunit_helper_acquire_ctx_alloc); + MODULE_AUTHOR("Maxime Ripard "); MODULE_LICENSE("GPL"); diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h index ed013fdcc1ff..4ba5e10653c6 100644 --- a/include/drm/drm_kunit_helpers.h +++ b/include/drm/drm_kunit_helpers.h @@ -87,5 +87,7 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test, sizeof(_type), \ offsetof(_type, _member), \ _feat)) +struct drm_modeset_acquire_ctx * +drm_kunit_helper_acquire_ctx_alloc(struct kunit *test); #endif // DRM_KUNIT_HELPERS_H_ -- cgit v1.2.3 From 394ba10e476d092111ebb3419f771bde419a037e Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Fri, 28 Jul 2023 11:06:19 +0200 Subject: drm/tests: helpers: Create a helper to allocate an atomic state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As we gain more tests, the boilerplate to allocate and free an atomic state shows up more and more as well. In order to reduce the allocation boilerplate, we can create a helper that allocates the atomic state and registers an action to release it when the test is done.
This will also clean up the exit path. Reviewed-by: Javier Martinez Canillas Reviewed-by: Maíra Canal Link: https://lore.kernel.org/r/20230728-kms-kunit-actions-rework-v3-6-952565ccccfe@kernel.org Signed-off-by: Maxime Ripard --- drivers/gpu/drm/tests/drm_kunit_helpers.c | 46 +++++++++++++++++++++++++++++++ include/drm/drm_kunit_helpers.h | 5 ++++ 2 files changed, 51 insertions(+) (limited to 'include/drm') diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c index 35ea4a34909d..3d624ff2f651 100644 --- a/drivers/gpu/drm/tests/drm_kunit_helpers.c +++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 +#include #include #include #include @@ -187,5 +188,50 @@ drm_kunit_helper_acquire_ctx_alloc(struct kunit *test) } EXPORT_SYMBOL_GPL(drm_kunit_helper_acquire_ctx_alloc); +static void kunit_action_drm_atomic_state_put(void *ptr) +{ + struct drm_atomic_state *state = ptr; + + drm_atomic_state_put(state); +} + +/** + * drm_kunit_helper_atomic_state_alloc - Allocates an atomic state + * @test: The test context object + * @drm: The device to alloc the state for + * @ctx: Locking context for that atomic update + * + * Allocates an empty atomic state. + * + * The state is tied to the kunit test context, so we must not call + * drm_atomic_state_put() on it; this is done automatically. + * + * Returns: + * An ERR_PTR on error, a pointer to the newly allocated state otherwise + */ +struct drm_atomic_state * +drm_kunit_helper_atomic_state_alloc(struct kunit *test, + struct drm_device *drm, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_atomic_state *state; + int ret; + + state = drm_atomic_state_alloc(drm); + if (!state) + return ERR_PTR(-ENOMEM); + + ret = kunit_add_action_or_reset(test, + kunit_action_drm_atomic_state_put, + state); + if (ret) + return ERR_PTR(ret); + + state->acquire_ctx = ctx; + + return state; +} +EXPORT_SYMBOL_GPL(drm_kunit_helper_atomic_state_alloc); + MODULE_AUTHOR("Maxime Ripard "); MODULE_LICENSE("GPL"); diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h index 4ba5e10653c6..514c8a7a32f0 100644 --- a/include/drm/drm_kunit_helpers.h +++ b/include/drm/drm_kunit_helpers.h @@ -90,4 +90,9 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test, struct drm_modeset_acquire_ctx * drm_kunit_helper_acquire_ctx_alloc(struct kunit *test); +struct drm_atomic_state * +drm_kunit_helper_atomic_state_alloc(struct kunit *test, + struct drm_device *drm, + struct drm_modeset_acquire_ctx *ctx); + #endif // DRM_KUNIT_HELPERS_H_ -- cgit v1.2.3 From d2aacaf07395bd798373cbec6af05fff4147aff3 Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Thu, 27 Jul 2023 10:16:29 -0700 Subject: drm/panel: Check for already prepared/enabled in drm_panel In a whole pile of panel drivers, we have code to make the prepare/unprepare/enable/disable callbacks behave as no-ops if they've already been called. It's silly to have this code duplicated everywhere. Add it to the core instead so that we can eventually delete it from all the drivers. Note: to get some idea of the duplicated code, try: git grep 'if.*>prepared' -- drivers/gpu/drm/panel git grep 'if.*>enabled' -- drivers/gpu/drm/panel NOTE: arguably, the right thing to do here is actually to skip this patch and simply remove all the extra checks from the individual drivers. Perhaps the checks were needed at some point in time in the past but maybe they no longer are?
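To make the duplication concrete, the pattern being removed looks roughly like this in a typical panel driver (a sketch; the driver struct and names are illustrative, not taken from any specific driver):

    struct my_panel {
        struct drm_panel base;
        bool prepared;
        /* regulators, reset GPIO, ... */
    };

    static int my_panel_prepare(struct drm_panel *panel)
    {
        struct my_panel *ctx = container_of(panel, struct my_panel, base);

        /* The per-driver bookkeeping this patch moves into the core: */
        if (ctx->prepared)
            return 0;

        /* ... power up regulators, deassert reset, ... */

        ctx->prepared = true;
        return 0;
    }

With the equivalent check done once in drm_panel_prepare() / drm_panel_unprepare() (and the enable/disable counterparts), this per-driver bool becomes redundant.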
Certainly as we continue transitioning over to "panel_bridge" then we expect there to be much less variety in how these calls are made. When we're called as part of the bridge chain, things should be pretty simple. In fact, there was some discussion in the past about these checks [1], including a discussion about whether the checks were needed and whether the calls ought to be refcounted. At the time, I decided not to mess with it because it felt too risky. Looking closer at it now, I'm fairly certain that nothing in the existing codebase is expecting these calls to be refcounted. The only real question is whether someone is already doing something to ensure prepare()/unprepare() match and enabled()/disable() match. I would say that, even if there is something else ensuring that things match, there's enough complexity that adding an extra bool and an extra double-check here is a good idea. Let's add a drm_warn() to let people know that it's considered a minor error to take advantage of drm_panel's double-checking but we'll still make things work fine. We'll also add an entry to the official DRM todo list to remove the now pointless check from the panels after this patch lands and, eventually, fixup anyone who is triggering the new warning. [1] https://lore.kernel.org/r/20210416153909.v4.27.I502f2a92ddd36c3d28d014dd75e170c2d405a0a5@changeid Acked-by: Neil Armstrong Reviewed-by: Maxime Ripard Signed-off-by: Douglas Anderson Link: https://patchwork.freedesktop.org/patch/msgid/20230727101636.v4.2.I59b417d4c29151cc2eff053369ec4822b606f375@changeid --- Documentation/gpu/todo.rst | 24 ++++++++++++++++++++++ drivers/gpu/drm/drm_panel.c | 49 +++++++++++++++++++++++++++++++++++++++------ include/drm/drm_panel.h | 14 +++++++++++++ 3 files changed, 81 insertions(+), 6 deletions(-) (limited to 'include/drm') diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index 02de2ff6b84f..aa0052f9b93b 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -460,6 +460,30 @@ Contact: Thomas Zimmermann Level: Starter +Clean up checks for already prepared/enabled in panels +------------------------------------------------------ + +In a whole pile of panel drivers, we have code to make the +prepare/unprepare/enable/disable callbacks behave as no-ops if they've already +been called. To get some idea of the duplicated code, try: + git grep 'if.*>prepared' -- drivers/gpu/drm/panel + git grep 'if.*>enabled' -- drivers/gpu/drm/panel + +In the patch ("drm/panel: Check for already prepared/enabled in drm_panel") +we've moved this check to the core. Now we can most definitely remove the +check from the individual panels and save a pile of code. + +In addition to removing the check from the individual panels, it is believed +that even the core shouldn't need this check and that should be considered +an error if other code ever relies on this check. The check in the core +currently prints a warning whenever something is relying on this check with +dev_warn(). After a little while, we likely want to promote this to a +WARN(1) to help encourage folks not to rely on this behavior.
+ +Contact: Douglas Anderson + +Level: Starter/Intermediate + Core refactorings ================= diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c index f634371c717a..4e1c4e42575b 100644 --- a/drivers/gpu/drm/drm_panel.c +++ b/drivers/gpu/drm/drm_panel.c @@ -105,11 +105,22 @@ EXPORT_SYMBOL(drm_panel_remove); */ int drm_panel_prepare(struct drm_panel *panel) { + int ret; + if (!panel) return -EINVAL; - if (panel->funcs && panel->funcs->prepare) - return panel->funcs->prepare(panel); + if (panel->prepared) { + dev_warn(panel->dev, "Skipping prepare of already prepared panel\n"); + return 0; + } + + if (panel->funcs && panel->funcs->prepare) { + ret = panel->funcs->prepare(panel); + if (ret < 0) + return ret; + } + panel->prepared = true; return 0; } @@ -128,11 +139,22 @@ EXPORT_SYMBOL(drm_panel_prepare); */ int drm_panel_unprepare(struct drm_panel *panel) { + int ret; + if (!panel) return -EINVAL; - if (panel->funcs && panel->funcs->unprepare) - return panel->funcs->unprepare(panel); + if (!panel->prepared) { + dev_warn(panel->dev, "Skipping unprepare of already unprepared panel\n"); + return 0; + } + + if (panel->funcs && panel->funcs->unprepare) { + ret = panel->funcs->unprepare(panel); + if (ret < 0) + return ret; + } + panel->prepared = false; return 0; } @@ -155,11 +177,17 @@ int drm_panel_enable(struct drm_panel *panel) if (!panel) return -EINVAL; + if (panel->enabled) { + dev_warn(panel->dev, "Skipping enable of already enabled panel\n"); + return 0; + } + if (panel->funcs && panel->funcs->enable) { ret = panel->funcs->enable(panel); if (ret < 0) return ret; } + panel->enabled = true; ret = backlight_enable(panel->backlight); if (ret < 0) @@ -187,13 +215,22 @@ int drm_panel_disable(struct drm_panel *panel) if (!panel) return -EINVAL; + if (!panel->enabled) { + dev_warn(panel->dev, "Skipping disable of already disabled panel\n"); + return 0; + } + ret = backlight_disable(panel->backlight); if (ret < 0) DRM_DEV_INFO(panel->dev, "failed to disable backlight: %d\n", ret); - if (panel->funcs && panel->funcs->disable) - return panel->funcs->disable(panel); + if (panel->funcs && panel->funcs->disable) { + ret = panel->funcs->disable(panel); + if (ret < 0) + return ret; + } + panel->enabled = false; return 0; } diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index 432fab2347eb..c6cf75909389 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -198,6 +198,20 @@ struct drm_panel { * the panel is powered up. */ bool prepare_prev_first; + + /** + * @prepared: + * + * If true then the panel has been prepared. + */ + bool prepared; + + /** + * @enabled: + * + * If true then the panel has been enabled. + */ + bool enabled; }; void drm_panel_init(struct drm_panel *panel, struct device *dev, -- cgit v1.2.3 From de0874165b830c2b08abe87f4ffc6908f2a00cf0 Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Thu, 27 Jul 2023 10:16:30 -0700 Subject: drm/panel: Add a way for other devices to follow panel state These days, it's fairly common to see panels that have touchscreens attached to them. The panel and the touchscreen can somewhat be thought of as totally separate devices and, historically, this is how Linux has treated them. However, treating them as separate isn't necessarily the best way to model the two devices, it was just that there was no better way. 
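As a rough preview of the API added below (a sketch only; the touchscreen driver and its names are hypothetical, while the drm_panel_follower types and functions are the ones introduced by this patch):

    struct my_ts {
        struct regulator *supply;
        struct drm_panel_follower follower;
    };

    static int my_ts_panel_prepared(struct drm_panel_follower *follower)
    {
        struct my_ts *ts = container_of(follower, struct my_ts, follower);

        /* Power the touchscreen up once the panel is on. */
        return regulator_enable(ts->supply);
    }

    static int my_ts_panel_unpreparing(struct drm_panel_follower *follower)
    {
        struct my_ts *ts = container_of(follower, struct my_ts, follower);

        /* Power it back down before the panel goes off. */
        return regulator_disable(ts->supply);
    }

    static const struct drm_panel_follower_funcs my_ts_follower_funcs = {
        .panel_prepared = my_ts_panel_prepared,
        .panel_unpreparing = my_ts_panel_unpreparing,
    };

    /* In probe: */
    ts->follower.funcs = &my_ts_follower_funcs;
    ret = devm_drm_panel_add_follower(dev, &ts->follower);

With that picture in mind, back to the motivation.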
Specifically, there is little practical reason to have the touchscreen powered on when the panel is turned off, but if we model the devices separately we have no way to keep the two devices' power states in sync with each other. The issue described above makes it sound as if the problem here is just about efficiency. We're wasting power keeping the touchscreen powered up when the screen is off. While that's true, the problem can go deeper. Specifically, hardware designers see that there's no reason to have the touchscreen on while the screen is off and then build hardware assuming that software would never turn the touchscreen on while the screen is off. In the very simplest case of hardware designs like this, the touchscreen and the panel share some power rails. In most cases, this turns out not to be terrible and is, again, just a little less efficient. Specifically if we tell Linux that the touchscreen and the panel are using the same rails then Linux will keep the rails on when _either_ device is turned on. That ends to work OK-ish, but now if you turn the panel off not only will the touchscreen remain powered, but the power rails for the panel itself won't be switched off, burning extra power. The above two inefficiencies are _extra_ minor when you consider the fact that laptops rarely spend much time with the screen off. The main use case would be when an external screen (and presumably a power supply) is attached. Unfortunately, it gets worse from here. On sc7180-trogdor-homestar, for instance, the display's TCON (timing controller) sometimes crashes if you don't power cycle it whenever you stop and restart the video stream (like during a modeset). The touchscreen keeping the power rails on causes real problems. One proposal in the homestar timeframe was to move the touchscreen to an always-on rail, dedicating the main power rail to the panel. That caused _different_ problems as talked about in commit 557e05fa9fdd ("HID: i2c-hid: goodix: Stop tying the reset line to the regulator"). The end result of all of this was to add an extra regulator to the board, increasing cost. Recently, Cong Yang posted a patch [1] where things are even worse. The panel and touch controller on that system seem even more intimately tied together and really can't be thought of separately. To address this issue, let's start allowing devices to register themselves as "panel followers". These devices will get called after a panel has been powered on and before a panel is powered off. This makes the panel the primary device in charge of the power state, which matches how userspace uses it. The panel follower API should be fairly straightforward to use. The current code assumes that panel followers are using device tree and have a "panel" property pointing to the panel to follow. More flexibility and non-DT implementations could be added as needed. Right now, panel followers can follow the prepare/unprepare functions. There could be arguments made that, instead, they should follow enable/disable. I've chosen prepare/unprepare for now since those functions are guaranteed to power up/power down the panel and it seems better to start the process earlier. A bit of explaining about why this is a roll-your-own API instead of using something more standard: 1. In standard APIs in Linux, parent devices are automatically powered on when a child needs power. Applying that here, it would mean that we'd force the panel on any time someone was listening to the touchscreen. 
That, unfortunately, would have broken homestar's need (if we hadn't changed the hardware, as per above) where the panel absolutely needs to be able to power cycle itself. While one could argue that homestar is broken hardware and we shouldn't have the API do backflips for it, _officially_ the eDP timing guidelines agree with homestar's needs and the panel power sequencing diagrams show power going off. It's nice to be able to support this. 2. We could, conceivably, try to add a new flag to device_link causing the parent to be in charge of power. Then we could at least use normal pm_runtime APIs. This sounds great, except that we run into problems with initial probe. As talked about in the later patch ("HID: i2c-hid: Support being a panel follower") the initial power on of a panel follower might need to do things (like add sub-devices) that aren't allowed in a runtime_resume function. The above complexities explain why this API isn't using common functions. That being said, this patch is very small and self-contained, so if someone was later able to adapt it to using more common APIs while solving the above issues then that could happen in the future. [1] https://lore.kernel.org/r/20230519032316.3464732-1-yangcong5@huaqin.corp-partner.google.com Reviewed-by: Maxime Ripard Signed-off-by: Douglas Anderson Link: https://patchwork.freedesktop.org/patch/msgid/20230727101636.v4.3.Icd5f96342d2242051c754364f4bee13ef2b986d4@changeid --- drivers/gpu/drm/drm_panel.c | 173 +++++++++++++++++++++++++++++++++++++++++++- include/drm/drm_panel.h | 82 +++++++++++++++++++++ 2 files changed, 251 insertions(+), 4 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c index 4e1c4e42575b..e814020bbcd3 100644 --- a/drivers/gpu/drm/drm_panel.c +++ b/drivers/gpu/drm/drm_panel.c @@ -58,6 +58,8 @@ void drm_panel_init(struct drm_panel *panel, struct device *dev, const struct drm_panel_funcs *funcs, int connector_type) { INIT_LIST_HEAD(&panel->list); + INIT_LIST_HEAD(&panel->followers); + mutex_init(&panel->follower_lock); panel->dev = dev; panel->funcs = funcs; panel->connector_type = connector_type; @@ -105,6 +107,7 @@ EXPORT_SYMBOL(drm_panel_remove); */ int drm_panel_prepare(struct drm_panel *panel) { + struct drm_panel_follower *follower; int ret; if (!panel) @@ -115,14 +118,27 @@ int drm_panel_prepare(struct drm_panel *panel) return 0; } + mutex_lock(&panel->follower_lock); + if (panel->funcs && panel->funcs->prepare) { ret = panel->funcs->prepare(panel); if (ret < 0) - return ret; + goto exit; } panel->prepared = true; - return 0; + list_for_each_entry(follower, &panel->followers, list) { + ret = follower->funcs->panel_prepared(follower); + if (ret < 0) + dev_info(panel->dev, "%ps failed: %d\n", + follower->funcs->panel_prepared, ret); + } + + ret = 0; +exit: + mutex_unlock(&panel->follower_lock); + + return ret; } EXPORT_SYMBOL(drm_panel_prepare); @@ -139,6 +155,7 @@ EXPORT_SYMBOL(drm_panel_prepare); */ int drm_panel_unprepare(struct drm_panel *panel) { + struct drm_panel_follower *follower; int ret; if (!panel) @@ -149,14 +166,27 @@ int drm_panel_unprepare(struct drm_panel *panel) return 0; } + mutex_lock(&panel->follower_lock); + + list_for_each_entry(follower, &panel->followers, list) { + ret = follower->funcs->panel_unpreparing(follower); + if (ret < 0) + dev_info(panel->dev, "%ps failed: %d\n", + follower->funcs->panel_unpreparing, ret); + } + + if (panel->funcs && panel->funcs->unprepare) { + ret = panel->funcs->unprepare(panel); + if (ret < 0) - return
ret; + goto exit; } panel->prepared = false; - return 0; + ret = 0; +exit: + mutex_unlock(&panel->follower_lock); + + return ret; } EXPORT_SYMBOL(drm_panel_unprepare); @@ -342,6 +372,141 @@ int of_drm_get_panel_orientation(const struct device_node *np, EXPORT_SYMBOL(of_drm_get_panel_orientation); #endif +/** + * drm_is_panel_follower() - Check if the device is a panel follower + * @dev: The 'struct device' to check + * + * This checks to see if a device needs to be power sequenced together with + * a panel using the panel follower API. + * At the moment panels can only be followed on device tree enabled systems. + * The "panel" property of the follower points to the panel to be followed. + * + * Return: true if we should be power sequenced with a panel; false otherwise. + */ +bool drm_is_panel_follower(struct device *dev) +{ + /* + * The "panel" property is actually a phandle, but for simplicity we + * don't bother trying to parse it here. We just need to know if the + * property is there. + */ + return of_property_read_bool(dev->of_node, "panel"); +} +EXPORT_SYMBOL(drm_is_panel_follower); + +/** + * drm_panel_add_follower() - Register something to follow panel state. + * @follower_dev: The 'struct device' for the follower. + * @follower: The panel follower descriptor for the follower. + * + * A panel follower is called right after preparing the panel and right before + * unpreparing the panel. Its primary intention is to power on an associated + * touchscreen, though it could be used for any similar devices. Multiple + * devices are allowed to follow the same panel. + * + * If a follower is added to a panel that's already been turned on, the + * follower's prepare callback is called right away. + * + * At the moment panels can only be followed on device tree enabled systems. + * The "panel" property of the follower points to the panel to be followed. + * + * Return: 0 or an error code. Note that -ENODEV means that we detected that + * follower_dev is not actually following a panel. The caller may + * choose to ignore this return value if following a panel is optional. + */ +int drm_panel_add_follower(struct device *follower_dev, + struct drm_panel_follower *follower) +{ + struct device_node *panel_np; + struct drm_panel *panel; + int ret; + + panel_np = of_parse_phandle(follower_dev->of_node, "panel", 0); + if (!panel_np) + return -ENODEV; + + panel = of_drm_find_panel(panel_np); + of_node_put(panel_np); + if (IS_ERR(panel)) + return PTR_ERR(panel); + + get_device(panel->dev); + follower->panel = panel; + + mutex_lock(&panel->follower_lock); + + list_add_tail(&follower->list, &panel->followers); + if (panel->prepared) { + ret = follower->funcs->panel_prepared(follower); + if (ret < 0) + dev_info(panel->dev, "%ps failed: %d\n", + follower->funcs->panel_prepared, ret); + } + + mutex_unlock(&panel->follower_lock); + + return 0; +} +EXPORT_SYMBOL(drm_panel_add_follower); + +/** + * drm_panel_remove_follower() - Reverse drm_panel_add_follower(). + * @follower: The panel follower descriptor for the follower. + * + * Undo drm_panel_add_follower(). This includes calling the follower's + * unprepare function if we're removed from a panel that's currently prepared. + * + * Return: 0 or an error code.
+ */ +void drm_panel_remove_follower(struct drm_panel_follower *follower) +{ + struct drm_panel *panel = follower->panel; + int ret; + + mutex_lock(&panel->follower_lock); + + if (panel->prepared) { + ret = follower->funcs->panel_unpreparing(follower); + if (ret < 0) + dev_info(panel->dev, "%ps failed: %d\n", + follower->funcs->panel_unpreparing, ret); + } + list_del_init(&follower->list); + + mutex_unlock(&panel->follower_lock); + + put_device(panel->dev); +} +EXPORT_SYMBOL(drm_panel_remove_follower); + +static void drm_panel_remove_follower_void(void *follower) +{ + drm_panel_remove_follower(follower); +} + +/** + * devm_drm_panel_add_follower() - devm version of drm_panel_add_follower() + * @follower_dev: The 'struct device' for the follower. + * @follower: The panel follower descriptor for the follower. + * + * Handles calling drm_panel_remove_follower() using devm on the follower_dev. + * + * Return: 0 or an error code. + */ +int devm_drm_panel_add_follower(struct device *follower_dev, + struct drm_panel_follower *follower) +{ + int ret; + + ret = drm_panel_add_follower(follower_dev, follower); + if (ret) + return ret; + + return devm_add_action_or_reset(follower_dev, + drm_panel_remove_follower_void, follower); +} +EXPORT_SYMBOL(devm_drm_panel_add_follower); + #if IS_REACHABLE(CONFIG_BACKLIGHT_CLASS_DEVICE) /** * drm_panel_of_backlight - use backlight device node for backlight diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index c6cf75909389..d6c73f79c131 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -27,12 +27,14 @@ #include #include #include +#include struct backlight_device; struct dentry; struct device_node; struct drm_connector; struct drm_device; +struct drm_panel_follower; struct drm_panel; struct display_timing; @@ -144,6 +146,45 @@ struct drm_panel_funcs { void (*debugfs_init)(struct drm_panel *panel, struct dentry *root); }; +struct drm_panel_follower_funcs { + /** + * @panel_prepared: + * + * Called after the panel has been powered on. + */ + int (*panel_prepared)(struct drm_panel_follower *follower); + + /** + * @panel_unpreparing: + * + * Called before the panel is powered off. + */ + int (*panel_unpreparing)(struct drm_panel_follower *follower); +}; + +struct drm_panel_follower { + /** + * @funcs: + * + * Dependent device callbacks; should be initted by the caller. + */ + const struct drm_panel_follower_funcs *funcs; + + /** + * @list + * + * Used for linking into panel's list; set by drm_panel_add_follower(). + */ + struct list_head list; + + /** + * @panel + * + * The panel we're dependent on; set by drm_panel_add_follower(). + */ + struct drm_panel *panel; +}; + /** * struct drm_panel - DRM panel object */ @@ -189,6 +230,20 @@ struct drm_panel { */ struct list_head list; + /** + * @followers: + * + * A list of struct drm_panel_follower dependent on this panel. + */ + struct list_head followers; + + /** + * @followers_lock: + * + * Lock for followers list. 
+ */ + struct mutex follower_lock; + /** * @prepare_prev_first: * @@ -246,6 +301,33 @@ static inline int of_drm_get_panel_orientation(const struct device_node *np, } #endif +#if defined(CONFIG_DRM_PANEL) +bool drm_is_panel_follower(struct device *dev); +int drm_panel_add_follower(struct device *follower_dev, + struct drm_panel_follower *follower); +void drm_panel_remove_follower(struct drm_panel_follower *follower); +int devm_drm_panel_add_follower(struct device *follower_dev, + struct drm_panel_follower *follower); +#else +static inline bool drm_is_panel_follower(struct device *dev) +{ + return false; +} + +static inline int drm_panel_add_follower(struct device *follower_dev, + struct drm_panel_follower *follower) +{ + return -ENODEV; +} + +static inline void drm_panel_remove_follower(struct drm_panel_follower *follower) { } +static inline int devm_drm_panel_add_follower(struct device *follower_dev, + struct drm_panel_follower *follower) +{ + return -ENODEV; +} +#endif + #if IS_ENABLED(CONFIG_DRM_PANEL) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ (IS_MODULE(CONFIG_DRM) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE))) int drm_panel_of_backlight(struct drm_panel *panel); -- cgit v1.2.3 From 8e4bb53c902ed2b06a2c4778e6dbb2c1eeec4960 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Wed, 2 Aug 2023 10:04:11 +0300 Subject: drm/bridge: Add debugfs print for bridge chains DRM bridges are not visible to userspace and it may not be immediately clear if the chain is somehow constructed incorrectly. I have had two separate instances of a bridge driver failing to do a drm_bridge_attach() call, resulting in the bridge connector not being part of the chain. In some situations this doesn't seem to cause issues, but it will if the DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is used. Add a debugfs file to print the bridge chains.
For me, on this TI AM62 based platform, I get the following output: encoder[39] bridge[0] type: 0, ops: 0x0 bridge[1] type: 0, ops: 0x0, OF: /bus@f0000/i2c@20000000/dsi@e:toshiba,tc358778 bridge[2] type: 0, ops: 0x3, OF: /bus@f0000/i2c@20010000/hdmi@48:lontium,lt8912b bridge[3] type: 11, ops: 0x7, OF: /hdmi-connector:hdmi-connector Tested-by: Alexander Stein Reviewed-by: Laurent Pinchart Signed-off-by: Tomi Valkeinen Acked-by: Neil Armstrong Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20230802-drm-bridge-chain-debugfs-v4-1-7e3ae3d137c0@ideasonboard.com --- drivers/gpu/drm/drm_bridge.c | 46 +++++++++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/drm_debugfs.c | 2 ++ include/drm/drm_bridge.h | 3 +++ 3 files changed, 51 insertions(+) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index c3d69af02e79..39e68e45bb12 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -27,8 +27,10 @@ #include #include +#include #include #include +#include #include #include @@ -1345,6 +1347,50 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np) EXPORT_SYMBOL(of_drm_find_bridge); #endif +#ifdef CONFIG_DEBUG_FS +static int drm_bridge_chains_info(struct seq_file *m, void *data) +{ + struct drm_debugfs_entry *entry = m->private; + struct drm_device *dev = entry->dev; + struct drm_printer p = drm_seq_file_printer(m); + struct drm_mode_config *config = &dev->mode_config; + struct drm_encoder *encoder; + unsigned int bridge_idx = 0; + + list_for_each_entry(encoder, &config->encoder_list, head) { + struct drm_bridge *bridge; + + drm_printf(&p, "encoder[%u]\n", encoder->base.id); + + drm_for_each_bridge_in_chain(encoder, bridge) { + drm_printf(&p, "\tbridge[%u] type: %u, ops: %#x", + bridge_idx, bridge->type, bridge->ops); + +#ifdef CONFIG_OF + if (bridge->of_node) + drm_printf(&p, ", OF: %pOFfc", bridge->of_node); +#endif + + drm_printf(&p, "\n"); + + bridge_idx++; + } + } + + return 0; +} + +static const struct drm_debugfs_info drm_bridge_debugfs_list[] = { + { "bridge_chains", drm_bridge_chains_info, 0 }, +}; + +void drm_bridge_debugfs_init(struct drm_minor *minor) +{ + drm_debugfs_add_files(minor->dev, drm_bridge_debugfs_list, + ARRAY_SIZE(drm_bridge_debugfs_list)); +} +#endif + MODULE_AUTHOR("Ajay Kumar "); MODULE_DESCRIPTION("DRM bridge infrastructure"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index a3a488205009..2de43ff3ce0a 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -31,6 +31,7 @@ #include #include +#include #include #include #include @@ -274,6 +275,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id, if (drm_drv_uses_atomic_modeset(dev)) { drm_atomic_debugfs_init(minor); + drm_bridge_debugfs_init(minor); } if (drm_core_check_feature(dev, DRIVER_MODESET)) { diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index bf964cdfb330..c339fc85fd07 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -36,6 +36,7 @@ struct drm_bridge; struct drm_bridge_timings; struct drm_connector; struct drm_display_info; +struct drm_minor; struct drm_panel; struct edid; struct i2c_adapter; @@ -949,4 +950,6 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm, } #endif +void drm_bridge_debugfs_init(struct drm_minor *minor); + #endif -- cgit v1.2.3 From c0571b20fca4acebd4cb5fcfd07ca4654e9d63dd Mon Sep 17 00:00:00 2001 
From: Douglas Anderson Date: Wed, 2 Aug 2023 07:47:27 -0700 Subject: drm/panel: Fix kernel-doc typo for `follower_lock` In the kernel doc for the `follower_lock` member of `struct drm_panel` there was a typo where it was called `followers_lock`. This resulted in a warning when making "htmldocs": ./include/drm/drm_panel.h:270: warning: Function parameter or member 'follower_lock' not described in 'drm_panel' Fix the typo. Fixes: de0874165b83 ("drm/panel: Add a way for other devices to follow panel state") Reported-by: Stephen Rothwell Closes: https://lore.kernel.org/r/20230802142136.0f67b762@canb.auug.org.au Signed-off-by: Douglas Anderson Reviewed-by: Neil Armstrong Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20230802074727.1.I4036706ad5e7f45e80d41b777164258e52079cd8@changeid --- include/drm/drm_panel.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/drm') diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index d6c73f79c131..10015891b056 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -238,7 +238,7 @@ struct drm_panel { struct list_head followers; /** - * @followers_lock: + * @follower_lock: * * Lock for followers list. */ -- cgit v1.2.3 From dd9d7c18a78dfab5133e0254eae100107b660fd8 Mon Sep 17 00:00:00 2001 From: Sui Jingfeng Date: Sun, 9 Apr 2023 21:15:47 +0800 Subject: drm/drm_plane.h: fix grammar of the comment Signed-off-by: Sui Jingfeng Link: https://patchwork.freedesktop.org/patch/msgid/20230409131547.494128-1-15330273260@189.cn Signed-off-by: Daniel Vetter --- include/drm/drm_plane.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/drm') diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 51291983ea44..79d62856defb 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -56,7 +56,7 @@ struct drm_plane_state { /** * @crtc: * - * Currently bound CRTC, NULL if disabled. Do not this write directly, + * Currently bound CRTC, NULL if disabled. Do not write this directly, * use drm_atomic_set_crtc_for_plane() */ struct drm_crtc *crtc; -- cgit v1.2.3 From 0c0816d68d963302a7f4a7a28cc685f73263b964 Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Fri, 4 Aug 2023 20:23:41 +0200 Subject: drm/gem: fix lockdep check for dma-resv lock When no custom lock is set to protect a GEM's GPUVA list, lockdep checks should fall back to the GEM object's dma-resv lock. With the current implementation we're setting the lock_dep_map of the GEM object's 'resv' pointer (in case no custom lock_dep_map is set yet) on drm_gem_private_object_init(). However, the GEM object's 'resv' pointer might still change after drm_gem_private_object_init() is called, e.g. through ttm_bo_init_reserved(). This can result in the wrong lock being tracked. To fix this, call dma_resv_held() directly from drm_gem_gpuva_assert_lock_held() and fall back to the GEM's lock_dep_map pointer only if an actual custom lock is set.
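To illustrate the difference (a sketch; the buffer object and field names are hypothetical, the macros are the ones changed below):

    /* Driver protecting the gpuva list with its own lock: */
    mutex_init(&bo->gpuva_lock);
    drm_gem_gpuva_set_lock(&bo->base, &bo->gpuva_lock);

    /*
     * Driver without a custom lock: the assert now checks the GEM
     * object's current 'resv' at call time instead of a lock_dep_map
     * pointer cached at init time.
     */
    dma_resv_lock(obj->resv, NULL);
    drm_gem_gpuva_assert_lock_held(obj);
    dma_resv_unlock(obj->resv);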
Fixes: e6303f323b1a ("drm: manager to keep track of GPUs VA mappings") Reviewed-by: Dave Airlie Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20230804182406.5222-2-dakr@redhat.com --- include/drm/drm_gem.h | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'include/drm') diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index c0b13c43b459..bc9f6aa2f3fe 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -551,15 +551,17 @@ int drm_gem_evict(struct drm_gem_object *obj); * @lock: the lock used to protect the gpuva list. The locking primitive * must contain a dep_map field. * - * Call this if you're not proctecting access to the gpuva list - * with the dma-resv lock, otherwise, drm_gem_gpuva_init() takes care - * of initializing lock_dep_map for you. + * Call this if you're not protecting access to the gpuva list with the + * dma-resv lock, but with a custom lock. */ #define drm_gem_gpuva_set_lock(obj, lock) \ - if (!(obj)->gpuva.lock_dep_map) \ + if (!WARN((obj)->gpuva.lock_dep_map, \ + "GEM GPUVA lock should be set only once.")) \ (obj)->gpuva.lock_dep_map = &(lock)->dep_map #define drm_gem_gpuva_assert_lock_held(obj) \ - lockdep_assert(lock_is_held((obj)->gpuva.lock_dep_map)) + lockdep_assert((obj)->gpuva.lock_dep_map ? \ + lock_is_held((obj)->gpuva.lock_dep_map) : \ + dma_resv_held((obj)->resv)) #else #define drm_gem_gpuva_set_lock(obj, lock) do {} while (0) #define drm_gem_gpuva_assert_lock_held(obj) do {} while (0) @@ -573,11 +575,12 @@ int drm_gem_evict(struct drm_gem_object *obj); * * Calling this function is only necessary for drivers intending to support the * &drm_driver_feature DRIVER_GEM_GPUVA. + * + * See also drm_gem_gpuva_set_lock(). */ static inline void drm_gem_gpuva_init(struct drm_gem_object *obj) { INIT_LIST_HEAD(&obj->gpuva.list); - drm_gem_gpuva_set_lock(obj, &obj->resv->lock.base); } /** -- cgit v1.2.3 From 616bceae250d0bab7ab2cbcb0791d820434ffb71 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 31 Jul 2023 14:36:24 +0200 Subject: drm/exec: use unique instead of local label MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GCC forbids jumping to labels in loop conditions and a new clang check stumbled over this. So instead of using a local label inside the loop condition, use a unique label outside of it.
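For context, the macro is typically used like this (a sketch based on the drm_exec kernel-doc, with error handling trimmed); drm_exec_retry_on_contention() jumps back to the label that drm_exec_until_all_locked() defines:

    struct drm_exec exec;
    int ret;

    drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
    drm_exec_until_all_locked(&exec) {
        ret = drm_exec_prepare_obj(&exec, obj, 1);
        drm_exec_retry_on_contention(&exec);
        if (ret)
            break;
    }
    drm_exec_fini(&exec);

Moving the label out of the loop condition keeps this pattern working while satisfying both compilers.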
Fixes: 09593216bff1 ("drm: execution context for GEM buffers v7") Link: https://gcc.gnu.org/onlinedocs/gcc/Statement-Exprs.html Link: https://github.com/ClangBuiltLinux/linux/issues/1890 Link: https://github.com/llvm/llvm-project/commit/20219106060208f0c2f5d096eb3aed7b712f5067 Reported-by: Nathan Chancellor Reported-by: Naresh Kamboju CC: Boris Brezillon Signed-off-by: Christian König Tested-by: Nathan Chancellor Reviewed-by: Boris Brezillon Reviewed-by: Nick Desaulniers Signed-off-by: Boris Brezillon Link: https://patchwork.freedesktop.org/patch/msgid/20230731123625.3766-1-christian.koenig@amd.com --- include/drm/drm_exec.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'include/drm') diff --git a/include/drm/drm_exec.h b/include/drm/drm_exec.h index 73205afec162..e0462361adf9 100644 --- a/include/drm/drm_exec.h +++ b/include/drm/drm_exec.h @@ -3,6 +3,7 @@ #ifndef __DRM_EXEC_H__ #define __DRM_EXEC_H__ +#include #include #define DRM_EXEC_INTERRUPTIBLE_WAIT BIT(0) @@ -74,13 +75,12 @@ struct drm_exec { * Since labels can't be defined local to the loops body we use a jump pointer * to make sure that the retry is only used from within the loops body. */ -#define drm_exec_until_all_locked(exec) \ - for (void *__drm_exec_retry_ptr; ({ \ - __label__ __drm_exec_retry; \ -__drm_exec_retry: \ - __drm_exec_retry_ptr = &&__drm_exec_retry; \ - (void)__drm_exec_retry_ptr; \ - drm_exec_cleanup(exec); \ +#define drm_exec_until_all_locked(exec) \ +__PASTE(__drm_exec_, __LINE__): \ + for (void *__drm_exec_retry_ptr; ({ \ + __drm_exec_retry_ptr = &&__PASTE(__drm_exec_, __LINE__);\ + (void)__drm_exec_retry_ptr; \ + drm_exec_cleanup(exec); \ });) /** -- cgit v1.2.3 From 2799804ac651da1375ecb9b9a644eba97218df07 Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Wed, 9 Aug 2023 21:58:39 +0800 Subject: drm/ttm: Remove two unused function declarations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit cd3a8a596214 ("drm/ttm: remove ttm_bo_(un)lock_delayed_workqueue") removed the implementations but not the declarations. 
Signed-off-by: Yue Haibing Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20230809135839.13216-1-yuehaibing@huawei.com Signed-off-by: Christian König --- include/drm/ttm/ttm_bo.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/drm') diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h index 8b113c384236..0223a41a64b2 100644 --- a/include/drm/ttm/ttm_bo.h +++ b/include/drm/ttm/ttm_bo.h @@ -355,8 +355,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo, void ttm_bo_put(struct ttm_buffer_object *bo); void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo, struct ttm_lru_bulk_move *bulk); -int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev); -void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched); bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place); int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo, -- cgit v1.2.3 From c67b06f19419fb57aa2338944d222ae2a42e0782 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Tue, 22 Aug 2023 14:30:14 +0300 Subject: drm: Add an HPD poll helper to reschedule the poll work MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a helper to reschedule drm_mode_config::output_poll_work after polling has been enabled for a connector (and a reschedule is needed, since previously polling was disabled for all connectors and hence output_poll_work was not running). This is needed by the next patch fixing HPD polling on i915. CC: stable@vger.kernel.org # 6.4+ Cc: Dmitry Baryshkov Cc: dri-devel@lists.freedesktop.org Reviewed-by: Jouni Högander Reviewed-by: Dmitry Baryshkov Signed-off-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20230822113015.41224-1-imre.deak@intel.com (cherry picked from commit fe2352fd64029918174de4b460dfe6df0c6911cd) Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/drm_probe_helper.c | 68 ++++++++++++++++++++++++++------------ include/drm/drm_probe_helper.h | 1 + 2 files changed, 47 insertions(+), 22 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 2fb9bf901a2c..3f479483d7d8 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -262,6 +262,26 @@ static bool drm_kms_helper_enable_hpd(struct drm_device *dev) } #define DRM_OUTPUT_POLL_PERIOD (10*HZ) +static void reschedule_output_poll_work(struct drm_device *dev) +{ + unsigned long delay = DRM_OUTPUT_POLL_PERIOD; + + if (dev->mode_config.delayed_event) + /* + * FIXME: + * + * Use short (1s) delay to handle the initial delayed event. + * This delay should not be needed, but Optimus/nouveau will + * fail in a mysterious way if the delayed event is handled as + * soon as possible like it is done in + * drm_helper_probe_single_connector_modes() in case the poll + * was enabled before. + */ + delay = HZ; + + schedule_delayed_work(&dev->mode_config.output_poll_work, delay); +} + /** * drm_kms_helper_poll_enable - re-enable output polling.
* @dev: drm_device @@ -279,37 +299,41 @@ static bool drm_kms_helper_enable_hpd(struct drm_device *dev) */ void drm_kms_helper_poll_enable(struct drm_device *dev) { - bool poll = false; - unsigned long delay = DRM_OUTPUT_POLL_PERIOD; - if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll || dev->mode_config.poll_running) return; - poll = drm_kms_helper_enable_hpd(dev); - - if (dev->mode_config.delayed_event) { - /* - * FIXME: - * - * Use short (1s) delay to handle the initial delayed event. - * This delay should not be needed, but Optimus/nouveau will - * fail in a mysterious way if the delayed event is handled as - * soon as possible like it is done in - * drm_helper_probe_single_connector_modes() in case the poll - * was enabled before. - */ - poll = true; - delay = HZ; - } - - if (poll) - schedule_delayed_work(&dev->mode_config.output_poll_work, delay); + if (drm_kms_helper_enable_hpd(dev) || + dev->mode_config.delayed_event) + reschedule_output_poll_work(dev); dev->mode_config.poll_running = true; } EXPORT_SYMBOL(drm_kms_helper_poll_enable); +/** + * drm_kms_helper_poll_reschedule - reschedule the output polling work + * @dev: drm_device + * + * This function reschedules the output polling work after polling for a + * connector has been enabled. + * + * Drivers must call this helper after enabling polling for a connector by + * setting %DRM_CONNECTOR_POLL_CONNECT / %DRM_CONNECTOR_POLL_DISCONNECT flags + * in drm_connector::polled. Note that disabling polling by clearing these + * flags for a connector will stop the output polling work automatically if + * the polling is disabled for all other connectors as well. + * + * The function can be called only after polling has been enabled by calling + * drm_kms_helper_poll_init() / drm_kms_helper_poll_enable(). + */ +void drm_kms_helper_poll_reschedule(struct drm_device *dev) +{ + if (dev->mode_config.poll_running) + reschedule_output_poll_work(dev); +} +EXPORT_SYMBOL(drm_kms_helper_poll_reschedule); + static enum drm_connector_status drm_helper_probe_detect_ctx(struct drm_connector *connector, bool force) { diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h index 4977e0ab72db..fad3c4003b2b 100644 --- a/include/drm/drm_probe_helper.h +++ b/include/drm/drm_probe_helper.h @@ -25,6 +25,7 @@ void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector); void drm_kms_helper_poll_disable(struct drm_device *dev); void drm_kms_helper_poll_enable(struct drm_device *dev); +void drm_kms_helper_poll_reschedule(struct drm_device *dev); bool drm_kms_helper_is_poll_worker(void); enum drm_mode_status drm_crtc_helper_mode_valid_fixed(struct drm_crtc *crtc, -- cgit v1.2.3
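As an illustration of the new helper's intended call site (a sketch; the surrounding driver code is hypothetical, while the flags and helper are the ones documented above):

    /* Driver enabling HPD polling for one connector at runtime: */
    connector->polled = DRM_CONNECTOR_POLL_CONNECT |
                        DRM_CONNECTOR_POLL_DISCONNECT;
    drm_kms_helper_poll_reschedule(dev);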