author    | Thomas Hellström <thomas.hellstrom@linux.intel.com> | 2024-05-02 20:32:51 +0200
committer | Thomas Hellström <thomas.hellstrom@linux.intel.com> | 2024-05-03 09:46:54 +0200
commit    | 75521e8b56e8f9dc673b782df7bc3660f51f329a
tree      | c5a27a6de7c5c0710d2ecfd15bc4a12e79c82feb /drivers/gpu/drm/xe/xe_bo.c
parent    | 8ad0e1810bf23f22cedb8a2664548b15646570c7
drm/xe: Perform dma_map when moving system buffer objects to TT
Currently we dma_map on ttm_tt population and dma_unmap when
the pages are released in ttm_tt unpopulate.
Strictly, the dma_map is not needed until the bo is moved to the
XE_PL_TT placement, so perform the dma_mapping on such moves
instead, and remove the dma_mapping when moving to XE_PL_SYSTEM.
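
As a rough sketch of the resulting flow (illustrative only: the helper
name below is made up for this example, and the error handling, CCS and
dma-buf special cases of the real xe_bo_move() are elided; see the diff
below for the actual code):

/* Hypothetical sketch, not the actual xe_bo_move() implementation. */
static int xe_bo_move_map_sketch(struct ttm_buffer_object *ttm_bo,
				 struct ttm_resource *new_mem)
{
	int ret = 0;

	/* Map the backing pages only once they become GPU-visible (TT). */
	if (new_mem->mem_type == XE_PL_TT)
		ret = xe_tt_map_sg(ttm_bo->ttm);
	if (ret)
		return ret;

	/* ... perform the move itself ... */

	/* Back in system memory (or without a resource), drop the mapping. */
	if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
	    ttm_bo->ttm)
		xe_tt_unmap_sg(ttm_bo->ttm);

	return ret;
}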
This is desired for the upcoming shrinker series where shrinking
of a ttm_tt might fail. That would lead to an odd construct where
we first dma_unmap, then shrink, and if shrinking fails, dma_map
again. If the dma_mapping is instead performed on move like this,
shrinking does not need to care at all about dma mapping.
Finally, where a ttm_tt is destroyed while still bound to a
memory type other than XE_PL_SYSTEM, we keep the dma_unmap in
unpopulate().
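
For that destruction case, the unpopulate path keeps the unmap as a
fallback; a minimal sketch mirroring the unpopulate hunk in the diff
below:

static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
{
	if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
		return;

	/* Fallback unmap: the tt may be torn down while still bound
	 * outside XE_PL_SYSTEM and thus never see an unmapping move.
	 */
	xe_tt_unmap_sg(tt);

	return ttm_pool_free(&ttm_dev->pool, tt);
}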
v2:
- Don't accidentally unmap the dma-buf's sgtable.
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240502183251.10170-1-thomas.hellstrom@linux.intel.com
Diffstat (limited to 'drivers/gpu/drm/xe/xe_bo.c')
-rw-r--r-- | drivers/gpu/drm/xe/xe_bo.c | 47
1 file changed, 30 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index bc1f794e3e61..52a16cb4e736 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -302,6 +302,18 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
 	return 0;
 }
 
+static void xe_tt_unmap_sg(struct ttm_tt *tt)
+{
+	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
+
+	if (xe_tt->sg) {
+		dma_unmap_sgtable(xe_tt->dev, xe_tt->sg,
+				  DMA_BIDIRECTIONAL, 0);
+		sg_free_table(xe_tt->sg);
+		xe_tt->sg = NULL;
+	}
+}
+
 struct sg_table *xe_bo_sg(struct xe_bo *bo)
 {
 	struct ttm_tt *tt = bo->ttm.ttm;
@@ -377,27 +389,15 @@ static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
 	if (err)
 		return err;
 
-	/* A follow up may move this xe_bo_move when BO is moved to XE_PL_TT */
-	err = xe_tt_map_sg(tt);
-	if (err)
-		ttm_pool_free(&ttm_dev->pool, tt);
-
 	return err;
 }
 
 static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
 {
-	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
-
 	if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
 		return;
 
-	if (xe_tt->sg) {
-		dma_unmap_sgtable(xe_tt->dev, xe_tt->sg,
-				  DMA_BIDIRECTIONAL, 0);
-		sg_free_table(xe_tt->sg);
-		xe_tt->sg = NULL;
-	}
+	xe_tt_unmap_sg(tt);
 
 	return ttm_pool_free(&ttm_dev->pool, tt);
 }
@@ -628,17 +628,21 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 	bool handle_system_ccs = (!IS_DGFX(xe) && xe_bo_needs_ccs_pages(bo) &&
 				  ttm && ttm_tt_is_populated(ttm)) ? true : false;
 	int ret = 0;
 
+	/* Bo creation path, moving to system or TT. */
 	if ((!old_mem && ttm) && !handle_system_ccs) {
-		ttm_bo_move_null(ttm_bo, new_mem);
-		return 0;
+		if (new_mem->mem_type == XE_PL_TT)
+			ret = xe_tt_map_sg(ttm);
+		if (!ret)
+			ttm_bo_move_null(ttm_bo, new_mem);
+		goto out;
 	}
 
 	if (ttm_bo->type == ttm_bo_type_sg) {
 		ret = xe_bo_move_notify(bo, ctx);
 		if (!ret)
 			ret = xe_bo_move_dmabuf(ttm_bo, new_mem);
-		goto out;
+		return ret;
 	}
 
 	tt_has_data = ttm && (ttm_tt_is_populated(ttm) ||
@@ -650,6 +654,12 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 	needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) ||
 		      (!ttm && ttm_bo->type == ttm_bo_type_device);
 
+	if (new_mem->mem_type == XE_PL_TT) {
+		ret = xe_tt_map_sg(ttm);
+		if (ret)
+			goto out;
+	}
+
 	if ((move_lacks_source && !needs_clear)) {
 		ttm_bo_move_null(ttm_bo, new_mem);
 		goto out;
@@ -786,8 +796,11 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 	xe_pm_runtime_put(xe);
 
 out:
-	return ret;
+	if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
+	    ttm_bo->ttm)
+		xe_tt_unmap_sg(ttm_bo->ttm);
 
+	return ret;
 }
 
 /**