Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo_util.c')
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo_util.c | 250 |
1 file changed, 244 insertions, 6 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d939925efa81..15cab9bda17f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -28,7 +28,7 @@
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
-
+#include <linux/swap.h>
 #include <linux/vmalloc.h>
 
 #include <drm/ttm/ttm_bo.h>
@@ -769,12 +769,10 @@ error_destroy_tt:
 	return ret;
 }
 
-static bool ttm_lru_walk_trylock(struct ttm_lru_walk *walk,
+static bool ttm_lru_walk_trylock(struct ttm_operation_ctx *ctx,
 				 struct ttm_buffer_object *bo,
 				 bool *needs_unlock)
 {
-	struct ttm_operation_ctx *ctx = walk->ctx;
-
 	*needs_unlock = false;
 
 	if (dma_resv_trylock(bo->base.resv)) {
@@ -865,7 +863,8 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
 	s64 lret;
 
 	spin_lock(&bdev->lru_lock);
-	ttm_resource_manager_for_each_res(man, &cursor, res) {
+	ttm_resource_cursor_init(&cursor, man);
+	ttm_resource_manager_for_each_res(&cursor, res) {
 		struct ttm_buffer_object *bo = res->bo;
 		bool bo_needs_unlock = false;
 		bool bo_locked = false;
@@ -876,7 +875,7 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
 		 * since if we do it the other way around, and the trylock fails,
 		 * we need to drop the lru lock to put the bo.
 		 */
-		if (ttm_lru_walk_trylock(walk, bo, &bo_needs_unlock))
+		if (ttm_lru_walk_trylock(walk->ctx, bo, &bo_needs_unlock))
 			bo_locked = true;
 		else if (!walk->ticket || walk->ctx->no_wait_gpu ||
 			 walk->trylock_only)
@@ -919,3 +918,242 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
 
 	return progress;
 }
+EXPORT_SYMBOL(ttm_lru_walk_for_evict);
+
+static void ttm_bo_lru_cursor_cleanup_bo(struct ttm_bo_lru_cursor *curs)
+{
+	struct ttm_buffer_object *bo = curs->bo;
+
+	if (bo) {
+		if (curs->needs_unlock)
+			dma_resv_unlock(bo->base.resv);
+		ttm_bo_put(bo);
+		curs->bo = NULL;
+	}
+}
+
+/**
+ * ttm_bo_lru_cursor_fini() - Stop using a struct ttm_bo_lru_cursor
+ * and clean up any iteration it was used for.
+ * @curs: The cursor.
+ */
+void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs)
+{
+	spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+
+	ttm_bo_lru_cursor_cleanup_bo(curs);
+	spin_lock(lru_lock);
+	ttm_resource_cursor_fini(&curs->res_curs);
+	spin_unlock(lru_lock);
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_fini);
+
+/**
+ * ttm_bo_lru_cursor_init() - Initialize a struct ttm_bo_lru_cursor
+ * @curs: The ttm_bo_lru_cursor to initialize.
+ * @man: The ttm resource_manager whose LRU lists to iterate over.
+ * @ctx: The ttm_operation_ctx to govern the locking.
+ *
+ * Initialize a struct ttm_bo_lru_cursor. Currently only trylocking
+ * or prelocked buffer objects are available as detailed by
+ * @ctx::resv and @ctx::allow_res_evict. Ticketlocking is not
+ * supported.
+ *
+ * Return: Pointer to @curs. The function does not fail.
+ */
+struct ttm_bo_lru_cursor *
+ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
+		       struct ttm_resource_manager *man,
+		       struct ttm_operation_ctx *ctx)
+{
+	memset(curs, 0, sizeof(*curs));
+	ttm_resource_cursor_init(&curs->res_curs, man);
+	curs->ctx = ctx;
+
+	return curs;
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_init);
+
+static struct ttm_buffer_object *
+ttm_bo_from_res_reserved(struct ttm_resource *res, struct ttm_bo_lru_cursor *curs)
+{
+	struct ttm_buffer_object *bo = res->bo;
+
+	if (!ttm_lru_walk_trylock(curs->ctx, bo, &curs->needs_unlock))
+		return NULL;
+
+	if (!ttm_bo_get_unless_zero(bo)) {
+		if (curs->needs_unlock)
+			dma_resv_unlock(bo->base.resv);
+		return NULL;
+	}
+
+	curs->bo = bo;
+	return bo;
+}
+
+/**
+ * ttm_bo_lru_cursor_next() - Continue iterating a manager's LRU lists
+ * to find and lock a buffer object.
+ * @curs: The cursor initialized using ttm_bo_lru_cursor_init() and
+ * ttm_bo_lru_cursor_first().
+ *
+ * Return: A pointer to a locked and reference-counted buffer object,
+ * or NULL if none could be found and looping should be terminated.
+ */
+struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
+{
+	spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+	struct ttm_resource *res = NULL;
+	struct ttm_buffer_object *bo;
+
+	ttm_bo_lru_cursor_cleanup_bo(curs);
+
+	spin_lock(lru_lock);
+	for (;;) {
+		res = ttm_resource_manager_next(&curs->res_curs);
+		if (!res)
+			break;
+
+		bo = ttm_bo_from_res_reserved(res, curs);
+		if (bo)
+			break;
+	}
+
+	spin_unlock(lru_lock);
+	return res ? bo : NULL;
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_next);
+
+/**
+ * ttm_bo_lru_cursor_first() - Start iterating a manager's LRU lists
+ * to find and lock a buffer object.
+ * @curs: The cursor initialized using ttm_bo_lru_cursor_init().
+ *
+ * Return: A pointer to a locked and reference-counted buffer object,
+ * or NULL if none could be found and looping should be terminated.
+ */
+struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs)
+{
+	spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+	struct ttm_buffer_object *bo;
+	struct ttm_resource *res;
+
+	spin_lock(lru_lock);
+	res = ttm_resource_manager_first(&curs->res_curs);
+	if (!res) {
+		spin_unlock(lru_lock);
+		return NULL;
+	}
+
+	bo = ttm_bo_from_res_reserved(res, curs);
+	spin_unlock(lru_lock);
+
+	return bo ? bo : ttm_bo_lru_cursor_next(curs);
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_first);
+
+/**
+ * ttm_bo_shrink() - Helper to shrink a ttm buffer object.
+ * @ctx: The struct ttm_operation_ctx used for the shrinking operation.
+ * @bo: The buffer object.
+ * @flags: Flags governing the shrinking behaviour.
+ *
+ * The function uses the ttm_tt_backup() functionality to back up or
+ * purge a struct ttm_tt. If the bo is not in system, it's first
+ * moved there.
+ *
+ * Return: The number of pages shrunken or purged, or
+ * negative error code on failure.
+ */
+long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
+		   const struct ttm_bo_shrink_flags flags)
+{
+	static const struct ttm_place sys_placement_flags = {
+		.fpfn = 0,
+		.lpfn = 0,
+		.mem_type = TTM_PL_SYSTEM,
+		.flags = 0,
+	};
+	static struct ttm_placement sys_placement = {
+		.num_placement = 1,
+		.placement = &sys_placement_flags,
+	};
+	struct ttm_tt *tt = bo->ttm;
+	long lret;
+
+	dma_resv_assert_held(bo->base.resv);
+
+	if (flags.allow_move && bo->resource->mem_type != TTM_PL_SYSTEM) {
+		int ret = ttm_bo_validate(bo, &sys_placement, ctx);
+
+		/* Consider -ENOMEM and -ENOSPC non-fatal. */
+		if (ret) {
+			if (ret == -ENOMEM || ret == -ENOSPC)
+				ret = -EBUSY;
+			return ret;
+		}
+	}
+
+	ttm_bo_unmap_virtual(bo);
+	lret = ttm_bo_wait_ctx(bo, ctx);
+	if (lret < 0)
+		return lret;
+
+	if (bo->bulk_move) {
+		spin_lock(&bo->bdev->lru_lock);
+		ttm_resource_del_bulk_move(bo->resource, bo);
+		spin_unlock(&bo->bdev->lru_lock);
+	}
+
+	lret = ttm_tt_backup(bo->bdev, tt, (struct ttm_backup_flags)
+			     {.purge = flags.purge,
+			      .writeback = flags.writeback});
+
+	if (lret <= 0 && bo->bulk_move) {
+		spin_lock(&bo->bdev->lru_lock);
+		ttm_resource_add_bulk_move(bo->resource, bo);
+		spin_unlock(&bo->bdev->lru_lock);
+	}
+
+	if (lret < 0 && lret != -EINTR)
+		return -EBUSY;
+
+	return lret;
+}
+EXPORT_SYMBOL(ttm_bo_shrink);
+
+/**
+ * ttm_bo_shrink_suitable() - Whether a bo is suitable for shrinking
+ * @ctx: The struct ttm_operation_ctx governing the shrinking.
+ * @bo: The candidate for shrinking.
+ *
+ * Check whether the object, given the information available to TTM,
+ * is suitable for shrinking. This function can and should be used
+ * before attempting to shrink an object.
+ *
+ * Return: true if suitable. false if not.
+ */
+bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
+{
+	return bo->ttm && ttm_tt_is_populated(bo->ttm) && !bo->pin_count &&
+		(!ctx->no_wait_gpu ||
+		 dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP));
+}
+EXPORT_SYMBOL(ttm_bo_shrink_suitable);
+
+/**
+ * ttm_bo_shrink_avoid_wait() - Whether to avoid waiting for GPU
+ * during shrinking
+ *
+ * In some situations, like direct reclaim, waiting (in particular gpu waiting)
+ * should be avoided since it may stall a system that could otherwise make progress
+ * shrinking something else less time consuming.
+ *
+ * Return: true if gpu waiting should be avoided, false if not.
+ */
+bool ttm_bo_shrink_avoid_wait(void)
+{
+	return !current_is_kswapd();
+}
+EXPORT_SYMBOL(ttm_bo_shrink_avoid_wait);
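Usage note (editorial, not part of the patch): the helpers exported above are meant to compose into a driver-side shrink loop. ttm_bo_lru_cursor_init()/_first()/_next() hand back locked, reference-counted buffer objects from a manager's LRU lists, ttm_bo_shrink_suitable() and ttm_bo_shrink_avoid_wait() filter the candidates, and ttm_bo_shrink() backs them up or purges them. The sketch below only illustrates that flow; the function my_man_shrink() is hypothetical, and it assumes struct ttm_bo_lru_cursor, struct ttm_bo_shrink_flags and the new prototypes are visible via <drm/ttm/ttm_bo.h>, as added by the header part of this series.

/* Editorial sketch only -- not part of this patch. */
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_resource.h>

static long my_man_shrink(struct ttm_resource_manager *man, bool purge)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		/* Skip GPU waits outside of kswapd, as the helper suggests. */
		.no_wait_gpu = ttm_bo_shrink_avoid_wait(),
	};
	struct ttm_bo_lru_cursor curs;
	struct ttm_buffer_object *bo;
	long shrunken = 0;

	for (bo = ttm_bo_lru_cursor_first(ttm_bo_lru_cursor_init(&curs, man, &ctx));
	     bo; bo = ttm_bo_lru_cursor_next(&curs)) {
		long lret;

		/*
		 * bo comes back locked and referenced; the cursor drops both
		 * on the next iteration and in ttm_bo_lru_cursor_fini().
		 */
		if (!ttm_bo_shrink_suitable(bo, &ctx))
			continue;

		lret = ttm_bo_shrink(&ctx, bo,
				     (struct ttm_bo_shrink_flags) {
					.purge = purge,
					.writeback = !purge,
					.allow_move = true });
		if (lret > 0)
			shrunken += lret;
	}
	ttm_bo_lru_cursor_fini(&curs);

	return shrunken;
}

A loop like this can also rely on ttm_bo_shrink() mapping most failures to -EBUSY (only -EINTR is passed through from the wait/backup path), so it can simply move on to the next object rather than special-casing individual errors.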