author    Daniel Vetter <daniel.vetter@ffwll.ch>  2024-08-08 18:58:45 +0200
committer Daniel Vetter <daniel.vetter@ffwll.ch>  2024-08-08 18:58:46 +0200
commit    91dae758bdb854367bf0811d97acb84e791764d9 (patch)
tree      127bead858c1321276754befd3fb385e55df70de /drivers/gpu/drm/ttm
parent    a4172af3040cdc207f1b60efffcdd219156093c9 (diff)
parent    d97e71e449373efbd2403f1d7a32d416599f32ac (diff)
Merge tag 'drm-misc-next-2024-08-01' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next
drm-misc-next for v6.12:

UAPI Changes:

virtio:
- Define DRM capset

Cross-subsystem Changes:

dma-buf:
- heaps: Clean up documentation

printk:
- Pass description to kmsg_dump()

Core Changes:

CI:
- Update IGT tests
- Point upstream repo to GitLab instance

modesetting:
- Introduce Power Saving Policy property for connectors
- Add might_fault() to drm_modeset_lock priming
- Add dynamic per-crtc vblank configuration support

panic:
- Avoid build-time interference with framebuffer console

docs:
- Document Colorspace property

scheduler:
- Remove full_recover from drm_sched_start

TTM:
- Make LRU walk restartable after dropping locks
- Allow direct reclaim to allocate local memory

Driver Changes:

amdgpu:
- Support Power Saving Policy connector property

ast:
- astdp: Support AST2600 with VGA; Clean up HPD

bridge:
- Silence error message on -EPROBE_DEFER
- analogix: Clean up
- lt6505: Disable interrupt when powered off
- bridge-connector: Fix double free
- tc358767: Make default DP port preemphasis configurable

gma500:
- Update i2c terminology

ivpu:
- Add MODULE_FIRMWARE()

lcdif:
- Fix pixel clock

loongson:
- Use GEM refcount over TTM's

mgag200:
- Improve BMC handling
- Support VBLANK interrupts

nouveau:
- Refactor and clean up internals
- Use GEM refcount over TTM's

panel:
- Shutdown fixes plus documentation
- Refactor several drivers for better code sharing
- boe-th101mb31ig002: Support for starry-er88577 MIPI-DSI panel plus DT; Fix porch parameter
- edp: Support AOU B116XTN02.3, AUO B116XAN06.1, AOU B116XAT04.1, BOE NV140WUM-N41, BOE NV133WUM-N63, BOE NV116WHM-A4D, CMN N116BCA-EA2, CMN N116BCP-EA2, CSW MNB601LS1-4
- himax-hx8394: Support Microchip AC40T08A MIPI Display panel plus DT
- ilitek-ili9806e: Support Densitron DMT028VGHMCMI-1D TFT plus DT
- jd9365da: Support Melfas lmfbx101117480 MIPI-DSI panel plus DT; Refactor for code sharing

sti:
- Fix module owner

stm:
- Avoid UAF with managed plane and CRTC helpers
- Fix module owner
- Fix error handling in probe
- Depend on COMMON_CLK
- ltdc: Fix transparency after disabling plane; Remove unused interrupt

tegra:
- Call drm_atomic_helper_shutdown()

v3d:
- Clean up perfmon

vkms:
- Clean up

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20240801121406.GA102996@linux.fritz.box
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_bo_test.c          6
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_resource_test.c    2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c                   460
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c              151
-rw-r--r--  drivers/gpu/drm/ttm/ttm_device.c                29
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c                   2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_resource.c             251
7 files changed, 574 insertions, 327 deletions
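
The common thread of the TTM hunks below is the restartable LRU walk: struct ttm_resource no longer links into the LRU via a bare list_head but via an embedded struct ttm_lru_item (hence the res->lru.link accesses throughout), so that cursor "hitch" markers can share the list with real resources and keep an iterator's position valid while the lru_lock is dropped. A minimal sketch of those list-item types, reconstructed from how the hunks below use them; the actual definitions live in include/drm/ttm/ttm_resource.h, outside this diff:

enum ttm_lru_item_type {
	TTM_LRU_RESOURCE,	/* item embedded in a struct ttm_resource */
	TTM_LRU_HITCH		/* cursor marker that iteration restarts after */
};

struct ttm_lru_item {
	struct list_head link;
	enum ttm_lru_item_type type;
};

static inline void ttm_lru_item_init(struct ttm_lru_item *item,
				     enum ttm_lru_item_type type)
{
	item->type = type;
	INIT_LIST_HEAD(&item->link);
}

static inline bool ttm_lru_item_is_res(const struct ttm_lru_item *item)
{
	return item->type == TTM_LRU_RESOURCE;
}

/* Only valid when ttm_lru_item_is_res() returned true. */
static inline struct ttm_resource *
ttm_lru_item_to_res(struct ttm_lru_item *item)
{
	return container_of(item, struct ttm_resource, lru);
}
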
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
index d1b32303d051..f0a7eb62116c 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
@@ -271,7 +271,7 @@ static void ttm_bo_unreserve_basic(struct kunit *test)
man = ttm_manager_type(priv->ttm_dev, mem_type);
KUNIT_ASSERT_EQ(test,
- list_is_last(&res1->lru, &man->lru[bo->priority]), 1);
+ list_is_last(&res1->lru.link, &man->lru[bo->priority]), 1);
ttm_resource_free(bo, &res2);
ttm_resource_free(bo, &res1);
@@ -308,11 +308,11 @@ static void ttm_bo_unreserve_pinned(struct kunit *test)
err = ttm_resource_alloc(bo, place, &res2);
KUNIT_ASSERT_EQ(test, err, 0);
KUNIT_ASSERT_EQ(test,
- list_is_last(&res2->lru, &priv->ttm_dev->pinned), 1);
+ list_is_last(&res2->lru.link, &priv->ttm_dev->pinned), 1);
ttm_bo_unreserve(bo);
KUNIT_ASSERT_EQ(test,
- list_is_last(&res1->lru, &priv->ttm_dev->pinned), 1);
+ list_is_last(&res1->lru.link, &priv->ttm_dev->pinned), 1);
ttm_resource_free(bo, &res1);
ttm_resource_free(bo, &res2);
diff --git a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
index 9c2f13e53162..22260e7aea58 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
@@ -198,7 +198,7 @@ static void ttm_resource_fini_basic(struct kunit *test)
ttm_resource_init(bo, place, res);
ttm_resource_fini(man, res);
- KUNIT_ASSERT_TRUE(test, list_empty(&res->lru));
+ KUNIT_ASSERT_TRUE(test, list_empty(&res->lru.link));
KUNIT_ASSERT_EQ(test, man->usage, 0);
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2427be8bc97f..320592435252 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -224,80 +224,6 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
dma_resv_iter_end(&cursor);
}
-/**
- * ttm_bo_cleanup_refs
- * If bo idle, remove from lru lists, and unref.
- * If not idle, block if possible.
- *
- * Must be called with lru_lock and reservation held, this function
- * will drop the lru lock and optionally the reservation lock before returning.
- *
- * @bo: The buffer object to clean-up
- * @interruptible: Any sleeps should occur interruptibly.
- * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead.
- * @unlock_resv: Unlock the reservation lock as well.
- */
-
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait_gpu,
- bool unlock_resv)
-{
- struct dma_resv *resv = &bo->base._resv;
- int ret;
-
- if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
- ret = 0;
- else
- ret = -EBUSY;
-
- if (ret && !no_wait_gpu) {
- long lret;
-
- if (unlock_resv)
- dma_resv_unlock(bo->base.resv);
- spin_unlock(&bo->bdev->lru_lock);
-
- lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
- interruptible,
- 30 * HZ);
-
- if (lret < 0)
- return lret;
- else if (lret == 0)
- return -EBUSY;
-
- spin_lock(&bo->bdev->lru_lock);
- if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
- /*
- * We raced, and lost, someone else holds the reservation now,
- * and is probably busy in ttm_bo_cleanup_memtype_use.
- *
- * Even if it's not the case, because we finished waiting any
- * delayed destruction would succeed, so just return success
- * here.
- */
- spin_unlock(&bo->bdev->lru_lock);
- return 0;
- }
- ret = 0;
- }
-
- if (ret) {
- if (unlock_resv)
- dma_resv_unlock(bo->base.resv);
- spin_unlock(&bo->bdev->lru_lock);
- return ret;
- }
-
- spin_unlock(&bo->bdev->lru_lock);
- ttm_bo_cleanup_memtype_use(bo);
-
- if (unlock_resv)
- dma_resv_unlock(bo->base.resv);
-
- return 0;
-}
-
/*
* Block for the dma_resv object to become idle, lock the buffer and clean up
* the resource and tt object.
@@ -506,150 +432,152 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
-/*
- * Check the target bo is allowable to be evicted or swapout, including cases:
- *
- * a. if share same reservation object with ctx->resv, have assumption
- * reservation objects should already be locked, so not lock again and
- * return true directly when either the opreation allow_reserved_eviction
- * or the target bo already is in delayed free list;
+/**
+ * ttm_bo_evict_first() - Evict the first bo on the manager's LRU list.
+ * @bdev: The ttm device.
+ * @man: The manager whose bo to evict.
+ * @ctx: The TTM operation ctx governing the eviction.
*
- * b. Otherwise, trylock it.
+ * Return: 0 if successful or the resource disappeared. Negative error code on error.
*/
-static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx,
- const struct ttm_place *place,
- bool *locked, bool *busy)
+int ttm_bo_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx)
{
- bool ret = false;
+ struct ttm_resource_cursor cursor;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+ unsigned int mem_type;
+ int ret = 0;
- if (bo->pin_count) {
- *locked = false;
- if (busy)
- *busy = false;
- return false;
+ spin_lock(&bdev->lru_lock);
+ res = ttm_resource_manager_first(man, &cursor);
+ ttm_resource_cursor_fini(&cursor);
+ if (!res) {
+ ret = -ENOENT;
+ goto out_no_ref;
}
+ bo = res->bo;
+ if (!ttm_bo_get_unless_zero(bo))
+ goto out_no_ref;
+ mem_type = res->mem_type;
+ spin_unlock(&bdev->lru_lock);
+ ret = ttm_bo_reserve(bo, ctx->interruptible, ctx->no_wait_gpu, NULL);
+ if (ret)
+ goto out_no_lock;
+ if (!bo->resource || bo->resource->mem_type != mem_type)
+ goto out_bo_moved;
- if (bo->base.resv == ctx->resv) {
- dma_resv_assert_held(bo->base.resv);
- if (ctx->allow_res_evict)
- ret = true;
- *locked = false;
- if (busy)
- *busy = false;
+ if (bo->deleted) {
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (!ret)
+ ttm_bo_cleanup_memtype_use(bo);
} else {
- ret = dma_resv_trylock(bo->base.resv);
- *locked = ret;
- if (busy)
- *busy = !ret;
- }
-
- if (ret && place && (bo->resource->mem_type != place->mem_type ||
- !bo->bdev->funcs->eviction_valuable(bo, place))) {
- ret = false;
- if (*locked) {
- dma_resv_unlock(bo->base.resv);
- *locked = false;
- }
+ ret = ttm_bo_evict(bo, ctx);
}
+out_bo_moved:
+ dma_resv_unlock(bo->base.resv);
+out_no_lock:
+ ttm_bo_put(bo);
+ return ret;
+out_no_ref:
+ spin_unlock(&bdev->lru_lock);
return ret;
}
/**
- * ttm_mem_evict_wait_busy - wait for a busy BO to become available
- *
- * @busy_bo: BO which couldn't be locked with trylock
- * @ctx: operation context
- * @ticket: acquire ticket
- *
- * Try to lock a busy buffer object to avoid failing eviction.
+ * struct ttm_bo_evict_walk - Parameters for the evict walk.
*/
-static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
- struct ttm_operation_ctx *ctx,
- struct ww_acquire_ctx *ticket)
+struct ttm_bo_evict_walk {
+ /** @walk: The walk base parameters. */
+ struct ttm_lru_walk walk;
+ /** @place: The place passed to the resource allocation. */
+ const struct ttm_place *place;
+ /** @evictor: The buffer object we're trying to make room for. */
+ struct ttm_buffer_object *evictor;
+ /** @res: The allocated resource if any. */
+ struct ttm_resource **res;
+ /** @evicted: Number of successful evictions. */
+ unsigned long evicted;
+};
+
+static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
- int r;
-
- if (!busy_bo || !ticket)
- return -EBUSY;
-
- if (ctx->interruptible)
- r = dma_resv_lock_interruptible(busy_bo->base.resv,
- ticket);
- else
- r = dma_resv_lock(busy_bo->base.resv, ticket);
-
- /*
- * TODO: It would be better to keep the BO locked until allocation is at
- * least tried one more time, but that would mean a much larger rework
- * of TTM.
- */
- if (!r)
- dma_resv_unlock(busy_bo->base.resv);
-
- return r == -EDEADLK ? -EBUSY : r;
-}
-
-int ttm_mem_evict_first(struct ttm_device *bdev,
- struct ttm_resource_manager *man,
- const struct ttm_place *place,
- struct ttm_operation_ctx *ctx,
- struct ww_acquire_ctx *ticket)
-{
- struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
- struct ttm_resource_cursor cursor;
- struct ttm_resource *res;
- bool locked = false;
- int ret;
+ struct ttm_bo_evict_walk *evict_walk =
+ container_of(walk, typeof(*evict_walk), walk);
+ s64 lret;
- spin_lock(&bdev->lru_lock);
- ttm_resource_manager_for_each_res(man, &cursor, res) {
- bool busy;
-
- if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
- &locked, &busy)) {
- if (busy && !busy_bo && ticket !=
- dma_resv_locking_ctx(res->bo->base.resv))
- busy_bo = res->bo;
- continue;
- }
+ if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, evict_walk->place))
+ return 0;
- if (ttm_bo_get_unless_zero(res->bo)) {
- bo = res->bo;
- break;
- }
- if (locked)
- dma_resv_unlock(res->bo->base.resv);
+ if (bo->deleted) {
+ lret = ttm_bo_wait_ctx(bo, walk->ctx);
+ if (!lret)
+ ttm_bo_cleanup_memtype_use(bo);
+ } else {
+ lret = ttm_bo_evict(bo, walk->ctx);
}
- if (!bo) {
- if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
- busy_bo = NULL;
- spin_unlock(&bdev->lru_lock);
- ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
- if (busy_bo)
- ttm_bo_put(busy_bo);
- return ret;
- }
+ if (lret)
+ goto out;
- if (bo->deleted) {
- ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
- ctx->no_wait_gpu, locked);
- ttm_bo_put(bo);
- return ret;
- }
+ evict_walk->evicted++;
+ if (evict_walk->res)
+ lret = ttm_resource_alloc(evict_walk->evictor, evict_walk->place,
+ evict_walk->res);
+ if (lret == 0)
+ return 1;
+out:
+ /* Errors that should terminate the walk. */
+ if (lret == -ENOSPC)
+ return -EBUSY;
- spin_unlock(&bdev->lru_lock);
+ return lret;
+}
- ret = ttm_bo_evict(bo, ctx);
- if (locked)
- ttm_bo_unreserve(bo);
- else
- ttm_bo_move_to_lru_tail_unlocked(bo);
+static const struct ttm_lru_walk_ops ttm_evict_walk_ops = {
+ .process_bo = ttm_bo_evict_cb,
+};
+
+static int ttm_bo_evict_alloc(struct ttm_device *bdev,
+ struct ttm_resource_manager *man,
+ const struct ttm_place *place,
+ struct ttm_buffer_object *evictor,
+ struct ttm_operation_ctx *ctx,
+ struct ww_acquire_ctx *ticket,
+ struct ttm_resource **res)
+{
+ struct ttm_bo_evict_walk evict_walk = {
+ .walk = {
+ .ops = &ttm_evict_walk_ops,
+ .ctx = ctx,
+ .ticket = ticket,
+ },
+ .place = place,
+ .evictor = evictor,
+ .res = res,
+ };
+ s64 lret;
+
+ evict_walk.walk.trylock_only = true;
+ lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+ if (lret || !ticket)
+ goto out;
- ttm_bo_put(bo);
- return ret;
+ /* If ticket-locking, repeat while making progress. */
+ evict_walk.walk.trylock_only = false;
+ do {
+ /* The walk may clear the evict_walk.walk.ticket field */
+ evict_walk.walk.ticket = ticket;
+ evict_walk.evicted = 0;
+ lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+ } while (!lret && evict_walk.evicted);
+out:
+ if (lret < 0)
+ return lret;
+ if (lret == 0)
+ return -EBUSY;
+ return 0;
}
/**
@@ -760,6 +688,7 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
for (i = 0; i < placement->num_placement; ++i) {
const struct ttm_place *place = &placement->placement[i];
struct ttm_resource_manager *man;
+ bool may_evict;
man = ttm_manager_type(bdev, place->mem_type);
if (!man || !ttm_resource_manager_used(man))
@@ -769,22 +698,21 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
TTM_PL_FLAG_FALLBACK))
continue;
- do {
- ret = ttm_resource_alloc(bo, place, res);
- if (unlikely(ret && ret != -ENOSPC))
+ may_evict = (force_space && place->mem_type != TTM_PL_SYSTEM);
+ ret = ttm_resource_alloc(bo, place, res);
+ if (ret) {
+ if (ret != -ENOSPC)
return ret;
- if (likely(!ret) || !force_space)
- break;
-
- ret = ttm_mem_evict_first(bdev, man, place, ctx,
- ticket);
- if (unlikely(ret == -EBUSY))
- break;
- if (unlikely(ret))
+ if (!may_evict)
+ continue;
+
+ ret = ttm_bo_evict_alloc(bdev, man, place, bo, ctx,
+ ticket, res);
+ if (ret == -EBUSY)
+ continue;
+ if (ret)
return ret;
- } while (1);
- if (ret)
- continue;
+ }
ret = ttm_bo_add_move_fence(bo, man, ctx->no_wait_gpu);
if (unlikely(ret)) {
@@ -1118,12 +1046,24 @@ int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
}
EXPORT_SYMBOL(ttm_bo_wait_ctx);
-int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
- gfp_t gfp_flags)
+/**
+ * struct ttm_bo_swapout_walk - Parameters for the swapout walk
+ */
+struct ttm_bo_swapout_walk {
+ /** @walk: The walk base parameters. */
+ struct ttm_lru_walk walk;
+ /** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
+ gfp_t gfp_flags;
+};
+
+static s64
+ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
- struct ttm_place place;
- bool locked;
- long ret;
+ struct ttm_place place = {.mem_type = bo->resource->mem_type};
+ struct ttm_bo_swapout_walk *swapout_walk =
+ container_of(walk, typeof(*swapout_walk), walk);
+ struct ttm_operation_ctx *ctx = walk->ctx;
+ s64 ret;
/*
* While the bo may already reside in SYSTEM placement, set
@@ -1131,28 +1071,29 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
* The driver may use the fact that we're moving from SYSTEM
* as an indication that we're about to swap out.
*/
- memset(&place, 0, sizeof(place));
- place.mem_type = bo->resource->mem_type;
- if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
- return -EBUSY;
+ if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, &place)) {
+ ret = -EBUSY;
+ goto out;
+ }
if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
- bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
- !ttm_bo_get_unless_zero(bo)) {
- if (locked)
- dma_resv_unlock(bo->base.resv);
- return -EBUSY;
+ bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED) {
+ ret = -EBUSY;
+ goto out;
}
if (bo->deleted) {
- ret = ttm_bo_cleanup_refs(bo, false, false, locked);
- ttm_bo_put(bo);
- return ret == -EBUSY ? -ENOSPC : ret;
- }
+ pgoff_t num_pages = bo->ttm->num_pages;
- /* TODO: Cleanup the locking */
- spin_unlock(&bo->bdev->lru_lock);
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (ret)
+ goto out;
+
+ ttm_bo_cleanup_memtype_use(bo);
+ ret = num_pages;
+ goto out;
+ }
/*
* Move to system cached
@@ -1164,12 +1105,13 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
memset(&hop, 0, sizeof(hop));
place.mem_type = TTM_PL_SYSTEM;
ret = ttm_resource_alloc(bo, &place, &evict_mem);
- if (unlikely(ret))
+ if (ret)
goto out;
ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
- if (unlikely(ret != 0)) {
- WARN(ret == -EMULTIHOP, "Unexpected multihop in swaput - likely driver bug.\n");
+ if (ret) {
+ WARN(ret == -EMULTIHOP,
+ "Unexpected multihop in swapout - likely driver bug.\n");
ttm_resource_free(bo, &evict_mem);
goto out;
}
@@ -1179,30 +1121,54 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
* Make sure BO is idle.
*/
ret = ttm_bo_wait_ctx(bo, ctx);
- if (unlikely(ret != 0))
+ if (ret)
goto out;
ttm_bo_unmap_virtual(bo);
-
- /*
- * Swap out. Buffer will be swapped in again as soon as
- * anyone tries to access a ttm page.
- */
if (bo->bdev->funcs->swap_notify)
bo->bdev->funcs->swap_notify(bo);
if (ttm_tt_is_populated(bo->ttm))
- ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
+ ret = ttm_tt_swapout(bo->bdev, bo->ttm, swapout_walk->gfp_flags);
+
out:
+ /* Consider -ENOMEM and -ENOSPC non-fatal. */
+ if (ret == -ENOMEM || ret == -ENOSPC)
+ ret = -EBUSY;
- /*
- * Unreserve without putting on LRU to avoid swapping out an
- * already swapped buffer.
- */
- if (locked)
- dma_resv_unlock(bo->base.resv);
- ttm_bo_put(bo);
- return ret == -EBUSY ? -ENOSPC : ret;
+ return ret;
+}
+
+const struct ttm_lru_walk_ops ttm_swap_ops = {
+ .process_bo = ttm_bo_swapout_cb,
+};
+
+/**
+ * ttm_bo_swapout() - Swap out buffer objects on the LRU list to shmem.
+ * @bdev: The ttm device.
+ * @ctx: The ttm_operation_ctx governing the swapout operation.
+ * @man: The resource manager whose resources / buffer objects are
+ * going to be swapped out.
+ * @gfp_flags: The gfp flags used for shmem page allocations.
+ * @target: The desired number of pages to swap out.
+ *
+ * Return: The number of pages actually swapped out, or negative error code
+ * on error.
+ */
+s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+ struct ttm_resource_manager *man, gfp_t gfp_flags,
+ s64 target)
+{
+ struct ttm_bo_swapout_walk swapout_walk = {
+ .walk = {
+ .ops = &ttm_swap_ops,
+ .ctx = ctx,
+ .trylock_only = true,
+ },
+ .gfp_flags = gfp_flags,
+ };
+
+ return ttm_lru_walk_for_evict(&swapout_walk.walk, bdev, man, target);
}
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
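
With ttm_bo_evict_swapout_allowable() removed, eviction policy is consulted straight from the walk callbacks above: a pin-count check plus the driver's eviction_valuable() hook. A hedged sketch of a driver-side hook, where to_mydrv_bo() and its is_scanout flag are hypothetical; ttm_bo_eviction_valuable() is TTM's exported default:

static bool mydrv_eviction_valuable(struct ttm_buffer_object *bo,
				    const struct ttm_place *place)
{
	/* Hypothetical driver rule: never evict an active scanout buffer. */
	if (to_mydrv_bo(bo)->is_scanout)
		return false;

	/* Otherwise defer to TTM's default test. */
	return ttm_bo_eviction_valuable(bo, place);
}
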
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 0b3f4267130c..3c07f4712d5c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -768,3 +768,154 @@ error_destroy_tt:
ttm_tt_destroy(bo->bdev, ttm);
return ret;
}
+
+static bool ttm_lru_walk_trylock(struct ttm_lru_walk *walk,
+ struct ttm_buffer_object *bo,
+ bool *needs_unlock)
+{
+ struct ttm_operation_ctx *ctx = walk->ctx;
+
+ *needs_unlock = false;
+
+ if (dma_resv_trylock(bo->base.resv)) {
+ *needs_unlock = true;
+ return true;
+ }
+
+ if (bo->base.resv == ctx->resv && ctx->allow_res_evict) {
+ dma_resv_assert_held(bo->base.resv);
+ return true;
+ }
+
+ return false;
+}
+
+static int ttm_lru_walk_ticketlock(struct ttm_lru_walk *walk,
+ struct ttm_buffer_object *bo,
+ bool *needs_unlock)
+{
+ struct dma_resv *resv = bo->base.resv;
+ int ret;
+
+ if (walk->ctx->interruptible)
+ ret = dma_resv_lock_interruptible(resv, walk->ticket);
+ else
+ ret = dma_resv_lock(resv, walk->ticket);
+
+ if (!ret) {
+ *needs_unlock = true;
+ /*
+ * Only a single ticketlock per loop. Ticketlocks are prone
+ * to return -EDEADLK causing the eviction to fail, so
+ * after waiting for the ticketlock, revert back to
+ * trylocking for this walk.
+ */
+ walk->ticket = NULL;
+ } else if (ret == -EDEADLK) {
+ /* Caller needs to exit the ww transaction. */
+ ret = -ENOSPC;
+ }
+
+ return ret;
+}
+
+static void ttm_lru_walk_unlock(struct ttm_buffer_object *bo, bool locked)
+{
+ if (locked)
+ dma_resv_unlock(bo->base.resv);
+}
+
+/**
+ * ttm_lru_walk_for_evict() - Perform a LRU list walk, with actions taken on
+ * valid items.
+ * @walk: describes the walk and the actions taken
+ * @bdev: The TTM device.
+ * @man: The struct ttm_resource manager whose LRU lists we're walking.
+ * @target: The end condition for the walk.
+ *
+ * The LRU lists of @man are walked, and for each struct ttm_resource encountered,
+ * the corresponding ttm_buffer_object is locked and taken a reference on, and
+ * the LRU lock is dropped. The LRU lock may be dropped before locking and, in
+ * that case, it's verified that the item actually remains on the LRU list after
+ * the lock, and that the buffer object didn't switch resource in between.
+ *
+ * With a locked object, the actions indicated by @walk->process_bo are
+ * performed, and after that, the bo is unlocked, the refcount dropped and the
+ * next struct ttm_resource is processed. Here, the walker relies on
+ * TTM's restartable LRU list implementation.
+ *
+ * Typically @walk->process_bo() would return the number of pages evicted,
+ * swapped or shrunken, so that when the total exceeds @target, or when the
+ * LRU list has been walked in full, iteration is terminated. It's also terminated
+ * on error. Note that the definition of @target is done by the caller; it
+ * could have a different meaning than the number of pages.
+ *
+ * Note that the way dma_resv individualization is done, locking needs to be done
+ * either with the LRU lock held (trylocking only) or with a reference on the
+ * object.
+ *
+ * Return: The progress made towards target or negative error code on error.
+ */
+s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
+ struct ttm_resource_manager *man, s64 target)
+{
+ struct ttm_resource_cursor cursor;
+ struct ttm_resource *res;
+ s64 progress = 0;
+ s64 lret;
+
+ spin_lock(&bdev->lru_lock);
+ ttm_resource_manager_for_each_res(man, &cursor, res) {
+ struct ttm_buffer_object *bo = res->bo;
+ bool bo_needs_unlock = false;
+ bool bo_locked = false;
+ int mem_type;
+
+ /*
+ * Attempt a trylock before taking a reference on the bo,
+ * since if we do it the other way around, and the trylock fails,
+ * we need to drop the lru lock to put the bo.
+ */
+ if (ttm_lru_walk_trylock(walk, bo, &bo_needs_unlock))
+ bo_locked = true;
+ else if (!walk->ticket || walk->ctx->no_wait_gpu ||
+ walk->trylock_only)
+ continue;
+
+ if (!ttm_bo_get_unless_zero(bo)) {
+ ttm_lru_walk_unlock(bo, bo_needs_unlock);
+ continue;
+ }
+
+ mem_type = res->mem_type;
+ spin_unlock(&bdev->lru_lock);
+
+ lret = 0;
+ if (!bo_locked)
+ lret = ttm_lru_walk_ticketlock(walk, bo, &bo_needs_unlock);
+
+ /*
+ * Note that in between the release of the lru lock and the
+ * ticketlock, the bo may have switched resource,
+ * and also memory type, since the resource may have been
+ * freed and allocated again with a different memory type.
+ * In that case, just skip it.
+ */
+ if (!lret && bo->resource && bo->resource->mem_type == mem_type)
+ lret = walk->ops->process_bo(walk, bo);
+
+ ttm_lru_walk_unlock(bo, bo_needs_unlock);
+ ttm_bo_put(bo);
+ if (lret == -EBUSY || lret == -EALREADY)
+ lret = 0;
+ progress = (lret < 0) ? lret : progress + lret;
+
+ spin_lock(&bdev->lru_lock);
+ if (progress < 0 || progress >= target)
+ break;
+ }
+ ttm_resource_cursor_fini(&cursor);
+ spin_unlock(&bdev->lru_lock);
+
+ return progress;
+}
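
ttm_lru_walk_for_evict() is now the common engine behind both eviction and swapout. A hypothetical caller sketch, not part of this commit, following the same pattern as ttm_bo_evict_walk and ttm_bo_swapout_walk; mydrv_purge() is an assumed driver helper that returns the number of pages it freed:

static s64 mydrv_shrink_cb(struct ttm_lru_walk *walk,
			   struct ttm_buffer_object *bo)
{
	/* Return pages freed (counted against target), 0 to skip this bo,
	 * or a negative error to terminate the walk. */
	if (bo->pin_count)
		return 0;

	return mydrv_purge(bo, walk->ctx);	/* assumed driver helper */
}

static const struct ttm_lru_walk_ops mydrv_shrink_ops = {
	.process_bo = mydrv_shrink_cb,
};

static s64 mydrv_shrink(struct ttm_device *bdev,
			struct ttm_resource_manager *man,
			struct ttm_operation_ctx *ctx, s64 target)
{
	struct ttm_lru_walk walk = {
		.ops = &mydrv_shrink_ops,
		.ctx = ctx,
		.trylock_only = true,	/* no ticket: trylock or skip */
	};

	return ttm_lru_walk_for_evict(&walk, bdev, man, target);
}
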
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 434cf0258000..e7cc4954c1bc 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -148,35 +148,20 @@ int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags)
{
- struct ttm_resource_cursor cursor;
struct ttm_resource_manager *man;
- struct ttm_resource *res;
unsigned i;
- int ret;
+ s64 lret;
- spin_lock(&bdev->lru_lock);
for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
man = ttm_manager_type(bdev, i);
if (!man || !man->use_tt)
continue;
- ttm_resource_manager_for_each_res(man, &cursor, res) {
- struct ttm_buffer_object *bo = res->bo;
- uint32_t num_pages;
-
- if (!bo || bo->resource != res)
- continue;
-
- num_pages = PFN_UP(bo->base.size);
- ret = ttm_bo_swapout(bo, ctx, gfp_flags);
- /* ttm_bo_swapout has dropped the lru_lock */
- if (!ret)
- return num_pages;
- if (ret != -EBUSY)
- return ret;
- }
+ lret = ttm_bo_swapout(bdev, ctx, man, gfp_flags, 1);
+ /* Can be both positive (num_pages) and negative (error) */
+ if (lret)
+ return lret;
}
- spin_unlock(&bdev->lru_lock);
return 0;
}
EXPORT_SYMBOL(ttm_device_swapout);
@@ -274,14 +259,14 @@ static void ttm_device_clear_lru_dma_mappings(struct ttm_device *bdev,
struct ttm_resource *res;
spin_lock(&bdev->lru_lock);
- while ((res = list_first_entry_or_null(list, typeof(*res), lru))) {
+ while ((res = ttm_lru_first_res_or_null(list))) {
struct ttm_buffer_object *bo = res->bo;
/* Take ref against racing releases once lru_lock is unlocked */
if (!ttm_bo_get_unless_zero(bo))
continue;
- list_del_init(&res->lru);
+ list_del_init(&bo->resource->lru.link);
spin_unlock(&bdev->lru_lock);
if (bo->ttm)
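
Since ttm_device_swapout() now forwards the walker's progress (pages swapped out of one manager, or a negative error), a caller can simply loop until it reports no progress. A minimal, hypothetical sketch:

static long mydrv_reclaim_pages(struct ttm_device *bdev, long nr_to_scan)
{
	struct ttm_operation_ctx ctx = { .interruptible = false };
	long freed = 0;
	int lret = 0;

	while (freed < nr_to_scan) {
		lret = ttm_device_swapout(bdev, &ctx, GFP_KERNEL);
		if (lret <= 0)
			break;	/* error, or nothing left to swap out */
		freed += lret;
	}

	return freed ? freed : lret;
}
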
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 6e1fd6985ffc..8504dbe19c1a 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -91,7 +91,7 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
*/
if (order)
gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
- __GFP_KSWAPD_RECLAIM;
+ __GFP_THISNODE;
if (!pool->use_dma_alloc) {
p = alloc_pages_node(pool->nid, gfp_flags, order);
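
The ttm_pool hunk above trades __GFP_KSWAPD_RECLAIM for __GFP_THISNODE on high-order allocations, matching the "allow direct reclaim to allocate local memory" line in the changelog: assuming a GFP_KERNEL caller (which already permits direct reclaim), the pool now insists on node-local pages and fails fast, falling back to smaller orders rather than spilling to remote nodes. A sketch of the resulting call under that assumption:

static struct page *pool_alloc_high_order(struct ttm_pool *pool,
					  gfp_t gfp_flags, unsigned int order)
{
	/* Mirrors the hunk above for order > 0, gfp_flags == GFP_KERNEL. */
	gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
		     __GFP_THISNODE;
	return alloc_pages_node(pool->nid, gfp_flags, order);
}
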
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 4a66b851b67d..6d764ba88aab 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -33,6 +33,68 @@
#include <drm/drm_util.h>
+/* Detach the cursor from the bulk move list */
+static void
+ttm_resource_cursor_clear_bulk(struct ttm_resource_cursor *cursor)
+{
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+
+ cursor->bulk = NULL;
+ list_del_init(&cursor->bulk_link);
+}
+
+/* Move the cursor to the end of the bulk move list it's in */
+static void ttm_resource_cursor_move_bulk_tail(struct ttm_lru_bulk_move *bulk,
+ struct ttm_resource_cursor *cursor)
+{
+ struct ttm_lru_bulk_move_pos *pos;
+
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+
+ if (WARN_ON_ONCE(bulk != cursor->bulk)) {
+ list_del_init(&cursor->bulk_link);
+ return;
+ }
+
+ pos = &bulk->pos[cursor->mem_type][cursor->priority];
+ if (pos->last)
+ list_move(&cursor->hitch.link, &pos->last->lru.link);
+ ttm_resource_cursor_clear_bulk(cursor);
+}
+
+/* Move all cursors attached to a bulk move to its end */
+static void ttm_bulk_move_adjust_cursors(struct ttm_lru_bulk_move *bulk)
+{
+ struct ttm_resource_cursor *cursor, *next;
+
+ list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
+ ttm_resource_cursor_move_bulk_tail(bulk, cursor);
+}
+
+/* Remove a cursor from an empty bulk move list */
+static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk)
+{
+ struct ttm_resource_cursor *cursor, *next;
+
+ list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
+ ttm_resource_cursor_clear_bulk(cursor);
+}
+
+/**
+ * ttm_resource_cursor_fini() - Finalize the LRU list cursor usage
+ * @cursor: The struct ttm_resource_cursor to finalize.
+ *
+ * The function pulls the LRU list cursor off any lists it was previously
+ * attached to. Needs to be called with the LRU lock held. The function
+ * can safely be called multiple times in succession.
+ */
+void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor)
+{
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+ list_del_init(&cursor->hitch.link);
+ ttm_resource_cursor_clear_bulk(cursor);
+}
+
/**
* ttm_lru_bulk_move_init - initialize a bulk move structure
* @bulk: the structure to init
@@ -42,10 +104,28 @@
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
{
memset(bulk, 0, sizeof(*bulk));
+ INIT_LIST_HEAD(&bulk->cursor_list);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_init);
/**
+ * ttm_lru_bulk_move_fini - finalize a bulk move structure
+ * @bdev: The struct ttm_device
+ * @bulk: the structure to finalize
+ *
+ * Sanity checks that bulk moves don't have any
+ * resources left and hence no cursors attached.
+ */
+void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
+ struct ttm_lru_bulk_move *bulk)
+{
+ spin_lock(&bdev->lru_lock);
+ ttm_bulk_move_drop_cursors(bulk);
+ spin_unlock(&bdev->lru_lock);
+}
+EXPORT_SYMBOL(ttm_lru_bulk_move_fini);
+
+/**
* ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
*
* @bulk: bulk move structure
@@ -57,6 +137,7 @@ void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
{
unsigned i, j;
+ ttm_bulk_move_adjust_cursors(bulk);
for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
@@ -70,8 +151,8 @@ void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
dma_resv_assert_held(pos->last->bo->base.resv);
man = ttm_manager_type(pos->first->bo->bdev, i);
- list_bulk_move_tail(&man->lru[j], &pos->first->lru,
- &pos->last->lru);
+ list_bulk_move_tail(&man->lru[j], &pos->first->lru.link,
+ &pos->last->lru.link);
}
}
}
@@ -84,14 +165,38 @@ ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
return &bulk->pos[res->mem_type][res->bo->priority];
}
+/* Return the previous resource on the list (skip over non-resource list items) */
+static struct ttm_resource *ttm_lru_prev_res(struct ttm_resource *cur)
+{
+ struct ttm_lru_item *lru = &cur->lru;
+
+ do {
+ lru = list_prev_entry(lru, link);
+ } while (!ttm_lru_item_is_res(lru));
+
+ return ttm_lru_item_to_res(lru);
+}
+
+/* Return the next resource on the list (skip over non-resource list items) */
+static struct ttm_resource *ttm_lru_next_res(struct ttm_resource *cur)
+{
+ struct ttm_lru_item *lru = &cur->lru;
+
+ do {
+ lru = list_next_entry(lru, link);
+ } while (!ttm_lru_item_is_res(lru));
+
+ return ttm_lru_item_to_res(lru);
+}
+
/* Move the resource to the tail of the bulk move range */
static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
struct ttm_resource *res)
{
if (pos->last != res) {
if (pos->first == res)
- pos->first = list_next_entry(res, lru);
- list_move(&res->lru, &pos->last->lru);
+ pos->first = ttm_lru_next_res(res);
+ list_move(&res->lru.link, &pos->last->lru.link);
pos->last = res;
}
}
@@ -122,11 +227,11 @@ static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
pos->first = NULL;
pos->last = NULL;
} else if (pos->first == res) {
- pos->first = list_next_entry(res, lru);
+ pos->first = ttm_lru_next_res(res);
} else if (pos->last == res) {
- pos->last = list_prev_entry(res, lru);
+ pos->last = ttm_lru_prev_res(res);
} else {
- list_move(&res->lru, &pos->last->lru);
+ list_move(&res->lru.link, &pos->last->lru.link);
}
}
@@ -155,7 +260,7 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
lockdep_assert_held(&bo->bdev->lru_lock);
if (bo->pin_count) {
- list_move_tail(&res->lru, &bdev->pinned);
+ list_move_tail(&res->lru.link, &bdev->pinned);
} else if (bo->bulk_move) {
struct ttm_lru_bulk_move_pos *pos =
@@ -166,7 +271,7 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
struct ttm_resource_manager *man;
man = ttm_manager_type(bdev, res->mem_type);
- list_move_tail(&res->lru, &man->lru[bo->priority]);
+ list_move_tail(&res->lru.link, &man->lru[bo->priority]);
}
}
@@ -197,9 +302,9 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
man = ttm_manager_type(bo->bdev, place->mem_type);
spin_lock(&bo->bdev->lru_lock);
if (bo->pin_count)
- list_add_tail(&res->lru, &bo->bdev->pinned);
+ list_add_tail(&res->lru.link, &bo->bdev->pinned);
else
- list_add_tail(&res->lru, &man->lru[bo->priority]);
+ list_add_tail(&res->lru.link, &man->lru[bo->priority]);
man->usage += res->size;
spin_unlock(&bo->bdev->lru_lock);
}
@@ -221,7 +326,7 @@ void ttm_resource_fini(struct ttm_resource_manager *man,
struct ttm_device *bdev = man->bdev;
spin_lock(&bdev->lru_lock);
- list_del_init(&res->lru);
+ list_del_init(&res->lru.link);
man->usage -= res->size;
spin_unlock(&bdev->lru_lock);
}
@@ -390,24 +495,11 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
};
struct dma_fence *fence;
int ret;
- unsigned i;
- /*
- * Can't use standard list traversal since we're unlocking.
- */
-
- spin_lock(&bdev->lru_lock);
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
- while (!list_empty(&man->lru[i])) {
- spin_unlock(&bdev->lru_lock);
- ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
- NULL);
- if (ret)
- return ret;
- spin_lock(&bdev->lru_lock);
- }
- }
- spin_unlock(&bdev->lru_lock);
+ do {
+ ret = ttm_bo_evict_first(bdev, man, &ctx);
+ cond_resched();
+ } while (!ret);
spin_lock(&man->move_lock);
fence = dma_fence_get(man->move);
@@ -460,53 +552,106 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
}
EXPORT_SYMBOL(ttm_resource_manager_debug);
+static void
+ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
+ struct ttm_lru_item *next_lru)
+{
+ struct ttm_resource *next = ttm_lru_item_to_res(next_lru);
+ struct ttm_lru_bulk_move *bulk = NULL;
+ struct ttm_buffer_object *bo = next->bo;
+
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+ bulk = bo->bulk_move;
+
+ if (cursor->bulk != bulk) {
+ if (bulk) {
+ list_move_tail(&cursor->bulk_link, &bulk->cursor_list);
+ cursor->mem_type = next->mem_type;
+ } else {
+ list_del_init(&cursor->bulk_link);
+ }
+ cursor->bulk = bulk;
+ }
+}
+
/**
- * ttm_resource_manager_first
- *
+ * ttm_resource_manager_first() - Start iterating over the resources
+ * of a resource manager
* @man: resource manager to iterate over
* @cursor: cursor to record the position
*
- * Returns the first resource from the resource manager.
+ * Initializes the cursor and starts iterating. When done iterating,
+ * the caller must explicitly call ttm_resource_cursor_fini().
+ *
+ * Return: The first resource from the resource manager.
*/
struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_manager *man,
struct ttm_resource_cursor *cursor)
{
- struct ttm_resource *res;
-
lockdep_assert_held(&man->bdev->lru_lock);
- for (cursor->priority = 0; cursor->priority < TTM_MAX_BO_PRIORITY;
- ++cursor->priority)
- list_for_each_entry(res, &man->lru[cursor->priority], lru)
- return res;
+ cursor->priority = 0;
+ cursor->man = man;
+ ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
+ INIT_LIST_HEAD(&cursor->bulk_link);
+ list_add(&cursor->hitch.link, &man->lru[cursor->priority]);
- return NULL;
+ return ttm_resource_manager_next(cursor);
}
/**
- * ttm_resource_manager_next
- *
- * @man: resource manager to iterate over
+ * ttm_resource_manager_next() - Continue iterating over the resource manager
+ * resources
* @cursor: cursor to record the position
- * @res: the current resource pointer
*
- * Returns the next resource from the resource manager.
+ * Return: the next resource from the resource manager.
*/
struct ttm_resource *
-ttm_resource_manager_next(struct ttm_resource_manager *man,
- struct ttm_resource_cursor *cursor,
- struct ttm_resource *res)
+ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
{
+ struct ttm_resource_manager *man = cursor->man;
+ struct ttm_lru_item *lru;
+
lockdep_assert_held(&man->bdev->lru_lock);
- list_for_each_entry_continue(res, &man->lru[cursor->priority], lru)
- return res;
+ for (;;) {
+ lru = &cursor->hitch;
+ list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) {
+ if (ttm_lru_item_is_res(lru)) {
+ ttm_resource_cursor_check_bulk(cursor, lru);
+ list_move(&cursor->hitch.link, &lru->link);
+ return ttm_lru_item_to_res(lru);
+ }
+ }
+
+ if (++cursor->priority >= TTM_MAX_BO_PRIORITY)
+ break;
+
+ list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
+ ttm_resource_cursor_clear_bulk(cursor);
+ }
+
+ ttm_resource_cursor_fini(cursor);
- for (++cursor->priority; cursor->priority < TTM_MAX_BO_PRIORITY;
- ++cursor->priority)
- list_for_each_entry(res, &man->lru[cursor->priority], lru)
- return res;
+ return NULL;
+}
+
+/**
+ * ttm_lru_first_res_or_null() - Return the first resource on an lru list
+ * @head: The list head of the lru list.
+ *
+ * Return: Pointer to the first resource on the lru list or NULL if
+ * there is none.
+ */
+struct ttm_resource *ttm_lru_first_res_or_null(struct list_head *head)
+{
+ struct ttm_lru_item *lru;
+
+ list_for_each_entry(lru, head, link) {
+ if (ttm_lru_item_is_res(lru))
+ return ttm_lru_item_to_res(lru);
+ }
return NULL;
}
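
Finally, a hypothetical iteration sketch (not part of this commit) showing the reworked cursor API in ttm_resource.c at work; unlike the old first/next pair, the hitch keeps the position valid even if a callback drops and re-takes the lru_lock, and the cursor must be explicitly finalized:

static void mydrv_dump_lru(struct ttm_resource_manager *man)
{
	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;

	spin_lock(&man->bdev->lru_lock);
	ttm_resource_manager_for_each_res(man, &cursor, res)
		pr_info("bo %p, size %llu\n", res->bo, res->size);
	/* Mandatory, and safe even after a full walk or an early break. */
	ttm_resource_cursor_fini(&cursor);
	spin_unlock(&man->bdev->lru_lock);
}
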