author	Linus Torvalds <torvalds@linux-foundation.org>	2018-02-01 17:48:47 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-02-01 17:48:47 -0800
commit	4bf772b14675411a69b3c807f73006de0fe4b649 (patch)
tree	b841e3ba0e3429695589cb0ab73871fa12f42c38 /drivers/gpu/drm/ttm/ttm_bo.c
parent	3879ae653a3e98380fe2daf653338830b7ca0097 (diff)
parent	24b8ef699e8221d2b7f813adaab13eec053e1507 (diff)
Merge tag 'drm-for-v4.16' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "This seems to have been a comparatively quieter merge window, I assume
  due to holidays etc. The "biggest" change is AMD header cleanups, which
  merge/remove a bunch of them. The AMD gpu scheduler is now being made
  generic with the etnaviv driver wanting to reuse the code, hopefully
  other drivers can go in the same direction.

  Otherwise it's the usual lots of stuff in i915/amdgpu, not so much
  stuff elsewhere.

  Core:
   - Add .last_close and .output_poll_changed helpers to reduce driver
     footprints
   - Fix plane clipping
   - Improved debug printing support
   - Add panel orientation property
   - Update edid derived properties at edid setting
   - Reduction in fbdev driver footprint
   - Move amdgpu scheduler into core for other drivers to use.

  i915:
   - Selftest and IGT improvements
   - Fast boot prep work on IPS, pipe config
   - HW workarounds for Cannonlake, Geminilake
   - Cannonlake clock and HDMI2.0 fixes
   - GPU cache invalidation and context switch improvements
   - Display planes cleanup
   - New PMU interface for perf queries
   - New firmware support for KBL/SKL
   - Geminilake HW workaround for performance
   - Coffeelake stolen memory improvements
   - GPU reset robustness work
   - Cannonlake horizontal plane flipping
   - GVT work

  amdgpu/radeon:
   - RV and Vega header file cleanups (lots of lines gone!)
   - TTM operation context support
   - 48-bit GPUVM support for Vega/RV
   - ECC support for Vega
   - Resizeable BAR support
   - Multi-display sync support
   - Enable swapout for reserved BOs during allocation
   - S3 fixes on Raven
   - GPU reset cleanup and fixes
   - 2+1 level GPU page table

  amdkfd:
   - GFX7/8 SDMA user queues support
   - Hardware scheduling for multiple processes
   - dGPU prep work

  rcar:
   - Added R8A7743/5 support
   - System suspend/resume support

  sun4i:
   - Multi-plane support for YUV formats
   - A83T and LVDS support

  msm:
   - Devfreq support for GPU

  tegra:
   - Prep work for adding Tegra186 support
   - Tegra186 HDMI support
   - HDMI2.0 and zpos support by using generic helpers

  tilcdc:
   - Misc fixes

  omapdrm:
   - Support memory bandwidth limits
   - DSI command mode panel cleanups
   - DMM error handling

  exynos:
   - drop the old IPP subdriver.

  etnaviv:
   - Occlusion query fixes
   - Job handling fixes
   - Prep work for hooking in gpu scheduler

  armada:
   - Move closer to atomic modesetting
   - Allow disabling primary plane if overlay is full screen

  imx:
   - Format modifier support
   - Add tile prefetch to PRE
   - Runtime PM support for PRG

  ast:
   - fix LUT loading"

* tag 'drm-for-v4.16' of git://people.freedesktop.org/~airlied/linux: (1471 commits)
  drm/ast: Load lut in crtc_commit
  drm: Check for lessee in DROP_MASTER ioctl
  drm: fix gpu scheduler link order
  drm/amd/display: Demote error print to debug print when ATOM impl missing
  dma-buf: fix reservation_object_wait_timeout_rcu once more v2
  drm/amdgpu: Avoid leaking PM domain on driver unbind (v2)
  drm/amd/amdgpu: Add Polaris version check
  drm/amdgpu: Reenable manual GPU reset from sysfs
  drm/amdgpu: disable MMHUB power gating on raven
  drm/ttm: Don't unreserve swapped BOs that were previously reserved
  drm/ttm: Don't add swapped BOs to swap-LRU list
  drm/amdgpu: only check for ECC on Vega10
  drm/amd/powerplay: Fix smu_table_entry.handle type
  drm/ttm: add VADDR_FLAG_UPDATED_COUNT to correctly update dma_page global count
  drm: Fix PANEL_ORIENTATION_QUIRKS breaking the Kconfig DRM menuconfig
  drm/radeon: fill in rb backend map on evergreen/ni.
  drm/amdgpu/gfx9: fix ngg enablement to clear gds reserved memory (v2)
  drm/ttm: only free pages rather than update global memory count together
  drm/amdgpu: fix CPU based VM updates
  drm/amdgpu: fix typo in amdgpu_vce_validate_bo
  ...
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo.c')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c	341
1 file changed, 171 insertions(+), 170 deletions(-)
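The bulk of this file's churn is the new TTM operation-context API: the (interruptible, no_wait_gpu) boolean pairs threaded through ttm_bo_validate(), ttm_bo_mem_space() and friends are folded into a single struct ttm_operation_ctx. A minimal sketch of what the conversion means at a driver call site (illustrative only, using field names that appear in the diff below):

	struct ttm_operation_ctx ctx = {
		.interruptible = true,	/* sleeps may be interrupted */
		.no_wait_gpu = false,	/* waiting for the GPU is allowed */
	};
	int ret;

	/* before this series: ttm_bo_validate(bo, &placement, true, false) */
	ret = ttm_bo_validate(bo, &placement, &ctx);
	if (ret == 0)
		pr_debug("moved %llu bytes during validation\n",
			 (unsigned long long)ctx.bytes_moved);

Besides carrying the two flags, the context accumulates statistics (ctx.bytes_moved is updated in ttm_bo_handle_move_mem() below), which is how amdgpu's TTM work tracks eviction cost per operation.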
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c088703777e2..893003fc76a1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -42,11 +42,6 @@
#include <linux/atomic.h>
#include <linux/reservation.h>
-#define TTM_ASSERT_LOCKED(param)
-#define TTM_DEBUG(fmt, arg...)
-#define TTM_BO_HASH_ORDER 13
-
-static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);
static struct attribute ttm_bo_count = {
@@ -165,7 +160,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
@@ -175,7 +170,8 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
list_add_tail(&bo->lru, &man->lru[bo->priority]);
kref_get(&bo->list_kref);
- if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ if (bo->ttm && !(bo->ttm->page_flags &
+ (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
list_add_tail(&bo->swap,
&bo->glob->swap_lru[bo->priority]);
kref_get(&bo->list_kref);
@@ -216,7 +212,7 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
ttm_bo_del_from_lru(bo);
ttm_bo_add_to_lru(bo);
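These assertion conversions are mechanical: the open-coded lockdep checks on the ww_mutex move behind the reservation API. For reference, include/linux/reservation.h of this era defines the wrapper roughly as follows (paraphrased from memory; verify against the tree):

	/* approximate definition, for context */
	#define reservation_object_assert_held(obj) \
		lockdep_assert_held(&(obj)->lock.base)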
@@ -233,7 +229,7 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
int ret = 0;
uint32_t page_flags = 0;
- TTM_ASSERT_LOCKED(&bo->mutex);
+ reservation_object_assert_held(bo->resv);
bo->ttm = NULL;
if (bdev->need_dma32)
@@ -269,9 +265,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
}
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem,
- bool evict, bool interruptible,
- bool no_wait_gpu)
+ struct ttm_mem_reg *mem, bool evict,
+ struct ttm_operation_ctx *ctx)
{
struct ttm_bo_device *bdev = bo->bdev;
bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -306,7 +301,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
goto out_err;
if (mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_tt_bind(bo->ttm, mem);
+ ret = ttm_tt_bind(bo->ttm, mem, ctx);
if (ret)
goto out_err;
}
@@ -325,12 +320,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ret = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, mem);
+ ret = ttm_bo_move_ttm(bo, ctx, mem);
else if (bdev->driver->move)
- ret = bdev->driver->move(bo, evict, interruptible,
- no_wait_gpu, mem);
+ ret = bdev->driver->move(bo, evict, ctx, mem);
else
- ret = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, mem);
+ ret = ttm_bo_move_memcpy(bo, ctx, mem);
if (ret) {
if (bdev->driver->move_notify) {
@@ -355,13 +349,13 @@ moved:
bo->evicted = false;
}
- if (bo->mem.mm_node) {
+ if (bo->mem.mm_node)
bo->offset = (bo->mem.start << PAGE_SHIFT) +
bdev->man[bo->mem.mem_type].gpu_offset;
- bo->cur_placement = bo->mem.placement;
- } else
+ else
bo->offset = 0;
+ ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
return 0;
out_err:
@@ -390,8 +384,6 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
ttm_bo_mem_put(bo, &bo->mem);
-
- ww_mutex_unlock (&bo->resv->lock);
}
static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
@@ -448,7 +440,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
}
spin_lock(&glob->lru_lock);
- ret = __ttm_bo_reserve(bo, false, true, NULL);
+ ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY;
if (!ret) {
if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
ttm_bo_del_from_lru(bo);
@@ -457,6 +449,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
reservation_object_unlock(&bo->ttm_resv);
ttm_bo_cleanup_memtype_use(bo);
+ reservation_object_unlock(bo->resv);
return;
}
@@ -472,7 +465,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
ttm_bo_add_to_lru(bo);
}
- __ttm_bo_unreserve(bo);
+ reservation_object_unlock(bo->resv);
}
if (bo->resv != &bo->ttm_resv)
reservation_object_unlock(&bo->ttm_resv);
@@ -487,20 +480,21 @@ error:
}
/**
- * function ttm_bo_cleanup_refs_and_unlock
+ * function ttm_bo_cleanup_refs
* If bo idle, remove from delayed- and lru lists, and unref.
* If not idle, do nothing.
*
* Must be called with lru_lock and reservation held, this function
- * will drop both before returning.
+ * will drop the lru lock and optionally the reservation lock before returning.
*
* @interruptible Any sleeps should occur interruptibly.
* @no_wait_gpu Never wait for gpu. Return -EBUSY instead.
+ * @unlock_resv Unlock the reservation lock as well.
*/
-static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
- bool interruptible,
- bool no_wait_gpu)
+static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
+ bool interruptible, bool no_wait_gpu,
+ bool unlock_resv)
{
struct ttm_bo_global *glob = bo->glob;
struct reservation_object *resv;
@@ -518,7 +512,9 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
if (ret && !no_wait_gpu) {
long lret;
- ww_mutex_unlock(&bo->resv->lock);
+
+ if (unlock_resv)
+ reservation_object_unlock(bo->resv);
spin_unlock(&glob->lru_lock);
lret = reservation_object_wait_timeout_rcu(resv, true,
@@ -531,24 +527,24 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
return -EBUSY;
spin_lock(&glob->lru_lock);
- ret = __ttm_bo_reserve(bo, false, true, NULL);
-
- /*
- * We raced, and lost, someone else holds the reservation now,
- * and is probably busy in ttm_bo_cleanup_memtype_use.
- *
- * Even if it's not the case, because we finished waiting any
- * delayed destruction would succeed, so just return success
- * here.
- */
- if (ret) {
+ if (unlock_resv && !reservation_object_trylock(bo->resv)) {
+ /*
+ * We raced, and lost, someone else holds the reservation now,
+ * and is probably busy in ttm_bo_cleanup_memtype_use.
+ *
+ * Even if it's not the case, because we finished waiting any
+ * delayed destruction would succeed, so just return success
+ * here.
+ */
spin_unlock(&glob->lru_lock);
return 0;
}
+ ret = 0;
}
if (ret || unlikely(list_empty(&bo->ddestroy))) {
- __ttm_bo_unreserve(bo);
+ if (unlock_resv)
+ reservation_object_unlock(bo->resv);
spin_unlock(&glob->lru_lock);
return ret;
}
@@ -560,6 +556,9 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
spin_unlock(&glob->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
+ if (unlock_resv)
+ reservation_object_unlock(bo->resv);
+
return 0;
}
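The renamed helper makes the locking contract explicit: it is entered with glob->lru_lock and the buffer's reservation held, always drops the LRU lock, and drops the reservation only when unlock_resv is true. A sketch of the resulting caller pattern, with names assumed from the surrounding code rather than copied from a real call site:

	spin_lock(&glob->lru_lock);
	if (reservation_object_trylock(bo->resv)) {
		/* drops lru_lock always; drops bo->resv since unlock_resv=true */
		ret = ttm_bo_cleanup_refs(bo, false, true, true);
	} else {
		/* someone else holds the reservation; leave the bo for later */
		spin_unlock(&glob->lru_lock);
	}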
@@ -567,60 +566,44 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
* Traverse the delayed list, and call ttm_bo_cleanup_refs on all
* encountered buffers.
*/
-
-static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
+static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
struct ttm_bo_global *glob = bdev->glob;
- struct ttm_buffer_object *entry = NULL;
- int ret = 0;
+ struct list_head removed;
+ bool empty;
- spin_lock(&glob->lru_lock);
- if (list_empty(&bdev->ddestroy))
- goto out_unlock;
+ INIT_LIST_HEAD(&removed);
- entry = list_first_entry(&bdev->ddestroy,
- struct ttm_buffer_object, ddestroy);
- kref_get(&entry->list_kref);
-
- for (;;) {
- struct ttm_buffer_object *nentry = NULL;
+ spin_lock(&glob->lru_lock);
+ while (!list_empty(&bdev->ddestroy)) {
+ struct ttm_buffer_object *bo;
- if (entry->ddestroy.next != &bdev->ddestroy) {
- nentry = list_first_entry(&entry->ddestroy,
- struct ttm_buffer_object, ddestroy);
- kref_get(&nentry->list_kref);
- }
+ bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
+ ddestroy);
+ kref_get(&bo->list_kref);
+ list_move_tail(&bo->ddestroy, &removed);
- ret = __ttm_bo_reserve(entry, false, true, NULL);
- if (remove_all && ret) {
+ if (remove_all || bo->resv != &bo->ttm_resv) {
spin_unlock(&glob->lru_lock);
- ret = __ttm_bo_reserve(entry, false, false, NULL);
+ reservation_object_lock(bo->resv, NULL);
+
spin_lock(&glob->lru_lock);
- }
+ ttm_bo_cleanup_refs(bo, false, !remove_all, true);
- if (!ret)
- ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
- !remove_all);
- else
+ } else if (reservation_object_trylock(bo->resv)) {
+ ttm_bo_cleanup_refs(bo, false, !remove_all, true);
+ } else {
spin_unlock(&glob->lru_lock);
+ }
- kref_put(&entry->list_kref, ttm_bo_release_list);
- entry = nentry;
-
- if (ret || !entry)
- goto out;
-
+ kref_put(&bo->list_kref, ttm_bo_release_list);
spin_lock(&glob->lru_lock);
- if (list_empty(&entry->ddestroy))
- break;
}
-
-out_unlock:
+ list_splice_tail(&removed, &bdev->ddestroy);
+ empty = list_empty(&bdev->ddestroy);
spin_unlock(&glob->lru_lock);
-out:
- if (entry)
- kref_put(&entry->list_kref, ttm_bo_release_list);
- return ret;
+
+ return empty;
}
static void ttm_bo_delayed_workqueue(struct work_struct *work)
@@ -628,7 +611,7 @@ static void ttm_bo_delayed_workqueue(struct work_struct *work)
struct ttm_bo_device *bdev =
container_of(work, struct ttm_bo_device, wq.work);
- if (ttm_bo_delayed_delete(bdev, false)) {
+ if (!ttm_bo_delayed_delete(bdev, false)) {
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
}
@@ -672,15 +655,15 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
-static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
- bool no_wait_gpu)
+static int ttm_bo_evict(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_reg evict_mem;
struct ttm_placement placement;
int ret = 0;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
@@ -690,8 +673,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
placement.num_placement = 0;
placement.num_busy_placement = 0;
bdev->driver->evict_flags(bo, &placement);
- ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
- no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
if (ret) {
if (ret != -ERESTARTSYS) {
pr_err("Failed to find memory space for buffer 0x%p eviction\n",
@@ -701,8 +683,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
goto out;
}
- ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
- no_wait_gpu);
+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
pr_err("Buffer eviction failed\n");
@@ -728,49 +709,78 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
+/**
+ * Check whether the target bo is allowed to be evicted or swapped out,
+ * covering two cases:
+ *
+ * a. if the bo shares its reservation object with ctx->resv, assume that
+ * reservation object is already locked, so do not lock it again; return
+ * true directly when either the operation allows reserved eviction
+ * (allow_reserved_eviction) or the target bo is already on the delayed
+ * free list;
+ *
+ * b. otherwise, trylock it.
+ */
+static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx, bool *locked)
+{
+ bool ret = false;
+
+ *locked = false;
+ if (bo->resv == ctx->resv) {
+ reservation_object_assert_held(bo->resv);
+ if (ctx->allow_reserved_eviction || !list_empty(&bo->ddestroy))
+ ret = true;
+ } else {
+ *locked = reservation_object_trylock(bo->resv);
+ ret = *locked;
+ }
+
+ return ret;
+}
+
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
- uint32_t mem_type,
- const struct ttm_place *place,
- bool interruptible,
- bool no_wait_gpu)
+ uint32_t mem_type,
+ const struct ttm_place *place,
+ struct ttm_operation_ctx *ctx)
{
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct ttm_buffer_object *bo;
- int ret = -EBUSY;
+ struct ttm_buffer_object *bo = NULL;
+ bool locked = false;
unsigned i;
+ int ret;
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &man->lru[i], lru) {
- ret = __ttm_bo_reserve(bo, false, true, NULL);
- if (ret)
+ if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked))
continue;
if (place && !bdev->driver->eviction_valuable(bo,
place)) {
- __ttm_bo_unreserve(bo);
- ret = -EBUSY;
+ if (locked)
+ reservation_object_unlock(bo->resv);
continue;
}
-
break;
}
- if (!ret)
+ /* If the inner loop terminated early, we have our candidate */
+ if (&bo->lru != &man->lru[i])
break;
+
+ bo = NULL;
}
- if (ret) {
+ if (!bo) {
spin_unlock(&glob->lru_lock);
- return ret;
+ return -EBUSY;
}
kref_get(&bo->list_kref);
if (!list_empty(&bo->ddestroy)) {
- ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
- no_wait_gpu);
+ ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
+ ctx->no_wait_gpu, locked);
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
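Together with the new context fields, this helper is what implements the "enable swapout for reserved BOs during allocation" item from the pull message: a driver that already holds a buffer's reservation can advertise it via ctx->resv so the eviction loop considers buffers sharing that reservation instead of skipping them. A hedged sketch of such a call site; only the field names come from this patch, the surrounding setup is assumed:

	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->resv,	/* reservation the caller already holds */
		.allow_reserved_eviction = true,
	};
	int ret;

	/* any eviction triggered here may now reclaim BOs sharing bo->resv */
	ret = ttm_bo_validate(bo, &placement, &ctx);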
@@ -778,10 +788,14 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
- BUG_ON(ret != 0);
-
- ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
- ttm_bo_unreserve(bo);
+ ret = ttm_bo_evict(bo, ctx);
+ if (locked) {
+ ttm_bo_unreserve(bo);
+ } else {
+ spin_lock(&glob->lru_lock);
+ ttm_bo_add_to_lru(bo);
+ spin_unlock(&glob->lru_lock);
+ }
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
@@ -832,8 +846,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
uint32_t mem_type,
const struct ttm_place *place,
struct ttm_mem_reg *mem,
- bool interruptible,
- bool no_wait_gpu)
+ struct ttm_operation_ctx *ctx)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -845,8 +858,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ret;
if (mem->mm_node)
break;
- ret = ttm_mem_evict_first(bdev, mem_type, place,
- interruptible, no_wait_gpu);
+ ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
if (unlikely(ret != 0))
return ret;
} while (1);
@@ -909,8 +921,7 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
- bool interruptible,
- bool no_wait_gpu)
+ struct ttm_operation_ctx *ctx)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
@@ -1004,8 +1015,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
return 0;
}
- ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
- interruptible, no_wait_gpu);
+ ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, ctx);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
return 0;
@@ -1024,14 +1034,13 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
EXPORT_SYMBOL(ttm_bo_mem_space);
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- bool interruptible,
- bool no_wait_gpu)
+ struct ttm_placement *placement,
+ struct ttm_operation_ctx *ctx)
{
int ret = 0;
struct ttm_mem_reg mem;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
@@ -1041,12 +1050,10 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
/*
* Determine where to move the buffer.
*/
- ret = ttm_bo_mem_space(bo, placement, &mem,
- interruptible, no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
if (ret)
goto out_unlock;
- ret = ttm_bo_handle_move_mem(bo, &mem, false,
- interruptible, no_wait_gpu);
+ ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
out_unlock:
if (ret && mem.mm_node)
ttm_bo_mem_put(bo, &mem);
@@ -1097,20 +1104,18 @@ bool ttm_bo_mem_compat(struct ttm_placement *placement,
EXPORT_SYMBOL(ttm_bo_mem_compat);
int ttm_bo_validate(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- bool interruptible,
- bool no_wait_gpu)
+ struct ttm_placement *placement,
+ struct ttm_operation_ctx *ctx)
{
int ret;
uint32_t new_flags;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
/*
* Check whether we need to move buffer.
*/
if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
- ret = ttm_bo_move_buffer(bo, placement, interruptible,
- no_wait_gpu);
+ ret = ttm_bo_move_buffer(bo, placement, ctx);
if (ret)
return ret;
} else {
@@ -1139,7 +1144,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
- bool interruptible,
+ struct ttm_operation_ctx *ctx,
struct file *persistent_swap_storage,
size_t acc_size,
struct sg_table *sg,
@@ -1151,7 +1156,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
bool locked;
- ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+ ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
if (ret) {
pr_err("Out of kernel memory\n");
if (destroy)
@@ -1199,7 +1204,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
bo->sg = sg;
if (resv) {
bo->resv = resv;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
} else {
bo->resv = &bo->ttm_resv;
}
@@ -1221,12 +1226,12 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
* since otherwise lockdep will be angered in radeon.
*/
if (!resv) {
- locked = ww_mutex_trylock(&bo->resv->lock);
+ locked = reservation_object_trylock(bo->resv);
WARN_ON(!locked);
}
if (likely(!ret))
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+ ret = ttm_bo_validate(bo, placement, ctx);
if (unlikely(ret)) {
if (!resv)
@@ -1259,10 +1264,11 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
struct reservation_object *resv,
void (*destroy) (struct ttm_buffer_object *))
{
+ struct ttm_operation_ctx ctx = { interruptible, false };
int ret;
ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
- page_alignment, interruptible,
+ page_alignment, &ctx,
persistent_swap_storage, acc_size,
sg, resv, destroy);
if (ret)
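The legacy ttm_bo_init() entry point keeps its bool parameter and merely wraps it in a context. Note that the positional initializer { interruptible, false } relies on .interruptible and .no_wait_gpu being the first two members; the designated form used elsewhere in this diff makes that explicit:

	/* equivalent to the positional form above */
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false,
	};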
@@ -1334,6 +1340,7 @@ EXPORT_SYMBOL(ttm_bo_create);
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
unsigned mem_type)
{
+ struct ttm_operation_ctx ctx = { false, false };
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob;
struct dma_fence *fence;
@@ -1348,7 +1355,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
while (!list_empty(&man->lru[i])) {
spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
+ ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
if (ret)
return ret;
spin_lock(&glob->lru_lock);
@@ -1469,7 +1476,6 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj)
struct ttm_bo_global *glob =
container_of(kobj, struct ttm_bo_global, kobj);
- ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
__free_page(glob->dummy_read_page);
kfree(glob);
}
@@ -1494,6 +1500,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
mutex_init(&glob->device_list_mutex);
spin_lock_init(&glob->lru_lock);
glob->mem_glob = bo_ref->mem_glob;
+ glob->mem_glob->bo_glob = glob;
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
if (unlikely(glob->dummy_read_page == NULL)) {
@@ -1504,14 +1511,6 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
INIT_LIST_HEAD(&glob->swap_lru[i]);
INIT_LIST_HEAD(&glob->device_list);
-
- ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
- ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
- if (unlikely(ret != 0)) {
- pr_err("Could not register buffer object swapout\n");
- goto out_no_shrink;
- }
-
atomic_set(&glob->bo_count, 0);
ret = kobject_init_and_add(
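With the ttm_mem_shrink hook gone, ttm_bo.c no longer registers a swapout callback with the memory accounting code; instead the glob->mem_glob->bo_glob back-pointer set above lets ttm_memory.c (and, via the export further down, drivers) invoke ttm_bo_swapout() directly with an operation context. An illustrative caller, assuming a ttm_bo_global *glob in scope:

	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
	};

	/* swap buffers out one at a time until -EBUSY (nothing left) */
	while (ttm_bo_swapout(glob, &ctx) == 0)
		;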
@@ -1519,8 +1518,6 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
if (unlikely(ret != 0))
kobject_put(&glob->kobj);
return ret;
-out_no_shrink:
- __free_page(glob->dummy_read_page);
out_no_drp:
kfree(glob);
return ret;
@@ -1554,16 +1551,13 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
cancel_delayed_work_sync(&bdev->wq);
- while (ttm_bo_delayed_delete(bdev, true))
- ;
+ if (ttm_bo_delayed_delete(bdev, true))
+ pr_debug("Delayed destroy list was clean\n");
spin_lock(&glob->lru_lock);
- if (list_empty(&bdev->ddestroy))
- TTM_DEBUG("Delayed destroy list was clean\n");
-
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
if (list_empty(&bdev->man[0].lru[0]))
- TTM_DEBUG("Swap list %d was clean\n", i);
+ pr_debug("Swap list %d was clean\n", i);
spin_unlock(&glob->lru_lock);
drm_vma_offset_manager_destroy(&bdev->vma_manager);
@@ -1706,21 +1700,20 @@ EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
* A buffer object shrink method that tries to swap out the first
* buffer object on the bo_global::swap_lru list.
*/
-
-static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
+int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
{
- struct ttm_bo_global *glob =
- container_of(shrink, struct ttm_bo_global, shrink);
struct ttm_buffer_object *bo;
int ret = -EBUSY;
+ bool locked;
unsigned i;
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &glob->swap_lru[i], swap) {
- ret = __ttm_bo_reserve(bo, false, true, NULL);
- if (!ret)
+ if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked)) {
+ ret = 0;
break;
+ }
}
if (!ret)
break;
@@ -1734,7 +1727,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
kref_get(&bo->list_kref);
if (!list_empty(&bo->ddestroy)) {
- ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
+ ret = ttm_bo_cleanup_refs(bo, false, false, true);
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
@@ -1748,6 +1741,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
if (bo->mem.mem_type != TTM_PL_SYSTEM ||
bo->ttm->caching_state != tt_cached) {
+ struct ttm_operation_ctx ctx = { false, false };
struct ttm_mem_reg evict_mem;
evict_mem = bo->mem;
@@ -1755,8 +1749,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
evict_mem.mem_type = TTM_PL_SYSTEM;
- ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
- false, false);
+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
if (unlikely(ret != 0))
goto out;
}
@@ -1787,15 +1780,21 @@ out:
* Unreserve without putting on LRU to avoid swapping out an
* already swapped buffer.
*/
-
- __ttm_bo_unreserve(bo);
+ if (locked)
+ reservation_object_unlock(bo->resv);
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
+EXPORT_SYMBOL(ttm_bo_swapout);
void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
- while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+
+ while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
@@ -1822,10 +1821,12 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
return -ERESTARTSYS;
if (!ww_mutex_is_locked(&bo->resv->lock))
goto out_unlock;
- ret = __ttm_bo_reserve(bo, true, false, NULL);
+ ret = reservation_object_lock_interruptible(bo->resv, NULL);
+ if (ret == -EINTR)
+ ret = -ERESTARTSYS;
if (unlikely(ret != 0))
goto out_unlock;
- __ttm_bo_unreserve(bo);
+ reservation_object_unlock(bo->resv);
out_unlock:
mutex_unlock(&bo->wu_mutex);