author		Dave Airlie <airlied@redhat.com>	2010-11-22 13:24:40 +1000
committer	Dave Airlie <airlied@redhat.com>	2010-11-22 13:24:40 +1000
commit		d6ea88865d3e5b0c62040531310c1f2c6a994f46 (patch)
tree		b80a7cbc6eeab003b412e3037fd335ce9d572f67 /drivers/gpu/drm/ttm
parent		27641c3f003e7f3b6585c01d8a788883603eb262 (diff)
drm/ttm: Add a bo list reserve fastpath (v2)
Makes it possible to reserve a list of buffer objects with a single
spin lock / unlock if there is no contention.
Should improve cpu usage on SMP kernels.

v2: Initialize private list members on reserve and don't call
ttm_bo_list_ref_sub() with zero put_count.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c		32
-rw-r--r--	drivers/gpu/drm/ttm/ttm_execbuf_util.c	124
2 files changed, 129 insertions(+), 27 deletions(-)
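For context, here is how a driver would typically sit on top of this interface; a minimal sketch, assuming a hypothetical my_reserve_bos() helper (only ttm_eu_reserve_buffers() and ttm_eu_backoff_reservation() come from the code this patch touches):

/*
 * Hypothetical caller sketch, not part of this patch: build a
 * ttm_validate_buffer list and reserve it through the new fastpath.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include "ttm/ttm_execbuf_util.h"

static int my_reserve_bos(struct ttm_buffer_object **bos, int n,
			  uint32_t val_seq)
{
	struct ttm_validate_buffer *bufs;
	struct list_head list;
	int i, ret;

	bufs = kcalloc(n, sizeof(*bufs), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	INIT_LIST_HEAD(&list);
	for (i = 0; i < n; i++) {
		bufs[i].bo = bos[i];
		list_add_tail(&bufs[i].head, &list);
	}

	/* Uncontended case: one lru_lock round-trip reserves them all. */
	ret = ttm_eu_reserve_buffers(&list, val_seq);
	if (ret == 0) {
		/* ... validate buffers and submit work here ... */
		ttm_eu_backoff_reservation(&list);
	}

	kfree(bufs);
	return ret;
}

Before this patch the loop inside ttm_eu_reserve_buffers() took and dropped the LRU lock once per buffer via ttm_bo_reserve(); afterwards the whole list is reserved under a single lock acquisition when nothing is contended.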
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 148a322d8f5d..a586378b1b2b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -169,7 +169,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
 }
 EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 
-static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man;
@@ -191,11 +191,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 	}
 }
 
-/**
- * Call with the lru_lock held.
- */
-
-static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
 	int put_count = 0;
@@ -267,6 +263,15 @@ static void ttm_bo_ref_bug(struct kref *list_kref)
 	BUG();
 }
 
+void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+			 bool never_free)
+{
+	while (count--)
+		kref_put(&bo->list_kref,
+			 (never_free || (count > 0)) ? ttm_bo_ref_bug :
+			 ttm_bo_release_list);
+}
+
 int ttm_bo_reserve(struct ttm_buffer_object *bo,
 		   bool interruptible,
 		   bool no_wait, bool use_sequence, uint32_t sequence)
@@ -282,8 +287,7 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 	put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
 
-	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	return ret;
 }
@@ -496,8 +500,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		spin_unlock(&glob->lru_lock);
 		ttm_bo_cleanup_memtype_use(bo);
 
-		while (put_count--)
-			kref_put(&bo->list_kref, ttm_bo_ref_bug);
+		ttm_bo_list_ref_sub(bo, put_count, true);
 
 		return;
 	} else {
@@ -580,8 +583,7 @@ retry:
 	spin_unlock(&glob->lru_lock);
 	ttm_bo_cleanup_memtype_use(bo);
 
-	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	return 0;
 }
@@ -802,8 +804,7 @@ retry:
 	BUG_ON(ret != 0);
 
-	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
 	ttm_bo_unreserve(bo);
@@ -1783,8 +1784,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
 
-	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	/**
 	 * Wait for GPU, then move to system cached.
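
The ttm_bo.c hunks above all follow one pattern: an open-coded kref_put() loop on list_kref becomes a call to the batched ttm_bo_list_ref_sub() helper. A rough user-space model of the helper's contract (an illustration, not the kernel API):

/*
 * Model of ttm_bo_list_ref_sub() semantics: drop `count` list
 * references in one step. With never_free set, the count reaching
 * zero is treated as a caller bug (the kernel helper selects
 * ttm_bo_ref_bug(), which BUG()s, instead of the release function).
 */
#include <assert.h>
#include <stdbool.h>

struct obj { int list_refs; };

static void list_ref_sub(struct obj *o, int count, bool never_free)
{
	assert(o->list_refs >= count);	/* underflow is a bug */
	o->list_refs -= count;
	if (o->list_refs == 0) {
		assert(!never_free);	/* kernel: ttm_bo_ref_bug() */
		/* kernel: ttm_bo_release_list() would free the object */
	}
}

Every call site in this patch passes never_free = true: the references dropped here are LRU bookkeeping references, and the object must not be destroyed at these points.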
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c285c2902d15..201a71d111ec 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,6 +32,72 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+		if (!entry->reserved)
+			continue;
+
+		if (entry->removed) {
+			ttm_bo_add_to_lru(bo);
+			entry->removed = false;
+		}
+		entry->reserved = false;
+		atomic_set(&bo->reserved, 0);
+		wake_up_all(&bo->event_queue);
+	}
+}
+
+static void ttm_eu_del_from_lru_locked(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+		if (!entry->reserved)
+			continue;
+
+		if (!entry->removed) {
+			entry->put_count = ttm_bo_del_from_lru(bo);
+			entry->removed = true;
+		}
+	}
+}
+
+static void ttm_eu_list_ref_sub(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+
+		if (entry->put_count) {
+			ttm_bo_list_ref_sub(bo, entry->put_count, true);
+			entry->put_count = 0;
+		}
+	}
+}
+
+static int ttm_eu_wait_unreserved_locked(struct list_head *list,
+					 struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	int ret;
+
+	ttm_eu_del_from_lru_locked(list);
+	spin_unlock(&glob->lru_lock);
+	ret = ttm_bo_wait_unreserved(bo, true);
+	spin_lock(&glob->lru_lock);
+	if (unlikely(ret != 0))
+		ttm_eu_backoff_reservation_locked(list);
+	return ret;
+}
+
 void ttm_eu_backoff_reservation(struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
@@ -61,35 +127,71 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
 {
+	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
 	int ret;
 
+	if (list_empty(list))
+		return 0;
+
+	list_for_each_entry(entry, list, head) {
+		entry->reserved = false;
+		entry->put_count = 0;
+		entry->removed = false;
+	}
+
+	entry = list_first_entry(list, struct ttm_validate_buffer, head);
+	glob = entry->bo->glob;
+
 retry:
+	spin_lock(&glob->lru_lock);
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		entry->reserved = false;
-		ret = ttm_bo_reserve(bo, true, false, true, val_seq);
-		if (ret != 0) {
-			ttm_eu_backoff_reservation(list);
-			if (ret == -EAGAIN) {
-				ret = ttm_bo_wait_unreserved(bo, true);
-				if (unlikely(ret != 0))
-					return ret;
-				goto retry;
-			} else
+retry_this_bo:
+		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+		switch (ret) {
+		case 0:
+			break;
+		case -EBUSY:
+			ret = ttm_eu_wait_unreserved_locked(list, bo);
+			if (unlikely(ret != 0)) {
+				spin_unlock(&glob->lru_lock);
+				ttm_eu_list_ref_sub(list);
+				return ret;
+			}
+			goto retry_this_bo;
+		case -EAGAIN:
+			ttm_eu_backoff_reservation_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ttm_eu_list_ref_sub(list);
+			ret = ttm_bo_wait_unreserved(bo, true);
+			if (unlikely(ret != 0))
 				return ret;
+			goto retry;
+		default:
+			ttm_eu_backoff_reservation_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ttm_eu_list_ref_sub(list);
+			return ret;
 		}
 
 		entry->reserved = true;
 		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-			ttm_eu_backoff_reservation(list);
+			ttm_eu_backoff_reservation_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ttm_eu_list_ref_sub(list);
 			ret = ttm_bo_wait_cpu(bo, false);
 			if (ret)
 				return ret;
 			goto retry;
		}
 	}
+
+	ttm_eu_del_from_lru_locked(list);
+	spin_unlock(&glob->lru_lock);
+	ttm_eu_list_ref_sub(list);
+
 	return 0;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
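
Taken together, the rewritten ttm_eu_reserve_buffers() takes the LRU lock once, try-reserves every buffer in the list, and only drops the lock to sleep when it hits a contended buffer. A self-contained user-space model of that control flow (an analogy, not the kernel code; the wait-and-retry path is reduced to a plain failure return):

/*
 * Model of the list-reserve fastpath: one "lru_lock" round-trip,
 * per-object trylock via compare-and-swap, full backoff on contention.
 * Compile with: cc -std=c11 -pthread model.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define N_OBJS 4

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

struct obj {
	atomic_bool reserved;
};

static struct obj objs[N_OBJS];

/* Returns true if every object was reserved in one lock round-trip. */
static bool reserve_list_fastpath(struct obj *o, int n)
{
	int i, j;

	pthread_mutex_lock(&lru_lock);
	for (i = 0; i < n; i++) {
		bool expected = false;

		/* Analogue of ttm_bo_reserve_locked() without waiting. */
		if (!atomic_compare_exchange_strong(&o[i].reserved,
						    &expected, true)) {
			/* Contended: back off everything reserved so far
			 * (the kernel instead waits on the buffer and
			 * retries). */
			for (j = 0; j < i; j++)
				atomic_store(&o[j].reserved, false);
			pthread_mutex_unlock(&lru_lock);
			return false;
		}
	}
	pthread_mutex_unlock(&lru_lock);
	return true;
}

int main(void)
{
	printf("reserved all: %s\n",
	       reserve_list_fastpath(objs, N_OBJS) ? "yes" : "no");
	return 0;
}

In the uncontended case this is exactly one lock/unlock pair regardless of list length, which is the cpu-usage win the commit message claims for SMP kernels.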