author     Mika Kuoppala <mika.kuoppala@linux.intel.com>   2015-04-28 17:56:17 +0300
committer  Jani Nikula <jani.nikula@intel.com>             2015-04-30 13:31:24 +0300
commit     75d04a3773ecee617847de963ae4195d6aa74c28 (patch)
tree       0461f2fe4b0bd57cd60175633be7ef4026db0488 /drivers
parent     245054a1fe33c06ad233e0d58a27ec7b64db9284 (diff)
drm/i915/gtt: Allocate va range only if vma is not bound
When we have bound a vma into an address space, the layout of the page
table structures is immutable. So we can be absolutely certain that if
a vma is already bound, there is no need to (re)allocate a virtual
address range for it.

v2: - add sanity checks and remove superfluous GLOBAL_BIND set
    - we might do an update for an unbound vma (Chris)

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=90224
Testcase: igt/gem_exec_big #bdw
Reported-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
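The idea can be shown with a small standalone sketch (hypothetical, simplified C; the
struct fields and helper names below are illustrative stand-ins, not the real i915
definitions): the virtual address range is reserved only on the first bind, and a
rebind that merely adds flags goes straight to the bind step.

/* Simplified sketch of "allocate va range only if vma is not bound"; not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define GLOBAL_BIND (1u << 0)
#define LOCAL_BIND  (1u << 1)

struct vma {
        uint32_t bound;          /* which bindings already exist */
        int      va_allocated;   /* stand-in for reserved page tables */
};

static int allocate_va_range(struct vma *vma)
{
        vma->va_allocated = 1;   /* stand-in for page-table allocation */
        return 0;
}

static int bind_vma(struct vma *vma, uint32_t bind_flags)
{
        vma->bound |= bind_flags;
        return 0;
}

/* Core of the patch: the page-table layout of a bound vma is immutable,
 * so the address range is allocated only when no binding exists yet. */
static int vma_bind(struct vma *vma, uint32_t bind_flags)
{
        int ret;

        if (bind_flags == 0)     /* mirrors the WARN_ON(flags == 0) check */
                return -1;

        if (vma->bound == 0) {
                ret = allocate_va_range(vma);
                if (ret)
                        return ret;
        }

        return bind_vma(vma, bind_flags);
}

int main(void)
{
        struct vma v = { 0 };

        vma_bind(&v, GLOBAL_BIND);   /* first bind: allocates the range */
        vma_bind(&v, LOCAL_BIND);    /* rebind: skips the allocation */
        printf("bound=0x%x va_allocated=%d\n", v.bound, v.va_allocated);
        return 0;
}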
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c  39
1 file changed, 22 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6fae6bdde156..9d3852c521c7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1928,8 +1928,6 @@ static int ggtt_bind_vma(struct i915_vma *vma,
                vma->vm->insert_entries(vma->vm, pages,
                                        vma->node.start,
                                        cache_level, pte_flags);
-
-               vma->bound |= GLOBAL_BIND;
        }

        if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {
@@ -2804,21 +2802,13 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                   u32 flags)
 {
-       int ret = 0;
-       u32 bind_flags = 0;
-
-       if (vma->vm->allocate_va_range) {
-               trace_i915_va_alloc(vma->vm, vma->node.start,
-                                   vma->node.size,
-                                   VM_TO_TRACE_NAME(vma->vm));
+       int ret;
+       u32 bind_flags;

-               ret = vma->vm->allocate_va_range(vma->vm,
-                                                vma->node.start,
-                                                vma->node.size);
-               if (ret)
-                       return ret;
-       }
+       if (WARN_ON(flags == 0))
+               return -EINVAL;

+       bind_flags = 0;
        if (flags & PIN_GLOBAL)
                bind_flags |= GLOBAL_BIND;
        if (flags & PIN_USER)
@@ -2829,8 +2819,23 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
        else
                bind_flags &= ~vma->bound;

-       if (bind_flags)
-               ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
+       if (bind_flags == 0)
+               return 0;
+
+       if (vma->bound == 0 && vma->vm->allocate_va_range) {
+               trace_i915_va_alloc(vma->vm,
+                                   vma->node.start,
+                                   vma->node.size,
+                                   VM_TO_TRACE_NAME(vma->vm));
+
+               ret = vma->vm->allocate_va_range(vma->vm,
+                                                vma->node.start,
+                                                vma->node.size);
+               if (ret)
+                       return ret;
+       }
+
+       ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
        if (ret)
                return ret;