-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c           |  13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c       |  26
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h       |  35
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c  |  28
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c        |  22
5 files changed, 108 insertions, 16 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bf5ad5ca7826..3c5e5ff97fb6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3341,6 +3341,17 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
old_write_domain);
}
+static void __i915_vma_iounmap(struct i915_vma *vma)
+{
+ GEM_BUG_ON(vma->pin_count);
+
+ if (vma->iomap == NULL)
+ return;
+
+ io_mapping_unmap(vma->iomap);
+ vma->iomap = NULL;
+}
+
static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
{
struct drm_i915_gem_object *obj = vma->obj;
@@ -3373,6 +3384,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
+
+ __i915_vma_iounmap(vma);
}
trace_i915_vma_unbind(vma);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ca0bf4bb31a7..870234394edd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3640,3 +3640,29 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
return obj->base.size;
}
}
+
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
+{
+ void __iomem *ptr;
+
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ if (WARN_ON(!vma->obj->map_and_fenceable))
+ return ERR_PTR(-ENODEV);
+
+ GEM_BUG_ON(!vma->is_ggtt);
+ GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0);
+
+ ptr = vma->iomap;
+ if (ptr == NULL) {
+ ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
+ vma->node.start,
+ vma->node.size);
+ if (ptr == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ vma->iomap = ptr;
+ }
+
+ vma->pin_count++;
+ return ptr;
+}
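
Worth noting about the function above: the WC mapping is cached in vma->iomap, so repeated pins return the same pointer and the underlying io_mapping_unmap() only happens from __i915_vma_iounmap() when the VMA is unbound. A minimal lifecycle sketch (illustrative, not part of the patch), assuming struct_mutex is held and the VMA is already bound into the mappable GGTT; i915_vma_pin_iomap(), i915_vma_unpin_iomap() and i915_vma_unbind() are the only calls taken from this patch:

/* Sketch only: error handling trimmed to keep the lifecycle visible. */
static void example_iomap_lifecycle(struct i915_vma *vma)
{
        void __iomem *a, *b;

        a = i915_vma_pin_iomap(vma);    /* creates the WC iomap, cached in vma->iomap */
        if (IS_ERR(a))
                return;

        b = i915_vma_pin_iomap(vma);    /* reuses the cached mapping */
        WARN_ON(b != a);                /* same pointer, pin_count bumped twice */

        i915_vma_unpin_iomap(vma);
        i915_vma_unpin_iomap(vma);      /* pins dropped, mapping stays cached */

        i915_vma_unbind(vma);           /* __i915_vma_iounmap() finally unmaps it */
}
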
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index d7dd3d8a8758..b0ae6632c01a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -34,6 +34,8 @@
#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__
+#include <linux/io-mapping.h>
+
struct drm_i915_file_private;
typedef uint32_t gen6_pte_t;
@@ -175,6 +177,7 @@ struct i915_vma {
struct drm_mm_node node;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
+ void __iomem *iomap;
/** Flags and address space this VMA is bound to */
#define GLOBAL_BIND (1<<0)
@@ -559,4 +562,36 @@ size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view);
+/**
+ * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
+ * @vma: VMA to iomap
+ *
+ * The passed-in VMA has to be pinned in the global GTT mappable region.
+ * An extra pinning of the VMA is acquired for the returned iomapping;
+ * the caller must call i915_vma_unpin_iomap() to relinquish the pinning
+ * once the iomapping is no longer required.
+ *
+ * Callers must hold the struct_mutex.
+ *
+ * Returns a valid iomapped pointer or ERR_PTR.
+ */
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
+
+/**
+ * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap
+ * @vma: VMA to unpin
+ *
+ * Unpins the mapping previously obtained with i915_vma_pin_iomap().
+ *
+ * Callers must hold the struct_mutex. This function is only valid to be
+ * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
+ */
+static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ GEM_BUG_ON(vma->pin_count == 0);
+ GEM_BUG_ON(vma->iomap == NULL);
+ vma->pin_count--;
+}
+
#endif
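
As documented above, a typical caller pins the iomapping, writes through the WC pointer and then drops the pin; the intel_fbdev change at the end of this patch is the in-tree user. A minimal caller-side sketch, assuming struct_mutex is held and the VMA is already bound into the mappable GGTT (the helper name and the memset_io() payload are illustrative, not part of the patch):

static int example_clear_vma(struct i915_vma *vma)
{
        void __iomem *vaddr;

        vaddr = i915_vma_pin_iomap(vma);        /* takes an extra pin for the mapping */
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        memset_io(vaddr, 0, vma->node.size);    /* CPU writes go out through the WC aperture */

        i915_vma_unpin_iomap(vma);              /* release the pin taken above */
        return 0;
}
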
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 425e721aac58..2643d18e41f5 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -386,17 +386,33 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
struct drm_i915_private *dev_priv =
container_of(nb, struct drm_i915_private, mm.vmap_notifier);
struct shrinker_lock_uninterruptible slu;
- unsigned long freed_pages;
+ struct i915_vma *vma, *next;
+ unsigned long freed_pages = 0;
+ int ret;
if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
return NOTIFY_DONE;
- freed_pages = i915_gem_shrink(dev_priv, -1UL,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_ACTIVE |
- I915_SHRINK_VMAPS);
+ /* Force everything onto the inactive lists */
+ ret = i915_gpu_idle(dev_priv->dev);
+ if (ret)
+ goto out;
+
+ freed_pages += i915_gem_shrink(dev_priv, -1UL,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_ACTIVE |
+ I915_SHRINK_VMAPS);
+
+ /* We also want to clear any cached iomaps as they wrap vmap */
+ list_for_each_entry_safe(vma, next,
+ &dev_priv->ggtt.base.inactive_list, vm_link) {
+ unsigned long count = vma->node.size >> PAGE_SHIFT;
+ if (vma->iomap && i915_vma_unbind(vma) == 0)
+ freed_pages += count;
+ }
+out:
i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
*(unsigned long *)ptr += freed_pages;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index c0092d9f9a6c..5a101a28063b 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -186,9 +186,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct fb_info *info;
struct drm_framebuffer *fb;
+ struct i915_vma *vma;
struct drm_i915_gem_object *obj;
- int size, ret;
bool prealloc = false;
+ void *vaddr;
+ int ret;
if (intel_fb &&
(sizes->fb_width > intel_fb->base.width ||
@@ -214,7 +216,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
}
obj = intel_fb->obj;
- size = obj->base.size;
mutex_lock(&dev->struct_mutex);
@@ -244,22 +245,23 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &intelfb_ops;
+ vma = i915_gem_obj_to_ggtt(obj);
+
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = ggtt->mappable_end;
- info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
- info->fix.smem_len = size;
+ info->fix.smem_start = dev->mode_config.fb_base + vma->node.start;
+ info->fix.smem_len = vma->node.size;
- info->screen_base =
- ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj),
- size);
- if (!info->screen_base) {
+ vaddr = i915_vma_pin_iomap(vma);
+ if (IS_ERR(vaddr)) {
DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
- ret = -ENOSPC;
+ ret = PTR_ERR(vaddr);
goto out_destroy_fbi;
}
- info->screen_size = size;
+ info->screen_base = vaddr;
+ info->screen_size = vma->node.size;
/* This driver doesn't need a VT switch to restore the mode on resume */
info->skip_vt_switch = true;