Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/Makefile          2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_backup.c    207
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c          3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c   250
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c      718
-rw-r--r--  drivers/gpu/drm/ttm/ttm_resource.c   50
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c         83
7 files changed, 1183 insertions(+), 130 deletions(-)
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index dad298127226..40d07a35293a 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,7 +4,7 @@
ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
ttm_execbuf_util.o ttm_range_manager.o ttm_resource.o ttm_pool.o \
- ttm_device.o ttm_sys_manager.o
+ ttm_device.o ttm_sys_manager.o ttm_backup.o
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c
new file mode 100644
index 000000000000..93c007f18855
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_backup.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <drm/ttm/ttm_backup.h>
+#include <linux/page-flags.h>
+#include <linux/swap.h>
+
+/*
+ * Casting from randomized struct file * to struct ttm_backup * is fine since
+ * struct ttm_backup is never defined nor dereferenced.
+ */
+static struct file *ttm_backup_to_file(struct ttm_backup *backup)
+{
+ return (void *)backup;
+}
+
+static struct ttm_backup *ttm_file_to_backup(struct file *file)
+{
+ return (void *)file;
+}
+
+/*
+ * Need to map shmem indices to handles since a handle value
+ * of 0 means error, following the swp_entry_t convention.
+ */
+static unsigned long ttm_backup_shmem_idx_to_handle(pgoff_t idx)
+{
+ return (unsigned long)idx + 1;
+}
+
+static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle)
+{
+ return handle - 1;
+}
+
+/**
+ * ttm_backup_drop() - release memory associated with a handle
+ * @backup: The struct backup pointer used to obtain the handle
+ * @handle: The handle obtained from the @backup_page function.
+ */
+void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle)
+{
+ loff_t start = ttm_backup_handle_to_shmem_idx(handle);
+
+ start <<= PAGE_SHIFT;
+ shmem_truncate_range(file_inode(ttm_backup_to_file(backup)), start,
+ start + PAGE_SIZE - 1);
+}
+
+/**
+ * ttm_backup_copy_page() - Copy the contents of a previously backed
+ * up page
+ * @backup: The struct backup pointer used to back up the page.
+ * @dst: The struct page to copy into.
+ * @handle: The handle returned when the page was backed up.
+ * @intr: Try to perform waits interruptibly or at least killably.
+ *
+ * Return: 0 on success, negative error code on failure, notably
+ * -EINTR if @intr was set to true and a signal is pending.
+ */
+int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
+ pgoff_t handle, bool intr)
+{
+ struct file *filp = ttm_backup_to_file(backup);
+ struct address_space *mapping = filp->f_mapping;
+ struct folio *from_folio;
+ pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle);
+
+ from_folio = shmem_read_folio(mapping, idx);
+ if (IS_ERR(from_folio))
+ return PTR_ERR(from_folio);
+
+ copy_highpage(dst, folio_file_page(from_folio, idx));
+ folio_put(from_folio);
+
+ return 0;
+}
+
+/**
+ * ttm_backup_backup_page() - Backup a page
+ * @backup: The struct backup pointer to use.
+ * @page: The page to back up.
+ * @writeback: Whether to perform immediate writeback of the page.
+ * This may have performance implications.
+ * @idx: A unique integer for each page and each struct backup.
+ * This allows the backup implementation to avoid managing
+ * its address space separately.
+ * @page_gfp: The gfp value used when the page was allocated.
+ * This is used for accounting purposes.
+ * @alloc_gfp: The gfp to be used when allocating memory.
+ *
+ * Context: If called from reclaim context, the caller needs to
+ * assert that the shrinker gfp has __GFP_FS set, to avoid
+ * deadlocking on lock_page(). If @writeback is set to true and
+ * called from reclaim context, the caller also needs to assert
+ * that the shrinker gfp has __GFP_IO set, since without it,
+ * we're not allowed to start backup IO.
+ *
+ * Return: A handle on success. Negative error code on failure.
+ *
+ * Note: This function could be extended to back up a folio and
+ * implementations would then split the folio internally if needed.
+ * The drawback is that the caller would then have to keep track of
+ * the folio size and usage.
+ */
+s64
+ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
+ bool writeback, pgoff_t idx, gfp_t page_gfp,
+ gfp_t alloc_gfp)
+{
+ struct file *filp = ttm_backup_to_file(backup);
+ struct address_space *mapping = filp->f_mapping;
+ unsigned long handle = 0;
+ struct folio *to_folio;
+ int ret;
+
+ to_folio = shmem_read_folio_gfp(mapping, idx, alloc_gfp);
+ if (IS_ERR(to_folio))
+ return PTR_ERR(to_folio);
+
+ folio_mark_accessed(to_folio);
+ folio_lock(to_folio);
+ folio_mark_dirty(to_folio);
+ copy_highpage(folio_file_page(to_folio, idx), page);
+ handle = ttm_backup_shmem_idx_to_handle(idx);
+
+ if (writeback && !folio_mapped(to_folio) &&
+ folio_clear_dirty_for_io(to_folio)) {
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_NONE,
+ .nr_to_write = SWAP_CLUSTER_MAX,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
+ .for_reclaim = 1,
+ };
+ folio_set_reclaim(to_folio);
+ ret = mapping->a_ops->writepage(folio_file_page(to_folio, idx), &wbc);
+ if (!folio_test_writeback(to_folio))
+ folio_clear_reclaim(to_folio);
+ /*
+ * If writepage succeeds, it unlocks the folio.
+ * writepage() errors are otherwise dropped, since writepage()
+ * is only best effort here.
+ */
+ if (ret)
+ folio_unlock(to_folio);
+ } else {
+ folio_unlock(to_folio);
+ }
+
+ folio_put(to_folio);
+
+ return handle;
+}
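
A minimal usage sketch, not part of the patch, of the reclaim-context rules documented above: a hypothetical shrinker-side check (example_backup_allowed() is a made-up name) before calling ttm_backup_backup_page() from reclaim; struct shrink_control and its gfp_mask come from linux/shrinker.h.

#include <linux/shrinker.h>

/* Decide whether ttm_backup_backup_page() may be called from this reclaim context. */
static bool example_backup_allowed(const struct shrink_control *sc, bool writeback)
{
	/* Without __GFP_FS we risk deadlocking on the folio lock. */
	if (!(sc->gfp_mask & __GFP_FS))
		return false;

	/* Without __GFP_IO we are not allowed to start backup (writeback) IO. */
	if (writeback && !(sc->gfp_mask & __GFP_IO))
		return false;

	return true;
}
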
+
+/**
+ * ttm_backup_fini() - Free the struct backup resources after last use.
+ * @backup: Pointer to the struct backup whose resources to free.
+ *
+ * After a call to this function, it's illegal to use the @backup pointer.
+ */
+void ttm_backup_fini(struct ttm_backup *backup)
+{
+ fput(ttm_backup_to_file(backup));
+}
+
+/**
+ * ttm_backup_bytes_avail() - Report the approximate number of bytes of backup space
+ * left for backup.
+ *
+ * This function is also intended for driver use, to indicate whether a
+ * backup attempt is meaningful.
+ *
+ * Return: An approximate size of backup space available.
+ */
+u64 ttm_backup_bytes_avail(void)
+{
+ /*
+ * The idea behind backing up to shmem is that shmem objects may
+ * eventually be swapped out. So there is no point in swapping out if
+ * little or no swap space is available. But the accuracy of this
+ * number also depends on shmem actually swapping out backed-up
+ * shmem objects without too much buffering.
+ */
+ return (u64)get_nr_swap_pages() << PAGE_SHIFT;
+}
+EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail);
+
+/**
+ * ttm_backup_shmem_create() - Create a shmem-based struct backup.
+ * @size: The maximum size (in bytes) to back up.
+ *
+ * Create a backup utilizing shmem objects.
+ *
+ * Return: A pointer to a struct ttm_backup on success,
+ * an error pointer on error.
+ */
+struct ttm_backup *ttm_backup_shmem_create(loff_t size)
+{
+ struct file *filp;
+
+ filp = shmem_file_setup("ttm shmem backup", size, 0);
+
+ return ttm_file_to_backup(filp);
+}
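
The interface added in this file composes into a simple lifecycle: create a shmem-backed object, back a page up, copy it back, drop the handle and finalize. A minimal sketch, not part of the patch, assuming caller-provided src/dst pages; example_backup_roundtrip() is a made-up name.

static int example_backup_roundtrip(struct page *src, struct page *dst)
{
	struct ttm_backup *backup;
	s64 handle;
	int ret;

	/* One shmem object large enough for a single page. */
	backup = ttm_backup_shmem_create(PAGE_SIZE);
	if (IS_ERR(backup))
		return PTR_ERR(backup);

	/* Copy src into shmem; index 0 within this backup object. */
	handle = ttm_backup_backup_page(backup, src, false, 0,
					GFP_HIGHUSER, GFP_KERNEL);
	if (handle < 0) {
		ret = handle;
		goto out;
	}

	/* Read the content back and release the shmem space. */
	ret = ttm_backup_copy_page(backup, dst, handle, true);
	if (!ret)
		ttm_backup_drop(backup, handle);
out:
	ttm_backup_fini(backup);
	return ret;
}
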
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index ea5e49858857..95b86003c50d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -451,7 +451,8 @@ int ttm_bo_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man
int ret = 0;
spin_lock(&bdev->lru_lock);
- res = ttm_resource_manager_first(man, &cursor);
+ ttm_resource_cursor_init(&cursor, man);
+ res = ttm_resource_manager_first(&cursor);
ttm_resource_cursor_fini(&cursor);
if (!res) {
ret = -ENOENT;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d939925efa81..15cab9bda17f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -28,7 +28,7 @@
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-
+#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <drm/ttm/ttm_bo.h>
@@ -769,12 +769,10 @@ error_destroy_tt:
return ret;
}
-static bool ttm_lru_walk_trylock(struct ttm_lru_walk *walk,
+static bool ttm_lru_walk_trylock(struct ttm_operation_ctx *ctx,
struct ttm_buffer_object *bo,
bool *needs_unlock)
{
- struct ttm_operation_ctx *ctx = walk->ctx;
-
*needs_unlock = false;
if (dma_resv_trylock(bo->base.resv)) {
@@ -865,7 +863,8 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
s64 lret;
spin_lock(&bdev->lru_lock);
- ttm_resource_manager_for_each_res(man, &cursor, res) {
+ ttm_resource_cursor_init(&cursor, man);
+ ttm_resource_manager_for_each_res(&cursor, res) {
struct ttm_buffer_object *bo = res->bo;
bool bo_needs_unlock = false;
bool bo_locked = false;
@@ -876,7 +875,7 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
* since if we do it the other way around, and the trylock fails,
* we need to drop the lru lock to put the bo.
*/
- if (ttm_lru_walk_trylock(walk, bo, &bo_needs_unlock))
+ if (ttm_lru_walk_trylock(walk->ctx, bo, &bo_needs_unlock))
bo_locked = true;
else if (!walk->ticket || walk->ctx->no_wait_gpu ||
walk->trylock_only)
@@ -919,3 +918,242 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
return progress;
}
+EXPORT_SYMBOL(ttm_lru_walk_for_evict);
+
+static void ttm_bo_lru_cursor_cleanup_bo(struct ttm_bo_lru_cursor *curs)
+{
+ struct ttm_buffer_object *bo = curs->bo;
+
+ if (bo) {
+ if (curs->needs_unlock)
+ dma_resv_unlock(bo->base.resv);
+ ttm_bo_put(bo);
+ curs->bo = NULL;
+ }
+}
+
+/**
+ * ttm_bo_lru_cursor_fini() - Stop using a struct ttm_bo_lru_cursor
+ * and clean up any iteration it was used for.
+ * @curs: The cursor.
+ */
+void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs)
+{
+ spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+ spin_lock(lru_lock);
+ ttm_resource_cursor_fini(&curs->res_curs);
+ spin_unlock(lru_lock);
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_fini);
+
+/**
+ * ttm_bo_lru_cursor_init() - Initialize a struct ttm_bo_lru_cursor
+ * @curs: The ttm_bo_lru_cursor to initialize.
+ * @man: The ttm resource_manager whose LRU lists to iterate over.
+ * @ctx: The ttm_operation_ctx to govern the locking.
+ *
+ * Initialize a struct ttm_bo_lru_cursor. Currently only trylocked
+ * or prelocked buffer objects are returned, as detailed by
+ * @ctx::resv and @ctx::allow_res_evict. Ticketlocking is not
+ * supported.
+ *
+ * Return: Pointer to @curs. The function does not fail.
+ */
+struct ttm_bo_lru_cursor *
+ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
+ struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx)
+{
+ memset(curs, 0, sizeof(*curs));
+ ttm_resource_cursor_init(&curs->res_curs, man);
+ curs->ctx = ctx;
+
+ return curs;
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_init);
+
+static struct ttm_buffer_object *
+ttm_bo_from_res_reserved(struct ttm_resource *res, struct ttm_bo_lru_cursor *curs)
+{
+ struct ttm_buffer_object *bo = res->bo;
+
+ if (!ttm_lru_walk_trylock(curs->ctx, bo, &curs->needs_unlock))
+ return NULL;
+
+ if (!ttm_bo_get_unless_zero(bo)) {
+ if (curs->needs_unlock)
+ dma_resv_unlock(bo->base.resv);
+ return NULL;
+ }
+
+ curs->bo = bo;
+ return bo;
+}
+
+/**
+ * ttm_bo_lru_cursor_next() - Continue iterating a manager's LRU lists
+ * to find and lock a buffer object.
+ * @curs: The cursor initialized using ttm_bo_lru_cursor_init() and
+ * ttm_bo_lru_cursor_first().
+ *
+ * Return: A pointer to a locked and reference-counted buffer object,
+ * or NULL if none could be found and looping should be terminated.
+ */
+struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
+{
+ spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+ struct ttm_resource *res = NULL;
+ struct ttm_buffer_object *bo;
+
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+
+ spin_lock(lru_lock);
+ for (;;) {
+ res = ttm_resource_manager_next(&curs->res_curs);
+ if (!res)
+ break;
+
+ bo = ttm_bo_from_res_reserved(res, curs);
+ if (bo)
+ break;
+ }
+
+ spin_unlock(lru_lock);
+ return res ? bo : NULL;
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_next);
+
+/**
+ * ttm_bo_lru_cursor_first() - Start iterating a manager's LRU lists
+ * to find and lock a buffer object.
+ * @curs: The cursor initialized using ttm_bo_lru_cursor_init().
+ *
+ * Return: A pointer to a locked and reference-counted buffer object,
+ * or NULL if none could be found and looping should be terminated.
+ */
+struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs)
+{
+ spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+
+ spin_lock(lru_lock);
+ res = ttm_resource_manager_first(&curs->res_curs);
+ if (!res) {
+ spin_unlock(lru_lock);
+ return NULL;
+ }
+
+ bo = ttm_bo_from_res_reserved(res, curs);
+ spin_unlock(lru_lock);
+
+ return bo ? bo : ttm_bo_lru_cursor_next(curs);
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_first);
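
Taken together, the cursor helpers above support a simple walk over a manager's LRU lists. A minimal sketch, not part of the patch; man and ctx are assumed caller-provided and example_walk_lru() is a made-up name.

static void example_walk_lru(struct ttm_resource_manager *man,
			     struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_lru_cursor curs;
	struct ttm_buffer_object *bo;

	for (bo = ttm_bo_lru_cursor_first(ttm_bo_lru_cursor_init(&curs, man, ctx));
	     bo; bo = ttm_bo_lru_cursor_next(&curs)) {
		/* bo is trylocked and reference-counted here. */
	}

	/* Drops any remaining lock/reference and finalizes the resource cursor. */
	ttm_bo_lru_cursor_fini(&curs);
}
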
+
+/**
+ * ttm_bo_shrink() - Helper to shrink a ttm buffer object.
+ * @ctx: The struct ttm_operation_ctx used for the shrinking operation.
+ * @bo: The buffer object.
+ * @flags: Flags governing the shrinking behaviour.
+ *
+ * The function uses the ttm_tt_backup() functionality to back up or
+ * purge a struct ttm_tt. If the bo is not in system memory, it is first
+ * moved there.
+ *
+ * Return: The number of pages shrunken or purged, or
+ * negative error code on failure.
+ */
+long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
+ const struct ttm_bo_shrink_flags flags)
+{
+ static const struct ttm_place sys_placement_flags = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = 0,
+ };
+ static struct ttm_placement sys_placement = {
+ .num_placement = 1,
+ .placement = &sys_placement_flags,
+ };
+ struct ttm_tt *tt = bo->ttm;
+ long lret;
+
+ dma_resv_assert_held(bo->base.resv);
+
+ if (flags.allow_move && bo->resource->mem_type != TTM_PL_SYSTEM) {
+ int ret = ttm_bo_validate(bo, &sys_placement, ctx);
+
+ /* Consider -ENOMEM and -ENOSPC non-fatal. */
+ if (ret) {
+ if (ret == -ENOMEM || ret == -ENOSPC)
+ ret = -EBUSY;
+ return ret;
+ }
+ }
+
+ ttm_bo_unmap_virtual(bo);
+ lret = ttm_bo_wait_ctx(bo, ctx);
+ if (lret < 0)
+ return lret;
+
+ if (bo->bulk_move) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_del_bulk_move(bo->resource, bo);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+
+ lret = ttm_tt_backup(bo->bdev, tt, (struct ttm_backup_flags)
+ {.purge = flags.purge,
+ .writeback = flags.writeback});
+
+ if (lret <= 0 && bo->bulk_move) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_add_bulk_move(bo->resource, bo);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+
+ if (lret < 0 && lret != -EINTR)
+ return -EBUSY;
+
+ return lret;
+}
+EXPORT_SYMBOL(ttm_bo_shrink);
+
+/**
+ * ttm_bo_shrink_suitable() - Whether a bo is suitable for shrinking
+ * @ctx: The struct ttm_operation_ctx governing the shrinking.
+ * @bo: The candidate for shrinking.
+ *
+ * Check whether the object, given the information available to TTM,
+ * is suitable for shrinking. This function can and should be used
+ * before attempting to shrink an object.
+ *
+ * Return: true if suitable, false if not.
+ */
+bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
+{
+ return bo->ttm && ttm_tt_is_populated(bo->ttm) && !bo->pin_count &&
+ (!ctx->no_wait_gpu ||
+ dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP));
+}
+EXPORT_SYMBOL(ttm_bo_shrink_suitable);
+
+/**
+ * ttm_bo_shrink_avoid_wait() - Whether to avoid waiting for GPU
+ * during shrinking
+ *
+ * In some situations, like direct reclaim, waiting (in particular gpu waiting)
+ * should be avoided since it may stall a system that could otherwise make progress
+ * shrinking something else that is less time-consuming.
+ *
+ * Return: true if gpu waiting should be avoided, false if not.
+ */
+bool ttm_bo_shrink_avoid_wait(void)
+{
+ return !current_is_kswapd();
+}
+EXPORT_SYMBOL(ttm_bo_shrink_avoid_wait);
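
A sketch, not part of the patch, of how a driver shrinker might combine the three helpers above for a buffer object obtained from an LRU walk like the one shown earlier; example_try_shrink() is a made-up name.

static long example_try_shrink(struct ttm_operation_ctx *ctx,
			       struct ttm_buffer_object *bo,
			       bool writeback)
{
	struct ttm_bo_shrink_flags flags = {
		.purge = false,
		.writeback = writeback,
		.allow_move = true,
	};

	/* In direct reclaim, avoid stalling on GPU waits. */
	if (ttm_bo_shrink_avoid_wait())
		ctx->no_wait_gpu = true;

	if (!ttm_bo_shrink_suitable(bo, ctx))
		return 0;

	/* Returns pages shrunken or purged, or a negative error code. */
	return ttm_bo_shrink(ctx, bo, flags);
}
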
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 8504dbe19c1a..83b10706ba89 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -41,12 +41,20 @@
#include <asm/set_memory.h>
#endif
+#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_bo.h>
#include "ttm_module.h"
+#ifdef CONFIG_FAULT_INJECTION
+#include <linux/fault-inject.h>
+static DECLARE_FAULT_ATTR(backup_fault_inject);
+#else
+#define should_fail(...) false
+#endif
+
/**
* struct ttm_pool_dma - Helper object for coherent DMA mappings
*
@@ -58,6 +66,52 @@ struct ttm_pool_dma {
unsigned long vaddr;
};
+/**
+ * struct ttm_pool_alloc_state - Current state of the tt page allocation process
+ * @pages: Pointer to the next tt page pointer to populate.
+ * @caching_divide: Pointer to the first page pointer whose page has a staged but
+ * not committed caching transition from write-back to @tt_caching.
+ * @dma_addr: Pointer to the next tt dma_address entry to populate if any.
+ * @remaining_pages: Remaining pages to populate.
+ * @tt_caching: The requested cpu-caching for the pages allocated.
+ */
+struct ttm_pool_alloc_state {
+ struct page **pages;
+ struct page **caching_divide;
+ dma_addr_t *dma_addr;
+ pgoff_t remaining_pages;
+ enum ttm_caching tt_caching;
+};
+
+/**
+ * struct ttm_pool_tt_restore - State representing restore from backup
+ * @pool: The pool used for page allocation while restoring.
+ * @snapshot_alloc: A snapshot of the most recent struct ttm_pool_alloc_state.
+ * @alloced_page: Pointer to the page most recently allocated from a pool or system.
+ * @first_dma: The dma address corresponding to @alloced_page if dma_mapping
+ * is requested.
+ * @alloced_pages: The number of allocated pages present in the struct ttm_tt
+ * page vector from this restore session.
+ * @restored_pages: The number of 4K pages restored for @alloced_page (which
+ * is typically a multi-order page).
+ * @page_caching: The caching requested by the struct ttm_tt.
+ * @order: The order of @alloced_page.
+ *
+ * Recovery from backup might fail when we've recovered less than the
+ * full ttm_tt. In order not to lose any data (yet), keep information
+ * around that allows us to restart a failed ttm backup recovery.
+ */
+struct ttm_pool_tt_restore {
+ struct ttm_pool *pool;
+ struct ttm_pool_alloc_state snapshot_alloc;
+ struct page *alloced_page;
+ dma_addr_t first_dma;
+ pgoff_t alloced_pages;
+ pgoff_t restored_pages;
+ enum ttm_caching page_caching;
+ unsigned int order;
+};
+
static unsigned long page_pool_size;
MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
@@ -160,34 +214,33 @@ static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
kfree(dma);
}
-/* Apply a new caching to an array of pages */
-static int ttm_pool_apply_caching(struct page **first, struct page **last,
- enum ttm_caching caching)
+/* Apply any cpu-caching deferred during page allocation */
+static int ttm_pool_apply_caching(struct ttm_pool_alloc_state *alloc)
{
#ifdef CONFIG_X86
- unsigned int num_pages = last - first;
+ unsigned int num_pages = alloc->pages - alloc->caching_divide;
if (!num_pages)
return 0;
- switch (caching) {
+ switch (alloc->tt_caching) {
case ttm_cached:
break;
case ttm_write_combined:
- return set_pages_array_wc(first, num_pages);
+ return set_pages_array_wc(alloc->caching_divide, num_pages);
case ttm_uncached:
- return set_pages_array_uc(first, num_pages);
+ return set_pages_array_uc(alloc->caching_divide, num_pages);
}
#endif
+ alloc->caching_divide = alloc->pages;
return 0;
}
-/* Map pages of 1 << order size and fill the DMA address array */
+/* DMA Map pages of 1 << order size and return the resulting dma_address. */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
- struct page *p, dma_addr_t **dma_addr)
+ struct page *p, dma_addr_t *dma_addr)
{
dma_addr_t addr;
- unsigned int i;
if (pool->use_dma_alloc) {
struct ttm_pool_dma *dma = (void *)p->private;
@@ -201,10 +254,7 @@ static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
return -EFAULT;
}
- for (i = 1 << order; i ; --i) {
- *(*dma_addr)++ = addr;
- addr += PAGE_SIZE;
- }
+ *dma_addr = addr;
return 0;
}
@@ -354,24 +404,235 @@ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
return p->private;
}
-/* Called when we got a page, either from a pool or newly allocated */
+/*
+ * Split larger pages so that we can free each PAGE_SIZE page as soon
+ * as it has been backed up, in order to avoid memory pressure during
+ * reclaim.
+ */
+static void ttm_pool_split_for_swap(struct ttm_pool *pool, struct page *p)
+{
+ unsigned int order = ttm_pool_page_order(pool, p);
+ pgoff_t nr;
+
+ if (!order)
+ return;
+
+ split_page(p, order);
+ nr = 1UL << order;
+ while (nr--)
+ (p++)->private = 0;
+}
+
+/**
+ * DOC: Partial backup and restoration of a struct ttm_tt.
+ *
+ * Swapout using ttm_backup_backup_page() and swapin using
+ * ttm_backup_copy_page() may fail.
+ * The former most likely due to lack of swap-space or memory, the latter due
+ * to lack of memory or because of signal interruption during waits.
+ *
+ * Backup failure is easily handled by using a ttm_tt pages vector that holds
+ * both backup handles and page pointers. This has to be taken into account when
+ * restoring such a ttm_tt from backup, and when freeing it while backed up.
+ * When restoring, for simplicity, new pages are actually allocated from the
+ * pool and the contents of any old pages are copied in and then the old pages
+ * are released.
+ *
+ * For restoration failures, the struct ttm_pool_tt_restore holds sufficient state
+ * to be able to resume an interrupted restore, and that structure is freed once
+ * the restoration is complete. If the struct ttm_tt is destroyed while there
+ * is a valid struct ttm_pool_tt_restore attached, that is also properly taken
+ * care of.
+ */
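
A small sketch, not part of the patch, of what a single tt->pages[] entry can hold under the scheme described above, using the page-pointer/handle helpers this file relies on (declared in ttm_backup.h); example_inspect_entry() is a made-up name.

static void example_inspect_entry(struct ttm_backup *backup, struct page *p)
{
	if (ttm_backup_page_ptr_is_handle(p)) {
		unsigned long handle = ttm_backup_page_ptr_to_handle(p);

		/* A zero handle means the entry holds no backed-up content. */
		if (handle)
			ttm_backup_drop(backup, handle);
	} else if (p) {
		/* A still-resident page that skipped or failed backup. */
	}
}
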
+
+/* Is restore ongoing for the currently allocated page? */
+static bool ttm_pool_restore_valid(const struct ttm_pool_tt_restore *restore)
+{
+ return restore && restore->restored_pages < (1 << restore->order);
+}
+
+/* DMA unmap and free a multi-order page, either to the relevant pool or to system. */
+static pgoff_t ttm_pool_unmap_and_free(struct ttm_pool *pool, struct page *page,
+ const dma_addr_t *dma_addr, enum ttm_caching caching)
+{
+ struct ttm_pool_type *pt = NULL;
+ unsigned int order;
+ pgoff_t nr;
+
+ if (pool) {
+ order = ttm_pool_page_order(pool, page);
+ nr = (1UL << order);
+ if (dma_addr)
+ ttm_pool_unmap(pool, *dma_addr, nr);
+
+ pt = ttm_pool_select_type(pool, caching, order);
+ } else {
+ order = page->private;
+ nr = (1UL << order);
+ }
+
+ if (pt)
+ ttm_pool_type_give(pt, page);
+ else
+ ttm_pool_free_page(pool, caching, order, page);
+
+ return nr;
+}
+
+/* Populate the page-array using the most recent allocated multi-order page. */
+static void ttm_pool_allocated_page_commit(struct page *allocated,
+ dma_addr_t first_dma,
+ struct ttm_pool_alloc_state *alloc,
+ pgoff_t nr)
+{
+ pgoff_t i;
+
+ for (i = 0; i < nr; ++i)
+ *alloc->pages++ = allocated++;
+
+ alloc->remaining_pages -= nr;
+
+ if (!alloc->dma_addr)
+ return;
+
+ for (i = 0; i < nr; ++i) {
+ *alloc->dma_addr++ = first_dma;
+ first_dma += PAGE_SIZE;
+ }
+}
+
+/*
+ * When restoring, restore backed-up content to the newly allocated page and
+ * if successful, populate the page-table and dma-address arrays.
+ */
+static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore,
+ struct ttm_backup *backup,
+ const struct ttm_operation_ctx *ctx,
+ struct ttm_pool_alloc_state *alloc)
+
+{
+ pgoff_t i, nr = 1UL << restore->order;
+ struct page **first_page = alloc->pages;
+ struct page *p;
+ int ret = 0;
+
+ for (i = restore->restored_pages; i < nr; ++i) {
+ p = first_page[i];
+ if (ttm_backup_page_ptr_is_handle(p)) {
+ unsigned long handle = ttm_backup_page_ptr_to_handle(p);
+
+ if (IS_ENABLED(CONFIG_FAULT_INJECTION) && ctx->interruptible &&
+ should_fail(&backup_fault_inject, 1)) {
+ ret = -EINTR;
+ break;
+ }
+
+ if (handle == 0) {
+ restore->restored_pages++;
+ continue;
+ }
+
+ ret = ttm_backup_copy_page(backup, restore->alloced_page + i,
+ handle, ctx->interruptible);
+ if (ret)
+ break;
+
+ ttm_backup_drop(backup, handle);
+ } else if (p) {
+ /*
+ * We could probably avoid splitting the old page
+ * using clever logic, but ATM we don't care, as
+ * we prioritize releasing memory ASAP. Note that
+ * here, the old retained page is always write-back
+ * cached.
+ */
+ ttm_pool_split_for_swap(restore->pool, p);
+ copy_highpage(restore->alloced_page + i, p);
+ __free_pages(p, 0);
+ }
+
+ restore->restored_pages++;
+ first_page[i] = ttm_backup_handle_to_page_ptr(0);
+ }
+
+ if (ret) {
+ if (!restore->restored_pages) {
+ dma_addr_t *dma_addr = alloc->dma_addr ? &restore->first_dma : NULL;
+
+ ttm_pool_unmap_and_free(restore->pool, restore->alloced_page,
+ dma_addr, restore->page_caching);
+ restore->restored_pages = nr;
+ }
+ return ret;
+ }
+
+ ttm_pool_allocated_page_commit(restore->alloced_page, restore->first_dma,
+ alloc, nr);
+ if (restore->page_caching == alloc->tt_caching || PageHighMem(restore->alloced_page))
+ alloc->caching_divide = alloc->pages;
+ restore->snapshot_alloc = *alloc;
+ restore->alloced_pages += nr;
+
+ return 0;
+}
+
+/* If restoring, save information needed for ttm_pool_restore_commit(). */
+static void
+ttm_pool_page_allocated_restore(struct ttm_pool *pool, unsigned int order,
+ struct page *p,
+ enum ttm_caching page_caching,
+ dma_addr_t first_dma,
+ struct ttm_pool_tt_restore *restore,
+ const struct ttm_pool_alloc_state *alloc)
+{
+ restore->pool = pool;
+ restore->order = order;
+ restore->restored_pages = 0;
+ restore->page_caching = page_caching;
+ restore->first_dma = first_dma;
+ restore->alloced_page = p;
+ restore->snapshot_alloc = *alloc;
+}
+
+/*
+ * Called when we got a page, either from a pool or newly allocated.
+ * If needed, dma map the page and populate the dma address array.
+ * Populate the page address array.
+ * If the caching is consistent, update any deferred caching. Otherwise
+ * stage this page for an upcoming deferred caching update.
+ */
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
- struct page *p, dma_addr_t **dma_addr,
- unsigned long *num_pages,
- struct page ***pages)
+ struct page *p, enum ttm_caching page_caching,
+ struct ttm_pool_alloc_state *alloc,
+ struct ttm_pool_tt_restore *restore)
{
- unsigned int i;
- int r;
+ bool caching_consistent;
+ dma_addr_t first_dma;
+ int r = 0;
+
+ caching_consistent = (page_caching == alloc->tt_caching) || PageHighMem(p);
- if (*dma_addr) {
- r = ttm_pool_map(pool, order, p, dma_addr);
+ if (caching_consistent) {
+ r = ttm_pool_apply_caching(alloc);
if (r)
return r;
}
- *num_pages -= 1 << order;
- for (i = 1 << order; i; --i, ++(*pages), ++p)
- **pages = p;
+ if (alloc->dma_addr) {
+ r = ttm_pool_map(pool, order, p, &first_dma);
+ if (r)
+ return r;
+ }
+
+ if (restore) {
+ ttm_pool_page_allocated_restore(pool, order, p, page_caching,
+ first_dma, restore, alloc);
+ } else {
+ ttm_pool_allocated_page_commit(p, first_dma, alloc, 1UL << order);
+
+ if (caching_consistent)
+ alloc->caching_divide = alloc->pages;
+ }
return 0;
}
@@ -394,53 +655,62 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
pgoff_t start_page, pgoff_t end_page)
{
struct page **pages = &tt->pages[start_page];
- unsigned int order;
+ struct ttm_backup *backup = tt->backup;
pgoff_t i, nr;
for (i = start_page; i < end_page; i += nr, pages += nr) {
- struct ttm_pool_type *pt = NULL;
+ struct page *p = *pages;
- order = ttm_pool_page_order(pool, *pages);
- nr = (1UL << order);
- if (tt->dma_address)
- ttm_pool_unmap(pool, tt->dma_address[i], nr);
+ nr = 1;
+ if (ttm_backup_page_ptr_is_handle(p)) {
+ unsigned long handle = ttm_backup_page_ptr_to_handle(p);
- pt = ttm_pool_select_type(pool, caching, order);
- if (pt)
- ttm_pool_type_give(pt, *pages);
- else
- ttm_pool_free_page(pool, caching, order, *pages);
+ if (handle != 0)
+ ttm_backup_drop(backup, handle);
+ } else if (p) {
+ dma_addr_t *dma_addr = tt->dma_address ?
+ tt->dma_address + i : NULL;
+
+ nr = ttm_pool_unmap_and_free(pool, p, dma_addr, caching);
+ }
}
}
-/**
- * ttm_pool_alloc - Fill a ttm_tt object
- *
- * @pool: ttm_pool to use
- * @tt: ttm_tt object to fill
- * @ctx: operation context
- *
- * Fill the ttm_tt object with pages and also make sure to DMA map them when
- * necessary.
- *
- * Returns: 0 on successe, negative error code otherwise.
+static void ttm_pool_alloc_state_init(const struct ttm_tt *tt,
+ struct ttm_pool_alloc_state *alloc)
+{
+ alloc->pages = tt->pages;
+ alloc->caching_divide = tt->pages;
+ alloc->dma_addr = tt->dma_address;
+ alloc->remaining_pages = tt->num_pages;
+ alloc->tt_caching = tt->caching;
+}
+
+/*
+ * Find a suitable allocation order based on highest desired order
+ * and number of remaining pages
*/
-int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
- struct ttm_operation_ctx *ctx)
+static unsigned int ttm_pool_alloc_find_order(unsigned int highest,
+ const struct ttm_pool_alloc_state *alloc)
+{
+ return min_t(unsigned int, highest, __fls(alloc->remaining_pages));
+}
+
+static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx,
+ struct ttm_pool_alloc_state *alloc,
+ struct ttm_pool_tt_restore *restore)
{
- pgoff_t num_pages = tt->num_pages;
- dma_addr_t *dma_addr = tt->dma_address;
- struct page **caching = tt->pages;
- struct page **pages = tt->pages;
enum ttm_caching page_caching;
gfp_t gfp_flags = GFP_USER;
pgoff_t caching_divide;
unsigned int order;
+ bool allow_pools;
struct page *p;
int r;
- WARN_ON(!num_pages || ttm_tt_is_populated(tt));
- WARN_ON(dma_addr && !pool->dev);
+ WARN_ON(!alloc->remaining_pages || ttm_tt_is_populated(tt));
+ WARN_ON(alloc->dma_addr && !pool->dev);
if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
gfp_flags |= __GFP_ZERO;
@@ -453,86 +723,155 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
else
gfp_flags |= GFP_HIGHUSER;
- for (order = min_t(unsigned int, MAX_PAGE_ORDER, __fls(num_pages));
- num_pages;
- order = min_t(unsigned int, order, __fls(num_pages))) {
+ page_caching = tt->caching;
+ allow_pools = true;
+ for (order = ttm_pool_alloc_find_order(MAX_PAGE_ORDER, alloc);
+ alloc->remaining_pages;
+ order = ttm_pool_alloc_find_order(order, alloc)) {
struct ttm_pool_type *pt;
- page_caching = tt->caching;
- pt = ttm_pool_select_type(pool, tt->caching, order);
- p = pt ? ttm_pool_type_take(pt) : NULL;
- if (p) {
- r = ttm_pool_apply_caching(caching, pages,
- tt->caching);
- if (r)
- goto error_free_page;
-
- caching = pages;
- do {
- r = ttm_pool_page_allocated(pool, order, p,
- &dma_addr,
- &num_pages,
- &pages);
- if (r)
- goto error_free_page;
-
- caching = pages;
- if (num_pages < (1 << order))
- break;
-
- p = ttm_pool_type_take(pt);
- } while (p);
- }
-
- page_caching = ttm_cached;
- while (num_pages >= (1 << order) &&
- (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
-
- if (PageHighMem(p)) {
- r = ttm_pool_apply_caching(caching, pages,
- tt->caching);
- if (r)
- goto error_free_page;
- caching = pages;
- }
- r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
- &num_pages, &pages);
- if (r)
- goto error_free_page;
- if (PageHighMem(p))
- caching = pages;
+ /* First, try to allocate a page from a pool if one exists. */
+ p = NULL;
+ pt = ttm_pool_select_type(pool, page_caching, order);
+ if (pt && allow_pools)
+ p = ttm_pool_type_take(pt);
+ /*
+ * If that fails or previously failed, allocate from system.
+ * Note that this also disallows additional pool allocations using
+ * write-back cached pools of the same order. Consider removing
+ * that behaviour.
+ */
+ if (!p) {
+ page_caching = ttm_cached;
+ allow_pools = false;
+ p = ttm_pool_alloc_page(pool, gfp_flags, order);
}
-
+ /* If that fails, lower the order if possible and retry. */
if (!p) {
if (order) {
--order;
+ page_caching = tt->caching;
+ allow_pools = true;
continue;
}
r = -ENOMEM;
goto error_free_all;
}
+ r = ttm_pool_page_allocated(pool, order, p, page_caching, alloc,
+ restore);
+ if (r)
+ goto error_free_page;
+
+ if (ttm_pool_restore_valid(restore)) {
+ r = ttm_pool_restore_commit(restore, tt->backup, ctx, alloc);
+ if (r)
+ goto error_free_all;
+ }
}
- r = ttm_pool_apply_caching(caching, pages, tt->caching);
+ r = ttm_pool_apply_caching(alloc);
if (r)
goto error_free_all;
+ kfree(tt->restore);
+ tt->restore = NULL;
+
return 0;
error_free_page:
ttm_pool_free_page(pool, page_caching, order, p);
error_free_all:
- num_pages = tt->num_pages - num_pages;
- caching_divide = caching - tt->pages;
+ if (tt->restore)
+ return r;
+
+ caching_divide = alloc->caching_divide - tt->pages;
ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
- ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);
+ ttm_pool_free_range(pool, tt, ttm_cached, caching_divide,
+ tt->num_pages - alloc->remaining_pages);
return r;
}
+
+/**
+ * ttm_pool_alloc - Fill a ttm_tt object
+ *
+ * @pool: ttm_pool to use
+ * @tt: ttm_tt object to fill
+ * @ctx: operation context
+ *
+ * Fill the ttm_tt object with pages and also make sure to DMA map them when
+ * necessary.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ struct ttm_operation_ctx *ctx)
+{
+ struct ttm_pool_alloc_state alloc;
+
+ if (WARN_ON(ttm_tt_is_backed_up(tt)))
+ return -EINVAL;
+
+ ttm_pool_alloc_state_init(tt, &alloc);
+
+ return __ttm_pool_alloc(pool, tt, ctx, &alloc, NULL);
+}
EXPORT_SYMBOL(ttm_pool_alloc);
/**
+ * ttm_pool_restore_and_alloc - Fill a ttm_tt, restoring previously backed-up
+ * content.
+ *
+ * @pool: ttm_pool to use
+ * @tt: ttm_tt object to fill
+ * @ctx: operation context
+ *
+ * Fill the ttm_tt object with pages and also make sure to DMA map them when
+ * necessary. Read in backed-up content.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx)
+{
+ struct ttm_pool_alloc_state alloc;
+
+ if (WARN_ON(!ttm_tt_is_backed_up(tt)))
+ return -EINVAL;
+
+ if (!tt->restore) {
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
+
+ ttm_pool_alloc_state_init(tt, &alloc);
+ if (ctx->gfp_retry_mayfail)
+ gfp |= __GFP_RETRY_MAYFAIL;
+
+ tt->restore = kzalloc(sizeof(*tt->restore), gfp);
+ if (!tt->restore)
+ return -ENOMEM;
+
+ tt->restore->snapshot_alloc = alloc;
+ tt->restore->pool = pool;
+ tt->restore->restored_pages = 1;
+ } else {
+ struct ttm_pool_tt_restore *restore = tt->restore;
+ int ret;
+
+ alloc = restore->snapshot_alloc;
+ if (ttm_pool_restore_valid(tt->restore)) {
+ ret = ttm_pool_restore_commit(restore, tt->backup, ctx, &alloc);
+ if (ret)
+ return ret;
+ }
+ if (!alloc.remaining_pages)
+ return 0;
+ }
+
+ return __ttm_pool_alloc(pool, tt, ctx, &alloc, tt->restore);
+}
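
Because the partial-restore state stays attached to the tt, an interrupted restore can simply be retried with the same call. A sketch, not part of the patch; example_restore() is a made-up name and signal_pending()/current come from linux/sched/signal.h.

#include <linux/sched/signal.h>

static int example_restore(struct ttm_pool *pool, struct ttm_tt *tt,
			   struct ttm_operation_ctx *ctx)
{
	int ret;

	do {
		ret = ttm_pool_restore_and_alloc(pool, tt, ctx);
		/*
		 * -EINTR keeps the partial-restore state in tt->restore, so the
		 * call can be repeated to resume; retry only while no signal is
		 * actually pending (e.g. fault-injection failures).
		 */
	} while (ret == -EINTR && !signal_pending(current));

	return ret;
}
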
+
+/**
* ttm_pool_free - Free the backing pages from a ttm_tt object
*
* @pool: Pool to give pages back to.
@@ -550,6 +889,169 @@ void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
EXPORT_SYMBOL(ttm_pool_free);
/**
+ * ttm_pool_drop_backed_up() - Release content of a swapped-out struct ttm_tt
+ * @tt: The struct ttm_tt.
+ *
+ * Release handles with associated content or any remaining pages of
+ * a backed-up struct ttm_tt.
+ */
+void ttm_pool_drop_backed_up(struct ttm_tt *tt)
+{
+ struct ttm_pool_tt_restore *restore;
+ pgoff_t start_page = 0;
+
+ WARN_ON(!ttm_tt_is_backed_up(tt));
+
+ restore = tt->restore;
+
+ /*
+ * Unmap and free any uncommitted restore page.
+ * Any tt page-array backup entries that have already been read
+ * back have already been cleared.
+ */
+ if (ttm_pool_restore_valid(restore)) {
+ dma_addr_t *dma_addr = tt->dma_address ? &restore->first_dma : NULL;
+
+ ttm_pool_unmap_and_free(restore->pool, restore->alloced_page,
+ dma_addr, restore->page_caching);
+ restore->restored_pages = 1UL << restore->order;
+ }
+
+ /*
+ * If a restore is ongoing, part of the tt pages may have a
+ * caching different than writeback.
+ */
+ if (restore) {
+ pgoff_t mid = restore->snapshot_alloc.caching_divide - tt->pages;
+
+ start_page = restore->alloced_pages;
+ WARN_ON(mid > start_page);
+ /* Pages that might be dma-mapped and non-cached */
+ ttm_pool_free_range(restore->pool, tt, tt->caching,
+ 0, mid);
+ /* Pages that might be dma-mapped but cached */
+ ttm_pool_free_range(restore->pool, tt, ttm_cached,
+ mid, restore->alloced_pages);
+ kfree(restore);
+ tt->restore = NULL;
+ }
+
+ ttm_pool_free_range(NULL, tt, ttm_cached, start_page, tt->num_pages);
+}
+
+/**
+ * ttm_pool_backup() - Back up or purge a struct ttm_tt
+ * @pool: The pool used when allocating the struct ttm_tt.
+ * @tt: The struct ttm_tt.
+ * @flags: Flags to govern the backup behaviour.
+ *
+ * Back up or purge a struct ttm_tt. If @purge is true, then
+ * all pages will be freed directly to the system rather than to the pool
+ * they were allocated from, making the function behave similarly to
+ * ttm_pool_free(). If @purge is false the pages will be backed up instead,
+ * exchanged for handles.
+ * A subsequent call to ttm_pool_restore_and_alloc() will then read back the content and
+ * a subsequent call to ttm_pool_drop_backed_up() will drop it.
+ * If backup of a page fails for whatever reason, @tt will still be
+ * partially backed up, retaining those pages for which backup failed.
+ * In that case, this function can be retried, possibly after freeing up
+ * memory resources.
+ *
+ * Return: Number of pages actually backed up or freed, or negative
+ * error code on error.
+ */
+long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
+ const struct ttm_backup_flags *flags)
+{
+ struct ttm_backup *backup = tt->backup;
+ struct page *page;
+ unsigned long handle;
+ gfp_t alloc_gfp;
+ gfp_t gfp;
+ int ret = 0;
+ pgoff_t shrunken = 0;
+ pgoff_t i, num_pages;
+
+ if (WARN_ON(ttm_tt_is_backed_up(tt)))
+ return -EINVAL;
+
+ if ((!ttm_backup_bytes_avail() && !flags->purge) ||
+ pool->use_dma_alloc || ttm_tt_is_backed_up(tt))
+ return -EBUSY;
+
+#ifdef CONFIG_X86
+ /* Anything returned to the system needs to be cached. */
+ if (tt->caching != ttm_cached)
+ set_pages_array_wb(tt->pages, tt->num_pages);
+#endif
+
+ if (tt->dma_address || flags->purge) {
+ for (i = 0; i < tt->num_pages; i += num_pages) {
+ unsigned int order;
+
+ page = tt->pages[i];
+ if (unlikely(!page)) {
+ num_pages = 1;
+ continue;
+ }
+
+ order = ttm_pool_page_order(pool, page);
+ num_pages = 1UL << order;
+ if (tt->dma_address)
+ ttm_pool_unmap(pool, tt->dma_address[i],
+ num_pages);
+ if (flags->purge) {
+ shrunken += num_pages;
+ page->private = 0;
+ __free_pages(page, order);
+ memset(tt->pages + i, 0,
+ num_pages * sizeof(*tt->pages));
+ }
+ }
+ }
+
+ if (flags->purge)
+ return shrunken;
+
+ if (pool->use_dma32)
+ gfp = GFP_DMA32;
+ else
+ gfp = GFP_HIGHUSER;
+
+ alloc_gfp = GFP_KERNEL | __GFP_HIGH | __GFP_NOWARN | __GFP_RETRY_MAYFAIL;
+
+ num_pages = tt->num_pages;
+
+ /* With fault injection enabled, simulate a partial backup by shrinking only half of the pages. */
+ if (IS_ENABLED(CONFIG_FAULT_INJECTION) && should_fail(&backup_fault_inject, 1))
+ num_pages = DIV_ROUND_UP(num_pages, 2);
+
+ for (i = 0; i < num_pages; ++i) {
+ s64 shandle;
+
+ page = tt->pages[i];
+ if (unlikely(!page))
+ continue;
+
+ ttm_pool_split_for_swap(pool, page);
+
+ shandle = ttm_backup_backup_page(backup, page, flags->writeback, i,
+ gfp, alloc_gfp);
+ if (shandle < 0) {
+ /* We allow partially shrunken tts */
+ ret = shandle;
+ break;
+ }
+ handle = shandle;
+ tt->pages[i] = ttm_backup_handle_to_page_ptr(handle);
+ put_page(page);
+ shrunken++;
+ }
+
+ return shrunken ? shrunken : ret;
+}
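
A sketch, not part of the patch, of the two modes of ttm_pool_backup() as seen from a hypothetical caller (in the patch itself this is driven via ttm_tt_backup()/ttm_bo_shrink()); example_pool_backup() is a made-up name.

static long example_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
				bool purge)
{
	struct ttm_backup_flags flags = {
		/* true: free pages to the system, keeping no backup content. */
		.purge = purge,
		/* false: leave dirty shmem pages for later writeback. */
		.writeback = false,
	};

	/* Returns the number of pages backed up or freed, or a negative error. */
	return ttm_pool_backup(pool, tt, &flags);
}
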
+
+/**
* ttm_pool_init - Initialize a pool
*
* @pool: the pool to initialize
@@ -810,6 +1312,10 @@ int ttm_pool_mgr_init(unsigned long num_pages)
&ttm_pool_debugfs_globals_fops);
debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
&ttm_pool_debugfs_shrink_fops);
+#ifdef CONFIG_FAULT_INJECTION
+ fault_create_debugfs_attr("backup_fault_inject", ttm_debugfs_root,
+ &backup_fault_inject);
+#endif
#endif
mm_shrinker = shrinker_alloc(0, "drm-ttm_pool");
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index cc29bbf3eabb..7e5a60c55813 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -83,6 +83,23 @@ static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk)
}
/**
+ * ttm_resource_cursor_init() - Initialize a struct ttm_resource_cursor
+ * @cursor: The cursor to initialize.
+ * @man: The resource manager.
+ *
+ * Initialize the cursor before using it for iteration.
+ */
+void ttm_resource_cursor_init(struct ttm_resource_cursor *cursor,
+ struct ttm_resource_manager *man)
+{
+ cursor->priority = 0;
+ cursor->man = man;
+ ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
+ INIT_LIST_HEAD(&cursor->bulk_link);
+ INIT_LIST_HEAD(&cursor->hitch.link);
+}
+
+/**
* ttm_resource_cursor_fini() - Finalize the LRU list cursor usage
* @cursor: The struct ttm_resource_cursor to finalize.
*
@@ -252,11 +269,16 @@ static bool ttm_resource_is_swapped(struct ttm_resource *res, struct ttm_buffer_
return ttm_tt_is_swapped(bo->ttm);
}
+static bool ttm_resource_unevictable(struct ttm_resource *res, struct ttm_buffer_object *bo)
+{
+ return bo->pin_count || ttm_resource_is_swapped(res, bo);
+}
+
/* Add the resource to a bulk move if the BO is configured for it */
void ttm_resource_add_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo)
{
- if (bo->bulk_move && !bo->pin_count && !ttm_resource_is_swapped(res, bo))
+ if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
ttm_lru_bulk_move_add(bo->bulk_move, res);
}
@@ -264,7 +286,7 @@ void ttm_resource_add_bulk_move(struct ttm_resource *res,
void ttm_resource_del_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo)
{
- if (bo->bulk_move && !bo->pin_count && !ttm_resource_is_swapped(res, bo))
+ if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
ttm_lru_bulk_move_del(bo->bulk_move, res);
}
@@ -276,10 +298,10 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
lockdep_assert_held(&bo->bdev->lru_lock);
- if (bo->pin_count || ttm_resource_is_swapped(res, bo)) {
+ if (ttm_resource_unevictable(res, bo)) {
list_move_tail(&res->lru.link, &bdev->unevictable);
- } else if (bo->bulk_move) {
+ } else if (bo->bulk_move) {
struct ttm_lru_bulk_move_pos *pos =
ttm_lru_bulk_move_pos(bo->bulk_move, res);
@@ -318,7 +340,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
man = ttm_manager_type(bo->bdev, place->mem_type);
spin_lock(&bo->bdev->lru_lock);
- if (bo->pin_count || ttm_resource_is_swapped(res, bo))
+ if (ttm_resource_unevictable(res, bo))
list_add_tail(&res->lru.link, &bo->bdev->unevictable);
else
list_add_tail(&res->lru.link, &man->lru[bo->priority]);
@@ -612,7 +634,6 @@ ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
/**
* ttm_resource_manager_first() - Start iterating over the resources
* of a resource manager
- * @man: resource manager to iterate over
* @cursor: cursor to record the position
*
* Initializes the cursor and starts iterating. When done iterating,
@@ -621,17 +642,16 @@ ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
* Return: The first resource from the resource manager.
*/
struct ttm_resource *
-ttm_resource_manager_first(struct ttm_resource_manager *man,
- struct ttm_resource_cursor *cursor)
+ttm_resource_manager_first(struct ttm_resource_cursor *cursor)
{
- lockdep_assert_held(&man->bdev->lru_lock);
+ struct ttm_resource_manager *man = cursor->man;
- cursor->priority = 0;
- cursor->man = man;
- ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
- INIT_LIST_HEAD(&cursor->bulk_link);
- list_add(&cursor->hitch.link, &man->lru[cursor->priority]);
+ if (WARN_ON_ONCE(!man))
+ return NULL;
+ lockdep_assert_held(&man->bdev->lru_lock);
+
+ list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
return ttm_resource_manager_next(cursor);
}
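
With this change the cursor is initialized explicitly before the walk instead of inside ttm_resource_manager_first(). A sketch of the resulting calling convention, not part of the patch; example_resource_walk() is a made-up name.

static void example_resource_walk(struct ttm_resource_manager *man)
{
	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;

	/* Initialize the cursor first; the walk itself needs the LRU lock. */
	ttm_resource_cursor_init(&cursor, man);
	spin_lock(&man->bdev->lru_lock);
	ttm_resource_manager_for_each_res(&cursor, res) {
		/* Inspect res under the LRU lock. */
	}
	ttm_resource_cursor_fini(&cursor);
	spin_unlock(&man->bdev->lru_lock);
}
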
@@ -667,8 +687,6 @@ ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
ttm_resource_cursor_clear_bulk(cursor);
}
- ttm_resource_cursor_fini(cursor);
-
return NULL;
}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 3baf215eca23..df0aa6c4b8b8 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -40,6 +40,7 @@
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include <drm/drm_util.h>
+#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>
@@ -158,6 +159,8 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
ttm->swap_storage = NULL;
ttm->sg = bo->sg;
ttm->caching = caching;
+ ttm->restore = NULL;
+ ttm->backup = NULL;
}
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
@@ -182,6 +185,13 @@ void ttm_tt_fini(struct ttm_tt *ttm)
fput(ttm->swap_storage);
ttm->swap_storage = NULL;
+ if (ttm_tt_is_backed_up(ttm))
+ ttm_pool_drop_backed_up(ttm);
+ if (ttm->backup) {
+ ttm_backup_fini(ttm->backup);
+ ttm->backup = NULL;
+ }
+
if (ttm->pages)
kvfree(ttm->pages);
else
@@ -254,6 +264,49 @@ out_err:
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapin);
/**
+ * ttm_tt_backup() - Helper to back up a struct ttm_tt.
+ * @bdev: The TTM device.
+ * @tt: The struct ttm_tt.
+ * @flags: Flags that govern the backup behaviour.
+ *
+ * Update the page accounting and call ttm_pool_backup() to free pages
+ * or back them up.
+ *
+ * Return: Number of pages freed or swapped out, or negative error code on
+ * error.
+ */
+long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_backup_flags flags)
+{
+ long ret;
+
+ if (WARN_ON(IS_ERR_OR_NULL(tt->backup)))
+ return 0;
+
+ ret = ttm_pool_backup(&bdev->pool, tt, &flags);
+ if (ret > 0) {
+ tt->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
+ tt->page_flags |= TTM_TT_FLAG_BACKED_UP;
+ }
+
+ return ret;
+}
+
+int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx)
+{
+ int ret = ttm_pool_restore_and_alloc(&bdev->pool, tt, ctx);
+
+ if (ret)
+ return ret;
+
+ tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_tt_restore);
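
A sketch, not part of the patch, of how a driver's ttm_tt populate hook might use the restore helper above for a backed-up tt, falling back to a plain pool allocation otherwise; example_driver_tt_populate() is a made-up name.

static int example_driver_tt_populate(struct ttm_device *bdev, struct ttm_tt *tt,
				      struct ttm_operation_ctx *ctx)
{
	/* Read back previously backed-up content, if any. */
	if (ttm_tt_is_backed_up(tt))
		return ttm_tt_restore(bdev, tt, ctx);

	return ttm_pool_alloc(&bdev->pool, tt, ctx);
}
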
+
+/**
* ttm_tt_swapout - swap out tt object
*
* @bdev: TTM device structure.
@@ -348,6 +401,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
goto error;
ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
+ ttm->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
ret = ttm_tt_swapin(ttm);
if (unlikely(ret != 0)) {
@@ -477,3 +531,32 @@ unsigned long ttm_tt_pages_limit(void)
return ttm_pages_limit;
}
EXPORT_SYMBOL(ttm_tt_pages_limit);
+
+/**
+ * ttm_tt_setup_backup() - Allocate and assign a backup structure for a ttm_tt
+ * @tt: The ttm_tt for which to allocate and assign a backup structure.
+ *
+ * Assign a backup structure to be used for tt backup. This should
+ * typically be done at bo creation, to avoid allocations at shrinking
+ * time.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ttm_tt_setup_backup(struct ttm_tt *tt)
+{
+ struct ttm_backup *backup =
+ ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT);
+
+ if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)))
+ return -EINVAL;
+
+ if (IS_ERR(backup))
+ return PTR_ERR(backup);
+
+ if (tt->backup)
+ ttm_backup_fini(tt->backup);
+
+ tt->backup = backup;
+ return 0;
+}
+EXPORT_SYMBOL(ttm_tt_setup_backup);
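
A sketch, not part of the patch, of allocating the backup object at ttm_tt creation time, as recommended above. The caller is assumed to pass page_flags that include TTM_TT_FLAG_EXTERNAL_MAPPABLE, which ttm_tt_setup_backup() requires; example_tt_init_with_backup() is a made-up name.

static int example_tt_init_with_backup(struct ttm_tt *tt,
				       struct ttm_buffer_object *bo,
				       uint32_t page_flags)
{
	int ret;

	ret = ttm_tt_init(tt, bo, page_flags, ttm_cached, 0);
	if (ret)
		return ret;

	/* Allocate the shmem backup object up front, not from the shrinker. */
	ret = ttm_tt_setup_backup(tt);
	if (ret)
		ttm_tt_fini(tt);

	return ret;
}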