Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_pool.c')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_pool.c	718
1 file changed, 612 insertions, 106 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 8504dbe19c1a..83b10706ba89 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -41,12 +41,20 @@
 #include <asm/set_memory.h>
 #endif
 
+#include <drm/ttm/ttm_backup.h>
 #include <drm/ttm/ttm_pool.h>
 #include <drm/ttm/ttm_tt.h>
 #include <drm/ttm/ttm_bo.h>
 
 #include "ttm_module.h"
 
+#ifdef CONFIG_FAULT_INJECTION
+#include <linux/fault-inject.h>
+static DECLARE_FAULT_ATTR(backup_fault_inject);
+#else
+#define should_fail(...) false
+#endif
+
 /**
  * struct ttm_pool_dma - Helper object for coherent DMA mappings
  *
@@ -58,6 +66,52 @@ struct ttm_pool_dma {
 	unsigned long vaddr;
 };
 
+/**
+ * struct ttm_pool_alloc_state - Current state of the tt page allocation process
+ * @pages: Pointer to the next tt page pointer to populate.
+ * @caching_divide: Pointer to the first page pointer whose page has a staged but
+ * not committed caching transition from write-back to @tt_caching.
+ * @dma_addr: Pointer to the next tt dma_address entry to populate if any.
+ * @remaining_pages: Remaining pages to populate.
+ * @tt_caching: The requested cpu-caching for the pages allocated.
+ */
+struct ttm_pool_alloc_state {
+	struct page **pages;
+	struct page **caching_divide;
+	dma_addr_t *dma_addr;
+	pgoff_t remaining_pages;
+	enum ttm_caching tt_caching;
+};
+
+/**
+ * struct ttm_pool_tt_restore - State representing restore from backup
+ * @pool: The pool used for page allocation while restoring.
+ * @snapshot_alloc: A snapshot of the most recent struct ttm_pool_alloc_state.
+ * @alloced_page: Pointer to the page most recently allocated from a pool or system.
+ * @first_dma: The dma address corresponding to @alloced_page if dma_mapping
+ * is requested.
+ * @alloced_pages: The number of allocated pages present in the struct ttm_tt
+ * page vector from this restore session.
+ * @restored_pages: The number of 4K pages restored for @alloced_page (which
+ * is typically a multi-order page).
+ * @page_caching: The struct ttm_tt requested caching.
+ * @order: The order of @alloced_page.
+ *
+ * Recovery from backup might fail when we've recovered less than the
+ * full ttm_tt. In order not to lose any data (yet), keep information
+ * around that allows us to restart a failed ttm backup recovery.
+ */
+struct ttm_pool_tt_restore {
+	struct ttm_pool *pool;
+	struct ttm_pool_alloc_state snapshot_alloc;
+	struct page *alloced_page;
+	dma_addr_t first_dma;
+	pgoff_t alloced_pages;
+	pgoff_t restored_pages;
+	enum ttm_caching page_caching;
+	unsigned int order;
+};
+
 static unsigned long page_pool_size;
 
 MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
@@ -160,34 +214,33 @@ static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
 	kfree(dma);
 }
 
-/* Apply a new caching to an array of pages */
-static int ttm_pool_apply_caching(struct page **first, struct page **last,
-				  enum ttm_caching caching)
+/* Apply any cpu-caching deferred during page allocation */
+static int ttm_pool_apply_caching(struct ttm_pool_alloc_state *alloc)
 {
 #ifdef CONFIG_X86
-	unsigned int num_pages = last - first;
+	unsigned int num_pages = alloc->pages - alloc->caching_divide;
 
 	if (!num_pages)
 		return 0;
 
-	switch (caching) {
+	switch (alloc->tt_caching) {
 	case ttm_cached:
 		break;
 	case ttm_write_combined:
-		return set_pages_array_wc(first, num_pages);
+		return set_pages_array_wc(alloc->caching_divide, num_pages);
 	case ttm_uncached:
-		return set_pages_array_uc(first, num_pages);
+		return set_pages_array_uc(alloc->caching_divide, num_pages);
 	}
 #endif
+	alloc->caching_divide = alloc->pages;
 	return 0;
 }
 
-/* Map pages of 1 << order size and fill the DMA address array */
+/* DMA map pages of 1 << order size and return the resulting dma_address. */
 static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
-			struct page *p, dma_addr_t **dma_addr)
+			struct page *p, dma_addr_t *dma_addr)
 {
 	dma_addr_t addr;
-	unsigned int i;
 
 	if (pool->use_dma_alloc) {
 		struct ttm_pool_dma *dma = (void *)p->private;
@@ -201,10 +254,7 @@ static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
 			return -EFAULT;
 	}
 
-	for (i = 1 << order; i ; --i) {
-		*(*dma_addr)++ = addr;
-		addr += PAGE_SIZE;
-	}
+	*dma_addr = addr;
 
 	return 0;
 }
@@ -354,24 +404,235 @@ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
 	return p->private;
 }
 
-/* Called when we got a page, either from a pool or newly allocated */
+/*
+ * Split larger pages so that we can free each PAGE_SIZE page as soon
+ * as it has been backed up, in order to avoid memory pressure during
+ * reclaim.
+ */
+static void ttm_pool_split_for_swap(struct ttm_pool *pool, struct page *p)
+{
+	unsigned int order = ttm_pool_page_order(pool, p);
+	pgoff_t nr;
+
+	if (!order)
+		return;
+
+	split_page(p, order);
+	nr = 1UL << order;
+	while (nr--)
+		(p++)->private = 0;
+}
+
+/**
+ * DOC: Partial backup and restoration of a struct ttm_tt.
+ *
+ * Swapout using ttm_backup_backup_page() and swapin using
+ * ttm_backup_copy_page() may fail.
+ * The former most likely fails due to lack of swap-space or memory, the
+ * latter due to lack of memory or because of signal interruption during waits.
+ *
+ * Backup failure is easily handled by using a ttm_tt pages vector that holds
+ * both backup handles and page pointers. This has to be taken into account when
+ * restoring such a ttm_tt from backup, and when freeing it while backed up.
+ * When restoring, for simplicity, new pages are actually allocated from the
+ * pool and the contents of any old pages are copied in and then the old pages
+ * are released.
+ *
+ * For restoration failures, the struct ttm_pool_tt_restore holds sufficient state
+ * to be able to resume an interrupted restore, and that structure is freed once
+ * the restoration is complete. If the struct ttm_tt is destroyed while there
+ * is a valid struct ttm_pool_tt_restore attached, that is also properly taken
+ * care of.
+ */
+
+/* Is restore ongoing for the currently allocated page? */
+static bool ttm_pool_restore_valid(const struct ttm_pool_tt_restore *restore)
+{
+	return restore && restore->restored_pages < (1 << restore->order);
+}
+
+/* DMA unmap and free a multi-order page, either to the relevant pool or to system. */
+static pgoff_t ttm_pool_unmap_and_free(struct ttm_pool *pool, struct page *page,
+				       const dma_addr_t *dma_addr, enum ttm_caching caching)
+{
+	struct ttm_pool_type *pt = NULL;
+	unsigned int order;
+	pgoff_t nr;
+
+	if (pool) {
+		order = ttm_pool_page_order(pool, page);
+		nr = (1UL << order);
+		if (dma_addr)
+			ttm_pool_unmap(pool, *dma_addr, nr);
+
+		pt = ttm_pool_select_type(pool, caching, order);
+	} else {
+		order = page->private;
+		nr = (1UL << order);
+	}
+
+	if (pt)
+		ttm_pool_type_give(pt, page);
+	else
+		ttm_pool_free_page(pool, caching, order, page);
+
+	return nr;
+}
+
+/* Populate the page-array using the most recent allocated multi-order page. */
+static void ttm_pool_allocated_page_commit(struct page *allocated,
+					   dma_addr_t first_dma,
+					   struct ttm_pool_alloc_state *alloc,
+					   pgoff_t nr)
+{
+	pgoff_t i;
+
+	for (i = 0; i < nr; ++i)
+		*alloc->pages++ = allocated++;
+
+	alloc->remaining_pages -= nr;
+
+	if (!alloc->dma_addr)
+		return;
+
+	for (i = 0; i < nr; ++i) {
+		*alloc->dma_addr++ = first_dma;
+		first_dma += PAGE_SIZE;
+	}
+}
+
+/*
+ * When restoring, restore backed-up content to the newly allocated page and
+ * if successful, populate the page-table and dma-address arrays.
+ */
+static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore,
+				   struct ttm_backup *backup,
+				   const struct ttm_operation_ctx *ctx,
+				   struct ttm_pool_alloc_state *alloc)
+
+{
+	pgoff_t i, nr = 1UL << restore->order;
+	struct page **first_page = alloc->pages;
+	struct page *p;
+	int ret = 0;
+
+	for (i = restore->restored_pages; i < nr; ++i) {
+		p = first_page[i];
+		if (ttm_backup_page_ptr_is_handle(p)) {
+			unsigned long handle = ttm_backup_page_ptr_to_handle(p);
+
+			if (IS_ENABLED(CONFIG_FAULT_INJECTION) && ctx->interruptible &&
+			    should_fail(&backup_fault_inject, 1)) {
+				ret = -EINTR;
+				break;
+			}
+
+			if (handle == 0) {
+				restore->restored_pages++;
+				continue;
+			}
+
+			ret = ttm_backup_copy_page(backup, restore->alloced_page + i,
+						   handle, ctx->interruptible);
+			if (ret)
+				break;
+
+			ttm_backup_drop(backup, handle);
+		} else if (p) {
+			/*
+			 * We could probably avoid splitting the old page
+			 * using clever logic, but ATM we don't care, as
+			 * we prioritize releasing memory ASAP. Note that
+			 * here, the old retained page is always write-back
+			 * cached.
+			 */
+			ttm_pool_split_for_swap(restore->pool, p);
+			copy_highpage(restore->alloced_page + i, p);
+			__free_pages(p, 0);
+		}
+
+		restore->restored_pages++;
+		first_page[i] = ttm_backup_handle_to_page_ptr(0);
+	}
+
+	if (ret) {
+		if (!restore->restored_pages) {
+			dma_addr_t *dma_addr = alloc->dma_addr ? &restore->first_dma : NULL;
+
+			ttm_pool_unmap_and_free(restore->pool, restore->alloced_page,
+						dma_addr, restore->page_caching);
+			restore->restored_pages = nr;
+		}
+		return ret;
+	}
+
+	ttm_pool_allocated_page_commit(restore->alloced_page, restore->first_dma,
+				       alloc, nr);
+	if (restore->page_caching == alloc->tt_caching || PageHighMem(restore->alloced_page))
+		alloc->caching_divide = alloc->pages;
+	restore->snapshot_alloc = *alloc;
+	restore->alloced_pages += nr;
+
+	return 0;
+}
+
+/* If restoring, save information needed for ttm_pool_restore_commit(). */
+static void
+ttm_pool_page_allocated_restore(struct ttm_pool *pool, unsigned int order,
+				struct page *p,
+				enum ttm_caching page_caching,
+				dma_addr_t first_dma,
+				struct ttm_pool_tt_restore *restore,
+				const struct ttm_pool_alloc_state *alloc)
+{
+	restore->pool = pool;
+	restore->order = order;
+	restore->restored_pages = 0;
+	restore->page_caching = page_caching;
+	restore->first_dma = first_dma;
+	restore->alloced_page = p;
+	restore->snapshot_alloc = *alloc;
+}
+
+/*
+ * Called when we got a page, either from a pool or newly allocated.
+ * If needed, dma map the page and populate the dma address array.
+ * Populate the page address array.
+ * If the caching is consistent, update any deferred caching. Otherwise
+ * stage this page for an upcoming deferred caching update.
+ */
 static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
-				   struct page *p, dma_addr_t **dma_addr,
-				   unsigned long *num_pages,
-				   struct page ***pages)
+				   struct page *p, enum ttm_caching page_caching,
+				   struct ttm_pool_alloc_state *alloc,
+				   struct ttm_pool_tt_restore *restore)
 {
-	unsigned int i;
-	int r;
+	bool caching_consistent;
+	dma_addr_t first_dma;
+	int r = 0;
+
+	caching_consistent = (page_caching == alloc->tt_caching) || PageHighMem(p);
 
-	if (*dma_addr) {
-		r = ttm_pool_map(pool, order, p, dma_addr);
+	if (caching_consistent) {
+		r = ttm_pool_apply_caching(alloc);
 		if (r)
 			return r;
 	}
 
-	*num_pages -= 1 << order;
-	for (i = 1 << order; i; --i, ++(*pages), ++p)
-		**pages = p;
+	if (alloc->dma_addr) {
+		r = ttm_pool_map(pool, order, p, &first_dma);
+		if (r)
+			return r;
+	}
+
+	if (restore) {
+		ttm_pool_page_allocated_restore(pool, order, p, page_caching,
+						first_dma, restore, alloc);
+	} else {
+		ttm_pool_allocated_page_commit(p, first_dma, alloc, 1UL << order);
+
+		if (caching_consistent)
+			alloc->caching_divide = alloc->pages;
+	}
 
 	return 0;
 }
@@ -394,53 +655,62 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
 			       pgoff_t start_page, pgoff_t end_page)
 {
 	struct page **pages = &tt->pages[start_page];
-	unsigned int order;
+	struct ttm_backup *backup = tt->backup;
 	pgoff_t i, nr;
 
 	for (i = start_page; i < end_page; i += nr, pages += nr) {
-		struct ttm_pool_type *pt = NULL;
+		struct page *p = *pages;
 
-		order = ttm_pool_page_order(pool, *pages);
-		nr = (1UL << order);
-		if (tt->dma_address)
-			ttm_pool_unmap(pool, tt->dma_address[i], nr);
+		nr = 1;
+		if (ttm_backup_page_ptr_is_handle(p)) {
+			unsigned long handle = ttm_backup_page_ptr_to_handle(p);
 
-		pt = ttm_pool_select_type(pool, caching, order);
-		if (pt)
-			ttm_pool_type_give(pt, *pages);
-		else
-			ttm_pool_free_page(pool, caching, order, *pages);
+			if (handle != 0)
+				ttm_backup_drop(backup, handle);
+		} else if (p) {
+			dma_addr_t *dma_addr = tt->dma_address ?
				tt->dma_address + i : NULL;
+
+			nr = ttm_pool_unmap_and_free(pool, p, dma_addr, caching);
+		}
 	}
 }
 
-/**
- * ttm_pool_alloc - Fill a ttm_tt object
- *
- * @pool: ttm_pool to use
- * @tt: ttm_tt object to fill
- * @ctx: operation context
- *
- * Fill the ttm_tt object with pages and also make sure to DMA map them when
- * necessary.
- *
- * Returns: 0 on successe, negative error code otherwise.
+static void ttm_pool_alloc_state_init(const struct ttm_tt *tt,
+				      struct ttm_pool_alloc_state *alloc)
+{
+	alloc->pages = tt->pages;
+	alloc->caching_divide = tt->pages;
+	alloc->dma_addr = tt->dma_address;
+	alloc->remaining_pages = tt->num_pages;
+	alloc->tt_caching = tt->caching;
+}
+
+/*
+ * Find a suitable allocation order based on highest desired order
+ * and number of remaining pages.
  */
-int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
-		   struct ttm_operation_ctx *ctx)
+static unsigned int ttm_pool_alloc_find_order(unsigned int highest,
+					      const struct ttm_pool_alloc_state *alloc)
+{
+	return min_t(unsigned int, highest, __fls(alloc->remaining_pages));
+}
+
+static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+			    const struct ttm_operation_ctx *ctx,
+			    struct ttm_pool_alloc_state *alloc,
+			    struct ttm_pool_tt_restore *restore)
 {
-	pgoff_t num_pages = tt->num_pages;
-	dma_addr_t *dma_addr = tt->dma_address;
-	struct page **caching = tt->pages;
-	struct page **pages = tt->pages;
 	enum ttm_caching page_caching;
 	gfp_t gfp_flags = GFP_USER;
 	pgoff_t caching_divide;
 	unsigned int order;
+	bool allow_pools;
 	struct page *p;
 	int r;
 
-	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
-	WARN_ON(dma_addr && !pool->dev);
+	WARN_ON(!alloc->remaining_pages || ttm_tt_is_populated(tt));
+	WARN_ON(alloc->dma_addr && !pool->dev);
 
 	if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
 		gfp_flags |= __GFP_ZERO;
@@ -453,86 +723,155 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 	else
 		gfp_flags |= GFP_HIGHUSER;
 
-	for (order = min_t(unsigned int, MAX_PAGE_ORDER, __fls(num_pages));
-	     num_pages;
-	     order = min_t(unsigned int, order, __fls(num_pages))) {
+	page_caching = tt->caching;
+	allow_pools = true;
+	for (order = ttm_pool_alloc_find_order(MAX_PAGE_ORDER, alloc);
+	     alloc->remaining_pages;
+	     order = ttm_pool_alloc_find_order(order, alloc)) {
 		struct ttm_pool_type *pt;
 
-		page_caching = tt->caching;
-		pt = ttm_pool_select_type(pool, tt->caching, order);
-		p = pt ? ttm_pool_type_take(pt) : NULL;
-		if (p) {
-			r = ttm_pool_apply_caching(caching, pages,
-						   tt->caching);
-			if (r)
-				goto error_free_page;
-
-			caching = pages;
-			do {
-				r = ttm_pool_page_allocated(pool, order, p,
-							    &dma_addr,
-							    &num_pages,
-							    &pages);
-				if (r)
-					goto error_free_page;
-
-				caching = pages;
-				if (num_pages < (1 << order))
-					break;
-
-				p = ttm_pool_type_take(pt);
-			} while (p);
-		}
-
-		page_caching = ttm_cached;
-		while (num_pages >= (1 << order) &&
-		       (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
-
-			if (PageHighMem(p)) {
-				r = ttm_pool_apply_caching(caching, pages,
-							   tt->caching);
-				if (r)
-					goto error_free_page;
-				caching = pages;
-			}
-			r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
-						    &num_pages, &pages);
-			if (r)
-				goto error_free_page;
-			if (PageHighMem(p))
-				caching = pages;
+		/* First, try to allocate a page from a pool if one exists. */
+		p = NULL;
+		pt = ttm_pool_select_type(pool, page_caching, order);
+		if (pt && allow_pools)
+			p = ttm_pool_type_take(pt);
+		/*
+		 * If that fails or previously failed, allocate from system.
+		 * Note that this also disallows additional pool allocations using
+		 * write-back cached pools of the same order. Consider removing
+		 * that behaviour.
+		 */
+		if (!p) {
+			page_caching = ttm_cached;
+			allow_pools = false;
+			p = ttm_pool_alloc_page(pool, gfp_flags, order);
 		}
-
+		/* If that fails, lower the order if possible and retry. */
 		if (!p) {
 			if (order) {
 				--order;
+				page_caching = tt->caching;
+				allow_pools = true;
 				continue;
 			}
 			r = -ENOMEM;
 			goto error_free_all;
 		}
+		r = ttm_pool_page_allocated(pool, order, p, page_caching, alloc,
+					    restore);
+		if (r)
+			goto error_free_page;
+
+		if (ttm_pool_restore_valid(restore)) {
+			r = ttm_pool_restore_commit(restore, tt->backup, ctx, alloc);
+			if (r)
+				goto error_free_all;
+		}
 	}
 
-	r = ttm_pool_apply_caching(caching, pages, tt->caching);
+	r = ttm_pool_apply_caching(alloc);
 	if (r)
 		goto error_free_all;
 
+	kfree(tt->restore);
+	tt->restore = NULL;
+
 	return 0;
 
 error_free_page:
 	ttm_pool_free_page(pool, page_caching, order, p);
 
 error_free_all:
-	num_pages = tt->num_pages - num_pages;
-	caching_divide = caching - tt->pages;
+	if (tt->restore)
+		return r;
+
+	caching_divide = alloc->caching_divide - tt->pages;
 	ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
-	ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);
+	ttm_pool_free_range(pool, tt, ttm_cached, caching_divide,
+			    tt->num_pages - alloc->remaining_pages);
 
 	return r;
 }
+
+/**
+ * ttm_pool_alloc - Fill a ttm_tt object
+ *
+ * @pool: ttm_pool to use
+ * @tt: ttm_tt object to fill
+ * @ctx: operation context
+ *
+ * Fill the ttm_tt object with pages and also make sure to DMA map them when
+ * necessary.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+		   struct ttm_operation_ctx *ctx)
+{
+	struct ttm_pool_alloc_state alloc;
+
+	if (WARN_ON(ttm_tt_is_backed_up(tt)))
+		return -EINVAL;
+
+	ttm_pool_alloc_state_init(tt, &alloc);
+
+	return __ttm_pool_alloc(pool, tt, ctx, &alloc, NULL);
+}
 EXPORT_SYMBOL(ttm_pool_alloc);
 
 /**
+ * ttm_pool_restore_and_alloc - Fill a ttm_tt, restoring previously backed-up
+ * content.
+ *
+ * @pool: ttm_pool to use
+ * @tt: ttm_tt object to fill
+ * @ctx: operation context
+ *
+ * Fill the ttm_tt object with pages and also make sure to DMA map them when
+ * necessary. Read in backed-up content.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+			       const struct ttm_operation_ctx *ctx)
+{
+	struct ttm_pool_alloc_state alloc;
+
+	if (WARN_ON(!ttm_tt_is_backed_up(tt)))
+		return -EINVAL;
+
+	if (!tt->restore) {
+		gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
+
+		ttm_pool_alloc_state_init(tt, &alloc);
+		if (ctx->gfp_retry_mayfail)
+			gfp |= __GFP_RETRY_MAYFAIL;
+
+		tt->restore = kzalloc(sizeof(*tt->restore), gfp);
+		if (!tt->restore)
+			return -ENOMEM;
+
+		tt->restore->snapshot_alloc = alloc;
+		tt->restore->pool = pool;
+		tt->restore->restored_pages = 1;
+	} else {
+		struct ttm_pool_tt_restore *restore = tt->restore;
+		int ret;
+
+		alloc = restore->snapshot_alloc;
+		if (ttm_pool_restore_valid(tt->restore)) {
+			ret = ttm_pool_restore_commit(restore, tt->backup, ctx, &alloc);
+			if (ret)
+				return ret;
+		}
+		if (!alloc.remaining_pages)
+			return 0;
+	}
+
+	return __ttm_pool_alloc(pool, tt, ctx, &alloc, tt->restore);
+}
+
+/**
  * ttm_pool_free - Free the backing pages from a ttm_tt object
  *
  * @pool: Pool to give pages back to.
@@ -550,6 +889,169 @@ void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
 EXPORT_SYMBOL(ttm_pool_free);
 
 /**
+ * ttm_pool_drop_backed_up() - Release content of a swapped-out struct ttm_tt
+ * @tt: The struct ttm_tt.
+ *
+ * Release handles with associated content or any remaining pages of
+ * a backed-up struct ttm_tt.
+ */
+void ttm_pool_drop_backed_up(struct ttm_tt *tt)
+{
+	struct ttm_pool_tt_restore *restore;
+	pgoff_t start_page = 0;
+
+	WARN_ON(!ttm_tt_is_backed_up(tt));
+
+	restore = tt->restore;
+
+	/*
+	 * Unmap and free any uncommitted restore page.
+	 * Any tt page-array backup entries already read back have
+	 * already been cleared.
+	 */
+	if (ttm_pool_restore_valid(restore)) {
+		dma_addr_t *dma_addr = tt->dma_address ? &restore->first_dma : NULL;
+
+		ttm_pool_unmap_and_free(restore->pool, restore->alloced_page,
+					dma_addr, restore->page_caching);
+		restore->restored_pages = 1UL << restore->order;
+	}
+
+	/*
+	 * If a restore is ongoing, part of the tt pages may have a
+	 * caching different than writeback.
+	 */
+	if (restore) {
+		pgoff_t mid = restore->snapshot_alloc.caching_divide - tt->pages;
+
+		start_page = restore->alloced_pages;
+		WARN_ON(mid > start_page);
+		/* Pages that might be dma-mapped and non-cached */
+		ttm_pool_free_range(restore->pool, tt, tt->caching,
+				    0, mid);
+		/* Pages that might be dma-mapped but cached */
+		ttm_pool_free_range(restore->pool, tt, ttm_cached,
+				    mid, restore->alloced_pages);
+		kfree(restore);
+		tt->restore = NULL;
+	}
+
+	ttm_pool_free_range(NULL, tt, ttm_cached, start_page, tt->num_pages);
+}
+
+/**
+ * ttm_pool_backup() - Back up or purge a struct ttm_tt
+ * @pool: The pool used when allocating the struct ttm_tt.
+ * @tt: The struct ttm_tt.
+ * @flags: Flags to govern the backup behaviour.
+ *
+ * Back up or purge a struct ttm_tt. If @purge is true, then
+ * all pages will be freed directly to the system rather than to the pool
+ * they were allocated from, making the function behave similarly to
+ * ttm_pool_free(). If @purge is false the pages will be backed up instead,
+ * exchanged for handles.
+ * A subsequent call to ttm_pool_restore_and_alloc() will then read back the content and
+ * a subsequent call to ttm_pool_drop_backed_up() will drop it.
+ * If backup of a page fails for whatever reason, @tt will still be
+ * partially backed up, retaining those pages for which backup fails.
+ * In that case, this function can be retried, possibly after freeing up
+ * memory resources.
+ *
+ * Return: Number of pages actually backed up or freed, or negative
+ * error code on error.
+ */
+long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
+		     const struct ttm_backup_flags *flags)
+{
+	struct ttm_backup *backup = tt->backup;
+	struct page *page;
+	unsigned long handle;
+	gfp_t alloc_gfp;
+	gfp_t gfp;
+	int ret = 0;
+	pgoff_t shrunken = 0;
+	pgoff_t i, num_pages;
+
+	if (WARN_ON(ttm_tt_is_backed_up(tt)))
+		return -EINVAL;
+
+	if ((!ttm_backup_bytes_avail() && !flags->purge) ||
+	    pool->use_dma_alloc || ttm_tt_is_backed_up(tt))
+		return -EBUSY;
+
+#ifdef CONFIG_X86
+	/* Anything returned to the system needs to be cached. */
+	if (tt->caching != ttm_cached)
+		set_pages_array_wb(tt->pages, tt->num_pages);
+#endif
+
+	if (tt->dma_address || flags->purge) {
+		for (i = 0; i < tt->num_pages; i += num_pages) {
+			unsigned int order;
+
+			page = tt->pages[i];
+			if (unlikely(!page)) {
+				num_pages = 1;
+				continue;
+			}
+
+			order = ttm_pool_page_order(pool, page);
+			num_pages = 1UL << order;
+			if (tt->dma_address)
+				ttm_pool_unmap(pool, tt->dma_address[i],
					       num_pages);
+			if (flags->purge) {
+				shrunken += num_pages;
+				page->private = 0;
+				__free_pages(page, order);
+				memset(tt->pages + i, 0,
				       num_pages * sizeof(*tt->pages));
+			}
+		}
+	}
+
+	if (flags->purge)
+		return shrunken;
+
+	if (pool->use_dma32)
+		gfp = GFP_DMA32;
+	else
+		gfp = GFP_HIGHUSER;
+
+	alloc_gfp = GFP_KERNEL | __GFP_HIGH | __GFP_NOWARN | __GFP_RETRY_MAYFAIL;
+
+	num_pages = tt->num_pages;
+
+	/* Pretend doing fault injection by shrinking only half of the pages. */
+	if (IS_ENABLED(CONFIG_FAULT_INJECTION) && should_fail(&backup_fault_inject, 1))
+		num_pages = DIV_ROUND_UP(num_pages, 2);
+
+	for (i = 0; i < num_pages; ++i) {
+		s64 shandle;
+
+		page = tt->pages[i];
+		if (unlikely(!page))
+			continue;
+
+		ttm_pool_split_for_swap(pool, page);
+
+		shandle = ttm_backup_backup_page(backup, page, flags->writeback, i,
						 gfp, alloc_gfp);
+		if (shandle < 0) {
+			/* We allow partially shrunken tts */
+			ret = shandle;
+			break;
+		}
+		handle = shandle;
+		tt->pages[i] = ttm_backup_handle_to_page_ptr(handle);
+		put_page(page);
+		shrunken++;
+	}
+
+	return shrunken ? shrunken : ret;
+}
+
+/**
  * ttm_pool_init - Initialize a pool
  *
  * @pool: the pool to initialize
@@ -810,6 +1312,10 @@ int ttm_pool_mgr_init(unsigned long num_pages)
 			    &ttm_pool_debugfs_globals_fops);
 	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
 			    &ttm_pool_debugfs_shrink_fops);
+#ifdef CONFIG_FAULT_INJECTION
+	fault_create_debugfs_attr("backup_fault_inject", ttm_debugfs_root,
+				  &backup_fault_inject);
+#endif
 #endif
 
 	mm_shrinker = shrinker_alloc(0, "drm-ttm_pool");
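For orientation, here is a hedged, driver-side sketch (not part of the patch) of how the new ttm_pool_backup() entry point is intended to be called from a shrinker-like path. Only the ttm_pool_backup() signature, the ttm_backup_flags members used by the patch (purge, writeback) and the return convention are taken from the diff above; the wrapper function and its policy choices are hypothetical.

	/* Headers as used by the patch itself. */
	#include <drm/ttm/ttm_backup.h>
	#include <drm/ttm/ttm_pool.h>
	#include <drm/ttm/ttm_tt.h>

	/*
	 * Hypothetical shrinker helper: backs up (or purges) the pages of a
	 * populated, not-yet-backed-up ttm_tt. Assumes the driver has already
	 * assigned a backup object to tt->backup.
	 */
	static long example_shrink_tt(struct ttm_pool *pool, struct ttm_tt *tt,
				      bool purge)
	{
		struct ttm_backup_flags flags = {
			.purge = purge,		/* free pages instead of backing them up */
			.writeback = !purge,	/* hypothetical policy: start writeback when backing up */
		};
		long shrunken;

		/* Returns the number of pages backed up or freed, or a negative errno. */
		shrunken = ttm_pool_backup(pool, tt, &flags);
		if (shrunken < 0)
			return shrunken;

		/*
		 * A short count is allowed: backup may stop early on lack of memory
		 * or swap space, leaving the ttm_tt partially backed up. Pages that
		 * were already exchanged for handles stay backed up, so the call can
		 * simply be retried later.
		 */
		return shrunken;
	}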
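Correspondingly, a sketch of the swap-in and teardown side. Again, only ttm_pool_restore_and_alloc(), ttm_pool_drop_backed_up(), ttm_pool_free(), ttm_tt_is_backed_up() and the ttm_operation_ctx fields the patch relies on (interruptible, gfp_retry_mayfail) come from the diff; the example_* wrappers are hypothetical.

	#include <drm/ttm/ttm_bo.h>	/* struct ttm_operation_ctx */
	#include <drm/ttm/ttm_pool.h>
	#include <drm/ttm/ttm_tt.h>

	/* Hypothetical swap-in helper for a previously backed-up ttm_tt. */
	static int example_swapin_tt(struct ttm_pool *pool, struct ttm_tt *tt)
	{
		struct ttm_operation_ctx ctx = {
			.interruptible = true,
			.gfp_retry_mayfail = true,
		};

		/*
		 * On failure (e.g. -EINTR or -ENOMEM) the partially completed
		 * restore state is kept in tt->restore, so a later call resumes
		 * where this one stopped instead of starting over.
		 */
		return ttm_pool_restore_and_alloc(pool, tt, &ctx);
	}

	/*
	 * Hypothetical teardown helper: a ttm_tt destroyed while still
	 * (partially) backed up is released with ttm_pool_drop_backed_up()
	 * rather than ttm_pool_free().
	 */
	static void example_release_tt(struct ttm_pool *pool, struct ttm_tt *tt)
	{
		if (ttm_tt_is_backed_up(tt))
			ttm_pool_drop_backed_up(tt);
		else
			ttm_pool_free(pool, tt);
	}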