author    Jérôme Glisse <jglisse@redhat.com>    2015-07-09 14:19:30 -0400
committer Dave Airlie <airlied@redhat.com>      2015-07-17 18:18:04 +1000
commit    e9308884878942c618a42cab122df80fbf458dc9 (patch)
tree      2f03adf139e17a71725c97f6a88a18affa5f24be /drivers/gpu/drm/ttm
parent    ef2b731759b9ae6380c9728b60ce92cf32683e94 (diff)
drm/ttm: improve uncached page deallocation.
Calls to set_memory_wb() incur a heavy TLB flush and IPI cost. To minimize those, wait until the pool grows beyond the batch size before draining it.

Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Reviewed-by: Mario Kleiner <mario.kleiner.de@gmail.com>
Reviewed-and-Tested-by: Michel Dänzer <michel@daenzer.net>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
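As an illustration of the batching policy described above, here is a minimal standalone C sketch; the pool struct, BATCH constant and pages_to_drain() helper are hypothetical stand-ins for TTM's dma_pool, NUM_PAGES_TO_ALLOC and the check inside ttm_dma_unpopulate(), not the driver's actual API.

#include <stddef.h>

#define BATCH 64			/* stand-in for NUM_PAGES_TO_ALLOC */

struct pool {
	size_t npages_free;		/* uncached pages parked in the pool */
	size_t max_size;		/* soft cap the pool should shrink back to */
};

/*
 * Return how many pages should be handed back to the system.  Handing pages
 * back is the expensive step, since it ends in set_memory_wb() with its TLB
 * flush and IPIs, so drain only once a full batch has accumulated above the
 * cap: the expensive path then runs once per batch instead of on every
 * unpopulate.
 */
static size_t pages_to_drain(const struct pool *p)
{
	if (p->npages_free >= p->max_size + BATCH)
		return p->npages_free - p->max_size;
	return 0;			/* keep the pages cached in the pool */
}

Compared with the old code, which started draining as soon as npages_free exceeded max_size (and then freed at least NUM_PAGES_TO_ALLOC pages each time), this waits for a full batch to build up first, trading a slightly larger pool for fewer set_memory_wb() calls.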
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index af230802b90b..624d941aaad1 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -963,13 +963,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	} else {
 		pool->npages_free += count;
 		list_splice(&ttm_dma->pages_list, &pool->free_list);
-		if (pool->npages_free > _manager->options.max_size) {
+		/*
+		 * Wait to have at at least NUM_PAGES_TO_ALLOC number of pages
+		 * to free in order to minimize calls to set_memory_wb().
+		 */
+		if (pool->npages_free >= (_manager->options.max_size +
+					  NUM_PAGES_TO_ALLOC))
 			npages = pool->npages_free - _manager->options.max_size;
-			/* free at least NUM_PAGES_TO_ALLOC number of pages
-			 * to reduce calls to set_memory_wb */
-			if (npages < NUM_PAGES_TO_ALLOC)
-				npages = NUM_PAGES_TO_ALLOC;
-		}
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);