Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_device.c |  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c   | 14
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c     |  6
3 files changed, 16 insertions, 6 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index df4cf5468e7f..7726a72befc5 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -213,7 +213,7 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
         bdev->funcs = funcs;

         ttm_sys_man_init(bdev);
-        ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);
+        ttm_pool_init(&bdev->pool, dev, NUMA_NO_NODE, use_dma_alloc, use_dma32);

         bdev->vma_manager = vma_manager;
         spin_lock_init(&bdev->lru_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 4db3982057be..cddb9151d20f 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -93,7 +93,7 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
                         __GFP_KSWAPD_RECLAIM;

         if (!pool->use_dma_alloc) {
-                p = alloc_pages(gfp_flags, order);
+                p = alloc_pages_node(pool->nid, gfp_flags, order);
                 if (p)
                         p->private = order;
                 return p;
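
For pools created without a node (the default NUMA_NO_NODE passed in from ttm_device_init() above) this hunk is intended to be behaviour-preserving: alloc_pages_node() with NUMA_NO_NODE does not force a node and allocates node-locally, much like the alloc_pages() call it replaces. A minimal illustration, not part of the patch, with a hypothetical helper name:

#include <linux/gfp.h>
#include <linux/numa.h>

/* Hypothetical helper, for illustration only. */
static struct page *example_alloc(gfp_t gfp_flags, unsigned int order)
{
        /* NUMA_NO_NODE: no node is forced, the allocation stays node-local. */
        return alloc_pages_node(NUMA_NO_NODE, gfp_flags, order);
}
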
@@ -287,7 +287,7 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
                                                   enum ttm_caching caching,
                                                   unsigned int order)
 {
-        if (pool->use_dma_alloc)
+        if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
                 return &pool->caching[caching].orders[order];

 #ifdef CONFIG_X86
@@ -545,29 +545,32 @@ EXPORT_SYMBOL(ttm_pool_free);
  *
  * @pool: the pool to initialize
  * @dev: device for DMA allocations and mappings
+ * @nid: NUMA node to use for allocations
  * @use_dma_alloc: true if coherent DMA alloc should be used
  * @use_dma32: true if GFP_DMA32 should be used
  *
  * Initialize the pool and its pool types.
  */
 void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
-                   bool use_dma_alloc, bool use_dma32)
+                   int nid, bool use_dma_alloc, bool use_dma32)
 {
         unsigned int i, j;

         WARN_ON(!dev && use_dma_alloc);

         pool->dev = dev;
+        pool->nid = nid;
         pool->use_dma_alloc = use_dma_alloc;
         pool->use_dma32 = use_dma32;

-        if (use_dma_alloc) {
+        if (use_dma_alloc || nid != NUMA_NO_NODE) {
                 for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
                         for (j = 0; j <= MAX_ORDER; ++j)
                                 ttm_pool_type_init(&pool->caching[i].orders[j],
                                                    pool, i, j);
         }
 }
+EXPORT_SYMBOL(ttm_pool_init);

 /**
  * ttm_pool_fini - Cleanup a pool
@@ -581,7 +584,7 @@ void ttm_pool_fini(struct ttm_pool *pool)
 {
         unsigned int i, j;

-        if (pool->use_dma_alloc) {
+        if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
                 for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
                         for (j = 0; j <= MAX_ORDER; ++j)
                                 ttm_pool_type_fini(&pool->caching[i].orders[j]);
@@ -592,6 +595,7 @@ void ttm_pool_fini(struct ttm_pool *pool)
          */
         synchronize_shrinkers();
 }
+EXPORT_SYMBOL(ttm_pool_fini);

 /* As long as pages are available make sure to release at least one */
 static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
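
With ttm_pool_init() and ttm_pool_fini() now exported and taking a NUMA node id, a driver can create its own node-pinned pool alongside the device-wide one set up by ttm_device_init(). A minimal sketch under that assumption; the example_* names are hypothetical, only the two exported functions and their parameters come from this patch:

#include <linux/device.h>
#include <linux/numa.h>
#include <drm/ttm/ttm_pool.h>

/* Hypothetical per-node bookkeeping, for illustration only. */
struct example_node_pool {
        struct ttm_pool pool;
};

static void example_node_pool_create(struct example_node_pool *np,
                                     struct device *dev, int nid)
{
        /* Pin system-memory allocations from this pool to @nid;
         * no coherent DMA allocations and no GFP_DMA32 here.
         */
        ttm_pool_init(&np->pool, dev, nid, false, false);
}

static void example_node_pool_destroy(struct example_node_pool *np)
{
        ttm_pool_fini(&np->pool);
}

Because ttm_pool_select_type() above now also returns the pool's own per-caching lists whenever a node is set, pages freed back to such a pool stay associated with it rather than landing in the global pools.
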
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 1ce4b36ab33b..e0a77671edd6 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -449,3 +449,9 @@ ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
         return &iter_tt->base;
 }
 EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
+
+unsigned long ttm_tt_pages_limit(void)
+{
+        return ttm_pages_limit;
+}
+EXPORT_SYMBOL(ttm_tt_pages_limit);
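
The new ttm_tt_pages_limit() helper simply returns the existing ttm_pages_limit value so that other modules can query it; the matching declaration is expected in include/drm/ttm/ttm_tt.h, which is outside this diff. A minimal sketch of a caller; the example_* name is hypothetical:

#include <linux/types.h>
#include <linux/mm.h>
#include <drm/ttm/ttm_tt.h>

/* Hypothetical helper: report the TTM system-memory limit in bytes. */
static u64 example_ttm_mem_limit_bytes(void)
{
        return (u64)ttm_tt_pages_limit() << PAGE_SHIFT;
}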