Diffstat (limited to 'include/net/page_pool/helpers.h')
-rw-r--r--  include/net/page_pool/helpers.h  82
1 file changed, 60 insertions(+), 22 deletions(-)
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index 60a5347922be..582a3d00cbe2 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -104,8 +104,7 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
*
* Get a page fragment from the page allocator or page_pool caches.
*
- * Return:
- * Return allocated page fragment, otherwise return NULL.
+ * Return: allocated page fragment, otherwise return NULL.
*/
static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
unsigned int *offset,
@@ -116,22 +115,22 @@ static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
return page_pool_alloc_frag(pool, offset, size, gfp);
}
-static inline struct page *page_pool_alloc(struct page_pool *pool,
- unsigned int *offset,
- unsigned int *size, gfp_t gfp)
+static inline netmem_ref page_pool_alloc_netmem(struct page_pool *pool,
+ unsigned int *offset,
+ unsigned int *size, gfp_t gfp)
{
unsigned int max_size = PAGE_SIZE << pool->p.order;
- struct page *page;
+ netmem_ref netmem;
if ((*size << 1) > max_size) {
*size = max_size;
*offset = 0;
- return page_pool_alloc_pages(pool, gfp);
+ return page_pool_alloc_netmems(pool, gfp);
}
- page = page_pool_alloc_frag(pool, offset, *size, gfp);
- if (unlikely(!page))
- return NULL;
+ netmem = page_pool_alloc_frag_netmem(pool, offset, *size, gfp);
+ if (unlikely(!netmem))
+ return 0;
/* There is very likely not enough space for another fragment, so append
* the remaining size to the current fragment to avoid truesize
@@ -142,7 +141,23 @@ static inline struct page *page_pool_alloc(struct page_pool *pool,
pool->frag_offset = max_size;
}
- return page;
+ return netmem;
+}
+
+static inline netmem_ref page_pool_dev_alloc_netmem(struct page_pool *pool,
+ unsigned int *offset,
+ unsigned int *size)
+{
+ gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
+
+ return page_pool_alloc_netmem(pool, offset, size, gfp);
+}
+
+static inline struct page *page_pool_alloc(struct page_pool *pool,
+ unsigned int *offset,
+ unsigned int *size, gfp_t gfp)
+{
+ return netmem_to_page(page_pool_alloc_netmem(pool, offset, size, gfp));
}
/**
@@ -155,8 +170,7 @@ static inline struct page *page_pool_alloc(struct page_pool *pool,
* depending on the requested size in order to allocate memory with least memory
* utilization and performance penalty.
*
- * Return:
- * Return allocated page or page fragment, otherwise return NULL.
+ * Return: allocated page or page fragment, otherwise return NULL.
*/
static inline struct page *page_pool_dev_alloc(struct page_pool *pool,
unsigned int *offset,
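For context, a minimal sketch of how a driver RX refill path might use the page_pool_dev_alloc_netmem() added above; struct example_rxq and its fields are illustrative assumptions, only the page_pool calls are from this header:

/* Hypothetical RX refill: only the page_pool helpers are real. */
static int example_rx_refill(struct page_pool *pool, struct example_rxq *rxq)
{
	unsigned int offset, size = rxq->buf_len; /* in: wanted, out: granted */
	netmem_ref netmem;

	netmem = page_pool_dev_alloc_netmem(pool, &offset, &size);
	if (!netmem)
		return -ENOMEM;

	/* Program the descriptor with the DMA address of the fragment. */
	rxq->desc->addr = page_pool_get_dma_addr_netmem(netmem) + offset;
	rxq->desc->len = size;
	rxq->netmem = netmem;
	return 0;
}

Note that *size is in/out: as the comment in page_pool_alloc_netmem() explains, the helper may grow the granted size to cover the rest of the page when another fragment would not fit, to keep truesize accounting honest.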
@@ -190,8 +204,7 @@ static inline void *page_pool_alloc_va(struct page_pool *pool,
* This is just a thin wrapper around the page_pool_alloc() API, and
* it returns va of the allocated page or page fragment.
*
- * Return:
- * Return the va for the allocated page or page fragment, otherwise return NULL.
+ * Return: the va for the allocated page or page fragment, otherwise return NULL.
*/
static inline void *page_pool_dev_alloc_va(struct page_pool *pool,
unsigned int *size)
@@ -302,7 +315,7 @@ static inline void page_pool_ref_page(struct page *page)
page_pool_ref_netmem(page_to_netmem(page));
}
-static inline bool page_pool_is_last_ref(netmem_ref netmem)
+static inline bool page_pool_unref_and_test(netmem_ref netmem)
{
/* If page_pool_unref_netmem() returns 0, we were the last user */
return page_pool_unref_netmem(netmem, 1) == 0;
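The rename from page_pool_is_last_ref() makes the side effect explicit: the helper drops one reference and tests the result. A hedged sketch of the call-site pattern (the surrounding release path is illustrative, only the two page_pool calls are real):

/* Illustrative release path for a fragment we no longer need. */
static void example_release(struct page_pool *pool, netmem_ref netmem)
{
	/* Drops one fragment reference; true means we were the last
	 * user and may hand the buffer back to the pool.
	 */
	if (page_pool_unref_and_test(netmem))
		page_pool_put_unrefed_netmem(pool, netmem, -1, false);
}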
@@ -317,7 +330,7 @@ static inline void page_pool_put_netmem(struct page_pool *pool,
* allow registering MEM_TYPE_PAGE_POOL, but shield linker.
*/
#ifdef CONFIG_PAGE_POOL
- if (!page_pool_is_last_ref(netmem))
+ if (!page_pool_unref_and_test(netmem))
return;
page_pool_put_unrefed_netmem(pool, netmem, dma_sync_size, allow_direct);
@@ -418,7 +431,21 @@ static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem)
*/
static inline dma_addr_t page_pool_get_dma_addr(const struct page *page)
{
- return page_pool_get_dma_addr_netmem(page_to_netmem((struct page *)page));
+ dma_addr_t ret = page->dma_addr;
+
+ if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
+ ret <<= PAGE_SHIFT;
+
+ return ret;
+}
+
+static inline void __page_pool_dma_sync_for_cpu(const struct page_pool *pool,
+ const dma_addr_t dma_addr,
+ u32 offset, u32 dma_sync_size)
+{
+ dma_sync_single_range_for_cpu(pool->p.dev, dma_addr,
+ offset + pool->p.offset, dma_sync_size,
+ page_pool_get_dma_dir(pool));
}
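The open-coded read above pairs with how the pool stores the address: on 32-bit architectures with a 64-bit dma_addr_t (the PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA case), page->dma_addr holds the page-aligned DMA address shifted down by PAGE_SHIFT so it fits in an unsigned long. A sketch of the round-trip, assuming a page-aligned address; the store side lives in the page_pool core, not in this header:

/* Illustrative arithmetic only:
 *
 *   store: page->dma_addr = addr >> PAGE_SHIFT;        fits unsigned long
 *   load:  ret = page->dma_addr; ret <<= PAGE_SHIFT;   this helper
 *
 * e.g. with addr = 0x123456000 and PAGE_SHIFT = 12:
 *   stored value 0x123456, read back as 0x123456000.
 */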
/**
@@ -437,10 +464,21 @@ static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool,
const struct page *page,
u32 offset, u32 dma_sync_size)
{
- dma_sync_single_range_for_cpu(pool->p.dev,
- page_pool_get_dma_addr(page),
- offset + pool->p.offset, dma_sync_size,
- page_pool_get_dma_dir(pool));
+ __page_pool_dma_sync_for_cpu(pool, page_pool_get_dma_addr(page), offset,
+ dma_sync_size);
+}
+
+static inline void
+page_pool_dma_sync_netmem_for_cpu(const struct page_pool *pool,
+ const netmem_ref netmem, u32 offset,
+ u32 dma_sync_size)
+{
+ if (!pool->dma_sync_for_cpu)
+ return;
+
+ __page_pool_dma_sync_for_cpu(pool,
+ page_pool_get_dma_addr_netmem(netmem),
+ offset, dma_sync_size);
}
static inline bool page_pool_put(struct page_pool *pool)
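A sketch of the intended consumer of the new netmem variant, e.g. an RX completion handler; everything except the helper call is illustrative. Unlike the page-based page_pool_dma_sync_for_cpu(), the netmem variant checks pool->dma_sync_for_cpu itself, so drivers can call it unconditionally:

/* Illustrative RX completion: sync only the received bytes for
 * CPU access before touching packet headers.
 */
static void example_rx_complete(struct page_pool *pool, netmem_ref netmem,
				u32 pkt_len)
{
	page_pool_dma_sync_netmem_for_cpu(pool, netmem, 0, pkt_len);
	/* ... build the skb / parse headers from the buffer ... */
}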