Diffstat (limited to 'include/net/page_pool.h')
-rw-r--r--  include/net/page_pool.h  |  134
1 file changed, 104 insertions(+), 30 deletions(-)
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index f1d5cc1fa13b..73d4f786418d 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -70,47 +70,76 @@ struct pp_alloc_cache {
struct page *cache[PP_ALLOC_CACHE_SIZE];
};
+/**
+ * struct page_pool_params - page pool parameters
+ * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_PAGE_FRAG
+ * @order: 2^order pages on allocation
+ * @pool_size: size of the ptr_ring
+ * @nid: NUMA node id to allocate pages from
+ * @dev: device, for DMA pre-mapping purposes
+ * @napi: NAPI which is the sole consumer of pages, otherwise NULL
+ * @dma_dir: DMA mapping direction
+ * @max_len: max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
+ * @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
+ */
struct page_pool_params {
unsigned int flags;
unsigned int order;
unsigned int pool_size;
- int nid; /* Numa node id to allocate from pages from */
- struct device *dev; /* device, for DMA pre-mapping purposes */
- struct napi_struct *napi; /* Sole consumer of pages, otherwise NULL */
- enum dma_data_direction dma_dir; /* DMA mapping direction */
- unsigned int max_len; /* max DMA sync memory size */
- unsigned int offset; /* DMA addr offset */
+ int nid;
+ struct device *dev;
+ struct napi_struct *napi;
+ enum dma_data_direction dma_dir;
+ unsigned int max_len;
+ unsigned int offset;
+/* private: used by test code only */
void (*init_callback)(struct page *page, void *arg);
void *init_arg;
};
#ifdef CONFIG_PAGE_POOL_STATS
+/**
+ * struct page_pool_alloc_stats - allocation statistics
+ * @fast: successful fast path allocations
+ * @slow: slow path order-0 allocations
+ * @slow_high_order: slow path high order allocations
+ * @empty: ptr ring is empty, so a slow path allocation was forced
+ * @refill: an allocation which triggered a refill of the cache
+ * @waive: pages obtained from the ptr ring that cannot be added to
+ * the cache due to a NUMA mismatch
+ */
struct page_pool_alloc_stats {
- u64 fast; /* fast path allocations */
- u64 slow; /* slow-path order 0 allocations */
- u64 slow_high_order; /* slow-path high order allocations */
- u64 empty; /* failed refills due to empty ptr ring, forcing
- * slow path allocation
- */
- u64 refill; /* allocations via successful refill */
- u64 waive; /* failed refills due to numa zone mismatch */
+ u64 fast;
+ u64 slow;
+ u64 slow_high_order;
+ u64 empty;
+ u64 refill;
+ u64 waive;
};
+/**
+ * struct page_pool_recycle_stats - recycling (freeing) statistics
+ * @cached: recycling placed page in the page pool cache
+ * @cache_full: page pool cache was full
+ * @ring: page placed into the ptr ring
+ * @ring_full: page released from page pool because the ptr ring was full
+ * @released_refcnt: page released (and not recycled) because refcnt > 1
+ */
struct page_pool_recycle_stats {
- u64 cached; /* recycling placed page in the cache. */
- u64 cache_full; /* cache was full */
- u64 ring; /* recycling placed page back into ptr ring */
- u64 ring_full; /* page was released from page-pool because
- * PTR ring was full.
- */
- u64 released_refcnt; /* page released because of elevated
- * refcnt
- */
+ u64 cached;
+ u64 cache_full;
+ u64 ring;
+ u64 ring_full;
+ u64 released_refcnt;
};
-/* This struct wraps the above stats structs so users of the
- * page_pool_get_stats API can pass a single argument when requesting the
- * stats for the page pool.
+/**
+ * struct page_pool_stats - combined page pool use statistics
+ * @alloc_stats: see struct page_pool_alloc_stats
+ * @recycle_stats: see struct page_pool_recycle_stats
+ *
+ * Wrapper struct for combining page pool stats with different storage
+ * requirements.
*/
struct page_pool_stats {
struct page_pool_alloc_stats alloc_stats;
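
The params and stats structs above are the whole configuration surface of the pool. As a rough sketch (not part of this patch), a driver with a single RX ring might create and inspect a pool as follows; the myrx_* names and the ring size of 256 are invented for the example:

#include <net/page_pool.h>

/* Hypothetical driver init: one pool feeding one RX ring, with the pool
 * handling DMA mapping and device syncs. */
static struct page_pool *myrx_create_pool(struct device *dev,
					  struct napi_struct *napi)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,		/* order-0 pages */
		.pool_size	= 256,		/* ~= RX ring size */
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.napi		= napi,		/* sole consumer */
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,	/* sync at most a full page */
		.offset		= 0,
	};

	return page_pool_create(&pp);	/* ERR_PTR() on failure */
}

#ifdef CONFIG_PAGE_POOL_STATS
/* Snapshot the combined stats; page_pool_get_stats() returns false when
 * no stats are available for this pool. */
static void myrx_dump_pool_stats(struct page_pool *pool)
{
	struct page_pool_stats stats = { };

	if (page_pool_get_stats(pool, &stats))
		pr_info("pp fast=%llu slow=%llu cached=%llu\n",
			stats.alloc_stats.fast, stats.alloc_stats.slow,
			stats.recycle_stats.cached);
}
#endif
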
@@ -211,6 +240,12 @@ struct page_pool {
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
+/**
+ * page_pool_dev_alloc_pages() - allocate a page.
+ * @pool: pool from which to allocate
+ *
+ * Get a page from the page allocator or page_pool caches.
+ */
static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
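
For illustration, an RX refill loop built on this helper could look like the sketch below; myrx_post_to_ring() is a made-up stand-in for driver-specific descriptor setup:

static void myrx_post_to_ring(struct page *page);	/* driver specific */

/* Hypothetical RX refill: pull pages from the pool caches, falling back
 * to the page allocator, until the budget is spent. */
static int myrx_refill(struct page_pool *pool, int budget)
{
	while (budget--) {
		struct page *page = page_pool_dev_alloc_pages(pool);

		if (!page)
			return -ENOMEM;
		myrx_post_to_ring(page);
	}
	return 0;
}
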
@@ -230,8 +265,12 @@ static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
return page_pool_alloc_frag(pool, offset, size, gfp);
}
-/* get the stored dma direction. A driver might decide to treat this locally and
- * avoid the extra cache line from page_pool to determine the direction
+/**
+ * page_pool_get_dma_dir() - Retrieve the stored DMA direction.
+ * @pool: pool from which page was allocated
+ *
+ * Get the stored DMA direction. A driver might decide to store this locally
+ * and avoid the extra cache line from page_pool to determine the direction.
*/
static
inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
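
To illustrate both helpers above, assuming a pool created with PP_FLAG_PAGE_FRAG and an invented myrx_priv struct: the direction can be cached once at setup, and 2 KB buffers carved out of pool pages afterwards:

struct myrx_priv {
	enum dma_data_direction dma_dir;	/* cached copy */
	/* ... */
};

/* Hypothetical setup: stash the pool's DMA direction in driver state so
 * hot paths do not touch the page_pool cache line. */
static void myrx_cache_dma_dir(struct myrx_priv *priv, struct page_pool *pool)
{
	priv->dma_dir = page_pool_get_dma_dir(pool);
}

/* Hypothetical 2K buffer allocation from a PP_FLAG_PAGE_FRAG pool. */
static struct page *myrx_alloc_buf(struct page_pool *pool, unsigned int *offset)
{
	return page_pool_dev_alloc_frag(pool, offset, 2048);
}
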
@@ -321,6 +360,19 @@ static inline bool page_pool_is_last_frag(struct page_pool *pool,
(page_pool_defrag_page(page, 1) == 0);
}
+/**
+ * page_pool_put_page() - release a reference to a page pool page
+ * @pool: pool from which page was allocated
+ * @page: page to release a reference on
+ * @dma_sync_size: how much of the page may have been touched by the device
+ * @allow_direct: released by the consumer, allow lockless caching
+ *
+ * The outcome of this depends on the page refcnt. If the driver bumps
+ * the refcnt > 1 this will unmap the page. If the page refcnt is 1
+ * the allocator owns the page and will try to recycle it in one of the pool
+ * caches. If PP_FLAG_DMA_SYNC_DEV is set, the page will be synced for_device
+ * using dma_sync_single_range_for_device().
+ */
static inline void page_pool_put_page(struct page_pool *pool,
struct page *page,
unsigned int dma_sync_size,
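
As a sketch of why @dma_sync_size matters: when the device reports how many bytes it actually wrote, passing that length instead of -1 keeps the eventual for-device sync to the touched region only. The helper and its caller are hypothetical:

/* Hypothetical drop path on RX completion: the device wrote @len bytes,
 * so only that much needs syncing if the page gets recycled. */
static void myrx_drop(struct page_pool *pool, struct page *page,
		      unsigned int len, bool in_napi)
{
	page_pool_put_page(pool, page, len, in_napi);
}
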
@@ -337,14 +389,29 @@ static inline void page_pool_put_page(struct page_pool *pool,
#endif
}
-/* Same as above but will try to sync the entire area pool->max_len */
+/**
+ * page_pool_put_full_page() - release a reference on a page pool page
+ * @pool: pool from which page was allocated
+ * @page: page to release a reference on
+ * @allow_direct: released by the consumer, allow lockless caching
+ *
+ * Similar to page_pool_put_page(), but will DMA sync the entire memory area
+ * as configured in &page_pool_params.max_len.
+ */
static inline void page_pool_put_full_page(struct page_pool *pool,
struct page *page, bool allow_direct)
{
page_pool_put_page(pool, page, -1, allow_direct);
}
-/* Same as above but the caller must guarantee safe context. e.g NAPI */
+/**
+ * page_pool_recycle_direct() - release a reference on a page pool page
+ * @pool: pool from which page was allocated
+ * @page: page to release a reference on
+ *
+ * Similar to page_pool_put_full_page(), but the caller must guarantee a safe
+ * context (e.g. NAPI), since it recycles the page directly into the pool's fast cache.
+ */
static inline void page_pool_recycle_direct(struct page_pool *pool,
struct page *page)
{
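
Roughly, the choice between the two variants above comes down to context; a hypothetical free helper used both from the driver's own NAPI poll and from teardown might read:

/* Hypothetical free helper: inside our own NAPI poll the page may go
 * straight into the lockless fast cache, elsewhere it must take the
 * slower ptr_ring path. */
static void myrx_free_buf(struct page_pool *pool, struct page *page,
			  bool in_napi)
{
	if (in_napi)
		page_pool_recycle_direct(pool, page);
	else
		page_pool_put_full_page(pool, page, false);
}
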
@@ -354,6 +421,13 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT \
(sizeof(dma_addr_t) > sizeof(unsigned long))
+/**
+ * page_pool_get_dma_addr() - Retrieve the stored DMA address.
+ * @page: page allocated from a page pool
+ *
+ * Fetch the DMA address of the page. The page pool to which the page belongs
+ * must have been created with PP_FLAG_DMA_MAP.
+ */
static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
dma_addr_t ret = page->dma_addr;
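
To close the loop: a driver posting a buffer to hardware can read back the mapping the pool established at allocation time instead of calling dma_map_page() itself, provided the pool was created with PP_FLAG_DMA_MAP. The descriptor layout below is invented for the example:

/* Hypothetical descriptor write reusing the pool's pre-established
 * DMA mapping. */
struct myrx_desc {
	__le64 addr;
	__le16 len;
};

static void myrx_post_buf(struct myrx_desc *desc, struct page *page)
{
	dma_addr_t dma = page_pool_get_dma_addr(page);

	desc->addr = cpu_to_le64(dma);
	desc->len  = cpu_to_le16(PAGE_SIZE);
}
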