author     David S. Miller <davem@davemloft.net>  2019-06-19 11:23:14 -0400
committer  David S. Miller <davem@davemloft.net>  2019-06-19 11:23:14 -0400
commit     2a54003e7af1eaddc05848dac14f7bcd77301478
tree       31644496760e3272d55f8248ef49eed16c32ec1d /net
parent     9371a56f7101cc3f12b57db4bfbb6159205211f4
parent     f71fec47c2df704c7081f946d7e46fe036a4208b
Merge branch 'xdp-page_pool-fixes-and-in-flight-accounting'
Jesper Dangaard Brouer says:
====================
xdp: page_pool fixes and in-flight accounting
This patchset fixes the page_pool API and its users, so that drivers can use
it for DMA mapping. There were a number of places where the DMA mapping would
not get released/unmapped; all of these are fixed. This occurs e.g. when an
xdp_frame gets converted to an SKB, because the network stack doesn't have any
callback for XDP memory models.
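To make the conversion problem concrete, here is a minimal, hedged sketch of a
consumer along the lines of cpumap/veth; my_frame_to_skb() is hypothetical,
while xdp_release_frame() is the small wrapper this series adds in
include/net/xdp.h around the __xdp_release_frame() visible in the
net/core/xdp.c hunk below:

#include <linux/skbuff.h>
#include <net/xdp.h>

/* Hypothetical consumer: once the frame becomes an SKB, the SKB path
 * knows nothing about XDP memory models, so the page_pool state
 * (including any DMA mapping) must be released explicitly here.
 */
static struct sk_buff *my_frame_to_skb(struct xdp_frame *xdpf,
				       struct sk_buff *skb)
{
	/* ... copy headroom, data len and metadata from xdpf to skb ... */

	/* For MEM_TYPE_PAGE_POOL this unmaps the page and bumps the
	 * pool's release count, keeping in-flight accounting correct.
	 */
	xdp_release_frame(xdpf);
	return skb;
}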
The patchset also addresses a shutdown race condition. Today, removing an XDP
memory model based on page_pool is only delayed by one RCU grace period. This
isn't enough, as redirected xdp_frames can still be in flight on different
queues (remote driver TX, cpumap, or veth).
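The in-flight detection added below relies on serial number arithmetic over
two free-running u32 counters (the _distance() macro in the page_pool.c hunk).
A small standalone C program, included here only to demonstrate why the signed
subtraction stays correct even across u32 wraparound:

#include <stdint.h>
#include <stdio.h>

/* Same trick as the kernel's _distance() macro: subtract two free-
 * running u32 counters and reinterpret the result as signed. Valid
 * as long as the true distance is below 2^31.
 */
#define _distance(a, b) ((int32_t)((a) - (b)))

int main(void)
{
	uint32_t hold = 5, release = 2;

	printf("inflight=%d\n", _distance(hold, release));	/* 3 */

	/* hold_cnt has wrapped past UINT32_MAX, release_cnt has not */
	hold = 2;
	release = UINT32_MAX - 1;
	printf("inflight=%d\n", _distance(hold, release));	/* 4 */
	return 0;
}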
We stress that when drivers use page_pool for DMA mapping, they MUST use one
packet per page. This might change in the future, but more work lies ahead
before we can lift this restriction.
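A sketch of what one packet per page means for a driver's RX refill path.
struct my_rx_ring and my_rx_refill() are hypothetical; page_pool_dev_alloc_pages()
is existing API, and reading page->dma_addr directly mirrors what
__page_pool_clean_page() does in the diff below:

#include <linux/bpf.h>
#include <net/page_pool.h>

struct my_rx_ring {			/* hypothetical driver state */
	struct { dma_addr_t addr; } *desc;
	struct page **pages;
	unsigned int prod;
};

static int my_rx_refill(struct page_pool *pool, struct my_rx_ring *ring)
{
	/* One page == one packet: with PP_FLAG_DMA_MAP the pool holds
	 * exactly one DMA mapping per page, so a page must never be
	 * split across multiple RX frames.
	 */
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (!page)
		return -ENOMEM;

	ring->desc[ring->prod].addr = page->dma_addr + XDP_PACKET_HEADROOM;
	ring->pages[ring->prod] = page;
	ring->prod++;
	return 0;
}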
This patchset changes the page_pool API to be more strict, as in-flight page
accounting is added.
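Concretely, "more strict" means the pool can no longer be freed
unconditionally. A sketch of the new two-step teardown handshake, using
page_pool_request_shutdown() and page_pool_free() as net/core/xdp.c calls them
below; the msleep() retry loop is purely illustrative (xdp.c defers retries to
a delayed workqueue instead):

#include <linux/delay.h>
#include <net/page_pool.h>

static void my_pool_teardown(struct page_pool *pool)
{
	/* Step 1: empty the pool's caches, then ask whether every page
	 * handed out has been returned (hold_cnt == release_cnt).
	 */
	while (!page_pool_request_shutdown(pool))
		msleep(1000);	/* pages still in flight, ask again */

	/* Step 2: only now is it safe to release the pool itself */
	page_pool_free(pool);
}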
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
 net/core/net-traces.c |   4
 net/core/page_pool.c  |  95
 net/core/xdp.c        | 120
 3 files changed, 191 insertions(+), 28 deletions(-)
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index 470b179d599e..283ddb2dbc7d 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -43,6 +43,10 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(fdb_delete);
 EXPORT_TRACEPOINT_SYMBOL_GPL(br_fdb_update);
 #endif
 
+#if IS_ENABLED(CONFIG_PAGE_POOL)
+#include <trace/events/page_pool.h>
+#endif
+
 #include <trace/events/neigh.h>
 EXPORT_TRACEPOINT_SYMBOL_GPL(neigh_update);
 EXPORT_TRACEPOINT_SYMBOL_GPL(neigh_update_done);
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 5b2252c6d49b..b366f59885c1 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -4,9 +4,11 @@
  * Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
  *	Copyright (C) 2016 Red Hat, Inc.
  */
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/device.h>
 
 #include <net/page_pool.h>
 #include <linux/dma-direction.h>
@@ -14,6 +16,8 @@
 #include <linux/page-flags.h>
 #include <linux/mm.h> /* for __put_page() */
 
+#include <trace/events/page_pool.h>
+
 static int page_pool_init(struct page_pool *pool,
 			  const struct page_pool_params *params)
 {
@@ -43,6 +47,11 @@ static int page_pool_init(struct page_pool *pool,
 	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
 		return -ENOMEM;
 
+	atomic_set(&pool->pages_state_release_cnt, 0);
+
+	if (pool->p.flags & PP_FLAG_DMA_MAP)
+		get_device(pool->p.dev);
+
 	return 0;
 }
 
@@ -151,6 +160,11 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 	page->dma_addr = dma;
 
 skip_dma_map:
+	/* Track how many pages are held 'in-flight' */
+	pool->pages_state_hold_cnt++;
+
+	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
+
 	/* When page just alloc'ed is should/must have refcnt 1. */
 	return page;
 }
@@ -173,6 +187,33 @@ struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
 }
 EXPORT_SYMBOL(page_pool_alloc_pages);
 
+/* Calculate distance between two u32 values, valid if distance is below 2^(31)
+ * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
+ */
+#define _distance(a, b)	(s32)((a) - (b))
+
+static s32 page_pool_inflight(struct page_pool *pool)
+{
+	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
+	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
+	s32 distance;
+
+	distance = _distance(hold_cnt, release_cnt);
+
+	trace_page_pool_inflight(pool, distance, hold_cnt, release_cnt);
+	return distance;
+}
+
+static bool __page_pool_safe_to_destroy(struct page_pool *pool)
+{
+	s32 inflight = page_pool_inflight(pool);
+
+	/* The distance should not be able to become negative */
+	WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);
+
+	return (inflight == 0);
+}
+
 /* Cleanup page_pool state from page */
 static void __page_pool_clean_page(struct page_pool *pool,
 				   struct page *page)
@@ -180,7 +221,7 @@ static void __page_pool_clean_page(struct page_pool *pool,
 	dma_addr_t dma;
 
 	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
-		return;
+		goto skip_dma_unmap;
 
 	dma = page->dma_addr;
 	/* DMA unmap */
@@ -188,12 +229,27 @@ static void __page_pool_clean_page(struct page_pool *pool,
 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
 			     DMA_ATTR_SKIP_CPU_SYNC);
 	page->dma_addr = 0;
+skip_dma_unmap:
+	atomic_inc(&pool->pages_state_release_cnt);
+	trace_page_pool_state_release(pool, page,
+			      atomic_read(&pool->pages_state_release_cnt));
 }
 
+/* unmap the page and clean our state */
+void page_pool_unmap_page(struct page_pool *pool, struct page *page)
+{
+	/* When page is unmapped, this implies page will not be
+	 * returned to page_pool.
+	 */
+	__page_pool_clean_page(pool, page);
+}
+EXPORT_SYMBOL(page_pool_unmap_page);
+
 /* Return a page to the page allocator, cleaning up our state */
 static void __page_pool_return_page(struct page_pool *pool, struct page *page)
 {
 	__page_pool_clean_page(pool, page);
+
 	put_page(page);
 	/* An optimization would be to call __free_pages(page, pool->p.order)
 	 * knowing page is not part of page-cache (thus avoiding a
@@ -285,21 +341,41 @@ static void __page_pool_empty_ring(struct page_pool *pool)
 	}
 }
 
-static void __page_pool_destroy_rcu(struct rcu_head *rcu)
+static void __warn_in_flight(struct page_pool *pool)
 {
-	struct page_pool *pool;
+	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
+	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
+	s32 distance;
 
-	pool = container_of(rcu, struct page_pool, rcu);
+	distance = _distance(hold_cnt, release_cnt);
 
+	/* Drivers should fix this, but only problematic when DMA is used */
+	WARN(1, "Still in-flight pages:%d hold:%u released:%u",
+	     distance, hold_cnt, release_cnt);
+}
+
+void __page_pool_free(struct page_pool *pool)
+{
 	WARN(pool->alloc.count, "API usage violation");
+	WARN(!ptr_ring_empty(&pool->ring), "ptr_ring is not empty");
+
+	/* Can happen due to forced shutdown */
+	if (!__page_pool_safe_to_destroy(pool))
+		__warn_in_flight(pool);
 
-	__page_pool_empty_ring(pool);
 	ptr_ring_cleanup(&pool->ring, NULL);
+
+	if (pool->p.flags & PP_FLAG_DMA_MAP)
+		put_device(pool->p.dev);
+
 	kfree(pool);
 }
+EXPORT_SYMBOL(__page_pool_free);
 
-/* Cleanup and release resources */
-void page_pool_destroy(struct page_pool *pool)
+/* Request to shutdown: release pages cached by page_pool, and check
+ * for in-flight pages
+ */
+bool __page_pool_request_shutdown(struct page_pool *pool)
 {
 	struct page *page;
 
@@ -317,7 +393,6 @@ void page_pool_destroy(struct page_pool *pool)
 	 */
 	__page_pool_empty_ring(pool);
 
-	/* An xdp_mem_allocator can still ref page_pool pointer */
-	call_rcu(&pool->rcu, __page_pool_destroy_rcu);
+	return __page_pool_safe_to_destroy(pool);
 }
-EXPORT_SYMBOL(page_pool_destroy);
+EXPORT_SYMBOL(__page_pool_request_shutdown);
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 8aab08b131d9..b29d7b513a18 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -14,6 +14,8 @@
 #include <net/page_pool.h>
 
 #include <net/xdp.h>
+#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
+#include <trace/events/xdp.h>
 
 #define REG_STATE_NEW		0x0
 #define REG_STATE_REGISTERED	0x1
@@ -29,17 +31,6 @@ static int mem_id_next = MEM_ID_MIN;
 static bool mem_id_init; /* false */
 static struct rhashtable *mem_id_ht;
 
-struct xdp_mem_allocator {
-	struct xdp_mem_info mem;
-	union {
-		void *allocator;
-		struct page_pool *page_pool;
-		struct zero_copy_allocator *zc_alloc;
-	};
-	struct rhash_head node;
-	struct rcu_head rcu;
-};
-
 static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
 {
 	const u32 *k = data;
@@ -79,13 +70,13 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
 
 	xa = container_of(rcu, struct xdp_mem_allocator, rcu);
 
+	/* Allocator have indicated safe to remove before this is called */
+	if (xa->mem.type == MEM_TYPE_PAGE_POOL)
+		page_pool_free(xa->page_pool);
+
 	/* Allow this ID to be reused */
 	ida_simple_remove(&mem_id_pool, xa->mem.id);
 
-	/* Notice, driver is expected to free the *allocator,
-	 * e.g. page_pool, and MUST also use RCU free.
-	 */
-
 	/* Poison memory */
 	xa->mem.id = 0xFFFF;
 	xa->mem.type = 0xF0F0;
@@ -94,6 +85,64 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
 	kfree(xa);
 }
 
+bool __mem_id_disconnect(int id, bool force)
+{
+	struct xdp_mem_allocator *xa;
+	bool safe_to_remove = true;
+
+	mutex_lock(&mem_id_lock);
+
+	xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
+	if (!xa) {
+		mutex_unlock(&mem_id_lock);
+		WARN(1, "Request remove non-existing id(%d), driver bug?", id);
+		return true;
+	}
+	xa->disconnect_cnt++;
+
+	/* Detects in-flight packet-pages for page_pool */
+	if (xa->mem.type == MEM_TYPE_PAGE_POOL)
+		safe_to_remove = page_pool_request_shutdown(xa->page_pool);
+
+	trace_mem_disconnect(xa, safe_to_remove, force);
+
+	if ((safe_to_remove || force) &&
+	    !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
+		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
+
+	mutex_unlock(&mem_id_lock);
+	return (safe_to_remove|force);
+}
+
+#define DEFER_TIME (msecs_to_jiffies(1000))
+#define DEFER_WARN_INTERVAL (30 * HZ)
+#define DEFER_MAX_RETRIES 120
+
+static void mem_id_disconnect_defer_retry(struct work_struct *wq)
+{
+	struct delayed_work *dwq = to_delayed_work(wq);
+	struct xdp_mem_allocator *xa = container_of(dwq, typeof(*xa), defer_wq);
+	bool force = false;
+
+	if (xa->disconnect_cnt > DEFER_MAX_RETRIES)
+		force = true;
+
+	if (__mem_id_disconnect(xa->mem.id, force))
+		return;
+
+	/* Periodic warning */
+	if (time_after_eq(jiffies, xa->defer_warn)) {
+		int sec = (s32)((u32)jiffies - (u32)xa->defer_start) / HZ;
+
+		pr_warn("%s() stalled mem.id=%u shutdown %d attempts %d sec\n",
+			__func__, xa->mem.id, xa->disconnect_cnt, sec);
+		xa->defer_warn = jiffies + DEFER_WARN_INTERVAL;
+	}
+
+	/* Still not ready to be disconnected, retry later */
+	schedule_delayed_work(&xa->defer_wq, DEFER_TIME);
+}
+
 void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
 {
 	struct xdp_mem_allocator *xa;
@@ -112,16 +161,30 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
 	if (id == 0)
 		return;
 
+	if (__mem_id_disconnect(id, false))
+		return;
+
+	/* Could not disconnect, defer new disconnect attempt to later */
 	mutex_lock(&mem_id_lock);
 
 	xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
-	if (xa && !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
-		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
+	if (!xa) {
+		mutex_unlock(&mem_id_lock);
+		return;
+	}
+	xa->defer_start = jiffies;
+	xa->defer_warn  = jiffies + DEFER_WARN_INTERVAL;
 
+	INIT_DELAYED_WORK(&xa->defer_wq, mem_id_disconnect_defer_retry);
 	mutex_unlock(&mem_id_lock);
+	schedule_delayed_work(&xa->defer_wq, DEFER_TIME);
 }
 EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);
 
+/* This unregister operation will also cleanup and destroy the
+ * allocator. The page_pool_free() operation is first called when it's
+ * safe to remove, possibly deferred to a workqueue.
+ */
 void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
 {
 	/* Simplify driver cleanup code paths, allow unreg "unused" */
@@ -301,12 +364,15 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
 	/* Insert allocator into ID lookup table */
 	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
 	if (IS_ERR(ptr)) {
+		ida_simple_remove(&mem_id_pool, xdp_rxq->mem.id);
+		xdp_rxq->mem.id = 0;
 		errno = PTR_ERR(ptr);
 		goto err;
 	}
 
 	mutex_unlock(&mem_id_lock);
 
+	trace_mem_connect(xdp_alloc, xdp_rxq);
 	return 0;
 err:
 	mutex_unlock(&mem_id_lock);
@@ -333,10 +399,13 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
 		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
 		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
 		page = virt_to_head_page(data);
-		if (xa) {
+		if (likely(xa)) {
 			napi_direct &= !xdp_return_frame_no_direct();
 			page_pool_put_page(xa->page_pool, page, napi_direct);
 		} else {
+			/* Hopefully stack show who to blame for late return */
+			WARN_ONCE(1, "page_pool gone mem.id=%d", mem->id);
+			trace_mem_return_failed(mem, page);
 			put_page(page);
 		}
 		rcu_read_unlock();
@@ -379,6 +448,21 @@ void xdp_return_buff(struct xdp_buff *xdp)
 }
 EXPORT_SYMBOL_GPL(xdp_return_buff);
 
+/* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
+void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
+{
+	struct xdp_mem_allocator *xa;
+	struct page *page;
+
+	rcu_read_lock();
+	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
+	page = virt_to_head_page(data);
+	if (xa)
+		page_pool_release_page(xa->page_pool, page);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(__xdp_release_frame);
+
 int xdp_attachment_query(struct xdp_attachment_info *info,
 			 struct netdev_bpf *bpf)
 {
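For context, a hedged sketch of the driver-side setup that this series assumes:
struct my_rxq and my_rxq_setup() are hypothetical, while page_pool_create(),
xdp_rxq_info_reg() and xdp_rxq_info_reg_mem_model() are existing API. Once a
queue is registered this way, xdp_rxq_info_unreg() drives the shutdown
handshake sketched above, retrying every second (DEFER_TIME), warning every 30
seconds (DEFER_WARN_INTERVAL), and forcing the disconnect after
DEFER_MAX_RETRIES (120) attempts, i.e. after roughly two minutes.

#include <net/page_pool.h>
#include <net/xdp.h>

struct my_rxq {				/* hypothetical driver state */
	struct xdp_rxq_info xdp_rxq;
	struct page_pool *pool;
};

static int my_rxq_setup(struct my_rxq *rxq, struct net_device *netdev,
			struct device *dma_dev, u32 queue_index)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP, /* pool owns the mappings */
		.order		= 0,		   /* one packet per page */
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		.dev		= dma_dev,
		.dma_dir	= DMA_FROM_DEVICE,
	};
	int err;

	rxq->pool = page_pool_create(&pp);
	if (IS_ERR(rxq->pool))
		return PTR_ERR(rxq->pool);

	err = xdp_rxq_info_reg(&rxq->xdp_rxq, netdev, queue_index);
	if (err)
		goto err_free_pool;

	/* Register the pool as this queue's XDP memory model; the
	 * resulting mem.id is what __xdp_return() and
	 * __xdp_release_frame() look up at packet-return time.
	 */
	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 rxq->pool);
	if (err)
		goto err_unreg_rxq;
	return 0;

err_unreg_rxq:
	xdp_rxq_info_unreg(&rxq->xdp_rxq);
err_free_pool:
	page_pool_free(rxq->pool);
	return err;
}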