author     Chuck Lever <chuck.lever@oracle.com>        2016-06-29 13:54:00 -0400
committer  Anna Schumaker <Anna.Schumaker@Netapp.com>  2016-07-11 15:50:43 -0400
commit     e2ac236c0b65129f12fef358390f76cc3cacb865 (patch)
tree       14c8ab8d8f1ba37fc60695382e55a46eac1d2aa1 /net/sunrpc/xprtrdma/xprt_rdma.h
parent     a54d4059e5f356c522aabfd38563ab6e64773263 (diff)
xprtrdma: Allocate MRs on demand
Frequent MR list exhaustion can impact I/O throughput, so enough MRs
are always created during transport set-up to prevent running out.
This means more MRs are created than most workloads need.
Commit 94f58c58c0b4 ("xprtrdma: Allow Read list and Reply chunk
simultaneously") introduced support for sending two chunk lists per
RPC, which consumes more MRs per RPC.
Instead of trying to provision more MRs, introduce a mechanism for
allocating MRs on demand. A few MRs are allocated during transport
set-up to kick things off.
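For illustration, here is a minimal kernel-style sketch of the consumer
side of this scheme. Only rb_refresh_worker is introduced by this patch
(see the diff below); the function name, the free-list names rb_mws,
rb_mwlock, and mw_list, and the retry behavior are assumptions made for
the sketch, not the patch's actual code.

#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include "xprt_rdma.h"

/* Sketch only: pop an MR from the free list; on exhaustion, kick the
 * refresh worker and let the caller back off and retry the RPC later.
 */
static struct rpcrdma_mw *
rpcrdma_get_mw_sketch(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mw *mw = NULL;

	spin_lock(&buf->rb_mwlock);
	if (!list_empty(&buf->rb_mws)) {
		mw = list_first_entry(&buf->rb_mws,
				      struct rpcrdma_mw, mw_list);
		list_del_init(&mw->mw_list);
	}
	spin_unlock(&buf->rb_mwlock);

	if (!mw)
		/* Pool exhausted: grow it in the background. */
		schedule_delayed_work(&buf->rb_refresh_worker, 0);
	return mw;
}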
This significantly reduces the average number of MRs per transport
while allowing the MR count to grow for workloads or devices that
need more MRs.
FRWR with mlx4 allocated almost 400 MRs per transport before this
patch. Now it starts with 32.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Diffstat (limited to 'net/sunrpc/xprtrdma/xprt_rdma.h')
-rw-r--r--  net/sunrpc/xprtrdma/xprt_rdma.h | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 08d441d65a83..649d01dda327 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -339,6 +339,7 @@ struct rpcrdma_buffer {
 	spinlock_t		rb_recovery_lock; /* protect rb_stale_mrs */
 	struct list_head	rb_stale_mrs;
 	struct delayed_work	rb_recovery_worker;
+	struct delayed_work	rb_refresh_worker;
 };
 
 #define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)
@@ -387,6 +388,7 @@ struct rpcrdma_stats {
 	unsigned long		bcall_count;
 	unsigned long		mrs_recovered;
 	unsigned long		mrs_orphaned;
+	unsigned long		mrs_allocated;
 };
 
 /*
@@ -405,8 +407,9 @@ struct rpcrdma_memreg_ops {
 				   struct rpcrdma_ep *,
 				   struct rpcrdma_create_data_internal *);
 	size_t		(*ro_maxpages)(struct rpcrdma_xprt *);
-	int		(*ro_init)(struct rpcrdma_xprt *);
-	void		(*ro_destroy)(struct rpcrdma_buffer *);
+	int		(*ro_init_mr)(struct rpcrdma_ia *,
+				      struct rpcrdma_mw *);
+	void		(*ro_release_mr)(struct rpcrdma_mw *);
 	const char	*ro_displayname;
 };
 
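The ops-table change is the piece that makes on-demand growth possible:
the all-at-once ro_init/ro_destroy pair becomes the per-MR
ro_init_mr/ro_release_mr callbacks, so a background worker can create
MRs one at a time. Below is a hedged sketch of how rb_refresh_worker
might drive ro_init_mr to replenish the pool; it assumes the free-list
names from the sketch above and a batch size echoing the 32 MRs
mentioned in the commit message, and it is not the patch's actual
implementation.

#include <linux/slab.h>
#include <linux/workqueue.h>
#include "xprt_rdma.h"

static void
rpcrdma_mr_refresh_worker_sketch(struct work_struct *work)
{
	struct rpcrdma_buffer *buf =
		container_of(to_delayed_work(work), struct rpcrdma_buffer,
			     rb_refresh_worker);
	struct rpcrdma_xprt *r_xprt =
		container_of(buf, struct rpcrdma_xprt, rx_buf);
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int count;
	LIST_HEAD(all);

	/* Create a small batch of MRs off the hot path. */
	for (count = 0; count < 32; count++) {
		struct rpcrdma_mw *mw = kzalloc(sizeof(*mw), GFP_KERNEL);

		if (!mw)
			break;
		if (ia->ri_ops->ro_init_mr(ia, mw)) {
			kfree(mw);
			break;
		}
		list_add(&mw->mw_list, &all);
	}

	/* Splice the new MRs onto the shared free list in one step. */
	spin_lock(&buf->rb_mwlock);
	list_splice(&all, &buf->rb_mws);
	spin_unlock(&buf->rb_mwlock);

	r_xprt->rx_stats.mrs_allocated += count;
}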