path: root/net/sunrpc/xprtrdma/xprt_rdma.h
author	Chuck Lever <chuck.lever@oracle.com>	2015-12-16 17:22:39 -0500
committer	Anna Schumaker <Anna.Schumaker@Netapp.com>	2015-12-18 15:34:33 -0500
commit	32d0ceecdfd0e941c492390fe5b6237cc1cf9fa6 (patch)
tree	ce80278de85aec38afde06b8e8b8b3f245342e56 /net/sunrpc/xprtrdma/xprt_rdma.h
parent	3cf4e169be95e1a3a70a063b6bd8103fbd5911f3 (diff)
xprtrdma: Introduce ro_unmap_sync method
In the current xprtrdma implementation, some memreg strategies implement ro_unmap synchronously (the MR is knocked down before the method returns) and some asynchronously (the MR will be knocked down and returned to the pool in the background).

To guarantee the MR is truly invalid before the RPC consumer is allowed to resume execution, we need an unmap method that is always synchronous, invoked from the RPC/RDMA reply handler.

The new method unmaps all MRs for an RPC. The existing ro_unmap method unmaps only one MR at a time.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Devesh Sharma <devesh.sharma@avagotech.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
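As a hedged illustration (not part of this commit), a memreg strategy's ro_unmap_sync implementation might walk every MR segment registered for the RPC and invalidate each one before returning, per the description above. The helper example_invalidate_mr_wait() is hypothetical, and the rl_nchunks/rl_segments fields of struct rpcrdma_req are assumptions about this era's layout:

	/* Minimal sketch of a synchronous whole-RPC unmap: invalidate
	 * every MR segment the RPC registered, blocking until each
	 * invalidation completes, so the MRs are truly invalid when
	 * this method returns.
	 */
	static void
	example_op_unmap_sync(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_req *req)
	{
		unsigned int i;

		for (i = 0; i < req->rl_nchunks; i++) {
			struct rpcrdma_mr_seg *seg = &req->rl_segments[i];

			/* Hypothetical helper: synchronously knock down
			 * the MR backing this segment, then return it
			 * to the pool for reuse. */
			example_invalidate_mr_wait(r_xprt, seg);
		}
	}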
Diffstat (limited to 'net/sunrpc/xprtrdma/xprt_rdma.h')
-rw-r--r--	net/sunrpc/xprtrdma/xprt_rdma.h	2
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 5c1e0c600faf..c32cba3f21fb 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -368,6 +368,8 @@ struct rpcrdma_xprt;
struct rpcrdma_memreg_ops {
int (*ro_map)(struct rpcrdma_xprt *,
struct rpcrdma_mr_seg *, int, bool);
+ void (*ro_unmap_sync)(struct rpcrdma_xprt *,
+ struct rpcrdma_req *);
int (*ro_unmap)(struct rpcrdma_xprt *,
struct rpcrdma_mr_seg *);
int (*ro_open)(struct rpcrdma_ia *,
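For context, a hedged sketch of the intended call site: the commit message says the new method is invoked from the RPC/RDMA reply handler, but this commit only adds the method pointer, so the caller below is an assumption based on that description (the rl_nchunks guard and the ri_ops location in struct rpcrdma_ia are likewise assumptions):

	static void
	example_reply_done(struct rpcrdma_xprt *r_xprt,
			   struct rpcrdma_req *req)
	{
		/* Before allowing the RPC consumer to resume, make sure
		 * every MR the RPC registered is truly invalid;
		 * ro_unmap_sync blocks until all invalidations finish. */
		if (req->rl_nchunks)
			r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, req);
	}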