author | Chuck Lever <chuck.lever@oracle.com> | 2019-10-24 09:34:16 -0400 |
---|---|---|
committer | J. Bruce Fields <bfields@redhat.com> | 2019-10-30 16:32:37 -0400 |
commit | 5866efa8cbfbadf3905072798e96652faf02dbe8 (patch) | |
tree | b02eae553499e2c0a32b9721aac3ac37b9696cb4 /net/sunrpc | |
parent | ff27e9f748303e8567bfceb6d7ff264cbcaca2ef (diff) | |
download | lwn-5866efa8cbfbadf3905072798e96652faf02dbe8.tar.gz lwn-5866efa8cbfbadf3905072798e96652faf02dbe8.zip |
SUNRPC: Fix svcauth_gss_proxy_init()
gss_read_proxy_verf() assumes things about the XDR buffer containing
the RPC Call that are not true for buffers generated by
svc_rdma_recv().
RDMA's buffers look more like what the upper layer generates for
sending: head is a kmalloc'd buffer; it does not point to a page
whose contents are contiguous with the first page in the buffer's
page array. The result is that ACCEPT_SEC_CONTEXT via RPC/RDMA has
stopped working on Linux NFS servers that use gssproxy.
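To make the broken assumption concrete, here is a minimal user-space sketch; it is not kernel code, and MODEL_PAGE_SIZE, head_is_inside() and the variable names are invented for the illustration. The removed code handed gssproxy rqstp->rq_pages plus an offset computed as (ulong)argv->iov_base & ~PAGE_MASK, which only recovers a useful page offset when the head kvec actually lives inside the first page of that array; a kmalloc'd RDMA head lands somewhere unrelated.

/* Minimal user-space model of the assumption the old gss_read_proxy_verf()
 * made.  Everything here is invented for the illustration. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MODEL_PAGE_SIZE 4096

/* Does 'head' point into the page-sized buffer starting at 'page'? */
static int head_is_inside(const char *head, const char *page)
{
	uintptr_t h = (uintptr_t)head, p = (uintptr_t)page;

	return h >= p && h < p + MODEL_PAGE_SIZE;
}

int main(void)
{
	/* Stand-in for rq_pages[0]: a page-aligned, page-sized buffer. */
	char *page0 = aligned_alloc(MODEL_PAGE_SIZE, MODEL_PAGE_SIZE);
	/* RDMA-style head: a separate, kmalloc-like allocation. */
	char *rdma_head = malloc(256);
	/* TCP-style head: carved out of page0 at some offset. */
	char *tcp_head;

	if (!page0 || !rdma_head)
		return 1;
	tcp_head = page0 + 120;

	/* The old code recovered the upcall's page_base by masking the head
	 * pointer: (ulong)iov_base & ~PAGE_MASK.  That is only meaningful
	 * when head really lives inside the first page of the array. */
	printf("tcp head inside page0:  %d, masked offset = %zu\n",
	       head_is_inside(tcp_head, page0),
	       (size_t)((uintptr_t)tcp_head & (MODEL_PAGE_SIZE - 1)));
	printf("rdma head inside page0: %d, masked offset = %zu (bogus)\n",
	       head_is_inside(rdma_head, page0),
	       (size_t)((uintptr_t)rdma_head & (MODEL_PAGE_SIZE - 1)));

	free(rdma_head);
	free(page0);
	return 0;
}

Run against the TCP-style head, the masked offset is the real position of the data within page0; run against the malloc'd head, it is just the allocation's alignment residue.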
This does not affect clients that use only TCP to send their
ACCEPT_SEC_CONTEXT operation (that's all Linux clients). Other
clients, like Solaris NFS clients, send ACCEPT_SEC_CONTEXT on the
same transport as they send all other NFS operations. Such clients
can send ACCEPT_SEC_CONTEXT via RPC/RDMA.
I thought I had found every direct reference in the server RPC code
to the rqstp->rq_pages field.
Bug found at the 2019 Westford NFS bake-a-thon.
Fixes: 3316f0631139 ("svcrdma: Persistently allocate and DMA- ... ")
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Bill Baker <bill.baker@oracle.com>
Reviewed-by: Simo Sorce <simo@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'net/sunrpc')
-rw-r--r-- | net/sunrpc/auth_gss/svcauth_gss.c | 84 |
1 file changed, 63 insertions(+), 21 deletions(-)
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index f1309905aed3..c62d1f10978b 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1078,24 +1078,32 @@ gss_read_verf(struct rpc_gss_wire_cred *gc,
 	return 0;
 }
 
-/* Ok this is really heavily depending on a set of semantics in
- * how rqstp is set up by svc_recv and pages laid down by the
- * server when reading a request. We are basically guaranteed that
- * the token lays all down linearly across a set of pages, starting
- * at iov_base in rq_arg.head[0] which happens to be the first of a
- * set of pages stored in rq_pages[].
- * rq_arg.head[0].iov_base will provide us the page_base to pass
- * to the upcall.
- */
-static inline int
-gss_read_proxy_verf(struct svc_rqst *rqstp,
-		struct rpc_gss_wire_cred *gc, __be32 *authp,
-		struct xdr_netobj *in_handle,
-		struct gssp_in_token *in_token)
+static void gss_free_in_token_pages(struct gssp_in_token *in_token)
 {
-	struct kvec *argv = &rqstp->rq_arg.head[0];
 	u32 inlen;
-	int res;
+	int i;
+
+	i = 0;
+	inlen = in_token->page_len;
+	while (inlen) {
+		if (in_token->pages[i])
+			put_page(in_token->pages[i]);
+		inlen -= inlen > PAGE_SIZE ? PAGE_SIZE : inlen;
+	}
+
+	kfree(in_token->pages);
+	in_token->pages = NULL;
+}
+
+static int gss_read_proxy_verf(struct svc_rqst *rqstp,
+			       struct rpc_gss_wire_cred *gc, __be32 *authp,
+			       struct xdr_netobj *in_handle,
+			       struct gssp_in_token *in_token)
+{
+	struct kvec *argv = &rqstp->rq_arg.head[0];
+	unsigned int page_base, length;
+	int pages, i, res;
+	size_t inlen;
 
 	res = gss_read_common_verf(gc, argv, authp, in_handle);
 	if (res)
@@ -1105,10 +1113,36 @@ gss_read_proxy_verf(struct svc_rqst *rqstp,
 	if (inlen > (argv->iov_len + rqstp->rq_arg.page_len))
 		return SVC_DENIED;
 
-	in_token->pages = rqstp->rq_pages;
-	in_token->page_base = (ulong)argv->iov_base & ~PAGE_MASK;
+	pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
+	in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
+	if (!in_token->pages)
+		return SVC_DENIED;
+	in_token->page_base = 0;
 	in_token->page_len = inlen;
+	for (i = 0; i < pages; i++) {
+		in_token->pages[i] = alloc_page(GFP_KERNEL);
+		if (!in_token->pages[i]) {
+			gss_free_in_token_pages(in_token);
+			return SVC_DENIED;
+		}
+	}
+	length = min_t(unsigned int, inlen, argv->iov_len);
+	memcpy(page_address(in_token->pages[0]), argv->iov_base, length);
+	inlen -= length;
+
+	i = 1;
+	page_base = rqstp->rq_arg.page_base;
+	while (inlen) {
+		length = min_t(unsigned int, inlen, PAGE_SIZE);
+		memcpy(page_address(in_token->pages[i]),
+		       page_address(rqstp->rq_arg.pages[i]) + page_base,
+		       length);
+
+		inlen -= length;
+		page_base = 0;
+		i++;
+	}
 
 	return 0;
 }
@@ -1282,8 +1316,11 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
 		break;
 	case GSS_S_COMPLETE:
 		status = gss_proxy_save_rsc(sn->rsc_cache, &ud, &handle);
-		if (status)
+		if (status) {
+			pr_info("%s: gss_proxy_save_rsc failed (%d)\n",
+				__func__, status);
 			goto out;
+		}
 		cli_handle.data = (u8 *)&handle;
 		cli_handle.len = sizeof(handle);
 		break;
@@ -1294,15 +1331,20 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
 
 	/* Got an answer to the upcall; use it: */
 	if (gss_write_init_verf(sn->rsc_cache, rqstp,
-				&cli_handle, &ud.major_status))
+				&cli_handle, &ud.major_status)) {
+		pr_info("%s: gss_write_init_verf failed\n", __func__);
 		goto out;
+	}
 	if (gss_write_resv(resv, PAGE_SIZE,
 			   &cli_handle, &ud.out_token,
-			   ud.major_status, ud.minor_status))
+			   ud.major_status, ud.minor_status)) {
+		pr_info("%s: gss_write_resv failed\n", __func__);
 		goto out;
+	}
 
 	ret = SVC_COMPLETE;
 out:
+	gss_free_in_token_pages(&ud.in_token);
 	gssp_free_upcall_data(&ud);
 	return ret;
 }
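For readers who want the shape of the fix without the kernel page machinery: size a private destination from the token length, copy the head kvec bytes first, then gather the remainder out of the request's page array, honoring the page_base offset of the first source page. Below is a rough user-space analogue; MODEL_PAGE_SIZE, min_sz() and model_gather() are invented names, and the destination is a single flat buffer rather than a page array, so this is a simplification of the patch, not a copy of it.

/* Rough user-space analogue of the gather the fix performs: token bytes are
 * copied out of a request-shaped source (head kvec plus page array) into
 * memory the caller owns, instead of borrowing the request's pages. */
#include <stddef.h>
#include <string.h>

#define MODEL_PAGE_SIZE 4096

static size_t min_sz(size_t a, size_t b)
{
	return a < b ? a : b;
}

/* Copy 'inlen' token bytes into 'dst': first whatever sits in the head
 * kvec, then successive source pages; only the first source page is read
 * starting at 'src_base'. */
static void model_gather(char *dst, size_t inlen,
			 const char *head, size_t head_len,
			 char *const *src_pages, size_t src_base)
{
	size_t copied = min_sz(inlen, head_len);

	memcpy(dst, head, copied);

	for (size_t s = 0; copied < inlen; s++, src_base = 0) {
		size_t chunk = min_sz(inlen - copied,
				      MODEL_PAGE_SIZE - src_base);

		memcpy(dst + copied, src_pages[s] + src_base, chunk);
		copied += chunk;
	}
}

In the patch itself, the freshly allocated pages are released by gss_free_in_token_pages(), both on the allocation error path inside gss_read_proxy_verf() and in svcauth_gss_proxy_init() once the gssproxy upcall has been processed.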