author | Chuck Lever <cel@netapp.com> | 2006-03-20 13:44:31 -0500 |
---|---|---|
committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-03-20 13:44:31 -0500 |
commit | 63ab46abc70b01cb0711301f5ddb08c1c0bb9b1c (patch) | |
tree | 4d1d03a95f1468d240c3c7e0cd207fc14aa54023 /fs/nfs/direct.c | |
parent | 93619e5989173614bef0013b0bb8a3fe3dbd5a95 (diff) | |
NFS: create common routine for handling direct I/O completion
Factor out the common piece of completing an NFS direct I/O request.
Test plan:
Compile kernel with CONFIG_NFS and CONFIG_NFS_DIRECTIO enabled.
Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
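The patch below replaces two copies of the request-completion logic with one helper, nfs_direct_complete(), which either calls aio_complete() for an asynchronous caller or wakes the waiter for a synchronous one. As a rough illustration of that shape only, here is a minimal user-space C sketch; it is not kernel code, and every name in it (demo_req, demo_complete, and so on) is invented for the example.

```c
/*
 * Minimal user-space sketch of the pattern this patch introduces: every
 * exit path funnels through one completion helper, which either signals
 * an asynchronous caller or wakes a synchronous waiter.  Names here are
 * illustrative only, not taken from the patch.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_req {
	bool async;                 /* aio-style caller vs. blocked waiter */
	long error;                 /* first error seen, if any */
	long count;                 /* bytes transferred */
	void (*aio_done)(long res); /* stands in for aio_complete() */
	pthread_mutex_t lock;
	pthread_cond_t wait;        /* stands in for the request wait queue */
	bool done;
};

/* Single place that finishes a request, whatever kind of caller it has. */
static void demo_complete(struct demo_req *req)
{
	long res = req->error ? req->error : req->count;

	if (req->async) {
		req->aio_done(res);       /* asynchronous: notify via callback */
	} else {
		pthread_mutex_lock(&req->lock);
		req->done = true;         /* synchronous: wake the waiter */
		pthread_cond_signal(&req->wait);
		pthread_mutex_unlock(&req->lock);
	}
}

static void print_result(long res)
{
	printf("async completion: %ld\n", res);
}

int main(void)
{
	struct demo_req req = {
		.async = true, .error = 0, .count = 4096,
		.aio_done = print_result,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait = PTHREAD_COND_INITIALIZER,
	};

	demo_complete(&req);    /* every I/O path would call this one helper */
	return 0;
}
```

The point of the refactoring is simply that the AIO-versus-synchronous distinction now lives in exactly one routine instead of being duplicated in each RPC callback.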
Diffstat (limited to 'fs/nfs/direct.c')
-rw-r--r-- | fs/nfs/direct.c | 46 |
1 files changed, 26 insertions, 20 deletions
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 489f736d0f5d..4df21ce28e17 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -201,6 +201,30 @@ out:
 }
 
 /*
+ * We must hold a reference to all the pages in this direct read request
+ * until the RPCs complete. This could be long *after* we are woken up in
+ * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
+ *
+ * In addition, synchronous I/O uses a stack-allocated iocb. Thus we
+ * can't trust the iocb is still valid here if this is a synchronous
+ * request. If the waiter is woken prematurely, the iocb is long gone.
+ */
+static void nfs_direct_complete(struct nfs_direct_req *dreq)
+{
+	nfs_free_user_pages(dreq->pages, dreq->npages, 1);
+
+	if (dreq->iocb) {
+		long res = atomic_read(&dreq->error);
+		if (!res)
+			res = atomic_read(&dreq->count);
+		aio_complete(dreq->iocb, res, 0);
+	} else
+		wake_up(&dreq->wait);
+
+	kref_put(&dreq->kref, nfs_direct_req_release);
+}
+
+/*
  * Note we also set the number of requests we have in the dreq when we are
  * done. This prevents races with I/O completion so we will always wait
  * until all requests have been dispatched and completed.
@@ -245,15 +269,6 @@ static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
 	return dreq;
 }
 
-/*
- * We must hold a reference to all the pages in this direct read request
- * until the RPCs complete. This could be long *after* we are woken up in
- * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
- *
- * In addition, synchronous I/O uses a stack-allocated iocb. Thus we
- * can't trust the iocb is still valid here if this is a synchronous
- * request. If the waiter is woken prematurely, the iocb is long gone.
- */
 static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 {
 	struct nfs_read_data *data = calldata;
@@ -266,17 +281,8 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 	else
 		atomic_set(&dreq->error, task->tk_status);
 
-	if (unlikely(atomic_dec_and_test(&dreq->complete))) {
-		nfs_free_user_pages(dreq->pages, dreq->npages, 1);
-		if (dreq->iocb) {
-			long res = atomic_read(&dreq->error);
-			if (!res)
-				res = atomic_read(&dreq->count);
-			aio_complete(dreq->iocb, res, 0);
-		} else
-			wake_up(&dreq->wait);
-		kref_put(&dreq->kref, nfs_direct_req_release);
-	}
+	if (unlikely(atomic_dec_and_test(&dreq->complete)))
+		nfs_direct_complete(dreq);
 }
 
 static const struct rpc_call_ops nfs_read_direct_ops = {
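In the read completion path above, each finished RPC decrements dreq->complete, and only the last RPC to finish calls nfs_direct_complete(). The stand-alone C sketch below shows that last-one-out idiom with a plain C11 atomic counter; the structure and function names (demo_dreq, demo_rpc_done, and so on) are illustrative and not part of the patch.

```c
/*
 * Sketch of the "last RPC to finish runs the completion" idiom used above.
 * Counter semantics are simplified; names are illustrative only.
 */
#include <stdatomic.h>
#include <stdio.h>

struct demo_dreq {
	atomic_int complete;   /* number of outstanding RPCs */
};

static void demo_direct_complete(struct demo_dreq *dreq)
{
	printf("all RPCs done: release pages, then notify or wake the caller\n");
}

/* Called once per finished RPC; only the final caller completes the request. */
static void demo_rpc_done(struct demo_dreq *dreq)
{
	/* atomic_fetch_sub() returns the old value; 1 means we were last. */
	if (atomic_fetch_sub(&dreq->complete, 1) == 1)
		demo_direct_complete(dreq);
}

int main(void)
{
	struct demo_dreq dreq = { .complete = 3 };

	demo_rpc_done(&dreq);   /* RPC 1 */
	demo_rpc_done(&dreq);   /* RPC 2 */
	demo_rpc_done(&dreq);   /* RPC 3: last one out completes the request */
	return 0;
}
```

This mirrors why the patch's comment stresses holding page references and not trusting a stack-allocated iocb: nothing may be released or signalled until that final decrement, which can happen long after a synchronous waiter has been interrupted.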