summaryrefslogtreecommitdiff
path: root/fs/aio.c
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2018-07-16 12:25:17 +0200
committerChristoph Hellwig <hch@lst.de>2018-08-06 10:24:39 +0200
commite8693bcfa0b4a56268946f0756153d942cb66cf7 (patch)
tree49ee2926bf5140d84172acb5f60ffd44b336e014 /fs/aio.c
parentbfe4037e722ec672c9dafd5730d9132afeeb76e9 (diff)
downloadlwn-e8693bcfa0b4a56268946f0756153d942cb66cf7.tar.gz
lwn-e8693bcfa0b4a56268946f0756153d942cb66cf7.zip
aio: allow direct aio poll completions for keyed wakeups
If we get a keyed wakeup for an aio poll waitqueue and wake can acquire the ctx_lock without spinning we can just complete the iocb straight from the wakeup callback to avoid a context switch. Signed-off-by: Christoph Hellwig <hch@lst.de> Tested-by: Avi Kivity <avi@scylladb.com>
Diffstat (limited to 'fs/aio.c')
-rw-r--r--fs/aio.c17
1 files changed, 15 insertions, 2 deletions
diff --git a/fs/aio.c b/fs/aio.c
index 2fd19521d8a8..29f2b5b57d32 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1672,13 +1672,26 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
void *key)
{
struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
+ struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
__poll_t mask = key_to_poll(key);
req->woken = true;
/* for instances that support it check for an event match first: */
- if (mask && !(mask & req->events))
- return 0;
+ if (mask) {
+ if (!(mask & req->events))
+ return 0;
+
+ /* try to complete the iocb inline if we can: */
+ if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
+ list_del(&iocb->ki_list);
+ spin_unlock(&iocb->ki_ctx->ctx_lock);
+
+ list_del_init(&req->wait.entry);
+ aio_poll_complete(iocb, mask);
+ return 1;
+ }
+ }
list_del_init(&req->wait.entry);
schedule_work(&req->work);