author     Pavel Begunkov <asml.silence@gmail.com>    2021-09-24 21:59:52 +0100
committer  Jens Axboe <axboe@kernel.dk>               2021-10-19 05:49:53 -0600
commit     f5ed3bcd5b117ffe73b9dc2394bbbad26a68c086 (patch)
tree       314b3885c9c7b7df8d58ea645e2a603884dd8fae /fs/io_uring.c
parent     b3fa03fd1b17f557e2c43736440feed66fb86741 (diff)
io_uring: optimise batch completion
First, convert the rest of the iopoll bits to singly linked lists, and replace the per-request list_add_tail() with splicing a part of the slist.

With that, use io_free_batch_list() to put/free requests. The main advantage is that io_free_batch_list() is now the only user of struct req_batch and friends, so they can be inlined. The main overhead there was the per-request call to the not-inlined io_req_free_batch(), which is expensive enough to matter.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/b37fc6d5954b241e025eead7ab92c6f44a42f229.1632516769.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
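To make the batching idea concrete, here is a small, self-contained C sketch of the shape the message describes. The types and names below (struct toy_req, free_batch_list()) are hypothetical stand-ins, not io_uring's: because completed requests already sit on a singly linked run, they can be put/freed in one pass, instead of paying a list_del()/list_add_tail() plus an out-of-line helper call for every request as the old path did.

/* Hypothetical toy sketch, not io_uring code: batch-freeing a pre-linked run. */
#include <stdio.h>
#include <stdlib.h>

struct toy_req {
        struct toy_req *next;   /* singly linked, like io_wq_work_node */
        int refs;               /* stand-in for the request refcount */
        int id;
};

/*
 * Analogue of the new batched path: the completed requests are already
 * chained together, so one loop drops a reference on each and frees the
 * ones that hit zero.  No per-request list_del()/list_add_tail(), and no
 * out-of-line helper call per request.
 */
static void free_batch_list(struct toy_req *first)
{
        while (first) {
                struct toy_req *next = first->next;

                if (--first->refs == 0) {
                        printf("freeing request %d\n", first->id);
                        free(first);
                }
                first = next;
        }
}

int main(void)
{
        struct toy_req *head = NULL, *tail = NULL;

        /* Build a fake run of three completed requests */
        for (int i = 0; i < 3; i++) {
                struct toy_req *req = calloc(1, sizeof(*req));

                req->id = i;
                req->refs = 1;
                if (tail)
                        tail->next = req;
                else
                        head = req;
                tail = req;
        }

        free_batch_list(head);
        return 0;
}

The win the message claims is exactly this: one loop over an already-linked run, with the batch bookkeeping inlined, rather than a not-inlined helper call per request.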
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c | 38
1 file changed, 10 insertions(+), 28 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index fb122c2b69f2..91077b56a564 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2421,35 +2421,13 @@ static inline bool io_run_task_work(void)
         return false;
 }
 
-/*
- * Find and free completed poll iocbs
- */
-static void io_iopoll_complete(struct io_ring_ctx *ctx, struct list_head *done)
-{
-        struct req_batch rb;
-        struct io_kiocb *req;
-
-        io_init_req_batch(&rb);
-        while (!list_empty(done)) {
-                req = list_first_entry(done, struct io_kiocb, inflight_entry);
-                list_del(&req->inflight_entry);
-
-                if (req_ref_put_and_test(req))
-                        io_req_free_batch(&rb, req, &ctx->submit_state);
-        }
-
-        io_commit_cqring(ctx);
-        io_cqring_ev_posted_iopoll(ctx);
-        io_req_free_batch_finish(ctx, &rb);
-}
-
 static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 {
         struct io_wq_work_node *pos, *start, *prev;
         unsigned int poll_flags = BLK_POLL_NOSLEEP;
+        struct io_wq_work_list list;
         DEFINE_IO_COMP_BATCH(iob);
         int nr_events = 0;
-        LIST_HEAD(done);
 
         /*
          * Only spin for completions if we don't have multiple devices hanging
@@ -2496,15 +2474,19 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
                 if (!smp_load_acquire(&req->iopoll_completed))
                         break;
                 __io_cqring_fill_event(ctx, req->user_data, req->result,
-                                io_put_rw_kbuf(req));
-
-                list_add_tail(&req->inflight_entry, &done);
+                                        io_put_rw_kbuf(req));
                 nr_events++;
         }
 
+        if (unlikely(!nr_events))
+                return 0;
+
+        io_commit_cqring(ctx);
+        io_cqring_ev_posted_iopoll(ctx);
+        list.first = start ? start->next : ctx->iopoll_list.first;
+        list.last = prev;
         wq_list_cut(&ctx->iopoll_list, prev, start);
-        if (nr_events)
-                io_iopoll_complete(ctx, &done);
+        io_free_batch_list(ctx, &list);
         return nr_events;
 }
 