author    | Pavel Begunkov <asml.silence@gmail.com> | 2021-08-09 13:04:08 +0100
committer | Jens Axboe <axboe@kernel.dk> | 2021-08-23 13:07:59 -0600
commit    | 864ea921b0300fe5a4db9136b7e307e94b369530 (patch)
tree      | 3f62cdf2545e515b9d31f223989fb775cf19726a /fs/io_uring.c
parent    | 8724dd8c833832c398c3578340374f45d6d9dd0d (diff)
io_uring: refactor io_alloc_req
Replace the main if around io_flush_cached_reqs() in io_alloc_req() with an
inverted condition + goto, so all the cases are handled in the same way. Also
extract io_preinit_req() to make the code cleaner and easier to refer to.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/1abcba1f7b55dc53bf1dbe95036e345ffb1d5b01.1628471125.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
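To make the shape of the control-flow change easier to see, here is a minimal,
self-contained userspace sketch of the "inverted condition + goto" pattern. It
is not the kernel code: the names (cached, cache_refill_from_completed(),
slow_refill(), alloc_one()) are hypothetical stand-ins for state->free_reqs,
io_flush_cached_reqs() and the bulk-allocation slow path.

#include <stdbool.h>
#include <stdio.h>

static int cached;                /* toy stand-in for state->free_reqs */

/* hypothetical helper, playing the role of io_flush_cached_reqs() */
static bool cache_refill_from_completed(void)
{
        return false;             /* pretend nothing could be recycled;
                                     a real refill would repopulate 'cached' */
}

/* hypothetical slow path, playing the role of the bulk allocation */
static int slow_refill(void)
{
        return 4;                 /* pretend a batch of four was allocated */
}

static int alloc_one(void)
{
        /*
         * Old shape: if (!cached) { ...nested slow path with its own
         * branches... }.  New shape: invert the test so the fast path
         * jumps straight to the shared exit and the slow path reads
         * linearly; every case ends up at the same label.
         */
        if (cached || cache_refill_from_completed())
                goto got_one;

        cached = slow_refill();
got_one:
        return --cached;          /* shared exit for all paths */
}

int main(void)
{
        printf("remaining after one allocation: %d\n", alloc_one());
        return 0;
}

The payoff is purely structural: no behaviour changes, but the fast path and
all of the slow-path branches converge on the same label instead of living in
a nested block.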
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 66
1 file changed, 33 insertions(+), 33 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 620ab4c90dff..8892747bd173 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1702,6 +1702,19 @@ static void io_req_complete_failed(struct io_kiocb *req, long res)
         io_req_complete_post(req, res, 0);
 }
 
+/*
+ * Don't initialise the fields below on every allocation, but do that in
+ * advance and keep them valid across allocations.
+ */
+static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
+{
+        req->ctx = ctx;
+        req->link = NULL;
+        req->async_data = NULL;
+        /* not necessary, but safer to zero */
+        req->result = 0;
+}
+
 static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
                                         struct io_comp_state *cs)
 {
@@ -1744,45 +1757,31 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
 static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
 {
         struct io_submit_state *state = &ctx->submit_state;
+        gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
+        int ret, i;
 
         BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
 
-        if (!state->free_reqs) {
-                gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
-                int ret, i;
-
-                if (io_flush_cached_reqs(ctx))
-                        goto got_req;
-
-                ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
-                                            state->reqs);
-
-                /*
-                 * Bulk alloc is all-or-nothing. If we fail to get a batch,
-                 * retry single alloc to be on the safe side.
-                 */
-                if (unlikely(ret <= 0)) {
-                        state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
-                        if (!state->reqs[0])
-                                return NULL;
-                        ret = 1;
-                }
+        if (likely(state->free_reqs || io_flush_cached_reqs(ctx)))
+                goto got_req;
 
-                /*
-                 * Don't initialise the fields below on every allocation, but
-                 * do that in advance and keep valid on free.
-                 */
-                for (i = 0; i < ret; i++) {
-                        struct io_kiocb *req = state->reqs[i];
+        ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
+                                    state->reqs);
 
-                        req->ctx = ctx;
-                        req->link = NULL;
-                        req->async_data = NULL;
-                        /* not necessary, but safer to zero */
-                        req->result = 0;
-                }
-                state->free_reqs = ret;
+        /*
+         * Bulk alloc is all-or-nothing. If we fail to get a batch,
+         * retry single alloc to be on the safe side.
+         */
+        if (unlikely(ret <= 0)) {
+                state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
+                if (!state->reqs[0])
+                        return NULL;
+                ret = 1;
         }
+
+        for (i = 0; i < ret; i++)
+                io_preinit_req(state->reqs[i], ctx);
+        state->free_reqs = ret;
 got_req:
         state->free_reqs--;
         return state->reqs[state->free_reqs];
@@ -6569,6 +6568,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
         unsigned int sqe_flags;
         int personality, ret = 0;
 
+        /* req is partially pre-initialised, see io_preinit_req() */
         req->opcode = READ_ONCE(sqe->opcode);
         /* same numerical values with corresponding REQ_F_*, safe to copy */
         req->flags = sqe_flags = READ_ONCE(sqe->flags);
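The design point behind io_preinit_req() is that fields such as req->ctx and
req->link stay valid across allocations, so they only need to be set when a
request first enters the cache, not on every hand-out. The sketch below is a
rough userspace analogue using malloc() rather than the kernel slab bulk
allocator; struct toy_req, struct toy_cache, toy_preinit_req() and
toy_alloc_req() are made-up names, and error handling is deliberately minimal.

#include <stdio.h>
#include <stdlib.h>

#define BATCH 8

struct toy_req {
        void *ctx;                /* stays valid across allocations, like req->ctx */
        struct toy_req *link;
        int result;
};

struct toy_cache {
        struct toy_req *reqs[BATCH];
        int free_reqs;
        void *ctx;
};

/* done once per object when it enters the cache, mirrors io_preinit_req() */
static void toy_preinit_req(struct toy_req *req, void *ctx)
{
        req->ctx = ctx;
        req->link = NULL;
        req->result = 0;
}

static struct toy_req *toy_alloc_req(struct toy_cache *c)
{
        int i;

        if (c->free_reqs)
                goto got_req;     /* hot path: hand out a recycled object untouched */

        /* slow path: refill a whole batch and pre-initialise it once */
        for (i = 0; i < BATCH; i++) {
                c->reqs[i] = malloc(sizeof(*c->reqs[i]));
                if (!c->reqs[i])
                        return NULL;      /* sketch only: no partial-batch cleanup */
                toy_preinit_req(c->reqs[i], c->ctx);
        }
        c->free_reqs = BATCH;
got_req:
        return c->reqs[--c->free_reqs];
}

int main(void)
{
        struct toy_cache cache = { .ctx = &cache };
        struct toy_req *req = toy_alloc_req(&cache);

        printf("req %p, ctx %p\n", (void *)req, req ? req->ctx : NULL);
        return 0;
}

In the kernel version the per-use fields (opcode, flags, and so on) are still
filled in later by io_init_req(), which is why the patch adds the "req is
partially pre-initialised" comment there.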