author | Pavel Begunkov <asml.silence@gmail.com> | 2021-02-10 00:03:17 +0000 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2021-02-10 07:28:43 -0700 |
commit | bf019da7fcbe7e42372582cc339fd1fb8e1e4fa5 (patch) | |
tree | c3f80a602a087ecc133ece18989ee5557dd0c642 /fs/io_uring.c | |
parent | 9ae7246321d2b735867f6767e0fab96dd248c555 (diff) | |
io_uring: persistent req cache
Don't free batch-allocated requests across syscalls.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 21 |
1 file changed, 13 insertions, 8 deletions
```diff
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 9b84d6314c11..1f0b3b332d32 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -266,6 +266,8 @@ struct io_sq_data {
 
 #define IO_IOPOLL_BATCH			8
 #define IO_COMPL_BATCH			32
+#define IO_REQ_CACHE_SIZE		8
+#define IO_REQ_ALLOC_BATCH		8
 
 struct io_comp_state {
 	unsigned int		nr;
@@ -278,7 +280,7 @@ struct io_submit_state {
 	/*
 	 * io_kiocb alloc cache
 	 */
-	void			*reqs[IO_IOPOLL_BATCH];
+	void			*reqs[IO_REQ_CACHE_SIZE];
 	unsigned int		free_reqs;
 
 	bool			plug_started;
@@ -1942,13 +1944,14 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
 {
 	struct io_submit_state *state = &ctx->submit_state;
 
+	BUILD_BUG_ON(IO_REQ_ALLOC_BATCH > ARRAY_SIZE(state->reqs));
+
 	if (!state->free_reqs) {
 		gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
-		size_t sz;
 		int ret;
 
-		sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
-		ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
+		ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
+					    state->reqs);
 
 		/*
 		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
@@ -6629,10 +6632,6 @@ static void io_submit_state_end(struct io_submit_state *state,
 	if (state->plug_started)
 		blk_finish_plug(&state->plug);
 	io_state_file_put(state);
-	if (state->free_reqs) {
-		kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
-		state->free_reqs = 0;
-	}
 }
 
 /*
@@ -8632,6 +8631,8 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
 
 static void io_ring_ctx_free(struct io_ring_ctx *ctx)
 {
+	struct io_submit_state *submit_state = &ctx->submit_state;
+
 	io_finish_async(ctx);
 	io_sqe_buffers_unregister(ctx);
 
@@ -8642,6 +8643,10 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
 		ctx->mm_account = NULL;
 	}
 
+	if (submit_state->free_reqs)
+		kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
+				     submit_state->reqs);
+
 #ifdef CONFIG_BLK_CGROUP
 	if (ctx->sqo_blkcg_css)
 		css_put(ctx->sqo_blkcg_css);
```
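
The sketch below illustrates the allocation pattern this patch moves io_uring to: a fixed-size per-context cache of requests that is refilled in batches when it runs dry and drained only when the context itself is destroyed, not at the end of each submission call. It is a minimal userspace analogue under stated assumptions, not kernel code: the `struct ctx`/`struct req` types, the `REQ_*` constants, and the function names are illustrative, and plain `malloc()`/`free()` stand in for `kmem_cache_alloc_bulk()`/`kmem_cache_free_bulk()`.

```c
/*
 * Minimal userspace sketch of a persistent per-context request cache.
 * Hypothetical names; malloc()/free() stand in for the kernel's bulk
 * slab allocation APIs.
 */
#include <stdlib.h>

#define REQ_CACHE_SIZE	8
#define REQ_ALLOC_BATCH	8

/* mirrors the patch's BUILD_BUG_ON(): a refill batch must fit the cache */
_Static_assert(REQ_ALLOC_BATCH <= REQ_CACHE_SIZE, "batch must fit cache");

struct req { int opcode; };

struct ctx {
	void		*reqs[REQ_CACHE_SIZE];	/* cached, ready-to-use requests */
	unsigned int	free_reqs;		/* number of valid entries in reqs[] */
};

/* Refill the cache with a fixed-size batch only when it is empty. */
static struct req *alloc_req(struct ctx *ctx)
{
	if (!ctx->free_reqs) {
		unsigned int i;

		for (i = 0; i < REQ_ALLOC_BATCH; i++) {
			void *r = malloc(sizeof(struct req));

			if (!r)
				break;
			ctx->reqs[i] = r;
		}
		if (!i)			/* could not get even one request */
			return NULL;
		ctx->free_reqs = i;
	}
	return ctx->reqs[--ctx->free_reqs];
}

/*
 * End of one submission call: unlike the old io_submit_state_end(),
 * nothing is returned to the allocator here, so leftover cached
 * requests are reused by the next call.
 */
static void submit_state_end(struct ctx *ctx)
{
	(void)ctx;
}

/* The cache is drained only when the whole context is torn down. */
static void ctx_free(struct ctx *ctx)
{
	while (ctx->free_reqs)
		free(ctx->reqs[--ctx->free_reqs]);
}

int main(void)
{
	struct ctx ctx = { .free_reqs = 0 };
	struct req *req = alloc_req(&ctx);	/* first call triggers a batch refill */

	/* completed request goes back to the cache (simplified; the kernel
	 * handles completion separately) */
	ctx.reqs[ctx.free_reqs++] = req;

	submit_state_end(&ctx);			/* cache survives the "syscall" */
	ctx_free(&ctx);				/* memory reclaimed once, at teardown */
	return 0;
}
```

The effect of the change is visible in `submit_state_end()` above: it no longer hands cached requests back to the allocator, so a busy ring amortizes request allocation across many submission calls and the memory is reclaimed once, at context teardown.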