| author | Pavel Begunkov <asml.silence@gmail.com> | 2021-06-14 23:37:30 +0100 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2021-06-15 15:38:40 -0600 |
| commit | 76cc33d79175a1b224bf02d3ff6c7be53fc684d5 (patch) | |
| tree | b6a3e6d459a86b0ca581b17899c707748cd55d11 /fs | |
| parent | 0499e582aaff4e4072a760d1f31434acb50c7813 (diff) | |
io_uring: refactor io_req_defer()
Rename io_req_defer() to io_drain_req() and refactor it, uncoupling it
from io_queue_sqe()'s error handling and preparing for upcoming
optimisations. Also, prioritise the non-IOSQE_ASYNC path.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/4f17dd56e7fbe52d1866f8acd8efe3284d2bebcb.1623709150.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
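In practice, the refactoring changes the function's contract from an errno-style int to a bool. A minimal sketch of the before/after calling convention (illustrative only, simplified from the hunks below):

```c
/* Before: the caller decodes an errno-style result. */
ret = io_req_defer(req);	/* 0, -EIOCBQUEUED, or an error */
if (ret) {
	if (ret != -EIOCBQUEUED)
		io_req_complete_failed(req, ret);	/* caller owns failure */
	return;
}

/*
 * After: io_drain_req() owns the request whenever it returns true
 * (deferred, punted to async work, or completed as failed), so the
 * caller only asks "did you take it?" and error handling no longer
 * leaks into io_queue_sqe().
 */
if (io_drain_req(req))
	return;
```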
Diffstat (limited to 'fs')
-rw-r--r-- | fs/io_uring.c | 39
1 file changed, 19 insertions(+), 20 deletions(-)
```diff
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 74c8334d67a4..fc764e912844 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5998,7 +5998,7 @@ static u32 io_get_sequence(struct io_kiocb *req)
 	return ctx->cached_sq_head - nr_reqs;
 }
 
-static int io_req_defer(struct io_kiocb *req)
+static bool io_drain_req(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_defer_entry *de;
@@ -6008,27 +6008,29 @@ static int io_req_defer(struct io_kiocb *req)
 	/* Still need defer if there is pending req in defer list. */
 	if (likely(list_empty_careful(&ctx->defer_list) &&
 		!(req->flags & REQ_F_IO_DRAIN)))
-		return 0;
+		return false;
 
 	seq = io_get_sequence(req);
 	/* Still a chance to pass the sequence check */
 	if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
-		return 0;
+		return false;
 
 	ret = io_req_prep_async(req);
 	if (ret)
 		return ret;
 	io_prep_async_link(req);
 	de = kmalloc(sizeof(*de), GFP_KERNEL);
-	if (!de)
-		return -ENOMEM;
+	if (!de) {
+		io_req_complete_failed(req, ret);
+		return true;
+	}
 
 	spin_lock_irq(&ctx->completion_lock);
 	if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
 		spin_unlock_irq(&ctx->completion_lock);
 		kfree(de);
 		io_queue_async_work(req);
-		return -EIOCBQUEUED;
+		return true;
 	}
 
 	trace_io_uring_defer(ctx, req, req->user_data);
@@ -6036,7 +6038,7 @@ static int io_req_defer(struct io_kiocb *req)
 	de->seq = seq;
 	list_add_tail(&de->list, &ctx->defer_list);
 	spin_unlock_irq(&ctx->completion_lock);
-	return -EIOCBQUEUED;
+	return true;
 }
 
 static void io_clean_op(struct io_kiocb *req)
@@ -6447,21 +6449,18 @@ static void __io_queue_sqe(struct io_kiocb *req)
 
 static void io_queue_sqe(struct io_kiocb *req)
 {
-	int ret;
+	if (io_drain_req(req))
+		return;
 
-	ret = io_req_defer(req);
-	if (ret) {
-		if (ret != -EIOCBQUEUED) {
-fail_req:
-			io_req_complete_failed(req, ret);
-		}
-	} else if (req->flags & REQ_F_FORCE_ASYNC) {
-		ret = io_req_prep_async(req);
-		if (unlikely(ret))
-			goto fail_req;
-		io_queue_async_work(req);
-	} else {
+	if (likely(!(req->flags & REQ_F_FORCE_ASYNC))) {
 		__io_queue_sqe(req);
+	} else {
+		int ret = io_req_prep_async(req);
+
+		if (unlikely(ret))
+			io_req_complete_failed(req, ret);
+		else
+			io_queue_async_work(req);
 	}
 }
```
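For readability, here is io_queue_sqe() as it reads after the patch, reconstructed from the hunks above (context outside the hunks is not shown):

```c
static void io_queue_sqe(struct io_kiocb *req)
{
	/* Drain path: io_drain_req() now completes failed requests itself. */
	if (io_drain_req(req))
		return;

	/* The common, non-IOSQE_ASYNC path is checked first. */
	if (likely(!(req->flags & REQ_F_FORCE_ASYNC))) {
		__io_queue_sqe(req);
	} else {
		int ret = io_req_prep_async(req);

		if (unlikely(ret))
			io_req_complete_failed(req, ret);
		else
			io_queue_async_work(req);
	}
}
```

With the only remaining failure handled inline on the force-async branch, the old fail_req label and its goto are no longer needed.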