author     Pavel Begunkov <asml.silence@gmail.com>    2022-03-21 22:02:20 +0000
committer  Jens Axboe <axboe@kernel.dk>               2022-04-24 17:34:16 -0600
commit     60053be859b33f7a381a3f1755db5caffaa3cab8 (patch)
tree       549a2d78804481e18d8448c4a4128587eea38234 /fs/io_uring.c
parent     68ca8fc00277ad04c975c382bd6e2d500e5c7185 (diff)
io_uring: remove extra ifs around io_commit_cqring
Now io_commit_cqring() is simple and tolerates being called without a new CQE having been filled, so kill a bunch of guards that are no longer needed.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/36aed692dff402bba00a444a63a9cd2e97a340ea.1647897811.git.asml.silence@gmail.com
[axboe: fold in followup fix]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
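To see why the guards became redundant, note that by this point io_commit_cqring() roughly amounts to publishing the kernel's cached CQ tail to the shared ring, and republishing an unchanged tail is harmless. The userspace sketch below models only that idea; the names fake_cq, fake_post_cqe() and fake_commit_cqring() and their fields are invented for illustration and are not the kernel's API.

/*
 * Hedged userspace model (not the kernel code): committing the CQ ring is
 * just publishing a cached tail counter, so doing it when nothing new was
 * posted rewrites the same value, which is why the "if (posted)" guards
 * in the diff below can be dropped.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_cq {
	_Atomic unsigned int tail;	/* what the consumer side reads */
	unsigned int cached_tail;	/* private, bumped per posted CQE */
};

/* Rough analogue of io_commit_cqring(): publish the cached tail. */
static void fake_commit_cqring(struct fake_cq *cq)
{
	atomic_store_explicit(&cq->tail, cq->cached_tail, memory_order_release);
}

/* Post a CQE only if there is an event; returns whether one was posted. */
static bool fake_post_cqe(struct fake_cq *cq, bool have_event)
{
	if (!have_event)
		return false;
	cq->cached_tail++;
	return true;
}

int main(void)
{
	struct fake_cq cq = { .tail = 0, .cached_tail = 0 };

	/* Old pattern: commit only when something was posted. */
	if (fake_post_cqe(&cq, false))
		fake_commit_cqring(&cq);

	/* New pattern: commit unconditionally; with no new CQE the store
	 * simply republishes the same tail value. */
	fake_post_cqe(&cq, false);
	fake_commit_cqring(&cq);

	printf("tail=%u cached_tail=%u\n",
	       atomic_load_explicit(&cq.tail, memory_order_acquire),
	       cq.cached_tail);
	return 0;
}

Note that the event notification, io_cqring_ev_posted(), stays guarded by "posted" in the hunks below: committing an unchanged tail is free, but waking waiters when nothing was posted would be wasted work.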
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  25
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 7fefdb9c4ae3..f61490aee3d3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1920,8 +1920,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 			   ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
 	}
 
-	if (posted)
-		io_commit_cqring(ctx);
+	io_commit_cqring(ctx);
 	spin_unlock(&ctx->completion_lock);
 	if (posted)
 		io_cqring_ev_posted(ctx);
@@ -2361,8 +2360,7 @@ static void __io_req_find_next_prep(struct io_kiocb *req)
 
 	spin_lock(&ctx->completion_lock);
 	posted = io_disarm_next(req);
-	if (posted)
-		io_commit_cqring(ctx);
+	io_commit_cqring(ctx);
 	spin_unlock(&ctx->completion_lock);
 	if (posted)
 		io_cqring_ev_posted(ctx);
@@ -10128,8 +10126,7 @@ static __cold bool io_kill_timeouts(struct io_ring_ctx *ctx,
 		}
 	}
 	spin_unlock_irq(&ctx->timeout_lock);
-	if (canceled != 0)
-		io_commit_cqring(ctx);
+	io_commit_cqring(ctx);
 	spin_unlock(&ctx->completion_lock);
 	if (canceled != 0)
 		io_cqring_ev_posted(ctx);
@@ -10149,11 +10146,13 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 		io_unregister_personality(ctx, index);
 	mutex_unlock(&ctx->uring_lock);
 
-	io_kill_timeouts(ctx, NULL, true);
-	io_poll_remove_all(ctx, NULL, true);
-
-	/* if we failed setting up the ctx, we might not have any rings */
-	io_iopoll_try_reap_events(ctx);
+	/* failed during ring init, it couldn't have issued any requests */
+	if (ctx->rings) {
+		io_kill_timeouts(ctx, NULL, true);
+		io_poll_remove_all(ctx, NULL, true);
+		/* if we failed setting up the ctx, we might not have any rings */
+		io_iopoll_try_reap_events(ctx);
+	}
 
 	INIT_WORK(&ctx->exit_work, io_ring_exit_work);
 	/*
@@ -10245,6 +10244,10 @@ static __cold void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 	struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
 	struct io_uring_task *tctx = task ? task->io_uring : NULL;
 
+	/* failed during ring init, it couldn't have issued any requests */
+	if (!ctx->rings)
+		return;
+
 	while (1) {
 		enum io_wq_cancel cret;
 		bool ret = false;
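The last two hunks are the followup fix that was folded in: both teardown paths now check ctx->rings first, because a ctx whose rings were never allocated cannot have issued any requests. A minimal, self-contained sketch of that guard follows; the names fake_ctx, fake_rings and fake_try_cancel_requests() are invented for illustration and are not the kernel code.

/*
 * Hedged model of the followup: if ring setup failed, ctx->rings is NULL
 * and no request can ever have been issued, so cancellation and teardown
 * paths may return before touching any per-request state.
 */
#include <stddef.h>
#include <stdio.h>

struct fake_rings {
	unsigned int cq_tail;
};

struct fake_ctx {
	struct fake_rings *rings;	/* NULL if ring allocation failed */
};

/* Rough analogue of the guarded io_uring_try_cancel_requests(). */
static void fake_try_cancel_requests(struct fake_ctx *ctx)
{
	/* failed during ring init: nothing was ever submitted */
	if (!ctx->rings)
		return;

	/* ... cancel in-flight requests, reap events, etc. ... */
	printf("cancelling, cq_tail=%u\n", ctx->rings->cq_tail);
}

int main(void)
{
	struct fake_rings rings = { .cq_tail = 0 };
	struct fake_ctx broken = { .rings = NULL };
	struct fake_ctx ok = { .rings = &rings };

	fake_try_cancel_requests(&broken);	/* early return, prints nothing */
	fake_try_cancel_requests(&ok);		/* proceeds to cancellation */
	return 0;
}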