author    Pavel Begunkov <asml.silence@gmail.com>  2021-02-04 13:51:58 +0000
committer Jens Axboe <axboe@kernel.dk>             2021-02-04 08:05:46 -0700
commit    eeb60b9ab4000d20261973642dfc9fb0e4b5d073 (patch)
tree      fc4101dcc7c860897ea716bc08693eee49ecdc25 /fs/io_uring.c
parent    c1d5a224683b333ddbe278e455d639ccd4f5ca2b (diff)
io_uring: refactor io_cqring_wait
It's easy to make a mistake in io_cqring_wait(), because every break/continue
clause in the loop has to keep prepare_to_wait()/finish_wait() correctly
paired. Extract all of that into a new helper, io_cqring_wait_schedule(),
transforming the loop into a simple series of function calls:

    prepare(); check_and_schedule(); finish();

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
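Concretely, the wait loop after this patch collapses to the shape below. This
mirrors the replacement hunk in the diff that follows, with the surrounding
function body elided; the return-value comment restates the helper's contract:

    do {
        io_cqring_overflow_flush(ctx, false, NULL, NULL);
        prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
                                  TASK_INTERRUPTIBLE);
        /* <0: error, 0: done waiting, >0: go around again */
        ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
        finish_wait(&ctx->wait, &iowq.wq);
    } while (ret > 0);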
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--   fs/io_uring.c | 43
1 file changed, 22 insertions(+), 21 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5b735635b8f0..dcb9e937daa3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -7195,6 +7195,25 @@ static int io_run_task_work_sig(void)
return -EINTR;
}
+/* when returns >0, the caller should retry */
+static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
+ struct io_wait_queue *iowq,
+ signed long *timeout)
+{
+ int ret;
+
+ /* make sure we run task_work before checking for signals */
+ ret = io_run_task_work_sig();
+ if (ret || io_should_wake(iowq))
+ return ret;
+ /* let the caller flush overflows, retry */
+ if (test_bit(0, &ctx->cq_check_overflow))
+ return 1;
+
+ *timeout = schedule_timeout(*timeout);
+ return !*timeout ? -ETIME : 1;
+}
+
/*
* Wait until events become available, if we don't already have some. The
* application must reap them itself, as they reside on the shared cq ring.
@@ -7251,27 +7270,9 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
io_cqring_overflow_flush(ctx, false, NULL, NULL);
prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
TASK_INTERRUPTIBLE);
- /* make sure we run task_work before checking for signals */
- ret = io_run_task_work_sig();
- if (ret > 0) {
- finish_wait(&ctx->wait, &iowq.wq);
- continue;
- }
- else if (ret < 0)
- break;
- if (io_should_wake(&iowq))
- break;
- if (test_bit(0, &ctx->cq_check_overflow)) {
- finish_wait(&ctx->wait, &iowq.wq);
- continue;
- }
- timeout = schedule_timeout(timeout);
- if (timeout == 0) {
- ret = -ETIME;
- break;
- }
- } while (1);
- finish_wait(&ctx->wait, &iowq.wq);
+ ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
+ finish_wait(&ctx->wait, &iowq.wq);
+ } while (ret > 0);
restore_saved_sigmask_unless(ret == -EINTR);
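
The shape this patch enforces, with every prepare paired with exactly one
finish and all break/continue decisions funnelled through a single helper's
return value, is not io_uring-specific. Below is a minimal userspace analogue
of the pattern using POSIX condition variables. All names in it (waitq,
should_wake(), wait_schedule(), wait_events()) are invented for illustration,
and it uses an absolute deadline rather than the kernel's remaining-jiffies
timeout:

    /* Userspace sketch of the prepare/schedule/finish pattern, using pthreads.
     * All names are hypothetical. Build with: cc sketch.c -o sketch -lpthread */
    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    struct waitq {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             events;     /* analogue of the CQ fill level */
        int             min_events; /* analogue of iowq.to_wait */
    };

    static int should_wake(struct waitq *wq)
    {
        return wq->events >= wq->min_events;
    }

    /* Analogue of io_cqring_wait_schedule(): <0 on error, 0 when done,
     * >0 when the caller should retry. Called with wq->lock held. */
    static int wait_schedule(struct waitq *wq, const struct timespec *deadline)
    {
        int ret;

        if (should_wake(wq))
            return 0;
        ret = pthread_cond_timedwait(&wq->cond, &wq->lock, deadline);
        if (ret == ETIMEDOUT)
            return -ETIME;
        return 1; /* woken (possibly spuriously): re-check in the loop */
    }

    static int wait_events(struct waitq *wq, const struct timespec *deadline)
    {
        int ret;

        do {
            pthread_mutex_lock(&wq->lock);     /* prepare */
            ret = wait_schedule(wq, deadline); /* check and schedule */
            pthread_mutex_unlock(&wq->lock);   /* finish */
        } while (ret > 0);

        return ret;
    }

    int main(void)
    {
        struct waitq wq = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .cond = PTHREAD_COND_INITIALIZER,
            .events = 0,
            .min_events = 1,
        };
        struct timespec deadline;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 1; /* no producer thread: expect -ETIME */
        printf("wait_events() = %d\n", wait_events(&wq, &deadline));
        return 0;
    }

One difference worth noting: schedule_timeout() returns the remaining jiffies,
which is why the kernel helper takes a signed long *timeout and updates it in
place across retries; the condvar analogue sidesteps that bookkeeping by
passing the same absolute deadline on every iteration.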