author | Pavel Begunkov <asml.silence@gmail.com> | 2021-03-19 17:22:44 +0000 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2021-04-11 17:41:59 -0600 |
commit | c15b79dee51bd73d56fe526a779e8fbc02b09e6c (patch) | |
tree | caf7de1d56bf635ec14f744847e6171c273622e8 /fs/io_uring.c | |
parent | e1d767f078b88423bb8ed179fbfe3369395e10f8 (diff) | |
io_uring: optimise io_req_task_work_add()
Inline io_task_work_add() into io_req_task_work_add(). They both work
with a request, so keeping them separate doesn't make things much
clearer, and merging them allows the result to be optimised. Apart from
small wins like not reading req->ctx and not calculating @notify in the
hot path (i.e. with tctx->task_state set), it avoids calling
wake_up_process() for every single add: the wakeup now happens only
after task_work_add() has actually been done.
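For orientation before the raw diff, here is a condensed sketch of the merged fast path. It is simplified from the diff below: the queueing onto tctx->task_list and the failure-path unlinking under tctx->task_lock are reduced to comments.

```c
/*
 * Condensed sketch of the merged io_req_task_work_add(); simplified
 * from the diff below (list queueing and the slow-path unlinking
 * under tctx->task_lock are reduced to comments).
 */
static int io_req_task_work_add(struct io_kiocb *req)
{
	struct task_struct *tsk = req->task;
	struct io_uring_task *tctx = tsk->io_uring;
	enum task_work_notify_mode notify;
	int ret = 0;

	/* the PF_EXITING check now sits up front, marked unlikely() */
	if (unlikely(tsk->flags & PF_EXITING))
		return -ESRCH;

	/* ... queue req onto tctx->task_list ... */

	/* hot path: a task_work run is already pending, nothing more to
	 * do; note that req->ctx is never read and @notify is never
	 * computed on this side of the check */
	if (test_and_set_bit(0, &tctx->task_state))
		return 0;

	/* cold path only: pick the notification mode */
	notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;

	if (!task_work_add(tsk, &tctx->task_work, notify)) {
		/* wake the task only after task_work_add() succeeded */
		wake_up_process(tsk);
		return 0;
	}

	/* ... slow path: unlink req from tctx->task_list under
	 * tctx->task_lock ... */
	return ret;
}
```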
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 50 |
1 file changed, 18 insertions(+), 32 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6b805b1c003d..7a6b2b313328 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1911,13 +1911,17 @@ static void tctx_task_work(struct callback_head *cb)
 	cond_resched();
 }
 
-static int io_task_work_add(struct task_struct *tsk, struct io_kiocb *req,
-			    enum task_work_notify_mode notify)
+static int io_req_task_work_add(struct io_kiocb *req)
 {
+	struct task_struct *tsk = req->task;
 	struct io_uring_task *tctx = tsk->io_uring;
+	enum task_work_notify_mode notify;
 	struct io_wq_work_node *node, *prev;
 	unsigned long flags;
-	int ret;
+	int ret = 0;
+
+	if (unlikely(tsk->flags & PF_EXITING))
+		return -ESRCH;
 
 	WARN_ON_ONCE(!tctx);
 
@@ -1930,14 +1934,23 @@ static int io_task_work_add(struct task_struct *tsk, struct io_kiocb *req,
 	    test_and_set_bit(0, &tctx->task_state))
 		return 0;
 
-	if (!task_work_add(tsk, &tctx->task_work, notify))
+	/*
+	 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
+	 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
+	 * processing task_work. There's no reliable way to tell if TWA_RESUME
+	 * will do the job.
+	 */
+	notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
+
+	if (!task_work_add(tsk, &tctx->task_work, notify)) {
+		wake_up_process(tsk);
 		return 0;
+	}
 
 	/*
 	 * Slow path - we failed, find and delete work. if the work is not
 	 * in the list, it got run and we're fine.
 	 */
-	ret = 0;
 	spin_lock_irqsave(&tctx->task_lock, flags);
 	wq_list_for_each(node, prev, &tctx->task_list) {
 		if (&req->io_task_work.node == node) {
@@ -1951,33 +1964,6 @@ static int io_task_work_add(struct task_struct *tsk, struct io_kiocb *req,
 	return ret;
 }
 
-static int io_req_task_work_add(struct io_kiocb *req)
-{
-	struct task_struct *tsk = req->task;
-	struct io_ring_ctx *ctx = req->ctx;
-	enum task_work_notify_mode notify;
-	int ret;
-
-	if (tsk->flags & PF_EXITING)
-		return -ESRCH;
-
-	/*
-	 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
-	 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
-	 * processing task_work. There's no reliable way to tell if TWA_RESUME
-	 * will do the job.
-	 */
-	notify = TWA_NONE;
-	if (!(ctx->flags & IORING_SETUP_SQPOLL))
-		notify = TWA_SIGNAL;
-
-	ret = io_task_work_add(tsk, req, notify);
-	if (!ret)
-		wake_up_process(tsk);
-
-	return ret;
-}
-
 static bool io_run_task_work_head(struct callback_head **work_head)
 {
 	struct callback_head *work, *next;
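The structure above is a general "first enqueuer notifies" batching pattern: only the producer that flips tctx->task_state pays for task_work_add() and wake_up_process(); later adds just link onto the list and piggyback on the pending run. A minimal user-space analogue of that pattern, with hypothetical names (queue_work(), notify_worker(), work_pending) standing in for the kernel calls:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stands in for tctx->task_state: set while a work run is pending. */
static atomic_bool work_pending;

/* Stands in for task_work_add() + wake_up_process(). */
static void notify_worker(void)
{
	puts("notifying worker");
}

static void queue_work(int id)
{
	printf("queued work item %d\n", id);	/* ~ wq_list_add_tail() */

	/* Only the producer that flips the flag notifies; later
	 * producers see it already set and return immediately. */
	if (!atomic_exchange(&work_pending, true))
		notify_worker();
}

int main(void)
{
	queue_work(1);	/* flips the flag -> notifies */
	queue_work(2);	/* flag already set -> piggybacks */
	queue_work(3);	/* flag already set -> piggybacks */
	return 0;
}
```

For the pattern to stay correct, the consumer must clear the flag before draining the queue, so that items added while the drain is in progress still trigger a fresh notification.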