author	Pavel Begunkov <asml.silence@gmail.com>	2021-01-19 13:32:42 +0000
committer	Jens Axboe <axboe@kernel.dk>	2021-02-01 10:02:42 -0700
commit	eab30c4d20dc761d463445e5130421863ff81505 (patch)
tree	d22c11edb7f9a701d465de59f78f0039cb68fff0 /fs/io_uring.c
parent	02b23a9af5ba4db0a85ebb81c8b376b2fe860d0f (diff)
io_uring: deduplicate failing task_work_add
When io_req_task_work_add() fails, the request will be cancelled by
enqueueing it via the task_works of io-wq. Extract a function for that.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
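For context, the extracted helper re-queues the request's task_work on the
io-wq task when task_work_add() on the original task fails (typically because
that task is exiting). Below is a commented sketch of the helper as the patch
adds it; the comments are editorial annotations, not part of the commit:

/*
 * task_work_add() to the submitting task failed (e.g. that task is
 * exiting), so fall back to the io-wq task, which can still run
 * task_work; this way the request is cancelled rather than leaked.
 */
static void io_req_task_work_add_fallback(struct io_kiocb *req,
					  void (*cb)(struct callback_head *))
{
	struct task_struct *tsk = io_wq_get_task(req->ctx->io_wq);

	init_task_work(&req->task_work, cb);
	/* TWA_NONE: no signal-based notification; the explicit wakeup
	 * below is what gets the io-wq task to process its task_work. */
	task_work_add(tsk, &req->task_work, TWA_NONE);
	wake_up_process(tsk);
}

With the helper in place, each failing call site collapses to the same two
lines, e.g. "if (unlikely(ret)) io_req_task_work_add_fallback(req,
io_req_task_cancel);", instead of repeating the open-coded fallback.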
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	46
1 file changed, 17 insertions(+), 29 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c3e0d6246d71..90c3cad1723b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2171,6 +2171,16 @@ static int io_req_task_work_add(struct io_kiocb *req)
 	return ret;
 }
 
+static void io_req_task_work_add_fallback(struct io_kiocb *req,
+					  void (*cb)(struct callback_head *))
+{
+	struct task_struct *tsk = io_wq_get_task(req->ctx->io_wq);
+
+	init_task_work(&req->task_work, cb);
+	task_work_add(tsk, &req->task_work, TWA_NONE);
+	wake_up_process(tsk);
+}
+
 static void __io_req_task_cancel(struct io_kiocb *req, int error)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -2225,14 +2235,8 @@ static void io_req_task_queue(struct io_kiocb *req)
 
 	percpu_ref_get(&req->ctx->refs);
 	ret = io_req_task_work_add(req);
-	if (unlikely(ret)) {
-		struct task_struct *tsk;
-
-		init_task_work(&req->task_work, io_req_task_cancel);
-		tsk = io_wq_get_task(req->ctx->io_wq);
-		task_work_add(tsk, &req->task_work, TWA_NONE);
-		wake_up_process(tsk);
-	}
+	if (unlikely(ret))
+		io_req_task_work_add_fallback(req, io_req_task_cancel);
 }
 
 static inline void io_queue_next(struct io_kiocb *req)
@@ -2350,13 +2354,8 @@ static void io_free_req_deferred(struct io_kiocb *req)
 
 	init_task_work(&req->task_work, io_put_req_deferred_cb);
 	ret = io_req_task_work_add(req);
-	if (unlikely(ret)) {
-		struct task_struct *tsk;
-
-		tsk = io_wq_get_task(req->ctx->io_wq);
-		task_work_add(tsk, &req->task_work, TWA_NONE);
-		wake_up_process(tsk);
-	}
+	if (unlikely(ret))
+		io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
 }
 
 static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
@@ -3425,15 +3424,8 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 	/* submit ref gets dropped, acquire a new one */
 	refcount_inc(&req->refs);
 	ret = io_req_task_work_add(req);
-	if (unlikely(ret)) {
-		struct task_struct *tsk;
-
-		/* queue just for cancelation */
-		init_task_work(&req->task_work, io_req_task_cancel);
-		tsk = io_wq_get_task(req->ctx->io_wq);
-		task_work_add(tsk, &req->task_work, TWA_NONE);
-		wake_up_process(tsk);
-	}
+	if (unlikely(ret))
+		io_req_task_work_add_fallback(req, io_req_task_cancel);
 	return 1;
 }
 
@@ -5153,12 +5145,8 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 	 */
 	ret = io_req_task_work_add(req);
 	if (unlikely(ret)) {
-		struct task_struct *tsk;
-
 		WRITE_ONCE(poll->canceled, true);
-		tsk = io_wq_get_task(req->ctx->io_wq);
-		task_work_add(tsk, &req->task_work, TWA_NONE);
-		wake_up_process(tsk);
+		io_req_task_work_add_fallback(req, func);
 	}
 	return 1;
 }