summaryrefslogtreecommitdiff
path: root/fs/io_uring.c
diff options
context:
space:
mode:
authorPavel Begunkov <asml.silence@gmail.com>2021-02-28 22:35:09 +0000
committerJens Axboe <axboe@kernel.dk>2021-04-11 17:41:57 -0600
commite83acd7d37d83035f2fe078f656f87418ea2a687 (patch)
tree0f0dc9006b0a54854aeb948a80239b3bb2110726 /fs/io_uring.c
parentd434405aaab7d0ebc516b68a8fc4100922d7f5ef (diff)
downloadlwn-e83acd7d37d83035f2fe078f656f87418ea2a687.tar.gz
lwn-e83acd7d37d83035f2fe078f656f87418ea2a687.zip
io_uring: avoid taking ctx refs for task-cancel
Don't bother to take a ctx->refs for io_req_task_cancel() because it takes uring_lock before putting a request, and the context is promised to stay alive until unlock happens. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- fs/io_uring.c | 4
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index bd14327c8e7e..db0c4b2dd141 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1985,10 +1985,10 @@ static void io_req_task_cancel(struct callback_head *cb)
struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
struct io_ring_ctx *ctx = req->ctx;
+ /* ctx is guaranteed to stay alive while we hold uring_lock */
mutex_lock(&ctx->uring_lock);
__io_req_task_cancel(req, req->result);
mutex_unlock(&ctx->uring_lock);
- percpu_ref_put(&ctx->refs);
}
static void __io_req_task_submit(struct io_kiocb *req)
@@ -2019,14 +2019,12 @@ static void io_req_task_queue(struct io_kiocb *req)
ret = io_req_task_work_add(req);
if (unlikely(ret)) {
req->result = -ECANCELED;
- percpu_ref_get(&req->ctx->refs);
io_req_task_work_add_fallback(req, io_req_task_cancel);
}
}
static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
{
- percpu_ref_get(&req->ctx->refs);
req->result = ret;
req->task_work.func = io_req_task_cancel;