author	Pavel Begunkov <asml.silence@gmail.com>	2021-03-14 20:57:09 +0000
committer	Jens Axboe <axboe@kernel.dk>	2021-03-15 09:32:24 -0600
commit	180f829fe4026bd192447d261e712b6cb84f6202 (patch)
tree	0e3dd92a7f5d7ae9951ea247c8320be12e9d7c5e /fs/io_uring.c
parent	efe814a471e0e58f28f1efaf430c8784a4f36626 (diff)
io_uring: fix complete_post use ctx after free
If io_req_complete_post() put not a final ref, we can't rely on the request's ctx ref, and so ctx may potentially be freed while complete_post() is in io_cqring_ev_posted()/etc.

In that case get an additional ctx reference, and put it in the end, so protecting following io_cqring_ev_posted(). And also prolong ctx lifetime until spin_unlock happens, as we do with mutexes, so added percpu_ref_get() doesn't race with ctx free.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
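To make the two idioms in the message concrete, here is a minimal user-space sketch (not the kernel code, and not part of this patch): C11 atomics stand in for the ctx percpu_ref, a pthread mutex stands in for ctx->completion_lock, and all names (struct ctx, ctx_put(), complete_post()) are invented for the illustration. It also simplifies the patch by always taking the extra reference, where the real code only needs it (and must use percpu_ref_tryget()) when the request did not hold the final ref.

/*
 * Illustrative sketch only.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	pthread_mutex_t completion_lock;
	atomic_int refs;			/* plays the role of ctx->refs */
};

static void ctx_put(struct ctx *c)
{
	if (atomic_fetch_sub(&c->refs, 1) == 1) {
		/*
		 * Same idea as the lock/unlock pairs in io_ring_ctx_free():
		 * once we can take the lock ourselves, anyone who took a
		 * reference under it has also released the lock, so freeing
		 * afterwards cannot race with them.
		 */
		pthread_mutex_lock(&c->completion_lock);
		pthread_mutex_unlock(&c->completion_lock);
		pthread_mutex_destroy(&c->completion_lock);
		free(c);
	}
}

static void complete_post(struct ctx *c)
{
	pthread_mutex_lock(&c->completion_lock);
	/* Take the extra reference while still holding the lock, as the
	 * patch does with percpu_ref_tryget() before spin_unlock. */
	atomic_fetch_add(&c->refs, 1);
	pthread_mutex_unlock(&c->completion_lock);

	/* Work that must not see a freed ctx, cf. io_cqring_ev_posted(). */
	printf("posted completion event\n");

	ctx_put(c);				/* cf. percpu_ref_put() */
}

int main(void)
{
	struct ctx *c = malloc(sizeof(*c));

	pthread_mutex_init(&c->completion_lock, NULL);
	atomic_init(&c->refs, 1);		/* the owner's reference */

	complete_post(c);			/* takes and drops its own ref */
	ctx_put(c);				/* final put frees the ctx */
	return 0;
}

The ordering is the point: the extra reference is taken before the lock is dropped, so the refcount can only reach zero afterwards, and the lock/unlock pair in the teardown path is then guaranteed to wait out any completion still using the ctx.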
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	17
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 217f72d50ff5..f8a683607b25 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1550,14 +1550,17 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
 		io_put_task(req->task, 1);
 		list_add(&req->compl.list, &cs->locked_free_list);
 		cs->locked_free_nr++;
-	} else
-		req = NULL;
+	} else {
+		if (!percpu_ref_tryget(&ctx->refs))
+			req = NULL;
+	}
 	io_commit_cqring(ctx);
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
-	io_cqring_ev_posted(ctx);
 
-	if (req)
+	if (req) {
+		io_cqring_ev_posted(ctx);
 		percpu_ref_put(&ctx->refs);
+	}
 }
 
 static void io_req_complete_state(struct io_kiocb *req, long res,
@@ -8373,11 +8376,13 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
 {
 	/*
 	 * Some may use context even when all refs and requests have been put,
-	 * and they are free to do so while still holding uring_lock, see
-	 * __io_req_task_submit(). Wait for them to finish.
+	 * and they are free to do so while still holding uring_lock or
+	 * completion_lock, see __io_req_task_submit(). Wait for them to finish.
 	 */
 	mutex_lock(&ctx->uring_lock);
 	mutex_unlock(&ctx->uring_lock);
+	spin_lock_irq(&ctx->completion_lock);
+	spin_unlock_irq(&ctx->completion_lock);
 
 	io_sq_thread_finish(ctx);
 	io_sqe_buffers_unregister(ctx);