author     Pavel Begunkov <asml.silence@gmail.com>  2021-02-12 03:23:54 +0000
committer  Jens Axboe <axboe@kernel.dk>             2021-02-12 05:30:25 -0700
commit     04fc6c802dfacba800f5a5d00bea0ebfcc60f840 (patch)
tree       4a4ee9b5924d069c1c4d9f64b24ed9634058be1b /fs/io_uring.c
parent     921b9054e0c4c443c479c21800f6c4c8b43fa1b0 (diff)
io_uring: save ctx put/get for task_work submit
Do a little trick in io_ring_ctx_free(): briefly take uring_lock, which waits for everyone currently holding it, so we can skip pinning ctx with ctx->refs for __io_req_task_submit(), which executes and drops its refs/reqs while holding the lock.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
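The trick rests on a standard mutex property: a lock/unlock pair cannot return until every thread already inside the critical section has released the lock, so an empty lock/unlock acts as a barrier against current holders. A minimal userspace sketch of the idiom, with hypothetical holder() and teardown() functions standing in for __io_req_task_submit() and io_ring_ctx_free():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Holder side: does all its work on the shared context under the lock,
 * like __io_req_task_submit() running under uring_lock. */
static void *holder(void *arg)
{
	pthread_mutex_lock(&lock);
	puts("holder: still safe to touch the context");
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Teardown side: the empty lock/unlock pair waits out every holder that
 * got in before us, like the trick added to io_ring_ctx_free(). */
static void teardown(void)
{
	pthread_mutex_lock(&lock);
	pthread_mutex_unlock(&lock);
	puts("teardown: no holder left inside, safe to free");
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, holder, NULL);
	teardown();
	pthread_join(&t, NULL);
	return 0;
}

Note the barrier only covers holders that acquired the lock before teardown reached it; the kernel side relies on no new task_work submitters entering once the ring is being torn down (see the ctx->sqo_dead check in the first hunk below).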
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  17  ++++++++++++-----
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5c0b1a7dba80..87f2f8e660e8 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2336,6 +2336,7 @@ static void __io_req_task_submit(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
+ /* ctx stays valid until unlock, even if we drop all our ctx->refs */
mutex_lock(&ctx->uring_lock);
if (!ctx->sqo_dead && !io_sq_thread_acquire_mm_files(ctx, req))
__io_queue_sqe(req);
@@ -2347,10 +2348,8 @@ static void __io_req_task_submit(struct io_kiocb *req)
static void io_req_task_submit(struct callback_head *cb)
{
struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
- struct io_ring_ctx *ctx = req->ctx;
__io_req_task_submit(req);
- percpu_ref_put(&ctx->refs);
}
static void io_req_task_queue(struct io_kiocb *req)
@@ -2358,11 +2357,11 @@ static void io_req_task_queue(struct io_kiocb *req)
int ret;
req->task_work.func = io_req_task_submit;
- percpu_ref_get(&req->ctx->refs);
-
ret = io_req_task_work_add(req);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ percpu_ref_get(&req->ctx->refs);
io_req_task_work_add_fallback(req, io_req_task_cancel);
+ }
}
static inline void io_queue_next(struct io_kiocb *req)
@@ -8707,6 +8706,14 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
{
struct io_submit_state *submit_state = &ctx->submit_state;
+ /*
+ * Some may use context even when all refs and requests have been put,
+ * and they are free to do so while still holding uring_lock, see
+ * __io_req_task_submit(). Wait for them to finish.
+ */
+ mutex_lock(&ctx->uring_lock);
+ mutex_unlock(&ctx->uring_lock);
+
io_finish_async(ctx);
io_sqe_buffers_unregister(ctx);
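The payoff is visible in the io_req_task_queue() hunk: the unconditional percpu_ref_get()/percpu_ref_put() pair around every task_work submission disappears, and a reference is taken only on the unlikely failure branch, before the request goes to the cancel fallback (which presumably drops it when done). A minimal sketch of that shape, with hypothetical ctx_get()/ctx_put()/task_work_add() stand-ins for the percpu_ref and task_work APIs:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ctx {
	atomic_int refs;	/* stand-in for percpu_ref */
};

static void ctx_get(struct ctx *c) { atomic_fetch_add(&c->refs, 1); }
static void ctx_put(struct ctx *c) { atomic_fetch_sub(&c->refs, 1); }

/* Hypothetical queueing primitive; returns false on the rare failure,
 * like io_req_task_work_add() returning an error. */
static bool task_work_add(struct ctx *c)
{
	(void)c;
	return true;
}

static void queue(struct ctx *c)
{
	/* Fast path: no get/put at all; the lock/unlock barrier in
	 * teardown is what keeps c alive for the locked handler. */
	if (!task_work_add(c)) {
		/* Rare failure: pin the context before handing off to
		 * the cancel fallback, which puts the reference when it
		 * finishes (modeled here by an immediate put). */
		ctx_get(c);
		ctx_put(c);
	}
}

int main(void)
{
	struct ctx c = { .refs = 1 };

	queue(&c);
	printf("ctx refs after queue: %d\n", atomic_load(&c.refs));
	return 0;
}

The design point is the usual one for refcount optimizations: pay the atomic cost only on the rare slow path, and let a synchronization point that already exists (here uring_lock) carry the fast path.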