summary refs log tree commit diff
diff options
context:
space:
mode:
author    Jens Axboe <axboe@kernel.dk>  2026-04-08 11:31:38 -0600
committer Jens Axboe <axboe@kernel.dk>  2026-04-08 13:21:34 -0600
commit    7880174e1e5e88944ea75cf871efd77ec5e3ef51 (patch)
tree      ecf5ec0d30cabc602215e1fda6d1ad4f1ff3b246
parent    2c453a4281245135b9e6f1048962272c74853b53 (diff)
download  lwn-7880174e1e5e88944ea75cf871efd77ec5e3ef51.tar.gz
          lwn-7880174e1e5e88944ea75cf871efd77ec5e3ef51.zip
io_uring/tctx: clean up __io_uring_add_tctx_node() error handling
Refactor __io_uring_add_tctx_node() so that on error it never leaves
current->io_uring pointing at a half-setup tctx. This moves the assignment
of current->io_uring to the end of the function, past any failure points.
Separate out the node installation into io_tctx_install_node() to further
clean this up.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--  io_uring/tctx.c  60
1 file changed, 40 insertions(+), 20 deletions(-)
diff --git a/io_uring/tctx.c b/io_uring/tctx.c
index e5cef6a8dde0..61533f30494f 100644
--- a/io_uring/tctx.c
+++ b/io_uring/tctx.c
@@ -108,10 +108,37 @@ __cold struct io_uring_task *io_uring_alloc_task_context(struct task_struct *tas
return tctx;
}
+static int io_tctx_install_node(struct io_ring_ctx *ctx,
+ struct io_uring_task *tctx)
+{
+ struct io_tctx_node *node;
+ int ret;
+
+ if (xa_load(&tctx->xa, (unsigned long)ctx))
+ return 0;
+
+ node = kmalloc_obj(*node);
+ if (!node)
+ return -ENOMEM;
+ node->ctx = ctx;
+ node->task = current;
+
+ ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
+ node, GFP_KERNEL));
+ if (ret) {
+ kfree(node);
+ return ret;
+ }
+
+ mutex_lock(&ctx->tctx_lock);
+ list_add(&node->ctx_node, &ctx->tctx_list);
+ mutex_unlock(&ctx->tctx_lock);
+ return 0;
+}
+
int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
{
struct io_uring_task *tctx = current->io_uring;
- struct io_tctx_node *node;
int ret;
if (unlikely(!tctx)) {
@@ -119,14 +146,13 @@ int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
if (IS_ERR(tctx))
return PTR_ERR(tctx);
- current->io_uring = tctx;
if (ctx->int_flags & IO_RING_F_IOWQ_LIMITS_SET) {
unsigned int limits[2] = { ctx->iowq_limits[0],
ctx->iowq_limits[1], };
ret = io_wq_max_workers(tctx->io_wq, limits);
if (ret)
- return ret;
+ goto err_free;
}
}
@@ -137,25 +163,19 @@ int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
*/
if (tctx->io_wq)
io_wq_set_exit_on_idle(tctx->io_wq, false);
- if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
- node = kmalloc_obj(*node);
- if (!node)
- return -ENOMEM;
- node->ctx = ctx;
- node->task = current;
-
- ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
- node, GFP_KERNEL));
- if (ret) {
- kfree(node);
- return ret;
- }
- mutex_lock(&ctx->tctx_lock);
- list_add(&node->ctx_node, &ctx->tctx_list);
- mutex_unlock(&ctx->tctx_lock);
+ ret = io_tctx_install_node(ctx, tctx);
+ if (!ret) {
+ current->io_uring = tctx;
+ return 0;
}
- return 0;
+ if (!current->io_uring) {
+err_free:
+ io_wq_put_and_exit(tctx->io_wq);
+ percpu_counter_destroy(&tctx->inflight);
+ kfree(tctx);
+ }
+ return ret;
}
int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx)