author    | Pavel Begunkov <asml.silence@gmail.com> | 2021-05-16 22:58:04 +0100
committer | Jens Axboe <axboe@kernel.dk>            | 2021-06-14 08:23:04 -0600
commit    | 3dd0c97a9e011b11ce6bd245bacf58c57f6f7875 (patch)
tree      | d12ae86e20f7a1c560de67d7fbb42dd8d018d298 /fs/io_uring.c
parent    | acfb381d9d714c657ff540099fa5a6fa98e71f07 (diff)
download  | lwn-3dd0c97a9e011b11ce6bd245bacf58c57f6f7875.tar.gz lwn-3dd0c97a9e011b11ce6bd245bacf58c57f6f7875.zip
io_uring: get rid of files in exit cancel
We don't match against files on cancellation anymore, so there is no need
to drag files_struct around; just pass a flag telling whether only inflight
requests or all requests should be killed.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/7bfc5409a78f8e2d6b27dec3293ec2d248677348.1621201931.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
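For readers skimming the diff below, here is a minimal, self-contained sketch of the matching rule the patch converges on. The types are stubbed for standalone compilation, and the loop body is inferred from the commit message's "inflight" wording (the hunk below cuts off inside io_for_each_link), so treat it as an approximation rather than a copy of the kernel code:

#include <stdbool.h>
#include <stddef.h>

/* Stubbed stand-ins for the kernel types; illustrative only. */
struct task_struct { int pid; };
struct io_kiocb {
        struct task_struct *task;
        bool inflight;           /* stands in for the kernel's inflight tracking */
        struct io_kiocb *link;   /* next request in a linked chain */
};

/*
 * A request chain matches when it belongs to the given task and either
 * we are cancelling everything (cancel_all) or some request in the
 * chain is still inflight.
 */
static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
                          bool cancel_all)
{
        struct io_kiocb *req;

        if (task && head->task != task)
                return false;
        if (cancel_all)
                return true;
        for (req = head; req; req = req->link)
                if (req->inflight)
                        return true;
        return false;
}

int main(void)
{
        struct task_struct t = { .pid = 1 };
        struct io_kiocb req = { .task = &t, .inflight = false, .link = NULL };

        /* exit-style cancellation matches regardless of inflight state */
        return io_match_task(&req, &t, true) ? 0 : 1;
}

The signature change is the whole point: the old files_struct argument only ever acted as a null/non-null switch here, so a bool states the intent directly without dragging a reference through every cancellation path.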
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 63
1 file changed, 31 insertions, 32 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3a7889939455..8b8d25216662 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1036,7 +1036,7 @@ static bool io_disarm_next(struct io_kiocb *req);
 static void io_uring_del_task_file(unsigned long index);
 static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
                                          struct task_struct *task,
-                                         struct files_struct *files);
+                                         bool cancel_all);
 static void io_uring_cancel_sqpoll(struct io_sq_data *sqd);
 
 static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);
@@ -1105,15 +1105,14 @@ static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
         percpu_ref_put(ref);
 }
 
-static bool io_match_task(struct io_kiocb *head,
-                          struct task_struct *task,
-                          struct files_struct *files)
+static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
+                          bool cancel_all)
 {
         struct io_kiocb *req;
 
         if (task && head->task != task)
                 return false;
-        if (!files)
+        if (cancel_all)
                 return true;
 
         io_for_each_link(req, head) {
@@ -5256,7 +5255,7 @@ static bool io_poll_remove_one(struct io_kiocb *req)
  * Returns true if we found and killed one or more poll requests
  */
 static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
-                               struct files_struct *files)
+                               bool cancel_all)
 {
         struct hlist_node *tmp;
         struct io_kiocb *req;
@@ -5268,7 +5267,7 @@ static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
 
                 list = &ctx->cancel_hash[i];
                 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
-                        if (io_match_task(req, tsk, files))
+                        if (io_match_task(req, tsk, cancel_all))
                                 posted += io_poll_remove_one(req);
                 }
         }
@@ -8742,7 +8741,7 @@ static void io_ring_exit_work(struct work_struct *work)
          * as nobody else will be looking for them.
          */
         do {
-                io_uring_try_cancel_requests(ctx, NULL, NULL);
+                io_uring_try_cancel_requests(ctx, NULL, true);
                 if (ctx->sq_data) {
                         struct io_sq_data *sqd = ctx->sq_data;
                         struct task_struct *tsk;
@@ -8793,14 +8792,14 @@ static void io_ring_exit_work(struct work_struct *work)
 
 /* Returns true if we found and killed one or more timeouts */
 static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
-                             struct files_struct *files)
+                             bool cancel_all)
 {
         struct io_kiocb *req, *tmp;
         int canceled = 0;
 
         spin_lock_irq(&ctx->completion_lock);
         list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
-                if (io_match_task(req, tsk, files)) {
+                if (io_match_task(req, tsk, cancel_all)) {
                         io_kill_timeout(req, -ECANCELED);
                         canceled++;
                 }
@@ -8826,8 +8825,8 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
                 io_unregister_personality(ctx, index);
         mutex_unlock(&ctx->uring_lock);
 
-        io_kill_timeouts(ctx, NULL, NULL);
-        io_poll_remove_all(ctx, NULL, NULL);
+        io_kill_timeouts(ctx, NULL, true);
+        io_poll_remove_all(ctx, NULL, true);
 
         /* if we failed setting up the ctx, we might not have any rings */
         io_iopoll_try_reap_events(ctx);
@@ -8853,7 +8852,7 @@ static int io_uring_release(struct inode *inode, struct file *file)
 
 struct io_task_cancel {
         struct task_struct *task;
-        struct files_struct *files;
+        bool all;
 };
 
 static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
@@ -8862,30 +8861,29 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
         struct io_task_cancel *cancel = data;
         bool ret;
 
-        if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) {
+        if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) {
                 unsigned long flags;
                 struct io_ring_ctx *ctx = req->ctx;
 
                 /* protect against races with linked timeouts */
                 spin_lock_irqsave(&ctx->completion_lock, flags);
-                ret = io_match_task(req, cancel->task, cancel->files);
+                ret = io_match_task(req, cancel->task, cancel->all);
                 spin_unlock_irqrestore(&ctx->completion_lock, flags);
         } else {
-                ret = io_match_task(req, cancel->task, cancel->files);
+                ret = io_match_task(req, cancel->task, cancel->all);
         }
         return ret;
 }
 
 static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
-                                  struct task_struct *task,
-                                  struct files_struct *files)
+                                  struct task_struct *task, bool cancel_all)
 {
         struct io_defer_entry *de;
         LIST_HEAD(list);
 
         spin_lock_irq(&ctx->completion_lock);
         list_for_each_entry_reverse(de, &ctx->defer_list, list) {
-                if (io_match_task(de->req, task, files)) {
+                if (io_match_task(de->req, task, cancel_all)) {
                         list_cut_position(&list, &ctx->defer_list, &de->list);
                         break;
                 }
         }
@@ -8929,9 +8927,9 @@ static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
 
 static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
                                          struct task_struct *task,
-                                         struct files_struct *files)
+                                         bool cancel_all)
 {
-        struct io_task_cancel cancel = { .task = task, .files = files, };
+        struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
         struct io_uring_task *tctx = task ? task->io_uring : NULL;
 
         while (1) {
@@ -8951,7 +8949,7 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
                 }
 
                 /* SQPOLL thread does its own polling */
-                if ((!(ctx->flags & IORING_SETUP_SQPOLL) && !files) ||
+                if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
                     (ctx->sq_data && ctx->sq_data->thread == current)) {
                         while (!list_empty_careful(&ctx->iopoll_list)) {
                                 io_iopoll_try_reap_events(ctx);
@@ -8959,9 +8957,9 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
                         }
                 }
 
-                ret |= io_cancel_defer_files(ctx, task, files);
-                ret |= io_poll_remove_all(ctx, task, files);
-                ret |= io_kill_timeouts(ctx, task, files);
+                ret |= io_cancel_defer_files(ctx, task, cancel_all);
+                ret |= io_poll_remove_all(ctx, task, cancel_all);
+                ret |= io_kill_timeouts(ctx, task, cancel_all);
                 ret |= io_run_task_work();
                 ret |= io_run_ctx_fallback(ctx);
                 if (!ret)
@@ -9067,7 +9065,7 @@ static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
         return percpu_counter_sum(&tctx->inflight);
 }
 
-static void io_uring_try_cancel(struct files_struct *files)
+static void io_uring_try_cancel(bool cancel_all)
 {
         struct io_uring_task *tctx = current->io_uring;
         struct io_tctx_node *node;
@@ -9078,7 +9076,7 @@ static void io_uring_try_cancel(struct files_struct *files)
 
                 /* sqpoll task will cancel all its requests */
                 if (!ctx->sq_data)
-                        io_uring_try_cancel_requests(ctx, current, files);
+                        io_uring_try_cancel_requests(ctx, current, cancel_all);
         }
 }
 
@@ -9104,7 +9102,7 @@ static void io_uring_cancel_sqpoll(struct io_sq_data *sqd)
                 if (!inflight)
                         break;
                 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
-                        io_uring_try_cancel_requests(ctx, current, NULL);
+                        io_uring_try_cancel_requests(ctx, current, true);
 
                 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
                 /*
@@ -9128,6 +9126,7 @@ void __io_uring_cancel(struct files_struct *files)
         struct io_uring_task *tctx = current->io_uring;
         DEFINE_WAIT(wait);
         s64 inflight;
+        bool cancel_all = !files;
 
         if (tctx->io_wq)
                 io_wq_exit_start(tctx->io_wq);
@@ -9136,10 +9135,10 @@ void __io_uring_cancel(struct files_struct *files)
         atomic_inc(&tctx->in_idle);
         do {
                 /* read completions before cancelations */
-                inflight = tctx_inflight(tctx, !!files);
+                inflight = tctx_inflight(tctx, !cancel_all);
                 if (!inflight)
                         break;
-                io_uring_try_cancel(files);
+                io_uring_try_cancel(cancel_all);
 
                 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
                 /*
@@ -9147,14 +9146,14 @@ void __io_uring_cancel(struct files_struct *files)
                  * avoids a race where a completion comes in before we did
                  * prepare_to_wait().
                  */
-                if (inflight == tctx_inflight(tctx, !!files))
+                if (inflight == tctx_inflight(tctx, !cancel_all))
                         schedule();
                 finish_wait(&tctx->wait, &wait);
         } while (1);
         atomic_dec(&tctx->in_idle);
 
         io_uring_clean_tctx(tctx);
-        if (!files) {
+        if (cancel_all) {
                 /* for exec all current's requests should be gone, kill tctx */
                 __io_uring_free(current);
         }
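To close, a hypothetical (non-kernel) driver showing the caller-visible contract after this change: __io_uring_cancel() still takes the files_struct pointer, but the final hunks above collapse it into the flag at the top of the function. The stub below mirrors only that derivation; everything else (the cancel loop, the waiting, the tctx teardown) is elided:

#include <stdbool.h>
#include <stddef.h>

struct files_struct { int dummy; };   /* minimal stand-in; opaque in reality */

/* Stub mirroring only the derivation this patch adds to the entry point;
 * the real function goes on to cancel, wait, and free the task context. */
static void __io_uring_cancel(struct files_struct *files)
{
        bool cancel_all = !files;
        (void)cancel_all;
}

int main(void)
{
        struct files_struct files;

        /* cancel_all = true: kill everything; per the final hunk's comment,
         * this is the exec case, where the tctx is freed afterwards */
        __io_uring_cancel(NULL);

        /* a non-NULL files table means cancel_all = false: only inflight
         * requests are reaped */
        __io_uring_cancel(&files);
        return 0;
}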