author | Pavel Begunkov <asml.silence@gmail.com> | 2020-12-06 22:22:44 +0000
---|---|---
committer | Jens Axboe <axboe@kernel.dk> | 2020-12-09 12:04:01 -0700
commit | 634578f800652035debba3098d8ab0d21af7c7a5 (patch) |
tree | fb44b31f8d5f96c771abdb6645531e4b9ff4ff63 /fs/io_uring.c |
parent | 31bff9a51b264df6d144931a6a5f1d6cc815ed4b (diff) |
io_uring: fix racy IOPOLL flush overflow
It's not safe to call io_cqring_overflow_flush() for IOPOLL mode without
holding uring_lock, because IOPOLL does its synchronisation differently:
completions are reaped and posted under uring_lock rather than
completion_lock. Make sure we hold it.
As for io_ring_exit_work(), the flush isn't needed there at all, because
io_ring_ctx_wait_and_kill() has already flushed with the force flag set,
so all overflowed requests have been dropped.
Cc: <stable@vger.kernel.org> # 5.5+
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
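For illustration, below is a minimal, self-contained userspace analogue of the
conditional-lock pattern this patch applies (compile with gcc -pthread). All
names in it are hypothetical stand-ins, not the kernel's: ring_submit_lock()
and ring_submit_unlock() mirror io_ring_submit_lock()/io_ring_submit_unlock(),
which take uring_lock only when the ring was set up with IORING_SETUP_IOPOLL.

/*
 * Sketch of the conditional-lock pattern used by the patch. This is a
 * userspace analogy, not kernel code; struct ring_ctx and all helpers
 * here are illustrative stand-ins.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ring_ctx {
	pthread_mutex_t uring_lock;
	bool iopoll;		/* stands in for ctx->flags & IORING_SETUP_IOPOLL */
	int overflowed;		/* stands in for the cq_overflow_list contents */
};

/* Take the lock only when this mode actually needs it. */
static void ring_submit_lock(struct ring_ctx *ctx, bool needs_lock)
{
	if (needs_lock)
		pthread_mutex_lock(&ctx->uring_lock);
}

static void ring_submit_unlock(struct ring_ctx *ctx, bool needs_lock)
{
	if (needs_lock)
		pthread_mutex_unlock(&ctx->uring_lock);
}

/*
 * The flush must run under uring_lock in IOPOLL mode, because that is
 * the lock IOPOLL completion reaping runs under (per the commit message).
 */
static void overflow_flush(struct ring_ctx *ctx)
{
	ring_submit_lock(ctx, ctx->iopoll);
	ctx->overflowed = 0;
	ring_submit_unlock(ctx, ctx->iopoll);
}

int main(void)
{
	struct ring_ctx ctx = {
		.uring_lock = PTHREAD_MUTEX_INITIALIZER,
		.iopoll = true,
		.overflowed = 3,
	};

	overflow_flush(&ctx);
	printf("overflowed after flush: %d\n", ctx.overflowed);
	return 0;
}

The design point this sketch tries to capture: non-IOPOLL rings keep their
existing flush path unchanged, and only IOPOLL mode pays for the mutex, since
only IOPOLL synchronises its completions under uring_lock.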
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index d030a4404b8f..c0306f77211a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8654,8 +8654,6 @@ static void io_ring_exit_work(struct work_struct *work)
 	 * as nobody else will be looking for them.
 	 */
 	do {
-		if (ctx->rings)
-			io_cqring_overflow_flush(ctx, true, NULL, NULL);
 		io_iopoll_try_reap_events(ctx);
 	} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
 	io_ring_ctx_free(ctx);
@@ -8665,6 +8663,8 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 {
 	mutex_lock(&ctx->uring_lock);
 	percpu_ref_kill(&ctx->refs);
+	if (ctx->rings)
+		io_cqring_overflow_flush(ctx, true, NULL, NULL);
 	mutex_unlock(&ctx->uring_lock);
 
 	io_kill_timeouts(ctx, NULL, NULL);
@@ -8674,8 +8674,6 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 		io_wq_cancel_all(ctx->io_wq);
 
 	/* if we failed setting up the ctx, we might not have any rings */
-	if (ctx->rings)
-		io_cqring_overflow_flush(ctx, true, NULL, NULL);
 	io_iopoll_try_reap_events(ctx);
 	idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
@@ -8840,7 +8838,9 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 	}
 
 	io_cancel_defer_files(ctx, task, files);
+	io_ring_submit_lock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
 	io_cqring_overflow_flush(ctx, true, task, files);
+	io_ring_submit_unlock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
 
 	if (!files)
 		__io_uring_cancel_task_requests(ctx, task);
@@ -9172,8 +9172,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 	 */
 	ret = 0;
 	if (ctx->flags & IORING_SETUP_SQPOLL) {
+		io_ring_submit_lock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
 		if (!list_empty_careful(&ctx->cq_overflow_list))
 			io_cqring_overflow_flush(ctx, false, NULL, NULL);
+		io_ring_submit_unlock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
 		if (flags & IORING_ENTER_SQ_WAKEUP)
 			wake_up(&ctx->sq_data->wait);
 		if (flags & IORING_ENTER_SQ_WAIT)