author		Pavel Begunkov <asml.silence@gmail.com>	2021-04-13 02:58:44 +0100
committer	Jens Axboe <axboe@kernel.dk>			2021-04-13 09:37:55 -0600
commit		cce4b8b0ce1f9fdf67f4f73ed12a2da2a085d5e3 (patch)
tree		045e28a52b29848c565e11cebd5d4d16be40dde6 /fs/io_uring.c
parent		e31001a3abb81a2dba976b842b8ab65d123bca2a (diff)
io_uring: don't fail overflow on in_idle
As CQE overflows are now untied from requests and so don't hold any
ref, we don't need to handle exiting/exec'ing cases there anymore.
Moreover, it's much nicer with regard to userspace to save overflowed
CQEs whenever possible, so remove the failure on in_idle.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/d873b7dab75c7f3039ead9628a745bea01f2cfd2.1618278933.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
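For context on the data structures the hunk below manipulates: an overflowed
completion is kept kernel-side as an io_overflow_cqe, which wraps the regular
CQE together with a list node queued on ctx->cq_overflow_list. A minimal
sketch of the two structures, assuming the definitions from
include/uapi/linux/io_uring.h and fs/io_uring.c around this kernel version
(exact layout may differ by release):

/* From include/uapi/linux/io_uring.h: the completion event userspace sees. */
struct io_uring_cqe {
	__u64	user_data;	/* sqe->data submission passed back */
	__s32	res;		/* result code for this event */
	__u32	flags;
};

/*
 * Sketch of the fs/io_uring.c helper: an overflowed CQE is held on
 * ctx->cq_overflow_list until there is room in the CQ ring to flush it.
 */
struct io_overflow_cqe {
	struct io_uring_cqe	cqe;
	struct list_head	list;
};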
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	44
1 file changed, 20 insertions(+), 24 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 1627d69a570c..3632b5a4f13f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1513,32 +1513,28 @@ static bool io_cqring_event_overflow(struct io_kiocb *req, long res,
 				     unsigned int cflags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
+	struct io_overflow_cqe *ocqe;
 
-	if (!atomic_read(&req->task->io_uring->in_idle)) {
-		struct io_overflow_cqe *ocqe;
-
-		ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
-		if (!ocqe)
-			goto overflow;
-		if (list_empty(&ctx->cq_overflow_list)) {
-			set_bit(0, &ctx->sq_check_overflow);
-			set_bit(0, &ctx->cq_check_overflow);
-			ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
-		}
-		ocqe->cqe.user_data = req->user_data;
-		ocqe->cqe.res = res;
-		ocqe->cqe.flags = cflags;
-		list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
-		return true;
+	ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
+	if (!ocqe) {
+		/*
+		 * If we're in ring overflow flush mode, or in task cancel mode,
+		 * or cannot allocate an overflow entry, then we need to drop it
+		 * on the floor.
+		 */
+		WRITE_ONCE(ctx->rings->cq_overflow, ++ctx->cached_cq_overflow);
+		return false;
 	}
-overflow:
-	/*
-	 * If we're in ring overflow flush mode, or in task cancel mode,
-	 * or cannot allocate an overflow entry, then we need to drop it
-	 * on the floor.
-	 */
-	WRITE_ONCE(ctx->rings->cq_overflow, ++ctx->cached_cq_overflow);
-	return false;
+	if (list_empty(&ctx->cq_overflow_list)) {
+		set_bit(0, &ctx->sq_check_overflow);
+		set_bit(0, &ctx->cq_check_overflow);
+		ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
+	}
+	ocqe->cqe.user_data = req->user_data;
+	ocqe->cqe.res = res;
+	ocqe->cqe.flags = cflags;
+	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
+	return true;
 }
 
 static inline bool __io_cqring_fill_event(struct io_kiocb *req, long res,
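For readability, here is io_cqring_event_overflow() as it reads after this
patch, reconstructed from the hunk above (a consolidated view of the same
code, not an independent implementation):

static bool io_cqring_event_overflow(struct io_kiocb *req, long res,
				     unsigned int cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_overflow_cqe *ocqe;

	ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
	if (!ocqe) {
		/*
		 * If we're in ring overflow flush mode, or in task cancel mode,
		 * or cannot allocate an overflow entry, then we need to drop it
		 * on the floor.
		 */
		WRITE_ONCE(ctx->rings->cq_overflow, ++ctx->cached_cq_overflow);
		return false;
	}
	if (list_empty(&ctx->cq_overflow_list)) {
		set_bit(0, &ctx->sq_check_overflow);
		set_bit(0, &ctx->cq_check_overflow);
		ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
	}
	ocqe->cqe.user_data = req->user_data;
	ocqe->cqe.res = res;
	ocqe->cqe.flags = cflags;
	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
	return true;
}

The only remaining failure mode is the GFP_ATOMIC kmalloc() failing; the old
atomic_read(&req->task->io_uring->in_idle) check is gone, so overflowed CQEs
are preserved for userspace even while the submitting task is exiting.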