summaryrefslogtreecommitdiff
path: root/fs/io_uring.c
diff options
context:
space:
mode:
authorPavel Begunkov <asml.silence@gmail.com>2020-06-25 12:38:13 +0300
committerJens Axboe <axboe@kernel.dk>2020-06-25 07:22:38 -0600
commitd3cac64c498c4fb2df46b97ee6f4c7d6d75f5e3d (patch)
tree69f3c045b16c8612253e619c65b04ba4daab65f1 /fs/io_uring.c
parentf6b6c7d6a9600bdbf5826f57137630e1670e2d87 (diff)
downloadlwn-d3cac64c498c4fb2df46b97ee6f4c7d6d75f5e3d.tar.gz
lwn-d3cac64c498c4fb2df46b97ee6f4c7d6d75f5e3d.zip
io_uring: fix NULL-mm for linked reqs
__io_queue_sqe() tries to handle all requests of a link, so it's not enough to grab mm in io_sq_thread_acquire_mm() based just on the head. Don't check req->needs_mm and do it always. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--fs/io_uring.c15
1 files changed, 11 insertions, 4 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c686061c3762..72739188b2ff 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1991,10 +1991,9 @@ static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
}
}
-static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
+static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
{
- if (io_op_defs[req->opcode].needs_mm && !current->mm) {
+ if (!current->mm) {
if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
return -EFAULT;
kthread_use_mm(ctx->sqo_mm);
@@ -2003,6 +2002,14 @@ static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
return 0;
}
+static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
+ struct io_kiocb *req)
+{
+ if (!io_op_defs[req->opcode].needs_mm)
+ return 0;
+ return __io_sq_thread_acquire_mm(ctx);
+}
+
#ifdef CONFIG_BLOCK
static bool io_resubmit_prep(struct io_kiocb *req, int error)
{
@@ -2781,7 +2788,7 @@ static void io_async_buf_retry(struct callback_head *cb)
ctx = req->ctx;
__set_current_state(TASK_RUNNING);
- if (!io_sq_thread_acquire_mm(ctx, req)) {
+ if (!__io_sq_thread_acquire_mm(ctx)) {
mutex_lock(&ctx->uring_lock);
__io_queue_sqe(req, NULL);
mutex_unlock(&ctx->uring_lock);