summary refs log tree commit diff
path: root/fs/io_uring.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--fs/io_uring.c15
1 file changed, 7 insertions, 8 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3b2f6fd8f58f..caf908382cdb 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5240,7 +5240,7 @@ static int io_files_update(struct io_kiocb *req, bool force_nonblock,
}
static int io_req_defer_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe, bool for_async)
+ const struct io_uring_sqe *sqe)
{
ssize_t ret = 0;
@@ -5254,9 +5254,6 @@ static int io_req_defer_prep(struct io_kiocb *req,
return ret;
}
- if (for_async || (req->flags & REQ_F_WORK_INITIALIZED))
- io_req_work_grab_env(req);
-
switch (req->opcode) {
case IORING_OP_NOP:
break;
@@ -5369,9 +5366,10 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (!req->io) {
if (io_alloc_async_ctx(req))
return -EAGAIN;
- ret = io_req_defer_prep(req, sqe, true);
+ ret = io_req_defer_prep(req, sqe);
if (ret < 0)
return ret;
+ io_req_work_grab_env(req);
}
spin_lock_irq(&ctx->completion_lock);
@@ -5983,9 +5981,10 @@ fail_req:
ret = -EAGAIN;
if (io_alloc_async_ctx(req))
goto fail_req;
- ret = io_req_defer_prep(req, sqe, true);
+ ret = io_req_defer_prep(req, sqe);
if (unlikely(ret < 0))
goto fail_req;
+ io_req_work_grab_env(req);
}
/*
@@ -6039,7 +6038,7 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (io_alloc_async_ctx(req))
return -EAGAIN;
- ret = io_req_defer_prep(req, sqe, false);
+ ret = io_req_defer_prep(req, sqe);
if (ret) {
/* fail even hard links since we don't submit */
head->flags |= REQ_F_FAIL_LINK;
@@ -6066,7 +6065,7 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (io_alloc_async_ctx(req))
return -EAGAIN;
- ret = io_req_defer_prep(req, sqe, false);
+ ret = io_req_defer_prep(req, sqe);
if (ret)
req->flags |= REQ_F_FAIL_LINK;
*link = req;