author	Pavel Begunkov <asml.silence@gmail.com>	2020-06-29 19:18:43 +0300
committer	Jens Axboe <axboe@kernel.dk>	2020-06-30 08:40:00 -0600
commit	cbdcb4357c000861b77369c34e110fa893d23607 (patch)
tree	7c7a9d3417a5b5bb7f8129a7e55b91a109e3642c /fs/io_uring.c
parent	debb85f496c9cc70663eac31d3ad9153839c844c (diff)
io_uring: do grab_env() just before punting
Currently io_steal_work() is disabled, and every linked request has to
go through task_work for initialisation. Do io_req_work_grab_env()
just before punting to io-wq, and do it for the whole link, so that any
request reachable by io_steal_work() is already prepared.
This is also interesting for another reason -- it localises
io_req_work_grab_env() into one place just before io-wq punting, helping
to better manage the req->work lifetime and to add some neat
cleanups/optimisations later.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
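For readers following the control flow, here is a minimal userspace sketch of the ordering this patch establishes: the whole link is prepared first, and only then is the head punted to io-wq. The toy_* names and the struct layout are assumptions made purely for illustration; they are not the kernel definitions from fs/io_uring.c.

#include <stdio.h>

/* Toy stand-ins for the kernel structures; illustrative only. */
struct toy_req {
	const char *name;
	int work_prepared;	/* models req->work being initialised */
	int is_link_head;	/* models REQ_F_LINK_HEAD */
	struct toy_req *next;	/* models the req->link_list chain */
};

/* Models io_prep_async_work(): grab the environment for one request. */
static void toy_prep_async_work(struct toy_req *req)
{
	req->work_prepared = 1;
	printf("prepared %s\n", req->name);
}

/* Models io_prep_async_link(): prep the head and every linked request,
 * so anything later reachable by io_steal_work() is already set up. */
static void toy_prep_async_link(struct toy_req *req)
{
	struct toy_req *cur;

	toy_prep_async_work(req);
	if (req->is_link_head)
		for (cur = req->next; cur; cur = cur->next)
			toy_prep_async_work(cur);
}

/* Models __io_queue_async_work(): assumes ->work is already prepared. */
static void toy_queue_async_work_low(struct toy_req *req)
{
	printf("punting %s to io-wq\n", req->name);
}

/* Models io_queue_async_work(): init ->work of the whole link, then punt. */
static void toy_queue_async_work(struct toy_req *req)
{
	toy_prep_async_link(req);
	toy_queue_async_work_low(req);
}

int main(void)
{
	struct toy_req linked = { "linked-req", 0, 0, NULL };
	struct toy_req head   = { "head-req",   0, 1, &linked };

	toy_queue_async_work(&head);
	return 0;
}

The point mirrored here is that the low-level queueing step can assume ->work of every request in the link is already initialised, which is what allows io_steal_work() to hand back prepared work instead of returning NULL.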
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	53
1 file changed, 29 insertions(+), 24 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index caf908382cdb..9bc4339057ef 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1101,7 +1101,7 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
 	}
 }
 
-static inline void io_req_work_grab_env(struct io_kiocb *req)
+static void io_req_work_grab_env(struct io_kiocb *req)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
 
@@ -1150,8 +1150,7 @@ static inline void io_req_work_drop_env(struct io_kiocb *req)
 	}
 }
 
-static inline void io_prep_async_work(struct io_kiocb *req,
-				      struct io_kiocb **link)
+static void io_prep_async_work(struct io_kiocb *req)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
 
@@ -1164,15 +1163,22 @@ static inline void io_prep_async_work(struct io_kiocb *req,
 	}
 
 	io_req_work_grab_env(req);
-	*link = io_prep_linked_timeout(req);
 }
 
-static inline void io_queue_async_work(struct io_kiocb *req)
+static void io_prep_async_link(struct io_kiocb *req)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-	struct io_kiocb *link;
+	struct io_kiocb *cur;
 
-	io_prep_async_work(req, &link);
+	io_prep_async_work(req);
+	if (req->flags & REQ_F_LINK_HEAD)
+		list_for_each_entry(cur, &req->link_list, link_list)
+			io_prep_async_work(cur);
+}
+
+static void __io_queue_async_work(struct io_kiocb *req)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_kiocb *link = io_prep_linked_timeout(req);
 
 	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
					&req->work, req->flags);
@@ -1182,6 +1188,13 @@ static inline void io_queue_async_work(struct io_kiocb *req)
 		io_queue_linked_timeout(link);
 }
 
+static void io_queue_async_work(struct io_kiocb *req)
+{
+	/* init ->work of the whole link before punting */
+	io_prep_async_link(req);
+	__io_queue_async_work(req);
+}
+
 static void io_kill_timeout(struct io_kiocb *req)
 {
 	int ret;
@@ -1215,7 +1228,8 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
 		if (req_need_defer(req))
 			break;
 		list_del_init(&req->list);
-		io_queue_async_work(req);
+		/* punt-init is done before queueing for defer */
+		__io_queue_async_work(req);
 	} while (!list_empty(&ctx->defer_list));
 }
 
@@ -1791,7 +1805,7 @@ static void io_put_req(struct io_kiocb *req)
 
 static struct io_wq_work *io_steal_work(struct io_kiocb *req)
 {
-	struct io_kiocb *nxt = NULL;
+	struct io_kiocb *timeout, *nxt = NULL;
 
 	/*
 	 * A ref is owned by io-wq in which context we're. So, if that's the
@@ -1805,18 +1819,10 @@ static struct io_wq_work *io_steal_work(struct io_kiocb *req)
 	if (!nxt)
 		return NULL;
 
-	if ((nxt->flags & REQ_F_ISREG) && io_op_defs[nxt->opcode].hash_reg_file)
-		io_wq_hash_work(&nxt->work, file_inode(nxt->file));
-
-	io_req_task_queue(nxt);
-	/*
-	 * If we're going to return actual work, here should be timeout prep:
-	 *
-	 * link = io_prep_linked_timeout(nxt);
-	 * if (link)
-	 *	nxt->flags |= REQ_F_QUEUE_TIMEOUT;
-	 */
-	return NULL;
+	timeout = io_prep_linked_timeout(nxt);
+	if (timeout)
+		nxt->flags |= REQ_F_QUEUE_TIMEOUT;
+	return &nxt->work;
 }
 
 /*
@@ -5369,8 +5375,8 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		ret = io_req_defer_prep(req, sqe);
 		if (ret < 0)
 			return ret;
-		io_req_work_grab_env(req);
 	}
+	io_prep_async_link(req);
 
 	spin_lock_irq(&ctx->completion_lock);
 	if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
@@ -5984,7 +5990,6 @@ fail_req:
 			ret = io_req_defer_prep(req, sqe);
 			if (unlikely(ret < 0))
 				goto fail_req;
-			io_req_work_grab_env(req);
 		}
 
 		/*