author | Pavel Begunkov <asml.silence@gmail.com> | 2022-04-15 22:08:25 +0100
---|---|---
committer | Jens Axboe <axboe@kernel.dk> | 2022-04-24 18:02:49 -0600
commit | cb2d344c7551c250c07c009639eb81d54235ef8b |
tree | 2a9d4c3f3864456ede2fb2da5595f23e211c7db4 /fs/io_uring.c |
parent | f5c6cf2a310d8de3bd02aa8a217f8ca63df6f236 |
io_uring: helper for prep+queuing linked timeouts
We try to aggressively inline the submission path, so it's a good idea
not to pollute it with colder code. One such piece is linked timeout
preparation and queueing, which can be extracted into a helper (a
generic sketch of this hot/cold split follows the diff below).
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/ecf74df7ac77389b6d9211211ec4954e91de98ba.1650056133.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 22
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e9f0c89280cd..946906e93d51 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1675,6 +1675,17 @@ static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
 	return __io_prep_linked_timeout(req);
 }
 
+static noinline void __io_arm_ltimeout(struct io_kiocb *req)
+{
+	io_queue_linked_timeout(__io_prep_linked_timeout(req));
+}
+
+static inline void io_arm_ltimeout(struct io_kiocb *req)
+{
+	if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
+		__io_arm_ltimeout(req);
+}
+
 static void io_prep_async_work(struct io_kiocb *req)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
@@ -7301,7 +7312,6 @@ static void io_wq_submit_work(struct io_wq_work *work)
 	const struct io_op_def *def = &io_op_defs[req->opcode];
 	unsigned int issue_flags = IO_URING_F_UNLOCKED;
 	bool needs_poll = false;
-	struct io_kiocb *timeout;
 	int ret = 0, err = -ECANCELED;
 
 	/* one will be dropped by ->io_free_work() after returning to io-wq */
@@ -7310,10 +7320,7 @@ static void io_wq_submit_work(struct io_wq_work *work)
 	else
 		req_ref_get(req);
 
-	timeout = io_prep_linked_timeout(req);
-	if (timeout)
-		io_queue_linked_timeout(timeout);
-
+	io_arm_ltimeout(req);
 	/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
 	if (work->flags & IO_WQ_WORK_CANCEL) {
@@ -7528,7 +7535,6 @@ static void io_queue_sqe_arm_apoll(struct io_kiocb *req)
 static inline void __io_queue_sqe(struct io_kiocb *req)
 	__must_hold(&req->ctx->uring_lock)
 {
-	struct io_kiocb *linked_timeout;
 	int ret;
 
 	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
@@ -7542,9 +7548,7 @@ static inline void __io_queue_sqe(struct io_kiocb *req)
 	 * doesn't support non-blocking read/write attempts
 	 */
 	if (likely(!ret)) {
-		linked_timeout = io_prep_linked_timeout(req);
-		if (linked_timeout)
-			io_queue_linked_timeout(linked_timeout);
+		io_arm_ltimeout(req);
 	} else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
 		io_queue_sqe_arm_apoll(req);
 	} else {
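For context, the change applies a common hot/cold split: the inlinable fast path does nothing but test a flag, and the rarely taken work is pushed into a noinline helper so it does not bloat the inlined submission code. Below is a minimal standalone sketch of the pattern, using GCC/Clang builtins in place of the kernel's noinline/unlikely() macros; the struct, flag, and function names are illustrative, not taken from the patch.

```c
#include <stdio.h>

/* Illustrative request type; REQ_HAS_TIMEOUT stands in for REQ_F_ARM_LTIMEOUT. */
struct request {
	unsigned int flags;
};
#define REQ_HAS_TIMEOUT (1u << 0)

/* Cold path: kept out of line so its code and register pressure
 * never land in the callers of arm_timeout(). */
__attribute__((noinline))
static void arm_timeout_slow(struct request *req)
{
	/* ...the expensive timeout preparation and queueing would go here... */
	printf("arming timeout for request %p\n", (void *)req);
}

/* Hot path: a single predictable branch the compiler can inline everywhere.
 * __builtin_expect(..., 0) plays the role of the kernel's unlikely(). */
static inline void arm_timeout(struct request *req)
{
	if (__builtin_expect(req->flags & REQ_HAS_TIMEOUT, 0))
		arm_timeout_slow(req);
}

int main(void)
{
	struct request plain = { .flags = 0 };
	struct request timed = { .flags = REQ_HAS_TIMEOUT };

	arm_timeout(&plain);	/* fast path: inlined flag test, no call made */
	arm_timeout(&timed);	/* slow path: out-of-line helper runs */
	return 0;
}
```

This mirrors what io_arm_ltimeout()/__io_arm_ltimeout() do in the patch: __io_queue_sqe() and io_wq_submit_work() now pay only for the unlikely(REQ_F_ARM_LTIMEOUT) test, while the prep+queue logic lives out of line.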