author    Pavel Begunkov <asml.silence@gmail.com>  2021-08-15 10:40:18 +0100
committer Jens Axboe <axboe@kernel.dk>             2021-08-23 13:10:37 -0600
commit    48dcd38d73c22b22bf9dc1c01b0ca0b8414b31da (patch)
tree      03f30a7dfe5404cf3844eaf7afe1a3a8a18fc9da /fs/io_uring.c
parent    a141dd896f544df9627502cfb3fc1a73fb6587e4 (diff)
io_uring: optimise iowq refcounting
If a request is forwarded into io-wq, there is a good chance it hasn't been refcounted yet and we can save one req_ref_get() by setting the refcount number to the right value directly.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/2d53f4449faaf73b4a4c5de667fc3c176d974860.1628981736.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
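The saving is easiest to see outside the kernel. Below is a minimal user-space sketch of the pattern, assuming invented names (fake_req, set_refcount_then_get, set_refcount_direct) rather than the real io_uring helpers: when the request is not yet refcounted, one plain atomic store of the final count replaces the store-to-1 plus a separate atomic increment.

/*
 * User-space illustration only: fake_req, the flag value and both helpers
 * are stand-ins invented for this sketch, not the io_uring definitions.
 */
#include <stdatomic.h>
#include <stdio.h>

struct fake_req {
        unsigned int flags;
        atomic_int refs;
};

#define REQ_F_REFCOUNT (1u << 0)

/* Old pattern: initialise the count to 1, then take the extra io-wq ref. */
static void set_refcount_then_get(struct fake_req *req)
{
        if (!(req->flags & REQ_F_REFCOUNT)) {
                req->flags |= REQ_F_REFCOUNT;
                atomic_store(&req->refs, 1);
        }
        atomic_fetch_add(&req->refs, 1);        /* extra atomic RMW */
}

/* New pattern: if not refcounted yet, store the final count directly. */
static void set_refcount_direct(struct fake_req *req, int nr)
{
        if (!(req->flags & REQ_F_REFCOUNT)) {
                req->flags |= REQ_F_REFCOUNT;
                atomic_store(&req->refs, nr);   /* plain store, no RMW */
        } else {
                atomic_fetch_add(&req->refs, 1);
        }
}

int main(void)
{
        struct fake_req a = { 0 }, b = { 0 };

        set_refcount_then_get(&a);      /* two atomic operations */
        set_refcount_direct(&b, 2);     /* one atomic operation */
        printf("old path refs=%d, new path refs=%d\n",
               atomic_load(&a.refs), atomic_load(&b.refs));
        return 0;
}

Both paths end with a count of 2, one reference of which is dropped by ->io_free_work() after returning to io-wq, but the new path skips an atomic read-modify-write in the common case where the request had no refcount yet.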
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e1ca427e183a..86466e12c74d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1115,14 +1115,19 @@ static inline void req_ref_get(struct io_kiocb *req)
 	atomic_inc(&req->refs);
 }
 
-static inline void io_req_refcount(struct io_kiocb *req)
+static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
 {
 	if (!(req->flags & REQ_F_REFCOUNT)) {
 		req->flags |= REQ_F_REFCOUNT;
-		atomic_set(&req->refs, 1);
+		atomic_set(&req->refs, nr);
 	}
 }
 
+static inline void io_req_set_refcount(struct io_kiocb *req)
+{
+	__io_req_set_refcount(req, 1);
+}
+
 static inline void io_req_set_rsrc_node(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -1306,8 +1311,8 @@ static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
 		return NULL;
 
 	/* linked timeouts should have two refs once prep'ed */
-	io_req_refcount(req);
-	io_req_refcount(nxt);
+	io_req_set_refcount(req);
+	io_req_set_refcount(nxt);
 	req_ref_get(nxt);
 
 	nxt->timeout.head = req;
@@ -5233,7 +5238,7 @@ static int io_arm_poll_handler(struct io_kiocb *req)
 	req->apoll = apoll;
 	req->flags |= REQ_F_POLLED;
 	ipt.pt._qproc = io_async_queue_proc;
-	io_req_refcount(req);
+	io_req_set_refcount(req);
 
 	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
 					io_async_wake);
@@ -5421,7 +5426,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
 	if (flags & ~IORING_POLL_ADD_MULTI)
 		return -EINVAL;
 
-	io_req_refcount(req);
+	io_req_set_refcount(req);
 	poll->events = io_poll_parse_events(sqe, flags);
 	return 0;
 }
@@ -6313,9 +6318,11 @@ static void io_wq_submit_work(struct io_wq_work *work)
 	struct io_kiocb *timeout;
 	int ret = 0;
 
-	io_req_refcount(req);
-	/* will be dropped by ->io_free_work() after returning to io-wq */
-	req_ref_get(req);
+	/* one will be dropped by ->io_free_work() after returning to io-wq */
+	if (!(req->flags & REQ_F_REFCOUNT))
+		__io_req_set_refcount(req, 2);
+	else
+		req_ref_get(req);
 
 	timeout = io_prep_linked_timeout(req);
 	if (timeout)