author | Jens Axboe <axboe@kernel.dk> | 2021-02-24 13:28:27 -0700 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2021-04-11 17:41:58 -0600 |
commit | de9b4ccad750f216616730b74ed2be16c80892a4 (patch) | |
tree | 3a5f36e344e473052bf6c757d1a9d87b719b1955 /fs/io_uring.c | |
parent | 179ae0d15e8b3a2d9affe680281009f1f10c4a9d (diff) | |
download | lwn-de9b4ccad750f216616730b74ed2be16c80892a4.tar.gz lwn-de9b4ccad750f216616730b74ed2be16c80892a4.zip | |
io_uring: wrap io_kiocb reference count manipulation in helpers
No functional changes in this patch, just in preparation for handling the
references a bit more efficiently.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
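
The pattern is straightforward: every site that manipulated `req->refs` directly now goes through a small set of `req_ref_*()` inline helpers, so the underlying counting scheme can later be changed in one place without touching the call sites. As a rough illustration only (not code from the patch, and using C11 atomics in place of the kernel's `refcount_t`), the same wrapping idea in userspace might look like the sketch below; the `struct request` layout and helper bodies are assumptions for the example:

```c
/*
 * Minimal userspace sketch of the wrapping pattern, using C11 atomics
 * in place of the kernel's refcount_t. All names here are illustrative.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	atomic_int refs;			/* stands in for refcount_t refs */
};

/* Callers never touch req->refs directly; only these helpers do. */
static inline void req_ref_get(struct request *req)
{
	atomic_fetch_add(&req->refs, 1);
}

static inline bool req_ref_sub_and_test(struct request *req, int refs)
{
	/* true when the count just reached zero */
	return atomic_fetch_sub(&req->refs, refs) == refs;
}

static inline bool req_ref_put_and_test(struct request *req)
{
	return req_ref_sub_and_test(req, 1);
}

int main(void)
{
	struct request *req = malloc(sizeof(*req));

	atomic_init(&req->refs, 2);		/* submission + completion refs */
	req_ref_get(req);			/* e.g. handing off to a worker */
	req_ref_put_and_test(req);		/* worker drops its reference */
	if (req_ref_sub_and_test(req, 2)) {	/* drop both remaining refs */
		printf("last reference dropped, freeing request\n");
		free(req);
	}
	return 0;
}
```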
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 55 |
1 file changed, 40 insertions, 15 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3e102acb311a..e37d5e769afe 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1470,6 +1470,31 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
 	return ret;
 }
 
+static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
+{
+	return refcount_inc_not_zero(&req->refs);
+}
+
+static inline bool req_ref_sub_and_test(struct io_kiocb *req, int refs)
+{
+	return refcount_sub_and_test(refs, &req->refs);
+}
+
+static inline bool req_ref_put_and_test(struct io_kiocb *req)
+{
+	return refcount_dec_and_test(&req->refs);
+}
+
+static inline void req_ref_put(struct io_kiocb *req)
+{
+	refcount_dec(&req->refs);
+}
+
+static inline void req_ref_get(struct io_kiocb *req)
+{
+	refcount_inc(&req->refs);
+}
+
 static void __io_cqring_fill_event(struct io_kiocb *req, long res,
 				   unsigned int cflags)
 {
@@ -1506,7 +1531,7 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res,
 		io_clean_op(req);
 		req->result = res;
 		req->compl.cflags = cflags;
-		refcount_inc(&req->refs);
+		req_ref_get(req);
 		list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
 	}
 }
@@ -1528,7 +1553,7 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
 	 * If we're the last reference to this request, add to our locked
 	 * free_list cache.
 	 */
-	if (refcount_dec_and_test(&req->refs)) {
+	if (req_ref_put_and_test(req)) {
 		struct io_comp_state *cs = &ctx->submit_state.comp;
 
 		if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
@@ -2108,7 +2133,7 @@ static void io_submit_flush_completions(struct io_comp_state *cs,
 		req = cs->reqs[i];
 
 		/* submission and completion refs */
-		if (refcount_sub_and_test(2, &req->refs))
+		if (req_ref_sub_and_test(req, 2))
 			io_req_free_batch(&rb, req, &ctx->submit_state);
 	}
 
@@ -2124,7 +2149,7 @@ static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
 {
 	struct io_kiocb *nxt = NULL;
 
-	if (refcount_dec_and_test(&req->refs)) {
+	if (req_ref_put_and_test(req)) {
 		nxt = io_req_find_next(req);
 		__io_free_req(req);
 	}
@@ -2133,7 +2158,7 @@ static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
 
 static void io_put_req(struct io_kiocb *req)
 {
-	if (refcount_dec_and_test(&req->refs))
+	if (req_ref_put_and_test(req))
 		io_free_req(req);
 }
 
@@ -2156,14 +2181,14 @@ static void io_free_req_deferred(struct io_kiocb *req)
 
 static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
 {
-	if (refcount_sub_and_test(refs, &req->refs))
+	if (req_ref_sub_and_test(req, refs))
 		io_free_req_deferred(req);
 }
 
 static void io_double_put_req(struct io_kiocb *req)
 {
 	/* drop both submit and complete references */
-	if (refcount_sub_and_test(2, &req->refs))
+	if (req_ref_sub_and_test(req, 2))
 		io_free_req(req);
 }
 
@@ -2249,7 +2274,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		__io_cqring_fill_event(req, req->result, cflags);
 		(*nr_events)++;
 
-		if (refcount_dec_and_test(&req->refs))
+		if (req_ref_put_and_test(req))
 			io_req_free_batch(&rb, req, &ctx->submit_state);
 	}
 
@@ -2464,7 +2489,7 @@ static bool io_rw_reissue(struct io_kiocb *req)
 	lockdep_assert_held(&req->ctx->uring_lock);
 
 	if (io_resubmit_prep(req)) {
-		refcount_inc(&req->refs);
+		req_ref_get(req);
 		io_queue_async_work(req);
 		return true;
 	}
@@ -3169,7 +3194,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 	list_del_init(&wait->entry);
 
 	/* submit ref gets dropped, acquire a new one */
-	refcount_inc(&req->refs);
+	req_ref_get(req);
 	io_req_task_queue(req);
 	return 1;
 }
@@ -4893,7 +4918,7 @@ static void io_poll_remove_double(struct io_kiocb *req)
 		spin_lock(&head->lock);
 		list_del_init(&poll->wait.entry);
 		if (poll->wait.private)
-			refcount_dec(&req->refs);
+			req_ref_put(req);
 		poll->head = NULL;
 		spin_unlock(&head->lock);
 	}
@@ -4959,7 +4984,7 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
 			poll->wait.func(&poll->wait, mode, sync, key);
 		}
 	}
-	refcount_dec(&req->refs);
+	req_ref_put(req);
 	return 1;
 }
 
@@ -5002,7 +5027,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
 			return;
 		}
 		io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
-		refcount_inc(&req->refs);
+		req_ref_get(req);
 		poll->wait.private = req;
 		*poll_ptr = poll;
 	}
@@ -6142,7 +6167,7 @@ static void io_wq_submit_work(struct io_wq_work *work)
 	/* avoid locking problems by failing it from a clean context */
 	if (ret) {
 		/* io-wq is going to take one down */
-		refcount_inc(&req->refs);
+		req_ref_get(req);
 		io_req_task_queue_fail(req, ret);
 	}
 }
@@ -6200,7 +6225,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 	 * We don't expect the list to be empty, that will only happen if we
 	 * race with the completion of the linked work.
 	 */
-	if (prev && refcount_inc_not_zero(&prev->refs))
+	if (prev && req_ref_inc_not_zero(prev))
 		io_remove_next_linked(prev);
 	else
 		prev = NULL;