author     Pavel Begunkov <asml.silence@gmail.com>   2021-09-24 21:59:51 +0100
committer  Jens Axboe <axboe@kernel.dk>              2021-10-19 05:49:53 -0600
commit     b3fa03fd1b17f557e2c43736440feed66fb86741 (patch)
tree       9aa3acc5cf662945a9755ac699285189e5a41c94 /fs/io_uring.c
parent     3aa83bfb6e5c3a89d0ec67e598ee04bfc1425b13 (diff)
io_uring: convert iopoll_completed to store_release
Convert the explicit barriers around iopoll_completed to smp_load_acquire() and smp_store_release(). On the reader side this replaces a single smp_rmb() with a per-request smp_load_acquire(); neither form implies any extra CPU ordering on x86. Use READ_ONCE() as usual where ordering doesn't matter.

Use this to move the filling of CQEs by iopoll earlier, which will be necessary to avoid traversing the list one extra time in the future.

Suggested-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/8bd663cb15efdc72d6247c38ee810964e744a450.1632516769.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
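The conversion follows the standard publish/consume idiom: the completion callback publishes ->result with a plain store and then sets ->iopoll_completed with release semantics, so a poller that observes the flag with acquire semantics is guaranteed to also see the result. A minimal userspace sketch of the same pairing using C11 atomics and pthreads; the names result/completed are illustrative stand-ins for the req fields, not kernel code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Userspace analogue of the post-patch scheme: a plain store to the
 * payload, then a release store to the flag; an acquire load of the
 * flag then guarantees visibility of the payload, as
 * smp_store_release()/smp_load_acquire() do in the kernel.
 */
static long result;
static atomic_int completed;

static void *writer(void *arg)
{
	(void)arg;
	result = 42;	/* like req->result = res */
	/* like smp_store_release(&req->iopoll_completed, 1) */
	atomic_store_explicit(&completed, 1, memory_order_release);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	/* like smp_load_acquire(&req->iopoll_completed) */
	while (!atomic_load_explicit(&completed, memory_order_acquire))
		;	/* spin until the writer publishes */
	printf("result = %ld\n", result);	/* guaranteed to print 42 */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

Build with cc -pthread; the acquire load pairs with the release store, so the reader can never see completed == 1 while observing a stale result.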
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  19
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index df122cdfc85d..fb122c2b69f2 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2429,17 +2429,11 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, struct list_head *done)
 	struct req_batch rb;
 	struct io_kiocb *req;
 
-	/* order with ->result store in io_complete_rw_iopoll() */
-	smp_rmb();
-
 	io_init_req_batch(&rb);
 	while (!list_empty(done)) {
 		req = list_first_entry(done, struct io_kiocb, inflight_entry);
 		list_del(&req->inflight_entry);
 
-		__io_cqring_fill_event(ctx, req->user_data, req->result,
-				       io_put_rw_kbuf(req));
-
 		if (req_ref_put_and_test(req))
 			io_req_free_batch(&rb, req, &ctx->submit_state);
 	}
@@ -2498,8 +2492,12 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 	wq_list_for_each_resume(pos, prev) {
 		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
 
-		if (!READ_ONCE(req->iopoll_completed))
+		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
+		if (!smp_load_acquire(&req->iopoll_completed))
 			break;
+		__io_cqring_fill_event(ctx, req->user_data, req->result,
+				       io_put_rw_kbuf(req));
+
 		list_add_tail(&req->inflight_entry, &done);
 		nr_events++;
 	}
@@ -2712,10 +2710,9 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
 		}
 	}
 
-	WRITE_ONCE(req->result, res);
-	/* order with io_iopoll_complete() checking ->result */
-	smp_wmb();
-	WRITE_ONCE(req->iopoll_completed, 1);
+	req->result = res;
+	/* order with io_iopoll_complete() checking ->iopoll_completed */
+	smp_store_release(&req->iopoll_completed, 1);
 }
 
 /*
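For comparison, the scheme this patch removes ordered plain WRITE_ONCE()/READ_ONCE() accesses with standalone smp_wmb()/smp_rmb() fences. A rough userspace analogue, again with illustrative names rather than the real io_uring fields; C11 release/acquire fences are conservative stand-ins for the kernel's weaker store-store and load-load barriers. On x86 both this and the release/acquire form above compile to plain loads and stores, which is why the commit message notes no extra CPU ordering:

#include <stdatomic.h>
#include <stdio.h>

static long result;
static atomic_int completed;

static void writer_old_style(void)
{
	result = 42;
	/* like smp_wmb(): order the payload store before the flag store
	 * (a C11 release fence is stronger than a store-store barrier)
	 */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&completed, 1, memory_order_relaxed);
}

static int reader_old_style(void)
{
	if (!atomic_load_explicit(&completed, memory_order_relaxed))
		return -1;	/* not published yet */
	/* like smp_rmb(): order the flag load before the payload load
	 * (a C11 acquire fence is stronger than a load-load barrier)
	 */
	atomic_thread_fence(memory_order_acquire);
	return (int)result;
}

int main(void)
{
	writer_old_style();
	printf("result = %d\n", reader_old_style());
	return 0;
}

The patch's practical difference is where the ordering lives: the old reader issued one smp_rmb() for the whole batch, while the new one takes an acquire load per request, which is what lets io_do_iopoll() safely read ->result and fill the CQE as soon as each request's flag is observed.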