author     Bijan Mottahedeh <bijan.mottahedeh@oracle.com>    2021-01-15 17:37:47 +0000
committer  Jens Axboe <axboe@kernel.dk>                      2021-02-01 10:02:42 -0700
commit     2a63b2d9c30b2029892c368d11ede1434de6c565 (patch)
tree       80e378311f27323e6e9e8345648990da64284330 /fs/io_uring.c
parent     d67d2263fb2350a68074f2cb4dd78549aeebbfae (diff)
io_uring: add rsrc_ref locking routines
Encapsulate resource reference locking into separate routines.

Signed-off-by: Bijan Mottahedeh <bijan.mottahedeh@oracle.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
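The patch is a mechanical refactoring: every open-coded spin_lock_bh()/spin_unlock_bh() pair taken on ctx->rsrc_ref_lock is replaced by a pair of static inline wrappers, so a later change to the rsrc_ref locking scheme only has to touch one place. As a rough illustration of the same encapsulation pattern outside the kernel, here is a minimal userspace sketch using pthreads; struct ctx, ctx_ref_lock()/ctx_ref_unlock() and the trivial ref_count body are invented for this example and are not part of the patch.

/*
 * Illustrative sketch only (not from this patch): wrapping the raw lock
 * calls in small inline helpers keeps the locking discipline in one
 * place, so switching to a different lock type later touches only the
 * helpers, not every call site.
 */
#include <pthread.h>
#include <stdio.h>

struct ctx {
	pthread_mutex_t ref_lock;
	int ref_count;
};

static inline void ctx_ref_lock(struct ctx *c)
{
	pthread_mutex_lock(&c->ref_lock);
}

static inline void ctx_ref_unlock(struct ctx *c)
{
	pthread_mutex_unlock(&c->ref_lock);
}

static void ctx_ref_get(struct ctx *c)
{
	/* Critical section goes through the helpers only. */
	ctx_ref_lock(c);
	c->ref_count++;
	ctx_ref_unlock(c);
}

int main(void)
{
	struct ctx c = { .ref_lock = PTHREAD_MUTEX_INITIALIZER, .ref_count = 0 };

	ctx_ref_get(&c);
	printf("refs: %d\n", c.ref_count);
	return 0;
}

The kernel change below follows the same shape: io_rsrc_ref_lock()/io_rsrc_ref_unlock() are introduced first, then each caller is converted to use them.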
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--   fs/io_uring.c   22
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index fcc7a3ed800a..a129192c20d3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -7350,14 +7350,24 @@ static void io_rsrc_ref_kill(struct percpu_ref *ref)
complete(&data->done);
}
+static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
+{
+ spin_lock_bh(&ctx->rsrc_ref_lock);
+}
+
+static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx)
+{
+ spin_unlock_bh(&ctx->rsrc_ref_lock);
+}
+
static void io_sqe_rsrc_set_node(struct io_ring_ctx *ctx,
struct fixed_rsrc_data *rsrc_data,
struct fixed_rsrc_ref_node *ref_node)
{
- spin_lock_bh(&ctx->rsrc_ref_lock);
+ io_rsrc_ref_lock(ctx);
rsrc_data->node = ref_node;
list_add_tail(&ref_node->node, &ctx->rsrc_ref_list);
- spin_unlock_bh(&ctx->rsrc_ref_lock);
+ io_rsrc_ref_unlock(ctx);
percpu_ref_get(&rsrc_data->refs);
}
@@ -7374,9 +7384,9 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
if (!backup_node)
return -ENOMEM;
- spin_lock_bh(&ctx->rsrc_ref_lock);
+ io_rsrc_ref_lock(ctx);
ref_node = data->node;
- spin_unlock_bh(&ctx->rsrc_ref_lock);
+ io_rsrc_ref_unlock(ctx);
if (ref_node)
percpu_ref_kill(&ref_node->refs);
@@ -7766,7 +7776,7 @@ static void io_rsrc_data_ref_zero(struct percpu_ref *ref)
data = ref_node->rsrc_data;
ctx = data->ctx;
- spin_lock_bh(&ctx->rsrc_ref_lock);
+ io_rsrc_ref_lock(ctx);
ref_node->done = true;
while (!list_empty(&ctx->rsrc_ref_list)) {
@@ -7778,7 +7788,7 @@ static void io_rsrc_data_ref_zero(struct percpu_ref *ref)
list_del(&ref_node->node);
first_add |= llist_add(&ref_node->llist, &ctx->rsrc_put_llist);
}
- spin_unlock_bh(&ctx->rsrc_ref_lock);
+ io_rsrc_ref_unlock(ctx);
if (percpu_ref_is_dying(&data->refs))
delay = 0;