author     Kent Overstreet <kent.overstreet@linux.dev>  2022-10-29 02:47:33 -0400
committer  Kent Overstreet <kent.overstreet@linux.dev>  2023-10-22 17:09:04 -0400
commit     9f311f2166eb969dbe3d69ab24cd78567a30d62c (patch)
tree       a1e041bda4ca7766691ef933d9bfa81f6344bc2d /fs/bcachefs/io.c
parent     af171183194f73cca9a2f44ba13907ecc9c761a9 (diff)
bcachefs: Don't use bch_write_op->cl for delivering completions
We already had op->end_io as an alternative mechanism to op->cl.parent
for delivering write completions; this switches all code paths to using
op->end_io. Two reasons:

- op->end_io is more efficient, due to fewer atomic ops; this completes
  the conversion that was originally only done for the direct IO path.
- We'll be restructuring the write path to use a different mechanism for
  punting to process context; refactoring to not use op->cl will make
  that easier.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
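For orientation, the completion wiring every write submitter now follows looks roughly like the sketch below, mirroring what the promote path adopts in the diff that follows. The struct my_op, my_write_done() and my_write_submit() names are hypothetical, used only to illustrate the pattern; the sketch assumes the embedded bch_write_op is otherwise initialized as usual.

/*
 * Minimal sketch of the op->end_io completion pattern this patch
 * standardizes on.  struct my_op, my_write_done() and my_write_submit()
 * are hypothetical names for illustration, not part of bcachefs.
 */
struct my_op {
	struct bch_write_op	op;
	/* caller-private state would live here */
};

/* Invoked from bch2_write_done() when the write finishes */
static void my_write_done(struct bch_write_op *wop)
{
	struct my_op *m = container_of(wop, struct my_op, op);

	/* tear down caller state; no parent closure is involved */
	kfree(m);
}

static void my_write_submit(struct bch_fs *c, struct my_op *m)
{
	/* m->op assumed to be initialized elsewhere (opts, bio, etc.) */

	/* completion is delivered via end_io rather than op->cl.parent */
	m->op.end_io = my_write_done;

	/* note the NULL parent closure, as in promote_start() in the diff below */
	closure_call(&m->op.cl, bch2_write, c->btree_update_wq, NULL);
}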
Diffstat (limited to 'fs/bcachefs/io.c')
-rw-r--r--  fs/bcachefs/io.c | 21
1 file changed, 7 insertions(+), 14 deletions(-)
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index 1b457e90a172..4424129cad46 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -558,13 +558,9 @@ static void bch2_write_done(struct closure *cl)
bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
- if (op->end_io) {
- EBUG_ON(cl->parent);
- closure_debug_destroy(cl);
- op->end_io(op);
- } else {
- closure_return(cl);
- }
+ EBUG_ON(cl->parent);
+ closure_debug_destroy(cl);
+ op->end_io(op);
}
/**
@@ -1357,7 +1353,6 @@ err:
/* Cache promotion on read */
struct promote_op {
- struct closure cl;
struct rcu_head rcu;
u64 start_time;
@@ -1411,10 +1406,10 @@ static void promote_free(struct bch_fs *c, struct promote_op *op)
kfree_rcu(op, rcu);
}
-static void promote_done(struct closure *cl)
+static void promote_done(struct bch_write_op *wop)
{
struct promote_op *op =
- container_of(cl, struct promote_op, cl);
+ container_of(wop, struct promote_op, write.op);
struct bch_fs *c = op->write.op.c;
bch2_time_stats_update(&c->times[BCH_TIME_data_promote],
@@ -1427,7 +1422,6 @@ static void promote_done(struct closure *cl)
static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
{
struct bch_fs *c = rbio->c;
- struct closure *cl = &op->cl;
struct bio *bio = &op->write.op.wbio.bio;
trace_promote(&rbio->bio);
@@ -1442,9 +1436,7 @@ static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
bch2_migrate_read_done(&op->write, rbio);
- closure_init(cl, NULL);
- closure_call(&op->write.op.cl, bch2_write, c->btree_update_wq, cl);
- closure_return_with_destructor(cl, promote_done);
+ closure_call(&op->write.op.cl, bch2_write, c->btree_update_wq, NULL);
}
static struct promote_op *__promote_alloc(struct bch_fs *c,
@@ -1509,6 +1501,7 @@ static struct promote_op *__promote_alloc(struct bch_fs *c,
},
btree_id, k);
BUG_ON(ret);
+ op->write.op.end_io = promote_done;
return op;
err: