author     Kent Overstreet <kent.overstreet@linux.dev>  2022-11-03 00:29:43 -0400
committer  Kent Overstreet <kent.overstreet@linux.dev>  2023-10-22 17:09:45 -0400
commit     a1ee777bfcceeb916d837321144c782e12082588
tree       d83ecba10afd8dbb6a9ded3dcf1c9956ab8caed3  /fs/bcachefs/io.c
parent     03e83f630223261978e23875299e87ae61403548
bcachefs: Kill BCH_WRITE_FLUSH
BCH_WRITE_FLUSH is a write flag that causes a journal flush after the write
completes. It's only used in the direct IO path; killing it will allow for
some consolidation with the regular fsync path, which will help with the
upcoming nocow mode.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
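
[Editor's note: the consolidation described above implies that a direct IO
write wanting durability now has to flush the journal itself after the write
completes, the same way fsync does, rather than setting BCH_WRITE_FLUSH on
the write op. The sketch below is only an illustration of that caller-side
pattern, not code from this commit: dio_write_flush() is a hypothetical name,
while bch2_journal_flush_seq(), bch2_journal_error(), and the bch_write_op
fields it touches are taken from the code being removed in the diff.]

/*
 * Hypothetical caller-side flush (not part of this commit): with
 * BCH_WRITE_FLUSH gone, a direct IO write that needs durability waits
 * on its own journal sequence number, just like the fsync path.
 */
static int dio_write_flush(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;

	/* Don't bother flushing if the write itself already failed: */
	if (op->error)
		return op->error;

	/* Same wait the old BCH_WRITE_FLUSH|BCH_WRITE_SYNC path did: */
	bch2_journal_flush_seq(&c->journal, op->journal_seq);

	/* And the same error propagation __bch2_write_done() used to do: */
	return bch2_journal_error(&c->journal);
}
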
Diffstat (limited to 'fs/bcachefs/io.c')
-rw-r--r--  fs/bcachefs/io.c | 44
1 file changed, 9 insertions(+), 35 deletions(-)
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index e754f57c1342..701bfc8ce0e4 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -273,7 +273,6 @@ int bch2_extent_update(struct btree_trans *trans,
 		       struct btree_iter *iter,
 		       struct bkey_i *k,
 		       struct disk_reservation *disk_res,
-		       u64 *journal_seq,
 		       u64 new_i_size,
 		       s64 *i_sectors_delta_total,
 		       bool check_enospc)
@@ -374,7 +373,7 @@ int bch2_extent_update(struct btree_trans *trans,
 	}
 
 	ret = bch2_trans_update(trans, iter, k, 0) ?:
-		bch2_trans_commit(trans, disk_res, journal_seq,
+		bch2_trans_commit(trans, disk_res, NULL,
 				BTREE_INSERT_NOCHECK_RW|
 				BTREE_INSERT_NOFAIL);
 err:
@@ -438,8 +437,7 @@ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
 		bch2_cut_back(end_pos, &delete);
 
 		ret = bch2_extent_update(trans, inum, iter, &delete,
-				&disk_res, NULL,
-				0, i_sectors_delta, false);
+				&disk_res, 0, i_sectors_delta, false);
 		bch2_disk_reservation_put(c, &disk_res);
 	}
 
@@ -507,7 +505,7 @@ static int bch2_write_index_default(struct bch_write_op *op)
 				     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
 		ret = bch2_extent_update(&trans, inum, &iter, sk.k,
-					 &op->res, &op->journal_seq,
+					 &op->res,
 					 op->new_i_size, &op->i_sectors_delta,
 					 op->flags & BCH_WRITE_CHECK_ENOSPC);
 		bch2_trans_iter_exit(&trans, &iter);
@@ -596,14 +594,11 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
 
 static void __bch2_write(struct bch_write_op *);
 
-static void __bch2_write_done(struct closure *cl)
+static void bch2_write_done(struct closure *cl)
 {
 	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
 	struct bch_fs *c = op->c;
 
-	if (!op->error && (op->flags & BCH_WRITE_FLUSH))
-		op->error = bch2_journal_error(&c->journal);
-
 	bch2_disk_reservation_put(c, &op->res);
 	percpu_ref_put(&c->writes);
 	bch2_keylist_free(&op->insert_keys, op->inline_keys);
@@ -616,21 +611,6 @@ static void __bch2_write_done(struct closure *cl)
 	op->end_io(op);
 }
 
-static __always_inline void bch2_write_done(struct bch_write_op *op)
-{
-	if (likely(!(op->flags & BCH_WRITE_FLUSH) || op->error)) {
-		__bch2_write_done(&op->cl);
-	} else if (!(op->flags & BCH_WRITE_SYNC)) {
-		bch2_journal_flush_seq_async(&op->c->journal,
-					     op->journal_seq,
-					     &op->cl);
-		continue_at(&op->cl, __bch2_write_done, index_update_wq(op));
-	} else {
-		bch2_journal_flush_seq(&op->c->journal, op->journal_seq);
-		__bch2_write_done(&op->cl);
-	}
-}
-
 static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
 {
 	struct keylist *keys = &op->insert_keys;
@@ -789,16 +769,10 @@ unlock:
 
 		__bch2_write_index(op);
 
-		if (!(op->flags & BCH_WRITE_DONE)) {
+		if (!(op->flags & BCH_WRITE_DONE))
 			__bch2_write(op);
-		} else if (!op->error && (op->flags & BCH_WRITE_FLUSH)) {
-			bch2_journal_flush_seq_async(&op->c->journal,
-						     op->journal_seq,
-						     &op->cl);
-			continue_at(&op->cl, __bch2_write_done, index_update_wq(op));
-		} else {
-			__bch2_write_done(&op->cl);
-		}
+		else
+			bch2_write_done(&op->cl);
 	}
 }
 
@@ -1347,7 +1321,7 @@ err:
 		if (!(op->flags & BCH_WRITE_DONE))
 			goto again;
 
-		bch2_write_done(op);
+		bch2_write_done(&op->cl);
 	} else {
 		continue_at(&op->cl, bch2_write_index, NULL);
 	}
@@ -1395,7 +1369,7 @@ static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
 
 	__bch2_write_index(op);
 err:
-	bch2_write_done(op);
+	bch2_write_done(&op->cl);
 }
 
 /**