author     Qu Wenruo <wqu@suse.com>          2023-08-03 14:33:30 +0800
committer  David Sterba <dsterba@suse.com>   2023-08-21 14:54:48 +0200
commit     3c771c194402ffe20d4de68d9fc21e703179a9ce (patch)
tree       8f6ce7f754a27556b44a18a536b02f73a7b1a36c /fs/btrfs/scrub.c
parent     1dc4888e725dc748b82858984f2a5bd41efc5201 (diff)
btrfs: scrub: avoid unnecessary csum tree search preparing stripes
One of the bottlenecks of the new scrub code is the extra csum tree search.

The old code only did a csum tree search per scrub bio, which can be as large as 512KiB, so it could afford to allocate a new path each time. The new scrub code does a csum tree search per stripe, which is only 64KiB, so we are much better off re-using the same csum path across searches.

Introduce a per-sctx path for csum tree searches, so the path no longer has to be re-allocated for every lookup.

With this change we can further improve the queue depth and the scrub read performance:

Before (with regression and cached extent tree path):

 Device         r/s      rkB/s  rrqm/s  %rrqm  r_await  rareq-sz  aqu-sz  %util
 nvme0n1p3 15875.00 1013328.00   12.00   0.08     0.08     63.83    1.35 100.00

After (with both cached extent/csum tree paths):

 nvme0n1p3 17759.00 1133280.00   10.00   0.06     0.08     63.81    1.50 100.00

Fixes: e02ee89baa66 ("btrfs: scrub: switch scrub_simple_mirror() to scrub_stripe infrastructure")
CC: stable@vger.kernel.org # 6.4+
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
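Stripped to its essentials, the change boils down to one pattern: a btrfs_path embedded in the scrub context, primed once for commit-root searches, reused for every 64KiB stripe lookup, and released when the scrub finishes. The sketch below condenses that pattern for illustration only: the wrapper function, its arguments, and the loop around the lookup are hypothetical, while the scrub_ctx field names, the btrfs_lookup_csums_bitmap() call, and btrfs_release_path() follow the diff below (the usual fs/btrfs headers are assumed).

/*
 * Illustrative sketch only: how the cached csum path introduced by this
 * patch is used.  The surrounding function and loop are made up; field
 * names and helper signatures match the diff below.
 */
static int scrub_csums_with_cached_path(struct scrub_ctx *sctx,
					struct btrfs_root *csum_root,
					struct scrub_stripe *stripe,
					u64 start, u64 end)
{
	int ret = 0;

	/* In the patch this is done once in scrub_setup_ctx(). */
	sctx->csum_path.search_commit_root = 1;	/* commit root is stable during scrub */
	sctx->csum_path.skip_locking = 1;	/* so no tree locking is needed */

	for (u64 logical = start; logical < end; logical += BTRFS_STRIPE_LEN) {
		unsigned long csum_bitmap = 0;

		/*
		 * Consecutive stripes have increasing bytenrs, so the cached
		 * path normally already sits near the next csum item and the
		 * lookup avoids a full tree search per 64KiB stripe.
		 * The end offset is assumed to be inclusive here.
		 */
		ret = btrfs_lookup_csums_bitmap(csum_root, &sctx->csum_path,
						logical,
						logical + BTRFS_STRIPE_LEN - 1,
						stripe->csums, &csum_bitmap);
		if (ret < 0)
			break;
		/* ... queue and verify the stripe using csum_bitmap ... */
	}

	/* In the patch this is done once when the whole scrub finishes. */
	btrfs_release_path(&sctx->csum_path);
	return ret;
}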
Diffstat (limited to 'fs/btrfs/scrub.c')
-rw-r--r--   fs/btrfs/scrub.c   29
1 file changed, 19 insertions(+), 10 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 6aabce141d65..d04fd3c6e927 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -176,6 +176,7 @@ struct scrub_ctx {
struct scrub_stripe *raid56_data_stripes;
struct btrfs_fs_info *fs_info;
struct btrfs_path extent_path;
+ struct btrfs_path csum_path;
int first_free;
int cur_stripe;
atomic_t cancel_req;
@@ -342,6 +343,8 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
sctx->fs_info = fs_info;
sctx->extent_path.search_commit_root = 1;
sctx->extent_path.skip_locking = 1;
+ sctx->csum_path.search_commit_root = 1;
+ sctx->csum_path.skip_locking = 1;
for (i = 0; i < SCRUB_STRIPES_PER_SCTX; i++) {
int ret;
@@ -1471,6 +1474,7 @@ static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe)
*/
static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
struct btrfs_path *extent_path,
+ struct btrfs_path *csum_path,
struct btrfs_device *dev, u64 physical,
int mirror_num, u64 logical_start,
u32 logical_len,
@@ -1562,9 +1566,9 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
*/
ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
- ret = btrfs_lookup_csums_bitmap(csum_root, stripe->logical,
- stripe_end, stripe->csums,
- &csum_bitmap, true);
+ ret = btrfs_lookup_csums_bitmap(csum_root, csum_path,
+ stripe->logical, stripe_end,
+ stripe->csums, &csum_bitmap);
if (ret < 0)
goto out;
if (ret > 0)
@@ -1766,9 +1770,9 @@ static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *
/* We can queue one stripe using the remaining slot. */
scrub_reset_stripe(stripe);
- ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path, dev,
- physical, mirror_num, logical,
- length, stripe);
+ ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
+ &sctx->csum_path, dev, physical,
+ mirror_num, logical, length, stripe);
/* Either >0 as no more extents or <0 for error. */
if (ret)
return ret;
@@ -1787,6 +1791,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
struct btrfs_raid_bio *rbio;
struct btrfs_io_context *bioc = NULL;
struct btrfs_path extent_path = { 0 };
+ struct btrfs_path csum_path = { 0 };
struct bio *bio;
struct scrub_stripe *stripe;
bool all_empty = true;
@@ -1798,12 +1803,14 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
ASSERT(sctx->raid56_data_stripes);
/*
- * For data stripe search, we cannot re-use the same extent path, as
- * the data stripe bytenr may be smaller than previous extent. Thus we
- * have to use our own extent path.
+ * For data stripe search, we cannot re-use the same extent/csum paths,
+ * as the data stripe bytenr may be smaller than previous extent. Thus
+ * we have to use our own extent/csum paths.
*/
extent_path.search_commit_root = 1;
extent_path.skip_locking = 1;
+ csum_path.search_commit_root = 1;
+ csum_path.skip_locking = 1;
for (int i = 0; i < data_stripes; i++) {
int stripe_index;
@@ -1819,7 +1826,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
scrub_reset_stripe(stripe);
set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state);
- ret = scrub_find_fill_first_stripe(bg, &extent_path,
+ ret = scrub_find_fill_first_stripe(bg, &extent_path, &csum_path,
map->stripes[stripe_index].dev, physical, 1,
full_stripe_start + btrfs_stripe_nr_to_offset(i),
BTRFS_STRIPE_LEN, stripe);
@@ -1948,6 +1955,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
btrfs_bio_counter_dec(fs_info);
btrfs_release_path(&extent_path);
+ btrfs_release_path(&csum_path);
out:
return ret;
}
@@ -2237,6 +2245,7 @@ out:
if (!ret)
ret = ret2;
btrfs_release_path(&sctx->extent_path);
+ btrfs_release_path(&sctx->csum_path);
if (sctx->raid56_data_stripes) {
for (int i = 0; i < nr_data_stripes(map); i++)