author    Qu Wenruo <wqu@suse.com>        2018-11-02 09:39:49 +0800
committer David Sterba <dsterba@suse.com> 2018-12-17 14:51:26 +0100
commit    e1a4184815672c0cd8ea91beca3dd6d0bd4b9e53
tree      48c3b390097ea2823553ac1d6e7bd759dc7a9c96
parent    d06e3bb69009d21802776b8751dfe5deef7102f5
btrfs: Refactor unclustered extent allocation into find_free_extent_unclustered()
This patch extracts the unclustered extent allocation code into a new
helper, find_free_extent_unclustered(). The helper uses its return
value to tell the caller what to do next. This should make
find_free_extent() a little easier to read.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: Su Yue <suy.fnst@cn.fujitsu.com>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
[ Update merge conflict with fb5c39d7a887 ("btrfs: don't use ctl->free_space for max_extent_size") ]
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
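A minimal, self-contained sketch of the three-way return convention the
new helper introduces and how the caller dispatches on it. This is not
the kernel code: struct ffe_ctl here is a simplified stand-in for the
real find_free_extent_ctl, and the helper body just simulates the
outcomes; it mirrors the caller hunk in the diff below, where -EAGAIN
maps to "goto have_block_group" and a positive return to "goto loop".

#include <errno.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's find_free_extent_ctl. */
struct ffe_ctl {
	unsigned long long found_offset;
	int retry_unclustered;	/* have we already waited on this group? */
};

/* Stand-in helper: >0 = nothing found, 0 = found, -EAGAIN = re-search. */
static int find_free_extent_unclustered(struct ffe_ctl *ctl)
{
	if (!ctl->retry_unclustered) {
		/* Pretend the block group is still caching: wait once. */
		ctl->retry_unclustered = 1;
		return -EAGAIN;
	}
	ctl->found_offset = 4096;	/* pretend we found free space */
	return 0;
}

int main(void)
{
	struct ffe_ctl ctl = { 0 };
	int ret;

have_block_group:
	ret = find_free_extent_unclustered(&ctl);
	if (ret == -EAGAIN)
		goto have_block_group;	/* re-search this block group */
	else if (ret > 0)
		return 1;		/* nothing here: move to next group */

	/* ret == 0: found_offset is valid and can be checked/claimed. */
	printf("found offset %llu\n", ctl.found_offset);
	return 0;
}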
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--	fs/btrfs/extent-tree.c	115
1 file changed, 69 insertions(+), 46 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a99205e819e7..215937f51cc5 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -7416,6 +7416,70 @@ refill_cluster:
}
/*
+ * Return >0 to inform the caller that we found nothing
+ * Return 0 when we found a free extent and set ffe_ctl->found_offset
+ * Return -EAGAIN to inform the caller that we need to re-search this block group
+ */
+static int find_free_extent_unclustered(struct btrfs_block_group_cache *bg,
+ struct btrfs_free_cluster *last_ptr,
+ struct find_free_extent_ctl *ffe_ctl)
+{
+ u64 offset;
+
+ /*
+ * We are doing an unclustered allocation, set the fragmented flag so
+ * we don't bother trying to set up a cluster again until we get more
+ * space.
+ */
+ if (unlikely(last_ptr)) {
+ spin_lock(&last_ptr->lock);
+ last_ptr->fragmented = 1;
+ spin_unlock(&last_ptr->lock);
+ }
+ if (ffe_ctl->cached) {
+ struct btrfs_free_space_ctl *free_space_ctl;
+
+ free_space_ctl = bg->free_space_ctl;
+ spin_lock(&free_space_ctl->tree_lock);
+ if (free_space_ctl->free_space <
+ ffe_ctl->num_bytes + ffe_ctl->empty_cluster +
+ ffe_ctl->empty_size) {
+ ffe_ctl->total_free_space = max_t(u64,
+ ffe_ctl->total_free_space,
+ free_space_ctl->free_space);
+ spin_unlock(&free_space_ctl->tree_lock);
+ return 1;
+ }
+ spin_unlock(&free_space_ctl->tree_lock);
+ }
+
+ offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start,
+ ffe_ctl->num_bytes, ffe_ctl->empty_size,
+ &ffe_ctl->max_extent_size);
+
+ /*
+ * If we didn't find a chunk, and we haven't failed on this block group
+ * before, and this block group is in the middle of caching and we are
+ * ok with waiting, then go ahead and wait for progress to be made, and
+ * set @retry_unclustered to true.
+ *
+ * If @retry_unclustered is true then we've already waited on this
+ * block group once and should move on to the next block group.
+ */
+ if (!offset && !ffe_ctl->retry_unclustered && !ffe_ctl->cached &&
+ ffe_ctl->loop > LOOP_CACHING_NOWAIT) {
+ wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
+ ffe_ctl->empty_size);
+ ffe_ctl->retry_unclustered = true;
+ return -EAGAIN;
+ } else if (!offset) {
+ return 1;
+ }
+ ffe_ctl->found_offset = offset;
+ return 0;
+}
+
+/*
 * walks the btree of allocated extents and finds a hole of a given size.
* The key ins is changed to record the hole:
* ins->objectid == start position
@@ -7617,54 +7681,13 @@ have_block_group:
/* ret == -ENOENT case falls through */
}
- /*
- * We are doing an unclustered alloc, set the fragmented flag so
- * we don't bother trying to setup a cluster again until we get
- * more space.
- */
- if (unlikely(last_ptr)) {
- spin_lock(&last_ptr->lock);
- last_ptr->fragmented = 1;
- spin_unlock(&last_ptr->lock);
- }
- if (ffe_ctl.cached) {
- struct btrfs_free_space_ctl *ctl =
- block_group->free_space_ctl;
-
- spin_lock(&ctl->tree_lock);
- if (ctl->free_space <
- num_bytes + ffe_ctl.empty_cluster + empty_size) {
- ffe_ctl.total_free_space = max(ctl->free_space,
- ffe_ctl.total_free_space);
- spin_unlock(&ctl->tree_lock);
- goto loop;
- }
- spin_unlock(&ctl->tree_lock);
- }
-
- ffe_ctl.found_offset = btrfs_find_space_for_alloc(block_group,
- ffe_ctl.search_start, num_bytes, empty_size,
- &ffe_ctl.max_extent_size);
- /*
- * If we didn't find a chunk, and we haven't failed on this
- * block group before, and this block group is in the middle of
- * caching and we are ok with waiting, then go ahead and wait
- * for progress to be made, and set ffe_ctl.retry_unclustered to
- * true.
- *
- * If ffe_ctl.retry_unclustered is true then we've already
- * waited on this block group once and should move on to the
- * next block group.
- */
- if (!ffe_ctl.found_offset && !ffe_ctl.retry_unclustered &&
- !ffe_ctl.cached && ffe_ctl.loop > LOOP_CACHING_NOWAIT) {
- wait_block_group_cache_progress(block_group,
- num_bytes + empty_size);
- ffe_ctl.retry_unclustered = true;
+ ret = find_free_extent_unclustered(block_group, last_ptr,
+ &ffe_ctl);
+ if (ret == -EAGAIN)
goto have_block_group;
- } else if (!ffe_ctl.found_offset) {
+ else if (ret > 0)
goto loop;
- }
+ /* ret == 0 case falls through */
checks:
ffe_ctl.search_start = round_up(ffe_ctl.found_offset,
fs_info->stripesize);
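As a design note, retry_unclustered makes the -EAGAIN path a
wait-at-most-once retry: the first miss on a still-caching block group
blocks for cache progress, and the second miss gives up on the group. A
hedged sketch of that guard in isolation follows; wait_for_cache_progress()
and struct ffe_ctl are simplified stand-ins for
wait_block_group_cache_progress() and find_free_extent_ctl, and the
real condition additionally checks ffe_ctl->loop > LOOP_CACHING_NOWAIT,
omitted here.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct ffe_ctl {
	bool cached;		/* block group fully cached? */
	bool retry_unclustered;	/* already waited once on this group */
};

/* Stand-in for wait_block_group_cache_progress(). */
static void wait_for_cache_progress(void)
{
	/* In the kernel this blocks until the caching thread advances. */
}

/* What the helper does when btrfs_find_space_for_alloc() finds nothing. */
static int handle_miss(struct ffe_ctl *ctl)
{
	if (!ctl->retry_unclustered && !ctl->cached) {
		wait_for_cache_progress();
		ctl->retry_unclustered = true;
		return -EAGAIN;	/* caller re-searches this block group */
	}
	return 1;		/* caller moves on to the next block group */
}

int main(void)
{
	struct ffe_ctl ctl = { .cached = false, .retry_unclustered = false };

	printf("first miss:  %d\n", handle_miss(&ctl));	/* -EAGAIN */
	printf("second miss: %d\n", handle_miss(&ctl));	/* 1: give up */
	return 0;
}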