author     Qu Wenruo <wqu@suse.com>         2020-03-05 13:48:31 +0800
committer  David Sterba <dsterba@suse.com>  2020-05-25 11:25:17 +0200
commit     84780289335fe614057e5ddf796050ce15751f4a (patch)
tree       967607b3f529b05c13de125163cc028a7f4a9c02 /fs/btrfs/relocation.c
parent     9569cc203d23ddaed7f7f2ca986a7cda7f1c33c0 (diff)
btrfs: reloc: add backref_cache::pending_edge and backref_cache::useless_node
These two new members will act the same as the existing local lists, @useless and @list in build_backref_tree().

Currently build_backref_tree() is only executed serially, thus moving such local lists into backref_cache is still safe.

Also, since we're here, use list_first_entry() to replace the many list_entry() calls that follow a !list_empty() check.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
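For context, the change boils down to the following pattern, shown here as a minimal sketch rather than the actual relocation code (the struct and function names below are abbreviated stand-ins for illustration): a function-local LIST_HEAD() becomes a member of the long-lived cache, gets initialized once in the cache constructor, and relies on the caller running strictly serially so the lists are drained again by the time the build returns.

    #include <linux/list.h>

    /* Abbreviated stand-in for struct backref_cache. */
    struct cache_sketch {
            /* Unchecked backref edges during a single backref cache build. */
            struct list_head pending_edge;
            /* Backref nodes found to be useless during the build. */
            struct list_head useless_node;
    };

    static void cache_sketch_init(struct cache_sketch *cache)
    {
            INIT_LIST_HEAD(&cache->pending_edge);
            INIT_LIST_HEAD(&cache->useless_node);
    }

    /*
     * Moving the lists into the cache is only safe because the build is
     * never run concurrently on the same cache; the new
     * ASSERT(list_empty(...)) calls in the diff below check that both
     * lists are empty on every exit path.
     */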
Diffstat (limited to 'fs/btrfs/relocation.c')
-rw-r--r--  fs/btrfs/relocation.c | 74
1 file changed, 46 insertions(+), 28 deletions(-)
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index a8b5ea53e962..39294c595c66 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -158,6 +158,12 @@ struct backref_cache {
int nr_nodes;
int nr_edges;
+
+ /* The list of unchecked backref edges during backref cache build */
+ struct list_head pending_edge;
+
+ /* The list of useless backref nodes during backref cache build */
+ struct list_head useless_node;
};
/*
@@ -269,6 +275,8 @@ static void backref_cache_init(struct backref_cache *cache)
INIT_LIST_HEAD(&cache->changed);
INIT_LIST_HEAD(&cache->detached);
INIT_LIST_HEAD(&cache->leaves);
+ INIT_LIST_HEAD(&cache->pending_edge);
+ INIT_LIST_HEAD(&cache->useless_node);
}
static void backref_cache_cleanup(struct backref_cache *cache)
@@ -292,6 +300,8 @@ static void backref_cache_cleanup(struct backref_cache *cache)
for (i = 0; i < BTRFS_MAX_LEVEL; i++)
ASSERT(list_empty(&cache->pending[i]));
+ ASSERT(list_empty(&cache->pending_edge));
+ ASSERT(list_empty(&cache->useless_node));
ASSERT(list_empty(&cache->changed));
ASSERT(list_empty(&cache->detached));
ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
@@ -699,8 +709,6 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
struct backref_node *exist = NULL;
struct backref_edge *edge;
struct rb_node *rb_node;
- LIST_HEAD(list); /* Pending edge list, upper node needs to be checked */
- LIST_HEAD(useless);
int cowonly;
int ret;
int err = 0;
@@ -764,7 +772,7 @@ again:
* check its backrefs
*/
if (!exist->checked)
- list_add_tail(&edge->list[UPPER], &list);
+ list_add_tail(&edge->list[UPPER], &cache->pending_edge);
} else {
exist = NULL;
}
@@ -842,7 +850,8 @@ again:
* backrefs for the upper level block isn't
* cached, add the block to pending list
*/
- list_add_tail(&edge->list[UPPER], &list);
+ list_add_tail(&edge->list[UPPER],
+ &cache->pending_edge);
} else {
upper = rb_entry(rb_node, struct backref_node,
rb_node);
@@ -884,7 +893,7 @@ again:
cur->bytenr);
if (should_ignore_root(root)) {
btrfs_put_root(root);
- list_add(&cur->list, &useless);
+ list_add(&cur->list, &cache->useless_node);
} else {
cur->root = root;
}
@@ -930,7 +939,8 @@ again:
lower->bytenr);
if (should_ignore_root(root)) {
btrfs_put_root(root);
- list_add(&lower->list, &useless);
+ list_add(&lower->list,
+ &cache->useless_node);
} else {
lower->root = root;
}
@@ -979,7 +989,7 @@ again:
if (!upper->checked && need_check) {
need_check = false;
list_add_tail(&edge->list[UPPER],
- &list);
+ &cache->pending_edge);
} else {
if (upper->checked)
need_check = true;
@@ -1017,8 +1027,9 @@ again:
WARN_ON(exist);
/* the pending list isn't empty, take the first block to process */
- if (!list_empty(&list)) {
- edge = list_entry(list.next, struct backref_edge, list[UPPER]);
+ if (!list_empty(&cache->pending_edge)) {
+ edge = list_first_entry(&cache->pending_edge,
+ struct backref_edge, list[UPPER]);
list_del_init(&edge->list[UPPER]);
cur = edge->node[UPPER];
goto again;
@@ -1039,10 +1050,11 @@ again:
}
list_for_each_entry(edge, &node->upper, list[LOWER])
- list_add_tail(&edge->list[UPPER], &list);
+ list_add_tail(&edge->list[UPPER], &cache->pending_edge);
- while (!list_empty(&list)) {
- edge = list_entry(list.next, struct backref_edge, list[UPPER]);
+ while (!list_empty(&cache->pending_edge)) {
+ edge = list_first_entry(&cache->pending_edge,
+ struct backref_edge, list[UPPER]);
list_del_init(&edge->list[UPPER]);
upper = edge->node[UPPER];
if (upper->detached) {
@@ -1050,7 +1062,7 @@ again:
lower = edge->node[LOWER];
free_backref_edge(cache, edge);
if (list_empty(&lower->upper))
- list_add(&lower->list, &useless);
+ list_add(&lower->list, &cache->useless_node);
continue;
}
@@ -1090,7 +1102,7 @@ again:
list_add_tail(&edge->list[UPPER], &upper->lower);
list_for_each_entry(edge, &upper->upper, list[LOWER])
- list_add_tail(&edge->list[UPPER], &list);
+ list_add_tail(&edge->list[UPPER], &cache->pending_edge);
}
/*
* process useless backref nodes. backref nodes for tree leaves
@@ -1098,8 +1110,9 @@ again:
* tree blocks are left in the cache to avoid unnecessary backref
* lookup.
*/
- while (!list_empty(&useless)) {
- upper = list_entry(useless.next, struct backref_node, list);
+ while (!list_empty(&cache->useless_node)) {
+ upper = list_first_entry(&cache->useless_node,
+ struct backref_node, list);
list_del_init(&upper->list);
ASSERT(list_empty(&upper->upper));
if (upper == node)
@@ -1109,7 +1122,7 @@ again:
upper->lowest = 0;
}
while (!list_empty(&upper->lower)) {
- edge = list_entry(upper->lower.next,
+ edge = list_first_entry(&upper->lower,
struct backref_edge, list[UPPER]);
list_del(&edge->list[UPPER]);
list_del(&edge->list[LOWER]);
@@ -1117,7 +1130,7 @@ again:
free_backref_edge(cache, edge);
if (list_empty(&lower->upper))
- list_add(&lower->list, &useless);
+ list_add(&lower->list, &cache->useless_node);
}
mark_block_processed(rc, upper);
if (upper->level > 0) {
@@ -1132,14 +1145,14 @@ out:
btrfs_backref_iter_free(iter);
btrfs_free_path(path);
if (err) {
- while (!list_empty(&useless)) {
- lower = list_entry(useless.next,
+ while (!list_empty(&cache->useless_node)) {
+ lower = list_first_entry(&cache->useless_node,
struct backref_node, list);
list_del_init(&lower->list);
}
- while (!list_empty(&list)) {
- edge = list_first_entry(&list, struct backref_edge,
- list[UPPER]);
+ while (!list_empty(&cache->pending_edge)) {
+ edge = list_first_entry(&cache->pending_edge,
+ struct backref_edge, list[UPPER]);
list_del(&edge->list[UPPER]);
list_del(&edge->list[LOWER]);
lower = edge->node[LOWER];
@@ -1152,20 +1165,21 @@ out:
*/
if (list_empty(&lower->upper) &&
RB_EMPTY_NODE(&lower->rb_node))
- list_add(&lower->list, &useless);
+ list_add(&lower->list, &cache->useless_node);
if (!RB_EMPTY_NODE(&upper->rb_node))
continue;
/* Add this guy's upper edges to the list to process */
list_for_each_entry(edge, &upper->upper, list[LOWER])
- list_add_tail(&edge->list[UPPER], &list);
+ list_add_tail(&edge->list[UPPER],
+ &cache->pending_edge);
if (list_empty(&upper->upper))
- list_add(&upper->list, &useless);
+ list_add(&upper->list, &cache->useless_node);
}
- while (!list_empty(&useless)) {
- lower = list_entry(useless.next,
+ while (!list_empty(&cache->useless_node)) {
+ lower = list_first_entry(&cache->useless_node,
struct backref_node, list);
list_del_init(&lower->list);
if (lower == node)
@@ -1174,9 +1188,13 @@ out:
}
remove_backref_node(cache, node);
+ ASSERT(list_empty(&cache->useless_node) &&
+ list_empty(&cache->pending_edge));
return ERR_PTR(err);
}
ASSERT(!node || !node->detached);
+ ASSERT(list_empty(&cache->useless_node) &&
+ list_empty(&cache->pending_edge));
return node;
}
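For reference, the list_first_entry() conversions above are purely cosmetic: in <linux/list.h>, list_first_entry(ptr, type, member) is defined as list_entry((ptr)->next, type, member), so the two forms below (simplified from the hunks above) are equivalent, with the new one making the "take the first element" intent explicit.

    /* Before: open-coded access to the list head's next pointer. */
    edge = list_entry(cache->pending_edge.next,
                      struct backref_edge, list[UPPER]);

    /* After: same result, clearer intent. */
    edge = list_first_entry(&cache->pending_edge,
                            struct backref_edge, list[UPPER]);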