author		Jeff Mahoney <jeffm@suse.com>		2016-06-22 18:54:23 -0400
committer	David Sterba <dsterba@suse.com>		2016-12-06 16:06:59 +0100
commit		0b246afa62b0cf5b09d078121f543135f28492ad
tree		200ad296d09f1b2f5329658c8de81bc625007ace /fs/btrfs/extent-tree.c
parent		6202df6921494f29308307e0ae6f567c2ab2ba19
btrfs: root->fs_info cleanup, add fs_info convenience variables
In routines where someptr->fs_info is referenced multiple times, we
introduce a convenience variable. This makes the code considerably
more readable.
Signed-off-by: Jeff Mahoney <jeffm@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
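
To make the pattern concrete, here is a minimal sketch in C with stand-in types and hypothetical helpers do_a() and do_b() (illustration only, not code from this patch):

struct btrfs_fs_info { int dummy; };                       /* stand-in for the real struct */
struct btrfs_root { struct btrfs_fs_info *fs_info; };      /* stand-in for the real struct */

static void do_a(struct btrfs_fs_info *fs_info) { (void)fs_info; } /* hypothetical helper */
static void do_b(struct btrfs_fs_info *fs_info) { (void)fs_info; } /* hypothetical helper */

/* Before: root->fs_info is dereferenced at every use. */
static void example_before(struct btrfs_root *root)
{
	do_a(root->fs_info);
	do_b(root->fs_info);
}

/* After: the pointer is hoisted into a convenience variable once. */
static void example_after(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	do_a(fs_info);
	do_b(fs_info);
}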
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--	fs/btrfs/extent-tree.c	751
1 file changed, 393 insertions(+), 358 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 2e395d46ba9c..03512c6f4964 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -226,10 +226,11 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
 static int add_excluded_extent(struct btrfs_root *root,
 			       u64 start, u64 num_bytes)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 end = start + num_bytes - 1;
-	set_extent_bits(&root->fs_info->freed_extents[0],
+	set_extent_bits(&fs_info->freed_extents[0],
 			start, end, EXTENT_UPTODATE);
-	set_extent_bits(&root->fs_info->freed_extents[1],
+	set_extent_bits(&fs_info->freed_extents[1],
 			start, end, EXTENT_UPTODATE);
 	return 0;
 }
@@ -237,20 +238,22 @@ static int add_excluded_extent(struct btrfs_root *root,
 static void free_excluded_extents(struct btrfs_root *root,
 				  struct btrfs_block_group_cache *cache)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 start, end;
 
 	start = cache->key.objectid;
 	end = start + cache->key.offset - 1;
 
-	clear_extent_bits(&root->fs_info->freed_extents[0],
+	clear_extent_bits(&fs_info->freed_extents[0],
 			  start, end, EXTENT_UPTODATE);
-	clear_extent_bits(&root->fs_info->freed_extents[1],
+	clear_extent_bits(&fs_info->freed_extents[1],
 			  start, end, EXTENT_UPTODATE);
 }
 
 static int exclude_super_stripes(struct btrfs_root *root,
 				 struct btrfs_block_group_cache *cache)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 bytenr;
 	u64 *logical;
 	int stripe_len;
@@ -267,7 +270,7 @@ static int exclude_super_stripes(struct btrfs_root *root,
 
 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
 		bytenr = btrfs_sb_offset(i);
-		ret = btrfs_rmap_block(root->fs_info, cache->key.objectid,
+		ret = btrfs_rmap_block(fs_info, cache->key.objectid,
 				       bytenr, 0, &logical, &nr, &stripe_len);
 		if (ret)
 			return ret;
@@ -332,10 +335,12 @@ static void put_caching_control(struct btrfs_caching_control *ctl)
 static void fragment_free_space(struct btrfs_root *root,
 				struct btrfs_block_group_cache *block_group)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
+
 	u64 start = block_group->key.objectid;
 	u64 len = block_group->key.offset;
 	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
-		root->fs_info->nodesize : root->fs_info->sectorsize;
+		fs_info->nodesize : fs_info->sectorsize;
 	u64 step = chunk << 1;
 
 	while (len > chunk) {
@@ -394,9 +399,9 @@ u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
 
 static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
 {
-	struct btrfs_block_group_cache *block_group;
-	struct btrfs_fs_info *fs_info;
-	struct btrfs_root *extent_root;
+	struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
+	struct btrfs_fs_info *fs_info = block_group->fs_info;
+	struct btrfs_root *extent_root = fs_info->extent_root;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
 	struct btrfs_key key;
@@ -406,10 +411,6 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
 	int ret;
 	bool wakeup = true;
 
-	block_group = caching_ctl->block_group;
-	fs_info = block_group->fs_info;
-	extent_root = fs_info->extent_root;
-
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
@@ -780,6 +781,7 @@ void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
 /* simple helper to search for an existing data extent at a given offset */
 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 	struct btrfs_key key;
 	struct btrfs_path *path;
@@ -791,8 +793,7 @@ int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
 	key.objectid = start;
 	key.offset = len;
 	key.type = BTRFS_EXTENT_ITEM_KEY;
-	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
-				0, 0);
+	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
 	btrfs_free_path(path);
 	return ret;
 }
@@ -810,6 +811,7 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root, u64 bytenr,
 			     u64 offset, int metadata, u64 *refs, u64 *flags)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_delayed_ref_head *head;
 	struct btrfs_delayed_ref_root *delayed_refs;
 	struct btrfs_path *path;
@@ -825,8 +827,8 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
 	 * If we don't have skinny metadata, don't bother doing anything
 	 * different
 	 */
-	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
-		offset = root->fs_info->nodesize;
+	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
+		offset = fs_info->nodesize;
 		metadata = 0;
 	}
@@ -847,8 +849,7 @@ search_again:
 	else
 		key.type = BTRFS_EXTENT_ITEM_KEY;
 
-	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
-				&key, path, 0, 0);
+	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
 	if (ret < 0)
 		goto out_free;
@@ -859,7 +860,7 @@ search_again:
 					      path->slots[0]);
 			if (key.objectid == bytenr &&
 			    key.type == BTRFS_EXTENT_ITEM_KEY &&
-			    key.offset == root->fs_info->nodesize)
+			    key.offset == fs_info->nodesize)
 				ret = 0;
 		}
 	}
@@ -1540,6 +1541,7 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
 				 u64 parent, u64 root_objectid,
 				 u64 owner, u64 offset, int insert)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_key key;
 	struct extent_buffer *leaf;
 	struct btrfs_extent_item *ei;
@@ -1553,8 +1555,7 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
 	int want;
 	int ret;
 	int err = 0;
-	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
-						 SKINNY_METADATA);
+	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
 
 	key.objectid = bytenr;
 	key.type = BTRFS_EXTENT_ITEM_KEY;
@@ -2025,6 +2026,7 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 			 u64 num_bytes, u64 *actual_bytes)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 	u64 discarded_bytes = 0;
 	struct btrfs_bio *bbio = NULL;
@@ -2034,10 +2036,10 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 	 * Avoid races with device replace and make sure our bbio has devices
 	 * associated to its stripes that don't go away while we are discarding.
 	 */
-	btrfs_bio_counter_inc_blocked(root->fs_info);
+	btrfs_bio_counter_inc_blocked(fs_info);
 	/* Tell the block device(s) that the sectors can be discarded */
-	ret = btrfs_map_block(root->fs_info, BTRFS_MAP_DISCARD,
-			      bytenr, &num_bytes, &bbio, 0);
+	ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, bytenr, &num_bytes,
+			      &bbio, 0);
 	/* Error condition is -ENOMEM */
 	if (!ret) {
 		struct btrfs_bio_stripe *stripe = bbio->stripes;
@@ -2067,7 +2069,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 		}
 		btrfs_put_bbio(bbio);
 	}
-	btrfs_bio_counter_dec(root->fs_info);
+	btrfs_bio_counter_dec(fs_info);
 
 	if (actual_bytes)
 		*actual_bytes = discarded_bytes;
@@ -2154,7 +2156,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
 	path->reada = READA_FORWARD;
 	path->leave_spinning = 1;
 	/* now insert the actual backref */
-	ret = insert_extent_backref(trans, root->fs_info->extent_root,
+	ret = insert_extent_backref(trans, fs_info->extent_root,
 				    path, bytenr, parent, root_objectid,
 				    owner, offset, refs_to_add);
 	if (ret)
@@ -2170,6 +2172,7 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
 				struct btrfs_delayed_extent_op *extent_op,
 				int insert_reserved)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret = 0;
 	struct btrfs_delayed_data_ref *ref;
 	struct btrfs_key ins;
@@ -2182,7 +2185,7 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
 	ins.type = BTRFS_EXTENT_ITEM_KEY;
 
 	ref = btrfs_delayed_node_to_data_ref(node);
-	trace_run_delayed_data_ref(root->fs_info, node, ref, node->action);
+	trace_run_delayed_data_ref(fs_info, node, ref, node->action);
 
 	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
 		parent = ref->parent;
@@ -2234,6 +2237,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
 				 struct btrfs_delayed_ref_node *node,
 				 struct btrfs_delayed_extent_op *extent_op)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_key key;
 	struct btrfs_path *path;
 	struct btrfs_extent_item *ei;
@@ -2246,7 +2250,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
 	if (trans->aborted)
 		return 0;
 
-	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
+	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA))
 		metadata = 0;
 
 	path = btrfs_alloc_path();
@@ -2266,8 +2270,7 @@ again:
 	path->reada = READA_FORWARD;
 	path->leave_spinning = 1;
-	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
-				path, 0, 1);
+	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 1);
 	if (ret < 0) {
 		err = ret;
 		goto out;
@@ -2302,7 +2305,7 @@ again:
 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
 	if (item_size < sizeof(*ei)) {
-		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
+		ret = convert_extent_item_v0(trans, fs_info->extent_root,
 					     path, (u64)-1, 0);
 		if (ret < 0) {
 			err = ret;
@@ -2328,16 +2331,16 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
 				struct btrfs_delayed_extent_op *extent_op,
 				int insert_reserved)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret = 0;
 	struct btrfs_delayed_tree_ref *ref;
 	struct btrfs_key ins;
 	u64 parent = 0;
 	u64 ref_root = 0;
-	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
-						 SKINNY_METADATA);
+	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
 
 	ref = btrfs_delayed_node_to_tree_ref(node);
-	trace_run_delayed_tree_ref(root->fs_info, node, ref, node->action);
+	trace_run_delayed_tree_ref(fs_info, node, ref, node->action);
 
 	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
 		parent = ref->parent;
@@ -2388,6 +2391,7 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
 			       struct btrfs_delayed_extent_op *extent_op,
 			       int insert_reserved)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret = 0;
 
 	if (trans->aborted) {
@@ -2407,22 +2411,20 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
 		 */
 		BUG_ON(extent_op);
 		head = btrfs_delayed_node_to_head(node);
-		trace_run_delayed_ref_head(root->fs_info, node, head,
-					   node->action);
+		trace_run_delayed_ref_head(fs_info, node, head, node->action);
 
 		if (insert_reserved) {
 			btrfs_pin_extent(root, node->bytenr,
 					 node->num_bytes, 1);
 			if (head->is_data) {
-				ret = btrfs_del_csums(trans, root->fs_info,
+				ret = btrfs_del_csums(trans, fs_info,
 						      node->bytenr,
 						      node->num_bytes);
 			}
 		}
 
 		/* Also free its reserved qgroup space */
-		btrfs_qgroup_free_delayed_ref(root->fs_info,
-					      head->qgroup_ref_root,
+		btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
 					      head->qgroup_reserved);
 		return ret;
 	}
@@ -2748,18 +2750,19 @@ static u64 find_middle(struct rb_root *root)
 
 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 num_bytes;
 
 	num_bytes = heads * (sizeof(struct btrfs_extent_item) +
 			     sizeof(struct btrfs_extent_inline_ref));
-	if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
+	if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
 		num_bytes += heads * sizeof(struct btrfs_tree_block_info);
 
 	/*
 	 * We don't ever fill up leaves all the way so multiply by 2 just to be
 	 * closer to what we're really going to want to use.
 	 */
-	return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root->fs_info));
+	return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(fs_info));
 }
 
 /*
@@ -2768,14 +2771,15 @@ static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
  */
 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 csum_size;
 	u64 num_csums_per_leaf;
 	u64 num_csums;
 
-	csum_size = BTRFS_MAX_ITEM_SIZE(root->fs_info);
+	csum_size = BTRFS_MAX_ITEM_SIZE(fs_info);
 	num_csums_per_leaf = div64_u64(csum_size,
-			(u64)btrfs_super_csum_size(root->fs_info->super_copy));
-	num_csums = div64_u64(csum_bytes, root->fs_info->sectorsize);
+			(u64)btrfs_super_csum_size(fs_info->super_copy));
+	num_csums = div64_u64(csum_bytes, fs_info->sectorsize);
 	num_csums += num_csums_per_leaf - 1;
 	num_csums = div64_u64(num_csums, num_csums_per_leaf);
 	return num_csums;
@@ -2784,6 +2788,7 @@ u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
 				       struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_rsv *global_rsv;
 	u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
 	u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
@@ -2791,15 +2796,15 @@ int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
 	u64 num_bytes, num_dirty_bgs_bytes;
 	int ret = 0;
 
-	num_bytes = btrfs_calc_trans_metadata_size(root->fs_info, 1);
+	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
 	num_heads = heads_to_leaves(root, num_heads);
 	if (num_heads > 1)
-		num_bytes += (num_heads - 1) * root->fs_info->nodesize;
+		num_bytes += (num_heads - 1) * fs_info->nodesize;
 	num_bytes <<= 1;
-	num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->fs_info->nodesize;
-	num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root->fs_info,
+	num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * fs_info->nodesize;
+	num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(fs_info,
 							     num_dirty_bgs);
-	global_rsv = &root->fs_info->global_block_rsv;
+	global_rsv = &fs_info->global_block_rsv;
 
 	/*
 	 * If we can't allocate any more chunks lets make sure we have _lots_ of
@@ -2892,6 +2897,7 @@ done:
 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
 				 unsigned long count, u64 transid, int wait)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct async_delayed_refs *async;
 	int ret;
@@ -2899,7 +2905,7 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root,
 	if (!async)
 		return -ENOMEM;
 
-	async->root = root->fs_info->tree_root;
+	async->root = fs_info->tree_root;
 	async->count = count;
 	async->error = 0;
 	async->transid = transid;
@@ -2912,7 +2918,7 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root,
 	btrfs_init_work(&async->work, btrfs_extent_refs_helper,
 			delayed_ref_async_start, NULL, NULL);
 
-	btrfs_queue_work(root->fs_info->extent_workers, &async->work);
+	btrfs_queue_work(fs_info->extent_workers, &async->work);
 
 	if (wait) {
 		wait_for_completion(&async->wait);
@@ -2936,6 +2942,7 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root,
 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root, unsigned long count)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct rb_node *node;
 	struct btrfs_delayed_ref_root *delayed_refs;
 	struct btrfs_delayed_ref_head *head;
@@ -2947,11 +2954,11 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 	if (trans->aborted)
 		return 0;
 
-	if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &root->fs_info->flags))
+	if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
 		return 0;
 
-	if (root == root->fs_info->extent_root)
-		root = root->fs_info->tree_root;
+	if (root == fs_info->extent_root)
+		root = fs_info->tree_root;
 
 	delayed_refs = &trans->transaction->delayed_refs;
 	if (count == 0)
@@ -3019,6 +3026,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
 				u64 bytenr, u64 num_bytes, u64 flags,
 				int level, int is_data)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_delayed_extent_op *extent_op;
 	int ret;
@@ -3032,7 +3040,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
 	extent_op->is_data = is_data ? true : false;
 	extent_op->level = level;
 
-	ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
+	ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr,
 					  num_bytes, extent_op);
 	if (ret)
 		btrfs_free_delayed_extent_op(extent_op);
@@ -3106,7 +3114,8 @@ static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
 					struct btrfs_path *path,
 					u64 objectid, u64 offset, u64 bytenr)
 {
-	struct btrfs_root *extent_root = root->fs_info->extent_root;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_root *extent_root = fs_info->extent_root;
 	struct extent_buffer *leaf;
 	struct btrfs_extent_data_ref *ref;
 	struct btrfs_extent_inline_ref *iref;
@@ -3213,6 +3222,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
 			   struct extent_buffer *buf,
 			   int full_backref, int inc)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 bytenr;
 	u64 num_bytes;
 	u64 parent;
@@ -3227,7 +3237,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
 			    u64, u64, u64, u64, u64, u64);
 
-	if (btrfs_is_testing(root->fs_info))
+	if (btrfs_is_testing(fs_info))
 		return 0;
 
 	ref_root = btrfs_header_owner(buf);
@@ -3270,7 +3280,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
 				goto fail;
 		} else {
 			bytenr = btrfs_node_blockptr(buf, i);
-			num_bytes = root->fs_info->nodesize;
+			num_bytes = fs_info->nodesize;
 			ret = process_func(trans, root, bytenr, num_bytes,
 					   parent, ref_root, level - 1, 0);
 			if (ret)
@@ -3300,7 +3310,8 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
 				 struct btrfs_block_group_cache *cache)
 {
 	int ret;
-	struct btrfs_root *extent_root = root->fs_info->extent_root;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_root *extent_root = fs_info->extent_root;
 	unsigned long bi;
 	struct extent_buffer *leaf;
@@ -3325,19 +3336,18 @@ static struct btrfs_block_group_cache *
 next_block_group(struct btrfs_root *root,
 		 struct btrfs_block_group_cache *cache)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct rb_node *node;
 
-	spin_lock(&root->fs_info->block_group_cache_lock);
+	spin_lock(&fs_info->block_group_cache_lock);
 
 	/* If our block group was removed, we need a full search. */
 	if (RB_EMPTY_NODE(&cache->cache_node)) {
 		const u64 next_bytenr = cache->key.objectid + cache->key.offset;
 
-		spin_unlock(&root->fs_info->block_group_cache_lock);
+		spin_unlock(&fs_info->block_group_cache_lock);
 		btrfs_put_block_group(cache);
-		cache = btrfs_lookup_first_block_group(root->fs_info,
-						       next_bytenr);
-		return cache;
+		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); return cache;
 	}
 	node = rb_next(&cache->cache_node);
 	btrfs_put_block_group(cache);
@@ -3347,7 +3357,7 @@ next_block_group(struct btrfs_root *root,
 		btrfs_get_block_group(cache);
 	} else
 		cache = NULL;
-	spin_unlock(&root->fs_info->block_group_cache_lock);
+	spin_unlock(&fs_info->block_group_cache_lock);
 	return cache;
 }
@@ -3355,7 +3365,8 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
 			    struct btrfs_trans_handle *trans,
 			    struct btrfs_path *path)
 {
-	struct btrfs_root *root = block_group->fs_info->tree_root;
+	struct btrfs_fs_info *fs_info = block_group->fs_info;
+	struct btrfs_root *root = fs_info->tree_root;
 	struct inode *inode = NULL;
 	u64 alloc_hint = 0;
 	int dcs = BTRFS_DC_ERROR;
@@ -3429,7 +3440,7 @@ again:
 	if (i_size_read(inode) > 0) {
 		ret = btrfs_check_trunc_cache_free_space(root,
-					&root->fs_info->global_block_rsv);
+					&fs_info->global_block_rsv);
 		if (ret)
 			goto out_put;
@@ -3440,7 +3451,7 @@ again:
 	spin_lock(&block_group->lock);
 	if (block_group->cached != BTRFS_CACHE_FINISHED ||
-	    !btrfs_test_opt(root->fs_info, SPACE_CACHE)) {
+	    !btrfs_test_opt(fs_info, SPACE_CACHE)) {
 		/*
 		 * don't bother trying to write stuff out _if_
 		 * a) we're not cached,
@@ -3511,12 +3522,13 @@ out:
 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
 			    struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_group_cache *cache, *tmp;
 	struct btrfs_transaction *cur_trans = trans->transaction;
 	struct btrfs_path *path;
 
 	if (list_empty(&cur_trans->dirty_bgs) ||
-	    !btrfs_test_opt(root->fs_info, SPACE_CACHE))
+	    !btrfs_test_opt(fs_info, SPACE_CACHE))
 		return 0;
 
 	path = btrfs_alloc_path();
@@ -3549,6 +3561,7 @@ int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
 				   struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_group_cache *cache;
 	struct btrfs_transaction *cur_trans = trans->transaction;
 	int ret = 0;
@@ -3622,7 +3635,7 @@ again:
 		if (cache->disk_cache_state == BTRFS_DC_SETUP) {
 			cache->io_ctl.inode = NULL;
-			ret = btrfs_write_out_cache(root->fs_info, trans,
+			ret = btrfs_write_out_cache(fs_info, trans,
 						    cache, path);
 			if (ret == 0 && cache->io_ctl.inode) {
 				num_started++;
@@ -3712,6 +3725,7 @@ again:
 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 				   struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_group_cache *cache;
 	struct btrfs_transaction *cur_trans = trans->transaction;
 	int ret = 0;
@@ -3775,7 +3789,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
 			cache->io_ctl.inode = NULL;
-			ret = btrfs_write_out_cache(root->fs_info, trans,
+			ret = btrfs_write_out_cache(fs_info, trans,
 						    cache, path);
 			if (ret == 0 && cache->io_ctl.inode) {
 				num_started++;
@@ -3836,10 +3850,11 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 
 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_group_cache *block_group;
 	int readonly = 0;
 
-	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
+	block_group = btrfs_lookup_block_group(fs_info, bytenr);
 	if (!block_group || block_group->ro)
 		readonly = 1;
 	if (block_group)
@@ -4050,7 +4065,8 @@ static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
  */
 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 {
-	u64 num_devices = root->fs_info->fs_devices->rw_devices;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	u64 num_devices = fs_info->fs_devices->rw_devices;
 	u64 target;
 	u64 raid_type;
 	u64 allowed = 0;
@@ -4059,16 +4075,16 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 	 * see if restripe for this chunk_type is in progress, if so
 	 * try to reduce to the target profile
 	 */
-	spin_lock(&root->fs_info->balance_lock);
-	target = get_restripe_target(root->fs_info, flags);
+	spin_lock(&fs_info->balance_lock);
+	target = get_restripe_target(fs_info, flags);
 	if (target) {
 		/* pick target profile only if it's already available */
 		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
-			spin_unlock(&root->fs_info->balance_lock);
+			spin_unlock(&fs_info->balance_lock);
 			return extended_to_chunk(target);
 		}
 	}
-	spin_unlock(&root->fs_info->balance_lock);
+	spin_unlock(&fs_info->balance_lock);
 
 	/* First, mask out the RAID levels which aren't possible */
 	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
@@ -4095,32 +4111,34 @@ static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	unsigned seq;
 	u64 flags;
 
 	do {
 		flags = orig_flags;
-		seq = read_seqbegin(&root->fs_info->profiles_lock);
+		seq = read_seqbegin(&fs_info->profiles_lock);
 
 		if (flags & BTRFS_BLOCK_GROUP_DATA)
-			flags |= root->fs_info->avail_data_alloc_bits;
+			flags |= fs_info->avail_data_alloc_bits;
 		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
-			flags |= root->fs_info->avail_system_alloc_bits;
+			flags |= fs_info->avail_system_alloc_bits;
 		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
-			flags |= root->fs_info->avail_metadata_alloc_bits;
-	} while (read_seqretry(&root->fs_info->profiles_lock, seq));
+			flags |= fs_info->avail_metadata_alloc_bits;
+	} while (read_seqretry(&fs_info->profiles_lock, seq));
 
 	return btrfs_reduce_alloc_profile(root, flags);
 }
 
 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 flags;
 	u64 ret;
 
 	if (data)
 		flags = BTRFS_BLOCK_GROUP_DATA;
-	else if (root == root->fs_info->chunk_root)
+	else if (root == fs_info->chunk_root)
 		flags = BTRFS_BLOCK_GROUP_SYSTEM;
 	else
 		flags = BTRFS_BLOCK_GROUP_METADATA;
@@ -4140,7 +4158,7 @@ int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
 	int have_pinned_space;
 
 	/* make sure bytes are sectorsize aligned */
-	bytes = ALIGN(bytes, root->fs_info->sectorsize);
+	bytes = ALIGN(bytes, fs_info->sectorsize);
 
 	if (btrfs_is_free_space_inode(inode)) {
 		need_commit = 0;
@@ -4186,7 +4204,7 @@ alloc:
 		if (IS_ERR(trans))
 			return PTR_ERR(trans);
 
-		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
+		ret = do_chunk_alloc(trans, fs_info->extent_root,
 				     alloc_target, CHUNK_ALLOC_NO_FORCE);
 		btrfs_end_transaction(trans, root);
@@ -4218,12 +4236,13 @@ alloc:
 	/* commit the current transaction and try again */
 commit_trans:
 	if (need_commit &&
-	    !atomic_read(&root->fs_info->open_ioctl_trans)) {
+	    !atomic_read(&fs_info->open_ioctl_trans)) {
 		need_commit--;
 
 		if (need_commit > 0) {
 			btrfs_start_delalloc_roots(fs_info, 0, -1);
-			btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
+			btrfs_wait_ordered_roots(fs_info, -1, 0,
+						 (u64)-1);
 		}
 
 		trans = btrfs_join_transaction(root);
@@ -4241,21 +4260,21 @@ commit_trans:
 			 * operations. Wait for it to finish so that
 			 * more space is released.
 			 */
-			mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
-			mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
+			mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
+			mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
 			goto again;
 		} else {
 			btrfs_end_transaction(trans, root);
 		}
 	}
 
-	trace_btrfs_space_reservation(root->fs_info,
+	trace_btrfs_space_reservation(fs_info,
 				      "space_info:enospc",
 				      data_sinfo->flags, bytes, 1);
 		return -ENOSPC;
 	}
 	data_sinfo->bytes_may_use += bytes;
-	trace_btrfs_space_reservation(root->fs_info, "space_info",
+	trace_btrfs_space_reservation(fs_info, "space_info",
 				      data_sinfo->flags, bytes, 1);
 	spin_unlock(&data_sinfo->lock);
@@ -4269,13 +4288,13 @@ commit_trans:
  */
 int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
 {
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	int ret;
 
 	/* align the range */
-	len = round_up(start + len, root->fs_info->sectorsize) -
-	      round_down(start, root->fs_info->sectorsize);
-	start = round_down(start, root->fs_info->sectorsize);
+	len = round_up(start + len, fs_info->sectorsize) -
+	      round_down(start, fs_info->sectorsize);
+	start = round_down(start, fs_info->sectorsize);
 
 	ret = btrfs_alloc_data_chunk_ondemand(inode, len);
 	if (ret < 0)
@@ -4299,21 +4318,21 @@ int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
 					    u64 len)
 {
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_space_info *data_sinfo;
 
 	/* Make sure the range is aligned to sectorsize */
-	len = round_up(start + len, root->fs_info->sectorsize) -
-	      round_down(start, root->fs_info->sectorsize);
-	start = round_down(start, root->fs_info->sectorsize);
+	len = round_up(start + len, fs_info->sectorsize) -
+	      round_down(start, fs_info->sectorsize);
+	start = round_down(start, fs_info->sectorsize);
 
-	data_sinfo = root->fs_info->data_sinfo;
+	data_sinfo = fs_info->data_sinfo;
 	spin_lock(&data_sinfo->lock);
 	if (WARN_ON(data_sinfo->bytes_may_use < len))
 		data_sinfo->bytes_may_use = 0;
 	else
 		data_sinfo->bytes_may_use -= len;
-	trace_btrfs_space_reservation(root->fs_info, "space_info",
+	trace_btrfs_space_reservation(fs_info, "space_info",
 				      data_sinfo->flags, len, 0);
 	spin_unlock(&data_sinfo->lock);
 }
@@ -4359,7 +4378,8 @@ static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
 static int should_alloc_chunk(struct btrfs_root *root,
 			      struct btrfs_space_info *sinfo, int force)
 {
-	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
 	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
 	u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
 	u64 thresh;
@@ -4380,7 +4400,7 @@ static int should_alloc_chunk(struct btrfs_root *root,
 	 * about 1% of the FS size.
 	 */
 	if (force == CHUNK_ALLOC_LIMITED) {
-		thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
+		thresh = btrfs_super_total_bytes(fs_info->super_copy);
 		thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
 
 		if (num_bytes - num_allocated < thresh)
@@ -4394,13 +4414,14 @@ static int should_alloc_chunk(struct btrfs_root *root,
 
 static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 num_dev;
 
 	if (type & (BTRFS_BLOCK_GROUP_RAID10 |
 		    BTRFS_BLOCK_GROUP_RAID0 |
 		    BTRFS_BLOCK_GROUP_RAID5 |
 		    BTRFS_BLOCK_GROUP_RAID6))
-		num_dev = root->fs_info->fs_devices->rw_devices;
+		num_dev = fs_info->fs_devices->rw_devices;
 	else if (type & BTRFS_BLOCK_GROUP_RAID1)
 		num_dev = 2;
 	else
@@ -4418,6 +4439,7 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
 			struct btrfs_root *root,
 			u64 type)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_space_info *info;
 	u64 left;
 	u64 thresh;
@@ -4428,9 +4450,9 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
 	 * Needed because we can end up allocating a system chunk and for an
 	 * atomic and race free space reservation in the chunk block reserve.
 	 */
-	ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
+	ASSERT(mutex_is_locked(&fs_info->chunk_mutex));
 
-	info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
+	info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
 	spin_lock(&info->lock);
 	left = info->total_bytes - info->bytes_used - info->bytes_pinned -
 		info->bytes_reserved - info->bytes_readonly -
@@ -4440,19 +4462,19 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
 	num_devs = get_profile_num_devs(root, type);
 
 	/* num_devs device items to update and 1 chunk item to add or remove */
-	thresh = btrfs_calc_trunc_metadata_size(root->fs_info, num_devs) +
-		btrfs_calc_trans_metadata_size(root->fs_info, 1);
+	thresh = btrfs_calc_trunc_metadata_size(fs_info, num_devs) +
+		btrfs_calc_trans_metadata_size(fs_info, 1);
 
-	if (left < thresh && btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
-		btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
-			left, thresh, type);
-		dump_space_info(root->fs_info, info, 0, 0);
+	if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
+		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
+			   left, thresh, type);
+		dump_space_info(fs_info, info, 0, 0);
 	}
 
 	if (left < thresh) {
 		u64 flags;
 
-		flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
+		flags = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
 		/*
 		 * Ignore failure to create system chunk. We might end up not
 		 * needing it, as we might not need to COW all nodes/leafs from
@@ -4463,8 +4485,8 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
 	}
 
 	if (!ret) {
-		ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
-					  &root->fs_info->chunk_block_rsv,
+		ret = btrfs_block_rsv_add(fs_info->chunk_root,
+					  &fs_info->chunk_block_rsv,
 					  thresh, BTRFS_RESERVE_NO_FLUSH);
 		if (!ret)
 			trans->chunk_bytes_reserved += thresh;
@@ -4492,10 +4514,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	if (trans->allocating_chunk)
 		return -ENOSPC;
 
-	space_info = __find_space_info(extent_root->fs_info, flags);
+	space_info = __find_space_info(fs_info, flags);
 	if (!space_info) {
-		ret = update_space_info(extent_root->fs_info, flags,
-					0, 0, 0, &space_info);
+		ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
 		BUG_ON(ret); /* -ENOMEM */
 	}
 	BUG_ON(!space_info); /* Logic error */
@@ -4607,7 +4628,8 @@ static int can_overcommit(struct btrfs_root *root,
 			  struct btrfs_space_info *space_info, u64 bytes,
 			  enum btrfs_reserve_flush_enum flush)
 {
-	struct btrfs_block_rsv *global_rsv;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
 	u64 profile;
 	u64 space_size;
 	u64 avail;
@@ -4617,8 +4639,6 @@ static int can_overcommit(struct btrfs_root *root,
 	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
 		return 0;
 
-	BUG_ON(root->fs_info == NULL);
-	global_rsv = &root->fs_info->global_block_rsv;
 	profile = btrfs_get_alloc_profile(root, 0);
 	used = space_info->bytes_used + space_info->bytes_reserved +
 		space_info->bytes_pinned + space_info->bytes_readonly;
@@ -4637,9 +4657,9 @@ static int can_overcommit(struct btrfs_root *root,
 
 	used += space_info->bytes_may_use;
 
-	spin_lock(&root->fs_info->free_chunk_lock);
-	avail = root->fs_info->free_chunk_space;
-	spin_unlock(&root->fs_info->free_chunk_lock);
+	spin_lock(&fs_info->free_chunk_lock);
+	avail = fs_info->free_chunk_space;
+	spin_unlock(&fs_info->free_chunk_lock);
 
 	/*
 	 * If we have dup, raid1 or raid10 then only half of the free
@@ -4670,7 +4690,8 @@ static int can_overcommit(struct btrfs_root *root,
 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
 					 unsigned long nr_pages, int nr_items)
 {
-	struct super_block *sb = root->fs_info->sb;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct super_block *sb = fs_info->sb;
 
 	if (down_read_trylock(&sb->s_umount)) {
 		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
@@ -4683,10 +4704,9 @@ static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
 		 * the filesystem is readonly(all dirty pages are written to
 		 * the disk).
 		 */
-		btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
+		btrfs_start_delalloc_roots(fs_info, 0, nr_items);
 		if (!current->journal_info)
-			btrfs_wait_ordered_roots(root->fs_info, nr_items,
-						 0, (u64)-1);
+			btrfs_wait_ordered_roots(fs_info, nr_items, 0, (u64)-1);
 	}
 }
@@ -4710,6 +4730,7 @@ static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
 			    bool wait_ordered)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_rsv *block_rsv;
 	struct btrfs_space_info *space_info;
 	struct btrfs_trans_handle *trans;
@@ -4726,17 +4747,16 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
 	to_reclaim = (u64)items * EXTENT_SIZE_PER_ITEM;
 
 	trans = (struct btrfs_trans_handle *)current->journal_info;
-	block_rsv = &root->fs_info->delalloc_block_rsv;
+	block_rsv = &fs_info->delalloc_block_rsv;
 	space_info = block_rsv->space_info;
 
 	delalloc_bytes = percpu_counter_sum_positive(
-						&root->fs_info->delalloc_bytes);
+						&fs_info->delalloc_bytes);
 	if (delalloc_bytes == 0) {
 		if (trans)
 			return;
 		if (wait_ordered)
-			btrfs_wait_ordered_roots(root->fs_info, items,
-						 0, (u64)-1);
+			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
 		return;
 	}
@@ -4749,7 +4769,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
 		 * We need to wait for the async pages to actually start before
 		 * we do anything.
 		 */
-		max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
+		max_reclaim = atomic_read(&fs_info->async_delalloc_pages);
 		if (!max_reclaim)
 			goto skip_async;
@@ -4758,8 +4778,8 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
 		else
 			max_reclaim -= nr_pages;
 
-		wait_event(root->fs_info->async_submit_wait,
-			   atomic_read(&root->fs_info->async_delalloc_pages) <=
+		wait_event(fs_info->async_submit_wait,
+			   atomic_read(&fs_info->async_delalloc_pages) <=
 			   (int)max_reclaim);
 skip_async:
 		if (!trans)
@@ -4780,15 +4800,14 @@ skip_async:
 		loops++;
 		if (wait_ordered && !trans) {
-			btrfs_wait_ordered_roots(root->fs_info, items,
-						 0, (u64)-1);
+			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
 		} else {
 			time_left = schedule_timeout_killable(1);
 			if (time_left)
 				break;
 		}
 		delalloc_bytes = percpu_counter_sum_positive(
-						&root->fs_info->delalloc_bytes);
+						&fs_info->delalloc_bytes);
 	}
 }
@@ -4806,7 +4825,8 @@ static int may_commit_transaction(struct btrfs_root *root,
 				  struct btrfs_space_info *space_info,
 				  u64 bytes, int force)
 {
-	struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
 	struct btrfs_trans_handle *trans;
 
 	trans = (struct btrfs_trans_handle *)current->journal_info;
@@ -4855,6 +4875,7 @@ static int flush_space(struct btrfs_root *root,
 		       struct btrfs_space_info *space_info, u64 num_bytes,
 		       u64 orig_bytes, int state)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_trans_handle *trans;
 	int nr;
 	int ret = 0;
@@ -4886,7 +4907,7 @@ static int flush_space(struct btrfs_root *root,
 			ret = PTR_ERR(trans);
 			break;
 		}
-		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
+		ret = do_chunk_alloc(trans, fs_info->extent_root,
 				     btrfs_get_alloc_profile(root, 0),
 				     CHUNK_ALLOC_NO_FORCE);
 		btrfs_end_transaction(trans, root);
@@ -4901,7 +4922,7 @@ static int flush_space(struct btrfs_root *root,
 		break;
 	}
 
-	trace_btrfs_flush_space(root->fs_info, space_info->flags, num_bytes,
+	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes,
 				orig_bytes, state, ret);
 	return ret;
 }
@@ -4947,6 +4968,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
 					struct btrfs_root *root, u64 used)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 thresh = div_factor_fine(space_info->total_bytes, 98);
 
 	/* If we're just plain full then async reclaim just slows us down. */
@@ -4956,9 +4978,8 @@ static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
 	if (!btrfs_calc_reclaim_metadata_size(root, space_info))
 		return 0;
 
-	return (used >= thresh && !btrfs_fs_closing(root->fs_info) &&
-		!test_bit(BTRFS_FS_STATE_REMOUNTING,
-			  &root->fs_info->fs_state));
+	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
+		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
 }
 
 static void wake_all_tickets(struct list_head *head)
@@ -5138,6 +5159,7 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
 				    u64 orig_bytes,
 				    enum btrfs_reserve_flush_enum flush)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct reserve_ticket ticket;
 	u64 used;
 	int ret = 0;
@@ -5158,15 +5180,13 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
 	 */
 	if (used + orig_bytes <= space_info->total_bytes) {
 		space_info->bytes_may_use += orig_bytes;
-		trace_btrfs_space_reservation(root->fs_info, "space_info",
-					      space_info->flags, orig_bytes,
-					      1);
+		trace_btrfs_space_reservation(fs_info, "space_info",
+					      space_info->flags, orig_bytes, 1);
 		ret = 0;
 	} else if (can_overcommit(root, space_info, orig_bytes, flush)) {
 		space_info->bytes_may_use += orig_bytes;
-		trace_btrfs_space_reservation(root->fs_info, "space_info",
-					      space_info->flags, orig_bytes,
-					      1);
+		trace_btrfs_space_reservation(fs_info, "space_info",
+					      space_info->flags, orig_bytes, 1);
 		ret = 0;
 	}
@@ -5185,7 +5205,7 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
 		list_add_tail(&ticket.list, &space_info->tickets);
 		if (!space_info->flush) {
 			space_info->flush = 1;
-			trace_btrfs_trigger_flush(root->fs_info,
+			trace_btrfs_trigger_flush(fs_info,
 						  space_info->flags,
 						  orig_bytes, flush,
 						  "enospc");
@@ -5203,15 +5223,13 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
 		 * which means we won't have fs_info->fs_root set, so don't do
 		 * the async reclaim as we will panic.
 		 */
-		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags) &&
+		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
 		    need_do_async_reclaim(space_info, root, used) &&
-		    !work_busy(&root->fs_info->async_reclaim_work)) {
-			trace_btrfs_trigger_flush(root->fs_info,
-						  space_info->flags,
-						  orig_bytes, flush,
-						  "preempt");
+		    !work_busy(&fs_info->async_reclaim_work)) {
+			trace_btrfs_trigger_flush(fs_info, space_info->flags,
+						  orig_bytes, flush, "preempt");
 			queue_work(system_unbound_wq,
-				   &root->fs_info->async_reclaim_work);
+				   &fs_info->async_reclaim_work);
 		}
 	}
 	spin_unlock(&space_info->lock);
@@ -5219,19 +5237,19 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
 		return ret;
 
 	if (flush == BTRFS_RESERVE_FLUSH_ALL)
-		return wait_reserve_ticket(root->fs_info, space_info, &ticket,
+		return wait_reserve_ticket(fs_info, space_info, &ticket,
 					   orig_bytes);
 
 	ret = 0;
-	priority_reclaim_metadata_space(root->fs_info, space_info, &ticket);
+	priority_reclaim_metadata_space(fs_info, space_info, &ticket);
 	spin_lock(&space_info->lock);
 	if (ticket.bytes) {
 		if (ticket.bytes < orig_bytes) {
 			u64 num_bytes = orig_bytes - ticket.bytes;
 			space_info->bytes_may_use -= num_bytes;
-			trace_btrfs_space_reservation(root->fs_info,
-					"space_info", space_info->flags,
-					num_bytes, 0);
+			trace_btrfs_space_reservation(fs_info, "space_info",
+						      space_info->flags,
+						      num_bytes, 0);
 		}
 		list_del_init(&ticket.list);
@@ -5261,22 +5279,20 @@ static int reserve_metadata_bytes(struct btrfs_root *root,
 				  u64 orig_bytes,
 				  enum btrfs_reserve_flush_enum flush)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
 	int ret;
 
 	ret = __reserve_metadata_bytes(root, block_rsv->space_info, orig_bytes,
 				       flush);
 	if (ret == -ENOSPC &&
 	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
-		struct btrfs_block_rsv *global_rsv =
-			&root->fs_info->global_block_rsv;
-
 		if (block_rsv != global_rsv &&
 		    !block_rsv_use_bytes(global_rsv, orig_bytes))
 			ret = 0;
 	}
 	if (ret == -ENOSPC)
-		trace_btrfs_space_reservation(root->fs_info,
-					      "space_info:enospc",
+		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
 					      block_rsv->space_info->flags,
 					      orig_bytes, 1);
 	return ret;
@@ -5286,18 +5302,19 @@ static struct btrfs_block_rsv *get_block_rsv(
 					const struct btrfs_trans_handle *trans,
 					const struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_rsv *block_rsv = NULL;
 
 	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
-	    (root == root->fs_info->csum_root && trans->adding_csums) ||
-	    (root == root->fs_info->uuid_root))
+	    (root == fs_info->csum_root && trans->adding_csums) ||
+	    (root == fs_info->uuid_root))
 		block_rsv = trans->block_rsv;
 
 	if (!block_rsv)
 		block_rsv = root->block_rsv;
 
 	if (!block_rsv)
-		block_rsv = &root->fs_info->empty_block_rsv;
+		block_rsv = &fs_info->empty_block_rsv;
 
 	return block_rsv;
 }
@@ -5619,12 +5636,13 @@ void btrfs_block_rsv_release(struct btrfs_root *root,
 			     struct btrfs_block_rsv *block_rsv,
 			     u64 num_bytes)
 {
-	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+
 	if (global_rsv == block_rsv ||
 	    block_rsv->space_info != global_rsv->space_info)
 		global_rsv = NULL;
-	block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
-				num_bytes);
+	block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes);
 }
@@ -5721,13 +5739,15 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
 				  struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
+
 	if (!trans->block_rsv)
 		return;
 
 	if (!trans->bytes_reserved)
 		return;
 
-	trace_btrfs_space_reservation(root->fs_info, "transaction",
+	trace_btrfs_space_reservation(fs_info, "transaction",
 				      trans->transid, trans->bytes_reserved, 0);
 	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
 	trans->bytes_reserved = 0;
@@ -5755,6 +5775,7 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
 				  struct inode *inode)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	/*
 	 * We always use trans->block_rsv here as we will have reserved space
@@ -5770,17 +5791,20 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
 	 * added it, so this takes the reservation so we can release it later
 	 * when we are truly done with the orphan item.
 	 */
-	u64 num_bytes = btrfs_calc_trans_metadata_size(root->fs_info, 1);
-	trace_btrfs_space_reservation(root->fs_info, "orphan",
+	u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
+
+	trace_btrfs_space_reservation(fs_info, "orphan",
 				      btrfs_ino(inode), num_bytes, 1);
 	return btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
 }
 
 void btrfs_orphan_release_metadata(struct inode *inode)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
-	u64 num_bytes = btrfs_calc_trans_metadata_size(root->fs_info, 1);
-	trace_btrfs_space_reservation(root->fs_info, "orphan",
+	u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
+
+	trace_btrfs_space_reservation(fs_info, "orphan",
 				      btrfs_ino(inode), num_bytes, 0);
 	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
 }
@@ -5807,11 +5831,12 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
 {
 	u64 num_bytes;
 	int ret;
-	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
 
-	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags)) {
+	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
 		/* One for parent inode, two for dir entries */
-		num_bytes = 3 * root->fs_info->nodesize;
+		num_bytes = 3 * fs_info->nodesize;
 		ret = btrfs_qgroup_reserve_meta(root, num_bytes);
 		if (ret)
 			return ret;
@@ -5821,8 +5846,8 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
 
 	*qgroup_reserved = num_bytes;
 
-	num_bytes = btrfs_calc_trans_metadata_size(root->fs_info, items);
-	rsv->space_info = __find_space_info(root->fs_info,
+	num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
+	rsv->space_info = __find_space_info(fs_info,
 					    BTRFS_BLOCK_GROUP_METADATA);
 	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
 				  BTRFS_RESERVE_FLUSH_ALL);
@@ -5906,6 +5931,7 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
 				   int reserve)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	u64 old_csums, num_csums;
@@ -5925,17 +5951,17 @@ static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
 		return 0;
 
 	if (reserve)
-		return btrfs_calc_trans_metadata_size(root->fs_info,
+		return btrfs_calc_trans_metadata_size(fs_info,
 						      num_csums - old_csums);
 
-	return btrfs_calc_trans_metadata_size(root->fs_info,
-					      old_csums - num_csums);
+	return btrfs_calc_trans_metadata_size(fs_info, old_csums - num_csums);
 }
 
 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
-	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
+	struct btrfs_block_rsv *block_rsv = &fs_info->delalloc_block_rsv;
 	u64 to_reserve = 0;
 	u64 csum_bytes;
 	unsigned nr_extents = 0;
@@ -5962,13 +5988,13 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 	}
 
 	if (flush != BTRFS_RESERVE_NO_FLUSH &&
-	    btrfs_transaction_in_commit(root->fs_info))
+	    btrfs_transaction_in_commit(fs_info))
 		schedule_timeout(1);
 
 	if (delalloc_lock)
 		mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
 
-	num_bytes = ALIGN(num_bytes, root->fs_info->sectorsize);
+	num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
 
 	spin_lock(&BTRFS_I(inode)->lock);
 	nr_extents = (unsigned)div64_u64(num_bytes +
@@ -5983,15 +6009,14 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 			BTRFS_I(inode)->reserved_extents;
 
 	/* We always want to reserve a slot for updating the inode. */
-	to_reserve = btrfs_calc_trans_metadata_size(root->fs_info,
-						    nr_extents + 1);
+	to_reserve = btrfs_calc_trans_metadata_size(fs_info, nr_extents + 1);
 	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
 	csum_bytes = BTRFS_I(inode)->csum_bytes;
 	spin_unlock(&BTRFS_I(inode)->lock);
 
-	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags)) {
+	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
 		ret = btrfs_qgroup_reserve_meta(root,
-				nr_extents * root->fs_info->nodesize);
+				nr_extents * fs_info->nodesize);
 		if (ret)
 			goto out_fail;
 	}
@@ -5999,14 +6024,14 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 	ret = btrfs_block_rsv_add(root, block_rsv, to_reserve, flush);
 	if (unlikely(ret)) {
 		btrfs_qgroup_free_meta(root,
-				       nr_extents * root->fs_info->nodesize);
+				       nr_extents * fs_info->nodesize);
 		goto out_fail;
 	}
 
 	spin_lock(&BTRFS_I(inode)->lock);
 	if (test_and_set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
 			     &BTRFS_I(inode)->runtime_flags)) {
-		to_reserve -= btrfs_calc_trans_metadata_size(root->fs_info, 1);
+		to_reserve -= btrfs_calc_trans_metadata_size(fs_info, 1);
 		release_extra = true;
 	}
 	BTRFS_I(inode)->reserved_extents += nr_extents;
@@ -6016,11 +6041,11 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
 
 	if (to_reserve)
-		trace_btrfs_space_reservation(root->fs_info, "delalloc",
+		trace_btrfs_space_reservation(fs_info, "delalloc",
 					      btrfs_ino(inode), to_reserve, 1);
 	if (release_extra)
 		btrfs_block_rsv_release(root, block_rsv,
-			btrfs_calc_trans_metadata_size(root->fs_info, 1));
+			btrfs_calc_trans_metadata_size(fs_info, 1));
 	return 0;
 
 out_fail:
@@ -6075,12 +6100,11 @@ out_fail:
 	}
 	spin_unlock(&BTRFS_I(inode)->lock);
 	if (dropped)
-		to_free += btrfs_calc_trans_metadata_size(root->fs_info,
-							  dropped);
+		to_free += btrfs_calc_trans_metadata_size(fs_info, dropped);
 
 	if (to_free) {
 		btrfs_block_rsv_release(root, block_rsv, to_free);
-		trace_btrfs_space_reservation(root->fs_info, "delalloc",
+		trace_btrfs_space_reservation(fs_info, "delalloc",
 					      btrfs_ino(inode), to_free, 0);
 	}
 	if (delalloc_lock)
@@ -6099,11 +6123,12 @@ out_fail:
  */
 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	u64 to_free = 0;
 	unsigned dropped;
 
-	num_bytes = ALIGN(num_bytes, root->fs_info->sectorsize);
+	num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
 
 	spin_lock(&BTRFS_I(inode)->lock);
 	dropped = drop_outstanding_extent(inode, num_bytes);
@@ -6111,17 +6136,15 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
 		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
 	spin_unlock(&BTRFS_I(inode)->lock);
 	if (dropped > 0)
-		to_free += btrfs_calc_trans_metadata_size(root->fs_info,
-							  dropped);
+		to_free += btrfs_calc_trans_metadata_size(fs_info, dropped);
 
-	if (btrfs_is_testing(root->fs_info))
+	if (btrfs_is_testing(fs_info))
 		return;
 
-	trace_btrfs_space_reservation(root->fs_info, "delalloc",
+	trace_btrfs_space_reservation(fs_info, "delalloc",
 				      btrfs_ino(inode), to_free, 0);
 
-	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
-				to_free);
+	btrfs_block_rsv_release(root, &fs_info->delalloc_block_rsv, to_free);
 }
 
 /**
@@ -6251,7 +6274,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
 			spin_unlock(&cache->lock);
 			spin_unlock(&cache->space_info->lock);
 
-			trace_btrfs_space_reservation(root->fs_info, "pinned",
+			trace_btrfs_space_reservation(info, "pinned",
 						      cache->space_info->flags,
 						      num_bytes, 1);
 			set_extent_dirty(info->pinned_extents,
@@ -6293,17 +6316,18 @@ static int update_block_group(struct btrfs_trans_handle *trans,
 
 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_group_cache *cache;
 	u64 bytenr;
 
-	spin_lock(&root->fs_info->block_group_cache_lock);
-	bytenr = root->fs_info->first_logical_byte;
-	spin_unlock(&root->fs_info->block_group_cache_lock);
+	spin_lock(&fs_info->block_group_cache_lock);
+	bytenr = fs_info->first_logical_byte;
+	spin_unlock(&fs_info->block_group_cache_lock);
 
 	if (bytenr < (u64)-1)
 		return bytenr;
 
-	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
+	cache = btrfs_lookup_first_block_group(fs_info, search_start);
 	if (!cache)
 		return 0;
@@ -6317,6 +6341,8 @@ static int pin_down_extent(struct btrfs_root *root,
 			   struct btrfs_block_group_cache *cache,
 			   u64 bytenr, u64 num_bytes, int reserved)
 {
+	struct btrfs_fs_info *fs_info = cache->fs_info;
+
 	spin_lock(&cache->space_info->lock);
 	spin_lock(&cache->lock);
 	cache->pinned += num_bytes;
@@ -6328,9 +6354,9 @@ static int pin_down_extent(struct btrfs_root *root,
 	spin_unlock(&cache->lock);
 	spin_unlock(&cache->space_info->lock);
 
-	trace_btrfs_space_reservation(root->fs_info, "pinned",
+	trace_btrfs_space_reservation(fs_info, "pinned",
 				      cache->space_info->flags, num_bytes, 1);
-	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
+	set_extent_dirty(fs_info->pinned_extents, bytenr,
 			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
 	return 0;
 }
@@ -6341,9 +6367,10 @@ static int pin_down_extent(struct btrfs_root *root,
 int btrfs_pin_extent(struct btrfs_root *root,
 		     u64 bytenr, u64 num_bytes, int reserved)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_group_cache *cache;
 
-	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
+	cache = btrfs_lookup_block_group(fs_info, bytenr);
 	BUG_ON(!cache); /* Logic error */
 
 	pin_down_extent(root, cache, bytenr, num_bytes, reserved);
@@ -6358,10 +6385,11 @@ int btrfs_pin_extent(struct btrfs_root *root,
 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
 				    u64 bytenr, u64 num_bytes)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_group_cache *cache;
 	int ret;
 
-	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
+	cache = btrfs_lookup_block_group(fs_info, bytenr);
 	if (!cache)
 		return -EINVAL;
@@ -6383,11 +6411,12 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
 static int __exclude_logged_extent(struct btrfs_root *root, u64 start,
 				   u64 num_bytes)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 	struct btrfs_block_group_cache *block_group;
 	struct btrfs_caching_control *caching_ctl;
 
-	block_group = btrfs_lookup_block_group(root->fs_info, start);
+	block_group = btrfs_lookup_block_group(fs_info, start);
 	if (!block_group)
 		return -EINVAL;
@@ -6615,8 +6644,9 @@ static struct btrfs_free_cluster *
 fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
 		   u64 *empty_cluster)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_free_cluster *ret = NULL;
-	bool ssd = btrfs_test_opt(root->fs_info, SSD);
+	bool ssd = btrfs_test_opt(fs_info, SSD);
 
 	*empty_cluster = 0;
 	if (btrfs_mixed_space_info(space_info))
@@ -6625,11 +6655,11 @@ fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
 	if (ssd)
 		*empty_cluster = SZ_2M;
 	if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
-		ret = &root->fs_info->meta_alloc_cluster;
+		ret = &fs_info->meta_alloc_cluster;
 		if (!ssd)
 			*empty_cluster = SZ_64K;
 	} else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) {
-		ret = &root->fs_info->data_alloc_cluster;
+		ret = &fs_info->data_alloc_cluster;
 	}
 
 	return ret;
@@ -6761,7 +6791,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 			break;
 		}
 
-		if (btrfs_test_opt(root->fs_info, DISCARD))
+		if (btrfs_test_opt(fs_info, DISCARD))
 			ret = btrfs_discard_extent(root, start,
 						   end + 1 - start, NULL);
@@ -6847,8 +6877,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 	u64 bytenr = node->bytenr;
 	u64 num_bytes = node->num_bytes;
 	int last_ref = 0;
-	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
-						 SKINNY_METADATA);
+	bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -7048,7 +7077,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 				goto out;
 			}
 		}
-		add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
+		add_pinned_bytes(info, -num_bytes, owner_objectid,
 				 root_objectid);
 	} else {
 		if (found_extent) {
@@ -7080,15 +7109,13 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 			}
 		}
 
-		ret = add_to_free_space_tree(trans, root->fs_info, bytenr,
-					     num_bytes);
+		ret = add_to_free_space_tree(trans, info, bytenr, num_bytes);
 		if (ret) {
 			btrfs_abort_transaction(trans, ret);
 			goto out;
 		}
 
-		ret = update_block_group(trans, root->fs_info, bytenr,
-					 num_bytes, 0);
+		ret = update_block_group(trans, info, bytenr, num_bytes, 0);
 		if (ret) {
 			btrfs_abort_transaction(trans, ret);
 			goto out;
 		}
@@ -7178,15 +7205,17 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 			   struct extent_buffer *buf,
 			   u64 parent, int last_ref)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int pin = 1;
 	int ret;
 
 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
-		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
-						 buf->start, buf->len,
-						 parent, root->root_key.objectid,
-						 btrfs_header_level(buf),
-						 BTRFS_DROP_DELAYED_REF, NULL);
+		ret = btrfs_add_delayed_tree_ref(fs_info, trans,
+						 buf->start, buf->len,
+						 parent,
+						 root->root_key.objectid,
+						 btrfs_header_level(buf),
+						 BTRFS_DROP_DELAYED_REF, NULL);
 		BUG_ON(ret); /* -ENOMEM */
 	}
@@ -7202,7 +7231,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
@@ -7202,7 +7231,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 			goto out;
 		}
 
-		cache = btrfs_lookup_block_group(root->fs_info, buf->start);
+		cache = btrfs_lookup_block_group(fs_info, buf->start);
 
 		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
 			pin_down_extent(root, cache, buf->start, buf->len, 1);
@@ -7220,8 +7249,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 	}
 out:
 	if (pin)
-		add_pinned_bytes(root->fs_info, buf->len,
-				 btrfs_header_level(buf),
+		add_pinned_bytes(fs_info, buf->len, btrfs_header_level(buf),
 				 root->root_key.objectid);
 
 	/*
@@ -7242,7 +7270,7 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	if (btrfs_is_testing(fs_info))
 		return 0;
 
-	add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
+	add_pinned_bytes(fs_info, num_bytes, owner, root_objectid);
 
 	/*
 	 * tree log blocks never actually go into the extent allocation
@@ -7442,8 +7470,9 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 				u64 hint_byte, struct btrfs_key *ins,
 				u64 flags, int delalloc)
 {
+	struct btrfs_fs_info *fs_info = orig_root->fs_info;
 	int ret = 0;
-	struct btrfs_root *root = orig_root->fs_info->extent_root;
+	struct btrfs_root *root = fs_info->extent_root;
 	struct btrfs_free_cluster *last_ptr = NULL;
 	struct btrfs_block_group_cache *block_group = NULL;
 	u64 search_start = 0;
@@ -7459,16 +7488,16 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 	bool orig_have_caching_bg = false;
 	bool full_search = false;
 
-	WARN_ON(num_bytes < root->fs_info->sectorsize);
+	WARN_ON(num_bytes < fs_info->sectorsize);
 	ins->type = BTRFS_EXTENT_ITEM_KEY;
 	ins->objectid = 0;
 	ins->offset = 0;
 
 	trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
 
-	space_info = __find_space_info(root->fs_info, flags);
+	space_info = __find_space_info(fs_info, flags);
 	if (!space_info) {
-		btrfs_err(root->fs_info, "No space info for %llu", flags);
+		btrfs_err(fs_info, "No space info for %llu", flags);
 		return -ENOSPC;
 	}
@@ -7515,8 +7544,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 	search_start = max(search_start, first_logical_byte(root, 0));
 	search_start = max(search_start, hint_byte);
 	if (search_start == hint_byte) {
-		block_group = btrfs_lookup_block_group(root->fs_info,
-						       search_start);
+		block_group = btrfs_lookup_block_group(fs_info, search_start);
 		/*
 		 * we don't want to use the block group if it doesn't match our
 		 * allocation bits, or if its not cached.
@@ -7769,7 +7797,7 @@ unclustered_alloc:
 			goto loop;
 		}
 checks:
-		search_start = ALIGN(offset, root->fs_info->stripesize);
+		search_start = ALIGN(offset, fs_info->stripesize);
 
 		/* move on to the next group */
 		if (search_start + num_bytes >
@@ -7968,7 +7996,7 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
 	flags = btrfs_get_alloc_profile(root, is_data);
 again:
-	WARN_ON(num_bytes < root->fs_info->sectorsize);
+	WARN_ON(num_bytes < fs_info->sectorsize);
 	ret = find_free_extent(root, ram_bytes, num_bytes, empty_size,
 			       hint_byte, ins, flags, delalloc);
 	if (!ret && !is_data) {
@@ -7977,7 +8005,7 @@ again:
 		if (!final_tried && ins->offset) {
 			num_bytes = min(num_bytes >> 1, ins->offset);
 			num_bytes = round_down(num_bytes,
-					       root->fs_info->sectorsize);
+					       fs_info->sectorsize);
 			num_bytes = max(num_bytes, min_alloc_size);
 			ram_bytes = num_bytes;
 			if (num_bytes == min_alloc_size)
@@ -7987,7 +8015,7 @@ again:
 			struct btrfs_space_info *sinfo;
 
 			sinfo = __find_space_info(fs_info, flags);
-			btrfs_err(root->fs_info,
+			btrfs_err(fs_info,
 				  "allocation failed flags %llu, wanted %llu",
 				  flags, num_bytes);
 			if (sinfo)
@@ -8002,20 +8030,21 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
 					u64 start, u64 len,
 					int pin, int delalloc)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_group_cache *cache;
 	int ret = 0;
 
-	cache = btrfs_lookup_block_group(root->fs_info, start);
+	cache = btrfs_lookup_block_group(fs_info, start);
 	if (!cache) {
-		btrfs_err(root->fs_info, "Unable to find block group for %llu",
-			  start);
+		btrfs_err(fs_info, "Unable to find block group for %llu",
+			  start);
 		return -ENOSPC;
 	}
 
 	if (pin)
 		pin_down_extent(root, cache, start, len, 1);
 	else {
-		if (btrfs_test_opt(root->fs_info, DISCARD))
+		if (btrfs_test_opt(fs_info, DISCARD))
 			ret = btrfs_discard_extent(root, start, len, NULL);
 		btrfs_add_free_space(cache, start, len);
 		btrfs_free_reserved_bytes(cache, len, delalloc);
@@ -8130,8 +8159,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
 	struct extent_buffer *leaf;
 	u32 size = sizeof(*extent_item) + sizeof(*iref);
 	u64 num_bytes = ins->offset;
-	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
-						 SKINNY_METADATA);
+	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
 
 	if (!skinny_metadata)
 		size += sizeof(*block_info);
@@ -8139,7 +8167,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
 	path = btrfs_alloc_path();
 	if (!path) {
 		btrfs_free_and_pin_reserved_extent(root, ins->objectid,
-						   root->fs_info->nodesize);
+						   fs_info->nodesize);
 		return -ENOMEM;
 	}
 
@@ -8149,7 +8177,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
 	if (ret) {
 		btrfs_free_path(path);
 		btrfs_free_and_pin_reserved_extent(root, ins->objectid,
-						   root->fs_info->nodesize);
+						   fs_info->nodesize);
 		return ret;
 	}
 
@@ -8163,7 +8191,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
 	if (skinny_metadata) {
 		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
-		num_bytes = root->fs_info->nodesize;
+		num_bytes = fs_info->nodesize;
 	} else {
 		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
 		btrfs_set_tree_block_key(leaf, block_info, key);
@@ -8199,7 +8227,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
 	}
 
 	trace_btrfs_reserved_extent_alloc(root, ins->objectid,
-					  root->fs_info->nodesize);
+					  fs_info->nodesize);
 	return ret;
 }
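[The btrfs_reserve_extent() hunks above retry a failed allocation after halving num_bytes, rounding it down to the sector size, and clamping it at min_alloc_size; the real code also bounds the new size by ins->offset, the largest free extent found. A rough standalone sketch of that back-off loop follows, with made-up sizes and the kernel's round_down() approximated for power-of-two alignments.]

/* Sketch of the ENOSPC back-off in btrfs_reserve_extent(): shrink the
 * request and retry until the minimum allocation size has been tried
 * once. The sizes here are illustrative, not real fs geometry. */
#include <stdio.h>

#define SECTORSIZE	4096ULL

/* round down to a power-of-two boundary, as the kernel macro does */
static unsigned long long round_down_pow2(unsigned long long x,
					  unsigned long long a)
{
	return x & ~(a - 1);
}

int main(void)
{
	unsigned long long num_bytes = 1048576;	/* 1 MiB request */
	unsigned long long min_alloc_size = 16384;
	int final_tried = 0;

	while (!final_tried) {
		/* pretend find_free_extent() failed; shrink and retry */
		num_bytes = round_down_pow2(num_bytes >> 1, SECTORSIZE);
		if (num_bytes <= min_alloc_size) {
			num_bytes = min_alloc_size;
			final_tried = 1;	/* one last attempt */
		}
		printf("retrying with %llu bytes\n", num_bytes);
	}
	return 0;
}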
@@ -8209,11 +8237,12 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
 				     u64 offset, u64 ram_bytes,
 				     struct btrfs_key *ins)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 
 	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
 
-	ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
+	ret = btrfs_add_delayed_data_ref(fs_info, trans, ins->objectid,
 					 ins->offset, 0,
 					 root_objectid, owner, offset,
 					 ram_bytes, BTRFS_ADD_DELAYED_EXTENT,
@@ -8231,6 +8260,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
 				   u64 root_objectid, u64 owner, u64 offset,
 				   struct btrfs_key *ins)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 	struct btrfs_block_group_cache *block_group;
 	struct btrfs_space_info *space_info;
@@ -8239,13 +8269,13 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
 	 * Mixed block groups will exclude before processing the log so we only
 	 * need to do the exclude dance if this fs isn't mixed.
 	 */
-	if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
+	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
 		ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
 		if (ret)
 			return ret;
 	}
 
-	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
+	block_group = btrfs_lookup_block_group(fs_info, ins->objectid);
 	if (!block_group)
 		return -EINVAL;
 
@@ -8267,6 +8297,7 @@ static struct extent_buffer *
 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		      u64 bytenr, int level)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *buf;
 
 	buf = btrfs_find_create_tree_block(root, bytenr);
@@ -8276,7 +8307,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	btrfs_set_header_generation(buf, trans->transid);
 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
 	btrfs_tree_lock(buf);
-	clean_tree_block(trans, root->fs_info, buf);
+	clean_tree_block(trans, fs_info, buf);
 	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
 
 	btrfs_set_lock_blocking(buf);
@@ -8308,8 +8339,9 @@ static struct btrfs_block_rsv *
 use_block_rsv(struct btrfs_trans_handle *trans,
 	      struct btrfs_root *root, u32 blocksize)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_rsv *block_rsv;
-	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
+	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
 	int ret;
 	bool global_updated = false;
 
@@ -8327,11 +8359,11 @@ again:
 
 	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
 		global_updated = true;
-		update_global_block_rsv(root->fs_info);
+		update_global_block_rsv(fs_info);
 		goto again;
 	}
 
-	if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
+	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
 		static DEFINE_RATELIMIT_STATE(_rs,
 				DEFAULT_RATELIMIT_INTERVAL * 10,
 				/*DEFAULT_RATELIMIT_BURST*/ 1);
@@ -8375,18 +8407,18 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
 					struct btrfs_disk_key *key, int level,
 					u64 hint, u64 empty_size)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_key ins;
 	struct btrfs_block_rsv *block_rsv;
 	struct extent_buffer *buf;
 	struct btrfs_delayed_extent_op *extent_op;
 	u64 flags = 0;
 	int ret;
-	u32 blocksize = root->fs_info->nodesize;
-	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
-						 SKINNY_METADATA);
+	u32 blocksize = fs_info->nodesize;
+	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-	if (btrfs_is_testing(root->fs_info)) {
+	if (btrfs_is_testing(fs_info)) {
 		buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
 					    level);
 		if (!IS_ERR(buf))
@@ -8433,7 +8465,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
 		extent_op->is_data = false;
 		extent_op->level = level;
 
-		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
+		ret = btrfs_add_delayed_tree_ref(fs_info, trans,
 						 ins.objectid, ins.offset,
 						 parent, root_objectid, level,
 						 BTRFS_ADD_DELAYED_EXTENT,
@@ -8450,7 +8482,7 @@ out_free_buf:
 out_free_reserved:
 	btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
 out_unuse:
-	unuse_block_rsv(root->fs_info, block_rsv, blocksize);
+	unuse_block_rsv(fs_info, block_rsv, blocksize);
 	return ERR_PTR(ret);
 }
 
@@ -8476,6 +8508,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
 				     struct walk_control *wc,
 				     struct btrfs_path *path)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 bytenr;
 	u64 generation;
 	u64 refs;
@@ -8493,7 +8526,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
 	} else {
 		wc->reada_count = wc->reada_count * 3 / 2;
 		wc->reada_count = min_t(int, wc->reada_count,
-					BTRFS_NODEPTRS_PER_BLOCK(root->fs_info));
+					BTRFS_NODEPTRS_PER_BLOCK(fs_info));
 	}
 
 	eb = path->nodes[wc->level];
@@ -8644,6 +8677,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 				 struct btrfs_path *path,
 				 struct walk_control *wc, int *lookup_info)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 bytenr;
 	u64 generation;
 	u64 parent;
@@ -8669,9 +8703,9 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 	}
 
 	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
-	blocksize = root->fs_info->nodesize;
+	blocksize = fs_info->nodesize;
 
-	next = find_extent_buffer(root->fs_info, bytenr);
+	next = find_extent_buffer(fs_info, bytenr);
 	if (!next) {
 		next = btrfs_find_create_tree_block(root, bytenr);
 		if (IS_ERR(next))
@@ -8691,7 +8725,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 		goto out_unlock;
 
 	if (unlikely(wc->refs[level - 1] == 0)) {
-		btrfs_err(root->fs_info, "Missing references.");
+		btrfs_err(fs_info, "Missing references.");
 		ret = -EIO;
 		goto out_unlock;
 	}
@@ -8781,7 +8815,7 @@ skip:
 			ret = btrfs_qgroup_trace_subtree(trans, root, next,
 							 generation, level - 1);
 			if (ret) {
-				btrfs_err_rl(root->fs_info,
+				btrfs_err_rl(fs_info,
 					     "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
 					     ret);
 			}
@@ -8819,6 +8853,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
 				 struct btrfs_path *path,
 				 struct walk_control *wc)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 	int level = wc->level;
 	struct extent_buffer *eb = path->nodes[level];
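[In the reada_walk_down() hunk above, the readahead batch grows by a factor of 3/2 per pass and is capped at BTRFS_NODEPTRS_PER_BLOCK(). A toy illustration of that ramp follows; the starting value and cap are arbitrary, since the real numbers depend on the node size and key layout.]

/* Sketch of the multiplicative ramp-up with a cap used for
 * wc->reada_count in the hunk above. All numbers are invented. */
#include <stdio.h>

int main(void)
{
	int reada_count = 32;		/* illustrative starting batch */
	int nodeptrs_per_block = 493;	/* illustrative cap */

	for (int pass = 0; pass < 10; pass++) {
		reada_count = reada_count * 3 / 2;
		if (reada_count > nodeptrs_per_block)	/* min_t() in the kernel */
			reada_count = nodeptrs_per_block;
		printf("pass %2d: reada_count=%d\n", pass, reada_count);
	}
	return 0;
}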
@@ -8878,7 +8913,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
 			BUG_ON(ret); /* -ENOMEM */
 			ret = btrfs_qgroup_trace_leaf_items(trans, root, eb);
 			if (ret) {
-				btrfs_err_rl(root->fs_info,
+				btrfs_err_rl(fs_info,
 					     "error %d accounting leaf items. Quota is out of sync, rescan required.",
 					     ret);
 			}
@@ -8890,7 +8925,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
 			btrfs_set_lock_blocking(eb);
 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
 		}
-		clean_tree_block(trans, root->fs_info, eb);
+		clean_tree_block(trans, fs_info, eb);
 	}
 
 	if (eb == root->node) {
@@ -9094,7 +9129,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
 	wc->update_ref = update_ref;
 	wc->keep_locks = 0;
 	wc->for_reloc = for_reloc;
-	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root->fs_info);
+	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
 
 	while (1) {
 
@@ -9219,6 +9254,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 			struct extent_buffer *node,
 			struct extent_buffer *parent)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_path *path;
 	struct walk_control *wc;
 	int level;
@@ -9258,7 +9294,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 	wc->update_ref = 0;
 	wc->keep_locks = 1;
 	wc->for_reloc = 1;
-	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root->fs_info);
+	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
 
 	while (1) {
 		wret = walk_down_tree(trans, root, path, wc);
@@ -9377,6 +9413,7 @@ int btrfs_inc_block_group_ro(struct btrfs_root *root,
 			     struct btrfs_block_group_cache *cache)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_trans_handle *trans;
 	u64 alloc_flags;
 	int ret;
@@ -9391,11 +9428,11 @@ again:
 	 * block groups cache has started writing.  If it already started,
 	 * back off and let this transaction commit
 	 */
-	mutex_lock(&root->fs_info->ro_block_group_mutex);
+	mutex_lock(&fs_info->ro_block_group_mutex);
 	if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
 		u64 transid = trans->transid;
 
-		mutex_unlock(&root->fs_info->ro_block_group_mutex);
+		mutex_unlock(&fs_info->ro_block_group_mutex);
 		btrfs_end_transaction(trans, root);
 
 		ret = btrfs_wait_for_commit(root, transid);
@@ -9408,7 +9445,7 @@ again:
 	 * if we are changing raid levels, try to allocate a corresponding
 	 * block group with the new raid level.
 	 */
-	alloc_flags = update_block_group_flags(root->fs_info, cache->flags);
+	alloc_flags = update_block_group_flags(fs_info, cache->flags);
 	if (alloc_flags != cache->flags) {
 		ret = do_chunk_alloc(trans, root, alloc_flags,
 				     CHUNK_ALLOC_FORCE);
@@ -9434,13 +9471,12 @@ again:
 	ret = inc_block_group_ro(cache, 0);
 out:
 	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
-		alloc_flags = update_block_group_flags(root->fs_info,
-						       cache->flags);
-		lock_chunks(root->fs_info);
+		alloc_flags = update_block_group_flags(fs_info, cache->flags);
+		lock_chunks(fs_info);
 		check_system_chunk(trans, root, alloc_flags);
-		unlock_chunks(root->fs_info);
+		unlock_chunks(fs_info);
 	}
-	mutex_unlock(&root->fs_info->ro_block_group_mutex);
+	mutex_unlock(&fs_info->ro_block_group_mutex);
 
 	btrfs_end_transaction(trans, root);
 	return ret;
@@ -9527,7 +9563,7 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
 	struct btrfs_root *root = fs_info->extent_root;
 	struct btrfs_block_group_cache *block_group;
 	struct btrfs_space_info *space_info;
-	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
 	struct btrfs_device *device;
 	struct btrfs_trans_handle *trans;
 	u64 min_free;
@@ -9539,14 +9575,14 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
 	int full = 0;
 	int ret = 0;
 
-	debug = btrfs_test_opt(root->fs_info, ENOSPC_DEBUG);
+	debug = btrfs_test_opt(fs_info, ENOSPC_DEBUG);
 
-	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
+	block_group = btrfs_lookup_block_group(fs_info, bytenr);
 
 	/* odd, couldn't find the block group, leave it alone */
 	if (!block_group) {
 		if (debug)
-			btrfs_warn(root->fs_info,
+			btrfs_warn(fs_info,
 				   "can't find block group for bytenr %llu",
 				   bytenr);
 		return -1;
@@ -9596,7 +9632,7 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
 	 *      3: raid0
 	 *      4: single
 	 */
-	target = get_restripe_target(root->fs_info, block_group->flags);
+	target = get_restripe_target(fs_info, block_group->flags);
 	if (target) {
 		index = __get_raid_index(extended_to_chunk(target));
 	} else {
@@ -9606,9 +9642,9 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
 		 */
 		if (full) {
 			if (debug)
-				btrfs_warn(root->fs_info,
-					   "no space to alloc new chunk for block group %llu",
-					   block_group->key.objectid);
+				btrfs_warn(fs_info,
+					   "no space to alloc new chunk for block group %llu",
+					   block_group->key.objectid);
 			goto out;
 		}
 
@@ -9636,7 +9672,7 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
 		goto out;
 	}
 
-	mutex_lock(&root->fs_info->chunk_mutex);
+	mutex_lock(&fs_info->chunk_mutex);
 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
 		u64 dev_offset;
 
@@ -9658,10 +9694,10 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
 		}
 	}
 	if (debug && ret == -1)
-		btrfs_warn(root->fs_info,
-			   "no space to allocate a new chunk for block group %llu",
-			   block_group->key.objectid);
-	mutex_unlock(&root->fs_info->chunk_mutex);
+		btrfs_warn(fs_info,
+			   "no space to allocate a new chunk for block group %llu",
+			   block_group->key.objectid);
+	mutex_unlock(&fs_info->chunk_mutex);
 	btrfs_end_transaction(trans, root);
 out:
 	btrfs_put_block_group(block_group);
@@ -9706,7 +9742,7 @@ static int find_first_block_group(struct btrfs_fs_info *fs_info,
 					found_key.offset);
 		read_unlock(&em_tree->lock);
 		if (!em) {
-			btrfs_err(root->fs_info,
+			btrfs_err(fs_info,
 				  "logical %llu len %llu found bg but no related chunk",
 				  found_key.objectid, found_key.offset);
 			ret = -ENOENT;
@@ -9898,6 +9934,7 @@ out_err:
 static struct btrfs_block_group_cache *
 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_group_cache *cache;
 
 	cache = kzalloc(sizeof(*cache), GFP_NOFS);
@@ -9915,11 +9952,10 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
 	cache->key.offset = size;
 	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
-	cache->sectorsize = root->fs_info->sectorsize;
-	cache->fs_info = root->fs_info;
+	cache->sectorsize = fs_info->sectorsize;
+	cache->fs_info = fs_info;
 	cache->full_stripe_len = btrfs_full_stripe_len(root,
-					       &root->fs_info->mapping_tree,
-					       start);
+					       &fs_info->mapping_tree, start);
 	set_free_space_tree_thresholds(cache);
 
 	atomic_set(&cache->count, 1);
@@ -9964,11 +10000,11 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 		return -ENOMEM;
 	path->reada = READA_FORWARD;
 
-	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
-	if (btrfs_test_opt(root->fs_info, SPACE_CACHE) &&
-	    btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
+	cache_gen = btrfs_super_cache_generation(info->super_copy);
+	if (btrfs_test_opt(info, SPACE_CACHE) &&
+	    btrfs_super_generation(info->super_copy) != cache_gen)
 		need_clear = 1;
-	if (btrfs_test_opt(root->fs_info, CLEAR_CACHE))
+	if (btrfs_test_opt(info, CLEAR_CACHE))
 		need_clear = 1;
 
 	while (1) {
@@ -9999,7 +10035,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 			 * b) Setting 'dirty flag' makes sure that we flush
 			 *    the new space cache info onto disk.
 			 */
-			if (btrfs_test_opt(root->fs_info, SPACE_CACHE))
+			if (btrfs_test_opt(info, SPACE_CACHE))
 				cache->disk_cache_state = BTRFS_DC_CLEAR;
 		}
 
@@ -10050,21 +10086,21 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 		} else if (btrfs_block_group_used(&cache->item) == 0) {
 			cache->last_byte_to_unpin = (u64)-1;
 			cache->cached = BTRFS_CACHE_FINISHED;
-			add_new_free_space(cache, root->fs_info,
+			add_new_free_space(cache, info,
 					   found_key.objectid,
 					   found_key.objectid +
 					   found_key.offset);
 			free_excluded_extents(root, cache);
 		}
 
-		ret = btrfs_add_block_group_cache(root->fs_info, cache);
+		ret = btrfs_add_block_group_cache(info, cache);
 		if (ret) {
 			btrfs_remove_free_space_cache(cache);
 			btrfs_put_block_group(cache);
 			goto error;
 		}
 
-		trace_btrfs_add_block_group(root->fs_info, cache, 0);
+		trace_btrfs_add_block_group(info, cache, 0);
 		ret = update_space_info(info, cache->flags, found_key.offset,
 					btrfs_block_group_used(&cache->item),
 					cache->bytes_super, &space_info);
@@ -10083,7 +10119,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 
 		__link_block_group(space_info, cache);
 
-		set_avail_alloc_bits(root->fs_info, cache->flags);
+		set_avail_alloc_bits(info, cache->flags);
 		if (btrfs_chunk_readonly(root, cache->key.objectid)) {
 			inc_block_group_ro(cache, 1);
 		} else if (btrfs_block_group_used(&cache->item) == 0) {
@@ -10098,7 +10134,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 		}
 	}
 
-	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
+	list_for_each_entry_rcu(space_info, &info->space_info, list) {
 		if (!(get_alloc_profile(root, space_info->flags) &
 		      (BTRFS_BLOCK_GROUP_RAID10 |
 		       BTRFS_BLOCK_GROUP_RAID1 |
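[btrfs_create_block_group_cache() above allocates the cache zeroed, fills in the key (start and size), copies sectorsize out of fs_info, and starts the refcount at one for the caller. A simplified userspace rendition of that construction pattern follows; the types are stand-ins and a plain int replaces the kernel's atomic_t.]

/* Sketch of the block-group cache construction in the hunk above:
 * zeroed allocation, key filled in, fs-wide geometry cached into the
 * object, refcount starting at one. Stand-in types, not kernel code. */
#include <stdlib.h>
#include <stdio.h>

struct fs_geometry {
	unsigned int sectorsize;
};

struct bg_cache {
	unsigned long long start;
	unsigned long long size;
	unsigned int sectorsize;
	int refcount;		/* the kernel uses atomic_t here */
};

static struct bg_cache *create_bg_cache(const struct fs_geometry *fs,
					unsigned long long start,
					unsigned long long size)
{
	struct bg_cache *cache = calloc(1, sizeof(*cache));	/* kzalloc() analogue */

	if (!cache)
		return NULL;
	cache->start = start;
	cache->size = size;
	cache->sectorsize = fs->sectorsize;	/* cached from the fs info */
	cache->refcount = 1;			/* reference owned by the caller */
	return cache;
}

int main(void)
{
	struct fs_geometry fs = { 4096 };
	struct bg_cache *cache = create_bg_cache(&fs, 1 << 20, 1 << 30);

	if (cache)
		printf("bg at %llu, %llu bytes, sector %u\n",
		       cache->start, cache->size, cache->sectorsize);
	free(cache);
	return 0;
}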
@@ -10130,8 +10166,9 @@ error:
 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
 				       struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_group_cache *block_group, *tmp;
-	struct btrfs_root *extent_root = root->fs_info->extent_root;
+	struct btrfs_root *extent_root = fs_info->extent_root;
 	struct btrfs_block_group_item item;
 	struct btrfs_key key;
 	int ret = 0;
@@ -10151,11 +10188,11 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
 					sizeof(item));
 		if (ret)
 			btrfs_abort_transaction(trans, ret);
-		ret = btrfs_finish_chunk_alloc(trans, extent_root->fs_info,
-					       key.objectid, key.offset);
+		ret = btrfs_finish_chunk_alloc(trans, fs_info, key.objectid,
+					       key.offset);
 		if (ret)
 			btrfs_abort_transaction(trans, ret);
-		add_block_group_free_space(trans, root->fs_info, block_group);
+		add_block_group_free_space(trans, fs_info, block_group);
 		/* already aborted the transaction if it failed. */
 next:
 		list_del_init(&block_group->bg_list);
@@ -10168,12 +10205,11 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 			   u64 type, u64 chunk_objectid, u64 chunk_offset,
 			   u64 size)
 {
-	int ret;
-	struct btrfs_root *extent_root;
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_group_cache *cache;
-	extent_root = root->fs_info->extent_root;
+	int ret;
 
-	btrfs_set_log_full_commit(root->fs_info, trans);
+	btrfs_set_log_full_commit(fs_info, trans);
 
 	cache = btrfs_create_block_group_cache(root, chunk_offset, size);
 	if (!cache)
@@ -10198,8 +10234,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 		return ret;
 	}
 
-	add_new_free_space(cache, root->fs_info, chunk_offset,
-			   chunk_offset + size);
+	add_new_free_space(cache, fs_info, chunk_offset, chunk_offset + size);
 
 	free_excluded_extents(root, cache);
 
@@ -10216,7 +10251,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	 * assigned to our block group, but don't update its counters just yet.
 	 * We want our bg to be added to the rbtree with its ->space_info set.
 	 */
-	ret = update_space_info(root->fs_info, cache->flags, 0, 0, 0,
+	ret = update_space_info(fs_info, cache->flags, 0, 0, 0,
 				&cache->space_info);
 	if (ret) {
 		btrfs_remove_free_space_cache(cache);
@@ -10224,7 +10259,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 		return ret;
 	}
 
-	ret = btrfs_add_block_group_cache(root->fs_info, cache);
+	ret = btrfs_add_block_group_cache(fs_info, cache);
 	if (ret) {
 		btrfs_remove_free_space_cache(cache);
 		btrfs_put_block_group(cache);
@@ -10235,26 +10270,26 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	 * Now that our block group has its ->space_info set and is inserted in
 	 * the rbtree, update the space info's counters.
 	 */
-	trace_btrfs_add_block_group(root->fs_info, cache, 1);
-	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
+	trace_btrfs_add_block_group(fs_info, cache, 1);
+	ret = update_space_info(fs_info, cache->flags, size, bytes_used,
 				cache->bytes_super, &cache->space_info);
 	if (ret) {
 		btrfs_remove_free_space_cache(cache);
-		spin_lock(&root->fs_info->block_group_cache_lock);
+		spin_lock(&fs_info->block_group_cache_lock);
 		rb_erase(&cache->cache_node,
-			 &root->fs_info->block_group_cache_tree);
+			 &fs_info->block_group_cache_tree);
 		RB_CLEAR_NODE(&cache->cache_node);
-		spin_unlock(&root->fs_info->block_group_cache_lock);
+		spin_unlock(&fs_info->block_group_cache_lock);
 		btrfs_put_block_group(cache);
 		return ret;
 	}
-	update_global_block_rsv(root->fs_info);
+	update_global_block_rsv(fs_info);
 
 	__link_block_group(cache->space_info, cache);
 
 	list_add_tail(&cache->bg_list, &trans->new_bgs);
 
-	set_avail_alloc_bits(extent_root->fs_info, type);
+	set_avail_alloc_bits(fs_info, type);
 	return 0;
 }
@@ -10281,7 +10316,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	struct btrfs_path *path;
 	struct btrfs_block_group_cache *block_group;
 	struct btrfs_free_cluster *cluster;
-	struct btrfs_root *tree_root = root->fs_info->tree_root;
+	struct btrfs_root *tree_root = fs_info->tree_root;
 	struct btrfs_key key;
 	struct inode *inode;
 	struct kobject *kobj = NULL;
@@ -10311,7 +10346,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 		factor = 1;
 
 	/* make sure this block group isn't part of an allocation cluster */
-	cluster = &root->fs_info->data_alloc_cluster;
+	cluster = &fs_info->data_alloc_cluster;
 	spin_lock(&cluster->refill_lock);
 	btrfs_return_cluster_to_free_space(block_group, cluster);
 	spin_unlock(&cluster->refill_lock);
@@ -10320,7 +10355,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	 * make sure this block group isn't part of a metadata
 	 * allocation cluster
 	 */
-	cluster = &root->fs_info->meta_alloc_cluster;
+	cluster = &fs_info->meta_alloc_cluster;
 	spin_lock(&cluster->refill_lock);
 	btrfs_return_cluster_to_free_space(block_group, cluster);
 	spin_unlock(&cluster->refill_lock);
@@ -10400,14 +10435,14 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 		btrfs_release_path(path);
 	}
 
-	spin_lock(&root->fs_info->block_group_cache_lock);
+	spin_lock(&fs_info->block_group_cache_lock);
 	rb_erase(&block_group->cache_node,
-		 &root->fs_info->block_group_cache_tree);
+		 &fs_info->block_group_cache_tree);
 	RB_CLEAR_NODE(&block_group->cache_node);
 
-	if (root->fs_info->first_logical_byte == block_group->key.objectid)
-		root->fs_info->first_logical_byte = (u64)-1;
-	spin_unlock(&root->fs_info->block_group_cache_lock);
+	if (fs_info->first_logical_byte == block_group->key.objectid)
+		fs_info->first_logical_byte = (u64)-1;
+	spin_unlock(&fs_info->block_group_cache_lock);
 
 	down_write(&block_group->space_info->groups_sem);
 	/*
@@ -10418,7 +10453,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	if (list_empty(&block_group->space_info->block_groups[index])) {
 		kobj = block_group->space_info->block_group_kobjs[index];
 		block_group->space_info->block_group_kobjs[index] = NULL;
-		clear_avail_alloc_bits(root->fs_info, block_group->flags);
+		clear_avail_alloc_bits(fs_info, block_group->flags);
 	}
 	up_write(&block_group->space_info->groups_sem);
 	if (kobj) {
@@ -10431,12 +10466,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	if (block_group->cached == BTRFS_CACHE_STARTED)
 		wait_block_group_cache_done(block_group);
 	if (block_group->has_caching_ctl) {
-		down_write(&root->fs_info->commit_root_sem);
+		down_write(&fs_info->commit_root_sem);
 		if (!caching_ctl) {
 			struct btrfs_caching_control *ctl;
 
 			list_for_each_entry(ctl,
-				    &root->fs_info->caching_block_groups, list)
+				    &fs_info->caching_block_groups, list)
 				if (ctl->block_group == block_group) {
 					caching_ctl = ctl;
 					atomic_inc(&caching_ctl->count);
@@ -10445,7 +10480,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 		}
 		if (caching_ctl)
 			list_del_init(&caching_ctl->list);
-		up_write(&root->fs_info->commit_root_sem);
+		up_write(&fs_info->commit_root_sem);
 		if (caching_ctl) {
 			/* Once for the caching bgs list and once for us. */
 			put_caching_control(caching_ctl);
@@ -10466,7 +10501,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
 	spin_lock(&block_group->space_info->lock);
 	list_del_init(&block_group->ro_list);
-	if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
+	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
 		WARN_ON(block_group->space_info->total_bytes
 			< block_group->key.offset);
 		WARN_ON(block_group->space_info->bytes_readonly
@@ -10482,7 +10517,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
 	memcpy(&key, &block_group->key, sizeof(key));
 
-	lock_chunks(root->fs_info);
+	lock_chunks(fs_info);
 	if (!list_empty(&em->list)) {
 		/* We're in the transaction->pending_chunks list. */
 		free_extent_map(em);
@@ -10530,14 +10565,14 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 		 * sees the em, either in the pending_chunks list or in the
 		 * pinned_chunks list.
 		 */
-		list_move_tail(&em->list, &root->fs_info->pinned_chunks);
+		list_move_tail(&em->list, &fs_info->pinned_chunks);
 	}
 	spin_unlock(&block_group->lock);
 
 	if (remove_em) {
 		struct extent_map_tree *em_tree;
 
-		em_tree = &root->fs_info->mapping_tree.map_tree;
+		em_tree = &fs_info->mapping_tree.map_tree;
 		write_lock(&em_tree->lock);
 		/*
 		 * The em might be in the pending_chunks list, so make sure the
@@ -10550,9 +10585,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 		free_extent_map(em);
 	}
 
-	unlock_chunks(root->fs_info);
+	unlock_chunks(fs_info);
 
-	ret = remove_block_group_free_space(trans, root->fs_info, block_group);
+	ret = remove_block_group_free_space(trans, fs_info, block_group);
 	if (ret)
 		goto out;
@@ -10734,7 +10769,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 		spin_unlock(&space_info->lock);
 
 		/* DISCARD can flip during remount */
-		trimming = btrfs_test_opt(root->fs_info, DISCARD);
+		trimming = btrfs_test_opt(fs_info, DISCARD);
 
 		/* Implicit trim during transaction commit. */
 		if (trimming)
@@ -10970,8 +11005,8 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
 		cache = next_block_group(fs_info->tree_root, cache);
 	}
 
-	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
-	devices = &root->fs_info->fs_devices->alloc_list;
+	mutex_lock(&fs_info->fs_devices->device_list_mutex);
+	devices = &fs_info->fs_devices->alloc_list;
 	list_for_each_entry(device, devices, dev_alloc_list) {
 		ret = btrfs_trim_free_extents(device, range->minlen,
 					      &group_trimmed);
@@ -10980,7 +11015,7 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
 		trimmed += group_trimmed;
 	}
 
-	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 
 	range->len = trimmed;
 	return ret;
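[Every hunk in this part of the diff has the same mechanical shape: a pointer chain that used to be spelled out at each use is loaded into a local at the top of the function, and the remaining call sites shrink accordingly. A self-contained before/after sketch of that shape follows, using invented stand-in types rather than the real btrfs structures.]

/* Illustrative only: the repeated-dereference shape before and after
 * hoisting the chain into a convenience variable. */
#include <stdio.h>

struct fs_info {
	unsigned int sectorsize;
	unsigned int nodesize;
};

struct root {
	struct fs_info *fs_info;
};

/* before: the chain root->fs_info is repeated at every use */
static unsigned int span_before(struct root *root, unsigned int bytes)
{
	return (bytes / root->fs_info->sectorsize) *
	       root->fs_info->sectorsize + root->fs_info->nodesize;
}

/* after: one dereference up front, shorter lines below */
static unsigned int span_after(struct root *root, unsigned int bytes)
{
	struct fs_info *fs_info = root->fs_info;

	return (bytes / fs_info->sectorsize) *
	       fs_info->sectorsize + fs_info->nodesize;
}

int main(void)
{
	struct fs_info fi = { 4096, 16384 };
	struct root r = { &fi };

	/* both forms compute the same value */
	printf("%u %u\n", span_before(&r, 10000), span_after(&r, 10000));
	return 0;
}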