From fb25e9141ab843794d5cdef3936ccb58435e2371 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 26 Jul 2011 17:00:46 -0400 Subject: Btrfs: use bytes_may_use for all ENOSPC reservations We have been using bytes_reserved for metadata reservations, which is wrong since we use that to keep track of outstanding reservations from the allocator. This resulted in us doing a lot of silly things to make sure we don't allocate a bunch of metadata chunks since we never had a real view of how much space was actually in use by metadata. This passes Arne's enospc test and xfstests as well as my own enospc tests. Hopefully this will get us moving in the right direction. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 41ac927401d0..79c16a68a2bc 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -2472,9 +2472,19 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, spin_unlock(&ctl->tree_lock); if (bytes >= minlen) { - int update_ret; - update_ret = btrfs_update_reserved_bytes(block_group, - bytes, 1, 1); + struct btrfs_space_info *space_info; + int update = 0; + + space_info = block_group->space_info; + spin_lock(&space_info->lock); + spin_lock(&block_group->lock); + if (!block_group->ro) { + block_group->reserved += bytes; + space_info->bytes_reserved += bytes; + update = 1; + } + spin_unlock(&block_group->lock); + spin_unlock(&space_info->lock); ret = btrfs_error_discard_extent(fs_info->extent_root, start, @@ -2482,9 +2492,16 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, &actually_trimmed); btrfs_add_free_space(block_group, start, bytes); - if (!update_ret) - btrfs_update_reserved_bytes(block_group, - bytes, 0, 1); + if (update) { + spin_lock(&space_info->lock); + spin_lock(&block_group->lock); + if (block_group->ro) + space_info->bytes_readonly += bytes; + block_group->reserved -= bytes; + space_info->bytes_reserved -= bytes; + spin_unlock(&space_info->lock); + spin_unlock(&block_group->lock); + } if (ret) break; -- cgit v1.2.3 From 6ab60601d518d563ca1a47eaa399096e69d3b64a Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 8 Aug 2011 08:24:46 -0400 Subject: Btrfs: ratelimit the generation printk for the free space cache A user reported being spammed by this message after moving to 3.0. Since we switched to the normal checksumming infrastructure, all old free space caches will be wrong and need to be regenerated, so people are likely to see this message a lot. Ratelimit it so it doesn't fill up their logs and freak them out. 
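As a rough sketch of what printk_ratelimited() buys us (an illustration, not part of the patch below): it keeps per-call-site ratelimit state, and with the kernel defaults of a 5 second interval and a burst of 10 it silently drops the excess:

        int i;

        /* Logs roughly 10 lines per 5 seconds instead of 10000. */
        for (i = 0; i < 10000; i++)
                printk_ratelimited(KERN_ERR "btrfs: noisy message %d\n", i);
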
Thanks, Reported-by: Andrew Lutomirski Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 79c16a68a2bc..ecc1a4f85d20 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -20,6 +20,7 @@ #include #include #include +#include <linux/ratelimit.h> #include "ctree.h" #include "free-space-cache.h" #include "transaction.h" @@ -341,11 +342,12 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, gen = addr; if (*gen != BTRFS_I(inode)->generation) { - printk(KERN_ERR "btrfs: space cache generation" - " (%llu) does not match inode (%llu)\n", - (unsigned long long)*gen, - (unsigned long long) - BTRFS_I(inode)->generation); + printk_ratelimited(KERN_ERR "btrfs: space cache" + " generation (%llu) does not match " + "inode (%llu)\n", + (unsigned long long)*gen, + (unsigned long long) + BTRFS_I(inode)->generation); kunmap(page); unlock_page(page); page_cache_release(page); -- cgit v1.2.3 From 482e6dc5261406fdb921946e70b51467b0305bad Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 19 Aug 2011 10:31:56 -0400 Subject: Btrfs: allow callers to specify if flushing can occur for btrfs_block_rsv_check If you run xfstest 224 you will get lots of messages about not being able to delete inodes and that they will be cleaned up next mount. This is because btrfs_block_rsv_check was not calling reserve_metadata_bytes with the ability to flush, so if there was not enough space, it simply failed. But in the truncate and evict cases we could easily flush space to try and get enough space to do our work, so make btrfs_block_rsv_check take a flush argument to pass down to reserve_metadata_bytes. Now xfstest 224 runs fine without all those complaints. 
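To make the new argument concrete, the two call styles this patch ends up with look roughly like this (a sketch; the values are taken from the hunks below):

        /* truncate/evict: flushing is safe, so try hard for the space. */
        ret = btrfs_block_rsv_check(NULL, root, rsv, min_size, 0, 1);

        /* background checks (relocation, should_end_transaction): no flush. */
        ret = btrfs_block_rsv_check(trans, root, rsv, 0, 5, 0);
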
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 2 +- fs/btrfs/extent-tree.c | 4 ++-- fs/btrfs/free-space-cache.c | 2 +- fs/btrfs/inode.c | 6 +++--- fs/btrfs/relocation.c | 4 ++-- fs/btrfs/transaction.c | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2e18b068841b..caa73cd8c00a 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2244,7 +2244,7 @@ int btrfs_block_rsv_add(struct btrfs_trans_handle *trans, int btrfs_block_rsv_check(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_block_rsv *block_rsv, - u64 min_reserved, int min_factor); + u64 min_reserved, int min_factor, int flush); int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, struct btrfs_block_rsv *dst_rsv, u64 num_bytes); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index d05967e9d613..a71fcf506531 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3705,7 +3705,7 @@ int btrfs_block_rsv_add(struct btrfs_trans_handle *trans, int btrfs_block_rsv_check(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_block_rsv *block_rsv, - u64 min_reserved, int min_factor) + u64 min_reserved, int min_factor, int flush) { u64 num_bytes = 0; int ret = -ENOSPC; @@ -3728,7 +3728,7 @@ int btrfs_block_rsv_check(struct btrfs_trans_handle *trans, if (!ret) return 0; - ret = reserve_metadata_bytes(trans, root, block_rsv, num_bytes, 0); + ret = reserve_metadata_bytes(trans, root, block_rsv, num_bytes, flush); if (!ret) { block_rsv_add_bytes(block_rsv, num_bytes, 0); return 0; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index ecc1a4f85d20..b0122e19db6b 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -199,7 +199,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root, trans->block_rsv = root->orphan_block_rsv; ret = btrfs_block_rsv_check(trans, root, root->orphan_block_rsv, - 0, 5); + 0, 5, 0); if (ret) return ret; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 8316b570db55..e40b9239660d 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3576,10 +3576,10 @@ void btrfs_evict_inode(struct inode *inode) * doing the truncate. 
*/ while (1) { - ret = btrfs_block_rsv_check(NULL, root, rsv, min_size, 0); + ret = btrfs_block_rsv_check(NULL, root, rsv, min_size, 0, 1); if (ret) { printk(KERN_WARNING "Could not get space for a " - "delete, will truncate on mount\n"); + "delete, will truncate on mount %d\n", ret); btrfs_orphan_del(NULL, inode); btrfs_free_block_rsv(root, rsv); goto no_delete; @@ -6575,7 +6575,7 @@ static int btrfs_truncate(struct inode *inode) btrfs_add_ordered_operation(trans, root, inode); while (1) { - ret = btrfs_block_rsv_check(trans, root, rsv, min_size, 0); + ret = btrfs_block_rsv_check(trans, root, rsv, min_size, 0, 1); if (ret) { /* * This can only happen with the original transaction we diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index aeaed99e9cfe..fd9ac66434b0 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2042,7 +2042,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, trans->block_rsv = rc->block_rsv; ret = btrfs_block_rsv_check(trans, root, rc->block_rsv, - min_reserved, 0); + min_reserved, 0, 0); if (ret) { BUG_ON(ret != -EAGAIN); ret = btrfs_commit_transaction(trans, root); @@ -3775,7 +3775,7 @@ restart: } ret = btrfs_block_rsv_check(trans, rc->extent_root, - rc->block_rsv, 0, 5); + rc->block_rsv, 0, 5, 0); if (ret < 0) { if (ret != -EAGAIN) { err = ret; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 3e20cc8c1c06..a1d8c322c1ba 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -419,7 +419,7 @@ static int should_end_transaction(struct btrfs_trans_handle *trans, { int ret; ret = btrfs_block_rsv_check(trans, root, - &root->fs_info->global_block_rsv, 0, 5); + &root->fs_info->global_block_rsv, 0, 5, 0); return ret ? 1 : 0; } -- cgit v1.2.3 From a9b5fcddce594a408a48d523087b5bb64ce82469 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 19 Aug 2011 12:06:12 -0400 Subject: Btrfs: fix call to btrfs_search_slot in free space cache We are setting ins_len to 1 even though we are just modifying an item that should be there already. This may cause the search stuff to split nodes on the way down needlessly. Set this to 0 since we aren't inserting anything. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index b0122e19db6b..701ef5951e3f 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -802,7 +802,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, key.offset = offset; key.type = 0; - ret = btrfs_search_slot(trans, root, &key, path, 1, 1); + ret = btrfs_search_slot(trans, root, &key, path, 0, 1); if (ret < 0) { ret = -1; clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, -- cgit v1.2.3 From 300e4f8a56f263797568c95b71c949f9f02e4534 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 29 Aug 2011 14:06:00 -0400 Subject: Btrfs: put the block group cache after we commit the super In moving some enospc stuff around I noticed that when we unmount we are often evicting the free space cache inodes before we do our last commit. This isn't bad, but it makes us constantly have to re-read the inodes. So instead don't evict the cache until after we do our last commit; this will make things a little less crappy and makes a future enospc change work properly. 
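In close_ctree() terms the change is just an ordering swap; simplified (a sketch, with the readonly handling around it omitted):

        ret = btrfs_commit_super(root);
        if (ret)
                printk(KERN_ERR "btrfs: commit super ret %d\n", ret);

        /* Only drop the pinned cache inodes after the final commit. */
        btrfs_put_block_group_cache(fs_info);
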
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/disk-io.c | 4 ++-- fs/btrfs/free-space-cache.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 0b5643a68d57..4965a0179b31 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2543,8 +2543,6 @@ int close_ctree(struct btrfs_root *root) /* clear out the rbtree of defraggable inodes */ btrfs_run_defrag_inodes(root->fs_info); - btrfs_put_block_group_cache(fs_info); - /* * Here come 2 situations when btrfs is broken to flip readonly: * @@ -2570,6 +2568,8 @@ int close_ctree(struct btrfs_root *root) printk(KERN_ERR "btrfs: commit super ret %d\n", ret); } + btrfs_put_block_group_cache(fs_info); + kthread_stop(root->fs_info->transaction_kthread); kthread_stop(root->fs_info->cleaner_kthread); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 701ef5951e3f..1ea10731797a 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -105,7 +105,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, block_group->disk_cache_state = BTRFS_DC_CLEAR; } - if (!btrfs_fs_closing(root->fs_info)) { + if (!block_group->iref) { block_group->inode = igrab(inode); block_group->iref = 1; } -- cgit v1.2.3 From c09544e07f8cdc455ed8615d4c067d694c33bd18 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 30 Aug 2011 10:19:10 -0400 Subject: Btrfs: handle enospc accounting for free space inodes Since free space inodes now use normal checksumming we need to make sure to account for their metadata use. So reserve metadata space, and then if we fail to write out the metadata we can just release it, otherwise it will be freed up when the io completes. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/extent-tree.c | 18 +++++++++++++----- fs/btrfs/free-space-cache.c | 44 +++++++++++++++++++++++++++++--------------- fs/btrfs/inode-map.c | 6 ++++-- fs/btrfs/inode.c | 2 +- 4 files changed, 47 insertions(+), 23 deletions(-) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1f1d3e8dcec9..ccdc4d12e8d4 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2755,16 +2755,20 @@ again: num_pages *= 16; num_pages *= PAGE_CACHE_SIZE; - ret = btrfs_check_data_free_space(inode, num_pages); + ret = btrfs_delalloc_reserve_space(inode, num_pages); if (ret) goto out_put; ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages, num_pages, num_pages, &alloc_hint); - if (!ret) + if (!ret) { dcs = BTRFS_DC_SETUP; - btrfs_free_reserved_data_space(inode, num_pages); + btrfs_free_reserved_data_space(inode, num_pages); + } else { + btrfs_delalloc_release_space(inode, num_pages); + } + out_put: iput(inode); out_free: @@ -4002,9 +4006,13 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv; u64 to_reserve = 0; unsigned nr_extents = 0; + int flush = 1; int ret; - if (btrfs_transaction_in_commit(root->fs_info)) + if (btrfs_is_free_space_inode(root, inode)) + flush = 0; + + if (flush && btrfs_transaction_in_commit(root->fs_info)) schedule_timeout(1); num_bytes = ALIGN(num_bytes, root->sectorsize); @@ -4023,7 +4031,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) to_reserve += calc_csum_metadata_size(inode, num_bytes, 1); spin_unlock(&BTRFS_I(inode)->lock); - ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1); + ret = 
reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, flush); if (ret) { u64 to_free = 0; unsigned dropped; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 1ea10731797a..3bde17ff14c0 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -532,6 +532,19 @@ out: return ret; } +/** + * __btrfs_write_out_cache - write out cached info to an inode + * @root - the root the inode belongs to + * @ctl - the free space cache we are going to write out + * @block_group - the block_group for this cache if it belongs to a block_group + * @trans - the trans handle + * @path - the path to use + * @offset - the offset for the key we'll insert + * + * This function writes out a free space cache struct to disk for quick recovery + * on mount. This will return 0 if it was successfull in writing the cache out, + * and -1 if it was not. + */ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, struct btrfs_free_space_ctl *ctl, struct btrfs_block_group_cache *block_group, @@ -555,7 +568,8 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, int index = 0, num_pages = 0; int entries = 0; int bitmaps = 0; - int ret = -1; + int ret; + int err = -1; bool next_page = false; bool out_of_space = false; @@ -563,7 +577,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, node = rb_first(&ctl->free_space_offset); if (!node) - return 0; + return -1; if (!i_size_read(inode)) return -1; @@ -767,7 +781,6 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, &cached_state, GFP_NOFS); - ret = 0; goto out; } @@ -789,10 +802,8 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, &cached_state, GFP_NOFS); - if (ret) { - ret = 0; + if (ret) goto out; - } BTRFS_I(inode)->generation = trans->transid; @@ -804,7 +815,6 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, ret = btrfs_search_slot(trans, root, &key, path, 0, 1); if (ret < 0) { - ret = -1; clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS); @@ -818,7 +828,6 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || found_key.offset != offset) { - ret = -1; clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 0, 0, NULL, @@ -835,16 +844,15 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, btrfs_mark_buffer_dirty(leaf); btrfs_release_path(path); - ret = 1; - + err = 0; out: kfree(pages); - if (ret != 1) { + if (err) { invalidate_inode_pages2_range(inode->i_mapping, 0, index); BTRFS_I(inode)->generation = 0; } btrfs_update_inode(trans, root, inode); - return ret; + return err; } int btrfs_write_out_cache(struct btrfs_root *root, @@ -871,14 +879,16 @@ int btrfs_write_out_cache(struct btrfs_root *root, ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans, path, block_group->key.objectid); - if (ret < 0) { + if (ret) { + btrfs_delalloc_release_metadata(inode, inode->i_size); spin_lock(&block_group->lock); block_group->disk_cache_state = BTRFS_DC_ERROR; spin_unlock(&block_group->lock); ret = 0; - +#ifdef DEBUG printk(KERN_ERR "btrfs: 
failed to write free space cace " "for block group %llu\n", block_group->key.objectid); +#endif } iput(inode); @@ -2662,9 +2672,13 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root, return 0; ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0); - if (ret < 0) + if (ret) { + btrfs_delalloc_release_metadata(inode, inode->i_size); +#ifdef DEBUG printk(KERN_ERR "btrfs: failed to write free ino cache " "for root %llu\n", root->root_key.objectid); +#endif + } iput(inode); return ret; diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index b4087e0fa871..53dcbdf446cd 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -465,14 +465,16 @@ again: /* Just to make sure we have enough space */ prealloc += 8 * PAGE_CACHE_SIZE; - ret = btrfs_check_data_free_space(inode, prealloc); + ret = btrfs_delalloc_reserve_space(inode, prealloc); if (ret) goto out_put; ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc, prealloc, prealloc, &alloc_hint); - if (ret) + if (ret) { + btrfs_delalloc_release_space(inode, prealloc); goto out_put; + } btrfs_free_reserved_data_space(inode, prealloc); out_put: diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 06ae5b173fd7..78b5ae59ac4f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1792,11 +1792,11 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) } ret = 0; out: + btrfs_delalloc_release_metadata(inode, ordered_extent->len); if (nolock) { if (trans) btrfs_end_transaction_nolock(trans, root); } else { - btrfs_delalloc_release_metadata(inode, ordered_extent->len); if (trans) btrfs_end_transaction(trans, root); } -- cgit v1.2.3 From 4a92b1b8d2810db4ea0c34616b94c0b3810fa027 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 30 Aug 2011 12:34:28 -0400 Subject: Btrfs: stop passing a trans handle all around the reservation code The only thing that we need to have a trans handle for is in reserve_metadata_bytes, and that's to know how much flushing we can do. So instead of passing it around, just check current->journal_info for a trans_handle so we know if we can commit a transaction to try and free up space or not. 
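The lookup itself is a one-liner; a sketch of the idiom (the helper name here is just for illustration):

        /* journal_info holds the task's running trans handle, or NULL. */
        static inline struct btrfs_trans_handle *running_trans(void)
        {
                return (struct btrfs_trans_handle *)current->journal_info;
        }

With that, reserve_metadata_bytes can tell on its own whether committing a transaction is an option for reclaiming space, without every caller plumbing the handle through.
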
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 6 ++---- fs/btrfs/extent-tree.c | 45 +++++++++++++++++++++++---------------------- fs/btrfs/free-space-cache.c | 4 +--- fs/btrfs/inode.c | 4 ++-- fs/btrfs/relocation.c | 15 +++++++-------- fs/btrfs/transaction.c | 8 ++++---- 6 files changed, 39 insertions(+), 43 deletions(-) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index caa73cd8c00a..a5faf8e33baa 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2237,12 +2237,10 @@ void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv); struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root); void btrfs_free_block_rsv(struct btrfs_root *root, struct btrfs_block_rsv *rsv); -int btrfs_block_rsv_add(struct btrfs_trans_handle *trans, - struct btrfs_root *root, +int btrfs_block_rsv_add(struct btrfs_root *root, struct btrfs_block_rsv *block_rsv, u64 num_bytes); -int btrfs_block_rsv_check(struct btrfs_trans_handle *trans, - struct btrfs_root *root, +int btrfs_block_rsv_check(struct btrfs_root *root, struct btrfs_block_rsv *block_rsv, u64 min_reserved, int min_factor, int flush); int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index d236df790156..9bb71597aa54 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3404,29 +3404,34 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, return reclaimed >= to_reclaim; } -/* - * Retries tells us how many times we've called reserve_metadata_bytes. The - * idea is if this is the first call (retries == 0) then we will add to our - * reserved count if we can't make the allocation in order to hold our place - * while we go and try and free up space. That way for retries > 1 we don't try - * and add space, we just check to see if the amount of unused space is >= the - * total space, meaning that our reservation is valid. +/** + * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space + * @root - the root we're allocating for + * @block_rsv - the block_rsv we're allocating for + * @orig_bytes - the number of bytes we want + * @flush - wether or not we can flush to make our reservation * - * However if we don't intend to retry this reservation, pass -1 as retries so - * that it short circuits this logic. + * This will reserve orgi_bytes number of bytes from the space info associated + * with the block_rsv. If there is not enough space it will make an attempt to + * flush out space to make room. It will do this by flushing delalloc if + * possible or committing the transaction. If flush is 0 then no attempts to + * regain reservations will be made and this will fail if there is not enough + * space already. 
*/ -static int reserve_metadata_bytes(struct btrfs_trans_handle *trans, - struct btrfs_root *root, +static int reserve_metadata_bytes(struct btrfs_root *root, struct btrfs_block_rsv *block_rsv, u64 orig_bytes, int flush) { struct btrfs_space_info *space_info = block_rsv->space_info; + struct btrfs_trans_handle *trans; u64 unused; u64 num_bytes = orig_bytes; int retries = 0; int ret = 0; bool committed = false; bool flushing = false; + + trans = (struct btrfs_trans_handle *)current->journal_info; again: ret = 0; spin_lock(&space_info->lock); @@ -3689,8 +3694,7 @@ void btrfs_free_block_rsv(struct btrfs_root *root, kfree(rsv); } -int btrfs_block_rsv_add(struct btrfs_trans_handle *trans, - struct btrfs_root *root, +int btrfs_block_rsv_add(struct btrfs_root *root, struct btrfs_block_rsv *block_rsv, u64 num_bytes) { @@ -3699,7 +3703,7 @@ int btrfs_block_rsv_add(struct btrfs_trans_handle *trans, if (num_bytes == 0) return 0; - ret = reserve_metadata_bytes(trans, root, block_rsv, num_bytes, 1); + ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1); if (!ret) { block_rsv_add_bytes(block_rsv, num_bytes, 1); return 0; @@ -3708,8 +3712,7 @@ int btrfs_block_rsv_add(struct btrfs_trans_handle *trans, return ret; } -int btrfs_block_rsv_check(struct btrfs_trans_handle *trans, - struct btrfs_root *root, +int btrfs_block_rsv_check(struct btrfs_root *root, struct btrfs_block_rsv *block_rsv, u64 min_reserved, int min_factor, int flush) { @@ -3734,7 +3737,7 @@ int btrfs_block_rsv_check(struct btrfs_trans_handle *trans, if (!ret) return 0; - ret = reserve_metadata_bytes(trans, root, block_rsv, num_bytes, flush); + ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush); if (!ret) { block_rsv_add_bytes(block_rsv, num_bytes, 0); return 0; @@ -4034,7 +4037,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) to_reserve += calc_csum_metadata_size(inode, num_bytes, 1); spin_unlock(&BTRFS_I(inode)->lock); - ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, flush); + ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush); if (ret) { u64 to_free = 0; unsigned dropped; @@ -5689,8 +5692,7 @@ use_block_rsv(struct btrfs_trans_handle *trans, block_rsv = get_block_rsv(trans, root); if (block_rsv->size == 0) { - ret = reserve_metadata_bytes(trans, root, block_rsv, - blocksize, 0); + ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0); /* * If we couldn't reserve metadata bytes try and use some from * the global reserve. @@ -5711,8 +5713,7 @@ use_block_rsv(struct btrfs_trans_handle *trans, return block_rsv; if (ret) { WARN_ON(1); - ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize, - 0); + ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0); if (!ret) { return block_rsv; } else if (ret && block_rsv != global_rsv) { diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 3bde17ff14c0..ffc42ef44711 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -197,9 +197,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root, rsv = trans->block_rsv; trans->block_rsv = root->orphan_block_rsv; - ret = btrfs_block_rsv_check(trans, root, - root->orphan_block_rsv, - 0, 5, 0); + ret = btrfs_block_rsv_check(root, root->orphan_block_rsv, 0, 5, 0); if (ret) return ret; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 78b5ae59ac4f..2947e94947b5 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3577,7 +3577,7 @@ void btrfs_evict_inode(struct inode *inode) * doing the truncate. 
*/ while (1) { - ret = btrfs_block_rsv_check(NULL, root, rsv, min_size, 0, 1); + ret = btrfs_block_rsv_check(root, rsv, min_size, 0, 1); if (ret) { printk(KERN_WARNING "Could not get space for a " "delete, will truncate on mount %d\n", ret); @@ -6577,7 +6577,7 @@ static int btrfs_truncate(struct inode *inode) btrfs_add_ordered_operation(trans, root, inode); while (1) { - ret = btrfs_block_rsv_check(trans, root, rsv, min_size, 0, 1); + ret = btrfs_block_rsv_check(root, rsv, min_size, 0, 1); if (ret) { /* * This can only happen with the original transaction we diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index fd9ac66434b0..3ab67409f90f 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2041,8 +2041,8 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, BUG_ON(IS_ERR(trans)); trans->block_rsv = rc->block_rsv; - ret = btrfs_block_rsv_check(trans, root, rc->block_rsv, - min_reserved, 0, 0); + ret = btrfs_block_rsv_check(root, rc->block_rsv, min_reserved, + 0, 0); if (ret) { BUG_ON(ret != -EAGAIN); ret = btrfs_commit_transaction(trans, root); @@ -2152,8 +2152,7 @@ int prepare_to_merge(struct reloc_control *rc, int err) again: if (!err) { num_bytes = rc->merging_rsv_size; - ret = btrfs_block_rsv_add(NULL, root, rc->block_rsv, - num_bytes); + ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes); if (ret) err = ret; } @@ -2427,7 +2426,7 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans, num_bytes = calcu_metadata_size(rc, node, 1) * 2; trans->block_rsv = rc->block_rsv; - ret = btrfs_block_rsv_add(trans, root, rc->block_rsv, num_bytes); + ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes); if (ret) { if (ret == -EAGAIN) rc->commit_transaction = 1; @@ -3645,7 +3644,7 @@ int prepare_to_relocate(struct reloc_control *rc) * btrfs_init_reloc_root will use them when there * is no reservation in transaction handle. */ - ret = btrfs_block_rsv_add(NULL, rc->extent_root, rc->block_rsv, + ret = btrfs_block_rsv_add(rc->extent_root, rc->block_rsv, rc->extent_root->nodesize * 256); if (ret) return ret; @@ -3774,8 +3773,8 @@ restart: } } - ret = btrfs_block_rsv_check(trans, rc->extent_root, - rc->block_rsv, 0, 5, 0); + ret = btrfs_block_rsv_check(rc->extent_root, rc->block_rsv, 0, + 5, 0); if (ret < 0) { if (ret != -EAGAIN) { err = ret; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index a770f4bd9d31..8d6f4c78f73f 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -275,7 +275,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root, */ if (num_items > 0 && root != root->fs_info->chunk_root) { num_bytes = btrfs_calc_trans_metadata_size(root, num_items); - ret = btrfs_block_rsv_add(NULL, root, + ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv, num_bytes); if (ret) @@ -418,8 +418,8 @@ static int should_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root) { int ret; - ret = btrfs_block_rsv_check(trans, root, - &root->fs_info->global_block_rsv, 0, 5, 0); + ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 0, + 5, 0); return ret ? 
1 : 0; } @@ -914,7 +914,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, btrfs_reloc_pre_snapshot(trans, pending, &to_reserve); if (to_reserve > 0) { - ret = btrfs_block_rsv_add(trans, root, &pending->block_rsv, + ret = btrfs_block_rsv_add(root, &pending->block_rsv, to_reserve); if (ret) { pending->error = ret; -- cgit v1.2.3 From 3b16a4e3c355ee3c790473decfcf83d4faeb8ce0 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 21 Sep 2011 15:05:58 -0400 Subject: Btrfs: use the inode's mapping mask for allocating pages Johannes pointed out we were allocating only kernel pages for doing writes, which is kind of a big deal if you are on 32bit and have more than a gig of ram. So fix our allocations to use the mapping's gfp but still clear __GFP_FS so we don't re-enter. Thanks, Reported-by: Johannes Weiner Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 6 ++++++ fs/btrfs/file.c | 3 ++- fs/btrfs/free-space-cache.c | 6 ++++-- fs/btrfs/inode.c | 3 ++- fs/btrfs/ioctl.c | 3 ++- fs/btrfs/relocation.c | 3 ++- 6 files changed, 18 insertions(+), 6 deletions(-) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index a5faf8e33baa..47dea7118e0e 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -30,6 +30,7 @@ #include #include #include +#include #include "extent_io.h" #include "extent_map.h" #include "async-thread.h" @@ -2117,6 +2118,11 @@ static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info) (space_info->flags & BTRFS_BLOCK_GROUP_DATA)); } +static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping) +{ + return mapping_gfp_mask(mapping) & ~__GFP_FS; +} + /* extent-tree.c */ static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root, unsigned num_items) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index de569af766fe..f2e928289600 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1069,6 +1069,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file, int i; unsigned long index = pos >> PAGE_CACHE_SHIFT; struct inode *inode = fdentry(file)->d_inode; + gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); int err = 0; int faili = 0; u64 start_pos; @@ -1080,7 +1081,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file, again: for (i = 0; i < num_pages; i++) { pages[i] = find_or_create_page(inode->i_mapping, index + i, - GFP_NOFS); + mask); if (!pages[i]) { faili = i - 1; err = -ENOMEM; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index ffc42ef44711..0a8ccdbdd464 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -254,6 +254,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, u64 num_bitmaps; u64 generation; pgoff_t index = 0; + gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); int ret = 0; INIT_LIST_HEAD(&bitmaps); @@ -310,7 +311,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, if (!num_entries && !num_bitmaps) break; - page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); + page = find_or_create_page(inode->i_mapping, index, mask); if (!page) goto free_cache; @@ -563,6 +564,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, u64 start, end, len; u64 bytes = 0; u32 crc = ~(u32)0; + gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); int index = 0, num_pages = 0; int entries = 0; int bitmaps = 0; @@ -612,7 +614,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, 
struct inode *inode, * know and don't freak out. */ while (index < num_pages) { - page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); + page = find_or_create_page(inode->i_mapping, index, mask); if (!page) { int i; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 2947e94947b5..88e3956be57d 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3282,6 +3282,7 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from) pgoff_t index = from >> PAGE_CACHE_SHIFT; unsigned offset = from & (PAGE_CACHE_SIZE-1); struct page *page; + gfp_t mask = btrfs_alloc_write_mask(mapping); int ret = 0; u64 page_start; u64 page_end; @@ -3294,7 +3295,7 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from) ret = -ENOMEM; again: - page = find_or_create_page(mapping, index, GFP_NOFS); + page = find_or_create_page(mapping, index, mask); if (!page) { btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); goto out; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 538f65a79ec5..24fd75bb0f96 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -843,6 +843,7 @@ static int cluster_pages_for_defrag(struct inode *inode, int i_done; struct btrfs_ordered_extent *ordered; struct extent_state *cached_state = NULL; + gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); if (isize == 0) return 0; @@ -860,7 +861,7 @@ again: for (i = 0; i < num_pages; i++) { struct page *page; page = find_or_create_page(inode->i_mapping, - start_index + i, GFP_NOFS); + start_index + i, mask); if (!page) break; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 3ab67409f90f..7fa090fa0d39 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2921,6 +2921,7 @@ static int relocate_file_extent_cluster(struct inode *inode, unsigned long last_index; struct page *page; struct file_ra_state *ra; + gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); int nr = 0; int ret = 0; @@ -2955,7 +2956,7 @@ static int relocate_file_extent_cluster(struct inode *inode, ra, NULL, index, last_index + 1 - index); page = find_or_create_page(inode->i_mapping, index, - GFP_NOFS); + mask); if (!page) { btrfs_delalloc_release_metadata(inode, PAGE_CACHE_SIZE); -- cgit v1.2.3 From f75b130e9bb361850787e156c79311adb84f551e Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 5 Oct 2011 10:00:18 -0400 Subject: Btrfs: don't skip writing out a empty block groups cache I noticed a slight bug where we will not bother writing out the block group cache's space cache if its space tree is empty. Since it could have a cluster or pinned extents that need to be written out, this is just not a valid test. 
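The fix, sketched (this mirrors the hunk below): when the free space tree is empty, fall back to the cluster's rbtree instead of bailing out:

        node = rb_first(&ctl->free_space_offset);
        if (!node && cluster) {
                /* Tree is empty, but the cluster may still hold entries. */
                node = rb_first(&cluster->root);
                cluster = NULL;
        }

Pinned extents are picked up later in the writeout anyway, so an empty tree by itself is no reason to skip writing the cache.
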
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 0a8ccdbdd464..b81556ca75ea 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -575,10 +575,6 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, INIT_LIST_HEAD(&bitmap_list); - node = rb_first(&ctl->free_space_offset); - if (!node) - return -1; - if (!i_size_read(inode)) return -1; @@ -639,6 +635,12 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, if (block_group) start = block_group->key.objectid; + node = rb_first(&ctl->free_space_offset); + if (!node && cluster) { + node = rb_first(&cluster->root); + cluster = NULL; + } + /* Write out the extent entries */ do { struct btrfs_free_space_entry *entry; -- cgit v1.2.3 From a67509c30079f4c5025fb19ea443fb2906c3a85e Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 5 Oct 2011 15:18:58 -0400 Subject: Btrfs: add a io_ctl struct and helpers for dealing with the space cache In writing and reading the space cache we have one big loop that keeps track of which page we are on, and then a bunch of sizeable loops underneath this big loop to try and read/write out properly. Especially in the write case this makes things hugely complicated and hard to follow, and makes our error checking and recovery equally complex. So add an io_ctl struct with a bunch of helpers to keep track of the pages we have, where we are, if we have enough space, etc. This unifies how we deal with the pages we're writing and keeps all the messy tracking internal. This allows us to kill the big loops in both the read and write cases and makes reviewing and changing the write and read paths much simpler. I've run xfstests and stress.sh on this code and it survives. 
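For orientation, the write path with the new helpers boils down to roughly this (a condensed sketch, error handling dropped; the helpers are defined in the diff below):

        struct io_ctl io_ctl;

        io_ctl_init(&io_ctl, inode, root);        /* size the page array   */
        io_ctl_prepare_pages(&io_ctl, inode, 0);  /* find and lock pages   */
        io_ctl_set_generation(&io_ctl, trans->transid);

        /* One call per entry; crossing a page boundary is handled inside. */
        io_ctl_add_entry(&io_ctl, e->offset, e->bytes, e->bitmap);
        io_ctl_add_bitmap(&io_ctl, entry->bitmap);

        io_ctl_zero_remaining_pages(&io_ctl);
        io_ctl_drop_pages(&io_ctl);
        io_ctl_free(&io_ctl);
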
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 693 ++++++++++++++++++++++++-------------------- 1 file changed, 375 insertions(+), 318 deletions(-) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index b81556ca75ea..35bfc13c9d42 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -241,27 +241,275 @@ static int readahead_cache(struct inode *inode) return 0; } +struct io_ctl { + void *cur, *orig; + struct page *page; + struct page **pages; + struct btrfs_root *root; + unsigned long size; + int index; + int num_pages; +}; + +static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode, + struct btrfs_root *root) +{ + memset(io_ctl, 0, sizeof(struct io_ctl)); + io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT; + io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages, + GFP_NOFS); + if (!io_ctl->pages) + return -ENOMEM; + io_ctl->root = root; + return 0; +} + +static void io_ctl_free(struct io_ctl *io_ctl) +{ + kfree(io_ctl->pages); +} + +static void io_ctl_unmap_page(struct io_ctl *io_ctl) +{ + if (io_ctl->cur) { + kunmap(io_ctl->page); + io_ctl->cur = NULL; + io_ctl->orig = NULL; + } +} + +static void io_ctl_map_page(struct io_ctl *io_ctl, int clear) +{ + WARN_ON(io_ctl->cur); + BUG_ON(io_ctl->index >= io_ctl->num_pages); + io_ctl->page = io_ctl->pages[io_ctl->index++]; + io_ctl->cur = kmap(io_ctl->page); + io_ctl->orig = io_ctl->cur; + io_ctl->size = PAGE_CACHE_SIZE; + if (clear) + memset(io_ctl->cur, 0, PAGE_CACHE_SIZE); +} + +static void io_ctl_drop_pages(struct io_ctl *io_ctl) +{ + int i; + + io_ctl_unmap_page(io_ctl); + + for (i = 0; i < io_ctl->num_pages; i++) { + ClearPageChecked(io_ctl->pages[i]); + unlock_page(io_ctl->pages[i]); + page_cache_release(io_ctl->pages[i]); + } +} + +static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode, + int uptodate) +{ + struct page *page; + gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); + int i; + + for (i = 0; i < io_ctl->num_pages; i++) { + page = find_or_create_page(inode->i_mapping, i, mask); + if (!page) { + io_ctl_drop_pages(io_ctl); + return -ENOMEM; + } + io_ctl->pages[i] = page; + if (uptodate && !PageUptodate(page)) { + btrfs_readpage(NULL, page); + lock_page(page); + if (!PageUptodate(page)) { + printk(KERN_ERR "btrfs: error reading free " + "space cache\n"); + io_ctl_drop_pages(io_ctl); + return -EIO; + } + } + } + + return 0; +} + +static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation) +{ + u64 *val; + + io_ctl_map_page(io_ctl, 1); + + /* + * Skip the first 64bits to make sure theres a bogus crc for old + * kernels + */ + io_ctl->cur += sizeof(u64); + + val = io_ctl->cur; + *val = cpu_to_le64(generation); + io_ctl->cur += sizeof(u64); + io_ctl->size -= sizeof(u64) * 2; +} + +static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation) +{ + u64 *gen; + + io_ctl_map_page(io_ctl, 0); + + /* Skip the bogus crc area */ + io_ctl->cur += sizeof(u64); + gen = io_ctl->cur; + if (le64_to_cpu(*gen) != generation) { + printk_ratelimited(KERN_ERR "btrfs: space cache generation " + "(%Lu) does not match inode (%Lu)\n", *gen, + generation); + io_ctl_unmap_page(io_ctl); + return -EIO; + } + io_ctl->cur += sizeof(u64); + io_ctl->size -= sizeof(u64) * 2; + return 0; +} + +static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes, + void *bitmap) +{ + struct btrfs_free_space_entry *entry; + + if (!io_ctl->cur) + return 
-ENOSPC; + + entry = io_ctl->cur; + entry->offset = cpu_to_le64(offset); + entry->bytes = cpu_to_le64(bytes); + entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP : + BTRFS_FREE_SPACE_EXTENT; + io_ctl->cur += sizeof(struct btrfs_free_space_entry); + io_ctl->size -= sizeof(struct btrfs_free_space_entry); + + if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) + return 0; + + /* + * index == 1 means the current page is 0, we need to generate a bogus + * crc for older kernels. + */ + if (io_ctl->index == 1) { + u32 *tmp; + u32 crc = ~(u32)0; + + crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + sizeof(u64), + crc, PAGE_CACHE_SIZE - sizeof(u64)); + btrfs_csum_final(crc, (char *)&crc); + crc++; + tmp = io_ctl->orig; + *tmp = crc; + } + io_ctl_unmap_page(io_ctl); + + /* No more pages to map */ + if (io_ctl->index >= io_ctl->num_pages) + return 0; + + /* map the next page */ + io_ctl_map_page(io_ctl, 1); + return 0; +} + +static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap) +{ + if (!io_ctl->cur) + return -ENOSPC; + + /* + * If we aren't at the start of the current page, unmap this one and + * map the next one if there is any left. + */ + if (io_ctl->cur != io_ctl->orig) { + io_ctl_unmap_page(io_ctl); + if (io_ctl->index >= io_ctl->num_pages) + return -ENOSPC; + io_ctl_map_page(io_ctl, 0); + } + + memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE); + io_ctl_unmap_page(io_ctl); + if (io_ctl->index < io_ctl->num_pages) + io_ctl_map_page(io_ctl, 0); + return 0; +} + +static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl) +{ + io_ctl_unmap_page(io_ctl); + + while (io_ctl->index < io_ctl->num_pages) { + io_ctl_map_page(io_ctl, 1); + io_ctl_unmap_page(io_ctl); + } +} + +static u8 io_ctl_read_entry(struct io_ctl *io_ctl, + struct btrfs_free_space *entry) +{ + struct btrfs_free_space_entry *e; + u8 type; + + e = io_ctl->cur; + entry->offset = le64_to_cpu(e->offset); + entry->bytes = le64_to_cpu(e->bytes); + type = e->type; + io_ctl->cur += sizeof(struct btrfs_free_space_entry); + io_ctl->size -= sizeof(struct btrfs_free_space_entry); + + if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) + return type; + + io_ctl_unmap_page(io_ctl); + + if (io_ctl->index >= io_ctl->num_pages) + return type; + + io_ctl_map_page(io_ctl, 0); + return type; +} + +static void io_ctl_read_bitmap(struct io_ctl *io_ctl, + struct btrfs_free_space *entry) +{ + BUG_ON(!io_ctl->cur); + if (io_ctl->cur != io_ctl->orig) { + io_ctl_unmap_page(io_ctl); + io_ctl_map_page(io_ctl, 0); + } + memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE); + io_ctl_unmap_page(io_ctl); + if (io_ctl->index < io_ctl->num_pages) + io_ctl_map_page(io_ctl, 0); +} + int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, struct btrfs_free_space_ctl *ctl, struct btrfs_path *path, u64 offset) { struct btrfs_free_space_header *header; struct extent_buffer *leaf; - struct page *page; + struct io_ctl io_ctl; struct btrfs_key key; + struct btrfs_free_space *e, *n; struct list_head bitmaps; u64 num_entries; u64 num_bitmaps; u64 generation; - pgoff_t index = 0; - gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); + u8 type; int ret = 0; INIT_LIST_HEAD(&bitmaps); /* Nothing in the space cache, goodbye */ if (!i_size_read(inode)) - goto out; + return 0; key.objectid = BTRFS_FREE_SPACE_OBJECTID; key.offset = offset; @@ -269,11 +517,10 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) - goto out; + return 0; else if (ret > 0) { 
btrfs_release_path(path); - ret = 0; - goto out; + return 0; } ret = -1; @@ -291,170 +538,89 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, " not match free space cache generation (%llu)\n", (unsigned long long)BTRFS_I(inode)->generation, (unsigned long long)generation); - goto out; + return 0; } if (!num_entries) - goto out; + return 0; + io_ctl_init(&io_ctl, inode, root); ret = readahead_cache(inode); if (ret) goto out; - while (1) { - struct btrfs_free_space_entry *entry; - struct btrfs_free_space *e; - void *addr; - unsigned long offset = 0; - int need_loop = 0; + ret = io_ctl_prepare_pages(&io_ctl, inode, 1); + if (ret) + goto out; - if (!num_entries && !num_bitmaps) - break; + ret = io_ctl_check_generation(&io_ctl, generation); + if (ret) + goto free_cache; - page = find_or_create_page(inode->i_mapping, index, mask); - if (!page) + while (num_entries) { + e = kmem_cache_zalloc(btrfs_free_space_cachep, + GFP_NOFS); + if (!e) goto free_cache; - if (!PageUptodate(page)) { - btrfs_readpage(NULL, page); - lock_page(page); - if (!PageUptodate(page)) { - unlock_page(page); - page_cache_release(page); - printk(KERN_ERR "btrfs: error reading free " - "space cache\n"); - goto free_cache; - } + type = io_ctl_read_entry(&io_ctl, e); + if (!e->bytes) { + kmem_cache_free(btrfs_free_space_cachep, e); + goto free_cache; } - addr = kmap(page); - - if (index == 0) { - u64 *gen; - /* - * We put a bogus crc in the front of the first page in - * case old kernels try to mount a fs with the new - * format to make sure they discard the cache. - */ - addr += sizeof(u64); - offset += sizeof(u64); - - gen = addr; - if (*gen != BTRFS_I(inode)->generation) { - printk_ratelimited(KERN_ERR "btrfs: space cache" - " generation (%llu) does not match " - "inode (%llu)\n", - (unsigned long long)*gen, - (unsigned long long) - BTRFS_I(inode)->generation); - kunmap(page); - unlock_page(page); - page_cache_release(page); + if (type == BTRFS_FREE_SPACE_EXTENT) { + spin_lock(&ctl->tree_lock); + ret = link_free_space(ctl, e); + spin_unlock(&ctl->tree_lock); + if (ret) { + printk(KERN_ERR "Duplicate entries in " + "free space cache, dumping\n"); + kmem_cache_free(btrfs_free_space_cachep, e); goto free_cache; } - addr += sizeof(u64); - offset += sizeof(u64); - } - entry = addr; - - while (1) { - if (!num_entries) - break; - - need_loop = 1; - e = kmem_cache_zalloc(btrfs_free_space_cachep, - GFP_NOFS); - if (!e) { - kunmap(page); - unlock_page(page); - page_cache_release(page); + } else { + BUG_ON(!num_bitmaps); + num_bitmaps--; + e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); + if (!e->bitmap) { + kmem_cache_free( + btrfs_free_space_cachep, e); goto free_cache; } - - e->offset = le64_to_cpu(entry->offset); - e->bytes = le64_to_cpu(entry->bytes); - if (!e->bytes) { - kunmap(page); + spin_lock(&ctl->tree_lock); + ret = link_free_space(ctl, e); + ctl->total_bitmaps++; + ctl->op->recalc_thresholds(ctl); + spin_unlock(&ctl->tree_lock); + if (ret) { + printk(KERN_ERR "Duplicate entries in " + "free space cache, dumping\n"); kmem_cache_free(btrfs_free_space_cachep, e); - unlock_page(page); - page_cache_release(page); goto free_cache; } - - if (entry->type == BTRFS_FREE_SPACE_EXTENT) { - spin_lock(&ctl->tree_lock); - ret = link_free_space(ctl, e); - spin_unlock(&ctl->tree_lock); - if (ret) { - printk(KERN_ERR "Duplicate entries in " - "free space cache, dumping\n"); - kunmap(page); - unlock_page(page); - page_cache_release(page); - goto free_cache; - } - } else { - e->bitmap = kzalloc(PAGE_CACHE_SIZE, 
GFP_NOFS); - if (!e->bitmap) { - kunmap(page); - kmem_cache_free( - btrfs_free_space_cachep, e); - unlock_page(page); - page_cache_release(page); - goto free_cache; - } - spin_lock(&ctl->tree_lock); - ret = link_free_space(ctl, e); - ctl->total_bitmaps++; - ctl->op->recalc_thresholds(ctl); - spin_unlock(&ctl->tree_lock); - if (ret) { - printk(KERN_ERR "Duplicate entries in " - "free space cache, dumping\n"); - kunmap(page); - unlock_page(page); - page_cache_release(page); - goto free_cache; - } - list_add_tail(&e->list, &bitmaps); - } - - num_entries--; - offset += sizeof(struct btrfs_free_space_entry); - if (offset + sizeof(struct btrfs_free_space_entry) >= - PAGE_CACHE_SIZE) - break; - entry++; + list_add_tail(&e->list, &bitmaps); } - /* - * We read an entry out of this page, we need to move on to the - * next page. - */ - if (need_loop) { - kunmap(page); - goto next; - } + num_entries--; + } - /* - * We add the bitmaps at the end of the entries in order that - * the bitmap entries are added to the cache. - */ - e = list_entry(bitmaps.next, struct btrfs_free_space, list); + /* + * We add the bitmaps at the end of the entries in order that + * the bitmap entries are added to the cache. + */ + list_for_each_entry_safe(e, n, &bitmaps, list) { list_del_init(&e->list); - memcpy(e->bitmap, addr, PAGE_CACHE_SIZE); - kunmap(page); - num_bitmaps--; -next: - unlock_page(page); - page_cache_release(page); - index++; + io_ctl_read_bitmap(&io_ctl, e); } + io_ctl_drop_pages(&io_ctl); ret = 1; out: + io_ctl_free(&io_ctl); return ret; free_cache: + io_ctl_drop_pages(&io_ctl); __btrfs_remove_free_space_cache(ctl); goto out; } @@ -554,40 +720,28 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, struct extent_buffer *leaf; struct rb_node *node; struct list_head *pos, *n; - struct page **pages; - struct page *page; struct extent_state *cached_state = NULL; struct btrfs_free_cluster *cluster = NULL; struct extent_io_tree *unpin = NULL; + struct io_ctl io_ctl; struct list_head bitmap_list; struct btrfs_key key; u64 start, end, len; - u64 bytes = 0; - u32 crc = ~(u32)0; - gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); - int index = 0, num_pages = 0; int entries = 0; int bitmaps = 0; int ret; int err = -1; - bool next_page = false; - bool out_of_space = false; INIT_LIST_HEAD(&bitmap_list); if (!i_size_read(inode)) return -1; - num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; - filemap_write_and_wait(inode->i_mapping); btrfs_wait_ordered_range(inode, inode->i_size & ~(root->sectorsize - 1), (u64)-1); - pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS); - if (!pages) - return -1; + io_ctl_init(&io_ctl, inode, root); /* Get the cluster for this block_group if it exists */ if (block_group && !list_empty(&block_group->cluster_list)) @@ -601,30 +755,9 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, */ unpin = root->fs_info->pinned_extents; - /* - * Lock all pages first so we can lock the extent safely. - * - * NOTE: Because we hold the ref the entire time we're going to write to - * the page find_get_page should never fail, so we don't do a check - * after find_get_page at this point. Just putting this here so people - * know and don't freak out. 
- */ - while (index < num_pages) { - page = find_or_create_page(inode->i_mapping, index, mask); - if (!page) { - int i; - - for (i = 0; i < num_pages; i++) { - unlock_page(pages[i]); - page_cache_release(pages[i]); - } - goto out; - } - pages[index] = page; - index++; - } + /* Lock all pages first so we can lock the extent safely. */ + io_ctl_prepare_pages(&io_ctl, inode, 0); - index = 0; lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, 0, &cached_state, GFP_NOFS); @@ -641,166 +774,78 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, cluster = NULL; } - /* Write out the extent entries */ - do { - struct btrfs_free_space_entry *entry; - void *addr, *orig; - unsigned long offset = 0; - - next_page = false; + io_ctl_set_generation(&io_ctl, trans->transid); - if (index >= num_pages) { - out_of_space = true; - break; - } - - page = pages[index]; + /* Write out the extent entries */ + while (node) { + struct btrfs_free_space *e; - orig = addr = kmap(page); - if (index == 0) { - u64 *gen; + e = rb_entry(node, struct btrfs_free_space, offset_index); + entries++; - /* - * We're going to put in a bogus crc for this page to - * make sure that old kernels who aren't aware of this - * format will be sure to discard the cache. - */ - addr += sizeof(u64); - offset += sizeof(u64); + ret = io_ctl_add_entry(&io_ctl, e->offset, e->bytes, + e->bitmap); + if (ret) + goto out_nospc; - gen = addr; - *gen = trans->transid; - addr += sizeof(u64); - offset += sizeof(u64); + if (e->bitmap) { + list_add_tail(&e->list, &bitmap_list); + bitmaps++; } - entry = addr; - - memset(addr, 0, PAGE_CACHE_SIZE - offset); - while (node && !next_page) { - struct btrfs_free_space *e; - - e = rb_entry(node, struct btrfs_free_space, offset_index); - entries++; - - entry->offset = cpu_to_le64(e->offset); - entry->bytes = cpu_to_le64(e->bytes); - if (e->bitmap) { - entry->type = BTRFS_FREE_SPACE_BITMAP; - list_add_tail(&e->list, &bitmap_list); - bitmaps++; - } else { - entry->type = BTRFS_FREE_SPACE_EXTENT; - } - node = rb_next(node); - if (!node && cluster) { - node = rb_first(&cluster->root); - cluster = NULL; - } - offset += sizeof(struct btrfs_free_space_entry); - if (offset + sizeof(struct btrfs_free_space_entry) >= - PAGE_CACHE_SIZE) - next_page = true; - entry++; + node = rb_next(node); + if (!node && cluster) { + node = rb_first(&cluster->root); + cluster = NULL; } + } - /* - * We want to add any pinned extents to our free space cache - * so we don't leak the space - */ - while (block_group && !next_page && - (start < block_group->key.objectid + - block_group->key.offset)) { - ret = find_first_extent_bit(unpin, start, &start, &end, - EXTENT_DIRTY); - if (ret) { - ret = 0; - break; - } - - /* This pinned extent is out of our range */ - if (start >= block_group->key.objectid + - block_group->key.offset) - break; - - len = block_group->key.objectid + - block_group->key.offset - start; - len = min(len, end + 1 - start); - - entries++; - entry->offset = cpu_to_le64(start); - entry->bytes = cpu_to_le64(len); - entry->type = BTRFS_FREE_SPACE_EXTENT; - - start = end + 1; - offset += sizeof(struct btrfs_free_space_entry); - if (offset + sizeof(struct btrfs_free_space_entry) >= - PAGE_CACHE_SIZE) - next_page = true; - entry++; + /* + * We want to add any pinned extents to our free space cache + * so we don't leak the space + */ + while (block_group && (start < block_group->key.objectid + + block_group->key.offset)) { + ret = find_first_extent_bit(unpin, start, &start, &end, + EXTENT_DIRTY); 
+ if (ret) { + ret = 0; + break; } - /* Generate bogus crc value */ - if (index == 0) { - u32 *tmp; - crc = btrfs_csum_data(root, orig + sizeof(u64), crc, - PAGE_CACHE_SIZE - sizeof(u64)); - btrfs_csum_final(crc, (char *)&crc); - crc++; - tmp = orig; - *tmp = crc; - } + /* This pinned extent is out of our range */ + if (start >= block_group->key.objectid + + block_group->key.offset) + break; - kunmap(page); + len = block_group->key.objectid + + block_group->key.offset - start; + len = min(len, end + 1 - start); - bytes += PAGE_CACHE_SIZE; + entries++; + ret = io_ctl_add_entry(&io_ctl, start, len, NULL); + if (ret) + goto out_nospc; - index++; - } while (node || next_page); + start = end + 1; + } /* Write out the bitmaps */ list_for_each_safe(pos, n, &bitmap_list) { - void *addr; struct btrfs_free_space *entry = list_entry(pos, struct btrfs_free_space, list); - if (index >= num_pages) { - out_of_space = true; - break; - } - page = pages[index]; - - addr = kmap(page); - memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE); - kunmap(page); - bytes += PAGE_CACHE_SIZE; - + ret = io_ctl_add_bitmap(&io_ctl, entry->bitmap); + if (ret) + goto out_nospc; list_del_init(&entry->list); - index++; - } - - if (out_of_space) { - btrfs_drop_pages(pages, num_pages); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, - i_size_read(inode) - 1, &cached_state, - GFP_NOFS); - goto out; } /* Zero out the rest of the pages just to make sure */ - while (index < num_pages) { - void *addr; + io_ctl_zero_remaining_pages(&io_ctl); - page = pages[index]; - addr = kmap(page); - memset(addr, 0, PAGE_CACHE_SIZE); - kunmap(page); - bytes += PAGE_CACHE_SIZE; - index++; - } - - ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0, - bytes, &cached_state); - btrfs_drop_pages(pages, num_pages); + ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages, + 0, i_size_read(inode), &cached_state); + io_ctl_drop_pages(&io_ctl); unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, &cached_state, GFP_NOFS); @@ -817,7 +862,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, ret = btrfs_search_slot(trans, root, &key, path, 0, 1); if (ret < 0) { - clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, + clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS); goto out; @@ -830,7 +875,8 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || found_key.offset != offset) { - clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, + clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, + inode->i_size - 1, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS); @@ -848,13 +894,24 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, err = 0; out: - kfree(pages); + io_ctl_free(&io_ctl); if (err) { - invalidate_inode_pages2_range(inode->i_mapping, 0, index); + invalidate_inode_pages2(inode->i_mapping); BTRFS_I(inode)->generation = 0; } btrfs_update_inode(trans, root, inode); return err; + +out_nospc: + list_for_each_safe(pos, n, &bitmap_list) { + struct btrfs_free_space *entry = + list_entry(pos, struct btrfs_free_space, list); + list_del_init(&entry->list); + } + io_ctl_drop_pages(&io_ctl); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, + i_size_read(inode) - 1, &cached_state, GFP_NOFS); + goto out; } int 
btrfs_write_out_cache(struct btrfs_root *root, -- cgit v1.2.3 From 549b4fdb8f3c0708bbc0ee12ff955cd206c0f60c Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 5 Oct 2011 16:33:53 -0400 Subject: Btrfs: check the return value of filemap_write_and_wait in the space cache We need to check the return value of filemap_write_and_wait in the space cache writeout code. Also don't set the inode's generation until we're sure nothing else is going to fail. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 35bfc13c9d42..abc924c9467c 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -852,9 +852,10 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, if (ret) goto out; - BTRFS_I(inode)->generation = trans->transid; - filemap_write_and_wait(inode->i_mapping); + ret = filemap_write_and_wait(inode->i_mapping); + if (ret) + goto out; key.objectid = BTRFS_FREE_SPACE_OBJECTID; key.offset = offset; @@ -884,6 +885,8 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, goto out; } } + + BTRFS_I(inode)->generation = trans->transid; header = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_free_space_header); btrfs_set_free_space_entries(leaf, header, entries); -- cgit v1.2.3 From 5b0e95bf607ddd59b39f52d3d55e6581c817b530 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 6 Oct 2011 08:58:24 -0400 Subject: Btrfs: inline checksums into the disk free space cache Yeah yeah I know this is how we used to do it and then I changed it, but damnit I'm changing it back. The fact is that writing out checksums will modify metadata, which could cause us to dirty a block group we've already written out, so we have to truncate it and all of its checksums and re-write it, which will write new checksums, which could dirty a block group that has already been written, and you see where I'm going with this? This can cause unmount or really anything that depends on a transaction to commit to take its sweet damned time to happen. So go back to the way it was, only this time we're specifically setting NODATACOW because we can't go through the COW pathway anyway and we're doing our own built-in cow'ing by truncating the free space cache. The other new thing is once we truncate the old cache and preallocate the new space, we don't need to do that song and dance at all for the rest of the transaction, we can just overwrite the existing space with the new cache if the block group changes for whatever reason, and the NODATACOW will let us do this fine. So keep track of which transaction we last cleared our cache in and if we cleared it in this transaction just say we're all set up and carry on. This survives xfstests and stress.sh. The inode cache will continue to use the normal csum infrastructure since it only gets written once and there will be no more modifications to the fs tree in a transaction commit.
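For a concrete picture of the inlined-checksum layout this patch describes, here is a minimal user-space sketch: page 0 begins with one u32 crc per page, followed by the u64 generation, and each page's crc covers the whole page except, on page 0, the crc array itself. The helper name and the seeded-crc32c assumption below are illustrative stand-ins, not the kernel API.

    #include <stdint.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096u

    /* stand-in for btrfs_csum_data(), assumed to be a seeded crc32c */
    uint32_t crc32c_seeded(uint32_t seed, const uint8_t *buf, size_t len);

    /* checksum one cache page and park the result in page 0's crc array */
    static void cache_set_crc(uint8_t *pages[], int num_pages, int index)
    {
            uint32_t *crcs = (uint32_t *)pages[0];  /* one u32 per page */
            /* page 0's crc covers everything after the crc array itself */
            size_t off = (index == 0) ? sizeof(uint32_t) * (size_t)num_pages : 0;
            uint32_t crc = ~(uint32_t)0;

            crc = crc32c_seeded(crc, pages[index] + off, PAGE_SIZE - off);
            crcs[index] = ~crc;     /* final inversion, as btrfs_csum_final() does */
    }

On load, the reader recomputes the same value and rejects the page on mismatch, which is what io_ctl_check_crc does in the diff below.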
Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 1 + fs/btrfs/extent-tree.c | 18 ++-- fs/btrfs/free-space-cache.c | 211 ++++++++++++++++++++++++++++++++------------ fs/btrfs/inode.c | 10 +-- 4 files changed, 172 insertions(+), 68 deletions(-) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 1eafccb162ee..ea60897a9171 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -838,6 +838,7 @@ struct btrfs_block_group_cache { u64 bytes_super; u64 flags; u64 sectorsize; + u64 cache_generation; unsigned int ro:1; unsigned int dirty:1; unsigned int iref:1; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index f95e55083bdb..0abf70c984e9 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2717,6 +2717,13 @@ again: goto again; } + /* We've already setup this transaction, go ahead and exit */ + if (block_group->cache_generation == trans->transid && + i_size_read(inode)) { + dcs = BTRFS_DC_SETUP; + goto out_put; + } + /* * We want to set the generation to 0, that way if anything goes wrong * from here on out we know not to trust this cache when we load up next @@ -2756,19 +2763,16 @@ again: num_pages *= 16; num_pages *= PAGE_CACHE_SIZE; - ret = btrfs_delalloc_reserve_space(inode, num_pages); + ret = btrfs_check_data_free_space(inode, num_pages); if (ret) goto out_put; ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages, num_pages, num_pages, &alloc_hint); - if (!ret) { + if (!ret) dcs = BTRFS_DC_SETUP; - btrfs_free_reserved_data_space(inode, num_pages); - } else { - btrfs_delalloc_release_space(inode, num_pages); - } + btrfs_free_reserved_data_space(inode, num_pages); out_put: iput(inode); @@ -2776,6 +2780,8 @@ out_free: btrfs_release_path(path); out: spin_lock(&block_group->lock); + if (!ret) + block_group->cache_generation = trans->transid; block_group->disk_cache_state = dcs; spin_unlock(&block_group->lock); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index abc924c9467c..5d40c1ed8225 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -85,6 +85,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, *block_group, struct btrfs_path *path) { struct inode *inode = NULL; + u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW; spin_lock(&block_group->lock); if (block_group->inode) @@ -99,9 +100,10 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, return inode; spin_lock(&block_group->lock); - if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) { + if (!((BTRFS_I(inode)->flags & flags) == flags)) { printk(KERN_INFO "Old style space inode found, converting.\n"); - BTRFS_I(inode)->flags &= ~BTRFS_INODE_NODATASUM; + BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM | + BTRFS_INODE_NODATACOW; block_group->disk_cache_state = BTRFS_DC_CLEAR; } @@ -123,12 +125,17 @@ int __create_free_space_inode(struct btrfs_root *root, struct btrfs_free_space_header *header; struct btrfs_inode_item *inode_item; struct extent_buffer *leaf; + u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC; int ret; ret = btrfs_insert_empty_inode(trans, root, path, ino); if (ret) return ret; + /* We inline crc's for the free disk space cache */ + if (ino != BTRFS_FREE_INO_OBJECTID) + flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW; + leaf = path->nodes[0]; inode_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); @@ -141,8 +148,7 @@ int __create_free_space_inode(struct btrfs_root *root, btrfs_set_inode_uid(leaf, inode_item, 0); 
btrfs_set_inode_gid(leaf, inode_item, 0); btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600); - btrfs_set_inode_flags(leaf, inode_item, BTRFS_INODE_NOCOMPRESS | - BTRFS_INODE_PREALLOC); + btrfs_set_inode_flags(leaf, inode_item, flags); btrfs_set_inode_nlink(leaf, inode_item, 1); btrfs_set_inode_transid(leaf, inode_item, trans->transid); btrfs_set_inode_block_group(leaf, inode_item, offset); @@ -249,6 +255,7 @@ struct io_ctl { unsigned long size; int index; int num_pages; + unsigned check_crcs:1; }; static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode, @@ -262,6 +269,8 @@ static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode, if (!io_ctl->pages) return -ENOMEM; io_ctl->root = root; + if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID) + io_ctl->check_crcs = 1; return 0; } @@ -340,25 +349,39 @@ static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation) io_ctl_map_page(io_ctl, 1); /* - * Skip the first 64bits to make sure theres a bogus crc for old - * kernels + * Skip the csum areas. If we don't check crcs then we just have a + * 64bit chunk at the front of the first page. */ - io_ctl->cur += sizeof(u64); + if (io_ctl->check_crcs) { + io_ctl->cur += (sizeof(u32) * io_ctl->num_pages); + io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages); + } else { + io_ctl->cur += sizeof(u64); + io_ctl->size -= sizeof(u64) * 2; + } val = io_ctl->cur; *val = cpu_to_le64(generation); io_ctl->cur += sizeof(u64); - io_ctl->size -= sizeof(u64) * 2; } static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation) { u64 *gen; - io_ctl_map_page(io_ctl, 0); + /* + * Skip the crc area. If we don't check crcs then we just have a 64bit + * chunk at the front of the first page. + */ + if (io_ctl->check_crcs) { + io_ctl->cur += sizeof(u32) * io_ctl->num_pages; + io_ctl->size -= sizeof(u64) + + (sizeof(u32) * io_ctl->num_pages); + } else { + io_ctl->cur += sizeof(u64); + io_ctl->size -= sizeof(u64) * 2; + } - /* Skip the bogus crc area */ - io_ctl->cur += sizeof(u64); gen = io_ctl->cur; if (le64_to_cpu(*gen) != generation) { printk_ratelimited(KERN_ERR "btrfs: space cache generation " @@ -368,7 +391,63 @@ static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation) return -EIO; } io_ctl->cur += sizeof(u64); - io_ctl->size -= sizeof(u64) * 2; + return 0; +} + +static void io_ctl_set_crc(struct io_ctl *io_ctl, int index) +{ + u32 *tmp; + u32 crc = ~(u32)0; + unsigned offset = 0; + + if (!io_ctl->check_crcs) { + io_ctl_unmap_page(io_ctl); + return; + } + + if (index == 0) + offset = sizeof(u32) * io_ctl->num_pages;; + + crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc, + PAGE_CACHE_SIZE - offset); + btrfs_csum_final(crc, (char *)&crc); + io_ctl_unmap_page(io_ctl); + tmp = kmap(io_ctl->pages[0]); + tmp += index; + *tmp = crc; + kunmap(io_ctl->pages[0]); +} + +static int io_ctl_check_crc(struct io_ctl *io_ctl, int index) +{ + u32 *tmp, val; + u32 crc = ~(u32)0; + unsigned offset = 0; + + if (!io_ctl->check_crcs) { + io_ctl_map_page(io_ctl, 0); + return 0; + } + + if (index == 0) + offset = sizeof(u32) * io_ctl->num_pages; + + tmp = kmap(io_ctl->pages[0]); + tmp += index; + val = *tmp; + kunmap(io_ctl->pages[0]); + + io_ctl_map_page(io_ctl, 0); + crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc, + PAGE_CACHE_SIZE - offset); + btrfs_csum_final(crc, (char *)&crc); + if (val != crc) { + printk_ratelimited(KERN_ERR "btrfs: csum mismatch on free " + "space cache\n"); + io_ctl_unmap_page(io_ctl); + return -EIO; + } + return 0; } @@ 
-391,22 +470,7 @@ static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes, if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) return 0; - /* - * index == 1 means the current page is 0, we need to generate a bogus - * crc for older kernels. - */ - if (io_ctl->index == 1) { - u32 *tmp; - u32 crc = ~(u32)0; - - crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + sizeof(u64), - crc, PAGE_CACHE_SIZE - sizeof(u64)); - btrfs_csum_final(crc, (char *)&crc); - crc++; - tmp = io_ctl->orig; - *tmp = crc; - } - io_ctl_unmap_page(io_ctl); + io_ctl_set_crc(io_ctl, io_ctl->index - 1); /* No more pages to map */ if (io_ctl->index >= io_ctl->num_pages) @@ -427,14 +491,14 @@ static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap) * map the next one if there is any left. */ if (io_ctl->cur != io_ctl->orig) { - io_ctl_unmap_page(io_ctl); + io_ctl_set_crc(io_ctl, io_ctl->index - 1); if (io_ctl->index >= io_ctl->num_pages) return -ENOSPC; io_ctl_map_page(io_ctl, 0); } memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE); - io_ctl_unmap_page(io_ctl); + io_ctl_set_crc(io_ctl, io_ctl->index - 1); if (io_ctl->index < io_ctl->num_pages) io_ctl_map_page(io_ctl, 0); return 0; @@ -442,51 +506,60 @@ static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap) static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl) { - io_ctl_unmap_page(io_ctl); + /* + * If we're not on the boundary we know we've modified the page and we + * need to crc the page. + */ + if (io_ctl->cur != io_ctl->orig) + io_ctl_set_crc(io_ctl, io_ctl->index - 1); + else + io_ctl_unmap_page(io_ctl); while (io_ctl->index < io_ctl->num_pages) { io_ctl_map_page(io_ctl, 1); - io_ctl_unmap_page(io_ctl); + io_ctl_set_crc(io_ctl, io_ctl->index - 1); } } -static u8 io_ctl_read_entry(struct io_ctl *io_ctl, - struct btrfs_free_space *entry) +static int io_ctl_read_entry(struct io_ctl *io_ctl, + struct btrfs_free_space *entry, u8 *type) { struct btrfs_free_space_entry *e; - u8 type; e = io_ctl->cur; entry->offset = le64_to_cpu(e->offset); entry->bytes = le64_to_cpu(e->bytes); - type = e->type; + *type = e->type; io_ctl->cur += sizeof(struct btrfs_free_space_entry); io_ctl->size -= sizeof(struct btrfs_free_space_entry); if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) - return type; + return 0; io_ctl_unmap_page(io_ctl); if (io_ctl->index >= io_ctl->num_pages) - return type; + return 0; - io_ctl_map_page(io_ctl, 0); - return type; + return io_ctl_check_crc(io_ctl, io_ctl->index); } -static void io_ctl_read_bitmap(struct io_ctl *io_ctl, - struct btrfs_free_space *entry) +static int io_ctl_read_bitmap(struct io_ctl *io_ctl, + struct btrfs_free_space *entry) { - BUG_ON(!io_ctl->cur); - if (io_ctl->cur != io_ctl->orig) { + int ret; + + if (io_ctl->cur && io_ctl->cur != io_ctl->orig) io_ctl_unmap_page(io_ctl); - io_ctl_map_page(io_ctl, 0); - } + + ret = io_ctl_check_crc(io_ctl, io_ctl->index); + if (ret) + return ret; + memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE); io_ctl_unmap_page(io_ctl); - if (io_ctl->index < io_ctl->num_pages) - io_ctl_map_page(io_ctl, 0); + + return 0; } int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, @@ -553,6 +626,10 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, if (ret) goto out; + ret = io_ctl_check_crc(&io_ctl, 0); + if (ret) + goto free_cache; + ret = io_ctl_check_generation(&io_ctl, generation); if (ret) goto free_cache; @@ -563,7 +640,12 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, if (!e) goto 
free_cache; - type = io_ctl_read_entry(&io_ctl, e); + ret = io_ctl_read_entry(&io_ctl, e, &type); + if (ret) { + kmem_cache_free(btrfs_free_space_cachep, e); + goto free_cache; + } + if (!e->bytes) { kmem_cache_free(btrfs_free_space_cachep, e); goto free_cache; @@ -611,7 +693,9 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, */ list_for_each_entry_safe(e, n, &bitmaps, list) { list_del_init(&e->list); - io_ctl_read_bitmap(&io_ctl, e); + ret = io_ctl_read_bitmap(&io_ctl, e); + if (ret) + goto free_cache; } io_ctl_drop_pages(&io_ctl); @@ -632,7 +716,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root = fs_info->tree_root; struct inode *inode; struct btrfs_path *path; - int ret; + int ret = 0; bool matched; u64 used = btrfs_block_group_used(&block_group->item); @@ -664,6 +748,14 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, return 0; } + /* We may have converted the inode and made the cache invalid. */ + spin_lock(&block_group->lock); + if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { + spin_unlock(&block_group->lock); + goto out; + } + spin_unlock(&block_group->lock); + ret = __load_free_space_cache(fs_info->tree_root, inode, ctl, path, block_group->key.objectid); btrfs_free_path(path); @@ -774,6 +866,13 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, cluster = NULL; } + /* Make sure we can fit our crcs into the first page */ + if (io_ctl.check_crcs && + (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) { + WARN_ON(1); + goto out_nospc; + } + io_ctl_set_generation(&io_ctl, trans->transid); /* Write out the extent entries */ @@ -864,8 +963,8 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, ret = btrfs_search_slot(trans, root, &key, path, 0, 1); if (ret < 0) { clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, - EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS); + EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL, + GFP_NOFS); goto out; } leaf = path->nodes[0]; @@ -878,9 +977,8 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, found_key.offset != offset) { clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, - EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, 0, 0, NULL, - GFP_NOFS); + EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, + NULL, GFP_NOFS); btrfs_release_path(path); goto out; } @@ -942,7 +1040,6 @@ int btrfs_write_out_cache(struct btrfs_root *root, ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans, path, block_group->key.objectid); if (ret) { - btrfs_delalloc_release_metadata(inode, inode->i_size); spin_lock(&block_group->lock); block_group->disk_cache_state = BTRFS_DC_ERROR; spin_unlock(&block_group->lock); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 15adfb542502..246397d8478f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1792,12 +1792,12 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) } ret = 0; out: - btrfs_delalloc_release_metadata(inode, ordered_extent->len); - if (nolock) { - if (trans) + if (root != root->fs_info->tree_root) + btrfs_delalloc_release_metadata(inode, ordered_extent->len); + if (trans) { + if (nolock) btrfs_end_transaction_nolock(trans, root); - } else { - if (trans) + else btrfs_end_transaction(trans, root); } -- cgit v1.2.3 From 36ba022ac0b748dd543f43430b03198e899426c9 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 18 Oct 2011 12:15:48 -0400 Subject: Btrfs: separate
btrfs_block_rsv_check out into 2 different functions Currently btrfs_block_rsv_check does 2 things: it will either refill a block reserve like in the truncate or refill case, or it will check to see if there is enough space in the global reserve and possibly refill it. However, because of overcommit we could be well overcommitting ourselves just to try and refill the global reserve, when really we should just be committing the transaction. So break this out into btrfs_block_rsv_refill and btrfs_block_rsv_check. Refill will try to reserve more metadata if it can and btrfs_block_rsv_check will not, it will only tell you if the factor of the total space is still reserved. (A minimal sketch of the new contract follows the next patch below.) Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 4 +++- fs/btrfs/extent-tree.c | 41 +++++++++++++++++++++++++++-------------- fs/btrfs/free-space-cache.c | 2 +- fs/btrfs/inode.c | 4 ++-- fs/btrfs/relocation.c | 6 ++---- fs/btrfs/transaction.c | 4 ++-- 6 files changed, 37 insertions(+), 24 deletions(-) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index ea60897a9171..227620993bce 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2252,8 +2252,10 @@ int btrfs_block_rsv_add(struct btrfs_root *root, struct btrfs_block_rsv *block_rsv, u64 num_bytes); int btrfs_block_rsv_check(struct btrfs_root *root, + struct btrfs_block_rsv *block_rsv, int min_factor); +int btrfs_block_rsv_refill(struct btrfs_root *root, struct btrfs_block_rsv *block_rsv, - u64 min_reserved, int min_factor, int flush); + u64 min_reserved); int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, struct btrfs_block_rsv *dst_rsv, u64 num_bytes); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index eb4fe56b08bb..a5f1421eeee9 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3422,7 +3422,6 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, * @block_rsv - the block_rsv we're allocating for * @orig_bytes - the number of bytes we want * @flush - wether or not we can flush to make our reservation - * @check - wether this is just to check if we have enough space or not * * This will reserve orgi_bytes number of bytes from the space info associated * with the block_rsv.
If there is not enough space it will make an attempt to @@ -3433,7 +3432,7 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, */ static int reserve_metadata_bytes(struct btrfs_root *root, struct btrfs_block_rsv *block_rsv, - u64 orig_bytes, int flush, int check) + u64 orig_bytes, int flush) { struct btrfs_space_info *space_info = block_rsv->space_info; struct btrfs_trans_handle *trans; @@ -3507,7 +3506,7 @@ again: (orig_bytes * (retries + 1)); } - if (ret && !check) { + if (ret) { u64 profile = btrfs_get_alloc_profile(root, 0); u64 avail; @@ -3742,7 +3741,7 @@ int btrfs_block_rsv_add(struct btrfs_root *root, if (num_bytes == 0) return 0; - ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1, 0); + ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1); if (!ret) { block_rsv_add_bytes(block_rsv, num_bytes, 1); return 0; @@ -3752,8 +3751,7 @@ int btrfs_block_rsv_add(struct btrfs_root *root, } int btrfs_block_rsv_check(struct btrfs_root *root, - struct btrfs_block_rsv *block_rsv, - u64 min_reserved, int min_factor, int flush) + struct btrfs_block_rsv *block_rsv, int min_factor) { u64 num_bytes = 0; int ret = -ENOSPC; @@ -3762,11 +3760,26 @@ int btrfs_block_rsv_check(struct btrfs_root *root, return 0; spin_lock(&block_rsv->lock); - if (min_factor > 0) - num_bytes = div_factor(block_rsv->size, min_factor); - if (min_reserved > num_bytes) - num_bytes = min_reserved; + num_bytes = div_factor(block_rsv->size, min_factor); + if (block_rsv->reserved >= num_bytes) + ret = 0; + spin_unlock(&block_rsv->lock); + return ret; +} + +int btrfs_block_rsv_refill(struct btrfs_root *root, + struct btrfs_block_rsv *block_rsv, + u64 min_reserved) +{ + u64 num_bytes = 0; + int ret = -ENOSPC; + + if (!block_rsv) + return 0; + + spin_lock(&block_rsv->lock); + num_bytes = min_reserved; if (block_rsv->reserved >= num_bytes) ret = 0; else @@ -3776,7 +3789,7 @@ int btrfs_block_rsv_check(struct btrfs_root *root, if (!ret) return 0; - ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush, !flush); + ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1); if (!ret) { block_rsv_add_bytes(block_rsv, num_bytes, 0); return 0; @@ -4073,7 +4086,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) to_reserve += calc_csum_metadata_size(inode, num_bytes, 1); spin_unlock(&BTRFS_I(inode)->lock); - ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush, 0); + ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush); if (ret) { u64 to_free = 0; unsigned dropped; @@ -5728,7 +5741,7 @@ use_block_rsv(struct btrfs_trans_handle *trans, block_rsv = get_block_rsv(trans, root); if (block_rsv->size == 0) { - ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0, 0); + ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0); /* * If we couldn't reserve metadata bytes try and use some from * the global reserve. 
@@ -5749,7 +5762,7 @@ use_block_rsv(struct btrfs_trans_handle *trans, return block_rsv; if (ret) { WARN_ON(1); - ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0, 0); + ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0); if (!ret) { return block_rsv; } else if (ret && block_rsv != global_rsv) { diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 5d40c1ed8225..2fecfc3183ee 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -203,7 +203,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root, rsv = trans->block_rsv; trans->block_rsv = root->orphan_block_rsv; - ret = btrfs_block_rsv_check(root, root->orphan_block_rsv, 0, 5, 0); + ret = btrfs_block_rsv_check(root, root->orphan_block_rsv, 5); if (ret) return ret; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index b6b70bdd0992..f12747c9447b 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3604,7 +3604,7 @@ void btrfs_evict_inode(struct inode *inode) * doing the truncate. */ while (1) { - ret = btrfs_block_rsv_check(root, rsv, min_size, 0, 1); + ret = btrfs_block_rsv_refill(root, rsv, min_size); /* * Try and steal from the global reserve since we will @@ -6613,7 +6613,7 @@ static int btrfs_truncate(struct inode *inode) btrfs_add_ordered_operation(trans, root, inode); while (1) { - ret = btrfs_block_rsv_check(root, rsv, min_size, 0, 1); + ret = btrfs_block_rsv_refill(root, rsv, min_size); if (ret) { /* * This can only happen with the original transaction we diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 7fa090fa0d39..10af6a0e0865 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2041,8 +2041,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, BUG_ON(IS_ERR(trans)); trans->block_rsv = rc->block_rsv; - ret = btrfs_block_rsv_check(root, rc->block_rsv, min_reserved, - 0, 0); + ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved); if (ret) { BUG_ON(ret != -EAGAIN); ret = btrfs_commit_transaction(trans, root); @@ -3774,8 +3773,7 @@ restart: } } - ret = btrfs_block_rsv_check(rc->extent_root, rc->block_rsv, 0, - 5, 0); + ret = btrfs_block_rsv_check(rc->extent_root, rc->block_rsv, 5); if (ret < 0) { if (ret != -EAGAIN) { err = ret; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index d064fa0a4a07..29bef63e23ba 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -418,8 +418,8 @@ static int should_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root) { int ret; - ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 0, - 5, 0); + + ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5); return ret ? 1 : 0; } -- cgit v1.2.3 From 016fc6a63e465d5b94e4028f6d05d9703e195428 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 19 Oct 2011 10:31:37 -0400 Subject: Btrfs: don't flush the cache inode before writing it I noticed we had a little bit of latency when writing out the space cache inode. This is because we flush it before we write anything in case we have dirty pages already there. This doesn't matter though since we're just going to overwrite the space, and there really shouldn't be any dirty pages anyway. This makes some of my tests run a little bit faster.
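Picking up the btrfs_block_rsv_check/btrfs_block_rsv_refill split from the patch above, here is a minimal user-space sketch of the two contracts. The struct and helpers (block_rsv, div_factor, reserve_more) are simplified stand-ins for the kernel's types and reserve_metadata_bytes, not the real API.

    #include <stdint.h>
    #include <errno.h>

    struct block_rsv {
            uint64_t size;     /* target size of the reserve */
            uint64_t reserved; /* bytes currently reserved */
    };

    /* factor is in tenths, matching the kernel's div_factor() */
    static uint64_t div_factor(uint64_t num, int factor)
    {
            return num * factor / 10;
    }

    /* stand-in for reserve_metadata_bytes(); assumed to exist elsewhere */
    int reserve_more(struct block_rsv *rsv, uint64_t bytes);

    /* check: read-only, never reserves anything */
    int rsv_check(const struct block_rsv *rsv, int min_factor)
    {
            return rsv->reserved >= div_factor(rsv->size, min_factor) ? 0 : -ENOSPC;
    }

    /* refill: tops the reserve up to min_reserved, reserving more if needed */
    int rsv_refill(struct block_rsv *rsv, uint64_t min_reserved)
    {
            if (rsv->reserved >= min_reserved)
                    return 0;
            return reserve_more(rsv, min_reserved - rsv->reserved);
    }

The point of the split is that callers like should_end_transaction() only want the cheap read-only answer, while truncate and evict genuinely want more space reserved.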
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 2fecfc3183ee..de205d59b74b 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -829,10 +829,6 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, if (!i_size_read(inode)) return -1; - filemap_write_and_wait(inode->i_mapping); - btrfs_wait_ordered_range(inode, inode->i_size & - ~(root->sectorsize - 1), (u64)-1); - io_ctl_init(&io_ctl, inode, root); /* Get the cluster for this block_group if it exists */ -- cgit v1.2.3 From 1eae31e918972bbeefc119d23c1d67674f49a301 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 14 Oct 2011 06:31:20 -0400 Subject: Btrfs: make sure btrfs_remove_free_space doesn't leak EAGAIN btrfs_remove_free_space needs to make sure to set ret back to a valid return value after setting it to EAGAIN; otherwise we return it to the callers. Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index de205d59b74b..f49475dfa954 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1868,6 +1868,7 @@ again: ctl->total_bitmaps--; } kmem_cache_free(btrfs_free_space_cachep, info); + ret = 0; goto out_lock; } @@ -1875,7 +1876,8 @@ again: unlink_free_space(ctl, info); info->offset += bytes; info->bytes -= bytes; - link_free_space(ctl, info); + ret = link_free_space(ctl, info); + WARN_ON(ret); goto out_lock; } -- cgit v1.2.3 From c8174313a8102e874aaa321e2fc4c7c460a87151 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 2 Nov 2011 09:29:35 -0400 Subject: Btrfs: use the global reserve when truncating the free space cache inode We no longer use the orphan block rsv for holding the reservation for truncating the inode, so instead use the global block rsv and check to make sure it has enough space for us to truncate the space.
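To make the borrow-from-the-global-reserve rule concrete, a hedged sketch follows; the struct and calc_* helpers are placeholders for the kernel's types and for btrfs_calc_trunc_metadata_size/btrfs_calc_trans_metadata_size, and the real code samples the reserve under its spinlock.

    #include <stdint.h>
    #include <errno.h>

    struct block_rsv { uint64_t reserved; };

    /* stand-ins for the kernel's metadata-size calculators */
    uint64_t calc_trunc_metadata_size(int num_items);
    uint64_t calc_trans_metadata_size(int num_items);

    /* 0 if the global reserve can cover truncating the cache inode */
    int can_borrow_global_rsv(const struct block_rsv *global_rsv)
    {
            /* 1 item of slack for the truncate itself, 1 for the inode update */
            uint64_t needed = calc_trunc_metadata_size(1) +
                              calc_trans_metadata_size(1);

            /* the real code reads 'reserved' under the reserve's spinlock */
            return global_rsv->reserved >= needed ? 0 : -ENOSPC;
    }

The design choice is to fail early with -ENOSPC rather than dip into space the global reserve cannot spare mid-transaction.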
Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index f49475dfa954..7a15fcfb3e1f 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -198,14 +198,24 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root, struct inode *inode) { struct btrfs_block_rsv *rsv; + u64 needed_bytes; loff_t oldsize; int ret = 0; rsv = trans->block_rsv; - trans->block_rsv = root->orphan_block_rsv; - ret = btrfs_block_rsv_check(root, root->orphan_block_rsv, 5); - if (ret) - return ret; + trans->block_rsv = &root->fs_info->global_block_rsv; + + /* 1 for slack space, 1 for updating the inode */ + needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) + + btrfs_calc_trans_metadata_size(root, 1); + + spin_lock(&trans->block_rsv->lock); + if (trans->block_rsv->reserved < needed_bytes) { + spin_unlock(&trans->block_rsv->lock); + trans->block_rsv = rsv; + return -ENOSPC; + } + spin_unlock(&trans->block_rsv->lock); oldsize = i_size_read(inode); btrfs_i_size_write(inode, 0); @@ -218,13 +228,15 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root, ret = btrfs_truncate_inode_items(trans, root, inode, 0, BTRFS_EXTENT_DATA_KEY); - trans->block_rsv = rsv; if (ret) { + trans->block_rsv = rsv; WARN_ON(1); return ret; } ret = btrfs_update_inode(trans, root, inode); + trans->block_rsv = rsv; + return ret; } -- cgit v1.2.3 From 2f120c05e67ae34c93786b1050c6828904314429 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 10 Nov 2011 20:45:05 -0500 Subject: Btrfs: only map pages if we know we need them when reading the space cache People have been running into a warning when loading space cache because the page is already mapped when trying to read in a bitmap. The way we read in entries and pages is kind of convoluted, so fix it so that io_ctl_read_entry maps the entries if it needs to, and if it hits the end of the page it simply unmaps the page. That way we can unconditionally unmap the io_ctl before reading in the bitmap and we should stop hitting these warnings. 
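The shape of the fix stands out in isolation: map (and crc-check) a page only when a reader first touches it, and unmap as soon as the page is exhausted, so the bitmap reader never finds a stale mapping. A simplified sketch with stand-in types and helper names, not the kernel code:

    #include <stdint.h>
    #include <stddef.h>

    #define ENTRY_SIZE (2 * sizeof(uint64_t) + 1) /* offset + bytes + type */

    struct io_ctl_sketch {
            uint8_t *cur;  /* NULL when no page is mapped */
            size_t size;   /* bytes left on the mapped page */
            int index;     /* which page a fresh mapping would cover */
    };

    /* stand-ins for io_ctl_check_crc()/io_ctl_unmap_page() */
    int check_crc_and_map(struct io_ctl_sketch *io, int index);
    void unmap_page(struct io_ctl_sketch *io);

    int read_entry(struct io_ctl_sketch *io)
    {
            if (!io->cur) { /* map (and verify) lazily on first use */
                    int ret = check_crc_and_map(io, io->index);
                    if (ret)
                            return ret;
            }

            /* ... decode one free-space entry from io->cur here ... */
            /* the caller guarantees a full entry remains on the page */
            io->cur += ENTRY_SIZE;
            io->size -= ENTRY_SIZE;

            if (io->size < ENTRY_SIZE)
                    unmap_page(io); /* exhausted; the next reader remaps lazily */
            return 0;
    }

With this shape, the caller can unconditionally unmap before switching to bitmap reads without tripping the double-map warning.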
Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 7a15fcfb3e1f..181760f9d2ab 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -537,6 +537,13 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl, struct btrfs_free_space *entry, u8 *type) { struct btrfs_free_space_entry *e; + int ret; + + if (!io_ctl->cur) { + ret = io_ctl_check_crc(io_ctl, io_ctl->index); + if (ret) + return ret; + } e = io_ctl->cur; entry->offset = le64_to_cpu(e->offset); @@ -550,10 +557,7 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl, io_ctl_unmap_page(io_ctl); - if (io_ctl->index >= io_ctl->num_pages) - return 0; - - return io_ctl_check_crc(io_ctl, io_ctl->index); + return 0; } static int io_ctl_read_bitmap(struct io_ctl *io_ctl, @@ -561,9 +565,6 @@ static int io_ctl_read_bitmap(struct io_ctl *io_ctl, { int ret; - if (io_ctl->cur && io_ctl->cur != io_ctl->orig) - io_ctl_unmap_page(io_ctl); - ret = io_ctl_check_crc(io_ctl, io_ctl->index); if (ret) return ret; @@ -699,6 +700,8 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, num_entries--; } + io_ctl_unmap_page(&io_ctl); + /* * We add the bitmaps at the end of the entries in order that * the bitmap entries are added to the cache. -- cgit v1.2.3 From 0f0fbf1d0e188d129756e9508090af4bdbfde00b Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Sun, 20 Nov 2011 07:33:38 -0500 Subject: Btrfs: fix to search one more bitmap for cluster setup Suppose there are two bitmaps [0, 256], [256, 512] and one extent [100, 120] in the free space cache, and we want to set up a cluster with offset=100, bytes=50. In this case, there will be only one bitmap [256, 512] in the temporary bitmaps list, and then setup_cluster_bitmap() won't search bitmap [0, 256]. The cause is that the list is constructed in setup_cluster_no_bitmap(), and only bitmaps with bitmap_entry->offset >= offset will be added into the list, and the very bitmap that covers offset has bitmap_entry->offset <= offset. Signed-off-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 181760f9d2ab..8f792f41feab 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -2453,10 +2453,22 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_space *entry; struct rb_node *node; int ret = -ENOSPC; + u64 bitmap_offset = offset_to_bitmap(ctl, offset); if (ctl->total_bitmaps == 0) return -ENOSPC; + /* + * The bitmap that covers offset won't be in the list unless offset + * is just its start offset. + */ + entry = list_first_entry(bitmaps, struct btrfs_free_space, list); + if (entry->offset != bitmap_offset) { + entry = tree_search_offset(ctl, bitmap_offset, 1, 0); + if (entry && list_empty(&entry->list)) + list_add(&entry->list, bitmaps); + } + /* * First check our cached list of bitmaps and see if there is an entry * here that will work.
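A tiny standalone program makes the arithmetic behind this fix concrete. BITMAP_RANGE is a stand-in for BITS_PER_BITMAP * ctl->unit, and 256 matches the units of the example in the commit message.

    #include <stdint.h>
    #include <stdio.h>

    #define BITMAP_RANGE 256u /* each bitmap covers an aligned 256-unit window */

    /* start offset of the bitmap that covers a given offset */
    static uint64_t offset_to_bitmap(uint64_t offset)
    {
            return offset - (offset % BITMAP_RANGE);
    }

    int main(void)
    {
            uint64_t offset = 100; /* the cluster offset from the example above */
            uint64_t bitmap_offset = offset_to_bitmap(offset);

            /* prints 0: the covering bitmap [0, 256) starts below offset, so
             * it was never added to the candidate list before this fix */
            printf("covering bitmap starts at %llu\n",
                   (unsigned long long)bitmap_offset);
            return 0;
    }

Since 0 != 100, the patched code looks the covering bitmap up in the tree and adds it to the candidate list before scanning.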
-- cgit v1.2.3 From 52621cb6ed0e0e14358bb317bda7cd5fbd5c2a27 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Sun, 20 Nov 2011 07:33:38 -0500 Subject: Btrfs: avoid unnecessary bitmap search for cluster setup setup_cluster_no_bitmap() searches all the extents and bitmaps starting from offset. Therefore, if it returns -ENOSPC, all the bitmaps starting from offset are in the bitmaps list, so it's sufficient to search from this list in setup_cluster_bitmap(). Signed-off-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 42 ++++-------------------------------------- 1 file changed, 4 insertions(+), 38 deletions(-) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 8f792f41feab..8c32434da2cb 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -2451,7 +2451,6 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry; - struct rb_node *node; int ret = -ENOSPC; u64 bitmap_offset = offset_to_bitmap(ctl, offset); @@ -2469,10 +2468,6 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, list_add(&entry->list, bitmaps); } - /* - * First check our cached list of bitmaps and see if there is an entry - * here that will work. - */ list_for_each_entry(entry, bitmaps, list) { if (entry->bytes < min_bytes) continue; @@ -2483,38 +2478,10 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, } /* - * If we do have entries on our list and we are here then we didn't find - * anything, so go ahead and get the next entry after the last entry in - * this list and start the search from there. + * The bitmaps list has all the bitmaps that record free space + * starting after offset, so no more search is required.
*/ - if (!list_empty(bitmaps)) { - entry = list_entry(bitmaps->prev, struct btrfs_free_space, - list); - node = rb_next(&entry->offset_index); - if (!node) - return -ENOSPC; - entry = rb_entry(node, struct btrfs_free_space, offset_index); - goto search; - } - - entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1); - if (!entry) - return -ENOSPC; - -search: - node = &entry->offset_index; - do { - entry = rb_entry(node, struct btrfs_free_space, offset_index); - node = rb_next(&entry->offset_index); - if (!entry->bitmap) - continue; - if (entry->bytes < min_bytes) - continue; - ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, - bytes, min_bytes); - } while (ret && node); - - return ret; + return -ENOSPC; } /* @@ -2532,8 +2499,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, u64 offset, u64 bytes, u64 empty_size) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; - struct list_head bitmaps; struct btrfs_free_space *entry, *tmp; + LIST_HEAD(bitmaps); u64 min_bytes; int ret; @@ -2572,7 +2539,6 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, goto out; } - INIT_LIST_HEAD(&bitmaps); ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, bytes, min_bytes); if (ret) -- cgit v1.2.3 From f7d61dcd6873c49bcc42be2caa2af1c2511aa915 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 15 Nov 2011 09:31:24 -0500 Subject: Btrfs: clear pages dirty for io and set them extent mapped When doing the io_ctl helpers to clean up the free space cache stuff, I stopped using our normal prepare_pages stuff, which means I of course forgot to do things like set the pages extent mapped, which will cause us all sorts of wonderful problems. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 8c32434da2cb..aedacdbf77e2 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -351,6 +351,11 @@ static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode, } } + for (i = 0; i < io_ctl->num_pages; i++) { + clear_page_dirty_for_io(io_ctl->pages[i]); + set_page_extent_mapped(io_ctl->pages[i]); + } + return 0; } -- cgit v1.2.3 From 24a70313969fc3fc440216b40babdb42564acff3 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 21 Nov 2011 09:39:11 -0500 Subject: Btrfs: remove free-space-cache.c WARN during log replay The log replay code only partially loads block groups, since the block group caching code is able to detect and deal with extents the logging code has pinned down. While the logging code is pinning down block groups, there is a bogus WARN_ON we're hitting if the code wasn't able to find an extent in the cache. This commit removes the warning because it can happen any time there isn't a valid free space cache for that block group. Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index aedacdbf77e2..6e5b7e463698 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1849,7 +1849,13 @@ again: info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1, 0); if (!info) { - WARN_ON(1); + /* the tree logging code might be calling us before we * have fully loaded the free space rbtree for this * block group.
So it is possible the entry won't + * be in the rbtree yet at all. The caching code + * will make sure not to put it in the rbtree if + * the logging code has pinned it. + */ goto out_lock; } } -- cgit v1.2.3 From f2d0f6765d6332f9be732965a0c6f3b8a55082b4 Mon Sep 17 00:00:00 2001 From: Alexandre Oliva Date: Mon, 28 Nov 2011 12:04:43 -0200 Subject: Btrfs: initialize new bitmaps' list We're failing to create clusters with bitmaps because setup_cluster_no_bitmap checks that the list is empty before inserting the bitmap entry in the list for setup_cluster_bitmap, but the list field is only initialized when it is restored from the on-disk free space cache, or when it is written out to disk. Besides a potential race condition due to the multiple use of the list field, filesystem performance severely degrades over time: as we use up all non-bitmap free extents, the try-to-set-up-cluster dance is done at every metadata block allocation. For every block group, we fail to set up a cluster, and after failing on them all up to twice, we fall back to the much slower unclustered allocation. To make matters worse, before the unclustered allocation, we try to create new block groups until we reach the 1% threshold, which introduces additional bitmaps and thus block groups that we'll iterate over at each metadata block request. --- fs/btrfs/free-space-cache.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 6e5b7e463698..ff179b1e7423 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1470,6 +1470,7 @@ static void add_new_bitmap(struct btrfs_free_space_ctl *ctl, { info->offset = offset_to_bitmap(ctl, offset); info->bytes = 0; + INIT_LIST_HEAD(&info->list); link_free_space(ctl, info); ctl->total_bitmaps++; -- cgit v1.2.3 From b78d09bceb524ee6481c21b77bda22d766b10e6a Mon Sep 17 00:00:00 2001 From: Alexandre Oliva Date: Wed, 30 Nov 2011 13:43:00 -0500 Subject: Btrfs: reset cluster's max_size when creating bitmap The field that indicates the size of the largest contiguous chunk of free space in the cluster is not initialized when setting up bitmaps; it's only increased when we find a larger contiguous chunk. We end up retaining a larger value than appropriate for highly-fragmented clusters, which may cause pointless searches for large contiguous groups, and even cause clusters that do not meet the density requirements to be set up. Signed-off-by: Alexandre Oliva Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs/free-space-cache.c') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index ff179b1e7423..ec23d43d0c35 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -2320,6 +2320,7 @@ again: if (!found) { start = i; + cluster->max_size = 0; found = true; } -- cgit v1.2.3
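To close out the series, here is a small self-contained illustration of the bookkeeping rule behind the one-line fix above: the largest-run tracker must be zeroed at the start of every scan, or a value left over from an earlier, less fragmented candidate inflates cluster->max_size. The helper is illustrative, not kernel code.

    #include <stddef.h>

    /* Longest run of set bits in a bitmap. 'max_size' starts at 0 for every
     * scan, which is exactly what the cluster->max_size reset restores. */
    size_t largest_run(const unsigned char *bitmap, size_t nbits)
    {
            size_t i, run = 0, max_size = 0;

            for (i = 0; i < nbits; i++) {
                    if (bitmap[i / 8] & (1u << (i % 8))) {
                            run++;
                            if (run > max_size)
                                    max_size = run;
                    } else {
                            run = 0;
                    }
            }
            return max_size;
    }

Without the per-scan reset, a highly fragmented bitmap can inherit a stale maximum and be chosen for a cluster it cannot actually satisfy.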