author    Linus Torvalds <torvalds@linux-foundation.org>  2017-11-14 12:59:42 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-11-14 12:59:42 -0800
commit    ae9a8c4bdc91202b4236372eed53c54d2297c71b (patch)
tree      4596680ee808334d246ad2f93bdd743d76c3741a /fs/ext4/resize.c
parent    32190f0afbf4f1c0a9142e5a886a078ee0b794fd (diff)
parent    232530680290ba94ca37852ab10d9556ea28badf (diff)
Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 updates from Ted Ts'o:

 - Add support for online resizing of file systems with bigalloc

 - Fix two data corruption bugs involving DAX, as well as a corruption
   bug after a crash during a racing fallocate and delayed allocation

 - Finally, a number of cleanups and optimizations

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: improve smp scalability for inode generation
  ext4: add support for online resizing with bigalloc
  ext4: mention noload when recovering on read-only device
  Documentation: fix little inconsistencies
  ext4: convert timers to use timer_setup()
  jbd2: convert timers to use timer_setup()
  ext4: remove duplicate extended attributes defs
  ext4: add ext4_should_use_dax()
  ext4: add sanity check for encryption + DAX
  ext4: prevent data corruption with journaling + DAX
  ext4: prevent data corruption with inline data + DAX
  ext4: fix interaction between i_size, fallocate, and delalloc after a crash
  ext4: retry allocations conservatively
  ext4: Switch to iomap for SEEK_HOLE / SEEK_DATA
  ext4: Add iomap support for inline data
  iomap: Add IOMAP_F_DATA_INLINE flag
  iomap: Switch from blkno to disk offset
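The bigalloc resize support below is largely a matter of converting block counts in fs/ext4/resize.c into cluster counts. As a rough illustration of the arithmetic involved (not code from the patch; the kernel uses the EXT4_B2C()/EXT4_C2B()/EXT4_NUM_B2C() macros from fs/ext4/ext4.h, driven by sbi->s_cluster_bits), here is a minimal standalone sketch assuming 4 KiB blocks and 64 KiB clusters:

/*
 * Standalone sketch of the block<->cluster arithmetic. Assumed values:
 * 4 KiB blocks, 64 KiB clusters, i.e. s_cluster_bits == 4. Illustrative
 * only; the in-kernel equivalents are EXT4_B2C()/EXT4_C2B()/EXT4_NUM_B2C().
 */
#include <stdio.h>

typedef unsigned long long ext4_fsblk_t;

#define CLUSTER_BITS  4                          /* log2(cluster size / block size) */
#define CLUSTER_RATIO (1ULL << CLUSTER_BITS)     /* blocks per cluster */

/* Block number -> number of the cluster containing it (round down). */
static ext4_fsblk_t b2c(ext4_fsblk_t block)
{
	return block >> CLUSTER_BITS;
}

/* Cluster number -> its first block number. */
static ext4_fsblk_t c2b(ext4_fsblk_t cluster)
{
	return cluster << CLUSTER_BITS;
}

/* Block count -> number of clusters needed to cover it (round up). */
static ext4_fsblk_t num_b2c(ext4_fsblk_t blocks)
{
	return (blocks + CLUSTER_RATIO - 1) >> CLUSTER_BITS;
}

int main(void)
{
	printf("block 100 lives in cluster %llu\n", b2c(100));
	printf("cluster 6 starts at block %llu\n", c2b(6));
	printf("17 metadata blocks occupy %llu cluster(s)\n", num_b2c(17));
	return 0;
}

The round-up in the last conversion is why the patch accumulates per-group metadata blocks in a new mdata_blocks field and converts the total to clusters once with EXT4_NUM_B2C(), instead of decrementing a free-block count one block at a time.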
Diffstat (limited to 'fs/ext4/resize.c')
-rw-r--r--  fs/ext4/resize.c | 104
1 file changed, 66 insertions(+), 38 deletions(-)
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 1dac59c24792..50443bda8e98 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -107,7 +107,7 @@ static int verify_group_input(struct super_block *sb,
overhead = ext4_group_overhead_blocks(sb, group);
metaend = start + overhead;
- input->free_blocks_count = free_blocks_count =
+ input->free_clusters_count = free_blocks_count =
input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
if (test_opt(sb, DEBUG))
@@ -258,6 +258,7 @@ static int ext4_alloc_group_tables(struct super_block *sb,
ext4_group_t last_group;
unsigned overhead;
__u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
+ int i;
BUG_ON(flex_gd->count == 0 || group_data == NULL);
@@ -294,7 +295,7 @@ next_group:
group_data[bb_index].block_bitmap = start_blk++;
group = ext4_get_group_number(sb, start_blk - 1);
group -= group_data[0].group;
- group_data[group].free_blocks_count--;
+ group_data[group].mdata_blocks++;
flex_gd->bg_flags[group] &= uninit_mask;
}
@@ -305,7 +306,7 @@ next_group:
group_data[ib_index].inode_bitmap = start_blk++;
group = ext4_get_group_number(sb, start_blk - 1);
group -= group_data[0].group;
- group_data[group].free_blocks_count--;
+ group_data[group].mdata_blocks++;
flex_gd->bg_flags[group] &= uninit_mask;
}
@@ -324,15 +325,22 @@ next_group:
if (start_blk + itb > next_group_start) {
flex_gd->bg_flags[group + 1] &= uninit_mask;
overhead = start_blk + itb - next_group_start;
- group_data[group + 1].free_blocks_count -= overhead;
+ group_data[group + 1].mdata_blocks += overhead;
itb -= overhead;
}
- group_data[group].free_blocks_count -= itb;
+ group_data[group].mdata_blocks += itb;
flex_gd->bg_flags[group] &= uninit_mask;
start_blk += EXT4_SB(sb)->s_itb_per_group;
}
+ /* Update free clusters count to exclude metadata blocks */
+ for (i = 0; i < flex_gd->count; i++) {
+ group_data[i].free_clusters_count -=
+ EXT4_NUM_B2C(EXT4_SB(sb),
+ group_data[i].mdata_blocks);
+ }
+
if (test_opt(sb, DEBUG)) {
int i;
group = group_data[0].group;
@@ -342,12 +350,13 @@ next_group:
flexbg_size);
for (i = 0; i < flex_gd->count; i++) {
- printk(KERN_DEBUG "adding %s group %u: %u "
- "blocks (%d free)\n",
+ ext4_debug(
+ "adding %s group %u: %u blocks (%d free, %d mdata blocks)\n",
ext4_bg_has_super(sb, group + i) ? "normal" :
"no-super", group + i,
group_data[i].blocks_count,
- group_data[i].free_blocks_count);
+ group_data[i].free_clusters_count,
+ group_data[i].mdata_blocks);
}
}
return 0;
@@ -399,7 +408,7 @@ static int extend_or_restart_transaction(handle_t *handle, int thresh)
}
/*
- * set_flexbg_block_bitmap() mark @count blocks starting from @block used.
+ * set_flexbg_block_bitmap() mark clusters [@first_cluster, @last_cluster] used.
*
* Helper function for ext4_setup_new_group_blocks() which set .
*
@@ -409,22 +418,26 @@ static int extend_or_restart_transaction(handle_t *handle, int thresh)
*/
static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
struct ext4_new_flex_group_data *flex_gd,
- ext4_fsblk_t block, ext4_group_t count)
+ ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster)
{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ ext4_group_t count = last_cluster - first_cluster + 1;
ext4_group_t count2;
- ext4_debug("mark blocks [%llu/%u] used\n", block, count);
- for (count2 = count; count > 0; count -= count2, block += count2) {
+ ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
+ last_cluster);
+ for (count2 = count; count > 0;
+ count -= count2, first_cluster += count2) {
ext4_fsblk_t start;
struct buffer_head *bh;
ext4_group_t group;
int err;
- group = ext4_get_group_number(sb, block);
- start = ext4_group_first_block_no(sb, group);
+ group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
+ start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group));
group -= flex_gd->groups[0].group;
- count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
+ count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start);
if (count2 > count)
count2 = count;
@@ -445,9 +458,9 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
err = ext4_journal_get_write_access(handle, bh);
if (err)
return err;
- ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
- block - start, count2);
- ext4_set_bits(bh->b_data, block - start, count2);
+ ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
+ first_cluster, first_cluster - start, count2);
+ ext4_set_bits(bh->b_data, first_cluster - start, count2);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
if (unlikely(err))
@@ -596,9 +609,10 @@ handle_bb:
if (overhead != 0) {
ext4_debug("mark backup superblock %#04llx (+0)\n",
start);
- ext4_set_bits(bh->b_data, 0, overhead);
+ ext4_set_bits(bh->b_data, 0,
+ EXT4_NUM_B2C(sbi, overhead));
}
- ext4_mark_bitmap_end(group_data[i].blocks_count,
+ ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
sb->s_blocksize * 8, bh->b_data);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
if (err)
@@ -643,7 +657,11 @@ handle_ib:
continue;
}
err = set_flexbg_block_bitmap(sb, handle,
- flex_gd, start, count);
+ flex_gd,
+ EXT4_B2C(sbi, start),
+ EXT4_B2C(sbi,
+ start + count
+ - 1));
if (err)
goto out;
count = group_table_count[j];
@@ -653,7 +671,11 @@ handle_ib:
if (count) {
err = set_flexbg_block_bitmap(sb, handle,
- flex_gd, start, count);
+ flex_gd,
+ EXT4_B2C(sbi, start),
+ EXT4_B2C(sbi,
+ start + count
+ - 1));
if (err)
goto out;
}
@@ -841,7 +863,8 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
ext4_std_error(sb, err);
goto exit_inode;
}
- inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
+ inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
+ (9 - EXT4_SB(sb)->s_cluster_bits);
ext4_mark_iloc_dirty(handle, inode, &iloc);
memset(gdb_bh->b_data, 0, sb->s_blocksize);
err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
@@ -936,6 +959,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
{
struct super_block *sb = inode->i_sb;
int reserved_gdb =le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
+ int cluster_bits = EXT4_SB(sb)->s_cluster_bits;
struct buffer_head **primary;
struct buffer_head *dind;
struct ext4_iloc iloc;
@@ -1011,7 +1035,8 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
if (!err)
err = err2;
}
- inode->i_blocks += reserved_gdb * sb->s_blocksize >> 9;
+
+ inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits);
ext4_mark_iloc_dirty(handle, inode, &iloc);
exit_bh:
@@ -1245,7 +1270,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
ext4_group_t group;
__u16 *bg_flags = flex_gd->bg_flags;
int i, gdb_off, gdb_num, err = 0;
-
+
for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
group = group_data->group;
@@ -1272,7 +1297,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
ext4_inode_table_set(sb, gdp, group_data->inode_table);
ext4_free_group_clusters_set(sb, gdp,
- EXT4_NUM_B2C(sbi, group_data->free_blocks_count));
+ group_data->free_clusters_count);
ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
if (ext4_has_group_desc_csum(sb))
ext4_itable_unused_set(sb, gdp,
@@ -1328,7 +1353,7 @@ static void ext4_update_super(struct super_block *sb,
*/
for (i = 0; i < flex_gd->count; i++) {
blocks_count += group_data[i].blocks_count;
- free_blocks += group_data[i].free_blocks_count;
+ free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
}
reserved_blocks = ext4_r_blocks_count(es) * 100;
@@ -1500,17 +1525,18 @@ static int ext4_setup_next_flex_gd(struct super_block *sb,
ext4_fsblk_t n_blocks_count,
unsigned long flexbg_size)
{
- struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
struct ext4_new_group_data *group_data = flex_gd->groups;
ext4_fsblk_t o_blocks_count;
ext4_group_t n_group;
ext4_group_t group;
ext4_group_t last_group;
ext4_grpblk_t last;
- ext4_grpblk_t blocks_per_group;
+ ext4_grpblk_t clusters_per_group;
unsigned long i;
- blocks_per_group = EXT4_BLOCKS_PER_GROUP(sb);
+ clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb);
o_blocks_count = ext4_blocks_count(es);
@@ -1531,9 +1557,10 @@ static int ext4_setup_next_flex_gd(struct super_block *sb,
int overhead;
group_data[i].group = group + i;
- group_data[i].blocks_count = blocks_per_group;
+ group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb);
overhead = ext4_group_overhead_blocks(sb, group + i);
- group_data[i].free_blocks_count = blocks_per_group - overhead;
+ group_data[i].mdata_blocks = overhead;
+ group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb);
if (ext4_has_group_desc_csum(sb)) {
flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
EXT4_BG_INODE_UNINIT;
@@ -1547,10 +1574,10 @@ static int ext4_setup_next_flex_gd(struct super_block *sb,
/* We need to initialize block bitmap of last group. */
flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
- if ((last_group == n_group) && (last != blocks_per_group - 1)) {
- group_data[i - 1].blocks_count = last + 1;
- group_data[i - 1].free_blocks_count -= blocks_per_group-
- last - 1;
+ if ((last_group == n_group) && (last != clusters_per_group - 1)) {
+ group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
+ group_data[i - 1].free_clusters_count -= clusters_per_group -
+ last - 1;
}
return 1;
@@ -1797,7 +1824,8 @@ static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
}
/* Do a quick sanity check of the resize inode */
- if (inode->i_blocks != 1 << (inode->i_blkbits - 9))
+ if (inode->i_blocks != 1 << (inode->i_blkbits -
+ (9 - sbi->s_cluster_bits)))
goto invalid_resize_inode;
for (i = 0; i < EXT4_N_BLOCKS; i++) {
if (i == EXT4_DIND_BLOCK) {
@@ -1960,7 +1988,7 @@ retry:
if (n_group == o_group)
add = n_blocks_count - o_blocks_count;
else
- add = EXT4_BLOCKS_PER_GROUP(sb) - (offset + 1);
+ add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
if (add > 0) {
err = ext4_group_extend_no_check(sb, o_blocks_count, add);
if (err)