author    | Jaegeuk Kim <jaegeuk@kernel.org> | 2015-12-22 13:23:35 -0800
committer | Jaegeuk Kim <jaegeuk@kernel.org> | 2015-12-30 10:14:09 -0800
commit    | 2a3407607028f7c780f1c20faa4e922bf631d340 (patch)
tree      | 77385c73948461f554fcc71e831e1bf81bb5295f /fs/f2fs/data.c
parent    | 3104af35eb6a2452ccc9912997e7728777100de2 (diff)
download  | lwn-2a3407607028f7c780f1c20faa4e922bf631d340.tar.gz lwn-2a3407607028f7c780f1c20faa4e922bf631d340.zip
f2fs: call f2fs_balance_fs only when node was changed
If a user only updates or reads data, we don't need to call f2fs_balance_fs,
which triggers f2fs_gc and adds unnecessarily long latency. Call it only
after an operation that actually changed a node block.
Reviewed-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
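
In rough terms, the patch replaces an unconditional f2fs_balance_fs() call made before each operation with a conditional call made after f2fs_unlock_op(), keyed off dn.node_changed. The following is a minimal, self-contained sketch of that pattern only; the toy_* names are illustrative stand-ins, not f2fs code (the real code works on struct dnode_of_data and struct f2fs_sb_info, and f2fs_balance_fs() may kick off f2fs_gc()):

/*
 * Sketch of the conditional-rebalance pattern (illustrative only).
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_dnode {
	bool node_changed;	/* set when the operation dirtied a node block */
};

static void toy_balance_fs(void)
{
	/* stand-in for f2fs_balance_fs(), which may trigger garbage collection */
	printf("rebalancing (possible GC latency)\n");
}

static void toy_locked_op(struct toy_dnode *dn, bool dirties_node)
{
	/* stand-in for the work done between f2fs_lock_op()/f2fs_unlock_op() */
	dn->node_changed = dirties_node;
}

int main(void)
{
	struct toy_dnode dn = { .node_changed = false };

	/*
	 * Old behaviour: rebalance unconditionally before the operation,
	 * even for plain reads or data-only updates.
	 * New behaviour: do the operation first, then rebalance only if a
	 * node block actually changed, so reads skip the GC latency.
	 */
	toy_locked_op(&dn, false);	/* e.g. reading data or a hole */
	if (dn.node_changed)
		toy_balance_fs();	/* skipped: no node was changed */

	toy_locked_op(&dn, true);	/* e.g. allocating a new block */
	if (dn.node_changed)
		toy_balance_fs();	/* runs: a node block was dirtied */

	return 0;
}

The upshot is that reads and data-only updates no longer pay for a possible GC pass; only operations that dirty node blocks do.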
Diffstat (limited to 'fs/f2fs/data.c')
-rw-r--r-- | fs/f2fs/data.c | 26
1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 82ecaa30fd77..958d8261b258 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -509,7 +509,6 @@ static void __allocate_data_blocks(struct inode *inode, loff_t offset,
 	u64 end_offset;
 
 	while (len) {
-		f2fs_balance_fs(sbi);
 		f2fs_lock_op(sbi);
 
 		/* When reading holes, we need its node page */
@@ -542,6 +541,9 @@ static void __allocate_data_blocks(struct inode *inode, loff_t offset,
 
 		f2fs_put_dnode(&dn);
 		f2fs_unlock_op(sbi);
+
+		if (dn.node_changed)
+			f2fs_balance_fs(sbi);
 	}
 	return;
 
@@ -551,6 +553,8 @@ sync_out:
 	f2fs_put_dnode(&dn);
 out:
 	f2fs_unlock_op(sbi);
+	if (dn.node_changed)
+		f2fs_balance_fs(sbi);
 	return;
 }
 
@@ -649,6 +653,8 @@ get_next:
 
 		if (create) {
 			f2fs_unlock_op(sbi);
+			if (dn.node_changed)
+				f2fs_balance_fs(sbi);
 			f2fs_lock_op(sbi);
 		}
 
@@ -706,8 +712,11 @@ sync_out:
 put_out:
 	f2fs_put_dnode(&dn);
 unlock_out:
-	if (create)
+	if (create) {
 		f2fs_unlock_op(sbi);
+		if (dn.node_changed)
+			f2fs_balance_fs(sbi);
+	}
 out:
 	trace_f2fs_map_blocks(inode, map, err);
 	return err;
@@ -1415,8 +1424,6 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
 
 	trace_f2fs_write_begin(inode, pos, len, flags);
 
-	f2fs_balance_fs(sbi);
-
 	/*
 	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
@@ -1466,6 +1473,17 @@ put_next:
 	f2fs_put_dnode(&dn);
 	f2fs_unlock_op(sbi);
 
+	if (dn.node_changed && has_not_enough_free_secs(sbi, 0)) {
+		unlock_page(page);
+		f2fs_balance_fs(sbi);
+		lock_page(page);
+		if (page->mapping != mapping) {
+			/* The page got truncated from under us */
+			f2fs_put_page(page, 1);
+			goto repeat;
+		}
+	}
+
 	f2fs_wait_on_page_writeback(page, DATA);
 
 	/* wait for GCed encrypted page writeback */
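
The f2fs_write_begin() hunk additionally drops the page lock before rebalancing, since f2fs_balance_fs() may run garbage collection that writes back and locks data pages; after re-locking, it re-checks page->mapping to catch a concurrent truncation. Below is a small sketch of that unlock/rebalance/relock/revalidate pattern with toy stand-ins; only the page->mapping comparison and the "goto repeat" intent mirror the real code, everything else is illustrative:

#include <stdbool.h>
#include <stdio.h>

struct toy_mapping { int id; };

struct toy_page {
	struct toy_mapping *mapping;	/* changes if the page is truncated */
	bool locked;
};

static void toy_lock_page(struct toy_page *p)   { p->locked = true; }
static void toy_unlock_page(struct toy_page *p) { p->locked = false; }

static void toy_balance_fs(void)
{
	/* may sleep and write back pages; must not hold the page lock here */
	printf("rebalancing while the target page is unlocked\n");
}

/* Returns true when the caller must restart (page was truncated meanwhile). */
static bool toy_balance_if_needed(struct toy_page *page,
				  struct toy_mapping *mapping,
				  bool node_changed, bool low_on_space)
{
	if (!(node_changed && low_on_space))
		return false;

	toy_unlock_page(page);
	toy_balance_fs();
	toy_lock_page(page);

	if (page->mapping != mapping) {
		/* the page got truncated from under us: drop it and retry */
		toy_unlock_page(page);
		return true;
	}
	return false;
}

int main(void)
{
	struct toy_mapping m = { .id = 1 };
	struct toy_page page = { .mapping = &m, .locked = true };

	if (toy_balance_if_needed(&page, &m, true, true))
		printf("would goto repeat\n");
	else
		printf("page still valid, continue write_begin\n");
	return 0;
}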