author	Jaegeuk Kim <jaegeuk.kim@samsung.com>	2013-01-18 14:54:13 +0900
committer	Jaegeuk Kim <jaegeuk.kim@samsung.com>	2013-01-22 10:48:59 +0900
commit	a7fdffbd3ea4b3cc2993af006bde38a423b38b72 (patch)
tree	4d642d76fa47af5514eca7be710b1f8c063cd637 /fs
parent	c01e54b770e69c65525295eb2668be3dc0822406 (diff)
f2fs: avoid issuing small bios due to several dirty node pages
If small bios of dirty node pages are issued in the middle of sequential data writes, the well-formed consecutive data bios produced there can be split by the small node bios, resulting in performance degradation. So, let's collect dirty node pages until they reach a threshold before writing them out. By default, the threshold is set to 2MB, one segment size.

This improves sequential write performance on an i5 with a 512GB SSD (830 w/ SATA2) as follows.
Before: 231 MB/s -> After: 255 MB/s

Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Reviewed-by: Namjae Jeon <namjae.jeon@samsung.com>
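For readers following the numbers: assuming the usual 4 KiB kernel page size (an assumption, since the page size is not stated in the message), the 2MB segment threshold works out to the 512 pages used by the new COLLECT_DIRTY_NODES constant. A minimal sketch of that arithmetic:

	/* Sketch only: the 4 KiB page size is assumed, not taken from the patch. */
	#define ASSUMED_PAGE_SIZE	4096
	#define SEGMENT_SIZE		(2 * 1024 * 1024)	/* one f2fs segment, 2MB */
	#define NODE_PAGES_PER_SEGMENT	(SEGMENT_SIZE / ASSUMED_PAGE_SIZE)	/* = 512 */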
Diffstat (limited to 'fs')
-rw-r--r--	fs/f2fs/node.c	17
1 file changed, 11 insertions, 6 deletions
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index f177c018745c..9bda63c9c166 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1124,6 +1124,12 @@ static int f2fs_write_node_page(struct page *page,
 	return 0;
 }
 
+/*
+ * It is very important to gather dirty pages and write them at once, so that
+ * we can submit a big bio without interfering with other data writes.
+ * By default, 512 pages (2MB), a segment size, is quite reasonable.
+ */
+#define COLLECT_DIRTY_NODES	512
 static int f2fs_write_node_pages(struct address_space *mapping,
 			struct writeback_control *wbc)
 {
@@ -1131,17 +1137,16 @@ static int f2fs_write_node_pages(struct address_space *mapping,
 	struct block_device *bdev = sbi->sb->s_bdev;
 	long nr_to_write = wbc->nr_to_write;
 
-	if (wbc->for_kupdate)
-		return 0;
-
-	if (get_pages(sbi, F2FS_DIRTY_NODES) == 0)
-		return 0;
-
+	/* First check balancing cached NAT entries */
 	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
 		write_checkpoint(sbi, false, false);
 		return 0;
 	}
 
+	/* collect a number of dirty node pages and write together */
+	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
+		return 0;
+
 	/* if mounting is failed, skip writing node pages */
 	wbc->nr_to_write = bio_get_nr_vecs(bdev);
 	sync_node_pages(sbi, 0, wbc);
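To illustrate the pattern the patch applies, here is a small self-contained sketch of threshold-based batching: writeback is skipped until enough dirty pages have accumulated to fill one segment-sized write. The names and types below are illustrative only, not the kernel's APIs.

	#include <stdbool.h>
	#include <stddef.h>

	/* Mirrors COLLECT_DIRTY_NODES: one 2MB segment worth of 4 KiB pages (assumed). */
	#define FLUSH_THRESHOLD	512

	struct node_cache {
		size_t dirty_pages;	/* dirty node pages accumulated so far */
	};

	/*
	 * Skip writeback while the batch is small; once a full segment worth of
	 * pages is available, flush them together so one large write can be
	 * issued instead of many small ones that would split concurrent data I/O.
	 */
	static bool maybe_flush_nodes(struct node_cache *cache,
				      void (*flush)(struct node_cache *))
	{
		if (cache->dirty_pages < FLUSH_THRESHOLD)
			return false;	/* keep collecting */

		flush(cache);		/* submit one big write */
		cache->dirty_pages = 0;
		return true;
	}

This mirrors the new early return in f2fs_write_node_pages(): while fewer than COLLECT_DIRTY_NODES dirty node pages exist, the writeback request simply returns and the pages stay cached until a later pass can write a full batch.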