| author | Chao Yu <chao2.yu@samsung.com> | 2013-11-20 14:46:39 +0800 |
|---|---|---|
| committer | Jaegeuk Kim <jaegeuk.kim@samsung.com> | 2013-12-23 10:18:02 +0900 |
| commit | 924b720b589f91311657216c97edbb3337449270 (patch) | |
| tree | 4ff02959ff2ebe1007dc8da0cb77c83d0c05cc72 /fs/f2fs/data.c | |
| parent | ce3b7d80edad7bc5ff347b9ff02f1484265b1f05 (diff) | |
| download | lwn-924b720b589f91311657216c97edbb3337449270.tar.gz lwn-924b720b589f91311657216c97edbb3337449270.zip |
f2fs: add a new function to support for merging contiguous read
For better read performance, we add a new function to support merging
contiguous reads, in the same way as the existing one for writes.
v1-->v2:
o add declarations here as Gu Zheng suggested.
o use new structure f2fs_bio_info introduced by Jaegeuk Kim.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Acked-by: Gu Zheng <guz.fnst@cn.fujitsu.com>
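
The idea behind the patch can be shown with a minimal userspace sketch (not f2fs code; pending_io, queue_read() and submit_pending() are made-up names standing in for f2fs_bio_info, submit_read_page() and f2fs_submit_read_bio()): requests for consecutive block numbers are accumulated into one pending batch, and the batch is submitted only when a non-contiguous block arrives or the caller flushes explicitly.

#include <stdio.h>

/* stand-in for the pending bio cached in f2fs_bio_info (illustrative only) */
struct pending_io {
	long start_blk;	/* first block of the pending batch */
	int nr_blks;	/* blocks accumulated so far */
};

static void submit_pending(struct pending_io *io)
{
	if (io->nr_blks == 0)
		return;
	printf("submit_bio: blocks %ld..%ld (%d pages)\n",
	       io->start_blk, io->start_blk + io->nr_blks - 1, io->nr_blks);
	io->nr_blks = 0;
}

static void queue_read(struct pending_io *io, long blk)
{
	/* a non-contiguous block forces the pending batch out first */
	if (io->nr_blks && io->start_blk + io->nr_blks != blk)
		submit_pending(io);

	if (io->nr_blks == 0)
		io->start_blk = blk;
	io->nr_blks++;
}

int main(void)
{
	struct pending_io io = { 0, 0 };
	long blks[] = { 100, 101, 102, 200, 201, 300 };
	unsigned int i;

	for (i = 0; i < sizeof(blks) / sizeof(blks[0]); i++)
		queue_read(&io, blks[i]);
	submit_pending(&io);	/* final flush, like f2fs_submit_read_bio() */
	return 0;
}

Running the sketch prints one submission for blocks 100..102, one for 200..201 and one for 300, i.e. three submissions instead of six.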
Diffstat (limited to 'fs/f2fs/data.c')
-rw-r--r-- | fs/f2fs/data.c | 50 |
1 file changed, 50 insertions, 0 deletions
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 2e54522a8061..ce3cbd92585e 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -414,6 +414,56 @@ int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
 	return 0;
 }
 
+void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, int rw)
+{
+	struct f2fs_bio_info *io = &sbi->read_io;
+
+	if (!io->bio)
+		return;
+
+	mutex_lock(&io->io_mutex);
+	if (io->bio) {
+		submit_bio(rw, io->bio);
+		io->bio = NULL;
+	}
+	mutex_unlock(&io->io_mutex);
+}
+
+void submit_read_page(struct f2fs_sb_info *sbi, struct page *page,
+			block_t blk_addr, int rw)
+{
+	struct block_device *bdev = sbi->sb->s_bdev;
+	struct f2fs_bio_info *io = &sbi->read_io;
+	int bio_blocks;
+
+	verify_block_addr(sbi, blk_addr);
+
+	mutex_lock(&io->io_mutex);
+
+	if (io->bio && io->last_block_in_bio != blk_addr - 1) {
+		submit_bio(rw, io->bio);
+		io->bio = NULL;
+	}
+alloc_new:
+	if (io->bio == NULL) {
+		bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
+		io->bio = f2fs_bio_alloc(bdev, bio_blocks);
+		io->bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
+		io->bio->bi_end_io = read_end_io;
+	}
+
+	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
+						PAGE_CACHE_SIZE) {
+		submit_bio(rw, io->bio);
+		io->bio = NULL;
+		goto alloc_new;
+	}
+
+	io->last_block_in_bio = blk_addr;
+
+	mutex_unlock(&io->io_mutex);
+}
+
 /*
  * This function should be used by the data read flow only where it
  * does not check the "create" flag that indicates block allocation.
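
For context, a hypothetical caller of the two new helpers could look like the sketch below. This wiring is not part of the patch; read_contiguous_range() and its parameters are illustrative only. Consecutive calls to submit_read_page() extend the bio cached in sbi->read_io, and f2fs_submit_read_bio() submits whatever is still pending.

/* hypothetical caller sketch, not part of this patch */
static void read_contiguous_range(struct f2fs_sb_info *sbi,
			struct page **pages, block_t start_blk, int nr_pages)
{
	int i;

	/* consecutive block addresses are merged into the cached bio */
	for (i = 0; i < nr_pages; i++)
		submit_read_page(sbi, pages[i], start_blk + i, READ);

	/* flush the bio still cached in sbi->read_io */
	f2fs_submit_read_bio(sbi, READ);
}

Because the pending bio lives in the per-superblock f2fs_bio_info and is protected by io_mutex, merging also works across separate callers as long as the block addresses they request stay contiguous.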