Diffstat (limited to 'fs/f2fs/data.c')
 fs/f2fs/data.c | 192 ++++++++++++++++++++++----------------------
 1 file changed, 92 insertions(+), 100 deletions(-)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index de4da6d9cd93..54f89f0ee69b 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -319,8 +319,7 @@ static void f2fs_read_end_io(struct bio *bio)
static void f2fs_write_end_io(struct bio *bio)
{
struct f2fs_sb_info *sbi;
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
iostat_update_and_unbind_ctx(bio);
sbi = bio->bi_private;
@@ -328,34 +327,41 @@ static void f2fs_write_end_io(struct bio *bio)
if (time_to_inject(sbi, FAULT_WRITE_IO))
bio->bi_status = BLK_STS_IOERR;
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
- enum count_type type = WB_DATA_TYPE(page, false);
+ bio_for_each_folio_all(fi, bio) {
+ struct folio *folio = fi.folio;
+ enum count_type type;
+
+ if (fscrypt_is_bounce_folio(folio)) {
+ struct folio *io_folio = folio;

- fscrypt_finalize_bounce_page(&page);
+ folio = fscrypt_pagecache_folio(io_folio);
+ fscrypt_free_bounce_page(&io_folio->page);
+ }
#ifdef CONFIG_F2FS_FS_COMPRESSION
- if (f2fs_is_compressed_page(page)) {
- f2fs_compress_write_end_io(bio, page);
+ if (f2fs_is_compressed_page(&folio->page)) {
+ f2fs_compress_write_end_io(bio, &folio->page);
continue;
}
#endif
+ type = WB_DATA_TYPE(&folio->page, false);
+
if (unlikely(bio->bi_status)) {
- mapping_set_error(page->mapping, -EIO);
+ mapping_set_error(folio->mapping, -EIO);
if (type == F2FS_WB_CP_DATA)
f2fs_stop_checkpoint(sbi, true,
STOP_CP_REASON_WRITE_FAIL);
}
- f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
- page_folio(page)->index != nid_of_node(page));
+ f2fs_bug_on(sbi, folio->mapping == NODE_MAPPING(sbi) &&
+ folio->index != nid_of_node(&folio->page));
dec_page_count(sbi, type);
- if (f2fs_in_warm_node_list(sbi, page))
- f2fs_del_fsync_node_entry(sbi, page);
- clear_page_private_gcing(page);
- end_page_writeback(page);
+ if (f2fs_in_warm_node_list(sbi, folio))
+ f2fs_del_fsync_node_entry(sbi, &folio->page);
+ clear_page_private_gcing(&folio->page);
+ folio_end_writeback(folio);
}
if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
wq_has_sleeper(&sbi->cp_wait))
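
/*
 * A minimal sketch of the folio-based completion pattern adopted
 * above. write_end_io_example() is hypothetical, not f2fs code; it
 * omits the fscrypt bounce-folio and compression handling that the
 * real handler performs before touching folio->mapping.
 */
static void write_end_io_example(struct bio *bio)
{
	struct folio_iter fi;

	/* Visit each folio attached to the bio exactly once. */
	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;

		if (unlikely(bio->bi_status))
			mapping_set_error(folio->mapping, -EIO);
		/* Clear the writeback flag and wake any waiters. */
		folio_end_writeback(folio);
	}
	bio_put(bio);
}
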
@@ -413,6 +419,7 @@ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
{
unsigned int temp_mask = GENMASK(NR_TEMP_TYPE - 1, 0);
+ struct folio *fio_folio = page_folio(fio->page);
unsigned int fua_flag, meta_flag, io_flag;
blk_opf_t op_flags = 0;
@@ -438,6 +445,11 @@ static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
op_flags |= REQ_META;
if (BIT(fio->temp) & fua_flag)
op_flags |= REQ_FUA;
+
+ if (fio->type == DATA &&
+ F2FS_I(fio_folio->mapping->host)->ioprio_hint == F2FS_IOPRIO_WRITE)
+ op_flags |= REQ_PRIO;
+
return op_flags;
}
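
/*
 * The masks tested above carry one bit per temperature. A sketch of
 * the probe, assuming the usual enum temp_type (HOT/WARM/COLD) and
 * NR_TEMP_TYPE; io_flag_set_example() is hypothetical:
 */
static bool io_flag_set_example(unsigned int io_flag,
				enum temp_type temp, unsigned int shift)
{
	unsigned int temp_mask = GENMASK(NR_TEMP_TYPE - 1, 0);

	/* Window out the per-temperature bits, then test one of them. */
	return BIT(temp) & ((io_flag >> shift) & temp_mask);
}
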
@@ -876,6 +888,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
struct bio *bio = *fio->bio;
struct page *page = fio->encrypted_page ?
fio->encrypted_page : fio->page;
+ struct folio *folio = page_folio(fio->page);
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
@@ -889,8 +902,8 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
alloc_new:
if (!bio) {
bio = __bio_alloc(fio, BIO_MAX_VECS);
- f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
- page_folio(fio->page)->index, fio, GFP_NOIO);
+ f2fs_set_bio_crypt_ctx(bio, folio->mapping->host,
+ folio->index, fio, GFP_NOIO);
add_bio_entry(fio->sbi, bio, page, fio->temp);
} else {
@@ -899,8 +912,7 @@ alloc_new:
}
if (fio->io_wbc)
- wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
- PAGE_SIZE);
+ wbc_account_cgroup_owner(fio->io_wbc, folio, folio_size(folio));
inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));
@@ -1041,8 +1053,6 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages),
REQ_OP_READ | op_flag,
for_write ? GFP_NOIO : GFP_KERNEL, &f2fs_bioset);
- if (!bio)
- return ERR_PTR(-ENOMEM);
bio->bi_iter.bi_sector = sector;
f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
bio->bi_end_io = f2fs_read_end_io;
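
/*
 * The NULL check dropped above was dead code: bio_alloc_bioset() is
 * backed by the bioset's mempool and cannot fail when the gfp mask
 * allows blocking, as both GFP_NOIO and GFP_KERNEL do. A sketch
 * under that assumption; example_bioset and read_bio_alloc_example()
 * are hypothetical:
 */
static struct bio *read_bio_alloc_example(struct block_device *bdev,
					  unsigned int nr_pages,
					  sector_t sector)
{
	struct bio *bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages),
					   REQ_OP_READ, GFP_NOIO,
					   &example_bioset);

	/* No NULL check: a blocking allocation here cannot fail. */
	bio->bi_iter.bi_sector = sector;
	return bio;
}
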
@@ -1193,18 +1203,17 @@ int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
return err;
}
-struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
- blk_opf_t op_flags, bool for_write,
- pgoff_t *next_pgofs)
+struct folio *f2fs_get_read_data_folio(struct inode *inode, pgoff_t index,
+ blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs)
{
struct address_space *mapping = inode->i_mapping;
struct dnode_of_data dn;
- struct page *page;
+ struct folio *folio;
int err;
- page = f2fs_grab_cache_page(mapping, index, for_write);
- if (!page)
- return ERR_PTR(-ENOMEM);
+ folio = f2fs_grab_cache_folio(mapping, index, for_write);
+ if (IS_ERR(folio))
+ return folio;
if (f2fs_lookup_read_extent_cache_block(inode, index,
&dn.data_blkaddr)) {
@@ -1239,9 +1248,9 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
goto put_err;
}
got_it:
- if (PageUptodate(page)) {
- unlock_page(page);
- return page;
+ if (folio_test_uptodate(folio)) {
+ folio_unlock(folio);
+ return folio;
}
/*
@@ -1252,48 +1261,51 @@ got_it:
* f2fs_init_inode_metadata.
*/
if (dn.data_blkaddr == NEW_ADDR) {
- zero_user_segment(page, 0, PAGE_SIZE);
- if (!PageUptodate(page))
- SetPageUptodate(page);
- unlock_page(page);
- return page;
+ folio_zero_segment(folio, 0, folio_size(folio));
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
+ return folio;
}
- err = f2fs_submit_page_read(inode, page_folio(page), dn.data_blkaddr,
+ err = f2fs_submit_page_read(inode, folio, dn.data_blkaddr,
op_flags, for_write);
if (err)
goto put_err;
- return page;
+ return folio;
put_err:
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return ERR_PTR(err);
}
-struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
+struct folio *f2fs_find_data_folio(struct inode *inode, pgoff_t index,
pgoff_t *next_pgofs)
{
struct address_space *mapping = inode->i_mapping;
- struct page *page;
+ struct folio *folio;
- page = find_get_page_flags(mapping, index, FGP_ACCESSED);
- if (page && PageUptodate(page))
- return page;
- f2fs_put_page(page, 0);
+ folio = __filemap_get_folio(mapping, index, FGP_ACCESSED, 0);
+ if (IS_ERR(folio))
+ goto read;
+ if (folio_test_uptodate(folio))
+ return folio;
+ f2fs_folio_put(folio, false);
- page = f2fs_get_read_data_page(inode, index, 0, false, next_pgofs);
- if (IS_ERR(page))
- return page;
+read:
+ folio = f2fs_get_read_data_folio(inode, index, 0, false, next_pgofs);
+ if (IS_ERR(folio))
+ return folio;
- if (PageUptodate(page))
- return page;
+ if (folio_test_uptodate(folio))
+ return folio;
- wait_on_page_locked(page);
- if (unlikely(!PageUptodate(page))) {
- f2fs_put_page(page, 0);
+ folio_wait_locked(folio);
+ if (unlikely(!folio_test_uptodate(folio))) {
+ f2fs_folio_put(folio, false);
return ERR_PTR(-EIO);
}
- return page;
+ return folio;
}
/*
@@ -1301,23 +1313,23 @@ struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
* Because, the callers, functions in dir.c and GC, should be able to know
* whether this page exists or not.
*/
-struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
+struct folio *f2fs_get_lock_data_folio(struct inode *inode, pgoff_t index,
bool for_write)
{
struct address_space *mapping = inode->i_mapping;
- struct page *page;
+ struct folio *folio;
- page = f2fs_get_read_data_page(inode, index, 0, for_write, NULL);
- if (IS_ERR(page))
- return page;
+ folio = f2fs_get_read_data_folio(inode, index, 0, for_write, NULL);
+ if (IS_ERR(folio))
+ return folio;
/* wait for read completion */
- lock_page(page);
- if (unlikely(page->mapping != mapping || !PageUptodate(page))) {
- f2fs_put_page(page, 1);
+ folio_lock(folio);
+ if (unlikely(folio->mapping != mapping || !folio_test_uptodate(folio))) {
+ f2fs_folio_put(folio, true);
return ERR_PTR(-EIO);
}
- return page;
+ return folio;
}
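
/*
 * After the conversion, both lookup helpers report failure via
 * ERR_PTR() rather than NULL, so callers in dir.c and GC can tell an
 * absent page from other errors. A hypothetical caller sketch;
 * use_data_folio_example() is not f2fs code:
 */
static int use_data_folio_example(struct inode *inode, pgoff_t index)
{
	struct folio *folio = f2fs_get_lock_data_folio(inode, index, false);

	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* ... use the locked, uptodate folio here ... */

	f2fs_folio_put(folio, true);	/* unlock and drop the reference */
	return 0;
}
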
/*
@@ -2178,6 +2190,12 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
int i;
int ret = 0;
+ if (unlikely(f2fs_cp_error(sbi))) {
+ ret = -EIO;
+ from_dnode = false;
+ goto out_put_dnode;
+ }
+
f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
last_block_in_file = F2FS_BYTES_TO_BLK(f2fs_readpage_limit(inode) +
@@ -2221,10 +2239,6 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
if (ret)
goto out;
- if (unlikely(f2fs_cp_error(sbi))) {
- ret = -EIO;
- goto out_put_dnode;
- }
f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
skip_reading_dnode:
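
/*
 * Hoisting the f2fs_cp_error() test above the dnode lookup means the
 * shared out_put_dnode error path can now run before any dnode is
 * taken; clearing from_dnode first keeps f2fs_put_dnode() from
 * releasing a dnode that was never acquired. Condensed shape of that
 * error path (not the full function):
 */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		from_dnode = false;	/* dnode not acquired yet */
		goto out_put_dnode;
	}
	/* ... dnode lookup and per-cluster work ... */
out_put_dnode:
	if (from_dnode)
		f2fs_put_dnode(&dn);
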
@@ -2500,7 +2514,7 @@ int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
return 0;
retry_encrypt:
- fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
+ fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page_folio(page),
PAGE_SIZE, 0, gfp_flags);
if (IS_ERR(fio->encrypted_page)) {
/* flush pending IOs and wait for a while in the ENOMEM case */
@@ -2921,29 +2935,6 @@ redirty_out:
return err;
}
-static int f2fs_write_data_page(struct page *page,
- struct writeback_control *wbc)
-{
- struct folio *folio = page_folio(page);
-#ifdef CONFIG_F2FS_FS_COMPRESSION
- struct inode *inode = folio->mapping->host;
-
- if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
- goto out;
-
- if (f2fs_compressed_file(inode)) {
- if (f2fs_is_compressed_cluster(inode, folio->index)) {
- folio_redirty_for_writepage(wbc, folio);
- return AOP_WRITEPAGE_ACTIVATE;
- }
- }
-out:
-#endif
-
- return f2fs_write_single_data_page(folio, NULL, NULL, NULL,
- wbc, FS_DATA_IO, 0, true);
-}
-
/*
* This function was copied from write_cache_pages from mm/page-writeback.c.
* The major change is making write step of cold data page separately from
@@ -3266,10 +3257,6 @@ static int __f2fs_write_data_pages(struct address_space *mapping,
int ret;
bool locked = false;
- /* deal with chardevs and other special file */
- if (!mapping->a_ops->writepage)
- return 0;
-
/* skip writing if there is no dirty page in this inode */
if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
return 0;
@@ -3390,7 +3377,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
restart:
/* check inline_data */
- ipage = f2fs_get_node_page(sbi, inode->i_ino);
+ ipage = f2fs_get_inode_page(sbi, inode->i_ino);
if (IS_ERR(ipage)) {
err = PTR_ERR(ipage);
goto unlock_out;
@@ -3453,7 +3440,7 @@ static int __find_data_block(struct inode *inode, pgoff_t index,
struct page *ipage;
int err = 0;
- ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
+ ipage = f2fs_get_inode_page(F2FS_I_SB(inode), inode->i_ino);
if (IS_ERR(ipage))
return PTR_ERR(ipage);
@@ -3483,7 +3470,7 @@ static int __reserve_data_block(struct inode *inode, pgoff_t index,
f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
- ipage = f2fs_get_node_page(sbi, inode->i_ino);
+ ipage = f2fs_get_inode_page(sbi, inode->i_ino);
if (IS_ERR(ipage)) {
err = PTR_ERR(ipage);
goto unlock_out;
@@ -4101,7 +4088,6 @@ static void f2fs_swap_deactivate(struct file *file)
const struct address_space_operations f2fs_dblock_aops = {
.read_folio = f2fs_read_data_folio,
.readahead = f2fs_readahead,
- .writepage = f2fs_write_data_page,
.writepages = f2fs_write_data_pages,
.write_begin = f2fs_write_begin,
.write_end = f2fs_write_end,
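
/*
 * With .writepage removed, all writeback for this mapping funnels
 * through .writepages and the VM has no per-page entry point left to
 * call, which is also why the ->writepage test for special files was
 * dropped earlier. A minimal hypothetical table of the same shape;
 * the example_* handlers are placeholders, not f2fs functions:
 */
static const struct address_space_operations example_aops = {
	.read_folio	= example_read_folio,
	.readahead	= example_readahead,
	.writepages	= example_writepages,	/* sole writeback entry */
	.dirty_folio	= filemap_dirty_folio,
};
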
@@ -4195,7 +4181,13 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
map.m_next_pgofs = &next_pgofs;
map.m_seg_type = f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode),
inode->i_write_hint);
- if (flags & IOMAP_WRITE)
+
+ /*
+ * If the blocks being overwritten are already allocated,
+ * f2fs_map_lock and f2fs_balance_fs are not necessary.
+ */
+ if ((flags & IOMAP_WRITE) &&
+ !f2fs_overwrite_io(inode, offset, length))
map.m_may_create = true;
err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DIO);
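
/*
 * Sketch of the question f2fs_overwrite_io() answers for the hunk
 * above: "is every block in [offset, offset + length) already
 * allocated?". An illustration built on the mapping interface, not
 * the real implementation; range_is_allocated_example() is
 * hypothetical:
 */
static bool range_is_allocated_example(struct inode *inode,
				       loff_t pos, size_t len)
{
	struct f2fs_map_blocks map = {};

	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
	map.m_len = F2FS_BYTES_TO_BLK(pos + len - 1) - map.m_lblk + 1;
	map.m_may_create = false;

	/* Read-mode lookup; any failure or hole means "not allocated". */
	if (f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT))
		return false;
	return map.m_flags & F2FS_MAP_MAPPED;
}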