author		Matthew Wilcox (Oracle) <willy@infradead.org>	2022-04-30 23:53:28 -0400
committer	Matthew Wilcox (Oracle) <willy@infradead.org>	2022-05-09 23:12:33 -0400
commit		5784f09bf4dfdc76d8d68437efd01c6a0646a08e (patch)
tree		2f59808c051f8b6848af54498202f5dbb9151019
parent		e45c20d110414730db224d78db4cfb44495c0d8a (diff)
hfs: Convert to release_folio
Use a folio throughout hfs_release_folio().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
-rw-r--r--	fs/hfs/inode.c	23
1 file changed, 12 insertions(+), 11 deletions(-)
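As background for readers following the folio conversion series: the address_space operation converted here changes shape from int (*releasepage)(struct page *, gfp_t) to bool (*release_folio)(struct folio *, gfp_t), so implementations take a folio and return true/false. A minimal sketch of a new-style implementation follows; the "examplefs" names are purely illustrative and not part of this patch, and at this point in the series try_to_free_buffers() still takes a struct page, which is why &folio->page appears both here and in the diff below.

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>

/* Illustrative only: a trivial release_folio that just drops buffer heads. */
static bool examplefs_release_folio(struct folio *folio, gfp_t gfp)
{
	/* try_to_free_buffers() is still page-based at this point in the series. */
	return try_to_free_buffers(&folio->page);
}

static const struct address_space_operations examplefs_aops = {
	.release_folio	= examplefs_release_folio,
	/* other operations elided */
};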
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index ba3ff9cd7cfc..86fd50e5fccb 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -69,14 +69,15 @@ static sector_t hfs_bmap(struct address_space *mapping, sector_t block)
 	return generic_block_bmap(mapping, block, hfs_get_block);
 }
 
-static int hfs_releasepage(struct page *page, gfp_t mask)
+static bool hfs_release_folio(struct folio *folio, gfp_t mask)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	struct super_block *sb = inode->i_sb;
 	struct hfs_btree *tree;
 	struct hfs_bnode *node;
 	u32 nidx;
-	int i, res = 1;
+	int i;
+	bool res = true;
 
 	switch (inode->i_ino) {
 	case HFS_EXT_CNID:
@@ -87,27 +88,27 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
 		break;
 	default:
 		BUG();
-		return 0;
+		return false;
 	}
 
 	if (!tree)
-		return 0;
+		return false;
 
 	if (tree->node_size >= PAGE_SIZE) {
-		nidx = page->index >> (tree->node_size_shift - PAGE_SHIFT);
+		nidx = folio->index >> (tree->node_size_shift - PAGE_SHIFT);
 		spin_lock(&tree->hash_lock);
 		node = hfs_bnode_findhash(tree, nidx);
 		if (!node)
 			;
 		else if (atomic_read(&node->refcnt))
-			res = 0;
+			res = false;
 		if (res && node) {
 			hfs_bnode_unhash(node);
 			hfs_bnode_free(node);
 		}
 		spin_unlock(&tree->hash_lock);
 	} else {
-		nidx = page->index << (PAGE_SHIFT - tree->node_size_shift);
+		nidx = folio->index << (PAGE_SHIFT - tree->node_size_shift);
 		i = 1 << (PAGE_SHIFT - tree->node_size_shift);
 		spin_lock(&tree->hash_lock);
 		do {
@@ -115,7 +116,7 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
 			if (!node)
 				continue;
 			if (atomic_read(&node->refcnt)) {
-				res = 0;
+				res = false;
 				break;
 			}
 			hfs_bnode_unhash(node);
@@ -123,7 +124,7 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
 		} while (--i && nidx < tree->node_count);
 		spin_unlock(&tree->hash_lock);
 	}
-	return res ? try_to_free_buffers(page) : 0;
+	return res ? try_to_free_buffers(&folio->page) : false;
 }
 
 static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
@@ -165,7 +166,7 @@ const struct address_space_operations hfs_btree_aops = {
 	.write_begin = hfs_write_begin,
 	.write_end = generic_write_end,
 	.bmap = hfs_bmap,
-	.releasepage = hfs_releasepage,
+	.release_folio = hfs_release_folio,
 };
 
 const struct address_space_operations hfs_aops = {
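A brief caller-side note, not part of this commit: the bool result is consumed by the VM when it tries to strip private data from a clean folio before reclaiming or truncating it. The sketch below is a rough illustration of that call pattern rather than the exact mm/ code of this series; example_try_release_folio() is a hypothetical name.

#include <linux/pagemap.h>
#include <linux/buffer_head.h>

/* Illustrative sketch of how ->release_folio is reached by generic code. */
static bool example_try_release_folio(struct folio *folio, gfp_t gfp)
{
	struct address_space *mapping = folio->mapping;

	/* Folios under writeback cannot give up their private state yet. */
	if (folio_test_writeback(folio))
		return false;
	if (mapping && mapping->a_ops->release_folio)
		return mapping->a_ops->release_folio(folio, gfp);
	/* No filesystem hook: fall back to freeing buffer heads directly. */
	return try_to_free_buffers(&folio->page);
}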