author     Matthew Wilcox (Oracle) <willy@infradead.org>  2022-01-17 14:35:22 -0500
committer  Matthew Wilcox (Oracle) <willy@infradead.org>  2022-03-21 12:59:02 -0400
commit     e20c41b1091a24dff7ad4cfd99cd5a4f527fe3c4 (patch)
tree       5a2073d0f328ccc8d1f11829ec187b6b0475f938 /mm/vmscan.c
parent     cbcc268bb1ce5b539e7652d398e08e9b83dc4cef (diff)
mm/vmscan: Turn page_check_dirty_writeback() into folio_check_dirty_writeback()
Saves a few calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
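For context on where the savings come from: the legacy PageDirty()/PageWriteback() style helpers call compound_head() on each invocation to reach the head page's flags, whereas a struct folio can never be a tail page, so the folio_test_*() variants read the flags directly. A minimal sketch of the difference, with simplified stand-ins (the sketch_* helpers are illustrative, not the real page-flags macros):

	/* Legacy pattern: every flag test re-derives the head page. */
	static inline bool sketch_page_dirty(struct page *page)
	{
		return test_bit(PG_dirty, &compound_head(page)->flags);
	}

	/*
	 * Folio pattern: a folio is never a tail page, so the flags
	 * can be read directly with no per-call head lookup.
	 */
	static inline bool sketch_folio_dirty(struct folio *folio)
	{
		return test_bit(PG_dirty, &folio->flags);
	}

The converted function below performs several such flag tests (file-LRU, anon, swap-backed, private, dirty, writeback), so taking a folio argument removes the repeated lookup from each of them.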
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  20  ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5a018aa5ab7c..815fe89d37f9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1437,7 +1437,7 @@ static enum page_references page_check_references(struct page *page,
}
/* Check if a page is dirty or under writeback */
-static void page_check_dirty_writeback(struct page *page,
+static void folio_check_dirty_writeback(struct folio *folio,
bool *dirty, bool *writeback)
{
struct address_space *mapping;
@@ -1446,24 +1446,24 @@ static void page_check_dirty_writeback(struct page *page,
* Anonymous pages are not handled by flushers and must be written
* from reclaim context. Do not stall reclaim based on them
*/
- if (!page_is_file_lru(page) ||
- (PageAnon(page) && !PageSwapBacked(page))) {
+ if (!folio_is_file_lru(folio) ||
+ (folio_test_anon(folio) && !folio_test_swapbacked(folio))) {
*dirty = false;
*writeback = false;
return;
}
- /* By default assume that the page flags are accurate */
- *dirty = PageDirty(page);
- *writeback = PageWriteback(page);
+ /* By default assume that the folio flags are accurate */
+ *dirty = folio_test_dirty(folio);
+ *writeback = folio_test_writeback(folio);
/* Verify dirty/writeback state if the filesystem supports it */
- if (!page_has_private(page))
+ if (!folio_test_private(folio))
return;
- mapping = page_mapping(page);
+ mapping = folio_mapping(folio);
if (mapping && mapping->a_ops->is_dirty_writeback)
- mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
+ mapping->a_ops->is_dirty_writeback(&folio->page, dirty, writeback);
}
static struct page *alloc_demote_page(struct page *page, unsigned long node)
@@ -1572,7 +1572,7 @@ retry:
* reclaim_congested. kswapd will stall and start writing
* pages if the tail of the LRU is all dirty unqueued pages.
*/
- page_check_dirty_writeback(page, &dirty, &writeback);
+ folio_check_dirty_writeback(folio, &dirty, &writeback);
if (dirty || writeback)
stat->nr_dirty++;
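Note that the converted function still passes &folio->page into the ->is_dirty_writeback() call, since that address_space operation takes a struct page at this point in the series. Callers holding only a struct page can derive the folio once and reuse it for every subsequent test; a hedged sketch of such a call site (not part of this patch):

	struct folio *folio = page_folio(page);	/* single head lookup */
	bool dirty, writeback;

	folio_check_dirty_writeback(folio, &dirty, &writeback);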