author     Matthew Wilcox (Oracle) <willy@infradead.org>    2021-08-16 23:36:31 -0400
committer  Matthew Wilcox (Oracle) <willy@infradead.org>    2022-01-04 13:15:33 -0500
commit     9f2b04a25a41b1f41b3cead4f56854a4192ec5b0
tree       27f4d1ed8d4626e3bf9938c1e768eefdc4e4bfd9    /mm/filemap.c
parent     5bf34d7c7ffe773c3b3c1b6ebf39e0f34a2436ec
filemap: Add folio_put_wait_locked()
Convert all three callers of put_and_wait_on_page_locked() to
folio_put_wait_locked(). This shrinks the kernel overall by 19 bytes.
filemap_update_page() shrinks by 19 bytes while __migration_entry_wait()
is unchanged. folio_put_wait_locked() is 14 bytes smaller than
put_and_wait_on_page_locked(), but pmd_migration_entry_wait() grows by
14 bytes. It removes the assumption from pmd_migration_entry_wait()
that pages cannot be larger than a PMD (which is true today, but may be
interesting to explore in the future).

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
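As a rough illustration of the caller-side conversion described above (a
minimal sketch, not part of this patch; example_wait_for_unlock() is a
hypothetical caller, while page_folio() and folio_put_wait_locked() are the
interfaces the patch touches):

	/*
	 * Hypothetical caller holding a reference on a page, wanting to wait
	 * for the page to be unlocked without pinning it (and thereby
	 * blocking migration) while it sleeps.
	 */
	static void example_wait_for_unlock(struct page *page)
	{
		/* Old interface: pass the struct page directly. */
		/* put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE); */

		/*
		 * New interface: look up the containing folio and pass that.
		 * The reference is dropped before sleeping, so neither the
		 * page nor the folio may be touched after this returns.
		 */
		folio_put_wait_locked(page_folio(page), TASK_UNINTERRUPTIBLE);
	}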
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--    mm/filemap.c    27
1 file changed, 15 insertions, 12 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 39c4c46c6133..5dd3c6e39c9f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1259,10 +1259,10 @@ enum behavior {
* __folio_lock() waiting on then setting PG_locked.
*/
SHARED, /* Hold ref to page and check the bit when woken, like
- * wait_on_page_writeback() waiting on PG_writeback.
+ * folio_wait_writeback() waiting on PG_writeback.
*/
DROP, /* Drop ref to page before wait, no check when woken,
- * like put_and_wait_on_page_locked() on PG_locked.
+ * like folio_put_wait_locked() on PG_locked.
*/
};
@@ -1439,22 +1439,21 @@ int folio_wait_bit_killable(struct folio *folio, int bit_nr)
EXPORT_SYMBOL(folio_wait_bit_killable);
/**
- * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
- * @page: The page to wait for.
+ * folio_put_wait_locked - Drop a reference and wait for it to be unlocked
+ * @folio: The folio to wait for.
* @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc).
*
- * The caller should hold a reference on @page. They expect the page to
+ * The caller should hold a reference on @folio. They expect the page to
* become unlocked relatively soon, but do not wish to hold up migration
- * (for example) by holding the reference while waiting for the page to
+ * (for example) by holding the reference while waiting for the folio to
* come unlocked. After this function returns, the caller should not
- * dereference @page.
+ * dereference @folio.
*
- * Return: 0 if the page was unlocked or -EINTR if interrupted by a signal.
+ * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
*/
-int put_and_wait_on_page_locked(struct page *page, int state)
+int folio_put_wait_locked(struct folio *folio, int state)
{
- return folio_wait_bit_common(page_folio(page), PG_locked, state,
- DROP);
+ return folio_wait_bit_common(folio, PG_locked, state, DROP);
}
/**
@@ -2447,7 +2446,11 @@ static int filemap_update_page(struct kiocb *iocb,
goto unlock_mapping;
if (!(iocb->ki_flags & IOCB_WAITQ)) {
filemap_invalidate_unlock_shared(mapping);
- put_and_wait_on_page_locked(&folio->page, TASK_KILLABLE);
+ /*
+ * This is where we usually end up waiting for a
+ * previously submitted readahead to finish.
+ */
+ folio_put_wait_locked(folio, TASK_KILLABLE);
return AOP_TRUNCATED_PAGE;
}
error = __folio_lock_async(folio, iocb->ki_waitq);
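For completeness, a hedged sketch of consuming the return value documented in
the new kernel-doc above (illustrative only; example_drop_and_wait() is a
hypothetical helper, not part of this patch):

	static int example_drop_and_wait(struct folio *folio)
	{
		/*
		 * Drops our reference, then sleeps until the folio is
		 * unlocked or a fatal signal arrives.
		 */
		int err = folio_put_wait_locked(folio, TASK_KILLABLE);

		if (err)
			return err;	/* -EINTR: interrupted by a signal */

		/*
		 * The folio was observed unlocked at some point, but we no
		 * longer hold a reference; re-find it if it is still needed.
		 */
		return 0;
	}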