author     Linus Torvalds <torvalds@linux-foundation.org>  2023-02-22 14:47:20 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-02-22 14:47:20 -0800
commit     d8ca6dbb8de7923fcfb18e0b0b123f37c3225519 (patch)
tree       a198d66351677a5c36995068fe436acb83766a84 /fs/nfs/file.c
parent     9fc2f99030b55027d84723b0dcbbe9f7e21b9c6c (diff)
parent     1683ed16ff1a51705f58e8083ed93a7428a543f2 (diff)
Merge tag 'nfs-for-6.3-1' of git://git.linux-nfs.org/projects/anna/linux-nfs
Pull NFS client updates from Anna Schumaker:

 "New Features:
   - Convert the read and write paths to use folios

  Bugfixes and Cleanups:
   - Fix tracepoint state manager flag printing
   - Fix disabling swap files
   - Fix NFSv4 client identifier sysfs path in the documentation
   - Don't clear NFS_CAP_COPY if server returns NFS4ERR_OFFLOAD_DENIED
   - Treat GETDEVICEINFO errors as a layout failure
   - Replace kmap_atomic() calls with kmap_local_page()
   - Constify sunrpc sysfs kobj_type structures"

* tag 'nfs-for-6.3-1' of git://git.linux-nfs.org/projects/anna/linux-nfs: (25 commits)
  fs/nfs: Replace kmap_atomic() with kmap_local_page() in dir.c
  pNFS/filelayout: treat GETDEVICEINFO errors as layout failure
  Documentation: Fix sysfs path for the NFSv4 client identifier
  nfs42: do not fail with EIO if ssc returns NFS4ERR_OFFLOAD_DENIED
  NFS: fix disabling of swap
  SUNRPC: make kobj_type structures constant
  nfs4trace: fix state manager flag printing
  NFS: Remove unnecessary check in nfs_read_folio()
  NFS: Improve tracing of nfs_wb_folio()
  NFS: Enable tracing of nfs_invalidate_folio() and nfs_launder_folio()
  NFS: fix up nfs_release_folio() to try to release the page
  NFS: Clean up O_DIRECT request allocation
  NFS: Fix up nfs_vm_page_mkwrite() for folios
  NFS: Convert nfs_write_begin/end to use folios
  NFS: Remove unused function nfs_wb_page()
  NFS: Convert buffered writes to use folios
  NFS: Convert the function nfs_wb_page() to use folios
  NFS: Convert buffered reads to use folios
  NFS: Add a helper nfs_wb_folio()
  NFS: Convert the remaining pagelist helper functions to support folios
  ...
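Among the cleanups above is the replacement of kmap_atomic() with kmap_local_page() in fs/nfs/dir.c. The sketch below shows the general shape of that conversion, not the actual dir.c hunk; the wrapper functions and their arguments are made up for illustration. kmap_local_page() only requires that the mapping stay on the same thread and be unmapped in stack order, so it avoids the preemption and pagefault disabling implied by kmap_atomic().

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative before/after of the kmap conversion; the helpers here
 * are not taken from fs/nfs/dir.c. */

/* Before: kmap_atomic() disables preemption (and pagefaults) for the
 * duration of the mapping. */
static void copy_from_page_old(struct page *page, void *dst, size_t len)
{
	void *kaddr = kmap_atomic(page);

	memcpy(dst, kaddr, len);
	kunmap_atomic(kaddr);
}

/* After: kmap_local_page() gives a thread-local mapping; unmap in
 * reverse (stack) order with kunmap_local(). */
static void copy_from_page_new(struct page *page, void *dst, size_t len)
{
	void *kaddr = kmap_local_page(page);

	memcpy(dst, kaddr, len);
	kunmap_local(kaddr);
}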
Diffstat (limited to 'fs/nfs/file.c')
-rw-r--r--  fs/nfs/file.c  124
1 file changed, 72 insertions(+), 52 deletions(-)
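The fs/nfs/file.c diff below applies one substitution pattern throughout: struct page operations are replaced by their struct folio equivalents, and PAGE_SIZE-based arithmetic gives way to folio_size() and offset_in_folio() so the code stays correct once multi-page folios are in use. A condensed before/after sketch of that pattern, with helper names that are illustrative rather than taken from the diff:

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Page-based idiom, as it looked before this series. */
static void finish_write_old(struct page *page)
{
	if (!PageUptodate(page))
		SetPageUptodate(page);
	unlock_page(page);
	put_page(page);
}

/* Folio-based equivalent used throughout the converted code. */
static void finish_write_new(struct folio *folio)
{
	if (!folio_test_uptodate(folio))
		folio_mark_uptodate(folio);
	folio_unlock(folio);
	folio_put(folio);
}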
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index b0f3c9339e70..893625eacab9 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -277,27 +277,28 @@ EXPORT_SYMBOL_GPL(nfs_file_fsync);
* and that the new data won't completely replace the old data in
* that range of the file.
*/
-static bool nfs_full_page_write(struct page *page, loff_t pos, unsigned int len)
+static bool nfs_folio_is_full_write(struct folio *folio, loff_t pos,
+ unsigned int len)
{
- unsigned int pglen = nfs_page_length(page);
- unsigned int offset = pos & (PAGE_SIZE - 1);
+ unsigned int pglen = nfs_folio_length(folio);
+ unsigned int offset = offset_in_folio(folio, pos);
unsigned int end = offset + len;
return !pglen || (end >= pglen && !offset);
}
-static bool nfs_want_read_modify_write(struct file *file, struct page *page,
- loff_t pos, unsigned int len)
+static bool nfs_want_read_modify_write(struct file *file, struct folio *folio,
+ loff_t pos, unsigned int len)
{
/*
* Up-to-date pages, those with ongoing or full-page write
* don't need read/modify/write
*/
- if (PageUptodate(page) || PagePrivate(page) ||
- nfs_full_page_write(page, pos, len))
+ if (folio_test_uptodate(folio) || folio_test_private(folio) ||
+ nfs_folio_is_full_write(folio, pos, len))
return false;
- if (pnfs_ld_read_whole_page(file->f_mapping->host))
+ if (pnfs_ld_read_whole_page(file_inode(file)))
return true;
/* Open for reading too? */
if (file->f_mode & FMODE_READ)
@@ -305,6 +306,15 @@ static bool nfs_want_read_modify_write(struct file *file, struct page *page,
return false;
}
+static struct folio *
+nfs_folio_grab_cache_write_begin(struct address_space *mapping, pgoff_t index)
+{
+ unsigned fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
+
+ return __filemap_get_folio(mapping, index, fgp_flags,
+ mapping_gfp_mask(mapping));
+}
+
/*
* This does the "real" work of the write. We must allocate and lock the
* page to be sent back to the generic routine, which then copies the
@@ -314,32 +324,31 @@ static bool nfs_want_read_modify_write(struct file *file, struct page *page,
* increment the page use counts until he is done with the page.
*/
static int nfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ loff_t pos, unsigned len, struct page **pagep,
+ void **fsdata)
{
- int ret;
- pgoff_t index = pos >> PAGE_SHIFT;
- struct page *page;
+ struct folio *folio;
int once_thru = 0;
+ int ret;
dfprintk(PAGECACHE, "NFS: write_begin(%pD2(%lu), %u@%lld)\n",
file, mapping->host->i_ino, len, (long long) pos);
start:
- page = grab_cache_page_write_begin(mapping, index);
- if (!page)
+ folio = nfs_folio_grab_cache_write_begin(mapping, pos >> PAGE_SHIFT);
+ if (!folio)
return -ENOMEM;
- *pagep = page;
+ *pagep = &folio->page;
- ret = nfs_flush_incompatible(file, page);
+ ret = nfs_flush_incompatible(file, folio);
if (ret) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
} else if (!once_thru &&
- nfs_want_read_modify_write(file, page, pos, len)) {
+ nfs_want_read_modify_write(file, folio, pos, len)) {
once_thru = 1;
- ret = nfs_read_folio(file, page_folio(page));
- put_page(page);
+ ret = nfs_read_folio(file, folio);
+ folio_put(folio);
if (!ret)
goto start;
}
@@ -347,11 +356,12 @@ start:
}
static int nfs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata)
{
- unsigned offset = pos & (PAGE_SIZE - 1);
struct nfs_open_context *ctx = nfs_file_open_context(file);
+ struct folio *folio = page_folio(page);
+ unsigned offset = offset_in_folio(folio, pos);
int status;
dfprintk(PAGECACHE, "NFS: write_end(%pD2(%lu), %u@%lld)\n",
@@ -361,26 +371,26 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
* Zero any uninitialised parts of the page, and then mark the page
* as up to date if it turns out that we're extending the file.
*/
- if (!PageUptodate(page)) {
- unsigned pglen = nfs_page_length(page);
+ if (!folio_test_uptodate(folio)) {
+ size_t fsize = folio_size(folio);
+ unsigned pglen = nfs_folio_length(folio);
unsigned end = offset + copied;
if (pglen == 0) {
- zero_user_segments(page, 0, offset,
- end, PAGE_SIZE);
- SetPageUptodate(page);
+ folio_zero_segments(folio, 0, offset, end, fsize);
+ folio_mark_uptodate(folio);
} else if (end >= pglen) {
- zero_user_segment(page, end, PAGE_SIZE);
+ folio_zero_segment(folio, end, fsize);
if (offset == 0)
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
} else
- zero_user_segment(page, pglen, PAGE_SIZE);
+ folio_zero_segment(folio, pglen, fsize);
}
- status = nfs_updatepage(file, page, offset, copied);
+ status = nfs_update_folio(file, folio, offset, copied);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
if (status < 0)
return status;
@@ -402,14 +412,16 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
static void nfs_invalidate_folio(struct folio *folio, size_t offset,
size_t length)
{
+ struct inode *inode = folio_file_mapping(folio)->host;
dfprintk(PAGECACHE, "NFS: invalidate_folio(%lu, %zu, %zu)\n",
folio->index, offset, length);
if (offset != 0 || length < folio_size(folio))
return;
/* Cancel any unstarted writes on this page */
- nfs_wb_folio_cancel(folio->mapping->host, folio);
+ nfs_wb_folio_cancel(inode, folio);
folio_wait_fscache(folio);
+ trace_nfs_invalidate_folio(inode, folio);
}
/*
@@ -423,8 +435,13 @@ static bool nfs_release_folio(struct folio *folio, gfp_t gfp)
dfprintk(PAGECACHE, "NFS: release_folio(%p)\n", folio);
/* If the private flag is set, then the folio is not freeable */
- if (folio_test_private(folio))
- return false;
+ if (folio_test_private(folio)) {
+ if ((current_gfp_context(gfp) & GFP_KERNEL) != GFP_KERNEL ||
+ current_is_kswapd())
+ return false;
+ if (nfs_wb_folio(folio_file_mapping(folio)->host, folio) < 0)
+ return false;
+ }
return nfs_fscache_release_folio(folio, gfp);
}
@@ -465,12 +482,15 @@ static void nfs_check_dirty_writeback(struct folio *folio,
static int nfs_launder_folio(struct folio *folio)
{
struct inode *inode = folio->mapping->host;
+ int ret;
dfprintk(PAGECACHE, "NFS: launder_folio(%ld, %llu)\n",
inode->i_ino, folio_pos(folio));
folio_wait_fscache(folio);
- return nfs_wb_page(inode, &folio->page);
+ ret = nfs_wb_folio(inode, folio);
+ trace_nfs_launder_folio_done(inode, folio, ret);
+ return ret;
}
static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
@@ -547,22 +567,22 @@ const struct address_space_operations nfs_file_aops = {
*/
static vm_fault_t nfs_vm_page_mkwrite(struct vm_fault *vmf)
{
- struct page *page = vmf->page;
struct file *filp = vmf->vma->vm_file;
struct inode *inode = file_inode(filp);
unsigned pagelen;
vm_fault_t ret = VM_FAULT_NOPAGE;
struct address_space *mapping;
+ struct folio *folio = page_folio(vmf->page);
dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%pD2(%lu), offset %lld)\n",
- filp, filp->f_mapping->host->i_ino,
- (long long)page_offset(page));
+ filp, filp->f_mapping->host->i_ino,
+ (long long)folio_file_pos(folio));
sb_start_pagefault(inode->i_sb);
/* make sure the cache has finished storing the page */
- if (PageFsCache(page) &&
- wait_on_page_fscache_killable(vmf->page) < 0) {
+ if (folio_test_fscache(folio) &&
+ folio_wait_fscache_killable(folio) < 0) {
ret = VM_FAULT_RETRY;
goto out;
}
@@ -571,25 +591,25 @@ static vm_fault_t nfs_vm_page_mkwrite(struct vm_fault *vmf)
nfs_wait_bit_killable,
TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
- lock_page(page);
- mapping = page_file_mapping(page);
+ folio_lock(folio);
+ mapping = folio_file_mapping(folio);
if (mapping != inode->i_mapping)
goto out_unlock;
- wait_on_page_writeback(page);
+ folio_wait_writeback(folio);
- pagelen = nfs_page_length(page);
+ pagelen = nfs_folio_length(folio);
if (pagelen == 0)
goto out_unlock;
ret = VM_FAULT_LOCKED;
- if (nfs_flush_incompatible(filp, page) == 0 &&
- nfs_updatepage(filp, page, 0, pagelen) == 0)
+ if (nfs_flush_incompatible(filp, folio) == 0 &&
+ nfs_update_folio(filp, folio, 0, pagelen) == 0)
goto out;
ret = VM_FAULT_SIGBUS;
out_unlock:
- unlock_page(page);
+ folio_unlock(folio);
out:
sb_end_pagefault(inode->i_sb);
return ret;