summaryrefslogtreecommitdiff
path: root/fs/bcachefs/fs-io.c
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2020-10-09 00:09:20 -0400
committerKent Overstreet <kent.overstreet@linux.dev>2023-10-22 17:08:44 -0400
commit9ba2eb25f017800c3d00eac2bbc3c99451c3bae2 (patch)
tree377cc58f33a39da93e383af39f343bdf9cc6ecaa /fs/bcachefs/fs-io.c
parent61ce38b862c17acccd0df0004d69710d8b438e99 (diff)
downloadlwn-9ba2eb25f017800c3d00eac2bbc3c99451c3bae2.tar.gz
lwn-9ba2eb25f017800c3d00eac2bbc3c99451c3bae2.zip
bcachefs: Fix __bch2_truncate_page()
__bch2_truncate_page() will mark some of the blocks in a page as unallocated. But, if the page is mmapped (and writable), every block in the page needs to be marked dirty, else those blocks won't be written by __bch2_writepage(). The solution is to change those userspace mappings to RO, so that we force bch2_page_mkwrite() to be called again. Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com> Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/fs-io.c')
-rw-r--r--fs/bcachefs/fs-io.c7
1 file changed, 7 insertions, 0 deletions
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index c0995723ddd2..0290f7410a5c 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -27,6 +27,7 @@
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/pagevec.h>
+#include <linux/rmap.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uio.h>
@@ -2160,6 +2161,12 @@ static int __bch2_truncate_page(struct bch_inode_info *inode,
ret = bch2_get_page_disk_reservation(c, inode, page, false);
BUG_ON(ret);
+ /*
+ * This removes any writeable userspace mappings; we need to force
+ * .page_mkwrite to be called again before any mmapped writes, to
+ * redirty the full page:
+ */
+ page_mkclean(page);
filemap_dirty_folio(mapping, page_folio(page));
unlock:
unlock_page(page);