author		Christoph Hellwig <hch@lst.de>	2010-06-04 11:29:58 +0200
committer	Al Viro <viro@zeniv.linux.org.uk>	2010-08-09 16:47:33 -0400
commit		155130a4f7848b1aac439cab6bda1a175507c71c (patch)
tree		5019b14b0a9221b08959196e01e3f9326957d678 /fs/buffer.c
parent		6e1db88d536adcbbfe562b2d4b7d6425784fff12 (diff)
get rid of block_write_begin_newtrunc
Move the call to vmtruncate, used to get rid of excess blocks instantiated beyond i_size, into the callers in preparation for the new truncate sequence, and rename the non-truncating version to block_write_begin. While we're at it, also remove several unused arguments to block_write_begin.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
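With the wrapper gone, the vmtruncate call it used to make moves into each caller's ->write_begin (those updates happen outside fs/buffer.c and are not part of the diff below). A minimal sketch of what such a caller might look like after this change; the myfs_* names are hypothetical and stand in for a real filesystem's own ->write_begin, get_block_t callback and error handling:

static void myfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	/* A failed write_begin may have instantiated blocks past i_size;
	 * trim them off again, as the removed wrapper used to do. */
	if (to > inode->i_size)
		vmtruncate(inode, inode->i_size);
}

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	int ret;

	/* Note the slimmed-down argument list: no file or fsdata. */
	ret = block_write_begin(mapping, pos, len, flags, pagep,
				myfs_get_block);
	if (unlikely(ret))
		myfs_write_failed(mapping, pos + len);
	return ret;
}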
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c | 61
1 file changed, 9 insertions(+), 52 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index c319c49da511..50efa339e051 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1962,14 +1962,13 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
EXPORT_SYMBOL(__block_write_begin);
/*
- * Filesystems implementing the new truncate sequence should use the
- * _newtrunc postfix variant which won't incorrectly call vmtruncate.
+ * block_write_begin takes care of the basic task of block allocation and
+ * bringing partial write blocks uptodate first.
+ *
* The filesystem needs to handle block truncation upon failure.
*/
-int block_write_begin_newtrunc(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata,
- get_block_t *get_block)
+int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
+ unsigned flags, struct page **pagep, get_block_t *get_block)
{
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
struct page *page;
@@ -1989,44 +1988,6 @@ int block_write_begin_newtrunc(struct file *file, struct address_space *mapping,
*pagep = page;
return status;
}
-EXPORT_SYMBOL(block_write_begin_newtrunc);
-
-/*
- * block_write_begin takes care of the basic task of block allocation and
- * bringing partial write blocks uptodate first.
- *
- * If *pagep is not NULL, then block_write_begin uses the locked page
- * at *pagep rather than allocating its own. In this case, the page will
- * not be unlocked or deallocated on failure.
- */
-int block_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata,
- get_block_t *get_block)
-{
- int ret;
-
- ret = block_write_begin_newtrunc(file, mapping, pos, len, flags,
- pagep, fsdata, get_block);
-
- /*
- * prepare_write() may have instantiated a few blocks
- * outside i_size. Trim these off again. Don't need
- * i_size_read because we hold i_mutex.
- *
- * Filesystems which pass down their own page also cannot
- * call into vmtruncate here because it would lead to lock
- * inversion problems (*pagep is locked). This is a further
- * example of where the old truncate sequence is inadequate.
- */
- if (unlikely(ret) && *pagep == NULL) {
- loff_t isize = mapping->host->i_size;
- if (pos + len > isize)
- vmtruncate(mapping->host, isize);
- }
-
- return ret;
-}
EXPORT_SYMBOL(block_write_begin);
int block_write_end(struct file *file, struct address_space *mapping,
@@ -2357,7 +2318,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
err = cont_expand_zero(file, mapping, pos, bytes);
if (err)
- goto out;
+ return err;
zerofrom = *bytes & ~PAGE_CACHE_MASK;
if (pos+len > *bytes && zerofrom & (blocksize-1)) {
@@ -2365,11 +2326,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
(*bytes)++;
}
- *pagep = NULL;
- err = block_write_begin_newtrunc(file, mapping, pos, len,
- flags, pagep, fsdata, get_block);
-out:
- return err;
+ return block_write_begin(mapping, pos, len, flags, pagep, get_block);
}
EXPORT_SYMBOL(cont_write_begin);
@@ -2511,8 +2468,8 @@ int nobh_write_begin(struct address_space *mapping,
unlock_page(page);
page_cache_release(page);
*pagep = NULL;
- return block_write_begin_newtrunc(NULL, mapping, pos, len,
- flags, pagep, fsdata, get_block);
+ return block_write_begin(mapping, pos, len, flags, pagep,
+ get_block);
}
if (PageMappedToDisk(page))