author    Maxim Patlasov <mpatlasov@parallels.com>    2012-10-26 19:49:00 +0400
committer Miklos Szeredi <mszeredi@suse.cz>           2013-01-24 16:21:26 +0100
commit    d07f09f509fb21482096e1975f160b694c0edf84 (patch)
tree      657076e651b8dee622581b754f9e752461bad98d    /fs/fuse/file.c
parent    f8dbdf81821b5ab4c5e86e7b2bd7edb892c159c2 (diff)
fuse: rework fuse_perform_write()
The patch allocates only as many page pointers in fuse_req as are needed to
cover the interval [pos .. pos+len-1]. An inline helper, fuse_wr_pages(), is
introduced to hide this cumbersome arithmetic.

Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Diffstat (limited to 'fs/fuse/file.c')
-rw-r--r--    fs/fuse/file.c    13
1 file changed, 11 insertions(+), 2 deletions(-)
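
[Illustration, not part of the patch] The interval-to-page-count arithmetic that
fuse_wr_pages() hides can be shown in a minimal user-space sketch. The names
wr_pages, PAGE_SHIFT and MAX_PAGES below are stand-ins chosen for the example
(assuming 4 KiB pages and an arbitrary cap in place of FUSE_MAX_PAGES_PER_REQ):

/*
 * Standalone sketch of the page-count arithmetic: how many pages does
 * the byte range [pos .. pos + len - 1] touch?  Not kernel code; it
 * assumes 4 KiB pages instead of PAGE_CACHE_SHIFT and an arbitrary
 * cap instead of the real FUSE_MAX_PAGES_PER_REQ.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB pages */
#define MAX_PAGES  32   /* stand-in for FUSE_MAX_PAGES_PER_REQ */

static unsigned wr_pages(long long pos, size_t len)
{
        unsigned long long first = pos >> PAGE_SHIFT;             /* index of first page touched */
        unsigned long long last  = (pos + len - 1) >> PAGE_SHIFT; /* index of last page touched */
        unsigned npages = (unsigned)(last - first + 1);

        return npages < MAX_PAGES ? npages : MAX_PAGES;           /* clamp, as min_t() does */
}

int main(void)
{
        /* A 5000-byte write starting 100 bytes into page 1 spans two pages. */
        printf("%u\n", wr_pages(4096 + 100, 5000));                /* prints 2 */
        return 0;
}

The point of the change is visible in the second hunk below: fuse_get_req() is
now asked for this computed number of page pointers rather than always
FUSE_MAX_PAGES_PER_REQ, so small writes no longer allocate the maximum-size
request.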
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 5fd06bae1790..b9972502f43a 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -881,11 +881,19 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
 		if (!fc->big_writes)
 			break;
 	} while (iov_iter_count(ii) && count < fc->max_write &&
-		 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);
+		 req->num_pages < req->max_pages && offset == 0);
 
 	return count > 0 ? count : err;
 }
 
+static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
+{
+	return min_t(unsigned,
+		     ((pos + len - 1) >> PAGE_CACHE_SHIFT) -
+		     (pos >> PAGE_CACHE_SHIFT) + 1,
+		     FUSE_MAX_PAGES_PER_REQ);
+}
+
 static ssize_t fuse_perform_write(struct file *file,
 				  struct address_space *mapping,
 				  struct iov_iter *ii, loff_t pos)
@@ -901,8 +909,9 @@ static ssize_t fuse_perform_write(struct file *file,
 	do {
 		struct fuse_req *req;
 		ssize_t count;
+		unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii));
 
-		req = fuse_get_req(fc, FUSE_MAX_PAGES_PER_REQ);
+		req = fuse_get_req(fc, nr_pages);
 		if (IS_ERR(req)) {
 			err = PTR_ERR(req);
 			break;