author    Jens Axboe <jens.axboe@oracle.com>	2007-06-15 13:10:37 +0200
committer Jens Axboe <jens.axboe@oracle.com>	2007-06-15 13:10:37 +0200
commit    17ee4f49ab2c802c7818fa71c4e7e351a7230b86 (patch)
tree      0ef214dd76cf9f95aa023021aa2f2c0173a9b41d /fs
parent    22b1a9203ea634ac0ee5240e021613da3328275f (diff)
download  lwn-17ee4f49ab2c802c7818fa71c4e7e351a7230b86.tar.gz
          lwn-17ee4f49ab2c802c7818fa71c4e7e351a7230b86.zip
splice: adjust balance_dirty_pages_ratelimited() call
As we have potentially dirtied more than 1 page, we should indicate as such
to the dirty page balancing. So call balance_dirty_pages_ratelimited_nr()
and pass in the approximate number of pages we dirtied.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
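For illustration only, here is a minimal userspace sketch of the round-up the
patch performs, assuming 4 KiB pages; bytes_to_dirty_pages() is a hypothetical
helper for this example, not part of the kernel:

#include <stdio.h>

/* Kernel names for the page cache page size and its log2; 4 KiB assumed. */
#define PAGE_CACHE_SHIFT 12
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

/* Round a byte count returned by splice up to whole pages, mirroring
 * nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT in the patch. */
static unsigned long bytes_to_dirty_pages(unsigned long ret)
{
	return (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
}

int main(void)
{
	/* 1 byte -> 1 page, 4096 bytes -> 1 page, 4097 bytes -> 2 pages */
	printf("%lu %lu %lu\n", bytes_to_dirty_pages(1),
	       bytes_to_dirty_pages(4096), bytes_to_dirty_pages(4097));
	return 0;
}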
Diffstat (limited to 'fs')
-rw-r--r--	fs/splice.c	10
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/fs/splice.c b/fs/splice.c
index cb211360273a..12d247f6ece5 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -811,7 +811,10 @@ generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out,
 
 	ret = __splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
 	if (ret > 0) {
+		unsigned long nr_pages;
+
 		*ppos += ret;
+		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
 		/*
 		 * If file or inode is SYNC and we actually wrote some data,
@@ -824,7 +827,7 @@ generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out,
 			if (err)
 				ret = err;
 		}
-		balance_dirty_pages_ratelimited(mapping);
+		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
 	}
 
 	return ret;
@@ -863,7 +866,10 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
 
 	ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
 	if (ret > 0) {
+		unsigned long nr_pages;
+
 		*ppos += ret;
+		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
 		/*
 		 * If file or inode is SYNC and we actually wrote some data,
@@ -878,7 +884,7 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
 			if (err)
 				ret = err;
 		}
-		balance_dirty_pages_ratelimited(mapping);
+		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
 	}
 
 	return ret;
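For context, the _nr variant the patch switches to takes an explicit count of
dirtied pages, while the plain call credits only a single page. The
relationship looked roughly like the sketch below, paraphrased from memory of
include/linux/writeback.h in that era; it is not part of this patch:

/* Forward declaration so the sketch stands alone. */
struct address_space;

void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied);

static inline void balance_dirty_pages_ratelimited(struct address_space *mapping)
{
	/* The old call in splice.c therefore only credited one dirtied page,
	 * even when a splice had dirtied many. */
	balance_dirty_pages_ratelimited_nr(mapping, 1);
}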