path: root/fs/splice.c
author     Jens Axboe <axboe@suse.de>	2006-05-01 19:50:48 +0200
committer  Jens Axboe <axboe@suse.de>	2006-05-01 19:50:48 +0200
commit     0568b409c74f7a125d92a09a3f386785700ef688 (patch)
tree       79125b2f4755e98949f0d941a0092e5d3367bbff /fs/splice.c
parent     46e678c96bbd775abd05d3ddbe2fd334794f9157 (diff)
[PATCH] splice: fix bugs in pipe_to_file()
Found by Oleg Nesterov <oleg@tv-sign.ru>, fixed by me.

- Only allow full pages to go to the page cache.
- Check page != buf->page instead of using PIPE_BUF_FLAG_STOLEN.
- Remember to clear 'stolen' if add_to_page_cache() fails.

And as a cleanup on that:

- Make the bottom fall-through logic a little less convoluted. Also make
  the steal path hold an extra reference to the page, so we don't have
  to differentiate between stolen and non-stolen at the end.

Signed-off-by: Jens Axboe <axboe@suse.de>
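For orientation before the hunks, here is a condensed sketch of how the affected part of pipe_to_file() reads with this patch applied. It is stitched together from the diff below; the "..." comments mark elided context, and the kunmap_atomic()/prepare_write() lines are assumed from the surrounding 2.6-era splice code rather than shown in the hunks, so treat it as a reading aid, not the applied result:

	/* Only try to steal the pipe buffer's page for a full-page write. */
	if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
		/* ... steal buf->page and add_to_page_cache() it, falling
		 * back to find_page on failure (see the hunks) ... */

		page_cache_get(page);	/* extra ref taken on the steal path */

		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
			lru_cache_add(page);
	} else {
find_page:
		/* ... normal page cache lookup/allocation ... */
	}

	/* ... a_ops->prepare_write() and its error handling (assumed) ... */

	/* Copy only when the page did not come from the pipe buffer. */
	if (buf->page != page) {
		char *dst = kmap_atomic(page, KM_USER0);

		memcpy(dst + offset, src + buf->offset, this_len);
		kunmap_atomic(dst, KM_USER0);	/* assumed pairing, not in the hunks */
	}

	ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
	if (!ret) {
		/* Done: return the number of bytes written, mark the page accessed. */
		ret = this_len;
		mark_page_accessed(page);
		balance_dirty_pages_ratelimited(mapping);
	} else if (ret == AOP_TRUNCATED_PAGE) {
		/* commit_write() raced with truncate: drop this page and retry. */
		page_cache_release(page);
		goto find_page;
	}
out:
	page_cache_release(page);	/* unconditional: covers stolen and copied pages */
	unlock_page(page);

The fixes from the list above are all visible here: stealing is only attempted for a full page, the stolen-vs-copied distinction is made by comparing buf->page with page rather than testing PIPE_BUF_FLAG_STOLEN, and since that flag is removed entirely there is no 'stolen' state left to clear when add_to_page_cache() fails. The extra reference taken with page_cache_get() is what allows the single unconditional page_cache_release() at out:.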
Diffstat (limited to 'fs/splice.c')
-rw-r--r--	fs/splice.c	37

1 file changed, 19 insertions(+), 18 deletions(-)
diff --git a/fs/splice.c b/fs/splice.c
index 9df28d30efa0..1633778f3652 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -78,7 +78,7 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
 		return 1;
 	}
 
-	buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
+	buf->flags |= PIPE_BUF_FLAG_LRU;
 	return 0;
 }
 
@@ -87,7 +87,7 @@ static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
 {
 	page_cache_release(buf->page);
 	buf->page = NULL;
-	buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
+	buf->flags &= ~PIPE_BUF_FLAG_LRU;
 }
 
 static void *page_cache_pipe_buf_map(struct file *file,
@@ -587,9 +587,10 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
 		this_len = PAGE_CACHE_SIZE - offset;
 
 	/*
-	 * Reuse buf page, if SPLICE_F_MOVE is set.
+	 * Reuse buf page, if SPLICE_F_MOVE is set and we are doing a full
+	 * page.
 	 */
-	if (sd->flags & SPLICE_F_MOVE) {
+	if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
 		/*
 		 * If steal succeeds, buf->page is now pruned from the vm
 		 * side (LRU and page cache) and we can reuse it. The page
@@ -604,6 +605,8 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
 			goto find_page;
 		}
 
+		page_cache_get(page);
+
 		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
 			lru_cache_add(page);
 	} else {
@@ -662,7 +665,7 @@ find_page:
 	} else if (ret)
 		goto out;
 
-	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
+	if (buf->page != page) {
 		char *dst = kmap_atomic(page, KM_USER0);
 
 		memcpy(dst + offset, src + buf->offset, this_len);
@@ -671,22 +674,20 @@ find_page:
 	}
 
 	ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
-	if (ret == AOP_TRUNCATED_PAGE) {
+	if (!ret) {
+		/*
+		 * Return the number of bytes written and mark page as
+		 * accessed, we are now done!
+		 */
+		ret = this_len;
+		mark_page_accessed(page);
+		balance_dirty_pages_ratelimited(mapping);
+	} else if (ret == AOP_TRUNCATED_PAGE) {
 		page_cache_release(page);
 		goto find_page;
-	} else if (ret)
-		goto out;
-
-	/*
-	 * Return the number of bytes written.
-	 */
-	ret = this_len;
-	mark_page_accessed(page);
-	balance_dirty_pages_ratelimited(mapping);
+	}
 out:
-	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN))
-		page_cache_release(page);
-
+	page_cache_release(page);
 	unlock_page(page);
 out_nomem:
 	buf->ops->unmap(info, buf);