author     Wu Fengguang <fengguang.wu@intel.com>  2011-05-04 19:54:37 -0600
committer  Wu Fengguang <fengguang.wu@intel.com>  2011-07-09 22:09:01 -0700
commit     d46db3d58233be4be980eb1e42eebe7808bcabab (patch)
tree       6d813b33938d915f0c0633e8615d1ffdcc554c96 /mm/page-writeback.c
parent     36715cef0770b7e2547892b7c3197fc024274630 (diff)
writeback: make writeback_control.nr_to_write straight
Pass struct wb_writeback_work all the way down to writeback_sb_inodes(),
and initialize the struct writeback_control there.

struct writeback_control is basically designed to control writeback of a
single file, but we keep abusing it for writing multiple files in
writeback_sb_inodes() and its callers. This immediately cleans things up:
suddenly wbc.nr_to_write vs. work->nr_pages starts to make sense, and
instead of saving and restoring pages_skipped in writeback_sb_inodes(),
the wbc can always start with a clean zero value.

It also makes a neat IO pattern change: large dirty files are now written
in the full 4MB writeback chunk size, rather than in whatever quota
remained in wbc->nr_to_write.

Acked-by: Jan Kara <jack@suse.cz>
Proposed-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
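The core idea is easy to demonstrate outside the kernel. Below is a
minimal, self-contained userspace sketch of the pattern this commit
adopts; the names here (work_item, control, writeback_files, WRITE_CHUNK)
are hypothetical stand-ins for wb_writeback_work, writeback_control,
writeback_sb_inodes() and the 4MB chunk, not the actual kernel
definitions. The long-lived work item carries the aggregate quota for the
whole pass, while each file gets a freshly initialized per-file control,
so per-file state such as pages_skipped starts at zero with no
save/restore dance:

/*
 * Userspace sketch only: hypothetical stand-ins for the kernel's
 * wb_writeback_work and writeback_control structures.
 */
#include <stdio.h>

#define WRITE_CHUNK 1024		/* per-file chunk, cf. the 4MB chunk */

struct work_item {			/* stands in for wb_writeback_work */
	long nr_pages;			/* aggregate quota for the pass */
};

struct control {			/* stands in for writeback_control */
	long nr_to_write;		/* per-file quota */
	long pages_skipped;		/* per-file skip count */
};

/* Pretend to write one file; consumes quota from the per-file control. */
static long write_one_file(struct control *wbc, long dirty)
{
	long done = dirty < wbc->nr_to_write ? dirty : wbc->nr_to_write;

	wbc->nr_to_write -= done;
	return done;
}

static long writeback_files(struct work_item *work, const long *dirty, int n)
{
	long wrote = 0;

	for (int i = 0; i < n && work->nr_pages > 0; i++) {
		/* A clean control per file: pages_skipped starts at zero. */
		struct control wbc = {
			.nr_to_write   = WRITE_CHUNK,
			.pages_skipped = 0,
		};
		long done = write_one_file(&wbc, dirty[i]);

		work->nr_pages -= done;	/* aggregate quota lives in work */
		wrote += done;
	}
	return wrote;
}

int main(void)
{
	struct work_item work = { .nr_pages = 2048 };
	long dirty[] = { 3000, 100, 500 };

	printf("wrote %ld pages, quota left %ld\n",
	       writeback_files(&work, dirty, 3), work.nr_pages);
	return 0;
}

Under the old scheme the per-file limit was whatever happened to remain
of a shared wbc.nr_to_write; with a fresh control per file, a large dirty
file is always offered the full chunk, which is what produces the
improved IO pattern described above.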
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--  mm/page-writeback.c  17
1 file changed, 5 insertions(+), 12 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 1965d05a29cc..9d6ac2b6d942 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -491,13 +491,6 @@ static void balance_dirty_pages(struct address_space *mapping,
 	struct backing_dev_info *bdi = mapping->backing_dev_info;
 
 	for (;;) {
-		struct writeback_control wbc = {
-			.sync_mode	= WB_SYNC_NONE,
-			.older_than_this = NULL,
-			.nr_to_write	= write_chunk,
-			.range_cyclic	= 1,
-		};
-
 		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
 					global_page_state(NR_UNSTABLE_NFS);
 		nr_writeback = global_page_state(NR_WRITEBACK);
@@ -559,17 +552,17 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * threshold otherwise wait until the disk writes catch
 		 * up.
 		 */
-		trace_wbc_balance_dirty_start(&wbc, bdi);
+		trace_balance_dirty_start(bdi);
 		if (bdi_nr_reclaimable > bdi_thresh) {
-			writeback_inodes_wb(&bdi->wb, &wbc);
-			pages_written += write_chunk - wbc.nr_to_write;
-			trace_wbc_balance_dirty_written(&wbc, bdi);
+			pages_written += writeback_inodes_wb(&bdi->wb,
+							     write_chunk);
+			trace_balance_dirty_written(bdi, pages_written);
 			if (pages_written >= write_chunk)
 				break;		/* We've done our duty */
 		}
-		trace_wbc_balance_dirty_wait(&wbc, bdi);
 		__set_current_state(TASK_UNINTERRUPTIBLE);
 		io_schedule_timeout(pause);
+		trace_balance_dirty_wait(bdi);
 
 		/*
 		 * Increase the delay for each loop, up to our previous