author		Tejun Heo <tj@kernel.org>	2015-05-22 18:23:29 -0400
committer	Jens Axboe <axboe@fb.com>	2015-06-02 08:38:13 -0600
commit		c7981433ef05e67b1b40740b2c40edbd4476b659 (patch)
tree		bf44403c5d6d62fa0c9111db3a9f2d857b431a3b /mm/page-writeback.c
parent		e9f07dfd7086a0b7e9ce98bb97b7422861aad40b (diff)
writeback: make __wb_writeout_inc() and hard_dirty_limit() take wb_domain as a parameter
Currently __wb_writeout_inc() and hard_dirty_limit() assume global_wb_domain; however, cgroup writeback support requires considering per-memcg wb_domain too.

This patch separates out the domain-specific part of __wb_writeout_inc() into wb_domain_writeout_inc(), which takes wb_domain as a parameter, and adds the parameter to hard_dirty_limit(). This will allow these two functions to handle per-memcg wb_domains.

This patch doesn't introduce any behavioral changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jan Kara <jack@suse.cz>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Greg Thelen <gthelen@google.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
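The refactoring pattern of this patch is to pull the domain-specific bookkeeping into a helper that takes the domain as an explicit argument, so that the existing global path and a future per-memcg path can share the same code. The following standalone C sketch illustrates that shape only; the demo_* names are made up for illustration and do not exist in the kernel.

#include <stdio.h>

/* Stand-in for struct wb_domain: just the fields the sketch needs. */
struct demo_domain {
	unsigned long completions;	/* stands in for dom->completions */
	unsigned long dirty_limit;	/* stands in for dom->dirty_limit */
};

static struct demo_domain global_domain;	/* analogue of global_wb_domain */

/* Domain-aware helper: operates on whichever domain the caller passes. */
static void demo_domain_writeout_inc(struct demo_domain *dom)
{
	dom->completions++;
}

/* Old-style wrapper that keeps existing callers on the global domain. */
static void demo_writeout_inc(void)
{
	demo_domain_writeout_inc(&global_domain);
}

/* hard_dirty_limit() analogue: the domain is now an explicit argument. */
static unsigned long demo_hard_dirty_limit(struct demo_domain *dom,
					   unsigned long thresh)
{
	return thresh > dom->dirty_limit ? thresh : dom->dirty_limit;
}

int main(void)
{
	struct demo_domain memcg_domain = { .dirty_limit = 200 };

	demo_writeout_inc();			 /* global path, unchanged for callers */
	demo_domain_writeout_inc(&memcg_domain); /* per-group path enabled by the new parameter */

	printf("global completions: %lu\n", global_domain.completions);
	printf("per-group limit: %lu\n", demo_hard_dirty_limit(&memcg_domain, 100));
	return 0;
}

As in the patch, old callers keep using the parameterless wrapper while new per-group code passes its own domain, so behavior for the global case is unchanged.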
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	37
1 file changed, 21 insertions, 16 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 38d45d8c71f5..a4d0ceefd097 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -445,17 +445,12 @@ static unsigned long wp_next_time(unsigned long cur_time)
 	return cur_time;
 }
 
-/*
- * Increment the wb's writeout completion count and the global writeout
- * completion count. Called from test_clear_page_writeback().
- */
-static inline void __wb_writeout_inc(struct bdi_writeback *wb)
+static void wb_domain_writeout_inc(struct wb_domain *dom,
+				   struct fprop_local_percpu *completions,
+				   unsigned int max_prop_frac)
 {
-	struct wb_domain *dom = &global_wb_domain;
-
-	__inc_wb_stat(wb, WB_WRITTEN);
-	__fprop_inc_percpu_max(&dom->completions, &wb->completions,
-			       wb->bdi->max_prop_frac);
+	__fprop_inc_percpu_max(&dom->completions, completions,
+			       max_prop_frac);
 	/* First event after period switching was turned off? */
 	if (!unlikely(dom->period_time)) {
 		/*
@@ -469,6 +464,17 @@ static inline void __wb_writeout_inc(struct bdi_writeback *wb)
 	}
 }
 
+/*
+ * Increment @wb's writeout completion count and the global writeout
+ * completion count. Called from test_clear_page_writeback().
+ */
+static inline void __wb_writeout_inc(struct bdi_writeback *wb)
+{
+	__inc_wb_stat(wb, WB_WRITTEN);
+	wb_domain_writeout_inc(&global_wb_domain, &wb->completions,
+			       wb->bdi->max_prop_frac);
+}
+
 void wb_writeout_inc(struct bdi_writeback *wb)
 {
 	unsigned long flags;
@@ -571,10 +577,9 @@ static unsigned long dirty_freerun_ceiling(unsigned long thresh,
 	return (thresh + bg_thresh) / 2;
 }
 
-static unsigned long hard_dirty_limit(unsigned long thresh)
+static unsigned long hard_dirty_limit(struct wb_domain *dom,
+				      unsigned long thresh)
 {
-	struct wb_domain *dom = &global_wb_domain;
-
 	return max(thresh, dom->dirty_limit);
 }
@@ -744,7 +749,7 @@ static void wb_position_ratio(struct dirty_throttle_control *dtc)
 	struct bdi_writeback *wb = dtc->wb;
 	unsigned long write_bw = wb->avg_write_bandwidth;
 	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
-	unsigned long limit = hard_dirty_limit(dtc->thresh);
+	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
 	unsigned long wb_thresh = dtc->wb_thresh;
 	unsigned long x_intercept;
 	unsigned long setpoint;		/* dirty pages' target balance point */
@@ -1029,7 +1034,7 @@ static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
 	struct bdi_writeback *wb = dtc->wb;
 	unsigned long dirty = dtc->dirty;
 	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
-	unsigned long limit = hard_dirty_limit(dtc->thresh);
+	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
 	unsigned long setpoint = (freerun + limit) / 2;
 	unsigned long write_bw = wb->avg_write_bandwidth;
 	unsigned long dirty_ratelimit = wb->dirty_ratelimit;
@@ -1681,7 +1686,7 @@ void throttle_vm_writeout(gfp_t gfp_mask)
         for ( ; ; ) {
 		global_dirty_limits(&background_thresh, &dirty_thresh);
-		dirty_thresh = hard_dirty_limit(dirty_thresh);
+		dirty_thresh = hard_dirty_limit(&global_wb_domain, dirty_thresh);
 
                 /*
                  * Boost the allowable dirty threshold a bit for page