author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2007-10-16 23:25:50 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-17 08:42:45 -0700
commit		3e26c149c358529b1605f8959341d34bc4b880a3
tree		9d173b1753b86bcf03a8591e2509e3162234447c
parent		04fbfdc14e5f48463820d6b9807daa5e9c92c51f
mm: dirty balancing for tasks
Based on ideas of Andrew:
http://marc.info/?l=linux-kernel&m=102912915020543&w=2
Scale the bdi dirty limit inversely with the task's dirty rate.
This gives heavy writers a lower dirty limit than the occasional writer.
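
For illustration, here is the scaling applied by task_dirty_limit() in the
patch below, rewritten as a minimal standalone user-space sketch. The
numerator/denominator pair stands in for the task's share p_t of recently
dirtied pages as prop_fraction_single() reports it; the function name and
the example limits are made up for this sketch.

	#include <stdio.h>

	/*
	 * Sketch of task_dirty_limit()'s scaling:
	 *
	 *	dirty -= (dirty/8) * p_{t}
	 *
	 * where p_t = numerator/denominator is the task's share of
	 * recently dirtied pages.  The result is clamped so the limit
	 * never drops below half the original value.
	 */
	static long scale_dirty_limit(long dirty, long numerator, long denominator)
	{
		long orig = dirty;
		unsigned long long inv = dirty >> 3;	/* dirty/8 */

		inv *= numerator;			/* (dirty/8) * p_t ... */
		inv /= denominator;			/* ... as an integer fraction */

		dirty -= inv;
		if (dirty < orig / 2)			/* defensive lower bound */
			dirty = orig / 2;

		return dirty;
	}

	int main(void)
	{
		/* heavy writer, 3/4 of recent dirtyings: 800 - 100*3/4 = 725 */
		printf("heavy:      %ld\n", scale_dirty_limit(800, 3, 4));
		/* occasional writer, 1/100 of recent dirtyings: 800 - 1 = 799 */
		printf("occasional: %ld\n", scale_dirty_limit(800, 1, 100));
		return 0;
	}

A task doing all the recent dirtying thus loses at most dirty/8 of its
limit, while a mostly idle task keeps nearly the full limit.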
Andrea proposed something similar:
http://lwn.net/Articles/152277/
The main disadvantage of his patch is that he uses an unrelated quantity to
measure time, which leaves him with a workload-dependent tunable. Other than
that, the two approaches appear quite similar.
[akpm@linux-foundation.org: fix warning]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/init_task.h	 1
-rw-r--r--	include/linux/sched.h		 2
-rw-r--r--	kernel/fork.c			10
-rw-r--r--	mm/page-writeback.c		50
4 files changed, 62 insertions(+), 1 deletion(-)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 513bc3e489f0..3a619f57a2b2 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -171,6 +171,7 @@ extern struct group_info init_groups;
 	[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),		\
 	[PIDTYPE_SID]  = INIT_PID_LINK(PIDTYPE_SID),		\
 	},							\
+	.dirties = INIT_PROP_LOCAL_SINGLE(dirties),		\
 	INIT_TRACE_IRQFLAGS					\
 	INIT_LOCKDEP						\
 }
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 592e3a55f818..59738efff8ad 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -74,6 +74,7 @@ struct sched_param {
 #include <linux/pid.h>
 #include <linux/percpu.h>
 #include <linux/topology.h>
+#include <linux/proportions.h>
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
 #include <linux/futex.h>
@@ -1149,6 +1150,7 @@ struct task_struct {
 #ifdef CONFIG_FAULT_INJECTION
 	int make_it_fail;
 #endif
+	struct prop_local_single dirties;
 };
 
 /*
diff --git a/kernel/fork.c b/kernel/fork.c
index 3fc3c1383912..163325af8179 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -107,6 +107,7 @@ static struct kmem_cache *mm_cachep;
 
 void free_task(struct task_struct *tsk)
 {
+	prop_local_destroy_single(&tsk->dirties);
 	free_thread_info(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
 	free_task_struct(tsk);
@@ -163,6 +164,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 {
 	struct task_struct *tsk;
 	struct thread_info *ti;
+	int err;
 
 	prepare_to_copy(orig);
 
@@ -178,6 +180,14 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 
 	*tsk = *orig;
 	tsk->stack = ti;
+
+	err = prop_local_init_single(&tsk->dirties);
+	if (err) {
+		free_thread_info(ti);
+		free_task_struct(tsk);
+		return NULL;
+	}
+
 	setup_thread_stack(tsk, orig);
 
 #ifdef CONFIG_CC_STACKPROTECTOR
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b0360546ac86..4073d531cd7b 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -118,6 +118,7 @@ static void background_writeout(unsigned long _min_pages);
  *
  */
 static struct prop_descriptor vm_completions;
+static struct prop_descriptor vm_dirties;
 
 static unsigned long determine_dirtyable_memory(void);
@@ -146,6 +147,7 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
 	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
 		int shift = calc_period_shift();
 		prop_change_shift(&vm_completions, shift);
+		prop_change_shift(&vm_dirties, shift);
 	}
 	return ret;
 }
@@ -159,6 +161,11 @@ static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
 	__prop_inc_percpu(&vm_completions, &bdi->completions);
 }
 
+static inline void task_dirty_inc(struct task_struct *tsk)
+{
+	prop_inc_single(&vm_dirties, &tsk->dirties);
+}
+
 /*
  * Obtain an accurate fraction of the BDI's portion.
  */
@@ -198,6 +205,37 @@ clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty)
 	*pbdi_dirty = min(*pbdi_dirty, avail_dirty);
 }
 
+static inline void task_dirties_fraction(struct task_struct *tsk,
+		long *numerator, long *denominator)
+{
+	prop_fraction_single(&vm_dirties, &tsk->dirties,
+				numerator, denominator);
+}
+
+/*
+ * scale the dirty limit
+ *
+ * task specific dirty limit:
+ *
+ *   dirty -= (dirty/8) * p_{t}
+ */
+void task_dirty_limit(struct task_struct *tsk, long *pdirty)
+{
+	long numerator, denominator;
+	long dirty = *pdirty;
+	u64 inv = dirty >> 3;
+
+	task_dirties_fraction(tsk, &numerator, &denominator);
+	inv *= numerator;
+	do_div(inv, denominator);
+
+	dirty -= inv;
+	if (dirty < *pdirty/2)
+		dirty = *pdirty/2;
+
+	*pdirty = dirty;
+}
+
 /*
  * Work out the current dirty-memory clamping and background writeout
  * thresholds.
@@ -304,6 +342,7 @@ get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
 
 		*pbdi_dirty = bdi_dirty;
 		clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
+		task_dirty_limit(current, pbdi_dirty);
 	}
 }
 
@@ -720,6 +759,7 @@ void __init page_writeback_init(void)
 
 	shift = calc_period_shift();
 	prop_descriptor_init(&vm_completions, shift);
+	prop_descriptor_init(&vm_dirties, shift);
 }
 
 /**
@@ -998,7 +1038,7 @@ EXPORT_SYMBOL(redirty_page_for_writepage);
  * If the mapping doesn't provide a set_page_dirty a_op, then
  * just fall through and assume that it wants buffer_heads.
  */
-int fastcall set_page_dirty(struct page *page)
+static int __set_page_dirty(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
@@ -1016,6 +1056,14 @@ int fastcall set_page_dirty(struct page *page)
 	}
 	return 0;
 }
+
+int fastcall set_page_dirty(struct page *page)
+{
+	int ret = __set_page_dirty(page);
+	if (ret)
+		task_dirty_inc(current);
+	return ret;
+}
 EXPORT_SYMBOL(set_page_dirty);
 
 /*