author    Paul Turner <pjt@google.com>    2010-11-15 15:47:08 -0800
committer Ingo Molnar <mingo@elte.hu>     2010-11-18 13:27:49 +0100
commit    3b3d190ec3683d568fd2ebaead5e1ec7f97b6e37
tree      6839bc473200dcb69c5de998921684ac569ce18b
parent    c66eaf619c0c7937e9ded160ae83b5a7a6b19b56
sched: Implement demand based update_cfs_load()
When the system is busy, dilation of rq->next_balance makes lb->update_shares()
insufficiently frequent for threads which don't sleep (no dequeue/enqueue
updates). Adjust for this by making demand based updates based on the
accumulation of execution time sufficient to wrap our averaging window.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234938.291159744@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index dadab4d13875..e914a716e1d4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -353,9 +353,16 @@ struct cfs_rq {
 	 */
 	unsigned long h_load;
 
+	/*
+	 * Maintaining per-cpu shares distribution for group scheduling
+	 *
+	 * load_stamp is the last time we updated the load average
+	 * load_last is the last time we updated the load average and saw load
+	 * load_unacc_exec_time is currently unaccounted execution time
+	 */
 	u64 load_avg;
 	u64 load_period;
-	u64 load_stamp, load_last;
+	u64 load_stamp, load_last, load_unacc_exec_time;
 
 	unsigned long load_contribution;
 #endif
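
The hunk shown here only adds the load_unacc_exec_time field; the rest of the patch is not included on this page. For context, below is a minimal user-space sketch of the mechanism the commit message describes: execution time is accumulated per charge, and once enough has built up to wrap the averaging window, a load-average update is forced even for threads that never dequeue. The window constant, the simplified struct, and the update_cfs_load()/account_exec() helpers here are illustrative assumptions, not the actual kernel implementation.

/* demand_update_sketch.c -- illustration only, not kernel code. */
#include <stdio.h>
#include <stdint.h>

/* Assumed averaging window of 10ms; the real window is a scheduler tunable. */
#define SHARES_WINDOW_NS (10ULL * 1000 * 1000)

struct cfs_rq {
	uint64_t load_avg;
	uint64_t load_period;
	uint64_t load_stamp, load_last, load_unacc_exec_time;
	unsigned long load_weight;	/* stand-in for the queue's current load */
};

/* Simplified stand-in for update_cfs_load(): fold the time elapsed since
 * load_stamp into the running average and clear the unaccounted counter. */
static void update_cfs_load(struct cfs_rq *cfs_rq, uint64_t now)
{
	uint64_t delta = now - cfs_rq->load_stamp;

	cfs_rq->load_period += delta;
	cfs_rq->load_avg += delta * cfs_rq->load_weight;
	cfs_rq->load_stamp = now;
	if (cfs_rq->load_weight)
		cfs_rq->load_last = now;
	cfs_rq->load_unacc_exec_time = 0;

	printf("update at %llu ns: load_avg=%llu over period=%llu\n",
	       (unsigned long long)now,
	       (unsigned long long)cfs_rq->load_avg,
	       (unsigned long long)cfs_rq->load_period);
}

/* Called for every chunk of execution time charged to the queue (roughly the
 * role of update_curr()).  A thread that never sleeps still triggers updates
 * here once enough time accumulates to wrap the window. */
static void account_exec(struct cfs_rq *cfs_rq, uint64_t now, uint64_t delta_exec)
{
	cfs_rq->load_unacc_exec_time += delta_exec;
	if (cfs_rq->load_unacc_exec_time > SHARES_WINDOW_NS)
		update_cfs_load(cfs_rq, now);
}

int main(void)
{
	struct cfs_rq rq = { .load_weight = 1024 };
	uint64_t now = 0;

	/* Simulate a CPU-bound thread charged 1ms per tick: it never
	 * dequeues, yet the load average still gets refreshed on demand. */
	for (int tick = 0; tick < 50; tick++) {
		now += 1000000;
		account_exec(&rq, now, 1000000);
	}
	return 0;
}

With the assumed 10ms window, the loop above forces an update roughly every tenth tick, which is the point of the change: update frequency is driven by accumulated execution time rather than by how often load balancing happens to run.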