author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-06-27 13:41:25 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-06-27 14:31:38 +0200
commit	039a1c41b3a489e34593ea1e1687f6fdad6b13ab (patch)
tree	162e9b0b7a2472be292089bf4f84623bdc6f34ab /kernel/sched.c
parent	3e5459b4bea3ca2618cc02d56d12639f2cba531d (diff)
sched: fix sched_balance_self() smp group balancing
Finding the least idle cpu is more accurate when done with updated shares.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	3
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index cdd09462fc98..39d5495540d2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2128,6 +2128,9 @@ static int sched_balance_self(int cpu, int flag)
 			sd = tmp;
 	}
 
+	if (sd)
+		update_shares(sd);
+
 	while (sd) {
 		cpumask_t span, tmpmask;
 		struct sched_group *group;
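
For readers outside the scheduler code: the point of calling update_shares() before the group scan is that the "least idle cpu" decision is only as good as the per-group weights it reads. The stand-alone toy below (plain userspace C, not kernel code; the group names and load values are invented for illustration) shows how a pick made from stale cached loads can differ from one made after a refresh:

#include <stdio.h>

/* Toy model of a scheduling group: a cached load (what a stale
 * reading would see) and the actual current load (what a refresh,
 * standing in here for update_shares(), would reveal). */
struct group {
	const char *name;
	unsigned long cached_load;
	unsigned long actual_load;
};

/* Stand-in for update_shares(): bring the cached weights up to date. */
static void refresh_loads(struct group *g, int n)
{
	for (int i = 0; i < n; i++)
		g[i].cached_load = g[i].actual_load;
}

/* Simplified "find the least idle group": smallest cached load wins. */
static const char *least_loaded(const struct group *g, int n)
{
	int best = 0;
	for (int i = 1; i < n; i++)
		if (g[i].cached_load < g[best].cached_load)
			best = i;
	return g[best].name;
}

int main(void)
{
	struct group g[] = {
		{ "groupA", 100, 900 },	/* looked idle, is now busy */
		{ "groupB", 500, 200 },	/* looked busy, is now idle */
	};

	printf("pick from stale loads:     %s\n", least_loaded(g, 2));
	refresh_loads(g, 2);
	printf("pick from refreshed loads: %s\n", least_loaded(g, 2));
	return 0;
}

Run as-is, the toy picks groupA from the stale loads and groupB after the refresh. The patch performs the analogous refresh once, just before the while (sd) scan over the domain hierarchy, so the comparison operates on current rather than stale weights.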