author    Mike Galbraith <efault@gmx.de>    2010-03-11 17:17:16 +0100
committer Greg Kroah-Hartman <gregkh@suse.de>    2010-09-20 13:18:11 -0700
commit    ac8f51da79873b84461e77ee2b19d02a253add3e (patch)
tree      a9ecad6238a478526f8db5223dfa4ae3f9caf25a /kernel/sched_fair.c
parent    b971284b4af79650f4d7f4caae21c824be3dd7fb (diff)
sched: Fix select_idle_sibling()
commit 8b911acdf08477c059d1c36c21113ab1696c612b upstream

Don't bother with selection when the current cpu is idle. Recent load
balancing changes also make it no longer necessary to check wake_affine()
success before returning the selected sibling, so we now always use it.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268301369.6785.36.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
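The change boils down to two decisions: skip the idle-sibling scan when the
waking CPU is itself idle (cpu_idle = !current->pid, i.e. the idle task is
running), and let an idle target or cpu == prev_cpu bypass the wake_affine()
check. The following is a minimal user-space C sketch of that decision order;
pick_wake_cpu() and its integer flags are illustrative stand-ins, not the
kernel's select_task_rq_fair().

/*
 * Simplified sketch of the decision order this patch introduces.
 * pick_wake_cpu(), wake_affine_ok and the flag parameters are
 * illustrative stand-ins, not actual kernel code.
 */
#include <stdio.h>

static int wake_affine_ok;  /* stands in for wake_affine(affine_sd, p, sync) */

static int pick_wake_cpu(int cpu, int prev_cpu, int sibling_target,
                         int cpu_idle, int have_affine_sd)
{
	/*
	 * Only scan for an idle sibling when this CPU is busy; an idle
	 * waking CPU is already the best affine target (the core of
	 * this fix).
	 */
	if (!cpu_idle && sibling_target >= 0) {
		if (sibling_target != cpu)
			cpu_idle = 1;	/* an idle sibling was found */
		cpu = sibling_target;
	}

	/*
	 * With an affine domain, an idle target or cpu == prev_cpu now
	 * short-circuits the wake_affine() check; otherwise fall back
	 * to it, as before.
	 */
	if (have_affine_sd && (cpu_idle || cpu == prev_cpu || wake_affine_ok))
		return cpu;

	return prev_cpu;	/* placeholder for the slower domain scan */
}

int main(void)
{
	/* e.g. waking on idle CPU 2 a task that last ran on CPU 0 */
	printf("%d\n", pick_wake_cpu(2, 0, -1, 1, 1));	/* prints 2 */
	return 0;
}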
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c | 14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f719dfabf78e..76d9dc0d6ba0 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1432,7 +1432,7 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
int cpu = smp_processor_id();
int prev_cpu = task_cpu(p);
int new_cpu = cpu;
- int want_affine = 0;
+ int want_affine = 0, cpu_idle = !current->pid;
int want_sd = 1;
int sync = wake_flags & WF_SYNC;
@@ -1490,13 +1490,15 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
* If there's an idle sibling in this domain, make that
* the wake_affine target instead of the current cpu.
*/
- if (tmp->flags & SD_SHARE_PKG_RESOURCES)
+ if (!cpu_idle && tmp->flags & SD_SHARE_PKG_RESOURCES)
target = select_idle_sibling(p, tmp, target);
if (target >= 0) {
if (tmp->flags & SD_WAKE_AFFINE) {
affine_sd = tmp;
want_affine = 0;
+ if (target != cpu)
+ cpu_idle = 1;
}
cpu = target;
}
@@ -1512,6 +1514,7 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
sd = tmp;
}
+#ifdef CONFIG_FAIR_GROUP_SCHED
if (sched_feat(LB_SHARES_UPDATE)) {
/*
* Pick the largest domain to update shares over
@@ -1528,9 +1531,12 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
spin_lock(&rq->lock);
}
}
+#endif
- if (affine_sd && wake_affine(affine_sd, p, sync))
- return cpu;
+ if (affine_sd) {
+ if (cpu_idle || cpu == prev_cpu || wake_affine(affine_sd, p, sync))
+ return cpu;
+ }
while (sd) {
int load_idx = sd->forkexec_idx;