author	Peter Zijlstra <peterz@infradead.org>	2013-08-28 10:32:32 +0200
committer	Ingo Molnar <mingo@kernel.org>	2013-09-12 19:14:43 +0200
commit	b72ff13ce6021b37459afacbccc0bc9b16989013 (patch)
tree	7e390bbfaa3072b2d52dd3edac1439f5b808b969 /kernel
parent	6263322c5e8ffdaf5eaaa29e9d02d84a786aa970 (diff)
sched/fair: Reduce local_group logic
Try and reduce the local_group logic by pulling most of it into
update_sd_lb_stats.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-mgezl354xgyhiyrte78fdkpd@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
 kernel/sched/fair.c | 29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7325ca7b8978..f9f438530bee 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4563,6 +4563,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	unsigned long load;
 	int i;
 
+	memset(sgs, 0, sizeof(*sgs));
+
 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
@@ -4581,10 +4583,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 			sgs->idle_cpus++;
 	}
 
-	if (local_group && (env->idle != CPU_NEWLY_IDLE ||
-			time_after_eq(jiffies, group->sgp->next_update)))
-		update_group_power(env->sd, env->dst_cpu);
-
 	/* Adjust by relative CPU power of the group */
 	sgs->group_power = group->sgp->power;
 	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;
@@ -4677,11 +4675,17 @@ static inline void update_sd_lb_stats(struct lb_env *env,
 		if (local_group) {
 			sds->local = sg;
 			sgs = &sds->local_stat;
+
+			if (env->idle != CPU_NEWLY_IDLE ||
+			    time_after_eq(jiffies, sg->sgp->next_update))
+				update_group_power(env->sd, env->dst_cpu);
 		}
 
-		memset(sgs, 0, sizeof(*sgs));
 		update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
 
+		if (local_group)
+			goto next_group;
+
 		/*
 		 * In case the child domain prefers tasks go to siblings
 		 * first, lower the sg capacity to one so that we'll try
@@ -4692,19 +4696,20 @@ static inline void update_sd_lb_stats(struct lb_env *env,
 		 * heaviest group when it is already under-utilized (possible
 		 * with a large weight task outweighs the tasks on the system).
 		 */
-		if (prefer_sibling && !local_group &&
-		    sds->local && sds->local_stat.group_has_capacity)
+		if (prefer_sibling && sds->local &&
+		    sds->local_stat.group_has_capacity)
 			sgs->group_capacity = min(sgs->group_capacity, 1U);
 
-		/* Now, start updating sd_lb_stats */
-		sds->total_load += sgs->group_load;
-		sds->total_pwr += sgs->group_power;
-
-		if (!local_group && update_sd_pick_busiest(env, sds, sg, sgs)) {
+		if (update_sd_pick_busiest(env, sds, sg, sgs)) {
 			sds->busiest = sg;
 			sds->busiest_stat = *sgs;
 		}
 
+next_group:
+		/* Now, start updating sd_lb_stats */
+		sds->total_load += sgs->group_load;
+		sds->total_pwr += sgs->group_power;
+
 		sg = sg->next;
 	} while (sg != env->sd->groups);
 }
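
To see the resulting control flow in one piece rather than across hunks, here is a minimal, self-contained C sketch of the loop shape update_sd_lb_stats() converges on after this patch: the caller handles the local-group special cases (the group-power refresh, and skipping the busiest-group selection via a goto), while the per-group helper unconditionally zeroes and fills its stats. Every struct, helper, and number below is invented for illustration; this is not the kernel code itself.

#include <stdio.h>
#include <string.h>

/* Toy stand-ins for the kernel's sg_lb_stats / sd_lb_stats. */
struct sg_stats {
	unsigned long group_load;
	unsigned long group_power;
};

struct sd_stats {
	unsigned long total_load;
	unsigned long total_pwr;
	struct sg_stats local_stat;
	int busiest;			/* index of busiest group, -1 if none */
	unsigned long busiest_load;
};

/* Stand-in for update_sg_lb_stats(): zeroes *sgs itself, as the
 * patch makes the real helper do, then fills in fake numbers. */
static void fill_group_stats(int g, struct sg_stats *sgs)
{
	memset(sgs, 0, sizeof(*sgs));
	sgs->group_load = 100 + 37 * (unsigned long)g;
	sgs->group_power = 1024;
}

int main(void)
{
	struct sd_stats sds = { .busiest = -1 };
	const int local = 2;		/* pretend group 2 is the local group */

	for (int g = 0; g < 4; g++) {
		struct sg_stats tmp, *sgs = &tmp;

		if (g == local) {
			/* Local group's stats live in sds, and only the
			 * local group needs its power refreshed; the kernel
			 * calls update_group_power() at this point. */
			sgs = &sds.local_stat;
		}

		fill_group_stats(g, sgs);

		/* The local group can never be "busiest"; jump straight
		 * to the shared accounting. */
		if (g == local)
			goto next_group;

		if (sgs->group_load > sds.busiest_load) {
			sds.busiest = g;
			sds.busiest_load = sgs->group_load;
		}

next_group:
		/* Totals are accumulated for every group, local included. */
		sds.total_load += sgs->group_load;
		sds.total_pwr += sgs->group_power;
	}

	printf("busiest=%d total_load=%lu total_pwr=%lu\n",
	       sds.busiest, sds.total_load, sds.total_pwr);
	return 0;
}

The goto keeps the total_load/total_pwr accounting in a single place that runs for every group, while only non-local groups pass through the busiest-group selection. That is what lets the patch drop both !local_group tests and move the memset() into the helper.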