author		Peter Zijlstra <a.p.zijlstra@chello.nl>		2009-10-22 18:07:47 +0530
committer	Thomas Gleixner <tglx@linutronix.de>		2009-10-29 09:08:24 +0100
commit		581d1c646e0504492bff3e97a113eba7ec8b40a3 (patch)
tree		95dee7fb83a26f85b46daa02ada3d82cdf0e9185
parent		337946f84f00487e20e4bc946ac272afeb3256c7 (diff)
download	lwn-581d1c646e0504492bff3e97a113eba7ec8b40a3.tar.gz
		lwn-581d1c646e0504492bff3e97a113eba7ec8b40a3.zip
sched: add smt_gain
The idea is that multi-threading a core yields more work capacity than a
single thread; provide a way to express a static gain for threads.

[ dino: backport to 31-rt ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Dinakar Guniguntala <dino@in.ibm.com>
Cc: John Stultz <johnstul@us.ibm.com>
Cc: Darren Hart <dvhltc@us.ibm.com>
Cc: John Kacur <jkacur@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--	include/linux/sched.h	1
-rw-r--r--	include/linux/topology.h	1
-rw-r--r--	kernel/sched.c	8
3 files changed, 9 insertions, 1 deletion
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 42b302f12392..09dea08f4d8c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -966,6 +966,7 @@ struct sched_domain {
unsigned int newidle_idx;
unsigned int wake_idx;
unsigned int forkexec_idx;
+ unsigned int smt_gain;
int flags; /* See SD_* */
enum sched_domain_level level;
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 7402c1a27c4f..6203ae5067ce 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -99,6 +99,7 @@ int arch_update_cpu_topology(void);
| SD_SHARE_CPUPOWER, \
.last_balance = jiffies, \
.balance_interval = 1, \
+ .smt_gain = 1178, /* 15% */ \
}
#endif
#endif /* CONFIG_SCHED_SMT */
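A note on the default set above: 1178 is a fixed-point multiplier, and the "15%" comment follows from the scheduler's load scale. Assuming SCHED_LOAD_SHIFT is 10 in this tree (so SCHED_LOAD_SCALE is 1024), 1178 / 1024 is roughly 1.15. A minimal userspace sketch of that arithmetic; the snippet and the hard-coded shift are illustrative assumptions, not part of the patch:

#include <stdio.h>

int main(void)
{
	unsigned int sched_load_shift = 10;          /* assumed SCHED_LOAD_SHIFT */
	unsigned int scale = 1u << sched_load_shift; /* 1024, i.e. SCHED_LOAD_SCALE */
	unsigned int smt_gain = 1178;                /* default from the hunk above */

	/* 1178 / 1024 ~= 1.15: the SMT siblings of a core are credited with
	 * roughly 15% more capacity than a single thread would be. */
	printf("smt_gain as a ratio: %.4f\n", (double)smt_gain / scale);
	return 0;
}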
diff --git a/kernel/sched.c b/kernel/sched.c
index e5b344b5b89e..9c750e363b96 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8729,9 +8729,15 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
weight = cpumask_weight(sched_domain_span(sd));
/*
* SMT siblings share the power of a single core.
+ * Usually multiple threads get a better yield out of
+ * that one core than a single thread would have,
+ * reflect that in sd->smt_gain.
*/
- if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1)
+ if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
+ power *= sd->smt_gain;
power /= weight;
+ power >>= SCHED_LOAD_SHIFT;
+ }
sg_inc_cpu_power(sd->groups, power);
return;
}
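To see what the changed branch does end to end, here is a small standalone sketch of the SD_SHARE_CPUPOWER case in init_sched_groups_power(); it is userspace and illustrative only, the helper name, the hard-coded SCHED_LOAD_SHIFT of 10 and the two-sibling weight are assumptions, while the arithmetic mirrors the hunk above:

#include <stdio.h>

#define SCHED_LOAD_SHIFT	10			/* assumed, as in this tree */
#define SCHED_LOAD_SCALE	(1UL << SCHED_LOAD_SHIFT)

/* Mirrors the new SMT branch: scale the core's power by smt_gain,
 * split it across the siblings, then drop the fixed-point shift. */
static unsigned long smt_sibling_power(unsigned long power,
				       unsigned long smt_gain,
				       unsigned int weight)
{
	power *= smt_gain;
	power /= weight;
	power >>= SCHED_LOAD_SHIFT;
	return power;
}

int main(void)
{
	/* One core worth of power (1024) shared by two SMT siblings,
	 * using the default smt_gain of 1178. */
	unsigned long per_thread = smt_sibling_power(SCHED_LOAD_SCALE, 1178, 2);

	/* Prints 589: each sibling is credited with ~0.58 of a core, so the
	 * pair together advertises ~1178/1024 ~= 1.15 cores rather than 1.0. */
	printf("per-thread power = %lu\n", per_thread);
	return 0;
}

For comparison, before this change each sibling would simply have been credited with power / weight, i.e. 512 in the two-thread case.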