author	Ingo Molnar <mingo@elte.hu>	2007-10-15 17:00:18 +0200
committer	Ingo Molnar <mingo@elte.hu>	2007-10-15 17:00:18 +0200
commit	da84d96176729fb48a8458561e5d8647103168b8 (patch)
tree	24c952d956178faa9ac8b5c505513c93a87f4f6b	/kernel/sched.c
parent	e5f32a3856caabe745381279f7f32e3b581b59dc (diff)
sched: reintroduce cache-hot affinity
reintroduce a simplified version of cache-hot/cold scheduling affinity. This improves performance with certain SMP workloads, such as sysbench.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
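The heuristic is deliberately simple: a task counts as cache-hot if it last executed within sysctl_sched_migration_cost nanoseconds of the current runqueue clock, on the assumption that its working set is then still resident in the CPU's caches. The user-space sketch below mirrors that check; struct task_stub is a hypothetical stand-in for the single task_struct field the check needs, and the 0.5 ms threshold reflects the scheduler default of this era.

#include <stdint.h>

/* Hypothetical stand-in for the one task_struct field the check needs. */
struct task_stub {
	uint64_t exec_start;	/* runqueue-clock timestamp (ns) of the task's last execution */
};

/* Migration-cost threshold in nanoseconds; 500000 (0.5 ms) mirrors the kernel default. */
static uint64_t sysctl_sched_migration_cost = 500000ULL;

/*
 * Mirror of task_hot() from the patch below: a task is considered
 * cache-hot when it ran within the last sysctl_sched_migration_cost ns.
 */
static inline int task_hot(const struct task_stub *p, uint64_t now)
{
	int64_t delta = (int64_t)(now - p->exec_start);

	return delta < (int64_t)sysctl_sched_migration_cost;
}

With scheduler debugging enabled, kernels of this era expose the threshold as a sysctl (sched_migration_cost), so the cut-off can be tuned without rebuilding.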
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	27
1 file changed, 27 insertions(+), 0 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 791dd08c692f..089d8b12ab76 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2119,6 +2119,17 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
 }
 
 /*
+ * Is this task likely cache-hot:
+ */
+static inline int
+task_hot(struct task_struct *p, unsigned long long now, struct sched_domain *sd)
+{
+	s64 delta = now - p->se.exec_start;
+
+	return delta < (long long)sysctl_sched_migration_cost;
+}
+
+/*
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  */
 static
@@ -2139,6 +2150,22 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 	if (task_running(rq, p))
 		return 0;
 
+	/*
+	 * Aggressive migration if:
+	 * 1) task is cache cold, or
+	 * 2) too many balance attempts have failed.
+	 */
+
+	if (sd->nr_balance_failed > sd->cache_nice_tries) {
+#ifdef CONFIG_SCHEDSTATS
+		if (task_hot(p, rq->clock, sd))
+			schedstat_inc(sd, lb_hot_gained[idle]);
+#endif
+		return 1;
+	}
+
+	if (task_hot(p, rq->clock, sd))
+		return 0;
 	return 1;
 }
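Taken together, the can_migrate_task() hunk implements a soft affinity: a cache-hot task normally stays where it is, but that preference is overridden once load balancing has failed more often than sd->cache_nice_tries allows. A minimal sketch of the resulting decision order, reusing task_stub and task_hot() from the sketch above (struct domain_stub and may_migrate() are illustrative names, not kernel API):

/* Hypothetical stand-in for the two sched_domain fields the policy consults. */
struct domain_stub {
	unsigned int nr_balance_failed;	/* consecutive failed balance attempts */
	unsigned int cache_nice_tries;	/* failures tolerated before forcing migration */
};

/*
 * Simplified model of the policy the patch adds to can_migrate_task():
 * migrate aggressively once balancing keeps failing, otherwise keep
 * cache-hot tasks on their current CPU.
 */
static int may_migrate(const struct task_stub *p, const struct domain_stub *sd,
		       uint64_t now)
{
	if (sd->nr_balance_failed > sd->cache_nice_tries)
		return 1;	/* balancing is starving: migrate even a hot task */

	if (task_hot(p, now))
		return 0;	/* still cache-hot: leave it local */

	return 1;		/* cache-cold: free to migrate */
}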