path: root/kernel/sched.c
author     Con Kolivas <kernel@kolivas.org>        2005-11-08 21:38:58 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>   2005-11-09 07:56:32 -0800
commit     3b0bd9bc6f3b8a47853d1b1de4520de3878e8941 (patch)
tree       5900a1fbf46d3c83aca09fca0ba664af40151082 /kernel/sched.c
parent     dad1c65c8000f4485d8602e1875ded77e0d72133 (diff)
[PATCH] sched: smp nice bias busy queues on idle rebalance
To intensify the 'nice' support across physical cpus on SMP we can bias the
loads on idle rebalancing.  To prevent idle rebalance from trying to pull
tasks from queues that appear heavily loaded we only bias the load if there
is more than one task running.

Add some minor micro-optimisations and have only one return from
__source_load and __target_load functions.

Fix the fact that target_load was not biased by priority when type == 0.

Signed-off-by: Con Kolivas <kernel@kolivas.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 41
1 file changed, 23 insertions(+), 18 deletions(-)
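As a rough illustration of the rule this patch introduces, here is a small
standalone C sketch (not kernel code) of the post-patch __source_load()
decision: the load is biased by the runqueue's prio_bias when balancing busy
queues, but on idle rebalance only when more than one task is running on the
queue.  The struct, enum, and constant below are simplified stand-ins for the
kernel's runqueue_t, enum idle_type, and SCHED_LOAD_SCALE, and the values are
made up for the example; __target_load() follows the same pattern with max()
instead of min().

/* Standalone sketch, not kernel code: models the post-patch biasing rule. */
#include <stdio.h>

enum idle_type { NOT_IDLE, NEWLY_IDLE };        /* simplified stand-in */

struct rq_model {                               /* toy stand-in for runqueue_t */
        unsigned long nr_running;
        unsigned long cpu_load[3];
        unsigned long prio_bias;
};

#define SCHED_LOAD_SCALE 128UL                  /* illustrative resolution factor */

static unsigned long model_source_load(const struct rq_model *rq, int type,
                                       enum idle_type idle)
{
        unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
        unsigned long cpu_load = type > 0 ? rq->cpu_load[type - 1] : 0;
        unsigned long source_load;

        if (type == 0)
                source_load = load_now;
        else
                source_load = cpu_load < load_now ? cpu_load : load_now; /* min() */

        /* Bias busy rebalances; on idle rebalance only queues with > 1 task. */
        if (idle == NOT_IDLE || rq->nr_running > 1)
                source_load *= rq->prio_bias;

        return source_load;
}

int main(void)
{
        struct rq_model rq = { .nr_running = 1, .cpu_load = {128, 128, 128},
                               .prio_bias = 3 };

        /* One running task: not biased on idle rebalance... */
        printf("idle rebalance, 1 task: %lu\n", model_source_load(&rq, 0, NEWLY_IDLE));
        /* ...but biased when balancing busy queues. */
        printf("busy rebalance, 1 task: %lu\n", model_source_load(&rq, 0, NOT_IDLE));
        return 0;
}

With the toy numbers above, the single-task queue reports 128 on an idle
rebalance but 384 (128 * prio_bias) on a busy rebalance, which is the
asymmetry the changelog describes.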
diff --git a/kernel/sched.c b/kernel/sched.c
index d9dbf8ee6ca4..ec9ea9119b98 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -972,22 +972,26 @@ void kick_process(task_t *p)
 static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
 {
         runqueue_t *rq = cpu_rq(cpu);
-        unsigned long cpu_load = rq->cpu_load[type-1],
+        unsigned long source_load, cpu_load = rq->cpu_load[type-1],
                 load_now = rq->nr_running * SCHED_LOAD_SCALE;
 
-        if (idle == NOT_IDLE) {
+        if (type == 0)
+                source_load = load_now;
+        else
+                source_load = min(cpu_load, load_now);
+
+        if (idle == NOT_IDLE || rq->nr_running > 1)
                 /*
-                 * If we are balancing busy runqueues the load is biased by
-                 * priority to create 'nice' support across cpus.
+                 * If we are busy rebalancing the load is biased by
+                 * priority to create 'nice' support across cpus. When
+                 * idle rebalancing we should only bias the source_load if
+                 * there is more than one task running on that queue to
+                 * prevent idle rebalance from trying to pull tasks from a
+                 * queue with only one running task.
                  */
-                cpu_load *= rq->prio_bias;
-                load_now *= rq->prio_bias;
-        }
+                source_load *= rq->prio_bias;
 
-        if (type == 0)
-                return load_now;
-
-        return min(cpu_load, load_now);
+        return source_load;
 }
 
 static inline unsigned long source_load(int cpu, int type)
@@ -1001,17 +1005,18 @@ static inline unsigned long source_load(int cpu, int type)
 static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
 {
         runqueue_t *rq = cpu_rq(cpu);
-        unsigned long cpu_load = rq->cpu_load[type-1],
+        unsigned long target_load, cpu_load = rq->cpu_load[type-1],
                 load_now = rq->nr_running * SCHED_LOAD_SCALE;
 
         if (type == 0)
-                return load_now;
+                target_load = load_now;
+        else
+                target_load = max(cpu_load, load_now);
 
-        if (idle == NOT_IDLE) {
-                cpu_load *= rq->prio_bias;
-                load_now *= rq->prio_bias;
-        }
-        return max(cpu_load, load_now);
+        if (idle == NOT_IDLE || rq->nr_running > 1)
+                target_load *= rq->prio_bias;
+
+        return target_load;
 }
 
 static inline unsigned long target_load(int cpu, int type)