path: root/kernel/sched.c
author Ingo Molnar <mingo@elte.hu> 2007-08-09 11:16:51 +0200
committer Ingo Molnar <mingo@elte.hu> 2007-08-09 11:16:51 +0200
commit 6e82a3befe91423e501c2124312bd805be0048eb (patch)
tree 04e179ca9ef5e4efa4ff5814831094569e611bf3 /kernel/sched.c
parent 2daa357705bfe68788132cf9079930ca948a90af (diff)
sched: optimize update_rq_clock() calls in the load-balancer
optimize update_rq_clock() calls in the load-balancer: update them right after locking the runqueue(s), so that the pull functions do not have to call it.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- kernel/sched.c | 14
1 file changed, 8 insertions(+), 6 deletions(-)
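
As a rough, self-contained illustration of the pattern the commit message describes (everything below is a simplified stand-in, not the actual kernel code: the spinlocks, the time source and the task handling are faked), the clock refresh moves into the rq-locking helper so that code running under the locks can assume fresh clocks:

#include <stdio.h>

struct rq {
        unsigned long long clock;       /* per-runqueue clock (stand-in) */
};

static unsigned long long now = 1000;   /* fake time source */

static void update_rq_clock(struct rq *rq)
{
        rq->clock = now;                /* the real code derives this from sched_clock() */
}

static void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
        /* real code: take both rq->lock spinlocks in a stable order */
        update_rq_clock(rq1);           /* new: clocks refreshed right after locking */
        update_rq_clock(rq2);
}

static void pull_task(struct rq *src_rq, struct rq *this_rq)
{
        /* runs under double_rq_lock(), so src_rq->clock and this_rq->clock
         * are already up to date and no update_rq_clock() calls are needed
         * here any more */
        printf("pulling with clocks %llu/%llu\n", src_rq->clock, this_rq->clock);
}

int main(void)
{
        struct rq a = { 0 }, b = { 0 };

        double_rq_lock(&a, &b);
        pull_task(&a, &b);
        return 0;
}

The hunks below apply the same idea to double_rq_lock(), the newly-idle balance path, active_load_balance() and __migrate_task() in the real scheduler.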
diff --git a/kernel/sched.c b/kernel/sched.c
index 9ccd91e5b65b..afc59f274e58 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2017,6 +2017,8 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
spin_lock(&rq1->lock);
}
}
+ update_rq_clock(rq1);
+ update_rq_clock(rq2);
}
/*
@@ -2113,10 +2115,8 @@ void sched_exec(void)
static void pull_task(struct rq *src_rq, struct task_struct *p,
struct rq *this_rq, int this_cpu)
{
- update_rq_clock(src_rq);
deactivate_task(src_rq, p, 0);
set_task_cpu(p, this_cpu);
- __update_rq_clock(this_rq);
activate_task(this_rq, p, 0);
/*
* Note that idle threads have a prio of MAX_PRIO, for this test
@@ -2798,6 +2798,8 @@ redo:
if (busiest->nr_running > 1) {
/* Attempt to move tasks */
double_lock_balance(this_rq, busiest);
+ /* this_rq->clock is already updated */
+ update_rq_clock(busiest);
ld_moved = move_tasks(this_rq, this_cpu, busiest,
imbalance, sd, CPU_NEWLY_IDLE,
&all_pinned);
@@ -2895,6 +2897,8 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
/* move a task from busiest_rq to target_rq */
double_lock_balance(busiest_rq, target_rq);
+ update_rq_clock(busiest_rq);
+ update_rq_clock(target_rq);
/* Search for an sd spanning us and the target CPU. */
for_each_domain(target_cpu, sd) {
@@ -4962,13 +4966,11 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
goto out;
on_rq = p->se.on_rq;
- if (on_rq) {
- update_rq_clock(rq_src);
+ if (on_rq)
deactivate_task(rq_src, p, 0);
- }
+
set_task_cpu(p, dest_cpu);
if (on_rq) {
- update_rq_clock(rq_dest);
activate_task(rq_dest, p, 0);
check_preempt_curr(rq_dest, p);
}