author	Fabio Checconi <fabio@gandalf.sssup.it>	2009-07-16 12:32:27 +0200
committer	Ingo Molnar <mingo@elte.hu>	2009-07-18 11:17:08 +0200
commit	54fdc5816631b43ba55fc3206d7add2d85850bc6 (patch)
tree	44aebd11ac53ef355b215d37f374fb40a4cb008b /kernel/sched_fair.c
parent	78af08d90b8f745044b1274430bc4bc6b2b27aca (diff)
sched: Account for vruntime wrapping
I spotted two sites that didn't take vruntime wrap-around into account.
Fix these by creating a comparison helper that does do so.

Signed-off-by: Fabio Checconi <fabio@gandalf.sssup.it>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
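For illustration only (this sketch is not part of the commit), the following standalone userspace program shows why the signed-difference idiom used by the new entity_before() helper keeps the ordering correct when a u64 counter such as vruntime wraps around, under the usual assumption that the two values being compared are less than 2^63 apart. The before() helper and the sample values are hypothetical stand-ins for the kernel code.

/*
 * Illustrative sketch only -- not part of the commit.  Mimics the
 * entity_before() comparison in userspace: casting the u64 difference
 * to s64 preserves ordering across wrap-around, whereas a plain "<"
 * comparison does not.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef int64_t s64;

/* Same comparison technique as the helper added by this patch. */
static inline int before(u64 a, u64 b)
{
	return (s64)(a - b) < 0;
}

int main(void)
{
	u64 older = UINT64_MAX - 5;	/* logically earlier, just below the wrap point */
	u64 newer = 5;			/* logically later, the counter has wrapped past 0 */

	/* Naive comparison: prints 0, failing to see that 'older' precedes 'newer'. */
	printf("naive     older < newer        -> %d\n", older < newer);

	/* Wrap-safe comparison: prints 1 and then 0, the correct ordering. */
	printf("wrap-safe before(older, newer) -> %d\n", before(older, newer));
	printf("wrap-safe before(newer, older) -> %d\n", before(newer, older));

	return 0;
}

The same reasoning underlies the kernel's time_before()/time_after() jiffies macros, which also compare wrapping counters through a signed difference.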
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7c248dc30f41..9ffb2b2ceba4 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -266,6 +266,12 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
 	return min_vruntime;
 }
 
+static inline int entity_before(struct sched_entity *a,
+				struct sched_entity *b)
+{
+	return (s64)(a->vruntime - b->vruntime) < 0;
+}
+
 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	return se->vruntime - cfs_rq->min_vruntime;
@@ -1017,7 +1023,7 @@ static void yield_task_fair(struct rq *rq)
 	/*
 	 * Already in the rightmost position?
 	 */
-	if (unlikely(!rightmost || rightmost->vruntime < se->vruntime))
+	if (unlikely(!rightmost || entity_before(rightmost, se)))
 		return;
 
 	/*
@@ -1713,7 +1719,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 
 	/* 'curr' will be NULL if the child belongs to a different group */
 	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
-			curr && curr->vruntime < se->vruntime) {
+			curr && entity_before(curr, se)) {
 		/*
 		 * Upon rescheduling, sched_class::put_prev_task() will place
 		 * 'current' within the tree based on its new key value.