| author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2007-10-15 17:00:05 +0200 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2007-10-15 17:00:05 +0200 |
| commit | aeb73b040399f94698b4f64dd058cae39187e18d (patch) | |
| tree | 72ffe7e64b414df2157d19073b92406cbf436535 /kernel | |
| parent | 2e09bf556fbe1a4cd8d837a3e6607de55f7cf4fd (diff) | |
| download | lwn-aeb73b040399f94698b4f64dd058cae39187e18d.tar.gz lwn-aeb73b040399f94698b4f64dd058cae39187e18d.zip | |
sched: clean up new task placement
clean up new task placement.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched_fair.c | 57 |
1 file changed, 42 insertions, 15 deletions
```diff
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a60b1dac598a..cc447fbff51c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -199,6 +199,21 @@ static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
 	return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
 }
 
+static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
+{
+	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
+	struct sched_entity *se = NULL;
+	struct rb_node *parent;
+
+	while (*link) {
+		parent = *link;
+		se = rb_entry(parent, struct sched_entity, run_node);
+		link = &parent->rb_right;
+	}
+
+	return se;
+}
+
 /**************************************************************
  * Scheduling class statistics methods:
  */
@@ -530,6 +545,31 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 static void
+place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+{
+	struct sched_entity *last = __pick_last_entity(cfs_rq);
+	u64 min_runtime, latency;
+
+	min_runtime = cfs_rq->min_vruntime;
+	if (last) {
+		min_runtime += last->vruntime;
+		min_runtime >>= 1;
+		if (initial && sched_feat(START_DEBIT))
+			min_runtime += sysctl_sched_latency/2;
+	}
+
+	if (!initial && sched_feat(NEW_FAIR_SLEEPERS)) {
+		latency = sysctl_sched_latency;
+		if (min_runtime > latency)
+			min_runtime -= latency;
+		else
+			min_runtime = 0;
+	}
+
+	se->vruntime = max(se->vruntime, min_runtime);
+}
+
+static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 {
 	/*
@@ -538,19 +578,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	update_curr(cfs_rq);
 
 	if (wakeup) {
-		u64 min_runtime, latency;
-
-		min_runtime = cfs_rq->min_vruntime;
-		min_runtime += sysctl_sched_latency/2;
-
-		if (sched_feat(NEW_FAIR_SLEEPERS)) {
-			latency = calc_weighted(sysctl_sched_latency, se);
-			if (min_runtime > latency)
-				min_runtime -= latency;
-		}
-
-		se->vruntime = max(se->vruntime, min_runtime);
-
+		place_entity(cfs_rq, se, 0);
 		enqueue_sleeper(cfs_rq, se);
 	}
 
@@ -1033,8 +1061,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	sched_info_queued(p);
 
 	update_curr(cfs_rq);
-	se->vruntime = cfs_rq->min_vruntime;
-	update_stats_enqueue(cfs_rq, se);
+	place_entity(cfs_rq, se, 1);
 
 	/*
 	 * The first wait is dominated by the child-runs-first logic,
```
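For readers following the placement policy the patch consolidates into `place_entity()`, here is a minimal, self-contained sketch of the same arithmetic in plain C. This is not kernel code: the helper name `place_vruntime()`, the hard-coded feature flags, and the sample vruntime and latency values are assumptions chosen purely for illustration. The sketch mirrors the logic above: start from the midpoint of `cfs_rq->min_vruntime` and the rightmost entity's vruntime, charge a newly forked task (initial=1) half a latency period when START_DEBIT is enabled, and credit a waking sleeper (initial=0) up to one full latency period, clamped at zero, when NEW_FAIR_SLEEPERS is enabled.

```c
/*
 * Illustrative sketch only -- not kernel code.  Mirrors the arithmetic of
 * place_entity() from the diff above using plain C types and sample values.
 */
#include <stdio.h>
#include <stdint.h>

#define START_DEBIT		1	/* assumed enabled for the example */
#define NEW_FAIR_SLEEPERS	1	/* assumed enabled for the example */

static uint64_t sysctl_sched_latency = 20000000ULL;	/* 20 ms in ns (assumed) */

static uint64_t place_vruntime(uint64_t min_vruntime, uint64_t last_vruntime,
			       int have_last, uint64_t se_vruntime, int initial)
{
	uint64_t min_runtime = min_vruntime;

	if (have_last) {
		/* midpoint of min_vruntime and the rightmost (last) vruntime */
		min_runtime += last_vruntime;
		min_runtime >>= 1;
		/* a forked task pays half a latency period up front */
		if (initial && START_DEBIT)
			min_runtime += sysctl_sched_latency / 2;
	}

	if (!initial && NEW_FAIR_SLEEPERS) {
		/* a waking sleeper gets up to one latency period of credit */
		if (min_runtime > sysctl_sched_latency)
			min_runtime -= sysctl_sched_latency;
		else
			min_runtime = 0;
	}

	/* never move an entity's vruntime backwards */
	return se_vruntime > min_runtime ? se_vruntime : min_runtime;
}

int main(void)
{
	/* sample queue state, purely illustrative */
	uint64_t min_vr = 100000000ULL;		/* 100 ms */
	uint64_t last_vr = 140000000ULL;	/* 140 ms */

	printf("new task : %llu\n",
	       (unsigned long long)place_vruntime(min_vr, last_vr, 1, 0, 1));
	printf("wakeup   : %llu\n",
	       (unsigned long long)place_vruntime(min_vr, last_vr, 1, 0, 0));
	return 0;
}
```

With these sample numbers the forked task lands at 130 ms of virtual runtime (midpoint 120 ms plus the 10 ms START_DEBIT charge), while the waking sleeper lands at 100 ms (midpoint minus the 20 ms sleeper credit), which is the behaviour the single `place_entity()` helper now provides to both enqueue_entity() and task_new_fair().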