author    | Thomas Gleixner <tglx@linutronix.de> | 2010-01-19 19:20:29 +0100
committer | Thomas Gleixner <tglx@linutronix.de> | 2010-01-21 00:32:01 +0100
commit    | 1f10ed61d75f849395fffcb46251510bd287d91d (patch)
tree      | 637b36aa8d717058016a57e4f8e6dcac8db0aa74
parent    | 8ca861d423b09959eacb9860edb254c1c0ba9d01 (diff)
download  | lwn-1f10ed61d75f849395fffcb46251510bd287d91d.tar.gz lwn-1f10ed61d75f849395fffcb46251510bd287d91d.zip
sched: Extend activate_task to allow queueing to the head of a list
The ability to enqueue a task at the head of a SCHED_FIFO priority
list is required to fix some violations of the POSIX scheduling
policy. Extend activate_task() with a "head" argument and fix up all
callers.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
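The new calling convention is visible in the diff below: every existing call site passes false and keeps today's tail queueing, leaving head queueing available for a follow-up fix. A minimal sketch of both uses; the head-queueing caller is hypothetical and not part of this patch:

	/* Existing call sites all keep today's tail queueing, e.g. on wakeup: */
	activate_task(rq, p, 1, false);	/* wakeup = 1, enqueue at tail */

	/*
	 * A later POSIX-compliance fix can then request head queueing
	 * (hypothetical call site, not in this patch):
	 */
	activate_task(rq, p, 0, true);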
-rw-r--r-- | kernel/sched.c    | 22
-rw-r--r-- | kernel/sched_rt.c |  4
2 files changed, 14 insertions, 12 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 83b9c3230632..1ca987256375 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1500,7 +1500,8 @@ static const u32 prio_to_wmult[40] = {
 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
 };
 
-static void activate_task(struct rq *rq, struct task_struct *p, int wakeup);
+static void activate_task(struct rq *rq, struct task_struct *p, int wakeup,
+			  bool head);
 
 /*
  * runqueue iterator, to support SMP load-balancing between different
@@ -1967,12 +1968,13 @@ static int effective_prio(struct task_struct *p)
 /*
  * activate_task - move a task to the runqueue.
  */
-static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+activate_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 
-	enqueue_task(rq, p, wakeup, false);
+	enqueue_task(rq, p, wakeup, head);
 	inc_nr_running(rq);
 }
 
@@ -2617,7 +2619,7 @@ out_activate:
 		schedstat_inc(p, se.nr_wakeups_local);
 	else
 		schedstat_inc(p, se.nr_wakeups_remote);
-	activate_task(rq, p, 1);
+	activate_task(rq, p, 1, false);
 	success = 1;
 
 	/*
@@ -2826,7 +2828,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	p->prio = effective_prio(p);
 
 	if (!p->sched_class->task_new || !current->se.on_rq) {
-		activate_task(rq, p, 0);
+		activate_task(rq, p, 0, false);
 	} else {
 		/*
 		 * Let the scheduling class do new task startup
@@ -3362,7 +3364,7 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
 {
 	deactivate_task(src_rq, p, 0);
 	set_task_cpu(p, this_cpu);
-	activate_task(this_rq, p, 0);
+	activate_task(this_rq, p, 0, false);
 	/*
 	 * Note that idle threads have a prio of MAX_PRIO, for this test
 	 * to be always true for them.
@@ -6651,7 +6653,7 @@ recheck:
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (on_rq) {
-		activate_task(rq, p, 0);
+		activate_task(rq, p, 0, false);
 
 		check_class_changed(rq, p, prev_class, oldprio, running);
 	}
@@ -7578,7 +7580,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 
 	set_task_cpu(p, dest_cpu);
 	if (on_rq) {
-		activate_task(rq_dest, p, 0);
+		activate_task(rq_dest, p, 0, false);
 		check_preempt_curr(rq_dest, p, 0);
 	}
 done:
@@ -7758,7 +7760,7 @@ void sched_idle_next(void)
 	__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
 
 	update_rq_clock(rq);
-	activate_task(rq, p, 0);
+	activate_task(rq, p, 0, false);
 
 	atomic_spin_unlock_irqrestore(&rq->lock, flags);
 }
@@ -9959,7 +9961,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 	deactivate_task(rq, p, 0);
 	__setscheduler(rq, p, SCHED_NORMAL, 0);
 	if (on_rq) {
-		activate_task(rq, p, 0);
+		activate_task(rq, p, 0, false);
 		resched_task(rq->curr);
 	}
 }
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 0f1aba273f92..60efb82dc5eb 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1419,7 +1419,7 @@ static int push_rt_task(struct rq *rq)
 
 	deactivate_task(rq, next_task, 0);
 	set_task_cpu(next_task, lowest_rq->cpu);
-	activate_task(lowest_rq, next_task, 0);
+	activate_task(lowest_rq, next_task, 0, false);
 
 	resched_task(lowest_rq->curr);
 
@@ -1502,7 +1502,7 @@ static int pull_rt_task(struct rq *this_rq)
 			deactivate_task(src_rq, p, 0);
 			set_task_cpu(p, this_cpu);
-			activate_task(this_rq, p, 0);
+			activate_task(this_rq, p, 0, false);
 
 			/*
 			 * We continue with the search, just in
 			 * case there's an even higher prio task
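For context on what the head flag ultimately selects: each SCHED_FIFO priority has its own queue of runnable tasks, and head versus tail insertion on that queue is the difference between list_add() and list_add_tail(). A minimal sketch, assuming the run_list/queue layout of kernel/sched_rt.c; the helper below is illustrative, not a function from this patch:

	#include <linux/list.h>
	#include <linux/sched.h>

	/*
	 * Illustrative only: how a "head" flag would select head vs. tail
	 * insertion on a per-priority run queue, as in kernel/sched_rt.c.
	 */
	static void sketch_enqueue_rt(struct list_head *queue,
				      struct sched_rt_entity *rt_se,
				      bool head)
	{
		if (head)
			list_add(&rt_se->run_list, queue);	/* runs next in its prio */
		else
			list_add_tail(&rt_se->run_list, queue);	/* normal FIFO order */
	}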