author    Thomas Gleixner <tglx@linutronix.de>  2010-01-19 16:09:11 +0100
committer Thomas Gleixner <tglx@linutronix.de>  2010-02-21 20:20:10 +0100
commit    6640be50e8a2f9bd526beddafd23b49406a4af07 (patch)
tree      3a2cef9f3b60fd9edeea265e3ec6226e421e356a
parent    57bba6a44244a9c7cef5951041bdd3f747b28ed0 (diff)
sched: Extend enqueue_task to allow head queueing
The ability to enqueue a task at the head of a SCHED_FIFO priority list is required to fix some violations of the POSIX scheduling policy. Extend the related functions with a "head" argument.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
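[Editorial illustration] The effect of the new flag comes down to which end of the per-priority FIFO list the task is linked into. The sketch below is an assumption about how a later patch can consume the flag in kernel/sched_rt.c; the __enqueue_rt_entity_sketch name and the queue parameter are illustrative stand-ins, not part of this patch:

	/*
	 * Hedged sketch, not part of this patch: how a "head" flag can be
	 * consumed when linking an RT entity into its priority list.
	 */
	static void __enqueue_rt_entity_sketch(struct sched_rt_entity *rt_se,
					       struct list_head *queue, bool head)
	{
		if (head)
			/* Run next among tasks of this priority (POSIX head queueing). */
			list_add(&rt_se->run_list, queue);
		else
			/* Default FIFO behaviour: new arrivals join the tail. */
			list_add_tail(&rt_se->run_list, queue);
	}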
-rw-r--r--  include/linux/sched.h |  3 ++-
-rw-r--r--  kernel/sched.c        | 13 +++++++------
-rw-r--r--  kernel/sched_fair.c   |  3 ++-
-rw-r--r--  kernel/sched_rt.c     |  3 ++-
4 files changed, 13 insertions(+), 9 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 57cf766c3404..64cf01a3cd0f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1119,7 +1119,8 @@ struct sched_domain;
 struct sched_class {
 	const struct sched_class *next;
 
-	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
+			      bool head);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
 	void (*yield_task) (struct rq *rq);
diff --git a/kernel/sched.c b/kernel/sched.c
index ba0ee0d34545..8da004bbc8db 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1972,13 +1972,14 @@ static void update_avg(u64 *avg, u64 sample)
 	*avg += diff >> 3;
 }
 
-static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
 	if (wakeup)
 		p->se.start_runtime = p->se.sum_exec_runtime;
 
 	sched_info_queued(p);
-	p->sched_class->enqueue_task(rq, p, wakeup);
+	p->sched_class->enqueue_task(rq, p, wakeup, head);
 	p->se.on_rq = 1;
 }
@@ -2056,7 +2057,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 
-	enqueue_task(rq, p, wakeup);
+	enqueue_task(rq, p, wakeup, false);
 	inc_nr_running(rq);
 }
@@ -6389,7 +6390,7 @@ void task_setprio(struct task_struct *p, int prio)
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (on_rq) {
-		enqueue_task(rq, p, 0);
+		enqueue_task(rq, p, 0, false);
 
 		check_class_changed(rq, p, prev_class, oldprio, running);
 	}
@@ -6433,7 +6434,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	delta = p->prio - old_prio;
 
 	if (on_rq) {
-		enqueue_task(rq, p, 0);
+		enqueue_task(rq, p, 0, false);
 		/*
 		 * If the task increased its priority or is running and
 		 * lowered its priority, then reschedule its CPU:
@@ -10522,7 +10523,7 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		tsk->sched_class->set_curr_task(rq);
 	if (on_rq)
-		enqueue_task(rq, tsk, 0);
+		enqueue_task(rq, tsk, 0, false);
 
 	task_rq_unlock(rq, &flags);
 }
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index be1772e37dc3..cff45e488188 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1057,7 +1057,8 @@ static inline void hrtick_update(struct rq *rq)
  * increased. Here we update the fair scheduling stats and
  * then put the task into the rbtree:
  */
-static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 9204fe190fb9..569d18ea7a01 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -927,7 +927,8 @@ unsigned long rt_nr_uninterruptible_cpu(int cpu)
 /*
  * Adding/removing a task to/from a priority array:
  */
-static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
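[Editorial illustration] Every caller converted in this patch passes head = false, so the change is behaviour-neutral by itself; a follow-up can then pass true where POSIX requires head queueing. For example, sketched against the task_setprio() hunk above (an assumption about the follow-up, not part of this patch):

	if (on_rq) {
		/*
		 * Sketch: queue a deboosted task (oldprio < prio) at the head
		 * of its new priority list, as POSIX requires, rather than at
		 * the tail; boosted or unchanged tasks keep tail queueing.
		 */
		enqueue_task(rq, p, 0, oldprio < prio);

		check_class_changed(rq, p, prev_class, oldprio, running);
	}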