author     Dmitry Adamushko <dmitry.adamushko@gmail.com>  2007-10-15 17:00:14 +0200
committer  Ingo Molnar <mingo@elte.hu>                    2007-10-15 17:00:14 +0200
commit     e62dd02ed0af35631c6ca473e50758c9594773cf
tree       5bc942a91fac6e48d046997bef8dbfcb9c3a8d3b /kernel/sched_fair.c
parent     ace8b3d633f93da8535921bf3e3679db3c619578
sched: fix group scheduling for SCHED_BATCH
The following patch (sched: disable sleeper_fairness on SCHED_BATCH)
seems to break GROUP_SCHED. It may be oops-less only because 'p'
always happens to hold a valid address.

Signed-off-by: Dmitry Adamushko <dmitry.adamushko@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c | 5
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 410b77aea216..3ac096e74faf 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -485,9 +485,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 		vruntime += sched_vslice_add(cfs_rq, se);
 
 	if (!initial) {
-		struct task_struct *p = container_of(se, struct task_struct, se);
-
-		if (sched_feat(NEW_FAIR_SLEEPERS) && p->policy != SCHED_BATCH)
+		if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se) &&
+				task_of(se)->policy != SCHED_BATCH)
 			vruntime -= sysctl_sched_latency;
 
 		vruntime = max_t(s64, vruntime, se->vruntime);
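
The bug: with CONFIG_FAIR_GROUP_SCHED, a sched_entity is not necessarily
embedded in a task_struct. Group entities are embedded in a task_group, so
the unconditional container_of(se, struct task_struct, se) computed a pointer
into unrelated memory whenever 'se' was a group entity; the patched condition
guards the cast with entity_is_task() before calling task_of(). Below is a
minimal user-space sketch of that failure mode. The structures, field layout,
and helpers are simplified stand-ins for illustration, not the kernel's
actual definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures; layouts are
 * illustrative only. */
struct sched_entity {
	void *my_q;		/* non-NULL for a group entity */
	long vruntime;
};

struct task_struct {
	long pid;		/* placeholder field before the entity */
	struct sched_entity se;
	int policy;
};

struct task_group {
	struct sched_entity se;	/* no surrounding task_struct here */
};

/* A container_of() lookalike; the arithmetic is done on uintptr_t so
 * the out-of-bounds result can be printed without being dereferenced. */
#define container_of(ptr, type, member) \
	((type *)((uintptr_t)(ptr) - offsetof(type, member)))

/* The guard the fix relies on: only perform the cast when the entity
 * really is embedded in a task. */
static int entity_is_task(const struct sched_entity *se)
{
	return se->my_q == NULL;
}

static struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

int main(void)
{
	struct task_group tg = { .se = { .my_q = &tg } };

	/* The old code's mistake: an unconditional container_of() on a
	 * group entity yields a task_struct pointer into memory that is
	 * not a task_struct at all (computed but never dereferenced
	 * here, which is why it may not oops in practice). */
	struct task_struct *bogus =
		container_of(&tg.se, struct task_struct, se);
	printf("group se at %p, bogus task_struct at %p\n",
	       (void *)&tg.se, (void *)bogus);

	/* The fixed pattern: check entity_is_task() first, as the
	 * patch does. */
	if (entity_is_task(&tg.se))
		printf("policy = %d\n", task_of(&tg.se)->policy);
	else
		printf("group entity: skip the SCHED_BATCH check\n");

	return 0;
}

In the kernel of this era, entity_is_task(se) reduces to (!se->my_q) under
CONFIG_FAIR_GROUP_SCHED (only group entities own a runqueue) and to a
constant 1 otherwise, so the extra check in the patched condition is cheap
and compiles away entirely on non-group configurations.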