author | Tim Chen <tim.c.chen@linux.intel.com> | 2014-12-12 15:38:12 -0800 |
---|---|---|
committer | Ben Hutchings <ben@decadent.org.uk> | 2015-03-06 00:39:19 +0000 |
commit | 985504f71bb0ca24ac647c72446cc621980aef9f (patch) | |
tree | 2cb7704ebe77dd284fa26690dd85a3ba89c0036f | |
parent | c0a74eb2b093cd781f216306cc5a1e5825835d74 (diff) | |
sched/rt: Reduce rq lock contention by eliminating locking of non-feasible target
commit 80e3d87b2c5582db0ab5e39610ce3707d97ba409 upstream.
This patch adds checks that prevent futile attempts to move RT tasks
to a CPU with active tasks of equal or higher priority.
This reduces run queue lock contention and improves the performance of
a well-known OLTP benchmark by 0.7%.
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Shawn Bohrer <sbohrer@rgmadvisors.com>
Cc: Suruchi Kadu <suruchi.a.kadu@intel.com>
Cc: Doug Nelson <doug.nelson@intel.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1421430374.2399.27.camel@schen9-desk2.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
[bwh: Backported to 3.2: adjust filename]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
-rw-r--r-- | kernel/sched_rt.c | 17 |
1 file changed, 16 insertions(+), 1 deletion(-)
```diff
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index f57fda7cbef0..bd4afa408686 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1067,7 +1067,12 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 	    (p->rt.nr_cpus_allowed > 1)) {
 		int target = find_lowest_rq(p);
 
-		if (target != -1)
+		/*
+		 * Don't bother moving it if the destination CPU is
+		 * not running a lower priority task.
+		 */
+		if (target != -1 &&
+		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
 			cpu = target;
 	}
 	rcu_read_unlock();
@@ -1346,6 +1351,16 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 
 		lowest_rq = cpu_rq(cpu);
 
+		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
+			/*
+			 * Target rq has tasks of equal or higher priority,
+			 * retrying does not release any lock and is unlikely
+			 * to yield a different result.
+			 */
+			lowest_rq = NULL;
+			break;
+		}
+
 		/* if the prio of this runqueue changed, try again */
 		if (double_lock_balance(rq, lowest_rq)) {
 			/*
```
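Both hunks hinge on the same comparison: in the RT scheduling class a lower numeric `prio` value means a higher priority, so pushing a task toward `target` is only worthwhile when `p->prio < cpu_rq(target)->rt.highest_prio.curr`. The standalone C sketch below is not kernel code; `fake_rt_rq` and `push_is_feasible` are invented stand-ins for the patch's `rq->rt.highest_prio.curr` check, shown in isolation to make the priority-ordering direction explicit.

```c
/*
 * Standalone sketch (NOT kernel code) of the feasibility test this patch
 * adds.  In the RT scheduling class a lower numeric prio means a higher
 * priority, so pushing a task to a target run queue is only worthwhile when
 * the task's prio value is strictly below the target's highest queued prio.
 * `fake_rt_rq` and `push_is_feasible` are invented names standing in for the
 * kernel's rq->rt.highest_prio.curr check.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_rt_rq {
	int highest_prio_curr;	/* best (lowest) prio value queued on this rq */
};

/* Mirror of the added check: only push when the target runs lower priority. */
static bool push_is_feasible(int task_prio, const struct fake_rt_rq *target)
{
	return task_prio < target->highest_prio_curr;
}

int main(void)
{
	struct fake_rt_rq busy = { .highest_prio_curr = 10 };	/* prio-10 RT task running */
	struct fake_rt_rq calm = { .highest_prio_curr = 50 };	/* only prio-50 work queued */

	/* A prio-10 task gains nothing from moving next to an equal-prio task. */
	printf("prio 10 -> busy rq: %s\n", push_is_feasible(10, &busy) ? "push" : "skip");
	/* Moving next to lower-priority (higher prio value) work is worthwhile. */
	printf("prio 10 -> calm rq: %s\n", push_is_feasible(10, &calm) ? "push" : "skip");
	return 0;
}
```

The same comparison, inverted as `highest_prio.curr <= task->prio`, lets find_lock_lowest_rq() bail out before taking the second run queue lock rather than retrying against a target that cannot help, which is where the lock-contention saving comes from.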