From cbce1a686700595de65ee363b9b3283ae85d8fc5 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 11 Jun 2015 14:46:54 +0200
Subject: sched,lockdep: Employ lock pinning

Employ the new lockdep lock pinning annotation to ensure no 'accidental'
lock-breaks happen with rq->lock.

Signed-off-by: Peter Zijlstra (Intel)
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124744.003233193@infradead.org
Signed-off-by: Thomas Gleixner
---
 kernel/sched/fair.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

(limited to 'kernel/sched/fair.c')

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7210ae848909..509ef63d0d6f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5392,7 +5392,15 @@ simple:
 	return p;
 
 idle:
+	/*
+	 * This is OK, because current is on_cpu, which avoids it being picked
+	 * for load-balance and preemption/IRQs are still disabled avoiding
+	 * further scheduler activity on it and we're being very careful to
+	 * re-start the picking loop.
+	 */
+	lockdep_unpin_lock(&rq->lock);
 	new_tasks = idle_balance(rq);
+	lockdep_pin_lock(&rq->lock);
 	/*
 	 * Because idle_balance() releases (and re-acquires) rq->lock, it is
 	 * possible for any higher priority task to appear. In that case we
@@ -7426,9 +7434,6 @@ static int idle_balance(struct rq *this_rq)
 		goto out;
 	}
 
-	/*
-	 * Drop the rq->lock, but keep IRQ/preempt disabled.
-	 */
 	raw_spin_unlock(&this_rq->lock);
 
 	update_blocked_averages(this_cpu);
--
cgit v1.2.3
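
For context, a minimal sketch of the pinning pattern the patch applies, assuming
a kernel of this vintage built with lockdep (CONFIG_PROVE_LOCKING); in later
kernels lockdep_pin_lock() was reworked to return a struct pin_cookie, so the
exact calls differ. The lock and functions below (example_lock, do_lock_break,
example_path) are hypothetical stand-ins for rq->lock, idle_balance() and
pick_next_task_fair(), not code from the patch:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Hypothetical lock, standing in for rq->lock. */
static DEFINE_RAW_SPINLOCK(example_lock);

/* Stands in for idle_balance(): a deliberate, known lock-break. */
static void do_lock_break(void)
{
	raw_spin_unlock(&example_lock);
	/* ... acquire other locks, do work ... */
	raw_spin_lock(&example_lock);
}

/* Stands in for pick_next_task_fair(). */
static void example_path(void)
{
	raw_spin_lock(&example_lock);

	/*
	 * Pin the held lock: from here on lockdep WARNs if anything
	 * releases example_lock while it is pinned, catching the
	 * 'accidental' lock-breaks the changelog refers to.
	 */
	lockdep_pin_lock(&example_lock);

	/* ... code that must hold example_lock throughout ... */

	/*
	 * Intentional lock-break: unpin first so the callee may drop
	 * and re-take the lock without triggering a warning, then pin
	 * again once it returns.
	 */
	lockdep_unpin_lock(&example_lock);
	do_lock_break();
	lockdep_pin_lock(&example_lock);

	/* Unpin before the final, legitimate release. */
	lockdep_unpin_lock(&example_lock);
	raw_spin_unlock(&example_lock);
}

Note that the diff above is limited to kernel/sched/fair.c; the unpin/pin pair
around idle_balance() presumes the caller pinned rq->lock in the first place,
which the rest of the commit (not shown in this view) arranges.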