author		Thomas Gleixner <tglx@linutronix.de>	2021-11-29 18:46:48 +0100
committer	Peter Zijlstra <peterz@infradead.org>	2021-12-04 10:56:23 +0100
commit		0c1d7a2c2d32fac7ff4a644724b2d52a64184645 (patch)
tree		7d973e81c916d17c106e916ba9c3ec63cbf93865 /kernel/locking
parent		a3642021923b26d86bb27d88c826494827612c06 (diff)
lockdep: Remove softirq accounting on PREEMPT_RT.
There is not really a softirq context on PREEMPT_RT. Softirqs on PREEMPT_RT are always invoked within the context of a threaded interrupt handler or within ksoftirqd. The "in-softirq" context is preemptible and is protected by a per-CPU lock to ensure mutual exclusion.

There is no difference on PREEMPT_RT between spin_lock_irq() and spin_lock() because the former does not disable interrupts. Therefore if a lock is used in_softirq() and locked once with spin_lock_irq() then lockdep will report this with "inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage".

Teach lockdep that we don't really do softirqs on -RT.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20211129174654.668506-6-bigeasy@linutronix.de
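To illustrate the false positive the changelog describes, here is a minimal, hypothetical sketch (not part of this patch): demo_lock, demo_tasklet_fn() and demo_process_ctx() are invented names. The pattern is valid on PREEMPT_RT, yet without this change lockdep complains about it.

	/*
	 * Hypothetical example of the pattern described in the changelog.
	 * demo_lock and both functions are illustrative only.
	 */
	#include <linux/spinlock.h>
	#include <linux/interrupt.h>

	static DEFINE_SPINLOCK(demo_lock);

	/* Runs in_softirq(); lockdep marks demo_lock as IN-SOFTIRQ-W. */
	static void demo_tasklet_fn(struct tasklet_struct *t)
	{
		spin_lock(&demo_lock);
		/* ... update shared state ... */
		spin_unlock(&demo_lock);
	}

	/*
	 * Process context.  On PREEMPT_RT spin_lock_irq() does not disable
	 * interrupts or softirqs, so lockdep also records a SOFTIRQ-ON-W
	 * acquisition and reports "inconsistent {SOFTIRQ-ON-W} ->
	 * {IN-SOFTIRQ-W} usage", even though the RT "softirq" is preemptible
	 * and serialized by a per-CPU lock, so no deadlock is possible.
	 */
	static void demo_process_ctx(void)
	{
		spin_lock_irq(&demo_lock);
		/* ... update shared state ... */
		spin_unlock_irq(&demo_lock);
	}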
Diffstat (limited to 'kernel/locking')
-rw-r--r--	kernel/locking/lockdep.c	2
1 file changed, 2 insertions, 0 deletions
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 2270ec68f10a..4a882f83aeb9 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -5485,6 +5485,7 @@ static noinstr void check_flags(unsigned long flags)
 		}
 	}
 
+#ifndef CONFIG_PREEMPT_RT
 	/*
 	 * We dont accurately track softirq state in e.g.
 	 * hardirq contexts (such as on 4KSTACKS), so only
@@ -5499,6 +5500,7 @@ static noinstr void check_flags(unsigned long flags)
 			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
 		}
 	}
+#endif
 
 	if (!debug_locks)
 		print_irqtrace_events(current);
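For orientation, the affected region of check_flags() ends up structured roughly as sketched below. The checks between the two hunks are summarized as a comment rather than reproduced; only the #ifndef/#endif placement is taken from the diff itself.

	static noinstr void check_flags(unsigned long flags)
	{
		/* ... hardirq consistency checks (unchanged by this patch) ... */

	#ifndef CONFIG_PREEMPT_RT
		/*
		 * The softirq consistency checks (the DEBUG_LOCKS_WARN_ON()
		 * calls on current->softirqs_enabled) are now compiled out on
		 * PREEMPT_RT, where no real softirq context exists.
		 */
		/* ... softirq checks elided; see the hunks above ... */
	#endif

		if (!debug_locks)
			print_irqtrace_events(current);
	}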