author    | Peter Zijlstra <peterz@infradead.org> | 2020-08-20 09:13:30 +0200
committer | Peter Zijlstra <peterz@infradead.org> | 2020-08-26 12:41:53 +0200
commit    | fddf9055a60dfcc97bda5ef03c8fa4108ed555c5 (patch)
tree      | 5909759ef3729f2ef425cee2b1b33e483a48954c /include/linux/lockdep.h
parent    | d012a7190fc1fd72ed48911e77ca97ba4521bccd (diff)
download  | lwn-fddf9055a60dfcc97bda5ef03c8fa4108ed555c5.tar.gz
          | lwn-fddf9055a60dfcc97bda5ef03c8fa4108ed555c5.zip
lockdep: Use raw_cpu_*() for per-cpu variables
Sven reported that commit a21ee6055c30 ("lockdep: Change
hardirq{s_enabled,_context} to per-cpu variables") caused trouble on
s390, because its this_cpu_*() primitives disable preemption, which
then lands back in the tracing code.
On the one hand, per-cpu ops should use preempt_*able_notrace() and
raw_local_irq_*(); on the other hand, we can trivially use raw_cpu_*()
ops for this.
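
To illustrate the recursion (a simplified sketch only; the real
definitions live in the arch and asm-generic percpu headers, and the
sketch_ name is invented here):

	/*
	 * Simplified sketch of a this_cpu_read() built on traced
	 * preemption ops. Calling it from the tracing path recurses:
	 *
	 *   tracing -> lockdep_assert_*() -> sketch_this_cpu_read()
	 *           -> preempt_disable() -> tracing -> ...
	 */
	#define sketch_this_cpu_read(pcp)			\
	({							\
		typeof(pcp) __ret;				\
		preempt_disable();	/* traced */		\
		__ret = raw_cpu_read(pcp);			\
		preempt_enable();	/* traced */		\
		__ret;						\
	})

raw_cpu_read() performs the access without that preemption bracketing;
dropping it is acceptable for these assertions because, when IRQs are
enabled, a stale read from the previous CPU is harmless.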
Fixes: a21ee6055c30 ("lockdep: Change hardirq{s_enabled,_context} to per-cpu variables")
Reported-by: Sven Schnelle <svens@linux.ibm.com>
Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200821085348.192346882@infradead.org
Diffstat (limited to 'include/linux/lockdep.h')
-rw-r--r-- | include/linux/lockdep.h | 18
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 62a382d1845b..6a584b3e5c74 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -535,19 +535,27 @@ do { \
 DECLARE_PER_CPU(int, hardirqs_enabled);
 DECLARE_PER_CPU(int, hardirq_context);
 
+/*
+ * The below lockdep_assert_*() macros use raw_cpu_read() to access the above
+ * per-cpu variables. This is required because this_cpu_read() will potentially
+ * call into preempt/irq-disable and that obviously isn't right. This is also
+ * correct because when IRQs are enabled, it doesn't matter if we accidentally
+ * read the value from our previous CPU.
+ */
+
 #define lockdep_assert_irqs_enabled() \
 do { \
-	WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirqs_enabled)); \
+	WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirqs_enabled)); \
 } while (0)
 
 #define lockdep_assert_irqs_disabled() \
 do { \
-	WARN_ON_ONCE(debug_locks && this_cpu_read(hardirqs_enabled)); \
+	WARN_ON_ONCE(debug_locks && raw_cpu_read(hardirqs_enabled)); \
 } while (0)
 
 #define lockdep_assert_in_irq() \
 do { \
-	WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirq_context)); \
+	WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirq_context)); \
 } while (0)
 
 #define lockdep_assert_preemption_enabled() \
@@ -555,7 +563,7 @@ do { \
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
 		     debug_locks && \
 		     (preempt_count() != 0 || \
-		      !this_cpu_read(hardirqs_enabled))); \
+		      !raw_cpu_read(hardirqs_enabled))); \
 } while (0)
 
 #define lockdep_assert_preemption_disabled() \
@@ -563,7 +571,7 @@ do { \
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
 		     debug_locks && \
 		     (preempt_count() == 0 && \
-		      this_cpu_read(hardirqs_enabled))); \
+		      raw_cpu_read(hardirqs_enabled))); \
 } while (0)
 
 #else
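
For reference, a hypothetical usage sketch (my_dev and my_dev_kick()
are invented for illustration and are not part of this patch) showing
how these assertions document and verify a caller's interrupt state:

	#include <linux/io.h>
	#include <linux/lockdep.h>

	struct my_dev {				/* hypothetical device */
		void __iomem *doorbell;
	};

	static void my_dev_kick(struct my_dev *dev)
	{
		/*
		 * Callers must hold IRQs off, e.g. inside a
		 * spin_lock_irqsave() section; with lockdep enabled
		 * this WARNs once if they do not.
		 */
		lockdep_assert_irqs_disabled();
		writel(1, dev->doorbell);
	}

Because the assertions now use raw_cpu_read(), such checks are also
safe in paths reached from the tracing code itself.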