author     Frederic Weisbecker <fweisbec@gmail.com>	2010-05-04 04:54:47 +0200
committer  Frederic Weisbecker <fweisbec@gmail.com>	2010-05-04 05:38:16 +0200
commit     54d47a2be5e7f928fb77b2f5a0761f6bd3c9dbff (patch)
tree       f31a627eea303b08da532645fce94cbb4452ec30
parent     fa9a97dec611c5356301645d576b523ce3919eba (diff)
lockdep: No need to disable preemption in debug atomic ops
No need to disable preemption in the debug_atomic_* ops, as we
ensure interrupts are disabled already. So let's use the
__this_cpu_ops(), rather than the this_cpu_ops() which enclose
the ops in a preempt-disabled section.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
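[Editor's illustration, not part of the patch; the demo_* names below are hypothetical.]
The difference the patch relies on can be sketched as follows: this_cpu_inc()
protects the per-cpu access with its own preempt_disable()/preempt_enable()
pair, while __this_cpu_inc() assumes the caller already prevents preemption
and CPU migration, which the WARN_ON_ONCE(!irqs_disabled()) check in the
debug_atomic_* macros guarantees.

	#include <linux/percpu.h>
	#include <linux/irqflags.h>

	/* Hypothetical per-cpu counter, for illustration only. */
	static DEFINE_PER_CPU(unsigned long, demo_events);

	/* Caller context unknown: this_cpu_inc() disables preemption itself. */
	static void demo_count_event(void)
	{
		this_cpu_inc(demo_events);
	}

	/*
	 * Interrupts (and therefore preemption/migration) are already off,
	 * so the cheaper __this_cpu_inc() is safe, just as in
	 * debug_atomic_inc() after this patch.
	 */
	static void demo_count_event_irqs_off(void)
	{
		WARN_ON_ONCE(!irqs_disabled());
		__this_cpu_inc(demo_events);
	}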
 kernel/lockdep_internals.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 8d929c717d3e..4f560cfedc8f 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -144,12 +144,12 @@ DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
 #define debug_atomic_inc(ptr)			{		\
 	WARN_ON_ONCE(!irqs_disabled());				\
-	this_cpu_inc(lockdep_stats.ptr);			\
+	__this_cpu_inc(lockdep_stats.ptr);			\
 }
 
 #define debug_atomic_dec(ptr)			{		\
 	WARN_ON_ONCE(!irqs_disabled());				\
-	this_cpu_dec(lockdep_stats.ptr);			\
+	__this_cpu_dec(lockdep_stats.ptr);			\
 }
 
 #define debug_atomic_read(ptr)		({			\