author		Yunsheng Lin <linyunsheng@huawei.com>	2020-11-24 18:49:28 +0800
committer	Jakub Kicinski <kuba@kernel.org>	2020-11-25 15:08:34 -0800
commit		8b5536ad1216c47fb9b37ef2cd0cfa70d79d4645 (patch)
tree		501780f646449de753cb071f3b6bc6908961a5b4 /include/linux/lockdep.h
parent		b5094a3b535b5a258ab49ed9614948df63bc3557 (diff)
lockdep: Introduce in_softirq lockdep assert
The current semantics of napi_consume_skb() require the caller to provide a non-zero budget when calling from NAPI context. Breaking this contract causes hard-to-debug problems, because __kfree_skb_defer() needs to run in atomic context in order to push the skb onto the particular CPU's napi_alloc_cache atomically.

So add lockdep_assert_in_softirq() to assert when the running context is not in_softirq(). Note that in_softirq() means a softirq is being served or BH is disabled, which has ambiguous semantics due to the BH-disabled confusion, so add a comment to emphasize that.

Since softirq context can be interrupted by hard IRQ or NMI context, lockdep_assert_in_softirq() also needs to assert when running in hard IRQ or NMI context.

Suggested-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
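For illustration, a per-CPU cache that must only be touched from BH context could guard itself with the new assert roughly as follows. This is a hedged sketch, not part of this patch; example_cache and example_defer_skb() are made-up names, and the body is simplified to show only where the assert would sit.

#include <linux/lockdep.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>

/* Hypothetical per-CPU cache, named only for this sketch. */
struct example_cache {
	struct sk_buff *skb;
};
static DEFINE_PER_CPU(struct example_cache, example_cache);

static void example_defer_skb(struct sk_buff *skb)
{
	/*
	 * Warn once (when lockdep is enabled) if we are not serving a
	 * softirq / running with BH disabled, or if softirq context was
	 * interrupted by a hard IRQ or an NMI.
	 */
	lockdep_assert_in_softirq();

	/*
	 * With the assert satisfied, the per-CPU slot is only ever
	 * touched from BH context on this CPU, so no further locking
	 * is needed here.
	 */
	this_cpu_write(example_cache.skb, skb);
}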
Diffstat (limited to 'include/linux/lockdep.h')
-rw-r--r--	include/linux/lockdep.h	11
1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index f5594879175a..92771bc1791f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -594,6 +594,16 @@ do { \
this_cpu_read(hardirqs_enabled))); \
} while (0)
+/*
+ * Acceptable for protecting per-CPU resources accessed from BH.
+ * Much like in_softirq() - semantics are ambiguous, use carefully.
+ */
+#define lockdep_assert_in_softirq() \
+do { \
+ WARN_ON_ONCE(__lockdep_enabled && \
+ (!in_softirq() || in_irq() || in_nmi())); \
+} while (0)
+
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
@@ -605,6 +615,7 @@ do { \
# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
+# define lockdep_assert_in_softirq() do { } while (0)
#endif
#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
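As a reading aid only (not part of the patch), the condition the macro enforces can be restated positively: the context is acceptable when a softirq is being served or BH is disabled, and execution has not been interrupted by a hard IRQ or an NMI. A hypothetical helper spelling that out:

#include <linux/preempt.h>

static inline bool example_in_softirq_strict(void)
{
	/*
	 * in_softirq() is true while a softirq handler runs or while BH
	 * is disabled; in_irq() and in_nmi() exclude the cases where
	 * that context has been interrupted by a hard IRQ or an NMI.
	 * This is the negation of the condition WARN_ON_ONCE() checks.
	 */
	return in_softirq() && !in_irq() && !in_nmi();
}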