author:    Peter Zijlstra <peterz@infradead.org>  2022-09-02 18:48:55 +0200
committer: Peter Zijlstra <peterz@infradead.org>  2022-09-07 21:54:01 +0200
commit:    f3c0eba287049237b23d1300376768293eb89e69
tree:      ceb5cab8cd9b1733b4504a639a63454c4d4c746c /include/linux/perf_event.h
parent:    88081cfb699ce2568e5309c145eb9f9e9497b53f
perf: Add a few assertions
While auditing 6b959ba22d34 ("perf/core: Fix reentry problem in
perf_output_read_group()"), a few spots were found that wanted
assertions.
Notably, for_each_sibling_event() relies on exclusion from
modification. This would normally mean holding either ctx->lock or
ctx->mutex; however, due to how things are constructed, disabling
IRQs is a valid and sufficient substitute for ctx->lock.
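A minimal sketch (not part of this patch) of an iteration site that
satisfies this rule by taking ctx->mutex around the walk;
count_sibling_events() is a hypothetical helper, used purely for
illustration:

/* Hypothetical illustration: walk a group's sibling list with
 * ctx->mutex held, one of the two forms of exclusion described above. */
static int count_sibling_events(struct perf_event *leader)
{
	struct perf_event *sibling;
	int n = 0;

	mutex_lock(&leader->ctx->mutex);
	for_each_sibling_event(sibling, leader)
		n++;
	mutex_unlock(&leader->ctx->mutex);

	return n;
}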
Another possible site to add assertions would be the various
pmu::{add,del,read,..}() methods, but that's not trivially expressible
in C -- the best option is wrappers, but those are easy enough to
forget.
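For comparison, the wrapper approach alluded to above might look like
the sketch below; perf_pmu_read() is hypothetical here, and its
weakness is exactly the one named: nothing stops a call site from
invoking event->pmu->read(event) directly and bypassing the assertion.

/* Hypothetical wrapper: centralizes the assertion, but callers
 * must remember to use it instead of the raw method pointer. */
static inline void perf_pmu_read(struct perf_event *event)
{
	lockdep_assert_event_ctx(event);
	event->pmu->read(event);
}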
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r--  include/linux/perf_event.h | 17
1 file changed, 17 insertions, 0 deletions
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f88cb31eaf75..368bdc4f563f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -61,6 +61,7 @@ struct perf_guest_info_callbacks {
 #include <linux/refcount.h>
 #include <linux/security.h>
 #include <linux/static_call.h>
+#include <linux/lockdep.h>
 #include <asm/local.h>
 
 struct perf_callchain_entry {
@@ -634,7 +635,23 @@ struct pmu_event_list {
 	struct list_head list;
 };
 
+/*
+ * event->sibling_list is modified while holding both ctx->lock and ctx->mutex,
+ * as such iteration must hold either lock. However, since ctx->lock is an IRQ
+ * safe lock, and is only held by the CPU doing the modification, having IRQs
+ * disabled is sufficient since it will hold-off the IPIs.
+ */
+#ifdef CONFIG_PROVE_LOCKING
+#define lockdep_assert_event_ctx(event)				\
+	WARN_ON_ONCE(__lockdep_enabled &&			\
+		     (this_cpu_read(hardirqs_enabled) &&	\
+		      lockdep_is_held(&(event)->ctx->mutex) != LOCK_STATE_HELD))
+#else
+#define lockdep_assert_event_ctx(event)
+#endif
+
 #define for_each_sibling_event(sibling, event)			\
+	lockdep_assert_event_ctx(event);			\
 	if ((event)->group_leader == (event))			\
 		list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
 
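As a usage illustration (again, not part of the patch), a reader that
disables IRQs around the walk passes the new assertion, since the IPIs
that modify the sibling list are held off; the helper below is
hypothetical and reads each event's count via local64_read():

/* Illustration only: with IRQs disabled the sibling list cannot
 * change underneath the iteration, so the assertion stays quiet. */
static u64 group_total(struct perf_event *leader)
{
	struct perf_event *sibling;
	unsigned long flags;
	u64 total;

	local_irq_save(flags);
	total = local64_read(&leader->count);
	for_each_sibling_event(sibling, leader)
		total += local64_read(&sibling->count);
	local_irq_restore(flags);

	return total;
}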