author     Song Liu <song@kernel.org>                   2021-04-25 14:43:31 -0700
committer  Arnaldo Carvalho de Melo <acme@redhat.com>   2021-04-29 10:30:58 -0300
commit     112cb56164bc2108a55aee785d841a35aab0616a (patch)
tree       fec53329a1865ca4f3a26f09e4f74f2b816b1e5b /tools/perf/builtin-stat.c
parent     fe3dd8263b9f3912a0f3a2f66c0fdb3987d69a1a (diff)
perf stat: Introduce config stat.bpf-counter-events
Currently, to use BPF to aggregate perf event counters, the user has to pass
the --bpf-counters option. Add a config option, stat.bpf-counter-events, that
turns on "use BPF by default" for selected events: any event whose name is
listed in the option is counted with BPF.

This also makes it possible to mix BPF events and regular events in the same
session.
For example:
perf config stat.bpf-counter-events=instructions
perf stat -e instructions,cs
The second command will use BPF for "instructions" but not "cs".
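
To make the mixed-session behaviour concrete, here is a minimal, self-contained
C sketch of the read path this patch introduces in tools/perf/builtin-stat.c.
It is an illustration only: struct mock_counter, read_via_bpf_map() and
read_via_perf_fd() are invented stand-ins for perf's evsel,
read_bpf_map_counters() and read_affinity_counters(); only the control flow
(BPF-backed events are read from BPF maps, the rest fall back to the regular
per-CPU read, and the fallback is skipped entirely when every counter uses
BPF) mirrors the patch.

/*
 * Minimal, self-contained model of the mixed read path.  The struct and
 * helper names are hypothetical stand-ins, not perf internals.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_counter {
	const char *name;
	bool use_bpf;	/* true if the name is listed in stat.bpf-counter-events */
	long value;
};

static int read_via_bpf_map(struct mock_counter *c)
{
	if (!c->use_bpf)
		return 0;	/* skipped, like !evsel__is_bpf() in read_bpf_map_counters() */
	c->value = 1000;	/* pretend we summed the per-CPU BPF map entries */
	return 0;
}

static int read_via_perf_fd(struct mock_counter *c)
{
	if (c->use_bpf)
		return 0;	/* skipped, like evsel__is_bpf() in read_affinity_counters() */
	c->value = 2000;	/* pretend we read(2) the event's file descriptor */
	return 0;
}

int main(void)
{
	/* "instructions" matches the config option, "cs" does not. */
	struct mock_counter counters[] = {
		{ .name = "instructions", .use_bpf = true  },
		{ .name = "cs",           .use_bpf = false },
	};
	int i, n = (int)(sizeof(counters) / sizeof(counters[0]));
	bool all_counters_use_bpf = true;	/* mirrors the new global in builtin-stat.c */

	/* Like __run_perf_stat(): note whether any counter still needs the regular path. */
	for (i = 0; i < n; i++)
		if (!counters[i].use_bpf)
			all_counters_use_bpf = false;

	/* Like read_counters(): BPF-backed counters first, then the regular readers. */
	for (i = 0; i < n; i++)
		read_via_bpf_map(&counters[i]);
	if (!all_counters_use_bpf)	/* read_affinity_counters() returns early otherwise */
		for (i = 0; i < n; i++)
			read_via_perf_fd(&counters[i]);

	for (i = 0; i < n; i++)
		printf("%-12s %6ld  (%s)\n", counters[i].name, counters[i].value,
		       counters[i].use_bpf ? "BPF" : "regular");
	return 0;
}

The real patch gets the same effect by having each reader skip counters that
do not belong to it, which is what lets BPF and regular events coexist in a
single evlist.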
Signed-off-by: Song Liu <song@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Song Liu <songliubraving@fb.com>
Link: https://lore.kernel.org/r/20210425214333.1090950-4-song@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/builtin-stat.c')
-rw-r--r--   tools/perf/builtin-stat.c   42
1 file changed, 25 insertions, 17 deletions
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index ba5b31aab86b..5ec2190e45cd 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -161,6 +161,7 @@ static const char *smi_cost_attrs = {
 };
 
 static struct evlist *evsel_list;
+static bool all_counters_use_bpf = true;
 
 static struct target target = {
         .uid = UINT_MAX,
@@ -401,6 +402,9 @@ static int read_affinity_counters(struct timespec *rs)
         struct affinity affinity;
         int i, ncpus, cpu;
 
+        if (all_counters_use_bpf)
+                return 0;
+
         if (affinity__setup(&affinity) < 0)
                 return -1;
 
@@ -415,6 +419,8 @@ static int read_affinity_counters(struct timespec *rs)
                 evlist__for_each_entry(evsel_list, counter) {
                         if (evsel__cpu_iter_skip(counter, cpu))
                                 continue;
+                        if (evsel__is_bpf(counter))
+                                continue;
                         if (!counter->err) {
                                 counter->err = read_counter_cpu(counter, rs,
                                                                 counter->cpu_iter - 1);
@@ -431,6 +437,9 @@ static int read_bpf_map_counters(void)
         int err;
 
         evlist__for_each_entry(evsel_list, counter) {
+                if (!evsel__is_bpf(counter))
+                        continue;
+
                 err = bpf_counter__read(counter);
                 if (err)
                         return err;
@@ -441,14 +450,10 @@ static int read_bpf_map_counters(void)
 static void read_counters(struct timespec *rs)
 {
         struct evsel *counter;
-        int err;
 
         if (!stat_config.stop_read_counter) {
-                if (target__has_bpf(&target))
-                        err = read_bpf_map_counters();
-                else
-                        err = read_affinity_counters(rs);
-                if (err < 0)
+                if (read_bpf_map_counters() ||
+                    read_affinity_counters(rs))
                         return;
         }
 
@@ -537,12 +542,13 @@ static int enable_counters(void)
         struct evsel *evsel;
         int err;
 
-        if (target__has_bpf(&target)) {
-                evlist__for_each_entry(evsel_list, evsel) {
-                        err = bpf_counter__enable(evsel);
-                        if (err)
-                                return err;
-                }
+        evlist__for_each_entry(evsel_list, evsel) {
+                if (!evsel__is_bpf(evsel))
+                        continue;
+
+                err = bpf_counter__enable(evsel);
+                if (err)
+                        return err;
         }
 
         if (stat_config.initial_delay < 0) {
@@ -786,11 +792,11 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
         if (affinity__setup(&affinity) < 0)
                 return -1;
 
-        if (target__has_bpf(&target)) {
-                evlist__for_each_entry(evsel_list, counter) {
-                        if (bpf_counter__load(counter, &target))
-                                return -1;
-                }
+        evlist__for_each_entry(evsel_list, counter) {
+                if (bpf_counter__load(counter, &target))
+                        return -1;
+                if (!evsel__is_bpf(counter))
+                        all_counters_use_bpf = false;
         }
 
         evlist__for_each_cpu (evsel_list, i, cpu) {
@@ -807,6 +813,8 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
                         continue;
                 if (counter->reset_group || counter->errored)
                         continue;
+                if (evsel__is_bpf(counter))
+                        continue;
 try_again:
                 if (create_perf_stat_counter(counter, &stat_config, &target,
                                              counter->cpu_iter - 1) < 0) {