Diffstat (limited to 'tools/perf/util/bpf_lock_contention.c')
-rw-r--r--  tools/perf/util/bpf_lock_contention.c | 142
1 file changed, 140 insertions, 2 deletions
diff --git a/tools/perf/util/bpf_lock_contention.c b/tools/perf/util/bpf_lock_contention.c
index 41a1ad087895..fc8666222399 100644
--- a/tools/perf/util/bpf_lock_contention.c
+++ b/tools/perf/util/bpf_lock_contention.c
@@ -2,6 +2,7 @@
 #include "util/cgroup.h"
 #include "util/debug.h"
 #include "util/evlist.h"
+#include "util/hashmap.h"
 #include "util/machine.h"
 #include "util/map.h"
 #include "util/symbol.h"
@@ -12,17 +13,106 @@
 #include <linux/zalloc.h>
 #include <linux/string.h>
 #include <bpf/bpf.h>
+#include <bpf/btf.h>
 #include <inttypes.h>
 
 #include "bpf_skel/lock_contention.skel.h"
 #include "bpf_skel/lock_data.h"
 
 static struct lock_contention_bpf *skel;
+static bool has_slab_iter;
+static struct hashmap slab_hash;
+
+static size_t slab_cache_hash(long key, void *ctx __maybe_unused)
+{
+	return key;
+}
+
+static bool slab_cache_equal(long key1, long key2, void *ctx __maybe_unused)
+{
+	return key1 == key2;
+}
+
+static void check_slab_cache_iter(struct lock_contention *con)
+{
+	struct btf *btf = btf__load_vmlinux_btf();
+	s32 ret;
+
+	hashmap__init(&slab_hash, slab_cache_hash, slab_cache_equal, /*ctx=*/NULL);
+
+	if (btf == NULL) {
+		pr_debug("BTF loading failed: %s\n", strerror(errno));
+		return;
+	}
+
+	ret = btf__find_by_name_kind(btf, "bpf_iter__kmem_cache", BTF_KIND_STRUCT);
+	if (ret < 0) {
+		bpf_program__set_autoload(skel->progs.slab_cache_iter, false);
+		pr_debug("slab cache iterator is not available: %d\n", ret);
+		goto out;
+	}
+
+	has_slab_iter = true;
+
+	bpf_map__set_max_entries(skel->maps.slab_caches, con->map_nr_entries);
+out:
+	btf__free(btf);
+}
+
+static void run_slab_cache_iter(void)
+{
+	int fd;
+	char buf[256];
+	long key, *prev_key;
+
+	if (!has_slab_iter)
+		return;
+
+	fd = bpf_iter_create(bpf_link__fd(skel->links.slab_cache_iter));
+	if (fd < 0) {
+		pr_debug("cannot create slab cache iter: %d\n", fd);
+		return;
+	}
+
+	/* This will run the bpf program */
+	while (read(fd, buf, sizeof(buf)) > 0)
+		continue;
+
+	close(fd);
+
+	/* Read the slab cache map and build a hash with IDs */
+	fd = bpf_map__fd(skel->maps.slab_caches);
+	prev_key = NULL;
+	while (!bpf_map_get_next_key(fd, prev_key, &key)) {
+		struct slab_cache_data *data;
+
+		data = malloc(sizeof(*data));
+		if (data == NULL)
+			break;
+
+		if (bpf_map_lookup_elem(fd, &key, data) < 0)
+			break;
+
+		hashmap__add(&slab_hash, data->id, data);
+		prev_key = &key;
+	}
+}
+
+static void exit_slab_cache_iter(void)
+{
+	struct hashmap_entry *cur;
+	unsigned bkt;
+
+	hashmap__for_each_entry(&slab_hash, cur, bkt)
+		free(cur->pvalue);
+
+	hashmap__clear(&slab_hash);
+}
 
 int lock_contention_prepare(struct lock_contention *con)
 {
 	int i, fd;
-	int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1, ncgrps = 1;
+	int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1, ncgrps = 1, nslabs = 1;
 	struct evlist *evlist = con->evlist;
 	struct target *target = con->target;
 
@@ -109,6 +199,15 @@ int lock_contention_prepare(struct lock_contention *con)
 		skel->rodata->use_cgroup_v2 = 1;
 	}
 
+	check_slab_cache_iter(con);
+
+	if (con->filters->nr_slabs && has_slab_iter) {
+		skel->rodata->has_slab = 1;
+		nslabs = con->filters->nr_slabs;
+	}
+
+	bpf_map__set_max_entries(skel->maps.slab_filter, nslabs);
+
 	if (lock_contention_bpf__load(skel) < 0) {
 		pr_err("Failed to load lock-contention BPF skeleton\n");
 		return -1;
@@ -179,6 +278,36 @@ int lock_contention_prepare(struct lock_contention *con)
 	bpf_program__set_autoload(skel->progs.collect_lock_syms, false);
 
 	lock_contention_bpf__attach(skel);
+
+	/* run the slab iterator after attaching */
+	run_slab_cache_iter();
+
+	if (con->filters->nr_slabs) {
+		u8 val = 1;
+		int cache_fd;
+		long key, *prev_key;
+
+		fd = bpf_map__fd(skel->maps.slab_filter);
+
+		/* Read the slab cache map and mark matching caches in the filter */
+		cache_fd = bpf_map__fd(skel->maps.slab_caches);
+		prev_key = NULL;
+		while (!bpf_map_get_next_key(cache_fd, prev_key, &key)) {
+			struct slab_cache_data data;
+
+			if (bpf_map_lookup_elem(cache_fd, &key, &data) < 0)
+				break;
+
+			for (i = 0; i < con->filters->nr_slabs; i++) {
+				if (!strcmp(con->filters->slabs[i], data.name)) {
+					bpf_map_update_elem(fd, &key, &val, BPF_ANY);
+					break;
+				}
+			}
+			prev_key = &key;
+		}
+	}
+
 	return 0;
 }
 
@@ -347,6 +476,7 @@ static const char *lock_contention_get_name(struct lock_contention *con,
 
 	if (con->aggr_mode == LOCK_AGGR_ADDR) {
 		int lock_fd = bpf_map__fd(skel->maps.lock_syms);
+		struct slab_cache_data *slab_data;
 
 		/* per-process locks set upper bits of the flags */
 		if (flags & LCB_F_MMAP_LOCK)
@@ -365,6 +495,12 @@ static const char *lock_contention_get_name(struct lock_contention *con,
 			return "rq_lock";
 		}
 
+		/* look up slab_hash for dynamic locks in a slab object */
+		if (hashmap__find(&slab_hash, flags & LCB_F_SLAB_ID_MASK, &slab_data)) {
+			snprintf(name_buf, sizeof(name_buf), "&%s", slab_data->name);
+			return name_buf;
+		}
+
 		return "";
 	}
 
@@ -458,7 +594,7 @@ int lock_contention_read(struct lock_contention *con)
 
 		if (con->save_callstack) {
 			bpf_map_lookup_elem(stack, &key.stack_id, stack_trace);
-			if (!match_callstack_filter(machine, stack_trace)) {
+			if (!match_callstack_filter(machine, stack_trace, con->max_stack)) {
 				con->nr_filtered += data.count;
 				goto next;
 			}
@@ -539,5 +675,7 @@ int lock_contention_finish(struct lock_contention *con)
 		cgroup__put(cgrp);
 	}
 
+	exit_slab_cache_iter();
+
 	return 0;
 }
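Note on the hashmap API: slab_hash uses the hashmap implementation that perf vendors from libbpf (util/hashmap.h). Keys and values are stored as longs, so integer IDs and pointers both fit, and the constructor takes hash/equal callbacks plus an optional context pointer. Below is a minimal standalone sketch of the pattern used in this patch, with a hypothetical ID and cache name; as in exit_slab_cache_iter() above, the values are owned by the caller and must be freed before clearing:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "util/hashmap.h"	/* perf's copy of libbpf's hashmap */

/* identity hash: slab cache IDs are already distinct */
static size_t id_hash(long key, void *ctx)
{
	return key;
}

static bool id_equal(long key1, long key2, void *ctx)
{
	return key1 == key2;
}

int main(void)
{
	struct hashmap map;
	struct hashmap_entry *cur;
	unsigned bkt;
	char *name;

	hashmap__init(&map, id_hash, id_equal, /*ctx=*/NULL);
	hashmap__add(&map, 0x10000, strdup("kmalloc-64"));	/* hypothetical ID */

	if (hashmap__find(&map, 0x10000, &name))
		printf("lock name: &%s\n", name);

	/* the map does not own the values: free them before clearing */
	hashmap__for_each_entry(&map, cur, bkt)
		free(cur->pvalue);
	hashmap__clear(&map);
	return 0;
}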
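The slab_cache_iter program opened by bpf_iter_create() lives on the BPF side, in bpf_skel/lock_contention.bpf.c, and is not shown in this diff. For context, an "iter/kmem_cache" program that fills slab_caches could look roughly like the sketch below; the bpf_iter__kmem_cache context layout, the slab_cache_id counter, and the LCB_F_SLAB_ID_SHIFT constant are assumptions inferred from how the user-space side consumes the map, not code from this patch:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "lock_data.h"			/* struct slab_cache_data (assumed) */

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(long));		/* kmem_cache address */
	__uint(value_size, sizeof(struct slab_cache_data));
	__uint(max_entries, 1);			/* resized from user space */
} slab_caches SEC(".maps");

static unsigned int slab_cache_id;

SEC("iter/kmem_cache")
int slab_cache_iter(struct bpf_iter__kmem_cache *ctx)
{
	struct kmem_cache *s = ctx->s;
	struct slab_cache_data d;

	if (s == NULL)
		return 0;	/* end of the iteration */

	/* record the cache name and hand out a compact ID */
	bpf_probe_read_kernel_str(d.name, sizeof(d.name), s->name);
	d.id = ++slab_cache_id << LCB_F_SLAB_ID_SHIFT;	/* assumed shift */

	bpf_map_update_elem(&slab_caches, &s, &d, BPF_NOEXIST);
	return 0;
}

char LICENSE[] SEC("license") = "Dual BSD/GPL";

Keying the map by cache address lets the contention path go from a lock address to an ID, while the user-space hash above re-indexes the same entries by ID for name lookup.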
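On the reporting side, lock_contention_get_name() recovers the ID with flags & LCB_F_SLAB_ID_MASK, which only works if the contention-tracking BPF program stamped the cache ID into the flags when the lock address fell inside a slab object. The kernel grew a bpf_get_kmem_cache() kfunc for this kind of address-to-cache lookup; a hedged sketch of that step follows, with the helper's declaration and the integration point both treated as assumptions rather than code from this patch:

/* in the contention path of lock_contention.bpf.c (sketch) */
extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym __weak;

static inline __u32 slab_id_flags(__u64 lock_addr, __u32 flags)
{
	struct kmem_cache *s;
	struct slab_cache_data *d;

	s = bpf_get_kmem_cache(lock_addr);
	if (s == NULL)
		return flags;

	/* slab_caches was keyed by cache address by the iterator */
	d = bpf_map_lookup_elem(&slab_caches, &s);
	if (d != NULL)
		flags |= d->id;	/* recovered later via LCB_F_SLAB_ID_MASK */

	return flags;
}

With both halves in place, a contended lock embedded in a slab object is reported as "&<cache name>" (for example "&task_struct") by the snprintf() in lock_contention_get_name(), instead of an empty name.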