Diffstat (limited to 'tools/lib/perf/cpumap.c')
-rw-r--r-- | tools/lib/perf/cpumap.c | 137 |
1 file changed, 45 insertions, 92 deletions
diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c
index cae799ad44e1..4454a5987570 100644
--- a/tools/lib/perf/cpumap.c
+++ b/tools/lib/perf/cpumap.c
@@ -1,4 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
+#include <errno.h>
 #include <perf/cpumap.h>
 #include <stdlib.h>
 #include <linux/refcount.h>
@@ -10,6 +11,9 @@
 #include <ctype.h>
 #include <limits.h>
 #include "internal.h"
+#include <api/fs/fs.h>
+
+#define MAX_NR_CPUS 4096
 
 void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
 {
@@ -100,12 +104,12 @@ static struct perf_cpu_map *cpu_map__new_sysconf(void)
 static struct perf_cpu_map *cpu_map__new_sysfs_online(void)
 {
 	struct perf_cpu_map *cpus = NULL;
-	FILE *onlnf;
+	char *buf = NULL;
+	size_t buf_len;
 
-	onlnf = fopen("/sys/devices/system/cpu/online", "r");
-	if (onlnf) {
-		cpus = perf_cpu_map__read(onlnf);
-		fclose(onlnf);
+	if (sysfs__read_str("devices/system/cpu/online", &buf, &buf_len) >= 0) {
+		cpus = perf_cpu_map__new(buf);
+		free(buf);
 	}
 	return cpus;
 }
@@ -158,62 +162,6 @@ static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu
 	return cpus;
 }
 
-struct perf_cpu_map *perf_cpu_map__read(FILE *file)
-{
-	struct perf_cpu_map *cpus = NULL;
-	int nr_cpus = 0;
-	struct perf_cpu *tmp_cpus = NULL, *tmp;
-	int max_entries = 0;
-	int n, cpu, prev;
-	char sep;
-
-	sep = 0;
-	prev = -1;
-	for (;;) {
-		n = fscanf(file, "%u%c", &cpu, &sep);
-		if (n <= 0)
-			break;
-		if (prev >= 0) {
-			int new_max = nr_cpus + cpu - prev - 1;
-
-			WARN_ONCE(new_max >= MAX_NR_CPUS, "Perf can support %d CPUs. "
-				  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);
-
-			if (new_max >= max_entries) {
-				max_entries = new_max + MAX_NR_CPUS / 2;
-				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
-				if (tmp == NULL)
-					goto out_free_tmp;
-				tmp_cpus = tmp;
-			}
-
-			while (++prev < cpu)
-				tmp_cpus[nr_cpus++].cpu = prev;
-		}
-		if (nr_cpus == max_entries) {
-			max_entries += MAX_NR_CPUS;
-			tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
-			if (tmp == NULL)
-				goto out_free_tmp;
-			tmp_cpus = tmp;
-		}
-
-		tmp_cpus[nr_cpus++].cpu = cpu;
-		if (n == 2 && sep == '-')
-			prev = cpu;
-		else
-			prev = -1;
-		if (n == 1 || sep == '\n')
-			break;
-	}
-
-	if (nr_cpus > 0)
-		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
-out_free_tmp:
-	free(tmp_cpus);
-	return cpus;
-}
-
 struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
 {
 	struct perf_cpu_map *cpus = NULL;
@@ -237,8 +185,8 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
 	while (isdigit(*cpu_list)) {
 		p = NULL;
 		start_cpu = strtoul(cpu_list, &p, 0);
-		if (start_cpu >= INT_MAX
-		    || (*p != '\0' && *p != ',' && *p != '-'))
+		if (start_cpu >= INT16_MAX
+		    || (*p != '\0' && *p != ',' && *p != '-' && *p != '\n'))
 			goto invalid;
 
 		if (*p == '-') {
@@ -246,7 +194,7 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
 			p = NULL;
 			end_cpu = strtoul(cpu_list, &p, 0);
 
-			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
+			if (end_cpu >= INT16_MAX || (*p != '\0' && *p != ',' && *p != '\n'))
 				goto invalid;
 
 			if (end_cpu < start_cpu)
@@ -261,17 +209,17 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
 		for (; start_cpu <= end_cpu; start_cpu++) {
 			/* check for duplicates */
 			for (i = 0; i < nr_cpus; i++)
-				if (tmp_cpus[i].cpu == (int)start_cpu)
+				if (tmp_cpus[i].cpu == (int16_t)start_cpu)
 					goto invalid;
 
 			if (nr_cpus == max_entries) {
-				max_entries += MAX_NR_CPUS;
+				max_entries += max(end_cpu - start_cpu + 1, 16UL);
 				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
 				if (tmp == NULL)
 					goto invalid;
 				tmp_cpus = tmp;
 			}
-			tmp_cpus[nr_cpus++].cpu = (int)start_cpu;
+			tmp_cpus[nr_cpus++].cpu = (int16_t)start_cpu;
 		}
 		if (*p)
 			++p;
@@ -279,14 +227,15 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
 		cpu_list = p;
 	}
 
-	if (nr_cpus > 0)
+	if (nr_cpus > 0) {
 		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
-	else if (*cpu_list != '\0') {
+	} else if (*cpu_list != '\0') {
 		pr_warning("Unexpected characters at end of cpu list ('%s'), using online CPUs.",
 			   cpu_list);
 		cpus = perf_cpu_map__new_online_cpus();
-	} else
+	} else {
 		cpus = perf_cpu_map__new_any_cpu();
+	}
 invalid:
 	free(tmp_cpus);
 out:
@@ -436,46 +385,49 @@ bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu
 }
 
 /*
- * Merge two cpumaps
+ * Merge two cpumaps.
+ *
+ * If 'other' is subset of '*orig', '*orig' keeps itself with no reference count
+ * change (similar to "realloc").
+ *
+ * If '*orig' is subset of 'other', '*orig' reuses 'other' with its reference
+ * count increased.
  *
- * orig either gets freed and replaced with a new map, or reused
- * with no reference count change (similar to "realloc")
- * other has its reference count increased.
+ * Otherwise, '*orig' gets freed and replaced with a new map.
 */
-
-struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
-					 struct perf_cpu_map *other)
+int perf_cpu_map__merge(struct perf_cpu_map **orig, struct perf_cpu_map *other)
 {
 	struct perf_cpu *tmp_cpus;
 	int tmp_len;
 	int i, j, k;
 	struct perf_cpu_map *merged;
 
-	if (perf_cpu_map__is_subset(orig, other))
-		return orig;
-	if (perf_cpu_map__is_subset(other, orig)) {
-		perf_cpu_map__put(orig);
-		return perf_cpu_map__get(other);
+	if (perf_cpu_map__is_subset(*orig, other))
+		return 0;
+	if (perf_cpu_map__is_subset(other, *orig)) {
+		perf_cpu_map__put(*orig);
+		*orig = perf_cpu_map__get(other);
+		return 0;
 	}
 
-	tmp_len = __perf_cpu_map__nr(orig) + __perf_cpu_map__nr(other);
+	tmp_len = __perf_cpu_map__nr(*orig) + __perf_cpu_map__nr(other);
 	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
 	if (!tmp_cpus)
-		return NULL;
+		return -ENOMEM;
 
 	/* Standard merge algorithm from wikipedia */
 	i = j = k = 0;
-	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
-		if (__perf_cpu_map__cpu(orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) {
-			if (__perf_cpu_map__cpu(orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu)
+	while (i < __perf_cpu_map__nr(*orig) && j < __perf_cpu_map__nr(other)) {
+		if (__perf_cpu_map__cpu(*orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) {
+			if (__perf_cpu_map__cpu(*orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu)
 				j++;
-			tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
+			tmp_cpus[k++] = __perf_cpu_map__cpu(*orig, i++);
 		} else
 			tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
 	}
 
-	while (i < __perf_cpu_map__nr(orig))
-		tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
+	while (i < __perf_cpu_map__nr(*orig))
+		tmp_cpus[k++] = __perf_cpu_map__cpu(*orig, i++);
 
 	while (j < __perf_cpu_map__nr(other))
 		tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
@@ -483,8 +435,9 @@ struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
 
 	merged = cpu_map__trim_new(k, tmp_cpus);
 	free(tmp_cpus);
-	perf_cpu_map__put(orig);
-	return merged;
+	perf_cpu_map__put(*orig);
+	*orig = merged;
+	return 0;
 }
 
 struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
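
Note on the sysfs change above: cpu_map__new_sysfs_online() now reads the whole "online" file into a string with sysfs__read_str() and hands it to perf_cpu_map__new(), which parses ranges such as "0-3,7" and, after this patch, also tolerates a trailing newline. The standalone sketch below (not taken from the patch; parse_cpu_list() and its limits are hypothetical) illustrates that style of cpu-list parsing and the new int16_t-sized CPU numbers.

/*
 * Standalone sketch (hypothetical helper, not the libperf code) of parsing a
 * sysfs-style CPU list such as "0-3,7\n", mirroring the kind of string
 * parsing perf_cpu_map__new() applies to the sysfs__read_str() buffer.
 */
#include <ctype.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Returns the number of CPUs parsed into 'cpus', or -1 on malformed input. */
static int parse_cpu_list(const char *list, int16_t *cpus, int max)
{
	int nr = 0;

	while (isdigit((unsigned char)*list)) {
		char *end;
		unsigned long start = strtoul(list, &end, 10);
		unsigned long stop = start;

		if (*end == '-') {			/* "a-b" range */
			stop = strtoul(end + 1, &end, 10);
			if (stop < start)
				return -1;
		}
		if (*end != '\0' && *end != ',' && *end != '\n')
			return -1;			/* unexpected separator */

		for (unsigned long cpu = start; cpu <= stop; cpu++) {
			if (nr == max || cpu >= INT16_MAX)
				return -1;
			cpus[nr++] = (int16_t)cpu;	/* CPU numbers fit in int16_t */
		}
		if (*end == ',')
			end++;
		list = end;
	}
	return nr;
}

int main(void)
{
	int16_t cpus[64];
	int nr = parse_cpu_list("0-3,7\n", cpus, 64);

	for (int i = 0; i < nr; i++)
		printf("cpu %d\n", cpus[i]);		/* prints 0 1 2 3 7 */
	return 0;
}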
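
The perf_cpu_map__merge() rework turns the function into an in/out update: callers pass the address of their map and check an int error code instead of a returned pointer. A hypothetical caller (not part of this patch) might adapt like this:

/*
 * Hypothetical caller (not from the patch) showing the reworked
 * perf_cpu_map__merge() API: the map is passed by reference and updated in
 * place, and failure is reported as a negative errno instead of a NULL map.
 */
#include <errno.h>
#include <perf/cpumap.h>

static int add_online_cpus(struct perf_cpu_map **all_cpus)
{
	struct perf_cpu_map *online = perf_cpu_map__new_online_cpus();
	int err;

	if (!online)
		return -ENOMEM;

	/*
	 * Before: *all_cpus = perf_cpu_map__merge(*all_cpus, online);
	 *         if (!*all_cpus) ... handle failure ...
	 * After:  the merge updates *all_cpus and returns 0 or -ENOMEM.
	 */
	err = perf_cpu_map__merge(all_cpus, online);
	perf_cpu_map__put(online);	/* drop our reference; merge took what it needed */
	return err;
}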