Diffstat (limited to 'tools/perf/util/evlist.c')
-rw-r--r--	tools/perf/util/evlist.c	236
1 file changed, 213 insertions, 23 deletions
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 814e954c1318..3cebc9a8d52e 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -25,6 +25,9 @@
 #include <linux/bitops.h>
 #include <linux/hash.h>
 
+static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
+static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
+
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
 
@@ -37,6 +40,7 @@ void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
 		INIT_HLIST_HEAD(&evlist->heads[i]);
 	INIT_LIST_HEAD(&evlist->entries);
 	perf_evlist__set_maps(evlist, cpus, threads);
+	fdarray__init(&evlist->pollfd, 64);
 	evlist->workload.pid = -1;
 }
 
@@ -102,7 +106,7 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
 void perf_evlist__exit(struct perf_evlist *evlist)
 {
 	zfree(&evlist->mmap);
-	zfree(&evlist->pollfd);
+	fdarray__exit(&evlist->pollfd);
 }
 
 void perf_evlist__delete(struct perf_evlist *evlist)
@@ -122,6 +126,7 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
 {
 	list_add_tail(&entry->node, &evlist->entries);
 	entry->idx = evlist->nr_entries;
+	entry->tracking = !entry->idx;
 
 	if (!evlist->nr_entries++)
 		perf_evlist__set_id_pos(evlist);
@@ -265,17 +270,27 @@ int perf_evlist__add_newtp(struct perf_evlist *evlist,
 	return 0;
 }
 
+static int perf_evlist__nr_threads(struct perf_evlist *evlist,
+				   struct perf_evsel *evsel)
+{
+	if (evsel->system_wide)
+		return 1;
+	else
+		return thread_map__nr(evlist->threads);
+}
+
 void perf_evlist__disable(struct perf_evlist *evlist)
 {
 	int cpu, thread;
 	struct perf_evsel *pos;
 	int nr_cpus = cpu_map__nr(evlist->cpus);
-	int nr_threads = thread_map__nr(evlist->threads);
+	int nr_threads;
 
 	for (cpu = 0; cpu < nr_cpus; cpu++) {
 		evlist__for_each(evlist, pos) {
 			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
 				continue;
+			nr_threads = perf_evlist__nr_threads(evlist, pos);
 			for (thread = 0; thread < nr_threads; thread++)
 				ioctl(FD(pos, cpu, thread),
 				      PERF_EVENT_IOC_DISABLE, 0);
@@ -288,12 +303,13 @@ void perf_evlist__enable(struct perf_evlist *evlist)
 	int cpu, thread;
 	struct perf_evsel *pos;
 	int nr_cpus = cpu_map__nr(evlist->cpus);
-	int nr_threads = thread_map__nr(evlist->threads);
+	int nr_threads;
 
 	for (cpu = 0; cpu < nr_cpus; cpu++) {
 		evlist__for_each(evlist, pos) {
 			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
 				continue;
+			nr_threads = perf_evlist__nr_threads(evlist, pos);
 			for (thread = 0; thread < nr_threads; thread++)
 				ioctl(FD(pos, cpu, thread),
 				      PERF_EVENT_IOC_ENABLE, 0);
@@ -305,12 +321,14 @@ int perf_evlist__disable_event(struct perf_evlist *evlist,
 			       struct perf_evsel *evsel)
 {
 	int cpu, thread, err;
+	int nr_cpus = cpu_map__nr(evlist->cpus);
+	int nr_threads = perf_evlist__nr_threads(evlist, evsel);
 
 	if (!evsel->fd)
 		return 0;
 
-	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
-		for (thread = 0; thread < evlist->threads->nr; thread++) {
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		for (thread = 0; thread < nr_threads; thread++) {
 			err = ioctl(FD(evsel, cpu, thread),
 				    PERF_EVENT_IOC_DISABLE, 0);
 			if (err)
@@ -324,12 +342,14 @@ int perf_evlist__enable_event(struct perf_evlist *evlist,
 			      struct perf_evsel *evsel)
 {
 	int cpu, thread, err;
+	int nr_cpus = cpu_map__nr(evlist->cpus);
+	int nr_threads = perf_evlist__nr_threads(evlist, evsel);
 
 	if (!evsel->fd)
 		return -EINVAL;
 
-	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
-		for (thread = 0; thread < evlist->threads->nr; thread++) {
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		for (thread = 0; thread < nr_threads; thread++) {
 			err = ioctl(FD(evsel, cpu, thread),
 				    PERF_EVENT_IOC_ENABLE, 0);
 			if (err)
@@ -339,21 +359,111 @@ int perf_evlist__enable_event(struct perf_evlist *evlist,
 	return 0;
 }
 
-static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
+static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
+					 struct perf_evsel *evsel, int cpu)
+{
+	int thread, err;
+	int nr_threads = perf_evlist__nr_threads(evlist, evsel);
+
+	if (!evsel->fd)
+		return -EINVAL;
+
+	for (thread = 0; thread < nr_threads; thread++) {
+		err = ioctl(FD(evsel, cpu, thread),
+			    PERF_EVENT_IOC_ENABLE, 0);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
+					    struct perf_evsel *evsel,
+					    int thread)
+{
+	int cpu, err;
+	int nr_cpus = cpu_map__nr(evlist->cpus);
+
+	if (!evsel->fd)
+		return -EINVAL;
+
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
+				  struct perf_evsel *evsel, int idx)
+{
+	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);
+
+	if (per_cpu_mmaps)
+		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
+	else
+		return perf_evlist__enable_event_thread(evlist, evsel, idx);
+}
+
+int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 {
 	int nr_cpus = cpu_map__nr(evlist->cpus);
 	int nr_threads = thread_map__nr(evlist->threads);
-	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
-	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
-	return evlist->pollfd != NULL ? 0 : -ENOMEM;
+	int nfds = 0;
+	struct perf_evsel *evsel;
+
+	list_for_each_entry(evsel, &evlist->entries, node) {
+		if (evsel->system_wide)
+			nfds += nr_cpus;
+		else
+			nfds += nr_cpus * nr_threads;
+	}
+
+	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
+	    fdarray__grow(&evlist->pollfd, nfds) < 0)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx)
+{
+	int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP);
+	/*
+	 * Save the idx so that when we filter out fds POLLHUP'ed we can
+	 * close the associated evlist->mmap[] entry.
+	 */
+	if (pos >= 0) {
+		evlist->pollfd.priv[pos].idx = idx;
+
+		fcntl(fd, F_SETFL, O_NONBLOCK);
+	}
+
+	return pos;
+}
+
+int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
+{
+	return __perf_evlist__add_pollfd(evlist, fd, -1);
+}
+
+static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd)
+{
+	struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd);
+
+	perf_evlist__mmap_put(evlist, fda->priv[fd].idx);
+}
+
+int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
+{
+	return fdarray__filter(&evlist->pollfd, revents_and_mask,
+			       perf_evlist__munmap_filtered);
 }
 
-void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
+int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
 {
-	fcntl(fd, F_SETFL, O_NONBLOCK);
-	evlist->pollfd[evlist->nr_fds].fd = fd;
-	evlist->pollfd[evlist->nr_fds].events = POLLIN;
-	evlist->nr_fds++;
+	return fdarray__poll(&evlist->pollfd, timeout);
 }
 
 static void perf_evlist__id_hash(struct perf_evlist *evlist,
@@ -566,14 +676,36 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 	return event;
 }
 
+static bool perf_mmap__empty(struct perf_mmap *md)
+{
+	return perf_mmap__read_head(md) == md->prev;
+}
+
+static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
+{
+	++evlist->mmap[idx].refcnt;
+}
+
+static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
+{
+	BUG_ON(evlist->mmap[idx].refcnt == 0);
+
+	if (--evlist->mmap[idx].refcnt == 0)
+		__perf_evlist__munmap(evlist, idx);
+}
+
 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
 {
+	struct perf_mmap *md = &evlist->mmap[idx];
+
 	if (!evlist->overwrite) {
-		struct perf_mmap *md = &evlist->mmap[idx];
 		unsigned int old = md->prev;
 
 		perf_mmap__write_tail(md, old);
 	}
+
+	if (md->refcnt == 1 && perf_mmap__empty(md))
+		perf_evlist__mmap_put(evlist, idx);
 }
 
 static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
@@ -581,6 +713,7 @@ static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
 {
 	if (evlist->mmap[idx].base != NULL) {
 		munmap(evlist->mmap[idx].base, evlist->mmap_len);
 		evlist->mmap[idx].base = NULL;
+		evlist->mmap[idx].refcnt = 0;
 	}
 }
 
@@ -614,6 +747,20 @@ struct mmap_params {
 static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
 			       struct mmap_params *mp, int fd)
 {
+	/*
+	 * The last one will be done at perf_evlist__mmap_consume(), so that we
+	 * make sure we don't prevent tools from consuming every last event in
+	 * the ring buffer.
+	 *
+	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
+	 * anymore, but the last events for it are still in the ring buffer,
+	 * waiting to be consumed.
+	 *
+	 * Tools can choose to ignore this at their own discretion, but the
+	 * evlist layer can't just drop it when filtering events in
+	 * perf_evlist__filter_pollfd().
+	 */
+	evlist->mmap[idx].refcnt = 2;
 	evlist->mmap[idx].prev = 0;
 	evlist->mmap[idx].mask = mp->mask;
 	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
@@ -625,7 +772,6 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
 		return -1;
 	}
 
-	perf_evlist__add_pollfd(evlist, fd);
 	return 0;
 }
 
@@ -636,7 +782,12 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 	struct perf_evsel *evsel;
 
 	evlist__for_each(evlist, evsel) {
-		int fd = FD(evsel, cpu, thread);
+		int fd;
+
+		if (evsel->system_wide && thread)
+			continue;
+
+		fd = FD(evsel, cpu, thread);
 
 		if (*output == -1) {
 			*output = fd;
@@ -645,6 +796,13 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 		} else {
 			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
 				return -1;
+
+			perf_evlist__mmap_get(evlist, idx);
+		}
+
+		if (__perf_evlist__add_pollfd(evlist, fd, idx) < 0) {
+			perf_evlist__mmap_put(evlist, idx);
+			return -1;
 		}
 
 		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
@@ -804,7 +962,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
 	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
 		return -ENOMEM;
 
-	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
+	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
 		return -ENOMEM;
 
 	evlist->overwrite = overwrite;
@@ -1061,6 +1219,8 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *tar
 	}
 
 	if (!evlist->workload.pid) {
+		int ret;
+
 		if (pipe_output)
 			dup2(2, 1);
 
@@ -1078,8 +1238,22 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *tar
 		/*
 		 * Wait until the parent tells us to go.
 		 */
-		if (read(go_pipe[0], &bf, 1) == -1)
-			perror("unable to read pipe");
+		ret = read(go_pipe[0], &bf, 1);
+		/*
+		 * The parent will ask for the execvp() to be performed by
+		 * writing exactly one byte, in workload.cork_fd, usually via
+		 * perf_evlist__start_workload().
+		 *
+		 * For cancelling the workload without actually running it,
+		 * the parent will just close workload.cork_fd, without writing
+		 * anything, i.e. read will return zero and we just exit()
+		 * here.
+		 */
+		if (ret != 1) {
+			if (ret == -1)
+				perror("unable to read pipe");
+			exit(ret);
+		}
 
 		execvp(argv[0], (char **)argv);
 
@@ -1202,7 +1376,7 @@ int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
 			       int err, char *buf, size_t size)
 {
 	int printed, value;
-	char sbuf[128], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
+	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
 
 	switch (err) {
 	case EACCES:
@@ -1250,3 +1424,19 @@ void perf_evlist__to_front(struct perf_evlist *evlist,
 
 	list_splice(&move, &evlist->entries);
 }
+
+void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
+				     struct perf_evsel *tracking_evsel)
+{
+	struct perf_evsel *evsel;
+
+	if (tracking_evsel->tracking)
+		return;
+
+	evlist__for_each(evlist, evsel) {
+		if (evsel != tracking_evsel)
+			evsel->tracking = false;
+	}
+
+	tracking_evsel->tracking = true;
+}
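
The patch replaces the open-coded pollfd array with the fdarray API: each entry carries a priv cookie (here the mmap index saved by __perf_evlist__add_pollfd()), and perf_evlist__filter_pollfd() compacts the array while invoking a callback for every entry whose revents match the mask, so the matching ring buffer reference can be dropped. A rough standalone model of that compact-and-callback pattern follows; it is a sketch only, and none of the names below come from the perf sources.

/*
 * Hypothetical model of the fdarray idea: pollfd slots paired with a
 * per-slot cookie, plus a filter that drops matching entries densely.
 */
#include <poll.h>
#include <stddef.h>

struct fda {
	struct pollfd entries[64];
	int priv_idx[64];	/* cookie per entry, like fdarray->priv[].idx */
	int nr;
};

static int fda_add(struct fda *fda, int fd, short events, int idx)
{
	if (fda->nr >= 64)
		return -1;
	fda->entries[fda->nr].fd = fd;
	fda->entries[fda->nr].events = events;
	fda->priv_idx[fda->nr] = idx;
	return fda->nr++;
}

/* Drop entries whose revents match 'mask', keeping the array dense. */
static int fda_filter(struct fda *fda, short mask,
		      void (*on_drop)(struct fda *fda, int entry))
{
	int i, nr = 0;

	for (i = 0; i < fda->nr; i++) {
		if (fda->entries[i].revents & mask) {
			if (on_drop)
				on_drop(fda, i);	/* release resources */
			continue;			/* filtered out */
		}
		if (nr != i) {
			fda->entries[nr] = fda->entries[i];
			fda->priv_idx[nr] = fda->priv_idx[i];
		}
		nr++;
	}
	return fda->nr = nr;
}

int main(void)
{
	struct fda fda = { .nr = 0 };

	fda_add(&fda, 3, POLLIN | POLLERR | POLLHUP, 0);
	fda_add(&fda, 4, POLLIN | POLLERR | POLLHUP, 1);

	/* pretend poll() reported that fd 3 hung up */
	fda.entries[0].revents = POLLHUP;
	fda_filter(&fda, POLLHUP, NULL);	/* one entry left */
	return fda.nr == 1 ? 0 : 1;
}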
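The refcnt = 2 scheme from __perf_evlist__mmap() can also be modeled in isolation: one reference belongs to the polled fd and is dropped when POLLHUP is filtered out, the other belongs to the consumer and is dropped in the consume path only once the ring is drained, so the last events survive the hangup. A minimal sketch with hypothetical names, not perf's types:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct ring {
	unsigned long head, prev;	/* producer write pos / consumer read pos */
	int refcnt;
	bool mapped;
};

static void ring_open(struct ring *r)
{
	/* one ref for the poller (dropped on POLLHUP), one for the consumer */
	r->refcnt = 2;
	r->mapped = true;
	r->head = r->prev = 0;
}

static void ring_put(struct ring *r)
{
	assert(r->refcnt > 0);
	if (--r->refcnt == 0)
		r->mapped = false;	/* munmap() in the real code */
}

static bool ring_empty(const struct ring *r)
{
	return r->head == r->prev;
}

/* mirrors the consume-side check: only release once drained */
static void ring_consume(struct ring *r)
{
	if (r->refcnt == 1 && ring_empty(r))
		ring_put(r);
}

int main(void)
{
	struct ring r;

	ring_open(&r);
	r.head = 2;		/* two events arrive */
	ring_put(&r);		/* POLLHUP: poller reference dropped */
	ring_consume(&r);	/* not empty yet: stays mapped */
	r.prev = 2;		/* tool consumes both events */
	ring_consume(&r);	/* now empty: final put unmaps */
	printf("mapped: %d\n", r.mapped);	/* prints 0 */
	return 0;
}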
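perf_evlist__alloc_pollfd() sizes the array per evsel: a system-wide evsel contributes one fd per cpu (its thread dimension collapses to 1, as perf_evlist__nr_threads() shows), while any other evsel contributes one fd per cpu per thread. A toy version of that arithmetic, using simplified stand-in types rather than perf's structs:

#include <stdbool.h>
#include <stdio.h>

struct evsel { bool system_wide; };

static int nfds_needed(const struct evsel *evsels, int nr_evsels,
		       int nr_cpus, int nr_threads)
{
	int i, nfds = 0;

	for (i = 0; i < nr_evsels; i++)
		nfds += evsels[i].system_wide ? nr_cpus
					      : nr_cpus * nr_threads;
	return nfds;
}

int main(void)
{
	struct evsel evsels[] = { { .system_wide = false },
				  { .system_wide = true } };

	/* 4 cpus, 3 threads: 4*3 + 4 = 16 fds to poll */
	printf("%d\n", nfds_needed(evsels, 2, 4, 3));
	return 0;
}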
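The prepare_workload hunk implements a "cork" handshake: the forked child blocks on a pipe read, and the parent either writes exactly one byte to start the workload or closes its end to cancel it, in which case read() returns zero and the child exits. A self-contained sketch of the same protocol, using local names rather than perf's go_pipe/workload.cork_fd plumbing:

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int go_pipe[2];
	char bf;

	if (pipe(go_pipe) < 0)
		return 1;

	pid_t pid = fork();
	if (pid == 0) {		/* child: wait for exactly one byte */
		close(go_pipe[1]);
		ssize_t ret = read(go_pipe[0], &bf, 1);
		if (ret != 1) {	/* 0: parent cancelled; -1: error */
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}
		execlp("true", "true", (char *)NULL);
		exit(127);	/* exec failed */
	}

	close(go_pipe[0]);
	/* uncork: one byte starts the workload; closing without
	 * writing would cancel it instead */
	if (write(go_pipe[1], &bf, 1) != 1)
		perror("unable to write pipe");
	close(go_pipe[1]);
	waitpid(pid, NULL, 0);
	return 0;
}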