author:    Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-04-09 10:53:44 +0200
committer: Ingo Molnar <mingo@elte.hu>                2009-04-09 11:50:43 +0200
commit:    9ee318a7825929bc3734110b83ae8e20e53d9de3
tree:      4823b4e82bb99971559d3569c7f4d710ac7f232d /kernel/perf_counter.c
parent:    b3828ebb3901adfe989d8d4157ed28247aeec132
perf_counter: optimize mmap/comm tracking
Impact: performance optimization
The mmap/comm tracking code does quite a lot of work before it discovers
that nobody is interested in the event; avoid that by keeping a per-event-type
counter of interested counters and returning early when it is zero.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090409085524.427173196@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--  kernel/perf_counter.c | 39
1 file changed, 36 insertions(+), 3 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index b07195bbd228..76376ecb23b5 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -38,6 +38,10 @@ int perf_max_counters __read_mostly = 1;
 static int perf_reserved_percpu __read_mostly;
 static int perf_overcommit __read_mostly = 1;
 
+static atomic_t nr_mmap_tracking __read_mostly;
+static atomic_t nr_munmap_tracking __read_mostly;
+static atomic_t nr_comm_tracking __read_mostly;
+
 /*
  * Mutex for (sysadmin-configurable) counter reservations:
  */
@@ -1186,6 +1190,13 @@ static void free_counter(struct perf_counter *counter)
 {
         perf_pending_sync(counter);
 
+        if (counter->hw_event.mmap)
+                atomic_dec(&nr_mmap_tracking);
+        if (counter->hw_event.munmap)
+                atomic_dec(&nr_munmap_tracking);
+        if (counter->hw_event.comm)
+                atomic_dec(&nr_comm_tracking);
+
         if (counter->destroy)
                 counter->destroy(counter);
 
@@ -2005,7 +2016,12 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
 
 void perf_counter_comm(struct task_struct *task)
 {
-        struct perf_comm_event comm_event = {
+        struct perf_comm_event comm_event;
+
+        if (!atomic_read(&nr_comm_tracking))
+                return;
+
+        comm_event = (struct perf_comm_event){
                 .task = task,
                 .event = {
                         .header = { .type = PERF_EVENT_COMM, },
@@ -2128,7 +2144,12 @@ got_name:
 void perf_counter_mmap(unsigned long addr, unsigned long len,
                        unsigned long pgoff, struct file *file)
 {
-        struct perf_mmap_event mmap_event = {
+        struct perf_mmap_event mmap_event;
+
+        if (!atomic_read(&nr_mmap_tracking))
+                return;
+
+        mmap_event = (struct perf_mmap_event){
                 .file = file,
                 .event = {
                         .header = { .type = PERF_EVENT_MMAP, },
@@ -2146,7 +2167,12 @@ void perf_counter_mmap(unsigned long addr, unsigned long len,
 void perf_counter_munmap(unsigned long addr, unsigned long len,
                          unsigned long pgoff, struct file *file)
 {
-        struct perf_mmap_event mmap_event = {
+        struct perf_mmap_event mmap_event;
+
+        if (!atomic_read(&nr_munmap_tracking))
+                return;
+
+        mmap_event = (struct perf_mmap_event){
                 .file = file,
                 .event = {
                         .header = { .type = PERF_EVENT_MUNMAP, },
@@ -2725,6 +2751,13 @@ done:
 
         counter->hw_ops = hw_ops;
 
+        if (counter->hw_event.mmap)
+                atomic_inc(&nr_mmap_tracking);
+        if (counter->hw_event.munmap)
+                atomic_inc(&nr_munmap_tracking);
+        if (counter->hw_event.comm)
+                atomic_inc(&nr_comm_tracking);
+
         return counter;
 }
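The change follows a simple reference-counting pattern: every counter that requests
mmap, munmap or comm events increments a global atomic when it is created and
decrements it when it is freed, and the hot-path hooks return immediately while the
count is zero. A minimal stand-alone sketch of that pattern in userspace C
(illustrative names, not the kernel API) might look like this:

#include <stdatomic.h>

/* Illustrative global: how many consumers currently want comm events. */
static atomic_int nr_comm_tracking;

/* Called when a consumer that wants comm events is created / destroyed. */
static void comm_tracking_enable(void)  { atomic_fetch_add(&nr_comm_tracking, 1); }
static void comm_tracking_disable(void) { atomic_fetch_sub(&nr_comm_tracking, 1); }

/* Hot-path hook: bail out before building the event if nobody is listening. */
static void emit_comm_event(const char *comm)
{
        if (atomic_load(&nr_comm_tracking) == 0)
                return;         /* fast path: no interest, no work */

        /* ... construct the event and deliver it to interested consumers ... */
        (void)comm;
}

In the common case the hook now costs a single atomic read; the patch also marks the
counters __read_mostly so they are grouped with other rarely written data and the
fast path does not suffer cache-line bouncing.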