author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-12-17 13:16:32 +0100
committer	Greg Kroah-Hartman <gregkh@suse.de>	2010-01-25 10:49:45 -0800
commit		b0a93920c4117933657cb5572157ce502ef0fc57 (patch)
tree		7d01a7b2e58c9d3d5671170511613a7389aad4fe
parent		5a20267b12fec85520acc1e839ee61f2ea16413d (diff)
perf events: Dont report side-band events on each cpu for per-task-per-cpu events
commit 5d27c23df09b702868d9a3bff86ec6abd22963ac upstream.

Acme noticed that his FORK/MMAP numbers were inflated by about the same
factor as his cpu-count. This led to the discovery of a few more sites
that need to respect the event->cpu filter.

Reported-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <20091217121830.215333434@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r--	kernel/perf_event.c | 21 ++++++++++++++++++---
1 file changed, 18 insertions(+), 3 deletions(-)
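The pattern the patch applies at each affected site is a single guard at the top of the match/adjust path: an event bound to a specific CPU (event->cpu != -1) is only considered when the code is currently running on that CPU, while any-CPU events (event->cpu == -1) still match everywhere. Below is a minimal userspace sketch of that filter, assuming an illustrative struct event and a current_cpu variable as stand-ins for the kernel's struct perf_event and smp_processor_id(); it only demonstrates the counting effect and is not kernel code.

#include <stdio.h>

/* Illustrative stand-ins for the kernel structures/helpers. */
struct event {
	int cpu;	/* -1 means "any CPU", otherwise the CPU the event is bound to */
};

static int current_cpu;		/* stand-in for smp_processor_id() */

/* The guard this patch adds to every side-band match function:
 * a per-cpu event only matches on its own CPU. */
static int event_matches_cpu(const struct event *event)
{
	if (event->cpu != -1 && event->cpu != current_cpu)
		return 0;
	return 1;
}

int main(void)
{
	struct event e = { .cpu = 2 };	/* per-task-per-cpu event bound to CPU 2 */
	int reported = 0;

	/* Without the guard, a side-band pass on each of 4 CPUs would
	 * report the event 4 times; with it, only CPU 2 reports. */
	for (current_cpu = 0; current_cpu < 4; current_cpu++)
		reported += event_matches_cpu(&e);

	printf("reported %d time(s)\n", reported);	/* prints 1 */
	return 0;
}

Before the guard, a per-task-per-cpu event's FORK/MMAP/COMM output matched on every CPU's iteration, which is exactly the cpu-count inflation reported above; with it, each side-band record is emitted once.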
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 6eee915a939e..6e9a31030783 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1359,6 +1359,9 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
 
+		if (event->cpu != -1 && event->cpu != smp_processor_id())
+			continue;
+
 		hwc = &event->hw;
 
 		interrupts = hwc->interrupts;
@@ -3226,6 +3229,9 @@ static void perf_event_task_output(struct perf_event *event,
 
 static int perf_event_task_match(struct perf_event *event)
 {
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		return 0;
+
 	if (event->attr.comm || event->attr.mmap || event->attr.task)
 		return 1;
 
@@ -3255,13 +3261,13 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_task_ctx(&cpuctx->ctx, task_event);
-	put_cpu_var(perf_cpu_context);
 
 	rcu_read_lock();
 	if (!ctx)
 		ctx = rcu_dereference(task_event->task->perf_event_ctxp);
 	if (ctx)
 		perf_event_task_ctx(ctx, task_event);
+	put_cpu_var(perf_cpu_context);
 	rcu_read_unlock();
 }
 
@@ -3338,6 +3344,9 @@ static void perf_event_comm_output(struct perf_event *event,
 
 static int perf_event_comm_match(struct perf_event *event)
 {
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		return 0;
+
 	if (event->attr.comm)
 		return 1;
 
@@ -3378,7 +3387,6 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_comm_ctx(&cpuctx->ctx, comm_event);
-	put_cpu_var(perf_cpu_context);
 
 	rcu_read_lock();
 	/*
@@ -3388,6 +3396,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 	ctx = rcu_dereference(current->perf_event_ctxp);
 	if (ctx)
 		perf_event_comm_ctx(ctx, comm_event);
+	put_cpu_var(perf_cpu_context);
 	rcu_read_unlock();
 }
 
@@ -3462,6 +3471,9 @@ static void perf_event_mmap_output(struct perf_event *event,
 static int perf_event_mmap_match(struct perf_event *event,
 				 struct perf_mmap_event *mmap_event)
 {
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		return 0;
+
 	if (event->attr.mmap)
 		return 1;
 
@@ -3539,7 +3551,6 @@ got_name:
 
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
-	put_cpu_var(perf_cpu_context);
 
 	rcu_read_lock();
 	/*
@@ -3549,6 +3560,7 @@ got_name:
 	ctx = rcu_dereference(current->perf_event_ctxp);
 	if (ctx)
 		perf_event_mmap_ctx(ctx, mmap_event);
+	put_cpu_var(perf_cpu_context);
 	rcu_read_unlock();
 
 	kfree(buf);
@@ -3811,6 +3823,9 @@ static int perf_swevent_match(struct perf_event *event,
 				enum perf_type_id type,
 				u32 event_id, struct pt_regs *regs)
 {
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		return 0;
+
 	if (!perf_swevent_is_counting(event))
 		return 0;