author    | Ingo Molnar <mingo@elte.hu> | 2009-01-11 02:44:08 +0100
committer | Ingo Molnar <mingo@elte.hu> | 2009-01-11 02:44:08 +0100
commit    | c0d362a832ee70435fc4555a64f820893b1da0bd (patch)
tree      | 162497df64451936dfdb70f972dd7849d3e3e520 /kernel/perf_counter.c
parent    | 506c10f26c481b7f8ef27c1c79290f68989b2e9e (diff)
parent    | f78628374a13bc150db77c6e02d4f2c0a7f932ef (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/perfcounters into perfcounters/core
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r-- | kernel/perf_counter.c | 55
1 file changed, 46 insertions, 9 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 37f771691f93..4c0dccb756ad 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -41,12 +41,20 @@ static DEFINE_MUTEX(perf_resource_mutex);
 extern __weak const struct hw_perf_counter_ops *
 hw_perf_counter_init(struct perf_counter *counter)
 {
-	return ERR_PTR(-EINVAL);
+	return NULL;
 }
 
 u64 __weak hw_perf_save_disable(void)		{ return 0; }
 void __weak hw_perf_restore(u64 ctrl)		{ barrier(); }
 void __weak hw_perf_counter_setup(void)		{ barrier(); }
+int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
+	       struct perf_cpu_context *cpuctx,
+	       struct perf_counter_context *ctx, int cpu)
+{
+	return 0;
+}
+
+void __weak perf_counter_print_debug(void)	{ }
 
 static void
 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
@@ -341,6 +349,9 @@ group_sched_out(struct perf_counter *group_counter,
 {
 	struct perf_counter *counter;
 
+	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
+		return;
+
 	counter_sched_out(group_counter, cpuctx, ctx);
 
 	/*
@@ -354,15 +365,18 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx,
 			      struct perf_cpu_context *cpuctx)
 {
 	struct perf_counter *counter;
+	u64 flags;
 
 	if (likely(!ctx->nr_counters))
 		return;
 
 	spin_lock(&ctx->lock);
+	flags = hw_perf_save_disable();
 	if (ctx->nr_active) {
 		list_for_each_entry(counter, &ctx->counter_list, list_entry)
 			group_sched_out(counter, cpuctx, ctx);
 	}
+	hw_perf_restore(flags);
 	spin_unlock(&ctx->lock);
 }
 
@@ -402,7 +416,14 @@ group_sched_in(struct perf_counter *group_counter,
 	       int cpu)
 {
 	struct perf_counter *counter, *partial_group;
-	int ret = 0;
+	int ret;
+
+	if (group_counter->state == PERF_COUNTER_STATE_OFF)
+		return 0;
+
+	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
+	if (ret)
+		return ret < 0 ? ret : 0;
 
 	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
 		return -EAGAIN;
@@ -415,10 +436,9 @@ group_sched_in(struct perf_counter *group_counter,
 			partial_group = counter;
 			goto group_error;
 		}
-		ret = -EAGAIN;
 	}
 
-	return ret;
+	return 0;
 
 group_error:
 	/*
@@ -440,11 +460,13 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
 			struct perf_cpu_context *cpuctx, int cpu)
 {
 	struct perf_counter *counter;
+	u64 flags;
 
 	if (likely(!ctx->nr_counters))
 		return;
 
 	spin_lock(&ctx->lock);
+	flags = hw_perf_save_disable();
 	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
 		/*
 		 * Listen to the 'cpu' scheduling filter constraint
@@ -454,12 +476,13 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
 			continue;
 
 		/*
-		 * If we scheduled in a group atomically and
-		 * exclusively, break out:
+		 * If we scheduled in a group atomically and exclusively,
+		 * or if this group can't go on, break out:
 		 */
 		if (group_sched_in(counter, cpuctx, ctx, cpu))
 			break;
 	}
+	hw_perf_restore(flags);
 	spin_unlock(&ctx->lock);
 }
 
@@ -928,18 +951,32 @@ static const struct file_operations perf_fops = {
 
 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
 {
+	int cpu = raw_smp_processor_id();
+
+	atomic64_set(&counter->hw.prev_count, cpu_clock(cpu));
 	return 0;
 }
 
+static void cpu_clock_perf_counter_update(struct perf_counter *counter)
+{
+	int cpu = raw_smp_processor_id();
+	s64 prev;
+	u64 now;
+
+	now = cpu_clock(cpu);
+	prev = atomic64_read(&counter->hw.prev_count);
+	atomic64_set(&counter->hw.prev_count, now);
+	atomic64_add(now - prev, &counter->count);
+}
+
 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
 {
+	cpu_clock_perf_counter_update(counter);
 }
 
 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
 {
-	int cpu = raw_smp_processor_id();
-
-	atomic64_set(&counter->count, cpu_clock(cpu));
+	cpu_clock_perf_counter_update(counter);
 }
 
 static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
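
The merge leans on the weak-symbol mechanism: the generic stubs above (hw_perf_group_sched_in(), perf_counter_print_debug(), and friends) are marked __weak, so an architecture that implements them supplies a strong symbol that silently replaces the stub at link time, while every other architecture falls back to the no-op. Below is a minimal userspace sketch of that pattern; the demo_* names and single-file layout are illustrative only, not the kernel's actual build.

/* weak_demo.c: compile with gcc/clang on an ELF target.
 * The weak definition is used unless another translation unit
 * links in a strong definition of the same symbol. */
#include <stdio.h>

/* "generic" fallback: weak, so it loses to any strong definition */
__attribute__((weak)) int demo_group_sched_in(int cpu)
{
	(void)cpu;
	return 0;	/* 0 here means "no hardware support" */
}

/* An "arch" file could override it with a strong symbol:
 * int demo_group_sched_in(int cpu) { ... return 1; }       */

int main(void)
{
	printf("demo_group_sched_in -> %d\n", demo_group_sched_in(0));
	return 0;
}

Without an override this prints 0, exercising the same fallback path group_sched_in() takes when hw_perf_group_sched_in() returns 0 and the generic per-counter scheduling code runs instead.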
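The cpu_clock counter changes are the other notable piece: instead of overwriting counter->count with the raw clock on every read, enable now snapshots the clock into hw.prev_count, and each read or disable folds only the elapsed delta into the accumulated count, so the value survives enable/disable cycles. A standalone sketch of that idiom follows; it is userspace code with hypothetical demo_* names and a fake monotonic clock standing in for cpu_clock(), not the kernel implementation.

/* delta_demo.c: accumulate-on-update instead of overwrite-on-read */
#include <stdint.h>
#include <stdio.h>

struct demo_counter {
	uint64_t prev_count;	/* last raw clock reading */
	uint64_t count;		/* time accumulated while enabled */
};

static uint64_t demo_clock(void);	/* stand-in for cpu_clock(cpu) */

static void demo_enable(struct demo_counter *c)
{
	/* snapshot only; leave the accumulated count untouched */
	c->prev_count = demo_clock();
}

static void demo_update(struct demo_counter *c)
{
	uint64_t now = demo_clock();

	/* fold in the delta since the last reading, then re-snapshot */
	c->count += now - c->prev_count;
	c->prev_count = now;
}

/* trivial monotonic clock for the sketch */
static uint64_t demo_clock(void)
{
	static uint64_t t;
	return t += 100;
}

int main(void)
{
	struct demo_counter c = { 0, 0 };

	demo_enable(&c);
	demo_update(&c);	/* the read path */
	demo_update(&c);	/* the disable path updates too */
	printf("accumulated: %llu\n", (unsigned long long)c.count);
	return 0;
}

This mirrors why cpu_clock_perf_counter_disable() now calls the update helper: a final delta is folded in at disable time, so nothing measured between the last read and the disable is lost.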