path: root/kernel/perf_counter.c
author    Ingo Molnar <mingo@elte.hu>  2008-12-13 09:00:03 +0100
committer Ingo Molnar <mingo@elte.hu>  2008-12-14 20:30:48 +0100
commit    ee06094f8279e1312fc0a31591320cc7b6f0ab1e (patch)
tree      aecf8f2177b2398e4db8df68a9705009b31a8ef7 /kernel/perf_counter.c
parent    9b194e831fb2c322ed81a373e49620f34edc2778 (diff)
download  lwn-ee06094f8279e1312fc0a31591320cc7b6f0ab1e.tar.gz
          lwn-ee06094f8279e1312fc0a31591320cc7b6f0ab1e.zip
perfcounters: restructure x86 counter math
Impact: restructure code

Change counter math from absolute values to clear delta logic. We try to
extract elapsed deltas from the raw hw counter, and put that into the
generic counter.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
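The delta logic the message describes lands on the x86 side of this commit, not
in the generic file shown below. As a rough sketch of the pattern, assuming a
hwc->prev_count snapshot field (which the x86 half of the commit adds) and
using read_hw_counter() as a placeholder for the raw MSR read, and omitting the
counter-width masking the real code needs:

/*
 * Sketch of the delta scheme: snapshot the previous raw value, read
 * the current one, publish the new snapshot atomically, and fold only
 * the elapsed delta into the generic counter.
 */
static void hw_counter_update(struct perf_counter *counter,
                              struct hw_perf_counter *hwc, int idx)
{
        u64 prev_raw_count, new_raw_count, delta;

again:
        prev_raw_count = atomic64_read(&hwc->prev_count);
        new_raw_count = read_hw_counter(idx);   /* placeholder for the MSR read */

        /*
         * If an NMI moved prev_count under us, someone else already
         * accounted part of the delta: retry with the fresher snapshot.
         */
        if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
                goto again;

        delta = new_raw_count - prev_raw_count;
        atomic64_add(delta, &counter->count);
}

Because only deltas are accumulated, the generic counter->count stays valid
across counter reprogramming, which is what lets the generic code below shed
its per-word-size read helpers.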
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--  kernel/perf_counter.c  68
1 file changed, 5 insertions(+), 63 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 559130b8774d..416861ce8b27 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -44,67 +44,9 @@ hw_perf_counter_init(struct perf_counter *counter)
}
u64 __weak hw_perf_save_disable(void) { return 0; }
-void __weak hw_perf_restore(u64 ctrl)  { }
+void __weak hw_perf_restore(u64 ctrl)          { }
void __weak hw_perf_counter_setup(void) { }
-#if BITS_PER_LONG == 64
-
-/*
- * Read the cached counter in counter safe against cross CPU / NMI
- * modifications. 64 bit version - no complications.
- */
-static inline u64 perf_counter_read_safe(struct perf_counter *counter)
-{
- return (u64) atomic64_read(&counter->count);
-}
-
-void atomic64_counter_set(struct perf_counter *counter, u64 val)
-{
- atomic64_set(&counter->count, val);
-}
-
-u64 atomic64_counter_read(struct perf_counter *counter)
-{
- return atomic64_read(&counter->count);
-}
-
-#else
-
-/*
- * Read the cached counter in counter safe against cross CPU / NMI
- * modifications. 32 bit version.
- */
-static u64 perf_counter_read_safe(struct perf_counter *counter)
-{
- u32 cntl, cnth;
-
- local_irq_disable();
- do {
- cnth = atomic_read(&counter->count32[1]);
- cntl = atomic_read(&counter->count32[0]);
- } while (cnth != atomic_read(&counter->count32[1]));
-
- local_irq_enable();
-
- return cntl | ((u64) cnth) << 32;
-}
-
-void atomic64_counter_set(struct perf_counter *counter, u64 val64)
-{
- u32 *val32 = (void *)&val64;
-
- atomic_set(counter->count32 + 0, *(val32 + 0));
- atomic_set(counter->count32 + 1, *(val32 + 1));
-}
-
-u64 atomic64_counter_read(struct perf_counter *counter)
-{
- return atomic_read(counter->count32 + 0) |
- (u64) atomic_read(counter->count32 + 1) << 32;
-}
-
-#endif
-
static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
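For context on what the deleted 32-bit fallback above was doing: a 64-bit load
can tear on a 32-bit machine, so the reader loaded the high word, then the low
word, and retried until the high word was stable. A standalone sketch of that
retry pattern, with split_counter as a hypothetical stand-in for the old
counter->count32[] pair:

/*
 * Sketch of the hi/lo/hi retry read used by the deleted 32-bit path.
 * The deleted kernel code additionally disabled local interrupts
 * around the loop so a local NMI/IRQ update could not slide under it.
 */
struct split_counter {
        atomic_t lo;            /* bits  0..31 */
        atomic_t hi;            /* bits 32..63 */
};

static u64 split_counter_read(struct split_counter *c)
{
        u32 lo, hi;

        do {
                hi = atomic_read(&c->hi);
                lo = atomic_read(&c->lo);
                /* If hi moved, a carry may have raced the two loads. */
        } while (hi != atomic_read(&c->hi));

        return lo | ((u64) hi) << 32;
}

After this patch the generic code keeps counter->count in an atomic64_t
unconditionally, so the raw read collapses to a single atomic64_read(), as the
perf_counter_read() hunk further down shows.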
@@ -280,11 +222,11 @@ static void __perf_install_in_context(void *info)
ctx->nr_counters++;
if (cpuctx->active_oncpu < perf_max_counters) {
- counter->hw_ops->hw_perf_counter_enable(counter);
counter->state = PERF_COUNTER_STATE_ACTIVE;
counter->oncpu = cpu;
ctx->nr_active++;
cpuctx->active_oncpu++;
+ counter->hw_ops->hw_perf_counter_enable(counter);
}
if (!ctx->task && cpuctx->max_pertask)
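The reordering in this hunk looks cosmetic but is not: the hardware enable hook
now runs only after counter->state, counter->oncpu and the active counts have
been written, so an enable implementation may rely on that bookkeeping being
complete. A hypothetical hook illustrating the dependency (program_hw_counter()
is invented for illustration):

/*
 * Hypothetical enable hook: it may only consult state/oncpu because
 * __perf_install_in_context() now finishes the bookkeeping first.
 */
static void example_hw_perf_counter_enable(struct perf_counter *counter)
{
        WARN_ON(counter->state != PERF_COUNTER_STATE_ACTIVE);
        program_hw_counter(counter, counter->oncpu);    /* invented helper */
}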
@@ -624,7 +566,7 @@ static u64 perf_counter_read(struct perf_counter *counter)
__hw_perf_counter_read, counter, 1);
}
- return perf_counter_read_safe(counter);
+ return atomic64_read(&counter->count);
}
/*
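With the per-word-size read helpers gone, the generic read path becomes
uniform. Condensed from the surrounding context (taking __hw_perf_counter_read
to be the cross-call helper visible above), it now has roughly this shape:

/* Condensed sketch of the post-patch read path. */
static u64 perf_counter_read_sketch(struct perf_counter *counter)
{
        /*
         * If the counter is live on another CPU, refresh the generic
         * count from the hardware counter there first.
         */
        if (counter->state == PERF_COUNTER_STATE_ACTIVE)
                smp_call_function_single(counter->oncpu,
                                         __hw_perf_counter_read, counter, 1);

        return atomic64_read(&counter->count);
}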
@@ -921,7 +863,7 @@ static void cpu_clock_perf_counter_read(struct perf_counter *counter)
{
int cpu = raw_smp_processor_id();
- atomic64_counter_set(counter, cpu_clock(cpu));
+ atomic64_set(&counter->count, cpu_clock(cpu));
}
static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
@@ -940,7 +882,7 @@ static void task_clock_perf_counter_disable(struct perf_counter *counter)
static void task_clock_perf_counter_read(struct perf_counter *counter)
{
- atomic64_counter_set(counter, current->se.sum_exec_runtime);
+ atomic64_set(&counter->count, current->se.sum_exec_runtime);
}
static const struct hw_perf_counter_ops perf_ops_task_clock = {
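Note that the software clock counters take the other half of the new scheme:
rather than accumulating deltas, their read hooks publish an absolute value
(cpu_clock() or sum_exec_runtime) straight into counter->count via
atomic64_set(). Condensed from the two hunks above, with the enable/disable
hooks elided and the ops field named as in this era of the API:

/* Software counters overwrite the count with an absolute value. */
static void task_clock_perf_counter_read(struct perf_counter *counter)
{
        atomic64_set(&counter->count, current->se.sum_exec_runtime);
}

static const struct hw_perf_counter_ops perf_ops_task_clock = {
        /* enable/disable hooks elided */
        .hw_perf_counter_read = task_clock_perf_counter_read,
};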