author    | Paul Mackerras <paulus@samba.org> | 2009-06-17 21:51:13 +1000
committer | Ingo Molnar <mingo@elte.hu>       | 2009-06-18 11:11:45 +0200
commit    | 448d64f8f4c147db466c549550767cc515a4d34c
tree      | 9c33191273219d8e4d77e3ea78304691e4fb4b56 /arch/powerpc/kernel/perf_counter.c
parent    | 105988c015943e77092a6568bc5fb7e386df6ccd
perf_counter: powerpc: Use unsigned long for register and constraint values
This changes the powerpc perf_counter back-end to use unsigned long
types for hardware register values and for the value/mask pairs used
in checking whether a given set of events fits within the hardware
constraints. This is in preparation for adding support for the PMU
on some 32-bit powerpc processors. On 32-bit processors the hardware
registers are only 32 bits wide, and the PMU structure is generally
simpler, so 32 bits should be ample for expressing the hardware
constraints. On 64-bit processors, unsigned long is 64 bits wide,
so using unsigned long vs. u64 (unsigned long long) makes no actual
difference.
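
[Editor's note] The width argument can be made concrete with a small sketch. This is an illustration, not part of the patch, and "struct pmu_limits" is a made-up name; only mmcr, add_fields and test_adder correspond to fields the patch actually retypes.

```c
/*
 * Editor's sketch, not from the patch: the same unsigned long declaration
 * is 32 bits wide on ppc32 (matching the 32-bit SPRs such as MMCR0) and
 * 64 bits wide on ppc64 (the same as u64 was), so it always matches the
 * natural width of the PMU control registers, whereas u64 forces 64-bit
 * arithmetic even on 32-bit parts.
 */
#include <linux/types.h>

struct pmu_limits {			/* hypothetical, for illustration */
	unsigned long mmcr[3];		/* was u64 mmcr[3] */
	unsigned long add_fields;	/* constraint adder, register-width */
	unsigned long test_adder;	/* constraint overflow test, register-width */
};
```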
The patch also makes some other very minor changes: adjusting whitespace
to line things up in initialized structures, and simplifying some code in
hw_perf_disable().
Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linuxppc-dev@ozlabs.org
Cc: benh@kernel.crashing.org
LKML-Reference: <19000.55473.26174.331511@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/powerpc/kernel/perf_counter.c')
-rw-r--r-- | arch/powerpc/kernel/perf_counter.c | 20
1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index e6dc1850191c..9300638b8c26 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -29,7 +29,7 @@ struct cpu_hw_counters {
 	struct perf_counter *counter[MAX_HWCOUNTERS];
 	u64 events[MAX_HWCOUNTERS];
 	unsigned int flags[MAX_HWCOUNTERS];
-	u64 mmcr[3];
+	unsigned long mmcr[3];
 	struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
 	u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
 };
@@ -135,15 +135,15 @@ static void write_pmc(int idx, unsigned long val)
 static int power_check_constraints(u64 event[], unsigned int cflags[],
 				   int n_ev)
 {
-	u64 mask, value, nv;
+	unsigned long mask, value, nv;
 	u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
-	u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
-	u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
-	u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
+	unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+	unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+	unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
 	int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
 	int i, j;
-	u64 addf = ppmu->add_fields;
-	u64 tadd = ppmu->test_adder;
+	unsigned long addf = ppmu->add_fields;
+	unsigned long tadd = ppmu->test_adder;
 
 	if (n_ev > ppmu->n_counter)
 		return -1;
@@ -403,14 +403,12 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
 void hw_perf_disable(void)
 {
 	struct cpu_hw_counters *cpuhw;
-	unsigned long ret;
 	unsigned long flags;
 
 	local_irq_save(flags);
 	cpuhw = &__get_cpu_var(cpu_hw_counters);
 
-	ret = cpuhw->disabled;
-	if (!ret) {
+	if (!cpuhw->disabled) {
 		cpuhw->disabled = 1;
 		cpuhw->n_added = 0;
 
@@ -1013,9 +1011,9 @@ static void record_and_restart(struct perf_counter *counter, long val,
 			       struct pt_regs *regs, int nmi)
 {
 	u64 period = counter->hw.sample_period;
+	unsigned long mmcra, sdsync;
 	s64 prev, delta, left;
 	int record = 0;
-	u64 mmcra, sdsync;
 
 	/* we don't have to worry about interrupts here */
 	prev = atomic64_read(&counter->hw.prev_count);
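
[Editor's note] For readers unfamiliar with power_check_constraints(), the value/mask pairs whose types change above encode per-event requirements on the PMU control registers. The sketch below is a heavily simplified model of that idea, not the kernel code: the real function additionally uses ppmu->add_fields and ppmu->test_adder to count how many events share a field, and it tries alternative event encodings; both parts are omitted here, and the names constraint_add/ev_constraint are invented for illustration.

```c
/*
 * Editor's sketch: each event supplies a (value, mask) pair describing the
 * PMU register fields it needs.  Two events can coexist only if they agree
 * wherever their masks overlap; a compatible set is accumulated by OR-ing
 * the pairs together.
 */
#include <stdbool.h>

struct ev_constraint {
	unsigned long value;	/* required settings of the masked fields */
	unsigned long mask;	/* which register bits the event cares about */
};

static bool constraint_add(struct ev_constraint *set,
			   const struct ev_constraint *ev)
{
	/* conflicting requirement in an overlapping field? */
	if ((set->value ^ ev->value) & set->mask & ev->mask)
		return false;

	set->value |= ev->value;
	set->mask  |= ev->mask;
	return true;
}
```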