author	Rob Herring (Arm) <robh@kernel.org>	2024-07-31 10:51:22 -0600
committer	Will Deacon <will@kernel.org>	2024-08-16 13:09:12 +0100
commit	126d7d7cce5e048fb82477a9842d088d10ff0df6 (patch)
tree	2c6ce4ffdb4206581a523cb8b60b88bca179a7e6 /drivers/perf
parent	f9b11aa00708d94a0cd78bfde34b68c0f95d8b50 (diff)
arm64: perf/kvm: Use a common PMU cycle counter define
The PMUv3 and KVM code each have a define for the PMU cycle counter index.
Move KVM's define to a shared location and use it for the PMUv3 driver.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Rob Herring (Arm) <robh@kernel.org>
Tested-by: James Clark <james.clark@linaro.org>
Link: https://lore.kernel.org/r/20240731-arm-pmu-3-9-icntr-v3-5-280a8d7ff465@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
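The change itself is a rename plus a consolidation: the driver-local ARMV8_IDX_CYCLE_COUNTER is dropped in favour of the index KVM already used. A minimal sketch of the shared definition is below; the value 31 comes from the define removed in the diff, while the exact header it now lives in is not part of this diff and is left as an assumption.

	/*
	 * Consolidated cycle counter index shared by the PMUv3 driver and KVM.
	 * General-purpose event counters occupy indices 0..30; the dedicated
	 * cycle counter is addressed through index 31, matching the
	 * ARMV8_IDX_CYCLE_COUNTER define removed below.
	 */
	#define ARMV8_PMU_CYCLE_IDX	31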
Diffstat (limited to 'drivers/perf')
-rw-r--r--	drivers/perf/arm_pmuv3.c	19
1 file changed, 7 insertions(+), 12 deletions(-)
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index bd45fbcb9a5a..18046cf4b3a3 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -452,11 +452,6 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = {
};
/*
- * Perf Events' indices
- */
-#define ARMV8_IDX_CYCLE_COUNTER 31
-
-/*
* We unconditionally enable ARMv8.5-PMU long event counter support
* (64-bit events) where supported. Indicate if this arm_pmu has long
* event counter support.
@@ -574,7 +569,7 @@ static u64 armv8pmu_read_counter(struct perf_event *event)
int idx = hwc->idx;
u64 value;
- if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ if (idx == ARMV8_PMU_CYCLE_IDX)
value = read_pmccntr();
else
value = armv8pmu_read_hw_counter(event);
@@ -607,7 +602,7 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value)
value = armv8pmu_bias_long_counter(event, value);
- if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ if (idx == ARMV8_PMU_CYCLE_IDX)
write_pmccntr(value);
else
armv8pmu_write_hw_counter(event, value);
@@ -644,7 +639,7 @@ static void armv8pmu_write_event_type(struct perf_event *event)
armv8pmu_write_evtype(idx - 1, hwc->config_base);
armv8pmu_write_evtype(idx, chain_evt);
} else {
- if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ if (idx == ARMV8_PMU_CYCLE_IDX)
write_pmccfiltr(hwc->config_base);
else
armv8pmu_write_evtype(idx, hwc->config_base);
@@ -772,7 +767,7 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
/* Clear any unused counters to avoid leaking their contents */
for_each_andnot_bit(i, cpu_pmu->cntr_mask, cpuc->used_mask,
ARMPMU_MAX_HWEVENTS) {
- if (i == ARMV8_IDX_CYCLE_COUNTER)
+ if (i == ARMV8_PMU_CYCLE_IDX)
write_pmccntr(0);
else
armv8pmu_write_evcntr(i, 0);
@@ -933,8 +928,8 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
/* Always prefer to place a cycle counter into the cycle counter. */
if ((evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) &&
!armv8pmu_event_get_threshold(&event->attr)) {
- if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
- return ARMV8_IDX_CYCLE_COUNTER;
+ if (!test_and_set_bit(ARMV8_PMU_CYCLE_IDX, cpuc->used_mask))
+ return ARMV8_PMU_CYCLE_IDX;
else if (armv8pmu_event_is_64bit(event) &&
armv8pmu_event_want_user_access(event) &&
!armv8pmu_has_long_event(cpu_pmu))
@@ -1196,7 +1191,7 @@ static void __armv8pmu_probe_pmu(void *info)
0, FIELD_GET(ARMV8_PMU_PMCR_N, armv8pmu_pmcr_read()));
/* Add the CPU cycles counter */
- set_bit(ARMV8_IDX_CYCLE_COUNTER, cpu_pmu->cntr_mask);
+ set_bit(ARMV8_PMU_CYCLE_IDX, cpu_pmu->cntr_mask);
pmceid[0] = pmceid_raw[0] = read_pmceid0();
pmceid[1] = pmceid_raw[1] = read_pmceid1();
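For readers skimming the hunks, the pattern the rename touches throughout the driver can be condensed into a standalone sketch: dispatch on the shared index, handling the fixed cycle counter specially and routing every other index through the per-counter path. The helper names below are illustrative stand-ins for read_pmccntr() and armv8pmu_read_hw_counter(), not kernel APIs.

	#include <stdint.h>

	#define ARMV8_PMU_CYCLE_IDX	31	/* shared define introduced by the patch */

	/* Illustrative stand-ins for read_pmccntr()/armv8pmu_read_hw_counter(). */
	static uint64_t read_cycle_counter(void)    { return 0; }
	static uint64_t read_event_counter(int idx) { (void)idx; return 0; }

	/*
	 * Condensed form of the dispatch updated in the diff: the fixed cycle
	 * counter sits behind its dedicated index, everything else is a
	 * general-purpose event counter.
	 */
	static uint64_t read_counter(int idx)
	{
		if (idx == ARMV8_PMU_CYCLE_IDX)
			return read_cycle_counter();
		return read_event_counter(idx);
	}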