Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/perf_event.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 674edc7ba8ca..fdb6029c9021 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1177,6 +1177,7 @@ void arch_perf_update_userpage(struct perf_event *event,
 
 	userpg->cap_user_time = 0;
 	userpg->cap_user_time_zero = 0;
+	userpg->cap_user_time_short = 0;
 
 	do {
 		rd = sched_clock_read_begin(&seq);
@@ -1187,13 +1188,13 @@ void arch_perf_update_userpage(struct perf_event *event,
 		userpg->time_mult = rd->mult;
 		userpg->time_shift = rd->shift;
 		userpg->time_zero = rd->epoch_ns;
+		userpg->time_cycles = rd->epoch_cyc;
+		userpg->time_mask = rd->sched_clock_mask;
 
 		/*
-		 * This isn't strictly correct, the ARM64 counter can be
-		 * 'short' and then we get funnies when it wraps. The correct
-		 * thing would be to extend the perf ABI with a cycle and mask
-		 * value, but because wrapping on ARM64 is very rare in
-		 * practise this 'works'.
+		 * Subtract the cycle base, such that software that
+		 * doesn't know about cap_user_time_short still 'works'
+		 * assuming no wraps.
 		 */
 		ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
 		userpg->time_zero -= ns;
@@ -1219,4 +1220,5 @@ void arch_perf_update_userpage(struct perf_event *event,
 	 */
 	userpg->cap_user_time = 1;
 	userpg->cap_user_time_zero = 1;
+	userpg->cap_user_time_short = 1;
 }
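
For reference, the userspace side of this change follows the conversion algorithm documented for perf_event_mmap_page in include/uapi/linux/perf_event.h: when cap_user_time_short is set, a raw counter value is first re-extended against time_cycles/time_mask before being scaled with time_mult/time_shift and offset by time_zero. The sketch below illustrates that path; read_cntvct() and counter_to_ns() are illustrative names (not part of the kernel or the perf ABI), and a real reader must additionally retry while the page's lock/seq field changes to get a consistent snapshot of the fields:

#include <stdint.h>
#include <linux/perf_event.h>

/*
 * Illustrative helper (arm64-specific, hypothetical name): read the
 * virtual counter that the kernel's sched_clock is based on.
 */
static inline uint64_t read_cntvct(void)
{
	uint64_t cyc;

	asm volatile("isb" ::: "memory");
	asm volatile("mrs %0, cntvct_el0" : "=r" (cyc));
	return cyc;
}

/*
 * Illustrative conversion of a raw counter value to a sched_clock
 * timestamp in ns, using the fields published in the mmap'ed
 * perf_event_mmap_page (assumes cap_user_time_zero is set and that
 * the caller rereads on pc->lock changes).
 */
static uint64_t counter_to_ns(const volatile struct perf_event_mmap_page *pc,
			      uint64_t cyc)
{
	uint64_t quot, rem;

	/*
	 * With cap_user_time_short the counter can be narrower than
	 * 64 bit; re-extend it against the published cycle base so a
	 * wrap since time_cycles is handled correctly.
	 */
	if (pc->cap_user_time_short)
		cyc = pc->time_cycles + ((cyc - pc->time_cycles) & pc->time_mask);

	/* Scale cycles to nanoseconds with the kernel-provided mult/shift. */
	quot = cyc >> pc->time_shift;
	rem  = cyc & (((uint64_t)1 << pc->time_shift) - 1);

	/*
	 * time_zero already has the cycle base subtracted (see the hunk
	 * above), so readers unaware of cap_user_time_short still get
	 * sane values as long as the counter has not wrapped.
	 */
	return pc->time_zero + quot * pc->time_mult +
	       ((rem * pc->time_mult) >> pc->time_shift);
}

A consumer would typically mmap() the perf event fd to obtain the page and then compute counter_to_ns(pc, read_cntvct()) inside a loop that restarts whenever pc->lock changes across the read.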