author | Peter Zijlstra <peterz@infradead.org> | 2023-05-19 12:21:10 +0200
---|---|---
committer | Peter Zijlstra <peterz@infradead.org> | 2023-06-05 21:11:09 +0200
commit | fb7d4948c4da2dbd26da4b7ec76bbd2f19ff862a (patch) |
tree | 2ba5554afc9632b70edef76bd257121b3a33e51a /kernel/sched |
parent | 5c5e9a2b25b6a79d4b7a5f2a54d02ef1c36dc35a (diff) |
download | lwn-fb7d4948c4da2dbd26da4b7ec76bbd2f19ff862a.tar.gz, lwn-fb7d4948c4da2dbd26da4b7ec76bbd2f19ff862a.zip |
sched/clock: Provide local_clock_noinstr()
Now that all ARCH_WANTS_NO_INSTR architectures (arm64, loongarch,
s390, x86) provide sched_clock_noinstr(), use this to provide
local_clock_noinstr().
This local_clock_noinstr() will be safe to use from noinstr code with
the assumption that any such noinstr code is non-preemptible (it had
better be, entry code will have IRQs disabled while __cpuidle must
have preemption disabled).
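As an illustration only (not part of this patch), a minimal sketch of the intended caller side, assuming the code already runs in a non-preemptible noinstr region as described above; the function name is hypothetical:

```c
#include <linux/sched/clock.h>

/*
 * Hypothetical noinstr caller: preemption is already disabled here (as it
 * must be for any noinstr region), so the clock can be read directly,
 * without the preempt_disable_notrace()/preempt_enable_notrace() pair that
 * the instrumentable local_clock() wrapper adds.
 */
static noinstr u64 my_idle_timestamp(void)
{
	return local_clock_noinstr();
}
```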
Specifically, preempt_enable_notrace(), a common part of many a
sched_clock() implementation, calls out to schedule() -- even though,
per the above, it will never trigger -- which frustrates noinstr
validation:
vmlinux.o: warning: objtool: local_clock+0xb5: call to preempt_schedule_notrace_thunk() leaves .noinstr.text section
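For context, a simplified sketch of where that call comes from, taken from the CONFIG_PREEMPTION flavour of the macro in include/linux/preempt.h (the exact expansion is config- and arch-dependent; on x86 __preempt_schedule_notrace() is routed through preempt_schedule_notrace_thunk(), the symbol named in the warning above):

```c
/*
 * Simplified: preempt_enable_notrace() as used by the old local_clock().
 * The __preempt_schedule_notrace() call is what objtool flags when it
 * appears inside .noinstr.text, even though a correctly non-preemptible
 * noinstr caller can never actually take that branch.
 */
#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_notrace(); \
} while (0)
```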
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Michael Kelley <mikelley@microsoft.com> # Hyper-V
Link: https://lore.kernel.org/r/20230519102715.978624636@infradead.org
Diffstat (limited to 'kernel/sched')
-rw-r--r-- | kernel/sched/clock.c | 19 |
1 file changed, 13 insertions, 6 deletions
```diff
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index b5cc2b53464d..5a575a0ba4e6 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -266,7 +266,7 @@ static __always_inline u64 sched_clock_local(struct sched_clock_data *scd)
 	s64 delta;
 
 again:
-	now = sched_clock();
+	now = sched_clock_noinstr();
 	delta = now - scd->tick_raw;
 	if (unlikely(delta < 0))
 		delta = 0;
@@ -293,22 +293,29 @@ again:
 	return clock;
 }
 
-noinstr u64 local_clock(void)
+noinstr u64 local_clock_noinstr(void)
 {
 	u64 clock;
 
 	if (static_branch_likely(&__sched_clock_stable))
-		return sched_clock() + __sched_clock_offset;
+		return sched_clock_noinstr() + __sched_clock_offset;
 
 	if (!static_branch_likely(&sched_clock_running))
-		return sched_clock();
+		return sched_clock_noinstr();
 
-	preempt_disable_notrace();
 	clock = sched_clock_local(this_scd());
-	preempt_enable_notrace();
 
 	return clock;
 }
+
+u64 local_clock(void)
+{
+	u64 now;
+	preempt_disable_notrace();
+	now = local_clock_noinstr();
+	preempt_enable_notrace();
+	return now;
+}
 EXPORT_SYMBOL_GPL(local_clock);
 
 static notrace u64 sched_clock_remote(struct sched_clock_data *scd)
```