author    Thomas Gleixner <tglx@kernel.org>  2026-02-24 17:37:53 +0100
committer Peter Zijlstra <peterz@infradead.org>  2026-02-27 16:40:12 +0100
commit    9e07a9c980eaa93fd1bba722d31eeb4bf0cbbfb4 (patch)
tree      e12042f688ea2e77266429d97a1619a8a14e12ab /kernel/time
parent    2889243848560b6b0211aba401d2fc122070ba2f (diff)
hrtimer: Rename hrtimer_cpu_base::in_hrtirq to deferred_rearm
The upcoming deferred rearming scheme has the same effect as the rearming
which is already deferred while the hrtimer interrupt is executing, so it
can reuse the in_hrtirq flag. But once rearming can be deferred beyond the
hrtimer interrupt path, that name no longer makes sense.

Rename the flag to deferred_rearm up front to keep the actual functional
change separate from the mechanical rename churn.

Signed-off-by: Thomas Gleixner <tglx@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20260224163430.935623347@kernel.org
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/hrtimer.c  28
1 file changed, 9 insertions, 19 deletions
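
The flag being renamed is declared in the hrtimer headers under
include/linux/, not in kernel/time, so the declaration change itself is
outside this diffstat. As a minimal sketch, assuming the field still sits
in the bitfield of struct hrtimer_cpu_base as in current mainline, the
rename amounts to:

	/*
	 * Illustrative only: the surrounding members follow current
	 * mainline and may differ in this tree; only the rename of
	 * in_hrtirq to deferred_rearm is taken from the patch itself.
	 */
	struct hrtimer_cpu_base {
		raw_spinlock_t		lock;
		unsigned int		cpu;
		unsigned int		active_bases;
		unsigned int		clock_was_set_seq;
		unsigned int		hres_active		: 1,
					deferred_rearm		: 1,	/* was in_hrtirq */
					hang_detected		: 1,
					softirq_activated	: 1;
		/* ... remaining members unchanged ... */
	};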
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 2e05a1885d24..6f05d2569286 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -883,11 +883,8 @@ static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
 	if (expires >= cpu_base->expires_next)
 		return;
 
-	/*
-	 * If the hrtimer interrupt is running, then it will reevaluate the
-	 * clock bases and reprogram the clock event device.
-	 */
-	if (cpu_base->in_hrtirq)
+	/* If a deferred rearm is pending skip reprogramming the device */
+	if (cpu_base->deferred_rearm)
 		return;
 
 	cpu_base->next_timer = timer;
@@ -921,12 +918,8 @@ static bool update_needs_ipi(struct hrtimer_cpu_base *cpu_base, unsigned int act
 	if (seq == cpu_base->clock_was_set_seq)
 		return false;
 
-	/*
-	 * If the remote CPU is currently handling an hrtimer interrupt, it
-	 * will reevaluate the first expiring timer of all clock bases
-	 * before reprogramming. Nothing to do here.
-	 */
-	if (cpu_base->in_hrtirq)
+	/* If a deferred rearm is pending the remote CPU will take care of it */
+	if (cpu_base->deferred_rearm)
 		return false;
 
 	/*
@@ -1334,11 +1327,8 @@ static bool __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, u64 del
 		first = enqueue_hrtimer(timer, base, mode, was_armed);
 	}
 
-	/*
-	 * If the hrtimer interrupt is running, then it will reevaluate the
-	 * clock bases and reprogram the clock event device.
-	 */
-	if (cpu_base->in_hrtirq)
+	/* If a deferred rearm is pending skip reprogramming the device */
+	if (cpu_base->deferred_rearm)
 		return false;
 
 	if (!was_first || cpu_base != this_cpu_base) {
@@ -1947,14 +1937,14 @@ static __latent_entropy void hrtimer_run_softirq(void)
 
 /*
  * Very similar to hrtimer_force_reprogram(), except it deals with
- * in_hrtirq and hang_detected.
+ * deferred_rearm and hang_detected.
  */
 static void hrtimer_rearm(struct hrtimer_cpu_base *cpu_base, ktime_t now)
 {
 	ktime_t expires_next = hrtimer_update_next_event(cpu_base);
 
 	cpu_base->expires_next = expires_next;
-	cpu_base->in_hrtirq = false;
+	cpu_base->deferred_rearm = false;
 
 	if (unlikely(cpu_base->hang_detected)) {
 		/*
@@ -1985,7 +1975,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 	entry_time = now = hrtimer_update_base(cpu_base);
 retry:
-	cpu_base->in_hrtirq = true;
+	cpu_base->deferred_rearm = true;
 	/*
	 * Set expires_next to KTIME_MAX, which prevents that remote CPUs queue
	 * timers while __hrtimer_run_queues() is expiring the clock bases.
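
Taken together, the hunks show the flag's protocol: hrtimer_interrupt()
sets deferred_rearm on entry, the enqueue and remote-update paths skip
touching the clock event device while it is set, and hrtimer_rearm()
clears it once expires_next has been recomputed. The following standalone
C model of that handshake is a sketch only; names and control flow are
simplified well beyond the kernel code.

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-in for struct hrtimer_cpu_base; only the flag is modeled. */
	struct cpu_base_model {
		bool deferred_rearm;
	};

	/* Enqueue-side reprogram: a no-op while a rearm is already deferred. */
	static void reprogram(struct cpu_base_model *b)
	{
		if (b->deferred_rearm) {
			puts("reprogram skipped: rearm deferred");
			return;
		}
		puts("clock event device reprogrammed");
	}

	/* Expiry-side flow mirroring hrtimer_interrupt()/hrtimer_rearm(). */
	static void interrupt_then_rearm(struct cpu_base_model *b)
	{
		b->deferred_rearm = true;	/* hrtimer_interrupt() entry */
		reprogram(b);			/* enqueue during expiry: deferred */
		b->deferred_rearm = false;	/* hrtimer_rearm() clears the flag */
		reprogram(b);			/* normal enqueue: programs the device */
	}

	int main(void)
	{
		struct cpu_base_model b = { .deferred_rearm = false };
		interrupt_then_rearm(&b);
		return 0;
	}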