author    Douglas Anderson <dianders@chromium.org> 2023-05-26 18:41:36 -0700
committer Andrew Morton <akpm@linux-foundation.org> 2023-06-19 16:25:27 -0700
commit    d3b62ace0f097f1d863fb6c41df3c61503e4ec9e (patch)
tree      7a7d05ca38c1d9e0731aca9fc2e6c8a90c3470d8 /kernel/watchdog.c
parent    05e7b558766114aa9c3d5d3af188a5c574809661 (diff)
watchdog/buddy: cleanup how watchdog_buddy_check_hardlockup() is called
In the patch ("watchdog/hardlockup: detect hard lockups using secondary (buddy) CPUs"), we added a call from the common watchdog.c file into the buddy. That call could be done more cleanly. Specifically: 1. If we move the call into watchdog_hardlockup_kick() then it keeps watchdog_timer_fn() simpler. 2. We don't need to pass an "unsigned long" to the buddy for the timer count. In the patch ("watchdog/hardlockup: add a "cpu" param to watchdog_hardlockup_check()") the count was changed to "atomic_t" which is backed by an int, so we should match types. Link: https://lkml.kernel.org/r/20230526184139.6.I006c7d958a1ea5c4e1e4dc44a25596d9bb5fd3ba@changeid Signed-off-by: Douglas Anderson <dianders@chromium.org> Suggested-by: Petr Mladek <pmladek@suse.com> Reviewed-by: Petr Mladek <pmladek@suse.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: "David S. Miller" <davem@davemloft.net> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Nicholas Piggin <npiggin@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'kernel/watchdog.c')
-rw-r--r--  kernel/watchdog.c  15
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 85f4839b6faf..6cc46b8e3d07 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -131,9 +131,12 @@ static bool is_hardlockup(unsigned int cpu)
return false;
}
-static unsigned long watchdog_hardlockup_kick(void)
+static void watchdog_hardlockup_kick(void)
{
- return atomic_inc_return(this_cpu_ptr(&hrtimer_interrupts));
+ int new_interrupts;
+
+ new_interrupts = atomic_inc_return(this_cpu_ptr(&hrtimer_interrupts));
+ watchdog_buddy_check_hardlockup(new_interrupts);
}
void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
@@ -195,7 +198,7 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
#else /* CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */
-static inline unsigned long watchdog_hardlockup_kick(void) { return 0; }
+static inline void watchdog_hardlockup_kick(void) { }
#endif /* !CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */
@@ -449,15 +452,11 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
struct pt_regs *regs = get_irq_regs();
int duration;
int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
- unsigned long hrtimer_interrupts;
if (!watchdog_enabled)
return HRTIMER_NORESTART;
- hrtimer_interrupts = watchdog_hardlockup_kick();
-
- /* test for hardlockups */
- watchdog_buddy_check_hardlockup(hrtimer_interrupts);
+ watchdog_hardlockup_kick();
/* kick the softlockup detector */
if (completion_done(this_cpu_ptr(&softlockup_completion))) {