Diffstat (limited to 'kernel')
-rw-r--r--   kernel/softlockup.c | 37
1 file changed, 23 insertions(+), 14 deletions(-)
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index e557c44b1275..def081b8a5b3 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -15,13 +15,16 @@
 #include <linux/notifier.h>
 #include <linux/module.h>
 
+#include <asm/irq_regs.h>
+
 static DEFINE_SPINLOCK(print_lock);
 
 static DEFINE_PER_CPU(unsigned long, touch_timestamp);
 static DEFINE_PER_CPU(unsigned long, print_timestamp);
 static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
 
-static int did_panic = 0;
+static int did_panic;
+int softlockup_thresh = 10;
 
 static int
 softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
@@ -70,6 +73,7 @@ void softlockup_tick(void)
 	int this_cpu = smp_processor_id();
 	unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
 	unsigned long print_timestamp;
+	struct pt_regs *regs = get_irq_regs();
 	unsigned long now;
 
 	if (touch_timestamp == 0) {
@@ -99,21 +103,26 @@ void softlockup_tick(void)
 		wake_up_process(per_cpu(watchdog_task, this_cpu));
 
 	/* Warn about unreasonable 10+ seconds delays: */
-	if (now > (touch_timestamp + 10)) {
-		per_cpu(print_timestamp, this_cpu) = touch_timestamp;
+	if (now <= (touch_timestamp + softlockup_thresh))
+		return;
+
+	per_cpu(print_timestamp, this_cpu) = touch_timestamp;
 
-		spin_lock(&print_lock);
-		printk(KERN_ERR "BUG: soft lockup detected on CPU#%d!\n",
-			this_cpu);
+	spin_lock(&print_lock);
+	printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
+			this_cpu, now - touch_timestamp,
+			current->comm, current->pid);
+	if (regs)
+		show_regs(regs);
+	else
 		dump_stack();
-	spin_unlock(&print_lock);
-	}
+	spin_unlock(&print_lock);
 }
 
 /*
  * The watchdog thread - runs every second and touches the timestamp.
  */
-static int watchdog(void * __bind_cpu)
+static int watchdog(void *__bind_cpu)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 
@@ -151,13 +160,13 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		BUG_ON(per_cpu(watchdog_task, hotcpu));
 		p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
 		if (IS_ERR(p)) {
-			printk("watchdog for %i failed\n", hotcpu);
+			printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
 			return NOTIFY_BAD;
 		}
-		per_cpu(touch_timestamp, hotcpu) = 0;
-		per_cpu(watchdog_task, hotcpu) = p;
+		per_cpu(touch_timestamp, hotcpu) = 0;
+		per_cpu(watchdog_task, hotcpu) = p;
 		kthread_bind(p, hotcpu);
-		break;
+		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		wake_up_process(per_cpu(watchdog_task, hotcpu));
@@ -177,7 +186,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		kthread_stop(p);
 		break;
 #endif /* CONFIG_HOTPLUG_CPU */
-	}
+	}
 	return NOTIFY_OK;
 }
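
For reference, this is how the warning path of softlockup_tick() reads once the hunks above are applied. It is a sketch assembled from the '+' lines and the surrounding context lines of this diff; anything the patch does not show (for instance how 'now' is obtained, assumed here to come from a get_timestamp() helper) is marked as an assumption in the comments.

/*
 * Sketch of kernel/softlockup.c after this patch, limited to the pieces
 * the diff touches. Declarations shown as hunk context are kept;
 * unrelated parts of the file are elided.
 */
#include <linux/notifier.h>
#include <linux/module.h>

#include <asm/irq_regs.h>

static DEFINE_SPINLOCK(print_lock);

static DEFINE_PER_CPU(unsigned long, touch_timestamp);
static DEFINE_PER_CPU(unsigned long, print_timestamp);

int softlockup_thresh = 10;	/* new: tunable threshold, replaces the hard-coded 10 seconds */

void softlockup_tick(void)
{
	int this_cpu = smp_processor_id();
	unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
	struct pt_regs *regs = get_irq_regs();	/* registers of the interrupted context, may be NULL */
	unsigned long now;

	/* ... early-return checks and the watchdog wakeup are elided here;
	 *     they are unchanged by this patch ... */

	now = get_timestamp(this_cpu);	/* assumption: 'now' is set earlier in the
					 * function, outside the lines shown in the diff */

	/* Early return instead of the old "if (now > (touch_timestamp + 10)) { ... }" nesting: */
	if (now <= (touch_timestamp + softlockup_thresh))
		return;

	per_cpu(print_timestamp, this_cpu) = touch_timestamp;

	spin_lock(&print_lock);
	printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
			this_cpu, now - touch_timestamp,
			current->comm, current->pid);
	if (regs)
		show_regs(regs);	/* full register dump when the tick interrupted a running context */
	else
		dump_stack();		/* otherwise fall back to a plain stack trace */
	spin_unlock(&print_lock);
}

The net effect of the main hunk is threefold: the hard-coded 10 second limit becomes the softlockup_thresh variable, the nested if-block is flattened into an early return, and the report now states how long the CPU has been stuck along with the offending task's comm and pid, printing a full register dump via show_regs(get_irq_regs()) when one is available.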