author	Carsten Emde <C.Emde@osadl.org>	2010-02-24 07:49:31 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2010-02-24 12:10:52 +0100
commit	2022ce18f08a62593979bd3875111ee010b57d43 (patch)
tree	0abe39b76b2de493ef68540ad59047472268727a
parent	a359d62fa46b9c41621fec1c3b31730cfc436b40 (diff)
download	lwn-2022ce18f08a62593979bd3875111ee010b57d43.tar.gz
	lwn-2022ce18f08a62593979bd3875111ee010b57d43.zip
tracing: Fix the merge fallout from 31-rt to 33
Apply upgrade changes from 2.6.31 to the histograms.

Signed-off-by: Carsten Emde <C.Emde@osadl.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--	include/linux/sched.h	3
-rw-r--r--	include/trace/events/hist.h	36
-rw-r--r--	kernel/hrtimer.c	11
-rw-r--r--	kernel/trace/Kconfig	64
-rw-r--r--	kernel/trace/Makefile	1
-rw-r--r--	kernel/trace/latency_hist.c	653
6 files changed, 532 insertions, 236 deletions
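
[Editor's note, not part of the patch: the histogram machinery this patch extends is driven entirely through the debugfs files named in the Kconfig help texts below. A minimal userspace sketch, assuming debugfs is mounted at /sys/kernel/debug and the kernel was built with CONFIG_WAKEUP_LATENCY_HIST, might look like this:]

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Writing a non-zero number enables the wakeup histograms. */
	fd = open("/sys/kernel/debug/tracing/latency_hist/enable/wakeup",
		  O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "1", 1) != 1)
		perror("enable");
	close(fd);

	/* One histogram file per CPU; dump the data for CPU 0. */
	fd = open("/sys/kernel/debug/tracing/latency_hist/wakeup/CPU0",
		  O_RDONLY);
	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);

	return 0;
}

[Writing 0 to the same enable file stops collection, and writing to the per-directory "reset" file clears the accumulated data; see latency_hist_reset() in the diff.]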
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 64cf01a3cd0f..9a9f376c4f44 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1615,6 +1615,9 @@ struct task_struct {
unsigned long trace;
/* bitmask of trace recursion */
unsigned long trace_recursion;
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ u64 preempt_timestamp_hist;
+#endif
#endif /* CONFIG_TRACING */
unsigned long stack_start;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
diff --git a/include/trace/events/hist.h b/include/trace/events/hist.h
index 73b0454eb90f..28646db2c775 100644
--- a/include/trace/events/hist.h
+++ b/include/trace/events/hist.h
@@ -17,8 +17,8 @@ TRACE_EVENT(preemptirqsoff_hist,
TP_ARGS(reason, starthist),
TP_STRUCT__entry(
- __field( int, reason )
- __field( int, starthist )
+ __field(int, reason )
+ __field(int, starthist )
),
TP_fast_assign(
@@ -31,6 +31,38 @@ TRACE_EVENT(preemptirqsoff_hist,
);
#endif
+#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST
+#define trace_hrtimer_interrupt(a,b,c,d)
+#else
+TRACE_EVENT(hrtimer_interrupt,
+
+ TP_PROTO(int cpu, long long offset, struct task_struct *curr, struct task_struct *task),
+
+ TP_ARGS(cpu, offset, curr, task),
+
+ TP_STRUCT__entry(
+ __field(int, cpu )
+ __field(long long, offset )
+ __array(char, ccomm, TASK_COMM_LEN)
+ __field(int, cprio )
+ __array(char, tcomm, TASK_COMM_LEN)
+ __field(int, tprio )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->offset = offset;
+ memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN);
+ __entry->cprio = curr->prio;
+ memcpy(__entry->tcomm, task != NULL ? task->comm : "<none>", task != NULL ? TASK_COMM_LEN : 7);
+ __entry->tprio = task != NULL ? task->prio : -1;
+ ),
+
+ TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]",
+ __entry->cpu, __entry->offset, __entry->ccomm, __entry->cprio, __entry->tcomm, __entry->tprio)
+);
+#endif
+
#endif /* _TRACE_HIST_H */
/* This part must be outside protection */
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 32006ae805ab..2bf8f8555caa 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -49,6 +49,7 @@
#include <asm/uaccess.h>
#include <trace/events/timer.h>
+#include <trace/events/hist.h>
/*
* The timer bases:
@@ -1416,6 +1417,8 @@ static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
#ifdef CONFIG_HIGH_RES_TIMERS
+static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
+
/*
* High resolution timer interrupt
* Called with interrupts disabled
@@ -1458,6 +1461,14 @@ retry:
timer = rb_entry(node, struct hrtimer, node);
+ trace_hrtimer_interrupt(raw_smp_processor_id(),
+ ktime_to_ns(ktime_sub(
+ hrtimer_get_expires(timer), basenow)),
+ current,
+ timer->function == hrtimer_wakeup ?
+ container_of(timer, struct hrtimer_sleeper,
+ timer)->task : NULL);
+
/*
* The immediate goal for using the softexpires is
* minimizing wakeups, not running timers at the
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index f5051d22d61b..7d57890e906d 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -179,16 +179,19 @@ config INTERRUPT_OFF_HIST
bool "Interrupts-off Latency Histogram"
depends on IRQSOFF_TRACER
help
- This option generates a continuously updated histogram (one per cpu)
+ This option generates continuously updated histograms (one per cpu)
of the duration of time periods with interrupts disabled. The
- histogram is disabled by default. To enable it, write a non-zero
- number to the related file in
+ histograms are disabled by default. To enable them, write a non-zero
+ number to
/sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
- If PREEMPT_OFF_HIST is also selected, an additional histogram (one
- per cpu) is generated that accumulates the duration of time periods
- when both interrupts and preemption are disabled.
+ If PREEMPT_OFF_HIST is also selected, additional histograms (one
+ per cpu) are generated that accumulate the duration of time periods
+ when both interrupts and preemption are disabled. The histogram data
+ will be located in the debug file system at
+
+ /sys/kernel/debug/tracing/latency_hist/irqsoff
config PREEMPT_TRACER
bool "Preemption-off Latency Tracer"
@@ -216,16 +219,19 @@ config PREEMPT_OFF_HIST
bool "Preemption-off Latency Histogram"
depends on PREEMPT_TRACER
help
- This option generates a continuously updated histogram (one per cpu)
+ This option generates continuously updated histograms (one per cpu)
of the duration of time periods with preemption disabled. The
- histogram is disabled by default. To enable it, write a non-zero
+ histograms are disabled by default. To enable them, write a non-zero
number to
/sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
- If INTERRUPT_OFF_HIST is also selected, an additional histogram (one
- per cpu) is generated that accumulates the duration of time periods
- when both interrupts and preemption are disabled.
+ If INTERRUPT_OFF_HIST is also selected, additional histograms (one
+ per cpu) are generated that accumulate the duration of time periods
+ when both interrupts and preemption are disabled. The histogram data
+ will be located in the debug file system at
+
+ /sys/kernel/debug/tracing/latency_hist/preemptoff
config SCHED_TRACER
bool "Scheduling Latency Tracer"
@@ -240,12 +246,42 @@ config WAKEUP_LATENCY_HIST
bool "Scheduling Latency Histogram"
depends on SCHED_TRACER
help
- This option generates a continuously updated histogram (one per cpu)
- of the scheduling latency of the highest priority task. The histogram
- is disabled by default. To enable it, write a non-zero number to
+ This option generates continuously updated histograms (one per cpu)
+ of the scheduling latency of the highest priority task.
+ The histograms are disabled by default. To enable them, write a
+ non-zero number to
/sys/kernel/debug/tracing/latency_hist/enable/wakeup
+ Two different algorithms are used, one to determine the latency of
+ processes that exclusively use the highest priority of the system and
+ another one to determine the latency of processes that share the
+ highest system priority with other processes. The former is used to
+ improve hardware and system software, the latter to optimize the
+ priority design of a given system. The histogram data will be
+ located in the debug file system at
+
+ /sys/kernel/debug/tracing/latency_hist/wakeup
+
+ and
+
+ /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio
+
+config MISSED_TIMER_OFFSETS_HIST
+ depends on GENERIC_TIME
+ select GENERIC_TRACER
+ bool "Missed timer offsets histogram"
+ help
+ Generate a histogram of missed timer offsets in microseconds. The
+ histograms are disabled by default. To enable them, write a non-zero
+ number to
+
+ /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets
+
+ The histogram data will be located in the debug file system at
+
+ /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets
+
config SYSPROF_TRACER
bool "Sysprof Tracer"
depends on X86
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 04e959d690c1..5dddc4da1399 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o
obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o
obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o
+obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o
obj-$(CONFIG_NOP_TRACER) += trace_nop.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c
index b3b5ea25984f..8300929b0f1d 100644
--- a/kernel/trace/latency_hist.c
+++ b/kernel/trace/latency_hist.c
@@ -34,6 +34,8 @@ enum {
PREEMPTOFF_LATENCY,
PREEMPTIRQSOFF_LATENCY,
WAKEUP_LATENCY,
+ WAKEUP_LATENCY_SHAREDPRIO,
+ MISSED_TIMER_OFFSETS,
MAX_LATENCY_TYPE,
};
@@ -41,9 +43,11 @@ enum {
struct hist_data {
atomic_t hist_mode; /* 0 log, 1 don't log */
+ long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */
unsigned long min_lat;
unsigned long max_lat;
- unsigned long long beyond_hist_bound_samples;
+ unsigned long long below_hist_bound_samples;
+ unsigned long long above_hist_bound_samples;
unsigned long long accumulate_lat;
unsigned long long total_samples;
unsigned long long hist_array[MAX_ENTRY_NUM];
@@ -59,16 +63,22 @@ static char *latency_hist_dir_root = "latency_hist";
#ifdef CONFIG_INTERRUPT_OFF_HIST
static DEFINE_PER_CPU(struct hist_data, irqsoff_hist);
static char *irqsoff_hist_dir = "irqsoff";
+static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start);
+static DEFINE_PER_CPU(int, hist_irqsoff_counting);
#endif
#ifdef CONFIG_PREEMPT_OFF_HIST
static DEFINE_PER_CPU(struct hist_data, preemptoff_hist);
static char *preemptoff_hist_dir = "preemptoff";
+static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start);
+static DEFINE_PER_CPU(int, hist_preemptoff_counting);
#endif
#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist);
static char *preemptirqsoff_hist_dir = "preemptirqsoff";
+static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start);
+static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting);
#endif
#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST)
@@ -79,34 +89,59 @@ static struct enable_data preemptirqsoff_enabled_data = {
};
#endif
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+struct maxlatproc_data {
+ char comm[FIELD_SIZEOF(struct task_struct, comm)];
+ int pid;
+ int prio;
+ long latency;
+};
+#endif
+
#ifdef CONFIG_WAKEUP_LATENCY_HIST
static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist);
+static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio);
static char *wakeup_latency_hist_dir = "wakeup";
+static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
static notrace void probe_wakeup_latency_hist_start(struct rq *rq,
struct task_struct *p, int success);
static notrace void probe_wakeup_latency_hist_stop(struct rq *rq,
struct task_struct *prev, struct task_struct *next);
+static notrace void probe_sched_migrate_task(struct task_struct *task,
+ int cpu);
static struct enable_data wakeup_latency_enabled_data = {
.latency_type = WAKEUP_LATENCY,
.enabled = 0,
};
-static struct task_struct *ts;
-struct maxlatproc_data {
- char comm[sizeof(ts->comm)];
- unsigned int pid;
- unsigned int prio;
- unsigned long latency;
-};
static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc);
-static unsigned wakeup_prio = (unsigned)-1;
-static struct task_struct *wakeup_task;
-static int wakeup_pid;
+static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio);
+static DEFINE_PER_CPU(struct task_struct *, wakeup_task);
+static DEFINE_PER_CPU(int, wakeup_sharedprio);
+static unsigned long wakeup_pid;
+#endif
+
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets);
+static char *missed_timer_offsets_dir = "missed_timer_offsets";
+static notrace void probe_hrtimer_interrupt(int cpu,
+ long long offset, struct task_struct *curr, struct task_struct *task);
+static struct enable_data missed_timer_offsets_enabled_data = {
+ .latency_type = MISSED_TIMER_OFFSETS,
+ .enabled = 0,
+};
+static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc);
+static unsigned long missed_timer_offsets_pid;
#endif
void notrace latency_hist(int latency_type, int cpu, unsigned long latency,
struct task_struct *p)
{
struct hist_data *my_hist;
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ struct maxlatproc_data *mp = NULL;
+#endif
if (cpu < 0 || cpu >= NR_CPUS || latency_type < 0 ||
latency_type >= MAX_LATENCY_TYPE)
@@ -118,22 +153,30 @@ void notrace latency_hist(int latency_type, int cpu, unsigned long latency,
my_hist = &per_cpu(irqsoff_hist, cpu);
break;
#endif
-
#ifdef CONFIG_PREEMPT_OFF_HIST
case PREEMPTOFF_LATENCY:
my_hist = &per_cpu(preemptoff_hist, cpu);
break;
#endif
-
#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
case PREEMPTIRQSOFF_LATENCY:
my_hist = &per_cpu(preemptirqsoff_hist, cpu);
break;
#endif
-
#ifdef CONFIG_WAKEUP_LATENCY_HIST
case WAKEUP_LATENCY:
my_hist = &per_cpu(wakeup_latency_hist, cpu);
+ mp = &per_cpu(wakeup_maxlatproc, cpu);
+ break;
+ case WAKEUP_LATENCY_SHAREDPRIO:
+ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
+ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
+ break;
+#endif
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ case MISSED_TIMER_OFFSETS:
+ my_hist = &per_cpu(missed_timer_offsets, cpu);
+ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
break;
#endif
default:
@@ -143,18 +186,24 @@ void notrace latency_hist(int latency_type, int cpu, unsigned long latency,
if (atomic_read(&my_hist->hist_mode) == 0)
return;
- if (latency >= MAX_ENTRY_NUM)
- my_hist->beyond_hist_bound_samples++;
- else
+ latency += my_hist->offset;
+
+ if (latency < 0 || latency >= MAX_ENTRY_NUM) {
+ if (latency < 0)
+ my_hist->below_hist_bound_samples++;
+ else
+ my_hist->above_hist_bound_samples++;
+ } else
my_hist->hist_array[latency]++;
if (latency < my_hist->min_lat)
my_hist->min_lat = latency;
- else if (latency > my_hist->max_lat) {
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- if (latency_type == WAKEUP_LATENCY) {
- struct maxlatproc_data *mp =
- &per_cpu(wakeup_maxlatproc, cpu);
+ if (latency > my_hist->max_lat) {
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ if (latency_type == WAKEUP_LATENCY ||
+ latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
+ latency_type == MISSED_TIMER_OFFSETS) {
strncpy(mp->comm, p->comm, sizeof(mp->comm));
mp->pid = task_pid_nr(p);
mp->prio = p->prio;
@@ -163,10 +212,8 @@ void notrace latency_hist(int latency_type, int cpu, unsigned long latency,
#endif
my_hist->max_lat = latency;
}
-
my_hist->total_samples++;
my_hist->accumulate_lat += latency;
- return;
}
static void *l_start(struct seq_file *m, loff_t *pos)
@@ -179,30 +226,42 @@ static void *l_start(struct seq_file *m, loff_t *pos)
return NULL;
if (index == 0) {
- char avgstr[32];
+ char minstr[32], avgstr[32], maxstr[32];
atomic_dec(&my_hist->hist_mode);
+
if (likely(my_hist->total_samples)) {
unsigned long avg = (unsigned long)
div64_u64(my_hist->accumulate_lat,
my_hist->total_samples);
- sprintf(avgstr, "%lu", avg);
- } else
- strcpy(avgstr, "<undef>");
+ snprintf(minstr, sizeof(minstr), "%ld",
+ (long) my_hist->min_lat - my_hist->offset);
+ snprintf(avgstr, sizeof(avgstr), "%ld",
+ (long) avg - my_hist->offset);
+ snprintf(maxstr, sizeof(maxstr), "%ld",
+ (long) my_hist->max_lat - my_hist->offset);
+ } else {
+ strcpy(minstr, "<undef>");
+ strcpy(avgstr, minstr);
+ strcpy(maxstr, minstr);
+ }
- seq_printf(m, "#Minimum latency: %lu microseconds.\n"
- "#Average latency: %s microseconds.\n"
- "#Maximum latency: %lu microseconds.\n"
+ seq_printf(m, "#Minimum latency: %s microseconds\n"
+ "#Average latency: %s microseconds\n"
+ "#Maximum latency: %s microseconds\n"
"#Total samples: %llu\n"
+ "#There are %llu samples lower than %ld"
+ " microseconds.\n"
"#There are %llu samples greater or equal"
- " than %d microseconds\n"
- "#usecs\t%16s\n"
- , my_hist->min_lat
- , avgstr
- , my_hist->max_lat
- , my_hist->total_samples
- , my_hist->beyond_hist_bound_samples
- , MAX_ENTRY_NUM, "samples");
+ " than %ld microseconds.\n"
+ "#usecs\t%16s\n",
+ minstr, avgstr, maxstr,
+ my_hist->total_samples,
+ my_hist->below_hist_bound_samples,
+ -my_hist->offset,
+ my_hist->above_hist_bound_samples,
+ MAX_ENTRY_NUM - my_hist->offset,
+ "samples");
}
if (index >= MAX_ENTRY_NUM)
return NULL;
@@ -234,7 +293,8 @@ static int l_show(struct seq_file *m, void *p)
int index = *(loff_t *) p;
struct hist_data *my_hist = m->private;
- seq_printf(m, "%5d\t%16llu\n", index, my_hist->hist_array[index]);
+ seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset,
+ my_hist->hist_array[index]);
return 0;
}
@@ -269,7 +329,8 @@ static void hist_reset(struct hist_data *hist)
atomic_dec(&hist->hist_mode);
memset(hist->hist_array, 0, sizeof(hist->hist_array));
- hist->beyond_hist_bound_samples = 0ULL;
+ hist->below_hist_bound_samples = 0ULL;
+ hist->above_hist_bound_samples = 0ULL;
hist->min_lat = 0xFFFFFFFFUL;
hist->max_lat = 0UL;
hist->total_samples = 0ULL;
@@ -280,52 +341,61 @@ static void hist_reset(struct hist_data *hist)
static ssize_t
latency_hist_reset(struct file *file, const char __user *a,
- size_t size, loff_t *off)
+ size_t size, loff_t *off)
{
int cpu;
- struct hist_data *hist;
- int latency_type = (int) file->private_data;
+ struct hist_data *hist = NULL;
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ struct maxlatproc_data *mp = NULL;
+#endif
+ off_t latency_type = (off_t) file->private_data;
- switch (latency_type) {
+ for_each_online_cpu(cpu) {
+ switch (latency_type) {
#ifdef CONFIG_PREEMPT_OFF_HIST
- case PREEMPTOFF_LATENCY:
- for_each_online_cpu(cpu) {
+ case PREEMPTOFF_LATENCY:
hist = &per_cpu(preemptoff_hist, cpu);
- hist_reset(hist);
- }
- break;
+ break;
#endif
-
#ifdef CONFIG_INTERRUPT_OFF_HIST
- case IRQSOFF_LATENCY:
- for_each_online_cpu(cpu) {
+ case IRQSOFF_LATENCY:
hist = &per_cpu(irqsoff_hist, cpu);
- hist_reset(hist);
- }
- break;
+ break;
#endif
-
#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
- case PREEMPTIRQSOFF_LATENCY:
- for_each_online_cpu(cpu) {
+ case PREEMPTIRQSOFF_LATENCY:
hist = &per_cpu(preemptirqsoff_hist, cpu);
- hist_reset(hist);
- }
- break;
+ break;
#endif
-
#ifdef CONFIG_WAKEUP_LATENCY_HIST
- case WAKEUP_LATENCY:
- for_each_online_cpu(cpu) {
- struct maxlatproc_data *mp =
- &per_cpu(wakeup_maxlatproc, cpu);
- mp->comm[0] = '\0';
- mp->prio = mp->pid = mp->latency = 0;
+ case WAKEUP_LATENCY:
hist = &per_cpu(wakeup_latency_hist, cpu);
- hist_reset(hist);
+ mp = &per_cpu(wakeup_maxlatproc, cpu);
+ break;
+ case WAKEUP_LATENCY_SHAREDPRIO:
+ hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
+ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
+ break;
+#endif
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ case MISSED_TIMER_OFFSETS:
+ hist = &per_cpu(missed_timer_offsets, cpu);
+ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
+ break;
+#endif
+ }
+
+ hist_reset(hist);
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ if (latency_type == WAKEUP_LATENCY ||
+ latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
+ latency_type == MISSED_TIMER_OFFSETS) {
+ mp->comm[0] = '\0';
+ mp->prio = mp->pid = mp->latency = -1;
}
- break;
#endif
}
@@ -334,24 +404,22 @@ latency_hist_reset(struct file *file, const char __user *a,
#ifdef CONFIG_WAKEUP_LATENCY_HIST
static ssize_t
-latency_hist_show_pid(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
+show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
{
char buf[64];
int r;
+ unsigned long *this_pid = file->private_data;
- r = snprintf(buf, sizeof(buf), "%u\n", wakeup_pid);
- if (r > sizeof(buf))
- r = sizeof(buf);
+ r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
-static ssize_t
-latency_hist_pid(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
+static ssize_t do_pid(struct file *file, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
{
char buf[64];
unsigned long pid;
+ unsigned long *this_pid = file->private_data;
if (cnt >= sizeof(buf))
return -EINVAL;
@@ -364,63 +432,44 @@ latency_hist_pid(struct file *filp, const char __user *ubuf,
if (strict_strtoul(buf, 10, &pid))
return(-EINVAL);
- wakeup_pid = pid;
+ *this_pid = pid;
+
return cnt;
}
+#endif
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
static ssize_t
-latency_hist_show_maxlatproc(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
+show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
{
char buf[1024];
int r;
- struct maxlatproc_data *mp = (struct maxlatproc_data *)
- filp->private_data;
+ struct maxlatproc_data *mp = file->private_data;
- r = snprintf(buf, sizeof(buf), "%5d %3d %ld %s\n",
- mp->pid, mp->prio, mp->latency, mp->comm);
- if (r > sizeof(buf))
- r = sizeof(buf);
+ r = snprintf(buf, sizeof(buf), "%d %d %ld %s\n",
+ mp->pid, MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->comm);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
-
-#endif
-
-#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
-#ifdef CONFIG_INTERRUPT_OFF_HIST
-static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start);
-static DEFINE_PER_CPU(int, hist_irqsoff_counting);
-#endif
-#ifdef CONFIG_PREEMPT_OFF_HIST
-static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start);
-static DEFINE_PER_CPU(int, hist_preemptoff_counting);
-#endif
-#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
-static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start);
-static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting);
#endif
static ssize_t
-latency_hist_show_enable(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
+show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
{
char buf[64];
- struct enable_data *ed = (struct enable_data *) filp->private_data;
+ struct enable_data *ed = file->private_data;
int r;
r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled);
- if (r > sizeof(buf))
- r = sizeof(buf);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
-latency_hist_enable(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
+do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos)
{
char buf[64];
long enable;
- struct enable_data *ed = (struct enable_data *) filp->private_data;
+ struct enable_data *ed = file->private_data;
if (cnt >= sizeof(buf))
return -EINVAL;
@@ -440,6 +489,19 @@ latency_hist_enable(struct file *filp, const char __user *ubuf,
int ret;
switch (ed->latency_type) {
+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
+ case PREEMPTIRQSOFF_LATENCY:
+ ret = register_trace_preemptirqsoff_hist(
+ probe_preemptirqsoff_hist);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't assign "
+ "probe_preemptirqsoff_hist "
+ "to trace_preemptirqsoff_hist\n");
+ return ret;
+ }
+ break;
+#endif
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
case WAKEUP_LATENCY:
ret = register_trace_sched_wakeup(
probe_wakeup_latency_hist_start);
@@ -467,53 +529,94 @@ latency_hist_enable(struct file *filp, const char __user *ubuf,
"to trace_sched_switch\n");
unregister_trace_sched_wakeup(
probe_wakeup_latency_hist_start);
+ unregister_trace_sched_wakeup_new(
+ probe_wakeup_latency_hist_start);
+ return ret;
+ }
+ ret = register_trace_sched_migrate_task(
+ probe_sched_migrate_task);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't assign "
+ "probe_sched_migrate_task "
+ "to trace_sched_migrate_task\n");
+ unregister_trace_sched_wakeup(
+ probe_wakeup_latency_hist_start);
+ unregister_trace_sched_wakeup_new(
+ probe_wakeup_latency_hist_start);
unregister_trace_sched_switch(
probe_wakeup_latency_hist_stop);
return ret;
}
break;
- case PREEMPTIRQSOFF_LATENCY:
- ret = register_trace_preemptirqsoff_hist(
- probe_preemptirqsoff_hist);
+#endif
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ case MISSED_TIMER_OFFSETS:
+ ret = register_trace_hrtimer_interrupt(
+ probe_hrtimer_interrupt);
if (ret) {
pr_info("wakeup trace: Couldn't assign "
- "probe_preemptirqsoff_hist "
- "to trace_preemptirqsoff_hist\n");
+ "probe_hrtimer_interrupt "
+ "to trace_hrtimer_interrupt\n");
return ret;
}
break;
+#endif
default:
break;
}
} else {
- int cpu;
-
switch (ed->latency_type) {
- case WAKEUP_LATENCY:
- unregister_trace_sched_wakeup(
- probe_wakeup_latency_hist_start);
- unregister_trace_sched_wakeup_new(
- probe_wakeup_latency_hist_start);
- unregister_trace_sched_switch(
- probe_wakeup_latency_hist_stop);
- wakeup_task = NULL;
- wakeup_prio = (unsigned)-1;
- break;
+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
case PREEMPTIRQSOFF_LATENCY:
- unregister_trace_preemptirqsoff_hist(
- probe_preemptirqsoff_hist);
- for_each_online_cpu(cpu) {
+ {
+ int cpu;
+
+ unregister_trace_preemptirqsoff_hist(
+ probe_preemptirqsoff_hist);
+ for_each_online_cpu(cpu) {
#ifdef CONFIG_INTERRUPT_OFF_HIST
- per_cpu(hist_irqsoff_counting, cpu) = 0;
+ per_cpu(hist_irqsoff_counting,
+ cpu) = 0;
#endif
#ifdef CONFIG_PREEMPT_OFF_HIST
- per_cpu(hist_preemptoff_counting, cpu) = 0;
+ per_cpu(hist_preemptoff_counting,
+ cpu) = 0;
#endif
#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
- per_cpu(hist_preemptirqsoff_counting, cpu) = 0;
+ per_cpu(hist_preemptirqsoff_counting,
+ cpu) = 0;
+#endif
+ }
+ }
+ break;
#endif
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ case WAKEUP_LATENCY:
+ {
+ int cpu;
+
+ unregister_trace_sched_wakeup(
+ probe_wakeup_latency_hist_start);
+ unregister_trace_sched_wakeup_new(
+ probe_wakeup_latency_hist_start);
+ unregister_trace_sched_switch(
+ probe_wakeup_latency_hist_stop);
+ unregister_trace_sched_migrate_task(
+ probe_sched_migrate_task);
+
+ for_each_online_cpu(cpu) {
+ per_cpu(wakeup_task, cpu) = NULL;
+ per_cpu(wakeup_sharedprio, cpu) = 0;
+ }
}
break;
+#endif
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ case MISSED_TIMER_OFFSETS:
+ unregister_trace_hrtimer_interrupt(
+ probe_hrtimer_interrupt);
+ break;
+#endif
default:
break;
}
@@ -522,29 +625,33 @@ latency_hist_enable(struct file *filp, const char __user *ubuf,
return cnt;
}
-static struct file_operations latency_hist_reset_fops = {
+static const struct file_operations latency_hist_reset_fops = {
.open = tracing_open_generic,
.write = latency_hist_reset,
};
-static struct file_operations latency_hist_pid_fops = {
+static const struct file_operations enable_fops = {
.open = tracing_open_generic,
- .read = latency_hist_show_pid,
- .write = latency_hist_pid,
+ .read = show_enable,
+ .write = do_enable,
};
-static struct file_operations latency_hist_maxlatproc_fops = {
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+static const struct file_operations pid_fops = {
.open = tracing_open_generic,
- .read = latency_hist_show_maxlatproc,
+ .read = show_pid,
+ .write = do_pid,
};
-static struct file_operations latency_hist_enable_fops = {
+static const struct file_operations maxlatproc_fops = {
.open = tracing_open_generic,
- .read = latency_hist_show_enable,
- .write = latency_hist_enable,
+ .read = show_maxlatproc,
};
+#endif
-notrace void probe_preemptirqsoff_hist(int reason, int starthist)
+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
+static notrace void probe_preemptirqsoff_hist(int reason, int starthist)
{
int cpu = raw_smp_processor_id();
int time_set = 0;
@@ -643,178 +750,284 @@ notrace void probe_preemptirqsoff_hist(int reason, int starthist)
#endif
}
}
-
#endif
#ifdef CONFIG_WAKEUP_LATENCY_HIST
-static cycle_t wakeup_start;
static DEFINE_RAW_SPINLOCK(wakeup_lock);
+static notrace void probe_sched_migrate_task(struct task_struct *task, int cpu)
+{
+ int old_cpu = task_cpu(task);
+
+ if (cpu != old_cpu) {
+ unsigned long flags;
+ struct task_struct *cpu_wakeup_task;
+
+ raw_spin_lock_irqsave(&wakeup_lock, flags);
+
+ cpu_wakeup_task = per_cpu(wakeup_task, old_cpu);
+ if (task == cpu_wakeup_task) {
+ put_task_struct(cpu_wakeup_task);
+ per_cpu(wakeup_task, old_cpu) = NULL;
+ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task;
+ get_task_struct(cpu_wakeup_task);
+ }
+
+ raw_spin_unlock_irqrestore(&wakeup_lock, flags);
+ }
+}
-notrace void probe_wakeup_latency_hist_start(struct rq *rq,
+static notrace void probe_wakeup_latency_hist_start(struct rq *rq,
struct task_struct *p, int success)
{
unsigned long flags;
struct task_struct *curr = rq_curr(rq);
+ int cpu = task_cpu(p);
+ struct task_struct *cpu_wakeup_task;
+
+ raw_spin_lock_irqsave(&wakeup_lock, flags);
+
+ cpu_wakeup_task = per_cpu(wakeup_task, cpu);
if (wakeup_pid) {
+ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
+ p->prio == curr->prio)
+ per_cpu(wakeup_sharedprio, cpu) = 1;
if (likely(wakeup_pid != task_pid_nr(p)))
- return;
+ goto out;
} else {
if (likely(!rt_task(p)) ||
- p->prio >= wakeup_prio ||
- p->prio >= curr->prio)
- return;
+ (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) ||
+ p->prio > curr->prio)
+ goto out;
+ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
+ p->prio == curr->prio)
+ per_cpu(wakeup_sharedprio, cpu) = 1;
}
- raw_spin_lock_irqsave(&wakeup_lock, flags);
- if (wakeup_task)
- put_task_struct(wakeup_task);
-
- get_task_struct(p);
- wakeup_task = p;
- wakeup_prio = p->prio;
- wakeup_start = ftrace_now(raw_smp_processor_id());
+ if (cpu_wakeup_task)
+ put_task_struct(cpu_wakeup_task);
+ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p;
+ get_task_struct(cpu_wakeup_task);
+ cpu_wakeup_task->preempt_timestamp_hist =
+ ftrace_now(raw_smp_processor_id());
+out:
raw_spin_unlock_irqrestore(&wakeup_lock, flags);
}
-notrace void probe_wakeup_latency_hist_stop(struct rq *rq,
+static notrace void probe_wakeup_latency_hist_stop(struct rq *rq,
struct task_struct *prev, struct task_struct *next)
{
unsigned long flags;
- int cpu;
+ int cpu = task_cpu(next);
unsigned long latency;
cycle_t stop;
+ struct task_struct *cpu_wakeup_task;
- if (next != wakeup_task)
- return;
+ raw_spin_lock_irqsave(&wakeup_lock, flags);
- cpu = raw_smp_processor_id();
- stop = ftrace_now(cpu);
+ cpu_wakeup_task = per_cpu(wakeup_task, cpu);
- raw_spin_lock_irqsave(&wakeup_lock, flags);
- if (next != wakeup_task)
+ if (cpu_wakeup_task == NULL)
goto out;
- latency = nsecs_to_usecs(stop - wakeup_start);
- latency_hist(WAKEUP_LATENCY, cpu, latency, next);
+ /* Already running? */
+ if (unlikely(current == cpu_wakeup_task))
+ goto out_reset;
- put_task_struct(wakeup_task);
- wakeup_task = NULL;
- wakeup_prio = (unsigned)-1;
+ if (next != cpu_wakeup_task) {
+ if (next->prio < cpu_wakeup_task->prio)
+ goto out_reset;
+
+ if (next->prio == cpu_wakeup_task->prio)
+ per_cpu(wakeup_sharedprio, cpu) = 1;
+
+ goto out;
+ }
+
+ /*
+ * The task we are waiting for is about to be switched to.
+ * Calculate latency and store it in histogram.
+ */
+ stop = ftrace_now(raw_smp_processor_id());
+
+ latency = nsecs_to_usecs(stop - next->preempt_timestamp_hist);
+
+ if (per_cpu(wakeup_sharedprio, cpu)) {
+ latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, next);
+ per_cpu(wakeup_sharedprio, cpu) = 0;
+ } else
+ latency_hist(WAKEUP_LATENCY, cpu, latency, next);
+
+out_reset:
+ put_task_struct(cpu_wakeup_task);
+ per_cpu(wakeup_task, cpu) = NULL;
out:
raw_spin_unlock_irqrestore(&wakeup_lock, flags);
}
+#endif
+
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+static notrace void probe_hrtimer_interrupt(int cpu, long long latency_ns,
+ struct task_struct *curr, struct task_struct *task)
+{
+ if (latency_ns <= 0 && task != NULL && rt_task(task) &&
+ task->prio < curr->prio) {
+ unsigned long latency;
+
+ if (missed_timer_offsets_pid) {
+ if (likely(missed_timer_offsets_pid !=
+ task_pid_nr(task)))
+ return;
+ }
+ latency = (unsigned long) div_s64(-latency_ns, 1000);
+ latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, task);
+ }
+}
#endif
static __init int latency_hist_init(void)
{
struct dentry *latency_hist_root = NULL;
struct dentry *dentry;
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ struct dentry *dentry_sharedprio;
+#endif
struct dentry *entry;
- struct dentry *latency_hist_enable_root;
- int i = 0, len = 0;
+ struct dentry *enable_root;
+ int i = 0;
struct hist_data *my_hist;
char name[64];
char *cpufmt = "CPU%d";
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ char *cpufmt_maxlatproc = "max_latency-CPU%d";
+ struct maxlatproc_data *mp = NULL;
+#endif
dentry = tracing_init_dentry();
-
- latency_hist_root =
- debugfs_create_dir(latency_hist_dir_root, dentry);
-
- latency_hist_enable_root =
- debugfs_create_dir("enable", latency_hist_root);
+ latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry);
+ enable_root = debugfs_create_dir("enable", latency_hist_root);
#ifdef CONFIG_INTERRUPT_OFF_HIST
dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root);
for_each_possible_cpu(i) {
- len = sprintf(name, cpufmt, i);
- name[len] = '\0';
+ sprintf(name, cpufmt, i);
entry = debugfs_create_file(name, 0444, dentry,
- &per_cpu(irqsoff_hist, i),
- &latency_hist_fops);
+ &per_cpu(irqsoff_hist, i), &latency_hist_fops);
my_hist = &per_cpu(irqsoff_hist, i);
atomic_set(&my_hist->hist_mode, 1);
my_hist->min_lat = 0xFFFFFFFFUL;
}
entry = debugfs_create_file("reset", 0644, dentry,
- (void *)IRQSOFF_LATENCY,
- &latency_hist_reset_fops);
+ (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops);
#endif
#ifdef CONFIG_PREEMPT_OFF_HIST
dentry = debugfs_create_dir(preemptoff_hist_dir,
- latency_hist_root);
+ latency_hist_root);
for_each_possible_cpu(i) {
- len = sprintf(name, cpufmt, i);
- name[len] = '\0';
+ sprintf(name, cpufmt, i);
entry = debugfs_create_file(name, 0444, dentry,
- &per_cpu(preemptoff_hist, i),
- &latency_hist_fops);
+ &per_cpu(preemptoff_hist, i), &latency_hist_fops);
my_hist = &per_cpu(preemptoff_hist, i);
atomic_set(&my_hist->hist_mode, 1);
my_hist->min_lat = 0xFFFFFFFFUL;
}
entry = debugfs_create_file("reset", 0644, dentry,
- (void *)PREEMPTOFF_LATENCY,
- &latency_hist_reset_fops);
+ (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops);
#endif
#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
dentry = debugfs_create_dir(preemptirqsoff_hist_dir,
- latency_hist_root);
+ latency_hist_root);
for_each_possible_cpu(i) {
- len = sprintf(name, cpufmt, i);
- name[len] = '\0';
+ sprintf(name, cpufmt, i);
entry = debugfs_create_file(name, 0444, dentry,
- &per_cpu(preemptirqsoff_hist, i),
- &latency_hist_fops);
+ &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops);
my_hist = &per_cpu(preemptirqsoff_hist, i);
atomic_set(&my_hist->hist_mode, 1);
my_hist->min_lat = 0xFFFFFFFFUL;
}
entry = debugfs_create_file("reset", 0644, dentry,
- (void *)PREEMPTIRQSOFF_LATENCY,
- &latency_hist_reset_fops);
+ (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops);
#endif
#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
entry = debugfs_create_file("preemptirqsoff", 0644,
- latency_hist_enable_root,
- (void *)&preemptirqsoff_enabled_data,
- &latency_hist_enable_fops);
+ enable_root, (void *)&preemptirqsoff_enabled_data,
+ &enable_fops);
#endif
#ifdef CONFIG_WAKEUP_LATENCY_HIST
dentry = debugfs_create_dir(wakeup_latency_hist_dir,
- latency_hist_root);
+ latency_hist_root);
+ dentry_sharedprio = debugfs_create_dir(
+ wakeup_latency_hist_dir_sharedprio, dentry);
for_each_possible_cpu(i) {
- len = sprintf(name, cpufmt, i);
- name[len] = '\0';
+ sprintf(name, cpufmt, i);
+
entry = debugfs_create_file(name, 0444, dentry,
- &per_cpu(wakeup_latency_hist, i),
- &latency_hist_fops);
+ &per_cpu(wakeup_latency_hist, i),
+ &latency_hist_fops);
my_hist = &per_cpu(wakeup_latency_hist, i);
atomic_set(&my_hist->hist_mode, 1);
my_hist->min_lat = 0xFFFFFFFFUL;
- len = sprintf(name, "max_latency-CPU%d", i);
- name[len] = '\0';
- entry = debugfs_create_file(name, 0444, dentry,
- &per_cpu(wakeup_maxlatproc, i),
- &latency_hist_maxlatproc_fops);
+ entry = debugfs_create_file(name, 0444, dentry_sharedprio,
+ &per_cpu(wakeup_latency_hist_sharedprio, i),
+ &latency_hist_fops);
+ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i);
+ atomic_set(&my_hist->hist_mode, 1);
+ my_hist->min_lat = 0xFFFFFFFFUL;
+
+ sprintf(name, cpufmt_maxlatproc, i);
+
+ mp = &per_cpu(wakeup_maxlatproc, i);
+ entry = debugfs_create_file(name, 0444, dentry, mp,
+ &maxlatproc_fops);
+ mp->prio = mp->pid = mp->latency = -1;
+
+ mp = &per_cpu(wakeup_maxlatproc_sharedprio, i);
+ entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp,
+ &maxlatproc_fops);
+ mp->prio = mp->pid = mp->latency = -1;
}
entry = debugfs_create_file("pid", 0644, dentry,
- (void *)&wakeup_pid,
- &latency_hist_pid_fops);
+ (void *)&wakeup_pid, &pid_fops);
entry = debugfs_create_file("reset", 0644, dentry,
- (void *)WAKEUP_LATENCY,
- &latency_hist_reset_fops);
+ (void *)WAKEUP_LATENCY, &latency_hist_reset_fops);
+ entry = debugfs_create_file("reset", 0644, dentry_sharedprio,
+ (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops);
entry = debugfs_create_file("wakeup", 0644,
- latency_hist_enable_root,
- (void *)&wakeup_latency_enabled_data,
- &latency_hist_enable_fops);
+ enable_root, (void *)&wakeup_latency_enabled_data,
+ &enable_fops);
+#endif
+
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ dentry = debugfs_create_dir(missed_timer_offsets_dir,
+ latency_hist_root);
+ for_each_possible_cpu(i) {
+ sprintf(name, cpufmt, i);
+ entry = debugfs_create_file(name, 0444, dentry,
+ &per_cpu(missed_timer_offsets, i), &latency_hist_fops);
+ my_hist = &per_cpu(missed_timer_offsets, i);
+ atomic_set(&my_hist->hist_mode, 1);
+ my_hist->min_lat = 0xFFFFFFFFUL;
+
+ sprintf(name, cpufmt_maxlatproc, i);
+ mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
+ entry = debugfs_create_file(name, 0444, dentry, mp,
+ &maxlatproc_fops);
+ mp->prio = mp->pid = mp->latency = -1;
+ }
+ entry = debugfs_create_file("pid", 0644, dentry,
+ (void *)&missed_timer_offsets_pid, &pid_fops);
+ entry = debugfs_create_file("reset", 0644, dentry,
+ (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops);
+ entry = debugfs_create_file("missed_timer_offsets", 0644,
+ enable_root, (void *)&missed_timer_offsets_enabled_data,
+ &enable_fops);
#endif
return 0;
}
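
[Editor's note: piecing together the seq_printf() format strings in l_start() and l_show() above, a per-CPU histogram file should come out roughly like this. The numbers are invented for illustration, the offset is 0 for the unipolar histograms, and the upper bound is MAX_ENTRY_NUM, whose value is not visible in this diff:]

#Minimum latency: 2 microseconds
#Average latency: 11 microseconds
#Maximum latency: 275 microseconds
#Total samples: 1234567
#There are 0 samples lower than 0 microseconds.
#There are 3 samples greater or equal than <MAX_ENTRY_NUM> microseconds.
#usecs	         samples
     0	               0
     1	             118
     2	           23456
     3	           98765
...

[With a non-zero offset (MAX_ENTRY_NUM/2, the bipolar case mentioned in the struct hist_data comment), the index column runs from -offset upward, which is what the switch from %5d to %6ld in l_show() accommodates.]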