Diffstat (limited to 'include/trace/ftrace.h')
 -rw-r--r--  include/trace/ftrace.h | 56
 1 file changed, 31 insertions, 25 deletions
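
This header implements the expansion stages for TRACE_EVENT(), and the hunks below largely update the comments documenting the code those stages generate. For orientation, a minimal sketch of a hypothetical event definition that would be fed through these macros; the system name, event name, fields and format string are invented for illustration, and the __string() field is an example of the variable-size data whose length the ftrace_get_offsets_<call>() call described below accounts for:

/* Hypothetical trace header; names are invented for illustration. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sample

#if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SAMPLE_H

#include <linux/tracepoint.h>

TRACE_EVENT(sample_event,

        TP_PROTO(int value, const char *name),

        TP_ARGS(value, name),

        TP_STRUCT__entry(
                __field(int, value)
                __string(name, name)
        ),

        TP_fast_assign(
                __entry->value = value;
                __assign_str(name, name);
        ),

        TP_printk("value=%d name=%s", __entry->value, __get_str(name))
);

#endif /* _TRACE_SAMPLE_H */

/* this part must stay outside the multi-read protection */
#include <trace/define_trace.h>

Exactly one .c file defines CREATE_TRACE_POINTS before including such a header, so that <trace/define_trace.h> pulls in this file and emits the per-event code described in the stages below, with <call> standing in for sample_event.
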
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index ea6f9d4a20e9..16253db38d73 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -154,9 +154,11 @@
*
* field = (typeof(field))entry;
*
- * p = get_cpu_var(ftrace_event_seq);
+ * p = &get_cpu_var(ftrace_event_seq);
* trace_seq_init(p);
- * ret = trace_seq_printf(s, <TP_printk> "\n");
+ * ret = trace_seq_printf(s, "%s: ", <call>);
+ * if (ret)
+ * ret = trace_seq_printf(s, <TP_printk> "\n");
* put_cpu();
* if (!ret)
* return TRACE_TYPE_PARTIAL_LINE;
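
The updated stage-2 pseudo-code above chains the two trace_seq_printf() calls on their return value: the "<call>: " prefix is written first, the TP_printk body is appended only if the prefix fit, and a zero return from either write is reported as TRACE_TYPE_PARTIAL_LINE. A small userspace analog of that chaining, with invented names and a deliberately tiny buffer standing in for the per-CPU trace_seq:

#include <stdarg.h>
#include <stdio.h>

struct seq_buf {
        char buf[24];           /* deliberately small to force overflow */
        size_t len;
};

/* returns 1 if the formatted text fit, 0 otherwise (mirroring the
 * "if (!ret) return TRACE_TYPE_PARTIAL_LINE" check above) */
static int seq_printf(struct seq_buf *s, const char *fmt, ...)
{
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = vsnprintf(s->buf + s->len, sizeof(s->buf) - s->len, fmt, ap);
        va_end(ap);

        if (ret < 0 || (size_t)ret >= sizeof(s->buf) - s->len) {
                s->buf[s->len] = '\0';  /* discard the partial write */
                return 0;
        }
        s->len += ret;
        return 1;
}

int main(void)
{
        struct seq_buf s = { .len = 0 };
        int ret;

        /* prefix first, body only if the prefix fit */
        ret = seq_printf(&s, "%s: ", "sample_event");
        if (ret)
                ret = seq_printf(&s, "value=%d name=%s\n", 42, "a long name");

        printf("%s%s", s.buf, ret ? "" : "[PARTIAL LINE]\n");
        return 0;
}
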
@@ -450,38 +452,38 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
*
* static void ftrace_raw_event_<call>(proto)
* {
+ * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
* struct ring_buffer_event *event;
* struct ftrace_raw_<call> *entry; <-- defined in stage 1
* struct ring_buffer *buffer;
* unsigned long irq_flags;
+ * int __data_size;
* int pc;
*
* local_save_flags(irq_flags);
* pc = preempt_count();
*
+ * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
* event = trace_current_buffer_lock_reserve(&buffer,
* event_<call>.id,
- * sizeof(struct ftrace_raw_<call>),
+ * sizeof(*entry) + __data_size,
* irq_flags, pc);
* if (!event)
* return;
* entry = ring_buffer_event_data(event);
*
- * <assign>; <-- Here we assign the entries by the __field and
- * __array macros.
+ * { <assign>; } <-- Here we assign the entries by the __field and
+ * __array macros.
*
- * trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ * if (!filter_current_check_discard(buffer, event_call, entry, event))
+ * trace_current_buffer_unlock_commit(buffer,
+ * event, irq_flags, pc);
* }
*
* static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
* {
- * int ret;
- *
- * ret = register_trace_<call>(ftrace_raw_event_<call>);
- * if (!ret)
- * pr_info("event trace: Could not activate trace point "
- * "probe to <call>");
- * return ret;
+ * return register_trace_<call>(ftrace_raw_event_<call>);
* }
*
* static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
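
The stage-3 pseudo-code above now reserves sizeof(*entry) + __data_size instead of a fixed sizeof(struct ftrace_raw_<call>), where __data_size comes from ftrace_get_offsets_<call>(), so that __string()/__dynamic_array() payloads are stored after the fixed fields of the entry. A userspace sketch of that layout, with an invented record type and an explicit offset/length pair where the kernel instead packs offsets into ftrace_data_offsets_<call>:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* fixed part of a hypothetical "sample_event" record; the string payload
 * lives in data[] and is found through its recorded offset */
struct sample_entry {
        int value;
        unsigned short name_off;        /* offset of the string in data[] */
        unsigned short name_len;
        char data[];
};

static struct sample_entry *sample_entry_alloc(int value, const char *name)
{
        size_t data_size = strlen(name) + 1;    /* like __data_size */
        struct sample_entry *entry;

        /* like the sizeof(*entry) + __data_size reservation above */
        entry = malloc(sizeof(*entry) + data_size);
        if (!entry)
                return NULL;

        entry->value = value;
        entry->name_off = 0;
        entry->name_len = data_size;
        memcpy(entry->data + entry->name_off, name, data_size);
        return entry;
}

int main(void)
{
        struct sample_entry *entry = sample_entry_alloc(42, "dynamic name");

        if (entry) {
                printf("value=%d name=%s\n", entry->value,
                       entry->data + entry->name_off);
                free(entry);
        }
        return 0;
}
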
@@ -493,6 +495,8 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
* .trace = ftrace_raw_output_<call>, <-- stage 2
* };
*
+ * static const char print_fmt_<call>[] = <TP_printk>;
+ *
* static struct ftrace_event_call __used
* __attribute__((__aligned__(4)))
* __attribute__((section("_ftrace_events"))) event_<call> = {
@@ -501,6 +505,8 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
* .raw_init = trace_event_raw_init,
* .regfunc = ftrace_reg_event_<call>,
* .unregfunc = ftrace_unreg_event_<call>,
+ * .print_fmt = print_fmt_<call>,
+ * .define_fields = ftrace_define_fields_<call>,
* }
*
*/
@@ -569,7 +575,6 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
return; \
entry = ring_buffer_event_data(event); \
\
- \
tstruct \
\
{ assign; } \
@@ -758,13 +763,12 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void \
perf_trace_templ_##call(struct ftrace_event_call *event_call, \
- proto) \
+ struct pt_regs *__regs, proto) \
{ \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
struct ftrace_raw_##call *entry; \
u64 __addr = 0, __count = 1; \
unsigned long irq_flags; \
- struct pt_regs *__regs; \
int __entry_size; \
int __data_size; \
int rctx; \
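
The hunk above drops the locally declared struct pt_regs * from the shared template and takes it as a parameter instead; the following hunk makes the per-event perf_trace_##call() wrapper fetch the registers itself, with a skip level of 1 rather than 2 since there is now one frame less between the capture and the tracepoint call site, and keeps the per-CPU perf_trace_regs held across the template call. A userspace analog of that split, using __builtin_return_address() as a stand-in for perf_fetch_caller_regs(): the outer wrapper captures its caller's context once and hands it to the shared reporting template, so the template never has to reach past its own frame.

#include <stdio.h>

/* shared template: consumes the caller context it is handed, rather than
 * trying to look back through its own frame */
static void report_template(void *caller_ip, int value)
{
        printf("sample_event from %p: value=%d\n", caller_ip, value);
}

/* per-event wrapper: captures the caller's context once, like
 * perf_fetch_caller_regs() in the DEFINE_EVENT() expansion below, then
 * hands it to the template; noinline keeps the captured frame meaningful */
static __attribute__((noinline)) void trace_sample_event(int value)
{
        void *caller_ip = __builtin_return_address(0);

        report_template(caller_ip, value);
}

int main(void)
{
        trace_sample_event(42);
        return 0;
}
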
@@ -785,20 +789,22 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \
\
{ assign; } \
\
- __regs = &__get_cpu_var(perf_trace_regs); \
- perf_fetch_caller_regs(__regs, 2); \
- \
perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
__count, irq_flags, __regs); \
}
#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
-static notrace void perf_trace_##call(proto) \
-{ \
- struct ftrace_event_call *event_call = &event_##call; \
- \
- perf_trace_templ_##template(event_call, args); \
+#define DEFINE_EVENT(template, call, proto, args) \
+static notrace void perf_trace_##call(proto) \
+{ \
+ struct ftrace_event_call *event_call = &event_##call; \
+ struct pt_regs *__regs = &get_cpu_var(perf_trace_regs); \
+ \
+ perf_fetch_caller_regs(__regs, 1); \
+ \
+ perf_trace_templ_##template(event_call, __regs, args); \
+ \
+ put_cpu_var(perf_trace_regs); \
}
#undef DEFINE_EVENT_PRINT