path: root/include/linux/ftrace.h
author	Steven Rostedt (Red Hat) <rostedt@goodmis.org>	2013-11-07 09:36:25 -0500
committer	Steven Rostedt <rostedt@goodmis.org>	2014-02-20 12:13:12 -0500
commit	b7e00a6c53e9134d5cf7631582acaf027a5ded26 (patch)
tree	eb30ea8ede4a31e1b848d0b2c92eeef9a822ebe5	/include/linux/ftrace.h
parent	f1b21c9a40704dfdf7b8423c7d2969ea31c9857d (diff)
ftrace: Add private data to ftrace_ops
Passing data to the function callback was originally done by adding the
ftrace_ops in another structure, and using the container_of() to get the
field. But this adds a bit more complexity than it is worth, and adding a
simple .private field to ftrace_ops makes things a lot easier.

But be warned, the .private data should not be freed once it is used
unless the ftrace_ops itself has gone through the necessary freeing
routines. A simple synchronize_sched() is not enough as functions can be
traced that are called outside the view of RCU and all its concoctions.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
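For illustration only (not part of the commit): a minimal sketch of the old
container_of() pattern next to the new .private pattern. The my_data,
my_callback_old and my_callback_new names are hypothetical, and the callback
signature assumed here is the four-argument ftrace_func_t of this kernel era.

	#include <linux/ftrace.h>
	#include <linux/ptrace.h>
	#include <linux/kernel.h>
	#include <linux/atomic.h>

	/* Hypothetical per-tracer state; not part of the commit. */
	struct my_data {
		struct ftrace_ops	ops;	/* only needed by the old pattern */
		atomic_t		hits;
	};

	/* Old pattern: embed the ftrace_ops and recover the outer structure. */
	static void my_callback_old(unsigned long ip, unsigned long parent_ip,
				    struct ftrace_ops *op, struct pt_regs *regs)
	{
		struct my_data *data = container_of(op, struct my_data, ops);

		atomic_inc(&data->hits);
	}

	/* New pattern: read the pointer stashed in ftrace_ops.private. */
	static void my_callback_new(unsigned long ip, unsigned long parent_ip,
				    struct ftrace_ops *op, struct pt_regs *regs)
	{
		struct my_data *data = op->private;

		atomic_inc(&data->hits);
	}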
Diffstat (limited to 'include/linux/ftrace.h')
-rw-r--r--	include/linux/ftrace.h	12
1 file changed, 12 insertions, 0 deletions
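Continuing the hypothetical sketch above, the freeing rule from the commit
message (and from the comment added in the diff below) could look roughly like
this. register_ftrace_function(), unregister_ftrace_function() and
schedule_on_each_cpu() are existing kernel interfaces; everything prefixed
with my_ is made up for the example.

	#include <linux/ftrace.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	static struct ftrace_ops my_ops = {
		.func	= my_callback_new,
	};

	static int my_tracer_init(void)
	{
		my_ops.private = kzalloc(sizeof(struct my_data), GFP_KERNEL);
		if (!my_ops.private)
			return -ENOMEM;

		return register_ftrace_function(&my_ops);
	}

	/* Empty work function: scheduling it on every CPU guarantees that any
	 * callback still executing (possibly outside RCU's view) has returned. */
	static void my_ftrace_sync(struct work_struct *work)
	{
	}

	static void my_tracer_exit(void)
	{
		unregister_ftrace_function(&my_ops);

		/* A plain synchronize_sched() is not enough here. */
		schedule_on_each_cpu(my_ftrace_sync);

		kfree(my_ops.private);
	}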
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index f4233b195dab..ef1607ed7044 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -105,11 +105,23 @@ enum {
 	FTRACE_OPS_FL_INITIALIZED	= 1 << 8,
 };
 
+/*
+ * Note, ftrace_ops can be referenced outside of RCU protection.
+ * (Although, for perf, the control ops prevent that). If ftrace_ops is
+ * allocated and not part of kernel core data, the unregistering of it will
+ * perform a scheduling on all CPUs to make sure that there are no more users.
+ * Depending on the load of the system that may take a bit of time.
+ *
+ * Any private data added must also take care not to be freed and if private
+ * data is added to a ftrace_ops that is in core code, the user of the
+ * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
+ */
 struct ftrace_ops {
 	ftrace_func_t			func;
 	struct ftrace_ops		*next;
 	unsigned long			flags;
 	int __percpu			*disabled;
+	void				*private;
 #ifdef CONFIG_DYNAMIC_FTRACE
 	struct ftrace_hash		*notrace_hash;
 	struct ftrace_hash		*filter_hash;