author     Paul E. McKenney <paulmck@kernel.org>    2020-04-03 12:10:28 -0700
committer  Paul E. McKenney <paulmck@kernel.org>    2020-04-27 11:03:53 -0700
commit     e5a971d76d701dbff9e5dbaa84dc9e8c3081a867 (patch)
tree       90c379f06273b82cee257aab6ac20848e3a45e62
parent     25246fc83155b254534ce579fb713828fb5e621a (diff)
ftrace: Use synchronize_rcu_tasks_rude() instead of ftrace_sync()
This commit replaces the schedule_on_each_cpu(ftrace_sync) instances
with synchronize_rcu_tasks_rude().
Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Ingo Molnar <mingo@redhat.com>
[ paulmck: Make Kconfig adjustments noted by kbuild test robot. ]
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
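
For context on what the replacement buys: schedule_on_each_cpu() with an empty
work function forces every CPU through the scheduler, even CPUs that are idle
or running in userspace, which is exactly the guarantee an RCU Tasks Rude
grace period provides. A minimal caller-side sketch of the old and new
patterns (the wrapper names are illustrative, not taken from this patch):

/* Illustrative sketch only -- not part of this patch. */
#include <linux/workqueue.h>
#include <linux/rcupdate.h>

static void dummy_sync(struct work_struct *work)
{
	/* Empty: running it merely forces this CPU through the scheduler. */
}

static void wait_for_all_cpus_old(void)
{
	/* Old pattern: queue the empty work item on every CPU and wait. */
	schedule_on_each_cpu(dummy_sync);
}

static void wait_for_all_cpus_new(void)
{
	/*
	 * New pattern: an RCU Tasks Rude grace period waits for every CPU,
	 * including CPUs that are idle or executing in userspace.
	 */
	synchronize_rcu_tasks_rude();
}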
-rw-r--r--  kernel/trace/Kconfig  |  1
-rw-r--r--  kernel/trace/ftrace.c | 17
2 files changed, 4 insertions, 14 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 402eef84c859..ae69010d521a 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -158,6 +158,7 @@ config FUNCTION_TRACER
 	select CONTEXT_SWITCH_TRACER
 	select GLOB
 	select TASKS_RCU if PREEMPTION
+	select TASKS_RUDE_RCU
 	help
 	  Enable the kernel to trace every kernel function. This is done
 	  by using a compiler feature to insert a small, 5-byte No-Operation
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 041694a1eb74..771eace959f3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -160,17 +160,6 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 	op->saved_func(ip, parent_ip, op, regs);
 }
 
-static void ftrace_sync(struct work_struct *work)
-{
-	/*
-	 * This function is just a stub to implement a hard force
-	 * of synchronize_rcu(). This requires synchronizing
-	 * tasks even in userspace and idle.
-	 *
-	 * Yes, function tracing is rude.
-	 */
-}
-
 static void ftrace_sync_ipi(void *data)
 {
 	/* Probably not needed, but do it anyway */
@@ -256,7 +245,7 @@ static void update_ftrace_function(void)
 	 * Make sure all CPUs see this. Yes this is slow, but static
 	 * tracing is slow and nasty to have enabled.
 	 */
-	schedule_on_each_cpu(ftrace_sync);
+	synchronize_rcu_tasks_rude();
 	/* Now all cpus are using the list ops. */
 	function_trace_op = set_function_trace_op;
 	/* Make sure the function_trace_op is visible on all CPUs */
@@ -2932,7 +2921,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
 		 * infrastructure to do the synchronization, thus we must do it
 		 * ourselves.
 		 */
-		schedule_on_each_cpu(ftrace_sync);
+		synchronize_rcu_tasks_rude();
 
 		/*
 		 * When the kernel is preeptive, tasks can be preempted
@@ -5887,7 +5876,7 @@ ftrace_graph_release(struct inode *inode, struct file *file)
 		 * infrastructure to do the synchronization, thus we must do it
 		 * ourselves.
 		 */
-		schedule_on_each_cpu(ftrace_sync);
+		synchronize_rcu_tasks_rude();
 
 		free_ftrace_hash(old_hash);
 	}
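
A closing note on the Kconfig hunk: FUNCTION_TRACER now selects
TASKS_RUDE_RCU, presumably because synchronize_rcu_tasks_rude() is only
available when that option is enabled (this is the Kconfig adjustment
mentioned in the changelog). Conceptually, a rude grace period can be thought
of as doing the same thing the removed ftrace_sync() stub did; the names
below are illustrative, not a quote of the RCU code:

/* Conceptual sketch of a "rude" grace period; illustrative names only. */
#include <linux/workqueue.h>

static void be_rude(struct work_struct *work)
{
	/* Empty work item: running it forces a context switch on this CPU. */
}

static void rude_grace_period_wait(void)
{
	/* Queue the empty work item on every CPU and wait for all of them. */
	schedule_on_each_cpu(be_rude);
}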