path: root/kernel/context_tracking.c
author	Frederic Weisbecker <fweisbec@gmail.com>	2012-11-27 19:33:25 +0100
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-11-30 11:40:07 -0800
commit	91d1aa43d30505b0b825db8898ffc80a8eca96c7 (patch)
tree	911636f846d800c8a44efd540842dc726ec7c191 /kernel/context_tracking.c
parent	4e79752c25ec221ac1e28f8875b539ed7631a0db (diff)
download	lwn-91d1aa43d30505b0b825db8898ffc80a8eca96c7.tar.gz
	lwn-91d1aa43d30505b0b825db8898ffc80a8eca96c7.zip
context_tracking: New context tracking subsystem
Create a new subsystem that probes kernel boundaries to keep track of the transitions between level contexts, with two basic initial contexts: user or kernel. This is an abstraction of some RCU code that uses such tracking to implement its userspace extended quiescent state.

We need to pull this up from RCU into this new level of indirection because this tracking is also going to be used to implement an "on demand" generic virtual cputime accounting, a necessary step to shut down the tick while still accounting the cputime.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Gilad Ben-Yossef <gilad@benyossef.com>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
[ paulmck: fix whitespace error and email address. ]
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
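As illustration only (not part of this patch): once an architecture sets TIF_NOHZ and wires these hooks into its entry code, the expected usage is to call user_exit() when control comes back from userspace and user_enter() just before returning to it. The helpers arch_syscall_enter_work()/arch_syscall_exit_work() below are hypothetical placeholders for such an entry path.

#include <linux/context_tracking.h>

/*
 * Hypothetical arch entry-path helpers, shown only to sketch the intended
 * calling convention of the new hooks.
 */
static void arch_syscall_enter_work(void)
{
	user_exit();	/* userspace -> kernel: RCU must watch this CPU again */
	/* ... tracing, audit, seccomp, then syscall dispatch ... */
}

static void arch_syscall_exit_work(void)
{
	/* ... pending return-to-user work has been handled ... */
	user_enter();	/* kernel -> userspace: RCU may enter its extended quiescent state */
}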
Diffstat (limited to 'kernel/context_tracking.c')
-rw-r--r--	kernel/context_tracking.c	83
1 file changed, 83 insertions(+), 0 deletions(-)
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
new file mode 100644
index 000000000000..e0e07fd55508
--- /dev/null
+++ b/kernel/context_tracking.c
@@ -0,0 +1,83 @@
+#include <linux/context_tracking.h>
+#include <linux/rcupdate.h>
+#include <linux/sched.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+
+struct context_tracking {
+	/*
+	 * When active is false, hooks are not set to
+	 * minimize overhead: TIF flags are cleared
+	 * and calls to user_enter/exit are ignored. This
+	 * may be further optimized using static keys.
+	 */
+	bool active;
+	enum {
+		IN_KERNEL = 0,
+		IN_USER,
+	} state;
+};
+
+static DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
+#ifdef CONFIG_CONTEXT_TRACKING_FORCE
+	.active = true,
+#endif
+};
+
+void user_enter(void)
+{
+	unsigned long flags;
+
+	/*
+	 * Some contexts may involve an exception occurring in an irq,
+	 * leading to that nesting:
+	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
+	 * helpers are enough to protect RCU uses inside the exception. So
+	 * just return immediately if we detect we are in an IRQ.
+	 */
+	if (in_interrupt())
+		return;
+
+	WARN_ON_ONCE(!current->mm);
+
+	local_irq_save(flags);
+	if (__this_cpu_read(context_tracking.active) &&
+	    __this_cpu_read(context_tracking.state) != IN_USER) {
+		__this_cpu_write(context_tracking.state, IN_USER);
+		rcu_user_enter();
+	}
+	local_irq_restore(flags);
+}
+
+void user_exit(void)
+{
+	unsigned long flags;
+
+	/*
+	 * Some contexts may involve an exception occurring in an irq,
+	 * leading to that nesting:
+	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
+	 * helpers are enough to protect RCU uses inside the exception. So
+	 * just return immediately if we detect we are in an IRQ.
+	 */
+	if (in_interrupt())
+		return;
+
+	local_irq_save(flags);
+	if (__this_cpu_read(context_tracking.state) == IN_USER) {
+		__this_cpu_write(context_tracking.state, IN_KERNEL);
+		rcu_user_exit();
+	}
+	local_irq_restore(flags);
+}
+
+void context_tracking_task_switch(struct task_struct *prev,
+				  struct task_struct *next)
+{
+	if (__this_cpu_read(context_tracking.active)) {
+		clear_tsk_thread_flag(prev, TIF_NOHZ);
+		set_tsk_thread_flag(next, TIF_NOHZ);
+	}
+}
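For completeness, a minimal sketch (not from this commit) of the call site this last hook is designed for: the scheduler's context-switch path invokes context_tracking_task_switch() so that the TIF_NOHZ flag follows the task that is actually running on the CPU. The wrapper name example_prepare_task_switch() is hypothetical.

static inline void example_prepare_task_switch(struct task_struct *prev,
					       struct task_struct *next)
{
	/*
	 * Migrate TIF_NOHZ from the outgoing task to the incoming one,
	 * which only has an effect when context tracking is active on
	 * this CPU; the architecture's switch_to() would follow.
	 */
	context_tracking_task_switch(prev, next);
}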