author     Frederic Weisbecker <fweisbec@gmail.com>  2012-07-11 20:26:31 +0200
committer  Frederic Weisbecker <fweisbec@gmail.com>  2012-09-26 15:46:55 +0200
commit     c5d900bf676b1e2a61c44483932c8088651bbb4e (patch)
tree       881ee6420ba291d68d451986d5ed9832ee95661f /kernel/rcutree.c
parent     2b1d5024e17be459aa6385763ca3faa8f01c52d9 (diff)
rcu: Allow rcu_user_enter()/exit() to nest
Allow calls to rcu_user_enter() even if we are already in userspace (as seen by RCU), and allow calls to rcu_user_exit() even if we are already in the kernel. This makes the APIs more flexible for architectures to call: exception entries, for example, won't need to know whether they came from userspace before calling rcu_user_exit().

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Alessio Igor Bogani <abogani@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Avi Kivity <avi@redhat.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Gilad Ben Yossef <gilad@benyossef.com>
Cc: Hakan Akkan <hakanakkan@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Kevin Hilman <khilman@ti.com>
Cc: Max Krasnyansky <maxk@qualcomm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Sven-Thorsten Dietrich <thebigcorporation@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
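The nesting is made safe by a per-CPU flag, rdtp->in_user, that records whether RCU already treats the CPU as being in userspace. A condensed sketch of the guard logic this patch adds (simplified from the hunks below; the real functions also disable irqs around the check and warn on unexpected state):

/* Condensed sketch only -- not the exact kernel code; rdtp stands for
 * the per-CPU rcu_dynticks structure (&__get_cpu_var(rcu_dynticks)). */
void rcu_user_enter(void)
{
	if (in_interrupt())
		return;			/* irq paths keep their own RCU accounting */
	if (!rdtp->in_user) {		/* outermost call on this CPU... */
		rdtp->in_user = true;
		rcu_eqs_enter(1);	/* ...really enters the extended quiescent state */
	}				/* nested calls fall through as no-ops */
}

void rcu_user_exit(void)
{
	if (in_interrupt())
		return;
	if (rdtp->in_user) {		/* only if RCU actually is in userspace mode... */
		rdtp->in_user = false;
		rcu_eqs_exit(1);	/* ...really leaves the extended quiescent state */
	}				/* calls made while already in the kernel are no-ops */
}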
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c  41
1 file changed, 33 insertions(+), 8 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 79fa2db1595b..d62c04482228 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -366,11 +366,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
*/
static void rcu_eqs_enter(bool user)
{
- unsigned long flags;
long long oldval;
struct rcu_dynticks *rdtp;
- local_irq_save(flags);
rdtp = &__get_cpu_var(rcu_dynticks);
oldval = rdtp->dynticks_nesting;
WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
@@ -379,7 +377,6 @@ static void rcu_eqs_enter(bool user)
else
rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
rcu_eqs_enter_common(rdtp, oldval, user);
- local_irq_restore(flags);
}
/**
@@ -396,7 +393,11 @@ static void rcu_eqs_enter(bool user)
*/
void rcu_idle_enter(void)
{
+ unsigned long flags;
+
+ local_irq_save(flags);
rcu_eqs_enter(0);
+ local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -411,6 +412,9 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
*/
void rcu_user_enter(void)
{
+ unsigned long flags;
+ struct rcu_dynticks *rdtp;
+
/*
* Some contexts may involve an exception occurring in an irq,
* leading to that nesting:
@@ -422,7 +426,15 @@ void rcu_user_enter(void)
if (in_interrupt())
return;
- rcu_eqs_enter(1);
+ WARN_ON_ONCE(!current->mm);
+
+ local_irq_save(flags);
+ rdtp = &__get_cpu_var(rcu_dynticks);
+ if (!rdtp->in_user) {
+ rdtp->in_user = true;
+ rcu_eqs_enter(1);
+ }
+ local_irq_restore(flags);
}
/**
@@ -516,11 +528,9 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
*/
static void rcu_eqs_exit(bool user)
{
- unsigned long flags;
struct rcu_dynticks *rdtp;
long long oldval;
- local_irq_save(flags);
rdtp = &__get_cpu_var(rcu_dynticks);
oldval = rdtp->dynticks_nesting;
WARN_ON_ONCE(oldval < 0);
@@ -529,7 +539,6 @@ static void rcu_eqs_exit(bool user)
else
rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
rcu_eqs_exit_common(rdtp, oldval, user);
- local_irq_restore(flags);
}
/**
@@ -545,7 +554,11 @@ static void rcu_eqs_exit(bool user)
*/
void rcu_idle_exit(void)
{
+ unsigned long flags;
+
+ local_irq_save(flags);
rcu_eqs_exit(0);
+ local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -558,6 +571,9 @@ EXPORT_SYMBOL_GPL(rcu_idle_exit);
*/
void rcu_user_exit(void)
{
+ unsigned long flags;
+ struct rcu_dynticks *rdtp;
+
/*
* Some contexts may involve an exception occurring in an irq,
* leading to that nesting:
@@ -569,7 +585,13 @@ void rcu_user_exit(void)
if (in_interrupt())
return;
- rcu_eqs_exit(1);
+ local_irq_save(flags);
+ rdtp = &__get_cpu_var(rcu_dynticks);
+ if (rdtp->in_user) {
+ rdtp->in_user = false;
+ rcu_eqs_exit(1);
+ }
+ local_irq_restore(flags);
}
/**
@@ -2586,6 +2608,9 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
+#ifdef CONFIG_RCU_USER_QS
+ WARN_ON_ONCE(rdp->dynticks->in_user);
+#endif
rdp->cpu = cpu;
rdp->rsp = rsp;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
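With these guards in place, architecture entry code no longer needs to test where an exception came from before informing RCU. A hypothetical exception entry/exit pair (illustrative names only, not part of this patch) could look like:

/* Hypothetical arch-level handlers -- function names are illustrative. */
void arch_exception_entry(struct pt_regs *regs)
{
	rcu_user_exit();	/* safe even if we were already in the kernel:
				 * the call is a no-op unless RCU was in userspace mode */

	/* ... handle the exception ... */
}

void arch_exception_exit(struct pt_regs *regs)
{
	if (user_mode(regs))
		rcu_user_enter();	/* returning to userspace: re-enter the
					 * userspace extended quiescent state */
}

The entry side is exactly the case called out in the changelog: rcu_user_exit() may now be called unconditionally, whether the exception interrupted userspace or the kernel.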