author     Mike Galbraith <efault@gmx.de>                 2018-08-15 09:05:29 -0700
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2018-08-30 16:03:54 -0700
commit     894d45bbf7e7569ec2aa845155801fd503b5f1bf (patch)
tree       ee371190b7bb7b90812b24360d839eb71681d470 /kernel/rcu/tree.c
parent     8d8a9d0e7eda9feeee4af7be31932e14b512d3ad (diff)
rcu: Convert rcu_state.ofl_lock to raw_spinlock_t
Commit 1e64b15a4b10 ("rcu: Fix grace-period hangs due to race with CPU offline")
added a spinlock_t ofl_lock to the rcu_state structure, which is then taken with
preemption disabled during CPU offline. That gives the -rt patchset's sleeping
spinlocks heartburn, because a sleeping lock must not be acquired while
preemption is disabled.
This commit therefore converts ->ofl_lock to raw_spinlock_t.
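For background: under the -rt (PREEMPT_RT) patchset, spinlock_t is backed by a
sleeping rt_mutex, while raw_spinlock_t always busy-waits with preemption
disabled, so only the latter may be taken in a preemption-disabled context.
Below is a minimal, illustrative C sketch of that pattern; it is not part of
this patch, and the names demo_ofl_lock and demo_offline_path() are
hypothetical.

    /*
     * Illustrative sketch only -- not part of this patch; the names
     * demo_ofl_lock and demo_offline_path() are hypothetical.
     */
    #include <linux/spinlock.h>
    #include <linux/preempt.h>

    /* Like rcu_state.ofl_lock after this patch: a raw_spinlock_t. */
    static DEFINE_RAW_SPINLOCK(demo_ofl_lock);

    static void demo_offline_path(void)
    {
            preempt_disable();               /* models the CPU-offline path's context */
            raw_spin_lock(&demo_ofl_lock);   /* never sleeps, even on -rt */
            /* ... offline bookkeeping protected by the lock ... */
            raw_spin_unlock(&demo_ofl_lock);
            preempt_enable();
    }

With a plain spin_lock() in place of raw_spin_lock() here, an -rt kernel would
warn about a sleeping lock being taken in atomic context; the raw variant keeps
the critical section non-sleeping, which is the property the CPU-offline path
relies on.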
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--  kernel/rcu/tree.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 85c2c2dc4c4a..58aa6c2fd7fa 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -97,7 +97,7 @@ struct rcu_state rcu_state = {
         .abbr = RCU_ABBR,
         .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
         .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
-        .ofl_lock = __SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
+        .ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
 };
 
 /* Dump rcu_node combining tree at boot to verify correct setup. */
@@ -1776,13 +1776,13 @@ static bool rcu_gp_init(void)
          */
         rcu_state.gp_state = RCU_GP_ONOFF;
         rcu_for_each_leaf_node(rnp) {
-                spin_lock(&rcu_state.ofl_lock);
+                raw_spin_lock(&rcu_state.ofl_lock);
                 raw_spin_lock_irq_rcu_node(rnp);
                 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
                     !rnp->wait_blkd_tasks) {
                         /* Nothing to do on this leaf rcu_node structure. */
                         raw_spin_unlock_irq_rcu_node(rnp);
-                        spin_unlock(&rcu_state.ofl_lock);
+                        raw_spin_unlock(&rcu_state.ofl_lock);
                         continue;
                 }
 
@@ -1818,7 +1818,7 @@ static bool rcu_gp_init(void)
                 }
 
                 raw_spin_unlock_irq_rcu_node(rnp);
-                spin_unlock(&rcu_state.ofl_lock);
+                raw_spin_unlock(&rcu_state.ofl_lock);
         }
 
         rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
@@ -3377,7 +3377,7 @@ void rcu_report_dead(unsigned int cpu)
 
         /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
         mask = rdp->grpmask;
-        spin_lock(&rcu_state.ofl_lock);
+        raw_spin_lock(&rcu_state.ofl_lock);
         raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
         rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
         rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
@@ -3388,7 +3388,7 @@ void rcu_report_dead(unsigned int cpu)
         }
         rnp->qsmaskinitnext &= ~mask;
         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-        spin_unlock(&rcu_state.ofl_lock);
+        raw_spin_unlock(&rcu_state.ofl_lock);
 
         per_cpu(rcu_cpu_started, cpu) = 0;
 }
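As a usage note on the first hunk: __RAW_SPIN_LOCK_UNLOCKED() is the static
initializer for a raw_spinlock_t embedded in a structure, analogous to
__SPIN_LOCK_UNLOCKED() for spinlock_t. The sketch below (not from this patch;
the names demo_state and demo_bump() are hypothetical) shows that
struct-member pattern in isolation.

    #include <linux/spinlock.h>

    /* Hypothetical structure mirroring the rcu_state.ofl_lock layout. */
    struct demo_state {
            int counter;
            raw_spinlock_t ofl_lock;
    };

    static struct demo_state demo_state = {
            .counter  = 0,
            /* Same initializer form the patch switches to. */
            .ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(demo_state.ofl_lock),
    };

    static void demo_bump(void)
    {
            raw_spin_lock(&demo_state.ofl_lock);
            demo_state.counter++;
            raw_spin_unlock(&demo_state.ofl_lock);
    }

The initializer's argument is only used to name the lock (e.g. for lockdep
reports), which is why the patch passes rcu_state.ofl_lock there.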