author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2013-03-19 11:10:43 -0700
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2013-06-10 13:39:42 -0700
commit     d34ea3221a0f34ed42eadabf054604bbcc7ecd27 (patch)
tree       8a638346b2da58558abccf92f394372d098c4910 /kernel/rcutree.c
parent     398ebe6000c16135d12ce2ff64318f306ffb20b0 (diff)
rcu: Rename note_new_gpnum() to note_gp_changes()
Because note_new_gpnum() now also checks for the ends of old grace periods, this commit changes its name to note_gp_changes(). Later commits will merge rcu_process_gp_end() into note_gp_changes().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
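The fastpath in the second hunk below now compares both ->gpnum and ->completed against the rcu_node values before bothering with the trylock. As a rough illustration of that check-outside-the-lock-then-trylock pattern, here is a minimal user-space sketch; "struct node", "struct data", note_changes(), and the pthread/atomic plumbing are hypothetical stand-ins for the kernel structures, not the kernel's API.

/*
 * Minimal user-space sketch (NOT kernel code).  Atomic loads stand in
 * for ACCESS_ONCE(), and a pthread mutex stands in for rcu_node ->lock.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct node {                           /* stand-in for struct rcu_node */
	pthread_mutex_t lock;
	atomic_ulong gpnum;             /* last grace period to start */
	atomic_ulong completed;         /* last grace period to complete */
};

struct data {                           /* stand-in for struct rcu_data */
	unsigned long gpnum;
	unsigned long completed;
};

/* Slowpath: caller holds n->lock; record both numbers in *d. */
static void note_changes_locked(struct node *n, struct data *d)
{
	d->completed = atomic_load(&n->completed);  /* old GP ended? */
	d->gpnum = atomic_load(&n->gpnum);          /* new GP started? */
}

/*
 * Fastpath: if both numbers already match the unlocked snapshot, or if
 * the lock cannot be acquired right now, do nothing and retry later.
 */
static void note_changes(struct node *n, struct data *d)
{
	if ((d->gpnum == atomic_load(&n->gpnum) &&
	     d->completed == atomic_load(&n->completed)) ||
	    pthread_mutex_trylock(&n->lock) != 0)
		return;                 /* nothing changed, or lock busy */
	note_changes_locked(n, d);
	pthread_mutex_unlock(&n->lock);
}

int main(void)
{
	static struct node n = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };
	struct data d = { 0, 0 };

	note_changes(&n, &d);   /* sees gpnum 1 != 0, takes the lock */
	printf("gpnum=%lu completed=%lu\n", d.gpnum, d.completed);
	return 0;
}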
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 54aba759b609..7eb2bc95300a 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1307,7 +1307,7 @@ __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_dat
  * ->lock of the leaf rcu_node structure corresponding to the current CPU,
  * and must have irqs disabled.
  */
-static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
+static void __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
 {
 	/* Handle the ends of any preceding grace periods first. */
 	__rcu_process_gp_end(rsp, rnp, rdp);
@@ -1326,19 +1326,20 @@ static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct
 	}
 }

-static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
+static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	unsigned long flags;
 	struct rcu_node *rnp;

 	local_irq_save(flags);
 	rnp = rdp->mynode;
-	if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
+	if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
+	     rdp->completed == ACCESS_ONCE(rnp->completed)) || /* w/out lock. */
 	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
 		local_irq_restore(flags);
 		return;
 	}
-	__note_new_gpnum(rsp, rnp, rdp);
+	__note_gp_changes(rsp, rnp, rdp);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
@@ -1377,7 +1378,7 @@ check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)

 	local_irq_save(flags);
 	if (rdp->gpnum != rsp->gpnum) {
-		note_new_gpnum(rsp, rdp);
+		note_gp_changes(rsp, rdp);
 		ret = 1;
 	}
 	local_irq_restore(flags);
@@ -1396,7 +1397,7 @@ rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_dat
 	__rcu_process_gp_end(rsp, rnp, rdp);

 	/* Set state so that this CPU will detect the next quiescent state. */
-	__note_new_gpnum(rsp, rnp, rdp);
+	__note_gp_changes(rsp, rnp, rdp);
 }

 /*