author    | Ildar Ismagilov <devix84@gmail.com>           | 2018-01-31 22:42:21 +0300
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2018-02-20 16:21:12 -0800
commit    | 8ddbd8832d25463a996a1075c35f312fb43c86b1 (patch)
tree      | 3b4c1380ccb51276bc068f7865f2072542d9db50 /kernel/rcu/srcutree.c
parent    | a35d13ec3686612b17771bf1abcad75d2ebd42ef (diff)
srcu: Reduce scans of srcu_data in counter wrap check
Currently, given a multi-level srcu_node tree, SRCU can scan the full
set of srcu_data structures at each level when cleaning up after a grace
period. This, though harmless otherwise, represents pointless overhead.
This commit therefore eliminates this overhead by scanning the srcu_data
structures only when traversing the leaf srcu_node structures.
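The test that identifies a leaf in the patch below is a plain pointer comparison: srcu_node structures live breadth-first in one flat array, and sp->level[rcu_num_lvls - 1] points at the first node of the last (leaf) level, so any node at or beyond that address is a leaf. The following is a minimal user-space sketch of that layout, not the kernel code; the names (node, tree, level, NUM_LVLS, is_leaf) are illustrative only.

/*
 * Sketch: nodes stored breadth-first in one array, level[] pointing at
 * the first node of each level.  A node is a leaf iff it sits at or
 * beyond level[NUM_LVLS - 1], mirroring
 * "snp >= sp->level[rcu_num_lvls - 1]" in srcu_gp_end().
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_LVLS  3
#define NUM_NODES 7			/* 1 root + 2 inner + 4 leaves */

struct node { int id; };

static struct node tree[NUM_NODES];
static struct node *level[NUM_LVLS];

static bool is_leaf(struct node *np)
{
	return np >= level[NUM_LVLS - 1];
}

int main(void)
{
	int i;

	level[0] = &tree[0];		/* root */
	level[1] = &tree[1];		/* two inner nodes */
	level[2] = &tree[3];		/* four leaves */

	for (i = 0; i < NUM_NODES; i++) {
		tree[i].id = i;
		printf("node %d: %s\n", tree[i].id,
		       is_leaf(&tree[i]) ? "leaf: scan per-CPU srcu_data"
					 : "inner: skip per-CPU scan");
	}
	return 0;
}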
Signed-off-by: Ildar Ismagilov <devix84@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu/srcutree.c')
-rw-r--r--  kernel/rcu/srcutree.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index bb9607baad9b..d582a2352cdb 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -527,6 +527,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 {
 	unsigned long cbdelay;
 	bool cbs;
+	bool last_lvl;
 	int cpu;
 	unsigned long flags;
 	unsigned long gpseq;
@@ -559,7 +560,8 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	rcu_for_each_node_breadth_first(sp, snp) {
 		spin_lock_irq_rcu_node(snp);
 		cbs = false;
-		if (snp >= sp->level[rcu_num_lvls - 1])
+		last_lvl = snp >= sp->level[rcu_num_lvls - 1];
+		if (last_lvl)
 			cbs = snp->srcu_have_cbs[idx] == gpseq;
 		snp->srcu_have_cbs[idx] = gpseq;
 		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
@@ -572,7 +574,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
 
 		/* Occasionally prevent srcu_data counter wrap. */
-		if (!(gpseq & counter_wrap_check))
+		if (!(gpseq & counter_wrap_check) && last_lvl)
 			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
 				sdp = per_cpu_ptr(sp->sda, cpu);
 				spin_lock_irqsave_rcu_node(sdp, flags);
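The last hunk gates the counter-wrap scan on last_lvl in addition to the existing "occasionally" check. That check is a simple mask test: with a mask of the form (1 << N) - 1, the condition !(gpseq & mask) holds once every 2^N grace-period sequence values, so the per-CPU walk runs only rarely, and after this patch only while at a leaf srcu_node. A small stand-alone sketch of that gating pattern follows; the mask value is illustrative and not the kernel's counter_wrap_check default.

/*
 * Sketch of the "occasionally" gating used above: with mask = (1 << 4) - 1,
 * !(seq & mask) fires once every 16 sequence values, so the (relatively
 * expensive) counter-wrap scan is performed only now and then.
 */
#include <stdio.h>

int main(void)
{
	unsigned long mask = (1UL << 4) - 1;	/* illustrative, not counter_wrap_check */
	unsigned long seq;
	int scans = 0;

	for (seq = 0; seq < 64; seq++) {
		if (!(seq & mask)) {		/* same test shape as the patch */
			scans++;
			printf("seq %2lu: run counter-wrap scan\n", seq);
		}
	}
	printf("%d scans over 64 grace periods\n", scans);
	return 0;
}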