author     Paul E. McKenney <paulmck@kernel.org>   2022-08-05 15:42:25 -0700
committer  Paul E. McKenney <paulmck@kernel.org>   2022-08-31 05:09:22 -0700
commit     d761de8a7dcef8e8e9e20a543f85a2c079ae3d0d (patch)
tree       e2016c6787384500b8f3520c78782dbc3d5b6230 /kernel/rcu/tree.c
parent     b3cdd0a79c875d5e9cac9f6555485031ce5bea81 (diff)
rcu: Make synchronize_rcu() fastpath update only boot-CPU counters
Large systems can have hundreds of rcu_node structures, and updating counters in each of them might slow down booting. This commit therefore updates only the counters in those rcu_node structures corresponding to the boot CPU, up to and including the root rcu_node structure. The counters for the remaining rcu_node structures are updated by the rcu_scheduler_starting() function, which executes just before the first non-boot kthread is spawned.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
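To see why the boot-CPU-only walk is cheaper, here is a minimal user-space sketch of the two update strategies. The struct node, update_all(), and update_boot_path() names below are illustrative stand-ins for the kernel's rcu_node tree and its gp_seq counters, not the real RCU code; the actual combining tree is walked under local_irq_save() and its leaf is found via this_cpu_ptr(&rcu_data)->mynode.

#include <stdio.h>

/*
 * Toy two-level tree: one root and a few leaves, each holding a
 * grace-period sequence counter, loosely modeled on rcu_node->gp_seq.
 */
struct node {
	unsigned long gp_seq;
	struct node *parent;
};

#define NR_LEAVES 4

static struct node root;
static struct node leaves[NR_LEAVES];

/* Old fastpath behavior: touch every node in the tree. */
static void update_all(unsigned long new_seq)
{
	root.gp_seq = new_seq;
	for (int i = 0; i < NR_LEAVES; i++)
		leaves[i].gp_seq = new_seq;
}

/*
 * New fastpath behavior: walk only from the boot CPU's leaf up to the
 * root, a handful of nodes even on a machine with hundreds of leaves.
 */
static void update_boot_path(struct node *boot_leaf, unsigned long new_seq)
{
	for (struct node *np = boot_leaf; np; np = np->parent)
		np->gp_seq = new_seq;
}

int main(void)
{
	for (int i = 0; i < NR_LEAVES; i++)
		leaves[i].parent = &root;
	root.parent = NULL;

	/* Early boot: only the boot CPU's path is brought up to date. */
	update_boot_path(&leaves[0], 4);
	for (int i = 0; i < NR_LEAVES; i++)
		printf("leaf %d gp_seq=%lu\n", i, leaves[i].gp_seq);
	printf("root gp_seq=%lu\n", root.gp_seq);

	/* Later, a rcu_scheduler_starting()-style fixup catches up the rest. */
	update_all(4);
	return 0;
}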
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--  kernel/rcu/tree.c | 18
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index b9e8ed00536d..ef15bae3c7c7 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3504,11 +3504,14 @@ void synchronize_rcu(void)
rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
- // Update normal grace-period counters to record grace period.
+ // Update the normal grace-period counters to record
+ // this grace period, but only those used by the boot CPU.
+ // The rcu_scheduler_starting() will take care of the rest of
+ // these counters.
local_irq_save(flags);
WARN_ON_ONCE(num_online_cpus() > 1);
rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT);
- rcu_for_each_node_breadth_first(rnp)
+ for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent)
rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
local_irq_restore(flags);
}
@@ -4456,9 +4459,20 @@ early_initcall(rcu_spawn_gp_kthread);
*/
void rcu_scheduler_starting(void)
{
+ unsigned long flags;
+ struct rcu_node *rnp;
+
WARN_ON(num_online_cpus() != 1);
WARN_ON(nr_context_switches() > 0);
rcu_test_sync_prims();
+
+ // Fix up the ->gp_seq counters.
+ local_irq_save(flags);
+ rcu_for_each_node_breadth_first(rnp)
+ rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
+ local_irq_restore(flags);
+
+ // Switch out of early boot mode.
rcu_scheduler_active = RCU_SCHEDULER_INIT;
rcu_test_sync_prims();
}
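The split appears safe because, per the commit message, rcu_scheduler_starting() runs while only the boot CPU is online and just before the first non-boot kthread is spawned, so nothing can observe the not-yet-updated counters in the other rcu_node structures in the meantime; the WARN_ON_ONCE(num_online_cpus() > 1) and WARN_ON(num_online_cpus() != 1) checks in the two paths assert exactly that single-CPU assumption.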