author     Paul E. McKenney <paulmck@kernel.org>    2022-12-15 09:26:09 -0800
committer  Paul E. McKenney <paulmck@kernel.org>    2022-12-17 16:12:20 -0800
commit     3f6c3d29df58f391cf487b50a24ebd24045ba569 (patch)
tree       4c563b754020aa226a98dead459291358ce52609 /kernel/rcu/tree.c
parent     31d8aaa87fcef1be5932f3813ea369e21bd3b11d (diff)
rcu: Don't assert interrupts enabled too early in boot
The rcu_poll_gp_seq_end() and rcu_poll_gp_seq_end_unlocked() both check
that interrupts are enabled, as they normally should be when waiting for
an RCU grace period.  Except that it is legal to wait for grace periods
during early boot, before interrupts have been enabled for the first
time, and polling for grace periods is required to work during this
time.  This can result in false-positive lockdep splats in the presence
of boot-time-initiated tracing.

This commit therefore conditions those interrupts-enabled checks on
rcu_scheduler_active having advanced past RCU_SCHEDULER_INACTIVE, by
which time interrupts have been enabled.

Reported-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Tested-by: Steven Rostedt (Google) <rostedt@goodmis.org>
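For illustration, a minimal sketch (not part of this patch) of the kind of
early-boot caller described above, assuming a boot-time-initiated tracer that
both waits for and polls RCU grace periods before interrupts have ever been
enabled; the function early_boot_trace_init() is hypothetical, while
synchronize_rcu(), get_state_synchronize_rcu(), and
poll_state_synchronize_rcu() are the existing RCU APIs involved:

/*
 * Hypothetical early-boot caller, running while rcu_scheduler_active is
 * still RCU_SCHEDULER_INACTIVE, i.e. before interrupts have ever been
 * enabled.  Sketch only; not taken from the patch or the reported tracer.
 */
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/rcupdate.h>

static void __init early_boot_trace_init(void)
{
	unsigned long cookie;

	/* Take a cookie for the polled grace-period API. */
	cookie = get_state_synchronize_rcu();

	/*
	 * An early-boot synchronize_rcu() degenerates into a vacuous
	 * grace period, but it still records that grace period through
	 * the rcu_poll_gp_seq_*_unlocked() helpers touched by this
	 * patch.  Interrupts are still disabled this early, so the
	 * previously unconditional lockdep_assert_irqs_enabled() could
	 * fire here as a false positive.
	 */
	synchronize_rcu();

	/* Polling must now report that grace period as completed. */
	WARN_ON_ONCE(!poll_state_synchronize_rcu(cookie));
}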
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--   kernel/rcu/tree.c   10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 93416afebd59..83c6baafd517 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1368,7 +1368,7 @@ static void rcu_poll_gp_seq_start(unsigned long *snap)
 {
 	struct rcu_node *rnp = rcu_get_root();
 
-	if (rcu_init_invoked())
+	if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
 		raw_lockdep_assert_held_rcu_node(rnp);
 
 	// If RCU was idle, note beginning of GP.
@@ -1384,7 +1384,7 @@ static void rcu_poll_gp_seq_end(unsigned long *snap)
 {
 	struct rcu_node *rnp = rcu_get_root();
 
-	if (rcu_init_invoked())
+	if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
 		raw_lockdep_assert_held_rcu_node(rnp);
 
 	// If the previously noted GP is still in effect, record the
@@ -1407,7 +1407,8 @@ static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap)
 	struct rcu_node *rnp = rcu_get_root();
 
 	if (rcu_init_invoked()) {
-		lockdep_assert_irqs_enabled();
+		if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
+			lockdep_assert_irqs_enabled();
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	}
 	rcu_poll_gp_seq_start(snap);
@@ -1423,7 +1424,8 @@ static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap)
 	struct rcu_node *rnp = rcu_get_root();
 
 	if (rcu_init_invoked()) {
-		lockdep_assert_irqs_enabled();
+		if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
+			lockdep_assert_irqs_enabled();
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	}
 	rcu_poll_gp_seq_end(snap);