author     Paul E. McKenney <paulmck@linux.ibm.com>    2018-10-02 16:05:46 -0700
committer  Paul E. McKenney <paulmck@linux.ibm.com>    2018-12-01 12:45:37 -0800
commit     903ee83d91776bc72d856147743687d4b6c99286 (patch)
tree       df3216d4cc5154d397a4acbcbe6d04ddb4ae32b4 /kernel/rcu/tree_plugin.h
parent     e0aff97355575ac6a28a48a4217533a3953095c5 (diff)
rcu: Account for nocb-CPU callback counts in RCU CPU stall warnings
The RCU CPU stall warnings print an estimate of the total number of RCU callbacks queued in the system, but this estimate leaves out the callbacks queued for nocbs CPUs. This commit therefore introduces rcu_get_n_cbs_cpu(), which gives an accurate callback estimate for both nocbs and normal CPUs, and uses this new function as needed.

This commit also introduces a rcu_get_n_cbs_nocb_cpu() helper function that returns the number of callbacks for nocbs CPUs or zero otherwise, and also uses this function in place of direct access to ->nocb_q_count while in the area (fewer characters, you see).

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
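Note that this per-file view covers only kernel/rcu/tree_plugin.h, so rcu_get_n_cbs_cpu() itself does not appear in the diff below; it lives in kernel/rcu/tree.c. As a rough sketch of the idea (not the verbatim kernel code), it amounts to returning the segmented-callback-list count for a normal CPU and falling back to the new nocbs helper otherwise:

/* Sketch only: the real helper is in kernel/rcu/tree.c, not shown here. */
static unsigned long rcu_get_n_cbs_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	/* Normal CPU: callbacks are counted in the segmented callback list. */
	if (rcu_segcblist_is_enabled(&rdp->cblist))
		return rcu_segcblist_n_cbs(&rdp->cblist);
	/* nocbs CPU: use the helper added below (returns zero otherwise). */
	return rcu_get_n_cbs_nocb_cpu(rdp);
}

This works because nocbs CPUs disable their segmented callback list in init_nocb_callback_list(), so exactly one of the two branches counts a given CPU's callbacks.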
Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--    kernel/rcu/tree_plugin.h    24
1 file changed, 19 insertions(+), 5 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 25fe26c4e9c1..1b3dd2fc0cd6 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2011,7 +2011,7 @@ static bool rcu_nocb_cpu_needs_barrier(int cpu)
* (if a callback is in fact needed). This is associated with an
* atomic_inc() in the caller.
*/
- ret = atomic_long_read(&rdp->nocb_q_count);
+ ret = rcu_get_n_cbs_nocb_cpu(rdp);
#ifdef CONFIG_PROVE_RCU
rhp = READ_ONCE(rdp->nocb_head);
@@ -2066,7 +2066,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
TPS("WakeNotPoll"));
return;
}
- len = atomic_long_read(&rdp->nocb_q_count);
+ len = rcu_get_n_cbs_nocb_cpu(rdp);
if (old_rhpp == &rdp->nocb_head) {
if (!irqs_disabled_flags(flags)) {
/* ... if queue was empty ... */
@@ -2115,11 +2115,11 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
trace_rcu_kfree_callback(rcu_state.name, rhp,
(unsigned long)rhp->func,
-atomic_long_read(&rdp->nocb_q_count_lazy),
- -atomic_long_read(&rdp->nocb_q_count));
+ -rcu_get_n_cbs_nocb_cpu(rdp));
else
trace_rcu_callback(rcu_state.name, rhp,
-atomic_long_read(&rdp->nocb_q_count_lazy),
- -atomic_long_read(&rdp->nocb_q_count));
+ -rcu_get_n_cbs_nocb_cpu(rdp));
/*
* If called from an extended quiescent state with interrupts
@@ -2343,7 +2343,7 @@ static int rcu_nocb_kthread(void *arg)
/* Each pass through the following loop invokes a callback. */
trace_rcu_batch_start(rcu_state.name,
atomic_long_read(&rdp->nocb_q_count_lazy),
- atomic_long_read(&rdp->nocb_q_count), -1);
+ rcu_get_n_cbs_nocb_cpu(rdp), -1);
c = cl = 0;
while (list) {
next = list->next;
@@ -2614,6 +2614,15 @@ void rcu_bind_current_to_nocb(void)
}
EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
+/*
+ * Return the number of RCU callbacks still queued from the specified
+ * CPU, which must be a nocbs CPU.
+ */
+static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp)
+{
+ return atomic_long_read(&rdp->nocb_q_count);
+}
+
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static bool rcu_nocb_cpu_needs_barrier(int cpu)
@@ -2674,6 +2683,11 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
return false;
}
+static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp)
+{
+ return 0;
+}
+
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
/*