path: root/kernel/rcu/tree.c
author     Paul E. McKenney <paulmck@kernel.org>  2020-02-22 20:07:09 -0800
committer  Paul E. McKenney <paulmck@kernel.org>  2020-04-27 11:01:16 -0700
commit     1fca4d12f46371a34bf90af87f49548dd026c3ca (patch)
tree       311125bf4cec30f94fb44dc3e00ffa9bda81432a /kernel/rcu/tree.c
parent     53965dbe5396d2945c7cf53c77c7b5532b08791c (diff)
rcu: Expedite first two FQS scans under callback-overload conditions
Even if some CPUs have excessive numbers of callbacks, RCU's grace-period kthread will still wait normally between successive force-quiescent-state scans. The first two are the most important, as they are the ones that enlist aid from the scheduler when overloaded. This commit therefore omits the wait before the first and the second force-quiescent-state scan under callback-overload conditions.

This approach was inspired by a discussion with Jeff Roberson.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
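To make the behavioral change concrete, here is a minimal userspace sketch of the idea; it is not the kernel code, and every name in it (cb_overloaded, fqs_scan(), wait_between_scans()) is a hypothetical stand-in. Under overload, the waits before the first two scans are skipped; later scans wait normally.

/*
 * Illustrative sketch only -- all names below are invented for this
 * example, not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool cb_overloaded = true;	/* assume callback overload */

static void fqs_scan(int n)
{
	printf("force-quiescent-state scan #%d\n", n);
}

static void wait_between_scans(void)
{
	usleep(1000);	/* stand-in for the normal jiffies-based wait */
}

int main(void)
{
	for (int scan = 1; scan <= 4; scan++) {
		/* Skip the wait before the first two scans when overloaded. */
		if (!(cb_overloaded && scan <= 2))
			wait_between_scans();
		fqs_scan(scan);
	}
	return 0;
}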
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--  kernel/rcu/tree.c  19
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index c88240a685ec..0132bc6152dd 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1624,12 +1624,16 @@ static bool rcu_gp_fqs_check_wake(int *gfp)
 {
         struct rcu_node *rnp = rcu_get_root();
 
-        /* Someone like call_rcu() requested a force-quiescent-state scan. */
+        // If under overload conditions, force an immediate FQS scan.
+        if (*gfp & RCU_GP_FLAG_OVLD)
+                return true;
+
+        // Someone like call_rcu() requested a force-quiescent-state scan.
         *gfp = READ_ONCE(rcu_state.gp_flags);
         if (*gfp & RCU_GP_FLAG_FQS)
                 return true;
 
-        /* The current grace period has completed. */
+        // The current grace period has completed.
         if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
                 return true;
 
@@ -1667,13 +1671,15 @@ static void rcu_gp_fqs(bool first_time)
 static void rcu_gp_fqs_loop(void)
 {
         bool first_gp_fqs;
-        int gf;
+        int gf = 0;
         unsigned long j;
         int ret;
         struct rcu_node *rnp = rcu_get_root();
 
         first_gp_fqs = true;
         j = READ_ONCE(jiffies_till_first_fqs);
+        if (rcu_state.cbovld)
+                gf = RCU_GP_FLAG_OVLD;
         ret = 0;
         for (;;) {
                 if (!ret) {
@@ -1698,7 +1704,11 @@ static void rcu_gp_fqs_loop(void)
                         trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
                                                TPS("fqsstart"));
                         rcu_gp_fqs(first_gp_fqs);
-                        first_gp_fqs = false;
+                        gf = 0;
+                        if (first_gp_fqs) {
+                                first_gp_fqs = false;
+                                gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
+                        }
                         trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
                                                TPS("fqsend"));
                         cond_resched_tasks_rcu_qs();
@@ -1718,6 +1728,7 @@ static void rcu_gp_fqs_loop(void)
                                 j = 1;
                         else
                                 j = rcu_state.jiffies_force_qs - j;
+                        gf = 0;
                 }
         }
 }
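Taken together, the hunks implement a small handshake on the local gf flags word: it is seeded with RCU_GP_FLAG_OVLD before the first wait, re-armed exactly once after the first scan, and cleared everywhere else. The standalone sketch below shows why only the first two scans are expedited. It is a paraphrase under assumptions, not kernel code: the flag value, cbovld variable, and check_wake() helper are simplified stand-ins for rcu_state.cbovld and rcu_gp_fqs_check_wake().

#include <stdbool.h>
#include <stdio.h>

#define RCU_GP_FLAG_OVLD 0x4	/* illustrative value only */

static bool cbovld = true;	/* pretend callbacks are overloaded */

/* Mirrors the patched check: the overload flag forces an immediate wake. */
static bool check_wake(int *gfp)
{
	/* The real function also checks gp_flags and the root qsmask. */
	return (*gfp & RCU_GP_FLAG_OVLD) != 0;
}

int main(void)
{
	bool first_gp_fqs = true;
	int gf = cbovld ? RCU_GP_FLAG_OVLD : 0;	/* seed before scan #1 */

	for (int scan = 1; scan <= 4; scan++) {
		if (check_wake(&gf))
			printf("scan %d: wait skipped (expedited)\n", scan);
		else
			printf("scan %d: normal timed wait\n", scan);
		/* rcu_gp_fqs() would run here */
		gf = 0;
		if (first_gp_fqs) {
			first_gp_fqs = false;
			gf = cbovld ? RCU_GP_FLAG_OVLD : 0;	/* re-arm once */
		}
	}
	return 0;
}

Because gf is re-armed only inside the first_gp_fqs branch, scans after the second fall back to the usual timed pacing, and the final hunk clears gf on the stray-wakeup path so a spurious wakeup cannot keep the expedited behavior alive.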