author		Paul E. McKenney <paulmck@kernel.org>	2020-02-05 12:54:34 -0800
committer	Paul E. McKenney <paulmck@kernel.org>	2020-02-20 16:03:31 -0800
commit		9470a18fabd056e67ee12059dab04faf6e1f253c (patch)
tree		6a3163c94456ef850cf9fe783f8acc16bc252759 /kernel/rcu
parent		50d4b62970e21e9573daf0e3c1138b4d1ebcca47 (diff)
rcutorture: Manually clean up after rcu_barrier() failure
Currently, if rcu_barrier() returns too soon, the test waits 100ms and then does another instance of the test. However, if rcu_barrier() returned more than 100ms too early, the test's rcu_head structures could be reused while they were still on RCU's callback lists. This can result in knock-on errors that obscure the original rcu_barrier() test failure. This commit therefore adds code that attempts to wait until all of the test's callbacks have been invoked. Of course, if RCU completely lost track of the corresponding rcu_head structures, this wait could be forever. This commit therefore also complains if this attempted recovery takes more than one second, and it also gives up when the test ends.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
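The recovery added by this commit is, at bottom, a bounded poll-and-retry loop: keep flushing and re-checking until every callback is accounted for, warn once if that takes longer than about a second, and give up if the test is ending. Below is a minimal userspace C sketch of that pattern; everything in it (callbacks_invoked, n_callbacks, must_stop(), flush_callbacks(), the 1ms nanosleep()) is a hypothetical stand-in for the kernel's barrier_cbs_invoked, n_barrier_cbs, torture_must_stop(), cur_ops->cb_barrier(), and schedule_timeout_interruptible(1), not the rcutorture code itself:

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define MAX_POLLS 1000			/* ~1s at 1ms per poll; plays the role of HZ */

static atomic_int callbacks_invoked;	/* incremented as each callback runs */
static int n_callbacks;			/* total callbacks posted */

static bool must_stop(void)		/* stand-in for torture_must_stop() */
{
	return false;
}

static void flush_callbacks(void)	/* stand-in for cur_ops->cb_barrier() */
{
}

static void recover_from_early_barrier_return(void)
{
	int i = 0;

	do {
		if (i++ > MAX_POLLS) {
			fprintf(stderr, "still waiting after ~1s\n");
			i = INT_MIN;	/* stay negative so we warn only once */
		}
		struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000000 };
		nanosleep(&ts, NULL);	/* 1ms, like schedule_timeout_interruptible(1) */
		flush_callbacks();
	} while (atomic_load(&callbacks_invoked) != n_callbacks && !must_stop());

	if (!must_stop())
		printf("Recovered: callbacks_invoked = %d\n",
		       atomic_load(&callbacks_invoked));
}

int main(void)
{
	/* Nothing outstanding here, so the loop exits after a single poll. */
	recover_from_early_barrier_return();
	return 0;
}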
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/rcutorture.c | 16 +++++++++++++++-
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index f82515cded34..5453bd557f43 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -2124,7 +2124,21 @@ static int rcu_torture_barrier(void *arg)
pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
atomic_read(&barrier_cbs_invoked),
n_barrier_cbs);
- WARN_ON_ONCE(1);
+ WARN_ON(1);
+ // Wait manually for the remaining callbacks
+ i = 0;
+ do {
+ if (WARN_ON(i++ > HZ))
+ i = INT_MIN;
+ schedule_timeout_interruptible(1);
+ cur_ops->cb_barrier();
+ } while (atomic_read(&barrier_cbs_invoked) !=
+ n_barrier_cbs &&
+ !torture_must_stop());
+ smp_mb(); // Can't trust ordering if broken.
+ if (!torture_must_stop())
+ pr_err("Recovered: barrier_cbs_invoked = %d\n",
+ atomic_read(&barrier_cbs_invoked));
} else {
n_barrier_successes++;
}
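Two details of the added loop are worth calling out. First, once the warning fires, i is set to INT_MIN, so WARN_ON(i++ > HZ) stays false on subsequent passes (i remains deeply negative): the warning triggers only once while the loop keeps polling until the callbacks arrive or torture_must_stop() ends the test. Second, because schedule_timeout_interruptible(1) sleeps for one jiffy per pass, the i > HZ threshold corresponds to roughly one second, matching the commit message's promise to complain after a second of attempted recovery. The smp_mb() is, per its own comment, defensive: once rcu_barrier() has been observed to misbehave, its ordering guarantees can no longer be trusted, so a full barrier is placed before the test proceeds.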