author     Peter Zijlstra <peterz@infradead.org>    2017-08-23 13:13:11 +0200
committer  Ingo Molnar <mingo@kernel.org>           2017-08-25 11:06:31 +0200
commit     e91498589746065e3ae95d9a00b068e525eec34f (patch)
tree       9b7d6663dbfce16abc8cb6228c56dd72e4fff83c /lib/locking-selftest.c
parent     0e709703af5bbc9ea6e75e1f99c2dd0dae261869 (diff)
locking/lockdep/selftests: Add mixed read-write ABBA tests
Currently lockdep has limited support for recursive readers; add a few
mixed read-write ABBA selftests to show the extent of these limitations.

[    0.000000] ----------------------------------------------------------------------------
[    0.000000]                                  | spin |wlock |rlock |mutex | wsem | rsem |
[    0.000000]   --------------------------------------------------------------------------
[    0.000000]       mixed read-lock/lock-write ABBA:             |FAILED|             |  ok  |
[    0.000000]        mixed read-lock/lock-read ABBA:             |  ok  |             |  ok  |
[    0.000000]      mixed write-lock/lock-write ABBA:             |  ok  |             |  ok  |

This clearly illustrates the case where lockdep fails to find a deadlock.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: boqun.feng@gmail.com
Cc: byungchul.park@lge.com
Cc: david@fromorbit.com
Cc: johannes@sipsolutions.net
Cc: oleg@redhat.com
Cc: tj@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
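For reference, the failing rlock case in the table corresponds to the following two-thread pattern spelled out with the ordinary kernel locking primitives. This sketch is not part of the patch; the lock and thread names are invented for illustration, and it assumes the usual rwlock_t/spinlock_t API from <linux/spinlock.h>:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(A);   /* lock "A": taken for read by one thread, for write by the other */
static DEFINE_SPINLOCK(B); /* lock "B": a plain spinlock */

/* Thread 1: read_lock(A), then spin_lock(B) */
static void thread1(void)
{
	read_lock(&A);
	spin_lock(&B);      /* may block here waiting for thread 2 to drop B ... */
	spin_unlock(&B);
	read_unlock(&A);
}

/* Thread 2: spin_lock(B), then write_lock(A) */
static void thread2(void)
{
	spin_lock(&B);
	write_lock(&A);     /* ... while this spins waiting for thread 1's reader: ABBA deadlock */
	write_unlock(&A);
	spin_unlock(&B);
}

If both threads reach their second acquisition at the same time, neither can make progress. Per the table above, lockdep does not currently report this cycle for rwlock_t (the FAILED entry), whereas the equivalent rw_semaphore pattern is caught.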
Diffstat (limited to 'lib/locking-selftest.c')
-rw-r--r--    lib/locking-selftest.c    117
1 file changed, 115 insertions(+), 2 deletions(-)
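A note on the shorthand in the new selftests below: the short macros are the selftest wrappers already defined near the top of lib/locking-selftest.c around the standard locking primitives, instantiated for the selftest's own lock objects. The rough mapping assumed here (see the macro definitions in the file for the exact lock instances) is:

/* rwlock_t:      RL(x)/RU(x)   ~ read_lock/read_unlock,  WL(x)/WU(x) ~ write_lock/write_unlock */
/* spinlock_t:    L(x)/U(x)     ~ spin_lock/spin_unlock                                         */
/* struct mutex:  ML(x)/MU(x)   ~ mutex_lock/mutex_unlock                                       */
/* rw_semaphore:  RSL(x)/RSU(x) ~ down_read/up_read,      WSL(x)/WSU(x) ~ down_write/up_write   */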
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 6f2b135dc5e8..3c7151a6cd98 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -363,6 +363,103 @@ static void rsem_AA3(void)
}
/*
+ * read_lock(A)
+ * spin_lock(B)
+ *             spin_lock(B)
+ *             write_lock(A)
+ */
+static void rlock_ABBA1(void)
+{
+ RL(X1);
+ L(Y1);
+ U(Y1);
+ RU(X1);
+
+ L(Y1);
+ WL(X1);
+ WU(X1);
+ U(Y1); // should fail
+}
+
+static void rwsem_ABBA1(void)
+{
+ RSL(X1);
+ ML(Y1);
+ MU(Y1);
+ RSU(X1);
+
+ ML(Y1);
+ WSL(X1);
+ WSU(X1);
+ MU(Y1); // should fail
+}
+
+/*
+ * read_lock(A)
+ * spin_lock(B)
+ *             spin_lock(B)
+ *             read_lock(A)
+ */
+static void rlock_ABBA2(void)
+{
+ RL(X1);
+ L(Y1);
+ U(Y1);
+ RU(X1);
+
+ L(Y1);
+ RL(X1);
+ RU(X1);
+ U(Y1); // should NOT fail
+}
+
+static void rwsem_ABBA2(void)
+{
+ RSL(X1);
+ ML(Y1);
+ MU(Y1);
+ RSU(X1);
+
+ ML(Y1);
+ RSL(X1);
+ RSU(X1);
+ MU(Y1); // should fail
+}
+
+
+/*
+ * write_lock(A)
+ * spin_lock(B)
+ *             spin_lock(B)
+ *             write_lock(A)
+ */
+static void rlock_ABBA3(void)
+{
+ WL(X1);
+ L(Y1);
+ U(Y1);
+ WU(X1);
+
+ L(Y1);
+ WL(X1);
+ WU(X1);
+ U(Y1); // should fail
+}
+
+static void rwsem_ABBA3(void)
+{
+ WSL(X1);
+ ML(Y1);
+ MU(Y1);
+ WSU(X1);
+
+ ML(Y1);
+ WSL(X1);
+ WSU(X1);
+ MU(Y1); // should fail
+}
+
+/*
* ABBA deadlock:
*/
@@ -1056,8 +1153,6 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
if (debug_locks != expected) {
unexpected_testcase_failures++;
pr_cont("FAILED|");
-
- dump_stack();
} else {
testcase_successes++;
pr_cont(" ok |");
@@ -1933,6 +2028,24 @@ void locking_selftest(void)
dotest(rsem_AA3, FAILURE, LOCKTYPE_RWSEM);
pr_cont("\n");
+ print_testname("mixed read-lock/lock-write ABBA");
+ pr_cont(" |");
+ dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("mixed read-lock/lock-read ABBA");
+ pr_cont(" |");
+ dotest(rlock_ABBA2, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA2, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("mixed write-lock/lock-write ABBA");
+ pr_cont(" |");
+ dotest(rlock_ABBA3, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA3, FAILURE, LOCKTYPE_RWSEM);
+
printk(" --------------------------------------------------------------------------\n");
/*