author		Waiman Long <longman@redhat.com>	2019-01-29 22:53:46 +0100
committer	Ingo Molnar <mingo@kernel.org>	2019-02-04 09:03:30 +0100
commit		412f34a82ccf7dd52f6b197f6450a33f03342523 (patch)
tree		6b913e5cee60855278cefa0bb954531f40f07481	/kernel/locking/qspinlock.c
parent		d682b596d99345ef0000e7017db714ba7f29e017 (diff)
locking/qspinlock_stat: Track the no MCS node available case
Track the number of slowpath locking operations that are being done without
any MCS node available, as well as renaming lock_index[123] to make the
counters more descriptive. Using these stat counters is one way to find out
if a code path is being exercised.

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: James Morse <james.morse@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: SRINIVAS <srinivas.eeda@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Zhenzhong Duan <zhenzhong.duan@oracle.com>
Link: https://lkml.kernel.org/r/1548798828-16156-3-git-send-email-longman@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
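For context, the qstat_inc() calls changed below belong to the qspinlock
statistics code (qspinlock_stat.h): they bump a named event counter only when
the given condition is true, so rarely taken paths such as the "no MCS node
available" trylock fallback can be observed. The following is a minimal,
user-space sketch of that pattern under simplified assumptions; the names
(stat_inc, lock_stats, LOCK_*) are hypothetical and it uses a plain array
rather than the kernel's per-CPU counters.

	/*
	 * Simplified illustration of a conditional statistics counter in
	 * the spirit of qstat_inc(stat, cond).  Names here are made up;
	 * the real counters live in kernel/locking/qspinlock_stat.h and
	 * are per-CPU.
	 */
	#include <stdio.h>

	enum lock_events {
		LOCK_NO_NODE,		/* no MCS node available, trylock fallback */
		LOCK_USE_NODE2,		/* 2nd MCS node of this CPU was used */
		LOCK_USE_NODE3,		/* 3rd MCS node of this CPU was used */
		LOCK_USE_NODE4,		/* 4th MCS node of this CPU was used */
		LOCK_EVENT_MAX,
	};

	static unsigned long lock_stats[LOCK_EVENT_MAX];

	/* Increment the event counter only when @cond is true. */
	static void stat_inc(int event, int cond)
	{
		if (cond)
			lock_stats[event]++;
	}

	int main(void)
	{
		int idx = 2;	/* pretend this CPU is already two lock levels deep */

		/* Mirrors the renamed counter below: count non-zero index values. */
		stat_inc(LOCK_USE_NODE2 + idx - 1, idx);

		printf("use_node3 events: %lu\n", lock_stats[LOCK_USE_NODE3]);
		return 0;
	}

In the kernel these counters are exposed through debugfs, so exercising the
slowpath and reading the counter back is enough to confirm the path was hit.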
Diffstat (limited to 'kernel/locking/qspinlock.c')
-rw-r--r--	kernel/locking/qspinlock.c	3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 0875053c4050..21ee51b47961 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -422,6 +422,7 @@ pv_queue:
* simple enough.
*/
if (unlikely(idx >= MAX_NODES)) {
+ qstat_inc(qstat_lock_no_node, true);
while (!queued_spin_trylock(lock))
cpu_relax();
goto release;
@@ -432,7 +433,7 @@ pv_queue:
/*
* Keep counts of non-zero index values:
*/
- qstat_inc(qstat_lock_idx1 + idx - 1, idx);
+ qstat_inc(qstat_lock_use_node2 + idx - 1, idx);
/*
* Ensure that we increment the head node->count before initialising