Diffstat (limited to 'Documentation/locking')
-rw-r--r--  Documentation/locking/hwspinlock.rst  57
-rw-r--r--  Documentation/locking/index.rst         7
-rw-r--r--  Documentation/locking/locktypes.rst    21
-rw-r--r--  Documentation/locking/seqlock.rst      11
4 files changed, 29 insertions(+), 67 deletions(-)
diff --git a/Documentation/locking/hwspinlock.rst b/Documentation/locking/hwspinlock.rst
index 2ffaa3cbd63f..a737c702a7d1 100644
--- a/Documentation/locking/hwspinlock.rst
+++ b/Documentation/locking/hwspinlock.rst
@@ -40,17 +40,6 @@ User API
::
- struct hwspinlock *hwspin_lock_request(void);
-
-Dynamically assign an hwspinlock and return its address, or NULL
-in case an unused hwspinlock isn't available. Users of this
-API will usually want to communicate the lock's id to the remote core
-before it can be used to achieve synchronization.
-
-Should be called from a process context (might sleep).
-
-::
-
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
Assign a specific hwspinlock id and return its address, or NULL
@@ -312,17 +301,6 @@ The caller should **never** unlock an hwspinlock which is already unlocked.
Doing so is considered a bug (there is no protection against this).
This function will never sleep.
-::
-
- int hwspin_lock_get_id(struct hwspinlock *hwlock);
-
-Retrieve id number of a given hwspinlock. This is needed when an
-hwspinlock is dynamically assigned: before it can be used to achieve
-mutual exclusion with a remote cpu, the id number should be communicated
-to the remote task with which we want to synchronize.
-
-Returns the hwspinlock id number, or -EINVAL if hwlock is null.
-
Typical usage
=============
@@ -331,40 +309,7 @@ Typical usage
#include <linux/hwspinlock.h>
#include <linux/err.h>
- int hwspinlock_example1(void)
- {
- struct hwspinlock *hwlock;
- int ret;
-
- /* dynamically assign a hwspinlock */
- hwlock = hwspin_lock_request();
- if (!hwlock)
- ...
-
- id = hwspin_lock_get_id(hwlock);
- /* probably need to communicate id to a remote processor now */
-
- /* take the lock, spin for 1 sec if it's already taken */
- ret = hwspin_lock_timeout(hwlock, 1000);
- if (ret)
- ...
-
- /*
- * we took the lock, do our thing now, but do NOT sleep
- */
-
- /* release the lock */
- hwspin_unlock(hwlock);
-
- /* free the lock */
- ret = hwspin_lock_free(hwlock);
- if (ret)
- ...
-
- return ret;
- }
-
- int hwspinlock_example2(void)
+ int hwspinlock_example(void)
{
struct hwspinlock *hwlock;
int ret;
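To show how the remaining API described above fits together, a sketch along the lines of the removed hwspinlock_example1(), but using a pre-agreed id, might look as follows; the function name hwspinlock_user() and the id value 0 are arbitrary placeholders, and error handling is kept minimal::

    #include <linux/hwspinlock.h>
    #include <linux/err.h>

    int hwspinlock_user(void)
    {
            struct hwspinlock *hwlock;
            int ret;

            /*
             * Assign a specific hwspinlock; the id (0 here, purely as an
             * example) must match what the remote core expects.
             */
            hwlock = hwspin_lock_request_specific(0);
            if (!hwlock)
                    return -EBUSY;

            /* take the lock, spin for up to 1000 ms if it is already taken */
            ret = hwspin_lock_timeout(hwlock, 1000);
            if (ret)
                    goto out_free;

            /* the lock is held: do the work, but do NOT sleep */

            /* release the lock; never unlock a lock that is not held */
            hwspin_unlock(hwlock);

    out_free:
            /* give the hwspinlock back to the framework */
            hwspin_lock_free(hwlock);
            return ret;
    }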
diff --git a/Documentation/locking/index.rst b/Documentation/locking/index.rst
index 6a9ea96c8bcb..9278d95b7dcb 100644
--- a/Documentation/locking/index.rst
+++ b/Documentation/locking/index.rst
@@ -24,10 +24,3 @@ Locking
percpu-rw-semaphore
robust-futexes
robust-futex-ABI
-
-.. only:: subproject and html
-
- Indices
- =======
-
- * :ref:`genindex`
diff --git a/Documentation/locking/locktypes.rst b/Documentation/locking/locktypes.rst
index 80c914f6eae7..37b6a5670c2f 100644
--- a/Documentation/locking/locktypes.rst
+++ b/Documentation/locking/locktypes.rst
@@ -204,6 +204,27 @@ per-CPU data structures on a non PREEMPT_RT kernel.
local_lock is not suitable to protect against preemption or interrupts on a
PREEMPT_RT kernel due to the PREEMPT_RT specific spinlock_t semantics.
+CPU local scope and bottom-half
+-------------------------------
+
+Per-CPU variables that are accessed only in softirq context should not rely on
+the implicit protection of that context being non-preemptible: on a PREEMPT_RT
+kernel, softirq context is preemptible. Relying only on bottom-half-disabled
+sections for protection also serializes every such section against every
+other, effectively creating an implicit per-CPU "big kernel lock".
+
+A local_lock_t, used with local_lock_nested_bh() and local_unlock_nested_bh()
+as the locking operations, helps to identify the locking scope.
+
+When lockdep is enabled, these functions verify that data structure access
+occurs within softirq context.
+Unlike local_lock(), local_lock_nested_bh() does not disable preemption and
+adds no overhead when lockdep is disabled.
+
+On a PREEMPT_RT kernel, local_lock_t behaves as a real lock and
+local_lock_nested_bh() serializes access to the data structure, which allows
+the serialization via local_bh_disable() to be removed.
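To make the intended usage concrete, a minimal sketch of softirq-only per-CPU data protected with the API documented above could look as follows; struct foo_stats, its foo_count field, and foo_inc() are hypothetical names used only for illustration::

    #include <linux/local_lock.h>
    #include <linux/percpu.h>
    #include <linux/bottom_half.h>

    /* hypothetical per-CPU data, accessed only with bottom halves disabled */
    struct foo_stats {
            local_lock_t    bh_lock;
            unsigned long   foo_count;
    };

    static DEFINE_PER_CPU(struct foo_stats, foo_stats) = {
            .bh_lock = INIT_LOCAL_LOCK(bh_lock),
    };

    static void foo_inc(void)
    {
            struct foo_stats *stats;

            local_bh_disable();
            stats = this_cpu_ptr(&foo_stats);

            /*
             * Documents the locking scope; only a lockdep annotation on a
             * non-PREEMPT_RT kernel, a real lock on PREEMPT_RT.
             */
            local_lock_nested_bh(&foo_stats.bh_lock);
            stats->foo_count++;
            local_unlock_nested_bh(&foo_stats.bh_lock);

            local_bh_enable();
    }

On a non-PREEMPT_RT kernel the nested lock adds no protection beyond the surrounding local_bh_disable(); on PREEMPT_RT it is the lock that actually serializes access to the per-CPU data.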
raw_spinlock_t and spinlock_t
=============================
diff --git a/Documentation/locking/seqlock.rst b/Documentation/locking/seqlock.rst
index ec6411d02ac8..9899871d3d9a 100644
--- a/Documentation/locking/seqlock.rst
+++ b/Documentation/locking/seqlock.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
======================================
Sequence counters and sequential locks
======================================
@@ -218,13 +220,14 @@ Read path, three categories:
according to a passed marker. This is used to avoid lockless readers
starvation (too much retry loops) in case of a sharp spike in write
activity. First, a lockless read is tried (even marker passed). If
- that trial fails (odd sequence counter is returned, which is used as
- the next iteration marker), the lockless read is transformed to a
- full locking read and no retry loop is necessary::
+   that trial fails (the sequence counter does not match), the marker is made
+   odd for the next iteration and the lockless read is transformed to a full
+   locking read; no retry loop is necessary. For example::
/* marker; even initialization */
- int seq = 0;
+ int seq = 1;
do {
+ seq++; /* 2 on the 1st/lockless path, otherwise odd */
read_seqbegin_or_lock(&foo_seqlock, &seq);
/* ... [[read-side critical section]] ... */
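Completing the loop shown in this hunk, the marker-based pattern pairs read_seqbegin_or_lock() with need_seqretry() and done_seqretry(); after a locking second pass need_seqretry() returns false, so the loop exits without further retries. A sketch, using the same foo_seqlock as above::

    /* marker: becomes 2 (even) on the first pass, odd afterwards */
    int seq = 1;

    do {
            seq++;
            read_seqbegin_or_lock(&foo_seqlock, &seq);

            /* ... [[read-side critical section]] ... */

    } while (need_seqretry(&foo_seqlock, seq));

    /* releases the lock if the second, locking pass was taken */
    done_seqretry(&foo_seqlock, seq);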