commit 151647ab581013b482893d4e2218cd29b005cd6b
Merge: 923a327e8f2257ab7cd5485cb5d8db92c965dfca e81859fe64ad42dccefe134d1696e0635f78d763
Author:    Linus Torvalds <torvalds@linux-foundation.org>  2024-07-16 16:42:37 -0700
Committer: Linus Torvalds <torvalds@linux-foundation.org>  2024-07-16 16:42:37 -0700
Tree:      79ce01c0e430047f0adde58f065ad696f6121af3 /kernel
Merge tag 'locking-core-2024-07-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
- Jump label fixes, including a perf events fix that originally
manifested as jump label failures, but was a serialization bug at the
usage site
- Mark down_write*() helpers as __always_inline, to improve WCHAN
debuggability
- Misc cleanups and fixes
* tag 'locking-core-2024-07-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
locking/rwsem: Add __always_inline annotation to __down_write_common() and inlined callers
jump_label: Simplify and clarify static_key_fast_inc_cpus_locked()
jump_label: Clarify condition in static_key_fast_inc_not_disabled()
jump_label: Fix concurrency issues in static_key_slow_dec()
perf/x86: Serialize set_attr_rdpmc()
cleanup: Standardize the header guard define's name
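
The common thread in the jump_label commits above is a bounded atomic counter: key->enabled may only be incremented while it is positive and below INT_MAX, because the negative range is reserved as an "update in progress" marker and the count must never wrap into it. Below is a minimal user-space sketch of that increment pattern, using C11 atomics rather than the kernel's atomic_t API; the standalone counter and the name fast_inc_not_disabled() are illustrative stand-ins, not kernel code.

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for key->enabled; <= 0 means disabled or update-in-progress. */
static atomic_int enabled = 1;

static bool fast_inc_not_disabled(void)
{
	int v = atomic_load(&enabled);

	do {
		/* Refuse when disabled/mid-update (<= 0) or saturated. */
		if (v <= 0 || v == INT_MAX)
			return false;
		/* On failure the expected value v is reloaded for a retry. */
	} while (!atomic_compare_exchange_weak(&enabled, &v, v + 1));

	return true;
}

int main(void)
{
	printf("inc ok=%d count=%d\n", fast_inc_not_disabled(),
	       atomic_load(&enabled));
	return 0;
}

Note that the explicit v == INT_MAX test in the new kernel code replaces the old (v + 1) < 0 check, which depended on signed wraparound; in standard C that wraparound is undefined behavior, which is one reason the sketch keeps the explicit comparison.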
Diffstat (limited to 'kernel')

-rw-r--r--	kernel/jump_label.c	74
-rw-r--r--	kernel/locking/rwsem.c	6

2 files changed, 49 insertions, 31 deletions
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 3218fa5688b9..4ad5ed8adf96 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -131,13 +131,16 @@ bool static_key_fast_inc_not_disabled(struct static_key *key)
 	STATIC_KEY_CHECK_USE(key);
 	/*
 	 * Negative key->enabled has a special meaning: it sends
-	 * static_key_slow_inc() down the slow path, and it is non-zero
-	 * so it counts as "enabled" in jump_label_update(). Note that
-	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
+	 * static_key_slow_inc/dec() down the slow path, and it is non-zero
+	 * so it counts as "enabled" in jump_label_update().
+	 *
+	 * The INT_MAX overflow condition is either used by the networking
+	 * code to reset or detected in the slow path of
+	 * static_key_slow_inc_cpuslocked().
 	 */
 	v = atomic_read(&key->enabled);
 	do {
-		if (v <= 0 || (v + 1) < 0)
+		if (v <= 0 || v == INT_MAX)
 			return false;
 	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
 
@@ -150,7 +153,7 @@ bool static_key_slow_inc_cpuslocked(struct static_key *key)
 	lockdep_assert_cpus_held();
 
 	/*
-	 * Careful if we get concurrent static_key_slow_inc() calls;
+	 * Careful if we get concurrent static_key_slow_inc/dec() calls;
 	 * later calls must wait for the first one to _finish_ the
 	 * jump_label_update() process. At the same time, however,
 	 * the jump_label_update() call below wants to see
@@ -159,22 +162,24 @@ bool static_key_slow_inc_cpuslocked(struct static_key *key)
 	if (static_key_fast_inc_not_disabled(key))
 		return true;
 
-	jump_label_lock();
-	if (atomic_read(&key->enabled) == 0) {
-		atomic_set(&key->enabled, -1);
+	guard(mutex)(&jump_label_mutex);
+	/* Try to mark it as 'enabling in progress. */
+	if (!atomic_cmpxchg(&key->enabled, 0, -1)) {
 		jump_label_update(key);
 		/*
-		 * Ensure that if the above cmpxchg loop observes our positive
-		 * value, it must also observe all the text changes.
+		 * Ensure that when static_key_fast_inc_not_disabled() or
+		 * static_key_slow_try_dec() observe the positive value,
+		 * they must also observe all the text changes.
 		 */
 		atomic_set_release(&key->enabled, 1);
 	} else {
-		if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key))) {
-			jump_label_unlock();
+		/*
+		 * While holding the mutex this should never observe
+		 * anything else than a value >= 1 and succeed
+		 */
+		if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key)))
 			return false;
-		}
 	}
-	jump_label_unlock();
 	return true;
 }
 
@@ -247,20 +252,32 @@ EXPORT_SYMBOL_GPL(static_key_disable);
 
 static bool static_key_slow_try_dec(struct static_key *key)
 {
-	int val;
-
-	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
-	if (val == 1)
-		return false;
+	int v;
 
 	/*
-	 * The negative count check is valid even when a negative
-	 * key->enabled is in use by static_key_slow_inc(); a
-	 * __static_key_slow_dec() before the first static_key_slow_inc()
-	 * returns is unbalanced, because all other static_key_slow_inc()
-	 * instances block while the update is in progress.
+	 * Go into the slow path if key::enabled is less than or equal than
+	 * one. One is valid to shut down the key, anything less than one
+	 * is an imbalance, which is handled at the call site.
+	 *
+	 * That includes the special case of '-1' which is set in
+	 * static_key_slow_inc_cpuslocked(), but that's harmless as it is
+	 * fully serialized in the slow path below. By the time this task
+	 * acquires the jump label lock the value is back to one and the
+	 * retry under the lock must succeed.
 	 */
-	WARN(val < 0, "jump label: negative count!\n");
+	v = atomic_read(&key->enabled);
+	do {
+		/*
+		 * Warn about the '-1' case though; since that means a
+		 * decrement is concurrent with a first (0->1) increment. IOW
+		 * people are trying to disable something that wasn't yet fully
+		 * enabled. This suggests an ordering problem on the user side.
+		 */
+		WARN_ON_ONCE(v < 0);
+		if (v <= 1)
+			return false;
+	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));
+
 	return true;
 }
 
@@ -271,10 +288,11 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key)
 	if (static_key_slow_try_dec(key))
 		return;
 
-	jump_label_lock();
-	if (atomic_dec_and_test(&key->enabled))
+	guard(mutex)(&jump_label_mutex);
+	if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
 		jump_label_update(key);
-	jump_label_unlock();
+	else
+		WARN_ON_ONCE(!static_key_slow_try_dec(key));
 }
 
 static void __static_key_slow_dec(struct static_key *key)
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index c6d17aee4209..33cac79e3994 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -1297,7 +1297,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 /*
  * lock for writing
  */
-static inline int __down_write_common(struct rw_semaphore *sem, int state)
+static __always_inline int __down_write_common(struct rw_semaphore *sem, int state)
 {
 	int ret = 0;
 
@@ -1310,12 +1310,12 @@ static inline int __down_write_common(struct rw_semaphore *sem, int state)
 	return ret;
 }
 
-static inline void __down_write(struct rw_semaphore *sem)
+static __always_inline void __down_write(struct rw_semaphore *sem)
 {
 	__down_write_common(sem, TASK_UNINTERRUPTIBLE);
 }
 
-static inline int __down_write_killable(struct rw_semaphore *sem)
+static __always_inline int __down_write_killable(struct rw_semaphore *sem)
 {
 	return __down_write_common(sem, TASK_KILLABLE);
 }
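
The decrement side is the mirror image of the increment: the lockless fast path in static_key_slow_try_dec() only lowers the count while the result stays at or above one, and the final 1 -> 0 transition, plus any imbalance detected meanwhile, is resolved under jump_label_mutex. A rough user-space sketch of that two-stage scheme, with C11 atomics and a pthread mutex standing in for jump_label_mutex (compile with -pthread; the names and the patching step are placeholders, not kernel code):

#include <limits.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int enabled = 2;
static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

/* Fast path: decrement only while the result stays >= 1. */
static bool slow_try_dec(void)
{
	int v = atomic_load(&enabled);

	do {
		/* A negative count means a dec raced with the first 0->1 inc. */
		if (v < 0)
			fprintf(stderr, "warn: negative count\n");
		if (v <= 1)
			return false;
	} while (!atomic_compare_exchange_weak(&enabled, &v, v - 1));

	return true;
}

/* Slow path: only the 1 -> 0 transition may patch, serialized by the lock. */
static void slow_dec(void)
{
	int one = 1;

	if (slow_try_dec())
		return;

	pthread_mutex_lock(&update_lock);
	if (atomic_compare_exchange_strong(&enabled, &one, 0)) {
		/* Here the kernel would call jump_label_update(). */
	} else if (!slow_try_dec()) {
		fprintf(stderr, "warn: unbalanced disable\n");
	}
	pthread_mutex_unlock(&update_lock);
}

int main(void)
{
	slow_dec();		/* 2 -> 1 via the fast path */
	slow_dec();		/* 1 -> 0 under the mutex */
	printf("count=%d\n", atomic_load(&enabled));
	return 0;
}

The rwsem hunk needs no such machinery: __down_write_common() and its two thin wrappers are marked __always_inline so the compiler can never emit them out of line, which would otherwise let a blocked task's WCHAN report the shared helper instead of a meaningful caller.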