author    | Linus Torvalds <torvalds@linux-foundation.org> | 2013-04-29 08:21:37 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-04-29 08:21:37 -0700
commit    | 916bb6d76dfa49b540baa3f7262792d1de7f1c24 (patch)
tree      | 8548db9da607ad8616125130969e7c2165ad00d5 /kernel/sched
parent    | d0b8883800c913f5cc0eb273c052bcac94ad44d8 (diff)
parent    | 2c522836627c6e78660f8bd52cdb4cdcb75e3e3c (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking changes from Ingo Molnar:
"The most noticeable change are mutex speedups from Waiman Long, for
higher loads. These scalability changes should be most noticeable on
larger server systems.
There are also cleanups, fixes and debuggability improvements."
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
lockdep: Consolidate bug messages into a single print_lockdep_off() function
lockdep: Print out additional debugging advice when we hit lockdep BUGs
mutex: Back out architecture specific check for negative mutex count
mutex: Queue mutex spinners with MCS lock to reduce cacheline contention
mutex: Make more scalable by doing less atomic operations
mutex: Move mutex spinning code from sched/core.c back to mutex.c
locking/rtmutex/tester: Set correct permissions on sysfs files
lockdep: Remove unnecessary 'hlock_next' variable
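
Of the commits above, "mutex: Queue mutex spinners with MCS lock to reduce cacheline contention" carries the main scalability idea: rather than every spinner polling the mutex's lock word, each waiter enqueues a node of its own and spins on a flag inside that node, so a lock handoff touches one waiter's cacheline instead of bouncing a shared line across all CPUs. Below is a minimal userspace sketch of an MCS-style queue lock using C11 atomics; the names (mcs_lock, mcs_node, and the functions) are illustrative, not the kernel's mcs_spinlock API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

/* One queue node per spinning waiter; each waiter spins only on its
 * own 'locked' flag, which is the whole point of the MCS scheme. */
struct mcs_node {
	_Atomic(struct mcs_node *) next;
	atomic_bool locked;
};

struct mcs_lock {
	_Atomic(struct mcs_node *) tail;	/* last waiter in the queue */
};

static void mcs_lock_acquire(struct mcs_lock *lock, struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
	atomic_store_explicit(&node->locked, true, memory_order_relaxed);

	/* Atomically append ourselves as the new tail. */
	prev = atomic_exchange_explicit(&lock->tail, node, memory_order_acq_rel);
	if (!prev)
		return;	/* queue was empty: we own the lock */

	/* Link in behind the old tail, then spin on our private flag. */
	atomic_store_explicit(&prev->next, node, memory_order_release);
	while (atomic_load_explicit(&node->locked, memory_order_acquire))
		;	/* the kernel would call cpu_relax() here */
}

static void mcs_lock_release(struct mcs_lock *lock, struct mcs_node *node)
{
	struct mcs_node *next =
		atomic_load_explicit(&node->next, memory_order_acquire);

	if (!next) {
		/* No successor visible: try to swing the tail back to empty. */
		struct mcs_node *expected = node;
		if (atomic_compare_exchange_strong_explicit(
				&lock->tail, &expected, NULL,
				memory_order_acq_rel, memory_order_acquire))
			return;
		/* A successor is mid-enqueue; wait for its link to appear. */
		while (!(next = atomic_load_explicit(&node->next,
						     memory_order_acquire)))
			;
	}
	/* Hand off by clearing the successor's private flag. */
	atomic_store_explicit(&next->locked, false, memory_order_release);
}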
Diffstat (limited to 'kernel/sched')
-rw-r--r-- | kernel/sched/core.c     | 45 |
-rw-r--r-- | kernel/sched/features.h |  7 |
2 files changed, 0 insertions, 52 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 67d04651f44b..42053547e0f5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2999,51 +2999,6 @@ void __sched schedule_preempt_disabled(void)
 	preempt_disable();
 }
 
-#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-
-static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
-{
-	if (lock->owner != owner)
-		return false;
-
-	/*
-	 * Ensure we emit the owner->on_cpu, dereference _after_ checking
-	 * lock->owner still matches owner, if that fails, owner might
-	 * point to free()d memory, if it still matches, the rcu_read_lock()
-	 * ensures the memory stays valid.
-	 */
-	barrier();
-
-	return owner->on_cpu;
-}
-
-/*
- * Look out! "owner" is an entirely speculative pointer
- * access and not reliable.
- */
-int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
-{
-	if (!sched_feat(OWNER_SPIN))
-		return 0;
-
-	rcu_read_lock();
-	while (owner_running(lock, owner)) {
-		if (need_resched())
-			break;
-
-		arch_mutex_cpu_relax();
-	}
-	rcu_read_unlock();
-
-	/*
-	 * We break out the loop above on need_resched() and when the
-	 * owner changed, which is a sign for heavy contention. Return
-	 * success only when lock->owner is NULL.
-	 */
-	return lock->owner == NULL;
-}
-#endif
-
 #ifdef CONFIG_PREEMPT
 /*
  * this is the entry point to schedule() from in-kernel preemption
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 1ad1d2b5395f..99399f8e4799 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -46,13 +46,6 @@ SCHED_FEAT(DOUBLE_TICK, false)
 SCHED_FEAT(LB_BIAS, true)
 
 /*
- * Spin-wait on mutex acquisition when the mutex owner is running on
- * another cpu -- assumes that when the owner is running, it will soon
- * release the lock. Decreases scheduling overhead.
- */
-SCHED_FEAT(OWNER_SPIN, true)
-
-/*
  * Decrement CPU power based on time not spent running tasks
  */
 SCHED_FEAT(NONTASK_POWER, true)
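
"mutex: Make more scalable by doing less atomic operations", also in the list above, relies on a cheaper trick: peek at the mutex count with a plain load before issuing an atomic read-modify-write, so CPUs that cannot possibly win stop pulling the lock word's cacheline into exclusive state just to fail. A toy version of that pattern, with a hypothetical toy_mutex type and C11 atomics standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdbool.h>

struct toy_mutex {
	atomic_int count;	/* 1: unlocked, 0: locked, <0: locked, waiters */
};

static bool toy_mutex_trylock(struct toy_mutex *m)
{
	int expected = 1;

	/* Cheap relaxed read first: if the mutex is visibly taken, skip
	 * the atomic RMW that would bounce the cacheline for nothing. */
	if (atomic_load_explicit(&m->count, memory_order_relaxed) != 1)
		return false;

	/* Only now pay for the real atomic acquire attempt. */
	return atomic_compare_exchange_strong_explicit(
			&m->count, &expected, 0,
			memory_order_acquire, memory_order_relaxed);
}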