author     Thomas Gleixner <tglx@linutronix.de>   2010-02-24 09:54:54 +0100
committer  Thomas Gleixner <tglx@linutronix.de>   2010-02-24 12:10:52 +0100
commit     a359d62fa46b9c41621fec1c3b31730cfc436b40 (patch)
tree       bfc426dd361565ab75e3a58084a32236b6a89d78
parent     d0ce166dd4c76b1eb484aa68f1cddaafdeef9238 (diff)
rwsem: Make inner lock raw
There is no reason to convert the lock protecting rwsems (the ownerless
variant) to a sleeping spinlock on -rt. Convert it to raw.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
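The change is one mechanical pattern applied everywhere: the rwsem's internal
wait_lock field becomes a raw_spinlock_t, and its initialization and
lock/unlock calls switch to the raw_spin_*() variants, so that on PREEMPT_RT,
where an ordinary spinlock_t turns into a sleeping lock, this inner lock keeps
spinning. Below is a minimal sketch of that pattern; "example_rwsem" and its
helpers are made-up names for illustration, not the kernel's rw_semaphore.

/*
 * Illustrative sketch only: shows the before/after pattern this patch
 * applies to the real rwsem structures and helpers.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

struct example_rwsem {
	long			count;
	raw_spinlock_t		wait_lock;	/* was: spinlock_t wait_lock; */
	struct list_head	wait_list;
};

static void example_rwsem_init(struct example_rwsem *sem)
{
	sem->count = 0;
	raw_spin_lock_init(&sem->wait_lock);	/* was: spin_lock_init() */
	INIT_LIST_HEAD(&sem->wait_list);
}

static void example_rwsem_wake(struct example_rwsem *sem)
{
	unsigned long flags;

	/* was: spin_lock_irqsave() / spin_unlock_irqrestore() */
	raw_spin_lock_irqsave(&sem->wait_lock, flags);
	if (!list_empty(&sem->wait_list)) {
		/* ... wake the first waiter, as __rwsem_do_wake() does ... */
	}
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}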
-rw-r--r--  arch/powerpc/include/asm/rwsem.h  |  8
-rw-r--r--  arch/x86/include/asm/rwsem.h      |  8
-rw-r--r--  include/linux/rwsem-spinlock.h    |  8
-rw-r--r--  lib/rwsem-spinlock.c              | 38
-rw-r--r--  lib/rwsem.c                       | 14
5 files changed, 38 insertions(+), 38 deletions(-)
diff --git a/arch/powerpc/include/asm/rwsem.h b/arch/powerpc/include/asm/rwsem.h
index c2494d42ca59..accf580c13a4 100644
--- a/arch/powerpc/include/asm/rwsem.h
+++ b/arch/powerpc/include/asm/rwsem.h
@@ -30,7 +30,7 @@ struct rw_anon_semaphore {
#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -44,7 +44,7 @@ struct rw_anon_semaphore {
#endif
#define __RWSEM_ANON_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+ { RWSEM_UNLOCKED_VALUE, __RAW_SPIN_LOCK_UNLOCKED((name).wait_lock), \
LIST_HEAD_INIT((name).wait_list) __RWSEM_ANON_DEP_MAP_INIT(name) }
#define DECLARE_ANON_RWSEM(name) \
@@ -178,7 +178,7 @@ static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
struct rw_semaphore {
/* XXX this should be able to be an atomic_t -- paulus */
signed int count;
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -192,7 +192,7 @@ struct rw_semaphore {
#endif
#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+ { RWSEM_UNLOCKED_VALUE, __RAW_SPIN_LOCK_UNLOCKED((name).wait_lock), \
LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 92d67a6e07ee..605191838e02 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -66,7 +66,7 @@ extern asmregparm struct rw_anon_semaphore *
struct rw_anon_semaphore {
signed long count;
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -81,7 +81,7 @@ struct rw_anon_semaphore {
#define __RWSEM_ANON_INITIALIZER(name) \
{ \
- RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+ RWSEM_UNLOCKED_VALUE, __RAW_SPIN_LOCK_UNLOCKED((name).wait_lock), \
LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \
}
@@ -265,7 +265,7 @@ static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
struct rw_semaphore {
signed long count;
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -279,7 +279,7 @@ struct rw_semaphore {
#endif
#define __RWSEM_INITIALIZER(name) \
-{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
+{ 0, __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
__RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index 17df0dcdbe99..6608521725f4 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -33,7 +33,7 @@ struct rwsem_waiter;
*/
struct rw_anon_semaphore {
__s32 activity;
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -47,7 +47,7 @@ struct rw_anon_semaphore {
#endif
#define __RWSEM_ANON_INITIALIZER(name) \
-{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
+{ 0, __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
__RWSEM_ANON_DEP_MAP_INIT(name) }
#define DECLARE_ANON_RWSEM(name) \
@@ -89,7 +89,7 @@ extern void __downgrade_write(struct rw_anon_semaphore *sem);
*/
struct rw_semaphore {
__s32 activity;
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -103,7 +103,7 @@ struct rw_semaphore {
#endif
#define __RWSEM_INITIALIZER(name) \
-{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
+{ 0, __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
__RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 831ecb57d0a3..4010d3282256 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -22,9 +22,9 @@ int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
int ret = 1;
unsigned long flags;
- if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
+ if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
ret = (sem->activity != 0);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
return ret;
}
@@ -44,7 +44,7 @@ void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
sem->activity = 0;
- spin_lock_init(&sem->wait_lock);
+ raw_spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_anon_rwsem);
@@ -144,12 +144,12 @@ void __sched __down_read(struct rw_anon_semaphore *sem)
struct rwsem_waiter waiter;
struct task_struct *tsk;
- spin_lock_irq(&sem->wait_lock);
+ raw_spin_lock_irq(&sem->wait_lock);
if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity++;
- spin_unlock_irq(&sem->wait_lock);
+ raw_spin_unlock_irq(&sem->wait_lock);
goto out;
}
@@ -164,7 +164,7 @@ void __sched __down_read(struct rw_anon_semaphore *sem)
list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
- spin_unlock_irq(&sem->wait_lock);
+ raw_spin_unlock_irq(&sem->wait_lock);
/* wait to be given the lock */
for (;;) {
@@ -188,7 +188,7 @@ int __down_read_trylock(struct rw_anon_semaphore *sem)
int ret = 0;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
/* granted */
@@ -196,7 +196,7 @@ int __down_read_trylock(struct rw_anon_semaphore *sem)
ret = 1;
}
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return ret;
}
@@ -210,12 +210,12 @@ void __sched __down_write_nested(struct rw_anon_semaphore *sem, int subclass)
struct rwsem_waiter waiter;
struct task_struct *tsk;
- spin_lock_irq(&sem->wait_lock);
+ raw_spin_lock_irq(&sem->wait_lock);
if (sem->activity == 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity = -1;
- spin_unlock_irq(&sem->wait_lock);
+ raw_spin_unlock_irq(&sem->wait_lock);
goto out;
}
@@ -230,7 +230,7 @@ void __sched __down_write_nested(struct rw_anon_semaphore *sem, int subclass)
list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
- spin_unlock_irq(&sem->wait_lock);
+ raw_spin_unlock_irq(&sem->wait_lock);
/* wait to be given the lock */
for (;;) {
@@ -258,7 +258,7 @@ int __down_write_trylock(struct rw_anon_semaphore *sem)
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity == 0 && list_empty(&sem->wait_list)) {
/* granted */
@@ -266,7 +266,7 @@ int __down_write_trylock(struct rw_anon_semaphore *sem)
ret = 1;
}
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return ret;
}
@@ -278,12 +278,12 @@ void __up_read(struct rw_anon_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (--sem->activity == 0 && !list_empty(&sem->wait_list))
sem = __rwsem_wake_one_writer(sem);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/*
@@ -293,13 +293,13 @@ void __up_write(struct rw_anon_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
sem->activity = 0;
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 1);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/*
@@ -310,11 +310,11 @@ void __downgrade_write(struct rw_anon_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
sem->activity = 1;
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 0);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 72eaba5e9861..47f5a751a75f 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -22,7 +22,7 @@ void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
sem->count = RWSEM_UNLOCKED_VALUE;
- spin_lock_init(&sem->wait_lock);
+ raw_spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_anon_rwsem);
@@ -155,7 +155,7 @@ rwsem_down_failed_common(struct rw_anon_semaphore *sem,
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
/* set up my own style of waitqueue */
- spin_lock_irq(&sem->wait_lock);
+ raw_spin_lock_irq(&sem->wait_lock);
waiter->task = tsk;
get_task_struct(tsk);
@@ -168,7 +168,7 @@ rwsem_down_failed_common(struct rw_anon_semaphore *sem,
if (!(count & RWSEM_ACTIVE_MASK))
sem = __rwsem_do_wake(sem, 0);
- spin_unlock_irq(&sem->wait_lock);
+ raw_spin_unlock_irq(&sem->wait_lock);
/* wait to be given the lock */
for (;;) {
@@ -219,13 +219,13 @@ asmregparm struct rw_anon_semaphore *rwsem_wake(struct rw_anon_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
/* do nothing if list empty */
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 0);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return sem;
}
@@ -240,13 +240,13 @@ rwsem_downgrade_wake(struct rw_anon_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
/* do nothing if list empty */
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 1);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return sem;
}
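The initializer macros touched in the header hunks above follow the same
substitution: __SPIN_LOCK_UNLOCKED() becomes __RAW_SPIN_LOCK_UNLOCKED(), so
statically declared semaphores also get a raw inner lock. As a hedged
illustration of what that amounts to (designated initializers used for
readability, lockdep's dep_map entry omitted, and "my_sem" is just an example
name, not anything from this patch):

static struct rw_semaphore my_sem = {
	.count		= 0,	/* RWSEM_UNLOCKED_VALUE */
	.wait_lock	= __RAW_SPIN_LOCK_UNLOCKED(my_sem.wait_lock),
	.wait_list	= LIST_HEAD_INIT(my_sem.wait_list),
};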