-rw-r--r--  kernel/locking/mutex.c           19
-rw-r--r--  kernel/locking/rwsem-spinlock.c  18
-rw-r--r--  kernel/locking/rwsem-xadd.c       7
-rw-r--r--  kernel/locking/semaphore.c        7
4 files changed, 22 insertions, 29 deletions
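
The whole change is one mechanical substitution: each function below used to cache current in a task/tsk local, and the patch drops the local in favour of reading current directly. A minimal sketch of the before/after shape; foo_waiter and the helper names here are made up for illustration and are not from the patch:

#include <linux/sched.h>	/* current, set_task_state() */

/* Illustrative only: foo_waiter and these helpers are hypothetical. */
struct foo_waiter {
	struct task_struct *task;
};

/* Before: current cached in an on-stack local, as the old code did. */
static void foo_prepare_wait_before(struct foo_waiter *w)
{
	struct task_struct *task = current;

	w->task = task;
	set_task_state(task, TASK_UNINTERRUPTIBLE);
}

/* After: read current directly; one local and one line fewer, with
 * essentially the same generated code on architectures where current
 * is a register or a single per-CPU load. */
static void foo_prepare_wait_after(struct foo_waiter *w)
{
	w->task = current;
	set_task_state(current, TASK_UNINTERRUPTIBLE);
}
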
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 9b349619f431..4c7d04362c95 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -622,7 +622,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		    struct lockdep_map *nest_lock, unsigned long ip,
 		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
-	struct task_struct *task = current;
 	struct mutex_waiter waiter;
 	unsigned long flags;
 	bool first = false;
@@ -656,18 +655,18 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		goto skip_wait;
 
 	debug_mutex_lock_common(lock, &waiter);
-	debug_mutex_add_waiter(lock, &waiter, task);
+	debug_mutex_add_waiter(lock, &waiter, current);
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
 	list_add_tail(&waiter.list, &lock->wait_list);
-	waiter.task = task;
+	waiter.task = current;
 
 	if (__mutex_waiter_is_first(lock, &waiter))
 		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
 
 	lock_contended(&lock->dep_map, ip);
 
-	set_task_state(task, state);
+	set_task_state(current, state);
 	for (;;) {
 		/*
 		 * Once we hold wait_lock, we're serialized against
@@ -683,7 +682,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * wait_lock. This ensures the lock cancellation is ordered
 		 * against mutex_unlock() and wake-ups do not go missing.
 		 */
-		if (unlikely(signal_pending_state(state, task))) {
+		if (unlikely(signal_pending_state(state, current))) {
 			ret = -EINTR;
 			goto err;
 		}
@@ -702,7 +701,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
 		}
 
-		set_task_state(task, state);
+		set_task_state(current, state);
 		/*
 		 * Here we order against unlock; we must either see it change
 		 * state back to RUNNING and fall through the next schedule(),
@@ -716,9 +715,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	}
 	spin_lock_mutex(&lock->wait_lock, flags);
 acquired:
-	__set_task_state(task, TASK_RUNNING);
+	__set_task_state(current, TASK_RUNNING);
 
-	mutex_remove_waiter(lock, &waiter, task);
+	mutex_remove_waiter(lock, &waiter, current);
 	if (likely(list_empty(&lock->wait_list)))
 		__mutex_clear_flag(lock, MUTEX_FLAGS);
 
@@ -736,8 +735,8 @@ skip_wait:
 	return 0;
 
 err:
-	__set_task_state(task, TASK_RUNNING);
-	mutex_remove_waiter(lock, &waiter, task);
+	__set_task_state(current, TASK_RUNNING);
+	mutex_remove_waiter(lock, &waiter, current);
 	spin_unlock_mutex(&lock->wait_lock, flags);
 	debug_mutex_free_waiter(&waiter);
 	mutex_release(&lock->dep_map, 1, ip);
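
Nothing in the memory-ordering story changes here: in this era of the kernel, set_task_state(current, state) and set_current_state(state) expand to the same smp_store_mb(), and the __-prefixed forms to a plain store. Roughly, simplified from the include/linux/sched.h of the time (debug variants omitted):

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	smp_store_mb((tsk)->state, (state_value))

#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	smp_store_mb(current->state, (state_value))

With every caller now passing current explicitly, the set_task_state() pair becomes redundant, which is what lets a follow-up cleanup remove it in favour of set_current_state().
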
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 1591f6b3539f..60d15d3bb2a8 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -128,7 +128,6 @@ __rwsem_wake_one_writer(struct rw_semaphore *sem)
 void __sched __down_read(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
-	struct task_struct *tsk;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
@@ -140,13 +139,12 @@ void __sched __down_read(struct rw_semaphore *sem)
 		goto out;
 	}
 
-	tsk = current;
-	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	set_task_state(current, TASK_UNINTERRUPTIBLE);
 
 	/* set up my own style of waitqueue */
-	waiter.task = tsk;
+	waiter.task = current;
 	waiter.type = RWSEM_WAITING_FOR_READ;
-	get_task_struct(tsk);
+	get_task_struct(current);
 
 	list_add_tail(&waiter.list, &sem->wait_list);
 
@@ -158,10 +156,10 @@ void __sched __down_read(struct rw_semaphore *sem)
 		if (!waiter.task)
 			break;
 		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		set_task_state(current, TASK_UNINTERRUPTIBLE);
 	}
 
-	__set_task_state(tsk, TASK_RUNNING);
+	__set_task_state(current, TASK_RUNNING);
  out:
 	;
 }
@@ -194,15 +192,13 @@ int __down_read_trylock(struct rw_semaphore *sem)
 int __sched __down_write_common(struct rw_semaphore *sem, int state)
 {
 	struct rwsem_waiter waiter;
-	struct task_struct *tsk;
 	unsigned long flags;
 	int ret = 0;
 
 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* set up my own style of waitqueue */
-	tsk = current;
-	waiter.task = tsk;
+	waiter.task = current;
 	waiter.type = RWSEM_WAITING_FOR_WRITE;
 	list_add_tail(&waiter.list, &sem->wait_list);
 
@@ -220,7 +216,7 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state)
 			ret = -EINTR;
 			goto out;
 		}
-		set_task_state(tsk, state);
+		set_task_state(current, state);
 		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 		schedule();
 		raw_spin_lock_irqsave(&sem->wait_lock, flags);
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 631506004f9e..d3b819c7f07f 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -224,10 +224,9 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 {
 	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
 	struct rwsem_waiter waiter;
-	struct task_struct *tsk = current;
 	DEFINE_WAKE_Q(wake_q);
 
-	waiter.task = tsk;
+	waiter.task = current;
 	waiter.type = RWSEM_WAITING_FOR_READ;
 
 	raw_spin_lock_irq(&sem->wait_lock);
@@ -254,13 +253,13 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 
 	/* wait to be given the lock */
 	while (true) {
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		set_task_state(current, TASK_UNINTERRUPTIBLE);
 		if (!waiter.task)
 			break;
 		schedule();
 	}
 
-	__set_task_state(tsk, TASK_RUNNING);
+	__set_task_state(current, TASK_RUNNING);
 	return sem;
 }
 EXPORT_SYMBOL(rwsem_down_read_failed);
diff --git a/kernel/locking/semaphore.c b/kernel/locking/semaphore.c
index b8120abe594b..29aac9f9e5e4 100644
--- a/kernel/locking/semaphore.c
+++ b/kernel/locking/semaphore.c
@@ -204,19 +204,18 @@ struct semaphore_waiter {
 static inline int __sched __down_common(struct semaphore *sem, long state,
 								long timeout)
 {
-	struct task_struct *task = current;
 	struct semaphore_waiter waiter;
 
 	list_add_tail(&waiter.list, &sem->wait_list);
-	waiter.task = task;
+	waiter.task = current;
 	waiter.up = false;
 
 	for (;;) {
-		if (signal_pending_state(state, task))
+		if (signal_pending_state(state, current))
 			goto interrupted;
 		if (unlikely(timeout <= 0))
 			goto timed_out;
-		__set_task_state(task, state);
+		__set_task_state(current, state);
 		raw_spin_unlock_irq(&sem->lock);
 		timeout = schedule_timeout(timeout);
 		raw_spin_lock_irq(&sem->lock);
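
Caching current buys nothing because on most architectures current is already a single register or per-CPU load, so the local only costs stack space and a line of code. As a rough sketch of how cheap it is, x86-64 of this era defines it along these lines (simplified from arch/x86/include/asm/current.h; details vary per architecture and version):

#include <asm/percpu.h>

struct task_struct;

DECLARE_PER_CPU(struct task_struct *, current_task);

static __always_inline struct task_struct *get_current(void)
{
	/* a single per-CPU load, typically one mov instruction */
	return this_cpu_read_stable(current_task);
}

#define current get_current()
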