author     Linus Torvalds <torvalds@linux-foundation.org>  2015-08-14 17:05:26 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-08-14 17:05:26 -0700
commit     8394a1b7151c54c6be66551a0ca1041b398029b6 (patch)
tree       3113992bcc202cc17ea579dbcf3ac6bb0e4cdf15 /ipc/sem.c
parent     fbd9163f1c89f9591c03981bdb2d1de7f6e27c11 (diff)
parent     7f11c47605cbe7cb76fd2f8607f452d4afe919f5 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton:
"11 fixes"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
Update maintainers for DRM STI driver
mm: cma: mark cma_bitmap_maxno() inline in header
zram: fix pool name truncation
memory-hotplug: fix wrong edge when hot add a new node
.mailmap: Andrey Ryabinin has moved
ipc/sem.c: update/correct memory barriers
mm/hwpoison: fix panic due to split huge zero page
ipc,sem: remove uneeded sem_undo_list lock usage in exit_sem()
ipc,sem: fix use after free on IPC_RMID after a task using same semaphore set exits
mm/hwpoison: fix fail isolate hugetlbfs page w/ refcount held
mm/hwpoison: fix page refcount of unknown non LRU page
Diffstat (limited to 'ipc/sem.c')
-rw-r--r-- | ipc/sem.c | 47 |
1 file changed, 35 insertions, 12 deletions
diff --git a/ipc/sem.c b/ipc/sem.c
index bc3d530cb23e..b471e5a3863d 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head)
 }
 
 /*
+ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
+ * are only control barriers.
+ * The code must pair with spin_unlock(&sem->lock) or
+ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
+ *
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+ */
+#define ipc_smp_acquire__after_spin_is_unlocked()	smp_rmb()
+
+/*
  * Wait until all currently ongoing simple ops have completed.
  * Caller must own sem_perm.lock.
  * New simple ops cannot start, because simple ops first check
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
         sem = sma->sem_base + i;
         spin_unlock_wait(&sem->lock);
     }
+    ipc_smp_acquire__after_spin_is_unlocked();
 }
 
 /*
@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
         /* Then check that the global lock is free */
         if (!spin_is_locked(&sma->sem_perm.lock)) {
             /*
-             * The ipc object lock check must be visible on all
-             * cores before rechecking the complex count. Otherwise
-             * we can race with another thread that does:
+             * We need a memory barrier with acquire semantics,
+             * otherwise we can race with another thread that does:
              *    complex_count++;
              *    spin_unlock(sem_perm.lock);
              */
-            smp_rmb();
+            ipc_smp_acquire__after_spin_is_unlocked();
 
             /*
              * Now repeat the test of complex_count:
@@ -2074,17 +2084,28 @@ void exit_sem(struct task_struct *tsk)
         rcu_read_lock();
         un = list_entry_rcu(ulp->list_proc.next,
                     struct sem_undo, list_proc);
-        if (&un->list_proc == &ulp->list_proc)
-            semid = -1;
-        else
-            semid = un->semid;
+        if (&un->list_proc == &ulp->list_proc) {
+            /*
+             * We must wait for freeary() before freeing this ulp,
+             * in case we raced with last sem_undo. There is a small
+             * possibility where we exit while freeary() didn't
+             * finish unlocking sem_undo_list.
+             */
+            spin_unlock_wait(&ulp->lock);
+            rcu_read_unlock();
+            break;
+        }
+        spin_lock(&ulp->lock);
+        semid = un->semid;
+        spin_unlock(&ulp->lock);
 
+        /* exit_sem raced with IPC_RMID, nothing to do */
         if (semid == -1) {
             rcu_read_unlock();
-            break;
+            continue;
         }
 
-        sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
+        sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
         /* exit_sem raced with IPC_RMID, nothing to do */
         if (IS_ERR(sma)) {
             rcu_read_unlock();
@@ -2112,9 +2133,11 @@ void exit_sem(struct task_struct *tsk)
         ipc_assert_locked_object(&sma->sem_perm);
         list_del(&un->list_id);
 
-        spin_lock(&ulp->lock);
+        /* we are the last process using this ulp, acquiring ulp->lock
+         * isn't required. Besides that, we are also protected against
+         * IPC_RMID as we hold sma->sem_perm lock now
+         */
         list_del_rcu(&un->list_proc);
-        spin_unlock(&ulp->lock);
 
         /* perform adjustments registered in un */
         for (i = 0; i < sma->sem_nsems; i++) {
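
The exit_sem() portion of the diff closes a use-after-free between a task exiting with pending SEM_UNDO adjustments and another task deleting the same semaphore set with IPC_RMID. As a rough illustration only (not part of the patch series; the program and its names are invented for this page), a userspace pattern that exercises that kernel path might look like the following sketch:

/* Hypothetical userspace sketch: SEM_UNDO on exit racing with IPC_RMID. */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* On Linux the caller must define union semun itself; see semctl(2). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
};

int main(void)
{
    struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
    union semun arg = { .val = 1 };
    pid_t pid;

    /* Private set with one semaphore, initialised to 1. */
    int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
    if (semid < 0) {
        perror("semget");
        return 1;
    }
    if (semctl(semid, 0, SETVAL, arg) < 0) {
        perror("semctl SETVAL");
        return 1;
    }

    pid = fork();
    if (pid == 0) {
        /*
         * Child: take the semaphore with SEM_UNDO and exit immediately.
         * On exit, the kernel's exit_sem() rolls the adjustment back;
         * that is the path that could race with IPC_RMID.
         */
        if (semop(semid, &op, 1) < 0)
            perror("semop");
        _exit(0);
    }

    /* Parent: remove the set while the child may still be exiting. */
    if (semctl(semid, 0, IPC_RMID) < 0)
        perror("semctl IPC_RMID");
    waitpid(pid, NULL, 0);
    return 0;
}

Whether any single run hits the original race is timing-dependent; the sketch only shows the SEM_UNDO plus IPC_RMID combination that exit_sem() has to handle safely.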