author     Linus Torvalds <torvalds@linux-foundation.org>  2020-07-23 12:33:41 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-08-02 20:39:44 -0700
commit     c6fe44d96fc1536af5b11cd859686453d1b7bfd1 (patch)
tree       189b4815c4bc68a6bd65c833f6b38cc7ca4088fb
parent     2a9127fcf2296674d58024f83981f40b128fffea (diff)
list: add "list_del_init_careful()" to go with "list_empty_careful()"
That gives us ordering guarantees around the pair.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
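In short: list_del_init_careful() publishes the emptied entry with an
smp_store_release() of entry->next, and list_empty_careful() reads that
pointer with smp_load_acquire(). Release/acquire pairing means that once
an observer sees the entry as empty, it also sees every store the
deleting CPU made beforehand. A minimal sketch of the intended pairing
(struct my_waiter and its done flag are illustrative, not part of this
commit):

#include <linux/list.h>

struct my_waiter {
	struct list_head entry;
	int done;		/* payload published by the waker */
};

/*
 * Waker: write the payload, then delete the entry.  The release inside
 * list_del_init_careful() orders the ->done store before the store
 * that makes the entry read as empty.
 */
static void waker_side(struct my_waiter *w)
{
	w->done = 1;
	list_del_init_careful(&w->entry);
}

/*
 * Waiter: the acquire inside list_empty_careful() pairs with that
 * release, so an entry that reads as empty guarantees ->done is seen
 * as 1.
 */
static bool waiter_side(struct my_waiter *w)
{
	return list_empty_careful(&w->entry) && w->done;
}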
-rw-r--r--  include/linux/list.h   20
-rw-r--r--  kernel/sched/wait.c     2
-rw-r--r--  mm/filemap.c            7
3 files changed, 21 insertions(+), 8 deletions(-)
diff --git a/include/linux/list.h b/include/linux/list.h
index aff44d34f4e4..0d0d17a10d25 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -283,6 +283,24 @@ static inline int list_empty(const struct list_head *head)
}

/**
+ * list_del_init_careful - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ *
+ * This is the same as list_del_init(), except designed to be used
+ * together with list_empty_careful() in a way to guarantee ordering
+ * of other memory operations.
+ *
+ * Any memory operations done before a list_del_init_careful() are
+ * guaranteed to be visible after a list_empty_careful() test.
+ */
+static inline void list_del_init_careful(struct list_head *entry)
+{
+	__list_del_entry(entry);
+	entry->prev = entry;
+	smp_store_release(&entry->next, entry);
+}
+
+/**
 * list_empty_careful - tests whether a list is empty and not being modified
 * @head: the list to test
 *
@@ -297,7 +315,7 @@ static inline int list_empty(const struct list_head *head)
*/
static inline int list_empty_careful(const struct list_head *head)
{
-	struct list_head *next = head->next;
+	struct list_head *next = smp_load_acquire(&head->next);

	return (next == head) && (next == head->prev);
}
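Note the asymmetry in list_empty_careful(): only ->next is read with
acquire semantics, while the ->prev comparison stays a plain load that
merely double-checks no other CPU is mid-update. Reduced to its
memory-model essentials, the pairing is the classic release/acquire
litmus test (data, r, and e are hypothetical, not from the patch):

/*
 * CPU 0 (deleter)                     CPU 1 (observer)
 *
 * data = 1;
 * smp_store_release(&e->next, e);
 *                                     if (smp_load_acquire(&e->next) == e)
 *                                             r = data;
 *
 * Once CPU 1 takes the branch, r == 1 is guaranteed.
 */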
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index ba059fbfc53a..01f5d3020589 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -389,7 +389,7 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
-		list_del_init(&wq_entry->entry);
+		list_del_init_careful(&wq_entry->entry);

	return ret;
}
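The consumer of this guarantee is finish_wait(), which skips taking the
waitqueue lock when the entry already reads as empty. An abridged sketch
of that function follows (the real code lives in kernel/sched/wait.c;
this is a simplification, not the verbatim source):

#include <linux/sched.h>
#include <linux/wait.h>

void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * Unlocked check: if the waker already ran
	 * list_del_init_careful(), the acquire inside
	 * list_empty_careful() also makes everything the waker did
	 * beforehand visible to us.
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}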
diff --git a/mm/filemap.c b/mm/filemap.c
index 8c3d3e233d37..991503bbf922 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1041,13 +1041,8 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync,
	 * since after list_del_init(&wait->entry) the wait entry
	 * might be de-allocated and the process might even have
	 * exited.
-	 *
-	 * We _really_ should have a "list_del_init_careful()" to
-	 * properly pair with the unlocked "list_empty_careful()"
-	 * in finish_wait().
	 */
-	smp_mb();
-	list_del_init(&wait->entry);
+	list_del_init_careful(&wait->entry);
	return ret;
}
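The reason the deletion must be the absolute last access is lifetime as
much as ordering: the wait entry lives on the waiter's stack. A
hypothetical waiter illustrating the hazard (the names here are
illustrative; the real waiter is wait_on_page_bit_common() in
mm/filemap.c):

#include <linux/pagemap.h>
#include <linux/wait.h>

static void wait_for_page(struct wait_queue_head *q, struct page *page)
{
	struct wait_page_queue wait_page;	/* stack-allocated entry */

	init_wait(&wait_page.wait);
	/* ... queue wait_page and sleep until wake_page_function() fires ... */
	finish_wait(q, &wait_page.wait);	/* unlocked list_empty_careful() */
}
/*
 * wait_page is gone the moment this returns, so the waker must not
 * touch the entry after list_del_init_careful() -- hence "the absolute
 * last thing we do" in the comment above.
 */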