summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2020-07-23 22:33:41 +0300
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2023-06-28 11:18:35 +0300
commite77e5481d5bfd6dad8c00229681ba59c89668be7 (patch)
tree6b11ed9cddd15c8191f7c802114750e6bd518583
parentc32ab1c1959ab457e5eeab5c02352d8c02601243 (diff)
downloadlinux-e77e5481d5bfd6dad8c00229681ba59c89668be7.tar.xz
list: add "list_del_init_careful()" to go with "list_empty_careful()"
[ Upstream commit c6fe44d96fc1536af5b11cd859686453d1b7bfd1 ]

That gives us ordering guarantees around the pair.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Stable-dep-of: 2192bba03d80 ("epoll: ep_autoremove_wake_function should use list_del_init_careful")
Signed-off-by: Sasha Levin <sashal@kernel.org>
-rw-r--r--include/linux/list.h20
-rw-r--r--kernel/sched/wait.c2
-rw-r--r--mm/filemap.c7
3 files changed, 21 insertions, 8 deletions
diff --git a/include/linux/list.h b/include/linux/list.h
index ce19c6b632a5..231ff089f7d1 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -269,6 +269,24 @@ static inline int list_empty(const struct list_head *head)
}
/**
+ * list_del_init_careful - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ *
+ * This is the same as list_del_init(), except designed to be used
+ * together with list_empty_careful() in a way to guarantee ordering
+ * of other memory operations.
+ *
+ * Any memory operations done before a list_del_init_careful() are
+ * guaranteed to be visible after a list_empty_careful() test.
+ */
+static inline void list_del_init_careful(struct list_head *entry)
+{
+ __list_del_entry(entry);
+ entry->prev = entry;
+ smp_store_release(&entry->next, entry);
+}
+
+/**
* list_empty_careful - tests whether a list is empty and not being modified
* @head: the list to test
*
@@ -283,7 +301,7 @@ static inline int list_empty(const struct list_head *head)
*/
static inline int list_empty_careful(const struct list_head *head)
{
- struct list_head *next = head->next;
+ struct list_head *next = smp_load_acquire(&head->next);
return (next == head) && (next == head->prev);
}
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 7d668b31dbc6..c76fe1d4d91e 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -384,7 +384,7 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i
int ret = default_wake_function(wq_entry, mode, sync, key);
if (ret)
- list_del_init(&wq_entry->entry);
+ list_del_init_careful(&wq_entry->entry);
return ret;
}
diff --git a/mm/filemap.c b/mm/filemap.c
index 83b324420046..a106d63e8467 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1085,13 +1085,8 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync,
* since after list_del_init(&wait->entry) the wait entry
* might be de-allocated and the process might even have
* exited.
- *
- * We _really_ should have a "list_del_init_careful()" to
- * properly pair with the unlocked "list_empty_careful()"
- * in finish_wait().
*/
- smp_mb();
- list_del_init(&wait->entry);
+ list_del_init_careful(&wait->entry);
return ret;
}