list: add "list_del_init_careful()" to go with "list_empty_careful()"

That gives us ordering guarantees around the pair.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Linus Torvalds <torvalds@linux-foundation.org>
Date:   2020-07-23 12:33:41 -07:00
parent 2a9127fcf2
commit c6fe44d96f
3 changed files with 21 additions and 8 deletions
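
To make the guarantee concrete, here is a minimal sketch of the intended
pattern (mine, not part of the commit; "struct waiter", its "woken" field and
both functions are hypothetical): the waker publishes a result and then
detaches the entry, and an owner that later observes the entry as empty via
the unlocked test is guaranteed to also observe that result.

#include <linux/list.h>
#include <linux/compiler.h>

struct waiter {
	struct list_head	entry;	/* linked on some wait list */
	int			woken;	/* written by the waker, read by the owner */
};

/* Waker: write the result, then detach the entry with release semantics. */
static void wake_one(struct waiter *w)
{
	WRITE_ONCE(w->woken, 1);		/* ordered before the release store */
	list_del_init_careful(&w->entry);	/* ends in smp_store_release() */
}

/* Owner: unlocked emptiness test; its acquire load pairs with the release above. */
static bool was_woken(struct waiter *w)
{
	if (list_empty_careful(&w->entry))	/* starts with smp_load_acquire() */
		return READ_ONCE(w->woken);	/* guaranteed to see the write above */
	return false;
}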

include/linux/list.h

@@ -282,6 +282,24 @@ static inline int list_empty(const struct list_head *head)
 	return READ_ONCE(head->next) == head;
 }
 
+/**
+ * list_del_init_careful - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ *
+ * This is the same as list_del_init(), except designed to be used
+ * together with list_empty_careful() in a way to guarantee ordering
+ * of other memory operations.
+ *
+ * Any memory operations done before a list_del_init_careful() are
+ * guaranteed to be visible after a list_empty_careful() test.
+ */
+static inline void list_del_init_careful(struct list_head *entry)
+{
+	__list_del_entry(entry);
+	entry->prev = entry;
+	smp_store_release(&entry->next, entry);
+}
+
 /**
  * list_empty_careful - tests whether a list is empty and not being modified
  * @head: the list to test
@@ -297,7 +315,7 @@ static inline int list_empty(const struct list_head *head)
  */
 static inline int list_empty_careful(const struct list_head *head)
 {
-	struct list_head *next = head->next;
+	struct list_head *next = smp_load_acquire(&head->next);
 	return (next == head) && (next == head->prev);
 }

kernel/sched/wait.c

@@ -389,7 +389,7 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i
 	int ret = default_wake_function(wq_entry, mode, sync, key);
 
 	if (ret)
-		list_del_init(&wq_entry->entry);
+		list_del_init_careful(&wq_entry->entry);
 
 	return ret;
 }

mm/filemap.c

@@ -1041,13 +1041,8 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync,
 	 * since after list_del_init(&wait->entry) the wait entry
 	 * might be de-allocated and the process might even have
 	 * exited.
-	 *
-	 * We _really_ should have a "list_del_init_careful()" to
-	 * properly pair with the unlocked "list_empty_careful()"
-	 * in finish_wait().
 	 */
-	smp_mb();
-	list_del_init(&wait->entry);
+	list_del_init_careful(&wait->entry);
 	return ret;
 }
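
A usage note grounded in the comment retained above: on the waker side, the
careful delete has to be the very last access to *wait, because once the entry
reads as empty the waiter may free it and exit. A hypothetical sketch of that
"delete last" rule (wake_and_detach() is made up; only the ordering matters):

#include <linux/wait.h>

static int wake_and_detach(wait_queue_entry_t *wait)
{
	int ret = wait->flags & WQ_FLAG_EXCLUSIVE;	/* touch *wait only before... */

	list_del_init_careful(&wait->entry);		/* ...the careful delete... */
	return ret;					/* ...and never after it. */
}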