// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include "sched.h"
#include <trace/hooks/sched.h>

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}
EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	lockdep_assert_held(&wq_head->lock);

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}

	return nr_exclusive;
}

static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	do {
		spin_lock_irqsave(&wq_head->lock, flags);
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
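
/*
 * Illustration (a sketch, not part of this file): the familiar wake_up*()
 * helpers in <linux/wait.h> are thin wrappers around __wake_up(), roughly:
 *
 *	wake_up(&wq)			-> __wake_up(&wq, TASK_NORMAL, 1, NULL)
 *	wake_up_all(&wq)		-> __wake_up(&wq, TASK_NORMAL, 0, NULL)
 *	wake_up_interruptible(&wq)	-> __wake_up(&wq, TASK_INTERRUPTIBLE, 1, NULL)
 *
 * i.e. nr_exclusive == 1 stops after the first exclusive waiter that was
 * actually woken, while nr_exclusive == 0 wakes everything on the queue.
 */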

/*
 * Same as __wake_up() but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			void *key)
{
	int wake_flags = WF_SYNC;

	if (unlikely(!wq_head))
		return;

	trace_android_vh_set_wake_flags(&wake_flags, &mode);
	__wake_up_common_lock(wq_head, mode, 1, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
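
/*
 * Illustrative caller (assumed usage, not from this file): a waker that is
 * about to block itself can use the sync variant to hint that the woken
 * task need not be migrated, e.g.:
 *
 *	buf->ready = true;		// 'buf' is hypothetical
 *	wake_up_interruptible_sync_poll(&buf->wait, EPOLLIN);
 *
 * where wake_up_interruptible_sync_poll() boils down to
 * __wake_up_sync_key(&buf->wait, TASK_INTERRUPTIBLE, poll_to_key(EPOLLIN)).
 */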

/**
 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
			unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
{
	__wake_up_sync_key(wq_head, mode, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

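/*
 * Wake up all waiters with EPOLLHUP | POLLFREE because the waitqueue is
 * about to go away (e.g. the signalfd/binder POLLFREE protocol): every
 * wakeup callback is expected to remove its entry from the queue, hence
 * the warning below if anything is left behind.
 */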
void __wake_up_pollfree(struct wait_queue_head *wq_head)
{
	__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
	/* POLLFREE must have cleared the queue. */
	WARN_ON_ONCE(waitqueue_active(wq_head));
}

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
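
/*
 * Typical open-coded usage (a sketch, not from this file):
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq_head, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wait);
 *
 * The set_current_state() inside prepare_to_wait() guarantees that a
 * concurrent waker either sees us on the queue or we see the condition.
 */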

/* Returns true if we are the first waiter in the queue, false otherwise. */
bool
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	bool was_empty = false;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry)) {
		was_empty = list_empty(&wq_head->head);
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	}
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
	return was_empty;
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
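
/*
 * Exclusive-waiter sketch (assumed usage, not from this file): same loop
 * as for prepare_to_wait(), but a wake-one wakeup (nr_exclusive == 1)
 * will then wake at most this one task:
 *
 *	for (;;) {
 *		prepare_to_wait_exclusive(&wq_head, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wait);
 */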

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * An exclusive waiter must not fail if it was selected by a
		 * wakeup: it should "consume" the condition we were waiting
		 * for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up; we cannot miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that a set-condition plus wakeup done
		 * after that can't see us: it should wake up another exclusive
		 * waiter if we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
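
/*
 * prepare_to_wait_event() is the workhorse behind the wait_event*() family
 * in <linux/wait.h>. Roughly (a sketch of the generated loop, with the
 * ___wait_event() plumbing omitted):
 *
 *	init_wait_entry(&wq_entry, 0);
 *	for (;;) {
 *		long ret = prepare_to_wait_event(&wq_head, &wq_entry, state);
 *		if (condition)
 *			break;
 *		if (ret)	// -ERESTARTSYS: pending signal, bail out
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wq_entry);
 */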

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
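
/*
 * Illustrative caller (roughly what the wait_event_interruptible_locked*()
 * macros expand to) - note that the wait-queue lock stays held across the
 * condition check, as the comment above requires:
 *
 *	DEFINE_WAIT(wait);
 *	int err = 0;
 *
 *	spin_lock(&wq.lock);
 *	while (!condition && !err)
 *		err = do_wait_intr(&wq, &wait);
 *	if (!list_empty(&wait.entry))
 *		__remove_wait_queue(&wq, &wait);
 *	__set_current_state(TASK_RUNNING);
 *	spin_unlock(&wq.lock);
 */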

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPUs that we haven't seen yet (and that might
	 *    still change the stack area),
	 * and
	 *  - all other users take the lock (ie. we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

__sched int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned int mode,
			int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init_careful(&wq_entry->entry);

	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
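
/*
 * autoremove_wake_function() is the callback installed by init_wait_entry()
 * above (and by DEFINE_WAIT() in <linux/wait.h>): a successful wakeup takes
 * the entry off the queue, so finish_wait() usually finds the entry already
 * empty and can skip taking the wait-queue lock.
 */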

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()			// in woken_wake_function()
 *
 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A				try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
 *         schedule()				   if (p->state & mode)
 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B				condition = true;
 * }						smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
 */
__sched long wait_woken(struct wait_queue_entry *wq_entry, unsigned int mode, long timeout)
{
	/*
	 * The below executes an smp_mb(), which matches with the full barrier
	 * executed by the try_to_wake_up() in woken_wake_function() such that
	 * either we see the store to wq_entry->flags in woken_wake_function()
	 * or woken_wake_function() sees our store to current->state.
	 */
	set_current_state(mode); /* A */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
	 * in woken_wake_function() such that either we see the wait condition
	 * being true or the store to wq_entry->flags in woken_wake_function()
	 * follows ours in the coherence order.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);
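
/*
 * Typical wait_woken() usage (a sketch of the pattern documented above):
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(&wq_head, &wait);
 *	while (!condition) {
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *		if (!timeout || signal_pending(current))
 *			break;
 *	}
 *	remove_wait_queue(&wq_head, &wait);
 *
 * The WQ_FLAG_WOKEN handshake replaces the usual prepare_to_wait()/
 * finish_wait() dance, so the loop does not have to re-add the entry on
 * every iteration.
 */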

__sched int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned int mode,
			int sync, void *key)
{
	/* Pairs with the smp_store_mb() in wait_woken(). */
	smp_mb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);