// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include "sched.h"

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);
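
/*
 * Illustrative sketch (assumed caller context, not part of this file):
 * most users never call __init_waitqueue_head() directly, but reach it
 * through the init_waitqueue_head() wrapper, which supplies a static
 * lockdep class key, or use the compile-time initializer:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);	// compile-time initializer
 *
 *	struct my_dev {
 *		struct wait_queue_head wq;	// embedded in a driver structure
 *	};
 *	init_waitqueue_head(&dev->wq);		// runtime init -> __init_waitqueue_head()
 */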

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);
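
/*
 * Usage sketch (assumed caller context): exclusive entries go to the
 * tail, so non-exclusive entries near the head are always woken and
 * __wake_up_common() can stop after nr_exclusive exclusive wakeups:
 *
 *	DEFINE_WAIT_FUNC(wait, default_wake_function);
 *
 *	add_wait_queue_exclusive(&wq_head, &wait);	// wake-one candidate
 *	...						// sleep on the condition
 *	remove_wait_queue(&wq_head, &wait);
 */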

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	lockdep_assert_held(&wq_head->lock);

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}

	return nr_exclusive;
}

static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	do {
		spin_lock_irqsave(&wq_head->lock, flags);
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
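
/*
 * For orientation, how the common wake_up*() wrappers from
 * <linux/wait.h> map onto __wake_up():
 *
 *	wake_up(&wq)			-> __wake_up(&wq, TASK_NORMAL, 1, NULL)
 *	wake_up_all(&wq)		-> __wake_up(&wq, TASK_NORMAL, 0, NULL)
 *	wake_up_interruptible(&wq)	-> __wake_up(&wq, TASK_INTERRUPTIBLE, 1, NULL)
 */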

/*
 * Same as __wake_up() but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - i.e. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	int wake_flags = 1; /* XXX WF_SYNC */

	if (unlikely(!wq_head))
		return;

	if (unlikely(nr_exclusive != 1))
		wake_flags = 0;

	__wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
{
	__wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that either
 * any wake-function that tests for the wait-queue being active
 * is guaranteed to see the waitqueue addition, _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
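
/*
 * The canonical open-coded wait loop built on prepare_to_wait() and
 * finish_wait() (a sketch; @condition is caller-defined and the
 * wait_event*() macros expand to essentially this):
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq_head, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wait);
 */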

void
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * An exclusive waiter must not fail if it was selected by
		 * wakeup; it should "consume" the condition we were waiting
		 * for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up; we cannot miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that a set-condition + wakeup after
		 * that can't see us; it should wake up another exclusive
		 * waiter if we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
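
/*
 * Simplified sketch of how the wait_event*() macros drive
 * prepare_to_wait_event() (the real ___wait_event() in <linux/wait.h>
 * is more general):
 *
 *	init_wait_entry(&wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);
 *	for (;;) {
 *		long ret = prepare_to_wait_event(&wq_head, &wq_entry, state);
 *
 *		if (condition)
 *			break;
 *		if (ret)		// -ERESTARTSYS: a signal arrived;
 *			return ret;	// entry already removed above
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wq_entry);
 */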

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 * - we use the "careful" check that verifies both
	 *   the next and prev pointers, so that there cannot
	 *   be any half-pending updates in progress on other
	 *   CPU's that we haven't seen yet (and that might
	 *   still change the stack area),
	 * and
	 * - all other users take the lock (ie we can only
	 *   have _one_ other CPU that looks at or modifies
	 *   the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init(&wq_entry->entry);

	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
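
/*
 * Note: DEFINE_WAIT() from <linux/wait.h> installs this callback:
 *
 *	#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
 *
 * so a woken waiter is unlinked by the waker itself, and finish_wait()
 * can usually take its lockless list_empty_careful() fast path.
 */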

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()			// in woken_wake_function()
 *
 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A				try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
 *         schedule()				   if (p->state & mode)
 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B				condition = true;
 * }						smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	/*
	 * The below executes an smp_mb(), which matches with the full barrier
	 * executed by the try_to_wake_up() in woken_wake_function() such that
	 * either we see the store to wq_entry->flags in woken_wake_function()
	 * or woken_wake_function() sees our store to current->state.
	 */
	set_current_state(mode); /* A */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
	 * in woken_wake_function() such that either we see the wait condition
	 * being true or the store to wq_entry->flags in woken_wake_function()
	 * follows ours in the coherence order.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);
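
/*
 * Concrete form of the pattern diagrammed above (a sketch; @condition
 * and the initial timeout are caller-defined):
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *	long timeout = msecs_to_jiffies(100);
 *
 *	add_wait_queue(&wq_head, &wait);
 *	while (!condition && timeout)
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *	remove_wait_queue(&wq_head, &wait);
 */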

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/* Pairs with the smp_store_mb() in wait_woken(). */
	smp_mb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);