/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/kthread.h>

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}
EXPORT_SYMBOL(__init_waitqueue_head);
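
/*
 * Callers normally do not invoke __init_waitqueue_head() directly; the
 * init_waitqueue_head() macro and the DECLARE_WAIT_QUEUE_HEAD() initializer
 * in <linux/wait.h> wrap it and supply the lockdep class key and name.
 * A minimal usage sketch (hypothetical caller code, not part of this file;
 * "my_dev" and "wq" are made-up names):
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(global_wq);
 *
 *	struct my_dev {
 *		struct wait_queue_head wq;
 *	};
 *
 *	static void my_dev_init(struct my_dev *dev)
 *	{
 *		init_waitqueue_head(&dev->wq);
 *	}
 */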

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}
	return nr_exclusive;
}

static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	spin_lock_irqsave(&wq_head->lock, flags);
	nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags, key, &bookmark);
	spin_unlock_irqrestore(&wq_head->lock, flags);

	while (bookmark.flags & WQ_FLAG_BOOKMARK) {
		spin_lock_irqsave(&wq_head->lock, flags);
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
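
/*
 * Callers normally reach __wake_up() through the wake_up*() macros in
 * <linux/wait.h> (e.g. wake_up(x) expands to __wake_up(x, TASK_NORMAL, 1, NULL)),
 * paired with the wait_event*() macros on the sleeping side. A minimal sketch
 * of that pairing (hypothetical code, not part of this file; "wq_head" and
 * "data_ready" are made-up names):
 *
 *	waiter:
 *		err = wait_event_interruptible(wq_head, READ_ONCE(data_ready));
 *
 *	waker:
 *		WRITE_ONCE(data_ready, 1);
 *		wake_up(&wq_head);
 */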

/*
 * Same as __wake_up() but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	int wake_flags = 1; /* XXX WF_SYNC */

	if (unlikely(!wq_head))
		return;

	if (unlikely(nr_exclusive != 1))
		wake_flags = 0;

	__wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
{
	__wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see the waitqueue addition, _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

void
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (unlikely(signal_pending_state(state, current))) {
		/*
		 * Exclusive waiter must not fail if it was selected by wakeup,
		 * it should "consume" the condition we were waiting for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up, we can not miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that set-condition + wakeup after that
		 * can't see us, it should wake up another exclusive waiter if
		 * we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);
	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);
	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area),
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);
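
/*
 * prepare_to_wait()/finish_wait() are typically used in an open-coded wait
 * loop when the wait_event*() macros do not fit. A minimal sketch of the
 * canonical loop (hypothetical caller code, not part of this file; "wq_head"
 * and "condition" are made-up names):
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq_head, &wait, TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		if (signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wait);
 */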

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init(&wq_entry->entry);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     p->state = mode;					condition = true;
 *     smp_mb(); // A					smp_wmb(); // C
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))		    wq_entry->flags |= WQ_FLAG_WOKEN;
 *         schedule()					    try_to_wake_up();
 *     p->state = TASK_RUNNING;				    ~~~~~~~~~~~~~~~~~~
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;		condition = true;
 *     smp_mb(); // B					smp_wmb(); // C
 *							wq_entry->flags |= WQ_FLAG_WOKEN;
 * }
 * remove_wait_queue(&wq_head, &wait);
 *
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	set_current_state(mode); /* A */
	/*
	 * The above implies an smp_mb(), which matches with the smp_wmb() from
	 * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
	 * also observe all state before the wakeup.
	 */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below implies an smp_mb(), it too pairs with the smp_wmb() from
	 * woken_wake_function() such that we must either observe the wait
	 * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
	 * an event.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/*
	 * Although this function is called under the waitqueue lock, LOCK
	 * doesn't imply a write barrier and the users expect write
	 * barrier semantics on wakeup functions. The following
	 * smp_wmb() is equivalent to the smp_wmb() in try_to_wake_up()
	 * and is paired with smp_store_mb() in wait_woken().
	 */
	smp_wmb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);
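
/*
 * A minimal wait_woken() usage sketch, following the pattern in the comment
 * above wait_woken() but with timeout and signal handling spelled out
 * (hypothetical caller code, not part of this file; "wq_head", "condition"
 * and "timeout" are made-up names):
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(&wq_head, &wait);
 *	while (!condition) {
 *		if (signal_pending(current))
 *			break;
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *		if (!timeout)
 *			break;
 *	}
 *	remove_wait_queue(&wq_head, &wait);
 */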