/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/kthread.h>

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->task_list);
}

EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);


/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static void __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	wait_queue_entry_t *curr, *next;

	list_for_each_entry_safe(curr, next, &wq_head->task_list, task_list) {
		unsigned flags = curr->flags;

		if (curr->func(curr, mode, wake_flags, key) &&
				(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;
	}
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__wake_up_common(wq_head, mode, nr_exclusive, 0, key);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(__wake_up);
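/*
 * Illustrative sketch, not part of this file: how __wake_up() is typically
 * reached through the wake_up()/wait_event() wrappers. The 'foo_dev'
 * structure and 'data_ready' flag are hypothetical names used only for
 * this example.
 *
 *	struct foo_dev {
 *		struct wait_queue_head wq;	// init_waitqueue_head(&dev->wq);
 *		bool data_ready;
 *	};
 *
 *	// waiter side:
 *	wait_event(dev->wq, dev->data_ready);
 *
 *	// waker side (e.g. an interrupt handler or another thread):
 *	dev->data_ready = true;
 *	wake_up(&dev->wq);		// -> __wake_up(&dev->wq, TASK_NORMAL, 1, NULL)
 */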

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);
/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	unsigned long flags;
	int wake_flags = 1; /* XXX WF_SYNC */

	if (unlikely(!wq_head))
		return;

	if (unlikely(nr_exclusive != 1))
		wake_flags = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	__wake_up_common(wq_head, mode, nr_exclusive, wake_flags, key);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
{
	__wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->task_list))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
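/*
 * Illustrative sketch, not part of this file: the classic open-coded wait
 * loop built on prepare_to_wait()/finish_wait(). 'wq_head' and 'condition'
 * stand in for the caller's waitqueue and wakeup condition; this is roughly
 * what the wait_event() family expands to.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq_head, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wait);
 */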

void
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->task_list))
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->task_list);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (unlikely(signal_pending_state(state, current))) {
		/*
		 * An exclusive waiter must not fail if it was selected by wakeup,
		 * it should "consume" the condition we were waiting for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up; we cannot miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that set-condition + wakeup after that
		 * can't see us, it should wake up another exclusive waiter if
		 * we fail.
		 */
		list_del_init(&wq_entry->task_list);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->task_list)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
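/*
 * Illustrative sketch, not part of this file: roughly how the
 * wait_event_interruptible() family drives the two helpers above.
 * 'wq_head' and 'condition' stand in for the caller's waitqueue and
 * wakeup condition.
 *
 *	struct wait_queue_entry wq_entry;
 *	long ret = 0;
 *
 *	init_wait_entry(&wq_entry, 0);
 *	for (;;) {
 *		long err = prepare_to_wait_event(&wq_head, &wq_entry,
 *						 TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		if (err) {			// -ERESTARTSYS: signal pending
 *			ret = err;
 *			break;
 *		}
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wq_entry);
 */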

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->task_list)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);
	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->task_list)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);
	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
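/*
 * Illustrative sketch, not part of this file: an open-coded loop around
 * do_wait_intr(), of the kind the locked wait_event helpers are built on.
 * 'wq' and 'condition' are the caller's waitqueue and wakeup condition;
 * wq.lock is held on entry to the loop and on exit from it.
 *
 *	DEFINE_WAIT(wait);
 *	int err = 0;
 *
 *	spin_lock(&wq.lock);
 *	while (!condition) {
 *		err = do_wait_intr(&wq, &wait);	// drops/retakes wq.lock around schedule()
 *		if (err)			// -ERESTARTSYS on a pending signal
 *			break;
 *	}
 *	if (!list_empty(&wait.task_list))
 *		__remove_wait_queue(&wq, &wait);
 *	__set_current_state(TASK_RUNNING);
 *	spin_unlock(&wq.lock);
 */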

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPUs that we haven't seen yet (and that might
	 *    still change the stack area),
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wq_entry->task_list)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->task_list);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init(&wq_entry->task_list);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     p->state = mode;					condition = true;
 *     smp_mb(); // A					smp_wmb(); // C
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))		wq_entry->flags |= WQ_FLAG_WOKEN;
 *         schedule()					try_to_wake_up();
 *     p->state = TASK_RUNNING;				    ~~~~~~~~~~~~~~~~~~
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;		condition = true;
 *     smp_mb() // B					smp_wmb(); // C
 *							wq_entry->flags |= WQ_FLAG_WOKEN;
 * }
 * remove_wait_queue(&wq_head, &wait);
 *
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	set_current_state(mode); /* A */
	/*
	 * The above implies an smp_mb(), which matches with the smp_wmb() from
	 * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
	 * also observe all state before the wakeup.
	 */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below implies an smp_mb(), it too pairs with the smp_wmb() from
	 * woken_wake_function() such that we must either observe the wait
	 * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
	 * an event.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/*
	 * Although this function is called under the waitqueue lock, LOCK
	 * doesn't imply a write barrier and users expect write barrier
	 * semantics on wakeup functions. The following smp_wmb() is
	 * equivalent to the smp_wmb() in try_to_wake_up() and is paired
	 * with smp_store_mb() in wait_woken().
	 */
	smp_wmb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);

int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue *wait_bit
		= container_of(wq_entry, struct wait_bit_queue, wait);

	if (wait_bit->key.flags != key->flags ||
			wait_bit->key.bit_nr != key->bit_nr ||
			test_bit(key->bit_nr, key->flags))
		return 0;
	else
		return autoremove_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the action functions passed to __wait_on_bit() and
 * __wait_on_bit_lock() are permitted to return nonzero codes. A nonzero
 * return code halts waiting and is returned to the caller.
 */
int __sched
__wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue *q,
	      wait_bit_action_f *action, unsigned mode)
{
	int ret = 0;

	do {
		prepare_to_wait(wq_head, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags))
			ret = (*action)(&q->key, mode);
	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
	finish_wait(wq_head, &q->wait);
	return ret;
}
EXPORT_SYMBOL(__wait_on_bit);

int __sched out_of_line_wait_on_bit(void *word, int bit,
				    wait_bit_action_f *action, unsigned mode)
{
	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit(wq_head, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);

int __sched out_of_line_wait_on_bit_timeout(
	void *word, int bit, wait_bit_action_f *action,
	unsigned mode, unsigned long timeout)
{
	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	wait.key.timeout = jiffies + timeout;
	return __wait_on_bit(wq_head, &wait, action, mode);
}
EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);

int __sched
__wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue *q,
		   wait_bit_action_f *action, unsigned mode)
{
	int ret = 0;

	for (;;) {
		prepare_to_wait_exclusive(wq_head, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags)) {
			ret = action(&q->key, mode);
			/*
			 * See the comment in prepare_to_wait_event().
			 * finish_wait() does not necessarily take wq_head->lock,
			 * but test_and_set_bit() implies mb() which pairs with
			 * smp_mb__after_atomic() before wake_up_page().
			 */
			if (ret)
				finish_wait(wq_head, &q->wait);
		}
		if (!test_and_set_bit(q->key.bit_nr, q->key.flags)) {
			if (!ret)
				finish_wait(wq_head, &q->wait);
			return 0;
		} else if (ret) {
			return ret;
		}
	}
}
EXPORT_SYMBOL(__wait_on_bit_lock);

int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
					 wait_bit_action_f *action, unsigned mode)
{
	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit_lock(wq_head, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);

void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit)
{
	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
	if (waitqueue_active(wq_head))
		__wake_up(wq_head, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be done prior to calling
 * this. Typically, this will be smp_mb__after_atomic(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void wake_up_bit(void *word, int bit)
{
	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);
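/*
 * Illustrative sketch, not part of this file: the usual pairing of
 * wait_on_bit() with clear_bit()/wake_up_bit(). 'obj->flags' (an unsigned
 * long) and 'MY_BUSY_BIT' are hypothetical names used only for the example.
 *
 *	// waiter: sleep until MY_BUSY_BIT is cleared
 *	wait_on_bit(&obj->flags, MY_BUSY_BIT, TASK_UNINTERRUPTIBLE);
 *
 *	// waker: clear the bit, order it against waitqueue_active(), wake
 *	clear_bit(MY_BUSY_BIT, &obj->flags);
 *	smp_mb__after_atomic();
 *	wake_up_bit(&obj->flags, MY_BUSY_BIT);
 */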

/*
 * Manipulate the atomic_t address to produce a better bit waitqueue table hash
 * index (we're keying off bit -1, but that would produce a horrible hash
 * value).
 */
static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
{
	if (BITS_PER_LONG == 64) {
		unsigned long q = (unsigned long)p;
		return bit_waitqueue((void *)(q & ~1), q & 1);
	}
	return bit_waitqueue(p, 0);
}

static int wake_atomic_t_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync,
				  void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue *wait_bit
		= container_of(wq_entry, struct wait_bit_queue, wait);
	atomic_t *val = key->flags;

	if (wait_bit->key.flags != key->flags ||
	    wait_bit->key.bit_nr != key->bit_nr ||
	    atomic_read(val) != 0)
		return 0;
	return autoremove_wake_function(wq_entry, mode, sync, key);
}

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
 * the action passed to __wait_on_atomic_t() is permitted to return a nonzero
 * code. A nonzero return code halts waiting and is returned to the caller.
 */
static __sched
int __wait_on_atomic_t(struct wait_queue_head *wq_head, struct wait_bit_queue *q,
		       int (*action)(atomic_t *), unsigned mode)
{
	atomic_t *val;
	int ret = 0;

	do {
		prepare_to_wait(wq_head, &q->wait, mode);
		val = q->key.flags;
		if (atomic_read(val) == 0)
			break;
		ret = (*action)(val);
	} while (!ret && atomic_read(val) != 0);
	finish_wait(wq_head, &q->wait);
	return ret;
}

#define DEFINE_WAIT_ATOMIC_T(name, p)					\
	struct wait_bit_queue name = {					\
		.key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_atomic_t_function,	\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
					 unsigned mode)
{
	struct wait_queue_head *wq_head = atomic_t_waitqueue(p);
	DEFINE_WAIT_ATOMIC_T(wait, p);

	return __wait_on_atomic_t(wq_head, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);

/**
 * wake_up_atomic_t - Wake up a waiter on an atomic_t
 * @p: The atomic_t being waited on, a kernel virtual address
 *
 * Wake up anyone waiting for the atomic_t to go to zero.
 *
 * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
 * check is done by the waiter's wake function, not by the waker itself).
 */
void wake_up_atomic_t(atomic_t *p)
{
	__wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
}
EXPORT_SYMBOL(wake_up_atomic_t);
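/*
 * Illustrative sketch, not part of this file: a typical pairing of
 * wait_on_atomic_t() with wake_up_atomic_t(). 'my_action' is a hypothetical
 * caller-supplied action modelled on bit_wait() below, and 'obj->users' is
 * a hypothetical reference count.
 *
 *	static int my_action(atomic_t *p)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	// waiter: sleep until the counter reaches zero
 *	wait_on_atomic_t(&obj->users, my_action, TASK_UNINTERRUPTIBLE);
 *
 *	// waker: drop the last reference and wake any waiters
 *	if (atomic_dec_and_test(&obj->users))
 *		wake_up_atomic_t(&obj->users);
 */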

__sched int bit_wait(struct wait_bit_key *word, int mode)
{
	schedule();
	if (signal_pending_state(mode, current))
		return -EINTR;
	return 0;
}
EXPORT_SYMBOL(bit_wait);

__sched int bit_wait_io(struct wait_bit_key *word, int mode)
{
	io_schedule();
	if (signal_pending_state(mode, current))
		return -EINTR;
	return 0;
}
EXPORT_SYMBOL(bit_wait_io);

__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
{
	unsigned long now = READ_ONCE(jiffies);
	if (time_after_eq(now, word->timeout))
		return -EAGAIN;
	schedule_timeout(word->timeout - now);
	if (signal_pending_state(mode, current))
		return -EINTR;
	return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_timeout);

__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
{
	unsigned long now = READ_ONCE(jiffies);
	if (time_after_eq(now, word->timeout))
		return -EAGAIN;
	io_schedule_timeout(word->timeout - now);
	if (signal_pending_state(mode, current))
		return -EINTR;
	return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_io_timeout);