/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/kthread.h>

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->task_list);
}

EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);


/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static void __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	wait_queue_entry_t *curr, *next;

	list_for_each_entry_safe(curr, next, &wq_head->task_list, task_list) {
		unsigned flags = curr->flags;

		if (curr->func(curr, mode, wake_flags, key) &&
				(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;
	}
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__wake_up_common(wq_head, mode, nr_exclusive, 0, key);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(__wake_up);
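
/*
 * For reference (sketch; see <linux/wait.h> for the authoritative
 * definitions), the wake_up*() helpers most callers use are thin wrappers
 * around __wake_up():
 *
 *	wake_up(x)			-> __wake_up(x, TASK_NORMAL, 1, NULL)
 *	wake_up_all(x)			-> __wake_up(x, TASK_NORMAL, 0, NULL)
 *	wake_up_interruptible(x)	-> __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 *
 * i.e. @nr_exclusive == 1 wakes at most one exclusive waiter, 0 wakes them
 * all, and @mode selects which task states are eligible to be woken.
 */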

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
123 * The sync wakeup differs that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	unsigned long flags;
	int wake_flags = 1; /* XXX WF_SYNC */

	if (unlikely(!wq_head))
		return;

	if (unlikely(nr_exclusive != 1))
		wake_flags = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	__wake_up_common(wq_head, mode, nr_exclusive, wake_flags, key);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
{
	__wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->task_list))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

void
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->task_list))
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
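
/*
 * Illustrative sketch (not taken from this file; "my_wq" and "my_event" are
 * made-up names) of the classic open-coded wait loop built on
 * prepare_to_wait()/finish_wait():
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (my_event || signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 *
 * and on the waker side:
 *
 *	my_event = true;
 *	wake_up(&my_wq);
 *
 * The condition test after prepare_to_wait() cannot miss a wakeup: by the
 * time it runs, the entry is already queued and the task state is already
 * set, which is exactly the ordering the comment above insists on.
 */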

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->task_list);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (unlikely(signal_pending_state(state, current))) {
		/*
		 * Exclusive waiter must not fail if it was selected by wakeup,
		 * it should "consume" the condition we were waiting for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up, we can not miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that set-condition + wakeup after that
		 * can't see us, it should wake up another exclusive waiter if
		 * we fail.
		 */
		list_del_init(&wq_entry->task_list);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->task_list)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->task_list)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);
	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->task_list)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);
	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
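
/*
 * Illustrative sketch (not taken from this file; "my_wq" and "my_condition"
 * are made-up names): both helpers expect to be called in a loop with
 * wq->lock already held, e.g.:
 *
 *	DEFINE_WAIT(wait);
 *	int err = 0;
 *
 *	spin_lock(&my_wq.lock);
 *	while (!my_condition) {
 *		err = do_wait_intr(&my_wq, &wait);
 *		if (err)
 *			break;
 *	}
 *	__remove_wait_queue(&my_wq, &wait);
 *	__set_current_state(TASK_RUNNING);
 *	spin_unlock(&my_wq.lock);
 *
 * The wait_event_interruptible_locked*() macros in <linux/wait.h> package up
 * this pattern and are normally what callers should use.
 */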

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 * - we use the "careful" check that verifies both
	 *   the next and prev pointers, so that there cannot
	 *   be any half-pending updates in progress on other
	 *   CPU's that we haven't seen yet (and that might
	 *   still change the stack area),
	 * and
	 * - all other users take the lock (ie we can only
	 *   have _one_ other CPU that looks at or modifies
	 *   the list).
	 */
	if (!list_empty_careful(&wq_entry->task_list)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->task_list);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init(&wq_entry->task_list);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     p->state = mode;				condition = true;
 *     smp_mb(); // A				smp_wmb(); // C
 *     if (!wq_entry->flags & WQ_FLAG_WOKEN)	wq_entry->flags |= WQ_FLAG_WOKEN;
 *         schedule()				try_to_wake_up();
 *     p->state = TASK_RUNNING;		    ~~~~~~~~~~~~~~~~~~
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	condition = true;
 *     smp_mb() // B				smp_wmb(); // C
 *						wq_entry->flags |= WQ_FLAG_WOKEN;
 * }
 * remove_wait_queue(&wq_head, &wait);
 *
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	set_current_state(mode); /* A */
	/*
	 * The above implies an smp_mb(), which matches with the smp_wmb() from
	 * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
	 * also observe all state before the wakeup.
	 */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below implies an smp_mb(), it too pairs with the smp_wmb() from
	 * woken_wake_function() such that we must either observe the wait
	 * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
	 * an event.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/*
	 * Although this function is called under the waitqueue lock, LOCK
	 * doesn't imply a write barrier and the users expect write
	 * barrier semantics on wakeup functions. The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with smp_store_mb() in wait_woken().
	 */
	smp_wmb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);
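
/*
 * Illustrative sketch (not taken from this file; "my_wq" and "my_condition"
 * are made-up names) of the wait_woken() pattern outlined in the comment
 * above:
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *	long timeout = MAX_SCHEDULE_TIMEOUT;
 *
 *	add_wait_queue(&my_wq, &wait);
 *	while (!my_condition) {
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *		if (!timeout || signal_pending(current))
 *			break;
 *	}
 *	remove_wait_queue(&my_wq, &wait);
 *
 * Unlike the prepare_to_wait() loop, the entry stays queued for the whole
 * wait and WQ_FLAG_WOKEN (rather than the task state) records that a wakeup
 * happened, which is what closes the race without re-adding the entry.
 */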

int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue *wait_bit
		= container_of(wq_entry, struct wait_bit_queue, wait);

	if (wait_bit->key.flags != key->flags ||
			wait_bit->key.bit_nr != key->bit_nr ||
			test_bit(key->bit_nr, key->flags))
		return 0;
	else
		return autoremove_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the action functions passed to __wait_on_bit() and
 * __wait_on_bit_lock() may return nonzero. A nonzero return code halts
 * waiting and is propagated to the caller.
 */
int __sched
__wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue *q,
	      wait_bit_action_f *action, unsigned mode)
{
	int ret = 0;

	do {
		prepare_to_wait(wq_head, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags))
			ret = (*action)(&q->key, mode);
	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
	finish_wait(wq_head, &q->wait);
	return ret;
}
EXPORT_SYMBOL(__wait_on_bit);

int __sched out_of_line_wait_on_bit(void *word, int bit,
				    wait_bit_action_f *action, unsigned mode)
{
	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit(wq_head, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);

int __sched out_of_line_wait_on_bit_timeout(
	void *word, int bit, wait_bit_action_f *action,
	unsigned mode, unsigned long timeout)
{
	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	wait.key.timeout = jiffies + timeout;
	return __wait_on_bit(wq_head, &wait, action, mode);
}
EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);

int __sched
__wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue *q,
			wait_bit_action_f *action, unsigned mode)
{
	int ret = 0;

	for (;;) {
		prepare_to_wait_exclusive(wq_head, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags)) {
			ret = action(&q->key, mode);
			/*
			 * See the comment in prepare_to_wait_event().
			 * finish_wait() does not necessarily take wq_head->lock,
			 * but test_and_set_bit() implies mb() which pairs with
			 * smp_mb__after_atomic() before wake_up_page().
			 */
			if (ret)
				finish_wait(wq_head, &q->wait);
		}
		if (!test_and_set_bit(q->key.bit_nr, q->key.flags)) {
			if (!ret)
				finish_wait(wq_head, &q->wait);
			return 0;
		} else if (ret) {
			return ret;
		}
	}
}
EXPORT_SYMBOL(__wait_on_bit_lock);

int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
					 wait_bit_action_f *action, unsigned mode)
{
	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit_lock(wq_head, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);

void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit)
{
	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
	if (waitqueue_active(wq_head))
		__wake_up(wq_head, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be done prior to calling
 * this. Typically, this will be smp_mb__after_atomic(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void wake_up_bit(void *word, int bit)
{
	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);
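
/*
 * Illustrative sketch (not taken from this file; "my_flags" and "MY_BIT_BUSY"
 * are made-up names) pairing the waiter and waker sides on a bit:
 *
 * waiter, sleeps until the bit is clear:
 *
 *	wait_on_bit(&my_flags, MY_BIT_BUSY, TASK_UNINTERRUPTIBLE);
 *
 * waker, clears the bit and kicks the hashed waitqueue:
 *
 *	clear_bit(MY_BIT_BUSY, &my_flags);
 *	smp_mb__after_atomic();
 *	wake_up_bit(&my_flags, MY_BIT_BUSY);
 *
 * The barrier before wake_up_bit() is the one the comment above asks for: it
 * keeps the waitqueue_active() test in __wake_up_bit() from being ordered
 * before the clear_bit().
 */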

/*
 * Manipulate the atomic_t address to produce a better bit waitqueue table hash
 * index (we're keying off bit -1, but that would produce a horrible hash
 * value).
 */
static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
{
	if (BITS_PER_LONG == 64) {
		unsigned long q = (unsigned long)p;
		return bit_waitqueue((void *)(q & ~1), q & 1);
	}
	return bit_waitqueue(p, 0);
}

static int wake_atomic_t_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync,
				  void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue *wait_bit
		= container_of(wq_entry, struct wait_bit_queue, wait);
	atomic_t *val = key->flags;

	if (wait_bit->key.flags != key->flags ||
	    wait_bit->key.bit_nr != key->bit_nr ||
	    atomic_read(val) != 0)
		return 0;
	return autoremove_wake_function(wq_entry, mode, sync, key);
}

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
 * the action function passed to __wait_on_atomic_t() may return nonzero.
 * A nonzero return code halts waiting and is propagated to the caller.
 */
static __sched
int __wait_on_atomic_t(struct wait_queue_head *wq_head, struct wait_bit_queue *q,
		       int (*action)(atomic_t *), unsigned mode)
{
	atomic_t *val;
	int ret = 0;

	do {
		prepare_to_wait(wq_head, &q->wait, mode);
		val = q->key.flags;
		if (atomic_read(val) == 0)
			break;
		ret = (*action)(val);
	} while (!ret && atomic_read(val) != 0);
	finish_wait(wq_head, &q->wait);
	return ret;
}

#define DEFINE_WAIT_ATOMIC_T(name, p)					\
	struct wait_bit_queue name = {					\
		.key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_atomic_t_function,	\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
					 unsigned mode)
{
	struct wait_queue_head *wq_head = atomic_t_waitqueue(p);
	DEFINE_WAIT_ATOMIC_T(wait, p);

	return __wait_on_atomic_t(wq_head, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);

/**
 * wake_up_atomic_t - Wake up a waiter on a atomic_t
 * @p: The atomic_t being waited on, a kernel virtual address
 *
 * Wake up anyone waiting for the atomic_t to go to zero.
 *
 * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
 * check is done by the waiter's wake function, not by the waker itself).
 */
void wake_up_atomic_t(atomic_t *p)
{
	__wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
}
EXPORT_SYMBOL(wake_up_atomic_t);
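
/*
 * Illustrative sketch (not taken from this file; "obj", "refs" and
 * "my_wait_atomic_t" are made-up names): waiting for a counter to hit zero.
 * The action callback decides how to sleep; a minimal one just schedules:
 *
 *	static int my_wait_atomic_t(atomic_t *p)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	wait_on_atomic_t(&obj->refs, my_wait_atomic_t, TASK_UNINTERRUPTIBLE);
 *
 * and on the release side:
 *
 *	if (atomic_dec_and_test(&obj->refs))
 *		wake_up_atomic_t(&obj->refs);
 *
 * wait_on_atomic_t() is the <linux/wait.h> wrapper around
 * out_of_line_wait_on_atomic_t() above.
 */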

__sched int bit_wait(struct wait_bit_key *word, int mode)
{
	schedule();
	if (signal_pending_state(mode, current))
		return -EINTR;
	return 0;
}
EXPORT_SYMBOL(bit_wait);

__sched int bit_wait_io(struct wait_bit_key *word, int mode)
{
	io_schedule();
	if (signal_pending_state(mode, current))
		return -EINTR;
	return 0;
}
EXPORT_SYMBOL(bit_wait_io);

__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
{
	unsigned long now = READ_ONCE(jiffies);
	if (time_after_eq(now, word->timeout))
		return -EAGAIN;
	schedule_timeout(word->timeout - now);
	if (signal_pending_state(mode, current))
		return -EINTR;
	return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_timeout);

__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
{
	unsigned long now = READ_ONCE(jiffies);
	if (time_after_eq(now, word->timeout))
		return -EAGAIN;
	io_schedule_timeout(word->timeout - now);
	if (signal_pending_state(mode, current))
		return -EINTR;
	return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_io_timeout);