/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/kthread.h>

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);
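
/*
 * Illustrative note (not part of the original file): callers normally do not
 * invoke __init_waitqueue_head() directly; the init_waitqueue_head() macro and
 * DECLARE_WAIT_QUEUE_HEAD() from <linux/wait.h> supply the lockdep class key
 * for it. The 'my_waitqueue' and 'struct foo' names below are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_waitqueue);
 *
 *	struct foo {
 *		wait_queue_head_t	wq;
 *	};
 *	init_waitqueue_head(&foo_ptr->wq);
 */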

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}
	return nr_exclusive;
}

static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	spin_lock_irqsave(&wq_head->lock, flags);
	nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags, key, &bookmark);
	spin_unlock_irqrestore(&wq_head->lock, flags);

	while (bookmark.flags & WQ_FLAG_BOOKMARK) {
		spin_lock_irqsave(&wq_head->lock, flags);
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
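
/*
 * Illustrative usage (not part of the original file): __wake_up() is normally
 * reached through the wake_up*() wrapper macros in <linux/wait.h>; the
 * 'my_waitqueue' wait queue head and 'data_ready' flag below are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_waitqueue);
 *	static bool data_ready;
 *
 *	data_ready = true;
 *	wake_up(&my_waitqueue);
 *
 * wake_up() expands (at the time of writing) to
 * __wake_up(&my_waitqueue, TASK_NORMAL, 1, NULL), and wake_up_interruptible()
 * to __wake_up(&my_waitqueue, TASK_INTERRUPTIBLE, 1, NULL).
 */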

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	int wake_flags = 1; /* XXX WF_SYNC */

	if (unlikely(!wq_head))
		return;

	if (unlikely(nr_exclusive != 1))
		wake_flags = 0;

	__wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
{
	__wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

void
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
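
/*
 * Illustrative sketch (not part of the original file): the canonical way to
 * open-code a wait with the helpers above, ordered as the note before
 * prepare_to_wait() requires. The 'my_waitqueue' head and 'condition' test
 * are hypothetical.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_waitqueue, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_waitqueue, &wait);
 *
 * An exclusive waiter would use prepare_to_wait_exclusive() instead, so that
 * a wake-one wakeup wakes only a single such waiter.
 */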

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (unlikely(signal_pending_state(state, current))) {
		/*
		 * Exclusive waiter must not fail if it was selected by wakeup,
		 * it should "consume" the condition we were waiting for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up, we can not miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that set-condition + wakeup after that
		 * can't see us, it should wake up another exclusive waiter if
		 * we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
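
/*
 * Illustrative note (not part of the original file): prepare_to_wait_event()
 * is the helper behind the wait_event*() macro family in <linux/wait.h>, so
 * most callers never use it directly. A typical caller looks like the sketch
 * below; 'my_waitqueue' and 'condition' are hypothetical.
 *
 *	if (wait_event_interruptible(my_waitqueue, condition))
 *		return -ERESTARTSYS;
 */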

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);
	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);
	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
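
/*
 * Illustrative sketch (not part of the original file): these helpers back the
 * wait_event_interruptible_locked*() macros, which call them with wq->lock
 * already held. An open-coded equivalent, with hypothetical 'wq', 'wait' and
 * 'condition' names, would look roughly like:
 *
 *	DEFINE_WAIT(wait);
 *	int err = 0;
 *
 *	spin_lock(&wq.lock);
 *	while (!condition) {
 *		err = do_wait_intr(&wq, &wait);
 *		if (err)
 *			break;
 *	}
 *	__remove_wait_queue(&wq, &wait);
 *	__set_current_state(TASK_RUNNING);
 *	spin_unlock(&wq.lock);
 */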

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area),
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init(&wq_entry->entry);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     p->state = mode;				condition = true;
 *     smp_mb(); // A				smp_wmb(); // C
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	wq_entry->flags |= WQ_FLAG_WOKEN;
 *         schedule()				try_to_wake_up();
 *     p->state = TASK_RUNNING;			~~~~~~~~~~~~~~~~~~
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	condition = true;
 *     smp_mb() // B				smp_wmb(); // C
 *	   wq_entry->flags |= WQ_FLAG_WOKEN;
 * }
 * remove_wait_queue(&wq_head, &wait);
 *
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	set_current_state(mode); /* A */
	/*
	 * The above implies an smp_mb(), which matches with the smp_wmb() from
	 * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
	 * also observe all state before the wakeup.
	 */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below implies an smp_mb(), it too pairs with the smp_wmb() from
	 * woken_wake_function() such that we must either observe the wait
	 * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
	 * an event.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);
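
/*
 * Illustrative sketch (not part of the original file): the waiter side of the
 * diagram above, written the way a typical caller would use wait_woken(). The
 * 'my_waitqueue' head, 'condition' test and timeout value are hypothetical.
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *	long timeout = msecs_to_jiffies(1000);
 *
 *	add_wait_queue(&my_waitqueue, &wait);
 *	while (!condition && timeout)
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *	remove_wait_queue(&my_waitqueue, &wait);
 */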

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions. The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with smp_store_mb() in wait_woken().
	 */
	smp_wmb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);