// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include "sched.h"

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);
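
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * the classic open-coded wait built from add_wait_queue() and
 * remove_wait_queue().  Most callers should prefer the wait_event*()
 * macros instead of open-coding this:
 *
 *	DECLARE_WAITQUEUE(wait, current);
 *
 *	add_wait_queue(&wq_head, &wait);
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&wq_head, &wait);
 *
 * Setting the task state before testing 'condition' is what closes the
 * race against a concurrent wake_up(); see the comment above
 * prepare_to_wait() below.
 */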

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	lockdep_assert_held(&wq_head->lock);

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}

	return nr_exclusive;
}

static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	do {
		spin_lock_irqsave(&wq_head->lock, flags);
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
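
/*
 * Wake-side sketch (hypothetical caller): publish the condition before
 * waking, so that the barrier executed when a task is woken pairs with
 * the set_current_state() on the wait side and no wakeup can be lost:
 *
 *	dev->data_ready = true;
 *	__wake_up(&dev->wq, TASK_NORMAL, 1, NULL);	// aka wake_up(&dev->wq)
 *
 * 'dev' is illustrative only; wake_up() and friends in <linux/wait.h>
 * are thin wrappers around __wake_up().
 */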

/*
 * Same as __wake_up() but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);
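
/*
 * Sketch (hypothetical caller): the _locked variant is for paths that
 * already hold wq_head->lock, e.g. to update state and wake atomically:
 *
 *	spin_lock_irqsave(&wq_head->lock, flags);
 *	// ... update state protected by wq_head->lock ...
 *	__wake_up_locked(wq_head, TASK_NORMAL, 1);
 *	spin_unlock_irqrestore(&wq_head->lock, flags);
 */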

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);
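
/*
 * Sketch (hypothetical caller) of the bookmark variant: a waker facing
 * a very long queue can resume the walk after the bookmark entry each
 * time __wake_up_common() breaks out at WAITQUEUE_WALK_BREAK_CNT,
 * dropping wq_head->lock in between (compare __wake_up_common_lock()
 * above):
 *
 *	wait_queue_entry_t bookmark;
 *
 *	bookmark.flags = 0;
 *	bookmark.private = NULL;
 *	bookmark.func = NULL;
 *	INIT_LIST_HEAD(&bookmark.entry);
 *
 *	do {
 *		spin_lock_irq(&wq_head->lock);
 *		__wake_up_locked_key_bookmark(wq_head, TASK_NORMAL, key, &bookmark);
 *		spin_unlock_irq(&wq_head->lock);
 *	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
 */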

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	int wake_flags = 1; /* XXX WF_SYNC */

	if (unlikely(!wq_head))
		return;

	if (unlikely(nr_exclusive != 1))
		wake_flags = 0;

	__wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
{
	__wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
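
/*
 * Sketch (hypothetical caller): a waker that is about to sleep itself,
 * e.g. a producer that will immediately block waiting for more room,
 * can use the _sync variants (via wrappers such as
 * wake_up_interruptible_sync()) to hint that migrating the wakee to
 * another CPU is not worthwhile:
 *
 *	// ... queue data for the reader ...
 *	wake_up_interruptible_sync(&dev->read_wait);	// -> __wake_up_sync()
 *
 * 'dev->read_wait' is illustrative only.
 */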

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
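
/*
 * A minimal sketch (hypothetical caller) of the loop that
 * prepare_to_wait()/finish_wait() are designed for:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq_head, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wait);
 *
 * Because prepare_to_wait() sets the task state under wq_head->lock,
 * a wake_up() that runs after the 'condition' test cannot be missed:
 * it will find the entry queued and put us back to TASK_RUNNING.
 */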

void
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * An exclusive waiter must not fail if it was selected by a
		 * wakeup: it should "consume" the condition we were waiting
		 * for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up; we cannot miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that a set-condition plus wakeup after
		 * that can't see us: it should wake up another exclusive
		 * waiter if we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
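
/*
 * Sketch of how ___wait_event() in <linux/wait.h> uses the two helpers
 * above (simplified; exclusive-mode and cleanup details elided):
 *
 *	init_wait_entry(&wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);
 *	for (;;) {
 *		long err = prepare_to_wait_event(&wq_head, &wq_entry, state);
 *		if (condition)
 *			break;
 *		if (err)	// -ERESTARTSYS: signal_pending_state() fired
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wq_entry);
 */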

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
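
/*
 * Sketch (hypothetical caller): these two helpers back the
 * wait_event_interruptible_locked*() family, which is entered with
 * wq->lock already held by the caller and returns with it held:
 *
 *	spin_lock(&wq.lock);
 *	err = wait_event_interruptible_locked(wq, condition);
 *	// wq.lock is held here whether err is 0 or -ERESTARTSYS
 *	spin_unlock(&wq.lock);
 */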

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 * - we use the "careful" check that verifies both
	 *   the next and prev pointers, so that there cannot
	 *   be any half-pending updates in progress on other
	 *   CPUs that we haven't seen yet (and that might
	 *   still change the stack area), and
	 * - all other users take the lock (ie we can only
	 *   have _one_ other CPU that looks at or modifies
	 *   the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init(&wq_entry->entry);

	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()			// in woken_wake_function()
 *
 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A				try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
 *         schedule()				   if (p->state & mode)
 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B				condition = true;
 * }						smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	/*
	 * The below executes an smp_mb(), which matches with the full barrier
	 * executed by the try_to_wake_up() in woken_wake_function() such that
	 * either we see the store to wq_entry->flags in woken_wake_function()
	 * or woken_wake_function() sees our store to current->state.
	 */
	set_current_state(mode); /* A */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
	 * in woken_wake_function() such that either we see the wait condition
	 * being true or the store to wq_entry->flags in woken_wake_function()
	 * follows ours in the coherence order.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/* Pairs with the smp_store_mb() in wait_woken(). */
	smp_mb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);
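
/*
 * Sketch (hypothetical caller) of the wait_woken() style of loop; the
 * signal/timeout handling a real caller needs is elided:
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(&wq_head, &wait);
 *	while (!condition)
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *	remove_wait_queue(&wq_head, &wait);
 *
 * Unlike prepare_to_wait(), the entry stays queued for the whole loop;
 * WQ_FLAG_WOKEN (not queue membership) carries the "already woken"
 * state between iterations.
 */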