#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H


#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void *private;
	wait_queue_func_t func;
	struct list_head task_list;
};

struct wait_bit_key {
	void *flags;
	int bit_nr;
#define WAIT_ATOMIC_T_BIT_NR -1
};

struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};

struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name)					\
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)

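/*
 * Usage sketch (illustrative only, not part of the API): a wait queue head
 * is either defined statically with DECLARE_WAIT_QUEUE_HEAD() or embedded
 * in a structure and set up at run time with init_waitqueue_head(). The
 * structure and field names below are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(global_wq);
 *
 *	struct my_device {
 *		wait_queue_head_t wq;
 *		int ready;
 *	};
 *
 *	static void my_device_init(struct my_device *dev)
 *	{
 *		init_waitqueue_head(&dev->wq);
 *		dev->ready = 0;
 *	}
 */
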
#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)				\
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name)				\
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->private = p;
	q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
					     wait_queue_func_t func)
{
	q->flags = 0;
	q->private = NULL;
	q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
					      wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
						   wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
				       wait_queue_t *old)
{
	list_del(&old->task_list);
}

void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
			void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

#define ___wait_cond_timeout(condition, ret)				\
({									\
	bool __cond = (condition);					\
	if (__cond && !ret)						\
		ret = 1;						\
	__cond || !ret;							\
})

#define ___wait_signal_pending(state)					\
	((state == TASK_INTERRUPTIBLE && signal_pending(current)) ||	\
	 (state == TASK_KILLABLE && fatal_signal_pending(current)))

#define ___wait_nop_ret		int ret __always_unused

#define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
do {									\
	__label__ __out;						\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		if (exclusive)						\
			prepare_to_wait_exclusive(&wq, &__wait, state);	\
		else							\
			prepare_to_wait(&wq, &__wait, state);		\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_signal_pending(state)) {			\
			ret = -ERESTARTSYS;				\
			if (exclusive) {				\
				abort_exclusive_wait(&wq, &__wait,	\
						     state, NULL);	\
				goto __out;				\
			}						\
			break;						\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq, &__wait);					\
__out:	;								\
} while (0)

#define __wait_event(wq, condition)					\
	___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0,		\
		      ___wait_nop_ret, schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)

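/*
 * Usage sketch (illustrative only): a sleeper blocks in wait_event() until
 * a waker makes the condition true and calls wake_up(). The names my_wq
 * and my_flag below are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_flag;
 *
 *	Sleeper:
 *		wait_event(my_wq, my_flag != 0);
 *
 *	Waker (update the condition before waking):
 *		my_flag = 1;
 *		wake_up(&my_wq);
 */
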
#define __wait_event_timeout(wq, condition, ret)			\
	___wait_event(wq, ___wait_cond_timeout(condition, ret),	\
		      TASK_UNINTERRUPTIBLE, 0, ret,			\
		      ret = schedule_timeout(ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, or the remaining
 * jiffies (at least 1) if the @condition evaluated to %true before
 * the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_timeout(wq, condition, __ret);		\
	__ret;								\
})

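/*
 * Usage sketch (illustrative only): the timeout is given in jiffies, so
 * callers typically convert from milliseconds; my_wq and my_flag are
 * hypothetical.
 *
 *	long left = wait_event_timeout(my_wq, my_flag != 0,
 *				       msecs_to_jiffies(100));
 *
 * A zero return means the timeout elapsed; a non-zero return is the
 * number of jiffies (at least 1) that remained when the condition
 * became true.
 */
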
#define __wait_event_interruptible(wq, condition, ret)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, ret,	\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible(wq, condition, __ret);	\
	__ret;								\
})

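/*
 * Usage sketch (illustrative only): interruptible waits must propagate the
 * error so that signal delivery is not lost; my_wq and my_flag are
 * hypothetical.
 *
 *	int err = wait_event_interruptible(my_wq, my_flag != 0);
 *	if (err)
 *		return err;
 *
 * A non-zero return here can only be -ERESTARTSYS, meaning a signal
 * arrived before the condition became true.
 */
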
#define __wait_event_interruptible_timeout(wq, condition, ret)		\
	___wait_event(wq, ___wait_cond_timeout(condition, ret),	\
		      TASK_INTERRUPTIBLE, 0, ret,			\
		      ret = schedule_timeout(ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
 * a signal, or the remaining jiffies (at least 1) if the @condition
 * evaluated to %true before the @timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret;								\
})

#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
			      HRTIMER_MODE_REL);			\
	hrtimer_init_sleeper(&__t, current);				\
	if ((timeout).tv64 != KTIME_MAX)				\
		hrtimer_start_range_ns(&__t.timer, timeout,		\
				       current->timer_slack_ns,		\
				       HRTIMER_MODE_REL);		\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, state);			\
		if (condition)						\
			break;						\
		if (state == TASK_INTERRUPTIBLE &&			\
		    signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (!__t.task) {					\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule();						\
	}								\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	finish_wait(&wq, &__wait);					\
	__ret;								\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the @timeout elapses.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)			\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})

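/*
 * Usage sketch (illustrative only): the timeout is a ktime_t rather than
 * jiffies; my_wq and my_flag are hypothetical.
 *
 *	int err = wait_event_hrtimeout(my_wq, my_flag != 0,
 *				       ktime_set(0, 5 * NSEC_PER_MSEC));
 *
 * err is 0 if the condition became true and -ETIME if the 5 ms timeout
 * expired first. A timeout of KTIME_MAX waits without arming the
 * hrtimer at all.
 */
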
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})

#define __wait_event_interruptible_exclusive(wq, condition, ret)	\
do {									\
	__label__ __out;						\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait_exclusive(&wq, &__wait,			\
					  TASK_INTERRUPTIBLE);		\
		if (condition)						\
			break;						\
		if (signal_pending(current)) {				\
			ret = -ERESTARTSYS;				\
			abort_exclusive_wait(&wq, &__wait,		\
					     TASK_INTERRUPTIBLE, NULL);	\
			goto __out;					\
		}							\
		schedule();						\
	}								\
	finish_wait(&wq, &__wait);					\
__out:	;								\
} while (0)

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible_exclusive(wq, condition, __ret);\
	__ret;								\
})

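/*
 * Usage sketch (illustrative only): exclusive waiters are queued with
 * WQ_FLAG_EXCLUSIVE, so a plain wake_up() wakes at most one of them,
 * which avoids thundering herds when many workers wait for the same
 * resource. All names are hypothetical.
 *
 *	Worker:
 *		int err = wait_event_interruptible_exclusive(my_wq,
 *						!list_empty(&my_list));
 *		if (err)
 *			return err;
 *
 *	Producer:
 *		list_add_tail(&item->node, &my_list);
 *		wake_up(&my_wq);
 */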

#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})


/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if this process is woken up while other processes are waiting
 * on the list, further processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if this process is woken up while other processes are waiting
 * on the list, further processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))

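/*
 * Usage sketch (illustrative only) for the *_locked variants: the caller
 * already holds wq.lock, here taken with spin_lock_irq() to match the
 * _irq flavour, and the waker uses wake_up_locked() under the same lock.
 * All names are hypothetical.
 *
 *	Consumer:
 *		spin_lock_irq(&my_wq.lock);
 *		err = wait_event_interruptible_locked_irq(my_wq, my_count > 0);
 *		if (!err)
 *			my_count--;
 *		spin_unlock_irq(&my_wq.lock);
 *
 *	Producer:
 *		spin_lock_irq(&my_wq.lock);
 *		my_count++;
 *		wake_up_locked(&my_wq);
 *		spin_unlock_irq(&my_wq.lock);
 */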


#define __wait_event_killable(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_KILLABLE);		\
		if (condition)						\
			break;						\
		if (!fatal_signal_pending(current)) {			\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_killable(wq, condition, __ret);		\
	__ret;								\
})

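/*
 * Usage sketch (illustrative only): like wait_event_interruptible(), but
 * only fatal signals break the wait, which suits operations that should
 * not be restarted by ordinary signals. my_wq and my_io_done are
 * hypothetical.
 *
 *	int err = wait_event_killable(my_wq, my_io_done);
 *	if (err)
 *		return err;
 *
 * A non-zero return is -ERESTARTSYS and means the task is being killed.
 */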

#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		spin_unlock_irq(&lock);					\
		cmd;							\
		schedule();						\
		spin_lock_irq(&lock);					\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, );			\
} while (0)

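/*
 * Usage sketch (illustrative only): the condition is evaluated with the
 * spinlock held and the lock is dropped only across schedule(). Note that
 * the lock is passed by name, not by pointer. All names are hypothetical.
 *
 *	spin_lock_irq(&my_lock);
 *	wait_event_lock_irq(my_wq, !list_empty(&my_list), my_lock);
 *	item = list_first_entry(&my_list, struct my_item, node);
 *	list_del(&item->node);
 *	spin_unlock_irq(&my_lock);
 *
 * The waker adds to my_list under my_lock and then calls wake_up(&my_wq).
 */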

#define __wait_event_interruptible_lock_irq(wq, condition,		\
					    lock, ret, cmd)		\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (signal_pending(current)) {				\
			ret = -ERESTARTSYS;				\
			break;						\
		}							\
		spin_unlock_irq(&lock);					\
		cmd;							\
		schedule();						\
		spin_lock_irq(&lock);					\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)	\
({									\
	int __ret = 0;							\
									\
	if (!(condition))						\
		__wait_event_interruptible_lock_irq(wq, condition,	\
						    lock, __ret, cmd);	\
	__ret;								\
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
({									\
	int __ret = 0;							\
									\
	if (!(condition))						\
		__wait_event_interruptible_lock_irq(wq, condition,	\
						    lock, __ret, );	\
	__ret;								\
})

#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
						    lock, ret)		\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (___wait_cond_timeout(condition, ret))		\
			break;						\
		if (signal_pending(current)) {				\
			ret = -ERESTARTSYS;				\
			break;						\
		}							\
		spin_unlock_irq(&lock);					\
		ret = schedule_timeout(ret);				\
		spin_lock_irq(&lock);					\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies (at least 1)
 * if the @condition evaluated to true before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	int __ret = timeout;						\
									\
	if (!(condition))						\
		__wait_event_interruptible_lock_irq_timeout(		\
					wq, condition, lock, __ret);	\
	__ret;								\
})


/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy. DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
			     signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
					   signed long timeout);

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			  unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

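/*
 * Open-coded wait loop sketch (illustrative only), roughly what the
 * wait_event*() macros expand to; useful when checking the condition
 * needs more than a simple expression. my_wq, my_condition() and err
 * are hypothetical.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (my_condition())
 *			break;
 *		if (signal_pending(current)) {
 *			err = -ERESTARTSYS;
 *			break;
 *		}
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */
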
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
			      int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}

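/*
 * Usage sketch (illustrative only): waiting for a busy bit in a flags word
 * to clear. MY_FLAG_BUSY, my_flags and my_wait_fn are hypothetical; the
 * action routine usually just sleeps.
 *
 *	static int my_wait_fn(void *word)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	err = wait_on_bit(&my_flags, MY_FLAG_BUSY, my_wait_fn,
 *			  TASK_UNINTERRUPTIBLE);
 *
 * The clearing side does clear_bit(MY_FLAG_BUSY, &my_flags), a memory
 * barrier such as smp_mb__after_clear_bit(), and then
 * wake_up_bit(&my_flags, MY_FLAG_BUSY).
 */
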
/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set a bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 */
static inline int wait_on_bit_lock(void *word, int bit,
				   int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

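/*
 * Usage sketch (illustrative only): a simple bit lock built on
 * wait_on_bit_lock(), released with clear_bit() plus wake_up_bit().
 * MY_FLAG_LOCKED, my_flags and my_wait_fn are hypothetical (see the
 * wait_on_bit() sketch above for a minimal action routine).
 *
 *	err = wait_on_bit_lock(&my_flags, MY_FLAG_LOCKED, my_wait_fn,
 *			       TASK_UNINTERRUPTIBLE);
 *	if (err)
 *		return err;
 *
 *	... critical section, the bit is now owned by this task ...
 *
 *	clear_bit(MY_FLAG_LOCKED, &my_flags);
 *	smp_mb__after_clear_bit();
 *	wake_up_bit(&my_flags, MY_FLAG_LOCKED);
 *
 * A non-zero return means the action routine reported an error and the
 * bit was not taken.
 */
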
/**
 * wait_on_atomic_t - Wait for an atomic_t to become 0
 * @val: The atomic value being waited on, a kernel virtual address
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
 * the purpose of getting a waitqueue, but we set the key to a bit number
 * outside of the target 'word'.
 */
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
	if (atomic_read(val) == 0)
		return 0;
	return out_of_line_wait_on_atomic_t(val, action, mode);
}

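/*
 * Usage sketch (illustrative only): waiting for a hypothetical reference
 * count to drop to zero, with the final put doing the wakeup.
 *
 *	static int my_wait_atomic_fn(atomic_t *p)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	Waiter:
 *		wait_on_atomic_t(&obj->refs, my_wait_atomic_fn,
 *				 TASK_UNINTERRUPTIBLE);
 *
 *	Final put:
 *		if (atomic_dec_and_test(&obj->refs))
 *			wake_up_atomic_t(&obj->refs);
 */
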
#endif