/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAIT_H
#define _LINUX_SWAIT_H

#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <asm/current.h>

/*
 * Simple waitqueues are semantically very different from regular wait queues
 * (wait.h). The most important difference is that the simple waitqueue allows
 * for deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold
 * times.
 *
 * Mainly, this is accomplished by two things. Firstly, by not allowing
 * swake_up_all to be called with IRQs disabled, and by dropping the lock upon
 * every wakeup, giving a higher-priority task a chance to run.
 *
 * Secondly, we had to drop a fair number of features of the other waitqueue
 * code; notably:
 *
 *  - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
 *    all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
 *    sleeper state.
 *
 *  - the !exclusive mode; because that leads to O(n) wakeups, everything is
 *    exclusive. As such, swake_up_one() will only ever wake _one_ waiter.
 *
 *  - custom wake callback functions; because you cannot give any guarantees
 *    about random code. This also allows swait to be used in RT, such that a
 *    raw spinlock can be used for the swait queue head.
 *
 * As a side effect of these, the data structures are slimmer, albeit more
 * ad hoc. For all the above, note that simple wait queues should _only_ be
 * used under very specific realtime constraints -- it is best to stick with
 * the regular wait queues in most cases.
 */

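/*
 * A minimal usage sketch, illustrative only and not part of this API: a
 * hypothetical waiter/waker pair built on the declarations below. The names
 * "my_wq", "my_done", "my_waiter" and "my_waker" are made up for the example.
 *
 *	static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
 *	static bool my_done;
 *
 *	static void my_waiter(void)
 *	{
 *		// Sleeps (TASK_UNINTERRUPTIBLE) until my_done is true;
 *		// the condition is re-checked on every wakeup.
 *		swait_event_exclusive(my_wq, my_done);
 *	}
 *
 *	static void my_waker(void)
 *	{
 *		my_done = true;
 *		// Exclusive semantics: wakes at most one waiter.
 *		swake_up_one(&my_wq);
 *	}
 */
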
struct task_struct;

struct swait_queue_head {
	raw_spinlock_t		lock;
	struct list_head	task_list;
};

struct swait_queue {
	struct task_struct	*task;
	struct list_head	task_list;
};

#define __SWAITQUEUE_INITIALIZER(name) {				\
	.task		= current,					\
	.task_list	= LIST_HEAD_INIT((name).task_list),		\
}

#define DECLARE_SWAITQUEUE(name)					\
	struct swait_queue name = __SWAITQUEUE_INITIALIZER(name)

#define __SWAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __RAW_SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= LIST_HEAD_INIT((name).task_list),		\
}

#define DECLARE_SWAIT_QUEUE_HEAD(name)					\
	struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
				    struct lock_class_key *key);

#define init_swait_queue_head(q)				\
	do {							\
		static struct lock_class_key __key;		\
		__init_swait_queue_head((q), #q, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name)			\
	({ init_swait_queue_head(&name); name; })
# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name)		\
	struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name)		\
	DECLARE_SWAIT_QUEUE_HEAD(name)
#endif

/**
 * swait_active -- locklessly test for waiters on the queue
 * @wq: the waitqueue to test for waiters
 *
 * Returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * NOTE2: the same caveats apply here as to waitqueue_active() for regular
 * waitqueues.
 *
 * Use either while holding swait_queue_head::lock or when used for wakeups
 * with an extra smp_mb() like:
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_swait_exclusive(&wq_head, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (swait_active(&wq_head))       if (@cond)
 *        swake_up_one(&wq_head);            break;
 *                                        schedule();
 *                                      }
 *                                      finish_swait(&wq_head, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * swait_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 * This, in turn, can trigger missing wakeups.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int swait_active(struct swait_queue_head *wq)
{
	return !list_empty(&wq->task_list);
}

/**
 * swq_has_sleeper - check if there are any waiting processes
 * @wq: the waitqueue to test for waiters
 *
 * Returns true if @wq has waiting processes
 *
 * Please refer to the comment for swait_active.
 */
static inline bool swq_has_sleeper(struct swait_queue_head *wq)
{
	/*
	 * We need to be sure we are in sync with the list_add()
	 * modifications to the wait queue (task_list).
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return swait_active(wq);
}

extern void swake_up_one(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
extern void swake_up_locked(struct swait_queue_head *q);

extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);

extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
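
/*
 * Illustrative sketch of how the helpers above pair up; this is not part of
 * the API, and "my_wq", "my_cond", "my_waiter" and "my_waker" are
 * hypothetical names:
 *
 *	static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
 *	static bool my_cond;
 *
 *	static void my_waiter(void)
 *	{
 *		DECLARE_SWAITQUEUE(wait);
 *
 *		for (;;) {
 *			prepare_to_swait_exclusive(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *			if (my_cond)
 *				break;
 *			schedule();
 *		}
 *		finish_swait(&my_wq, &wait);
 *	}
 *
 *	static void my_waker(void)
 *	{
 *		my_cond = true;
 *		// swq_has_sleeper() supplies the smp_mb() that pairs with
 *		// set_current_state() on the waiter side, so the wakeup can
 *		// safely be skipped when nobody is queued.
 *		if (swq_has_sleeper(&my_wq))
 *			swake_up_one(&my_wq);
 *	}
 */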

/* as per ___wait_event() but for swait, therefore "exclusive == 1" */
#define ___swait_event(wq, condition, state, ret, cmd)			\
({									\
	__label__ __out;						\
	struct swait_queue __wait;					\
	long __ret = ret;						\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	for (;;) {							\
		long __int = prepare_to_swait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			goto __out;					\
		}							\
									\
		cmd;							\
	}								\
	finish_swait(&wq, &__wait);					\
__out:	__ret;								\
})

#define __swait_event(wq, condition)					\
	(void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0,	\
			     schedule())

#define swait_event_exclusive(wq, condition)				\
do {									\
	if (condition)							\
		break;							\
	__swait_event(wq, condition);					\
} while (0)

#define __swait_event_timeout(wq, condition, timeout)			\
	___swait_event(wq, ___wait_cond_timeout(condition),		\
		       TASK_UNINTERRUPTIBLE, timeout,			\
		       __ret = schedule_timeout(__ret))

#define swait_event_timeout_exclusive(wq, condition, timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __swait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})

#define __swait_event_interruptible(wq, condition)			\
	___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0,		\
		       schedule())

#define swait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __swait_event_interruptible(wq, condition);	\
	__ret;								\
})

#define __swait_event_interruptible_timeout(wq, condition, timeout)	\
	___swait_event(wq, ___wait_cond_timeout(condition),		\
		       TASK_INTERRUPTIBLE, timeout,			\
		       __ret = schedule_timeout(__ret))

#define swait_event_interruptible_timeout_exclusive(wq, condition, timeout)\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __swait_event_interruptible_timeout(wq,		\
							    condition, timeout); \
	__ret;								\
})

#define __swait_event_idle(wq, condition)				\
	(void)___swait_event(wq, condition, TASK_IDLE, 0, schedule())

/**
 * swait_event_idle_exclusive - wait without system load contribution
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the @condition evaluates to
 * true. The @condition is checked each time the waitqueue @wq is woken up.
 *
 * This function is mostly used when a kthread or workqueue waits for some
 * condition and doesn't want to contribute to system load. Signals are
 * ignored.
 */
#define swait_event_idle_exclusive(wq, condition)			\
do {									\
	if (condition)							\
		break;							\
	__swait_event_idle(wq, condition);				\
} while (0)
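
/*
 * A typical (hypothetical) kthread main loop using the idle variant, so that
 * waiting does not inflate the load average; "my_wq", "my_work_pending" and
 * "my_process_work" are illustrative names:
 *
 *	static int my_kthread(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			// Sleeps in TASK_IDLE: uninterruptible, but not
 *			// counted as load and ignoring signals.
 *			swait_event_idle_exclusive(my_wq, my_work_pending ||
 *						   kthread_should_stop());
 *			if (my_work_pending)
 *				my_process_work();
 *		}
 *		return 0;
 *	}
 */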

#define __swait_event_idle_timeout(wq, condition, timeout)		\
	___swait_event(wq, ___wait_cond_timeout(condition),		\
		       TASK_IDLE, timeout,				\
		       __ret = schedule_timeout(__ret))

/**
 * swait_event_idle_timeout_exclusive - wait up to timeout without load contribution
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout at which we'll give up in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the @condition evaluates to
 * true. The @condition is checked each time the waitqueue @wq is woken up.
 *
 * This function is mostly used when a kthread or workqueue waits for some
 * condition and doesn't want to contribute to system load. Signals are
 * ignored.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define swait_event_idle_timeout_exclusive(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __swait_event_idle_timeout(wq,			\
						   condition, timeout);	\
	__ret;								\
})
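
/*
 * Sketch of handling the return value documented above (hypothetical names,
 * assuming a 100ms polling budget):
 *
 *	long left = swait_event_idle_timeout_exclusive(my_wq, my_cond,
 *						       msecs_to_jiffies(100));
 *	if (!left)
 *		pr_debug("timed out, condition still false\n");
 *	else
 *		pr_debug("condition true, ~%ld jiffies remaining\n", left);
 */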

#endif /* _LINUX_SWAIT_H */