/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCUWAIT_H_
#define _LINUX_RCUWAIT_H_

#include <linux/rcupdate.h>
#include <linux/sched/signal.h>

/*
 * rcuwait provides a way of blocking and waking up a single
 * task in an rcu-safe manner.
 *
 * The only time @task is non-nil is when a task is blocked (or
 * checking whether it needs to block) on a condition; it is reset
 * as soon as we know the condition has been satisfied and the
 * waiter is awoken.
 */
struct rcuwait {
	struct task_struct __rcu *task;
};

#define __RCUWAIT_INITIALIZER(name)		\
	{ .task = NULL, }

static inline void rcuwait_init(struct rcuwait *w)
{
	w->task = NULL;
}
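
/*
 * Example (illustrative sketch; "global_wait", "my_obj" and
 * "my_obj_setup" are hypothetical names): an rcuwait may be
 * initialized statically via __RCUWAIT_INITIALIZER() or at
 * runtime via rcuwait_init():
 *
 *	static struct rcuwait global_wait = __RCUWAIT_INITIALIZER(global_wait);
 *
 *	struct my_obj {
 *		struct rcuwait wait;
 *	};
 *
 *	static void my_obj_setup(struct my_obj *obj)
 *	{
 *		rcuwait_init(&obj->wait);
 *	}
 */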

/*
 * Note: this provides no serialization and, just as with waitqueues,
 * requires care when estimating whether or not the wait is active.
 */
static inline int rcuwait_active(struct rcuwait *w)
{
	return !!rcu_access_pointer(w->task);
}

extern int rcuwait_wake_up(struct rcuwait *w);
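
/*
 * Illustrative sketch (hypothetical "obj"): because rcuwait_active()
 * is unserialized, callers would typically treat the result as a hint
 * only, e.g. to elide a wakeup attempt, and rely on rcuwait_wake_up()
 * doing the right thing when no one is waiting:
 *
 *	if (rcuwait_active(&obj->wait))
 *		rcuwait_wake_up(&obj->wait);
 */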

/*
 * The caller is responsible for locking around rcuwait_wait_event()
 * and [prepare_to/finish]_rcuwait() such that writes to @task are
 * properly serialized.
 */

static inline void prepare_to_rcuwait(struct rcuwait *w)
{
	rcu_assign_pointer(w->task, current);
}

static inline void finish_rcuwait(struct rcuwait *w)
{
	rcu_assign_pointer(w->task, NULL);
	__set_current_state(TASK_RUNNING);
}
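
/*
 * Illustrative sketch (hypothetical "obj" and "done" flag): a caller
 * that cannot use rcuwait_wait_event() might open-code the wait loop
 * with prepare_to_rcuwait()/finish_rcuwait(), under whatever
 * serialization it already provides for @task:
 *
 *	prepare_to_rcuwait(&obj->wait);
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (smp_load_acquire(&obj->done))
 *			break;
 *		schedule();
 *	}
 *	finish_rcuwait(&obj->wait);
 */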

#define rcuwait_wait_event(w, condition, state)				\
({									\
	int __ret = 0;							\
	prepare_to_rcuwait(w);						\
	for (;;) {							\
		/*							\
		 * Implicit barrier (A) pairs with (B) in		\
		 * rcuwait_wake_up().					\
		 */							\
		set_current_state(state);				\
		if (condition)						\
			break;						\
									\
		if (signal_pending_state(state, current)) {		\
			__ret = -EINTR;					\
			break;						\
		}							\
									\
		schedule();						\
	}								\
	finish_rcuwait(w);						\
	__ret;								\
})
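
/*
 * Illustrative pairing (hypothetical "obj" and "done" flag), a minimal
 * sketch of how the waiter and waker sides might interact:
 *
 *	Waiter:
 *		ret = rcuwait_wait_event(&obj->wait, READ_ONCE(obj->done),
 *					 TASK_INTERRUPTIBLE);
 *
 *	Waker:
 *		WRITE_ONCE(obj->done, true);
 *		rcuwait_wake_up(&obj->wait);
 *
 * A return value of -EINTR indicates the waiter was interrupted by a
 * signal rather than woken because the condition became true.
 */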

#endif /* _LINUX_RCUWAIT_H_ */