/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCUWAIT_H_
#define _LINUX_RCUWAIT_H_

#include <linux/rcupdate.h>
#include <linux/sched/signal.h>

/*
 * rcuwait provides a way of blocking and waking up a single
 * task in an rcu-safe manner.
 *
 * The only time @task is non-nil is when a user is blocked (or
 * checking whether it needs to block) on a condition; it is reset
 * as soon as we know that the condition has succeeded and we are
 * awoken.
 */
struct rcuwait {
	struct task_struct __rcu *task;
};

#define __RCUWAIT_INITIALIZER(name)	\
	{ .task = NULL, }

static inline void rcuwait_init(struct rcuwait *w)
{
	w->task = NULL;
}
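
/*
 * A minimal initialization sketch (hypothetical caller code, not part
 * of this header). An rcuwait may be initialized statically or at run
 * time, before any waiter or waker can observe it:
 *
 *	static struct rcuwait global_wait = __RCUWAIT_INITIALIZER(global_wait);
 *
 *	struct my_ctx {
 *		struct rcuwait wait;
 *	};
 *
 *	static void my_ctx_init(struct my_ctx *ctx)
 *	{
 *		rcuwait_init(&ctx->wait);
 *	}
 */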

/*
 * Note: this provides no serialization and, just as with waitqueues,
 * care is required when judging whether or not the wait is active.
 */
static inline int rcuwait_active(struct rcuwait *w)
{
	return !!rcu_access_pointer(w->task);
}
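
/*
 * Usage sketch (hypothetical caller code; @ctx is an assumption): a
 * waker may use rcuwait_active() as a cheap, racy hint to skip the
 * wakeup path when nobody is waiting; any stronger guarantee requires
 * holding the lock that serializes writes to @task:
 *
 *	if (rcuwait_active(&ctx->wait))
 *		rcuwait_wake_up(&ctx->wait);
 */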

extern int rcuwait_wake_up(struct rcuwait *w);

/*
 * The caller is responsible for locking around rcuwait_wait_event(),
 * and [prepare_to/finish]_rcuwait() such that writes to @task are
 * properly serialized.
 */

static inline void prepare_to_rcuwait(struct rcuwait *w)
{
	rcu_assign_pointer(w->task, current);
}

static inline void finish_rcuwait(struct rcuwait *w)
{
	rcu_assign_pointer(w->task, NULL);
	__set_current_state(TASK_RUNNING);
}
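
/*
 * Sketch of a hand-rolled wait loop built on the pair above
 * (hypothetical caller code; @ctx and its @done flag are assumptions).
 * This is essentially what rcuwait_wait_event() below expands to, and
 * is only needed when the stock macro does not fit:
 *
 *	prepare_to_rcuwait(&ctx->wait);
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (READ_ONCE(ctx->done))
 *			break;
 *		schedule();
 *	}
 *	finish_rcuwait(&ctx->wait);
 */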

#define rcuwait_wait_event(w, condition, state)				\
({									\
	int __ret = 0;							\
	prepare_to_rcuwait(w);						\
	for (;;) {							\
		/*							\
		 * Implicit barrier (A) pairs with (B) in		\
		 * rcuwait_wake_up().					\
		 */							\
		set_current_state(state);				\
		if (condition)						\
			break;						\
									\
		if (signal_pending_state(state, current)) {		\
			__ret = -EINTR;					\
			break;						\
		}							\
									\
		schedule();						\
	}								\
	finish_rcuwait(w);						\
	__ret;								\
})
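
/*
 * End-to-end usage sketch (hypothetical caller code; @ctx and @done
 * are assumptions). Note that rcuwait supports a single waiter, so the
 * required serialization of writes to @task is typically given by
 * construction, e.g. only the owner of @ctx ever sleeps on it:
 *
 *	// waiter
 *	int ret = rcuwait_wait_event(&ctx->wait, READ_ONCE(ctx->done),
 *				     TASK_INTERRUPTIBLE);
 *	if (ret == -EINTR)
 *		return ret;	// signal arrived before @done became true
 *
 *	// waker, possibly running concurrently
 *	WRITE_ONCE(ctx->done, true);
 *	rcuwait_wake_up(&ctx->wait);	// wakes the waiter, if there is one
 */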

#endif /* _LINUX_RCUWAIT_H_ */