/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Task-based RCU implementations.
 *
 * Copyright (C) 2020 Paul E. McKenney
 */

#ifdef CONFIG_TASKS_RCU_GENERIC

////////////////////////////////////////////////////////////////////////
//
// Generic data structures.

struct rcu_tasks;
typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(void);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(struct list_head *hop);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(struct rcu_tasks *rtp);

/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_head: Head of callback list.
 * @cbs_tail: Tail pointer for callback list.
 * @cbs_wq: Wait queue allowing new callback to get kthread's attention.
 * @cbs_lock: Lock protecting callback list.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @gp_func: This flavor's grace-period-wait function.
 * @gp_state: Grace period's most recent state transition (debugging).
 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
 * @init_fract: Initial backoff sleep interval.
 * @gp_jiffies: Time of last @gp_state transition.
 * @gp_start: Most recent grace-period start in jiffies.
 * @n_gps: Number of grace periods completed since boot.
 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
 * @n_ipis_fails: Number of IPI-send failures.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */
struct rcu_tasks {
	struct rcu_head *cbs_head;
	struct rcu_head **cbs_tail;
	struct wait_queue_head cbs_wq;
	raw_spinlock_t cbs_lock;
	int gp_state;
	int gp_sleep;
	int init_fract;
	unsigned long gp_jiffies;
	unsigned long gp_start;
	unsigned long n_gps;
	unsigned long n_ipis;
	unsigned long n_ipis_fails;
	struct task_struct *kthread_ptr;
	rcu_tasks_gp_func_t gp_func;
	pregp_func_t pregp_func;
	pertask_func_t pertask_func;
	postscan_func_t postscan_func;
	holdouts_func_t holdouts_func;
	postgp_func_t postgp_func;
	call_rcu_func_t call_func;
	char *name;
	char *kname;
};

#define DEFINE_RCU_TASKS(rt_name, gp, call, n)				\
static struct rcu_tasks rt_name =					\
{									\
	.cbs_tail = &rt_name.cbs_head,					\
	.cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq),	\
	.cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_lock),	\
	.gp_func = gp,							\
	.call_func = call,						\
	.name = n,							\
	.kname = #rt_name,						\
}

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Avoid IPIing CPUs early in the grace period. */
#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);

/* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);

/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT		 0
#define RTGS_WAIT_WAIT_CBS	 1
#define RTGS_WAIT_GP		 2
#define RTGS_PRE_WAIT_GP	 3
#define RTGS_SCAN_TASKLIST	 4
#define RTGS_POST_SCAN_TASKLIST	 5
#define RTGS_WAIT_SCAN_HOLDOUTS	 6
#define RTGS_SCAN_HOLDOUTS	 7
#define RTGS_POST_GP		 8
#define RTGS_WAIT_READERS	 9
#define RTGS_INVOKE_CBS		10
#define RTGS_WAIT_CBS		11
#ifndef CONFIG_TINY_RCU
static const char * const rcu_tasks_gp_state_names[] = {
	"RTGS_INIT",
	"RTGS_WAIT_WAIT_CBS",
	"RTGS_WAIT_GP",
	"RTGS_PRE_WAIT_GP",
	"RTGS_SCAN_TASKLIST",
	"RTGS_POST_SCAN_TASKLIST",
	"RTGS_WAIT_SCAN_HOLDOUTS",
	"RTGS_SCAN_HOLDOUTS",
	"RTGS_POST_GP",
	"RTGS_WAIT_READERS",
	"RTGS_INVOKE_CBS",
	"RTGS_WAIT_CBS",
};
#endif /* #ifndef CONFIG_TINY_RCU */

////////////////////////////////////////////////////////////////////////
//
// Generic code.

/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{
	rtp->gp_state = newstate;
	rtp->gp_jiffies = jiffies;
}

#ifndef CONFIG_TINY_RCU
/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{
	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
	int j = READ_ONCE(i); // Prevent the compiler from reading twice

	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
		return "???";
	return rcu_tasks_gp_state_names[j];
}
#endif /* #ifndef CONFIG_TINY_RCU */

// Enqueue a callback for the specified flavor of Tasks RCU.
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
				   struct rcu_tasks *rtp)
{
	unsigned long flags;
	bool needwake;

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
	needwake = !rtp->cbs_head;
	WRITE_ONCE(*rtp->cbs_tail, rhp);
	rtp->cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rtp->kthread_ptr))
		wake_up(&rtp->cbs_wq);
}

// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
	/* Complain if the scheduler has not started. */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(rtp->call_func);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct rcu_head *list;
	struct rcu_head *next;
	struct rcu_tasks *rtp = arg;

	/* Run on housekeeping CPUs by default. Sysadm can move if desired. */
	housekeeping_affine(current, HK_FLAG_RCU);
	WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down. ;-)
	 */
	for (;;) {

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
		smp_mb__after_spinlock(); // Order updates vs. GP.
		list = rtp->cbs_head;
		rtp->cbs_head = NULL;
		rtp->cbs_tail = &rtp->cbs_head;
		raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rtp->cbs_wq,
						 READ_ONCE(rtp->cbs_head));
			if (!rtp->cbs_head) {
				WARN_ON(signal_pending(current));
				set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS);
				schedule_timeout_idle(HZ/10);
			}
			continue;
		}

		// Wait for one grace period.
		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
		rtp->gp_start = jiffies;
		rtp->gp_func(rtp);
		rtp->n_gps++;

		/* Invoke the callbacks. */
		set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		/* Paranoid sleep to keep this from entering a tight loop */
		schedule_timeout_idle(rtp->gp_sleep);

		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
	}
}

/* Spawn RCU-tasks grace-period kthread, e.g., at core_initcall() time. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
		return;
	smp_mb(); /* Ensure others see full kthread. */
}

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU
	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifndef CONFIG_TINY_RCU
/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
		rtp->kname,
		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
		jiffies - data_race(rtp->gp_jiffies),
		data_race(rtp->n_gps),
		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
		".k"[!!data_race(rtp->kthread_ptr)],
		".C"[!!data_race(rtp->cbs_head)],
		s);
}
#endif // #ifndef CONFIG_TINY_RCU

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)

////////////////////////////////////////////////////////////////////////
//
// Shared code between task-list-scanning variants of Tasks RCU.

/* Wait for one RCU-tasks grace period. */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
	struct task_struct *g, *t;
	unsigned long lastreport;
	LIST_HEAD(holdouts);
	int fract;

	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
	rtp->pregp_func();

	/*
	 * There were callbacks, so we need to wait for an RCU-tasks
	 * grace period. Start off by scanning the task list for tasks
	 * that are not already voluntarily blocked. Mark these tasks
	 * and make a list of them in holdouts.
	 */
	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
	rcu_read_lock();
	for_each_process_thread(g, t)
		rtp->pertask_func(t, &holdouts);
	rcu_read_unlock();

	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
	rtp->postscan_func(&holdouts);

	/*
	 * Each pass through the following loop scans the list of holdout
	 * tasks, removing any that are no longer holdouts. When the list
	 * is empty, we are done.
	 */
	lastreport = jiffies;

	// Start off with initial wait and slowly back off to 1 HZ wait.
	fract = rtp->init_fract;

	while (!list_empty(&holdouts)) {
		bool firstreport;
		bool needreport;
		int rtst;

		/* Slowly back off waiting for holdouts */
		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
		schedule_timeout_idle(fract);

		if (fract < HZ)
			fract++;

		rtst = READ_ONCE(rcu_task_stall_timeout);
		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
		if (needreport)
			lastreport = jiffies;
		firstreport = true;
		WARN_ON(signal_pending(current));
		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
		rtp->holdouts_func(&holdouts, needreport, &firstreport);
	}

	set_tasks_gp_state(rtp, RTGS_POST_GP);
	rtp->postgp_func(rtp);
}

#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */

#ifdef CONFIG_TASKS_RCU

////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time. There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs. If this is required, per-CPU callback lists
// will be needed.
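//
// For illustration only, a sketch of the intended usage pattern; the
// structure and function names below are made up, but the
// synchronize_rcu_tasks() call is the real update-side API:
//
//	static void my_remove_trampoline(struct my_trampoline *tramp)
//	{
//		my_unpatch_all_call_sites(tramp);	// No new entries.
//		synchronize_rcu_tasks();	// Wait for tasks already in it.
//		kfree(tramp);			// Nothing can still be executing it.
//	}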

/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(void)
{
	/*
	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
	 * to complete. Invoking synchronize_rcu() suffices because all
	 * these transitions occur with interrupts disabled. Without this
	 * synchronize_rcu(), a read-side critical section that started
	 * before the grace period might be incorrectly seen as having
	 * started after the grace period.
	 *
	 * This synchronize_rcu() also dispenses with the need for a
	 * memory barrier on the first store to t->rcu_tasks_holdout,
	 * as it forces the store to happen after the beginning of the
	 * grace period.
	 */
	synchronize_rcu();
}

/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
	if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
		get_task_struct(t);
		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
		WRITE_ONCE(t->rcu_tasks_holdout, true);
		list_add(&t->rcu_tasks_holdout_list, hop);
	}
}

/* Processing between scanning the task list and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{
	/*
	 * Wait for tasks that are in the process of exiting. This
	 * does only part of the job, ensuring that all tasks that were
	 * previously exiting reach the point where they have disabled
	 * preemption, allowing the later synchronize_rcu() to finish
	 * the job.
	 */
	synchronize_srcu(&tasks_rcu_exit_srcu);
}

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* Scan the holdout lists for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
				    bool needreport, bool *firstreport)
{
	struct task_struct *t, *t1;

	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
		check_holdout_task(t, needreport, firstreport);
		cond_resched();
	}
}

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{
	/*
	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
	 * memory barriers prior to them in the schedule() path, memory
	 * reordering on other CPUs could cause their RCU-tasks read-side
	 * critical sections to extend past the end of the grace period.
	 * However, because these ->nvcsw updates are carried out with
	 * interrupts disabled, we can use synchronize_rcu() to force the
	 * needed ordering on all such CPUs.
	 *
	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
	 * accesses to be within the grace period, avoiding the need for
	 * memory barriers for ->rcu_tasks_holdout accesses.
	 *
	 * In addition, this synchronize_rcu() waits for exiting tasks
	 * to complete their final preempt_disable() region of execution,
	 * cleaning up after the synchronize_srcu() above.
	 */
	synchronize_rcu();
}

void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
 * or transition to usermode execution. As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
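
/*
 * For illustration only: an asynchronous user would typically embed an
 * rcu_head in the object being protected and free it from the callback.
 * The structure and function names in this sketch are made up:
 *
 *	struct my_hook {
 *		void (*func)(void);
 *		struct rcu_head rh;
 *	};
 *
 *	static void my_hook_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_hook, rh));
 *	}
 *
 *	// After removing all references that tasks might still be using:
 *	//	call_rcu_tasks(&hook->rh, my_hook_free_cb);
 */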

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed. These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks. The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy. ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

static int __init rcu_spawn_tasks_kthread(void)
{
	rcu_tasks.gp_sleep = HZ / 10;
	rcu_tasks.init_fract = HZ / 10;
	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
	rcu_tasks.pertask_func = rcu_tasks_pertask;
	rcu_tasks.postscan_func = rcu_tasks_postscan;
	rcu_tasks.holdouts_func = check_all_holdout_tasks;
	rcu_tasks.postgp_func = rcu_tasks_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
	return 0;
}
core_initcall(rcu_spawn_tasks_kthread);

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_classic_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

/* Do the srcu_read_lock() for the above synchronize_srcu(). */
void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
{
	preempt_disable();
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
	preempt_enable();
}

/* Do the srcu_read_unlock() for the above synchronize_srcu(). */
void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
{
	struct task_struct *t = current;

	preempt_disable();
	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
	preempt_enable();
	exit_tasks_rcu_finish_trace(t);
}

#else /* #ifdef CONFIG_TASKS_RCU */
void exit_tasks_rcu_start(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_RUDE_RCU

////////////////////////////////////////////////////////////////////////
//
// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
// passing an empty function to schedule_on_each_cpu(). This approach
// provides an asynchronous call_rcu_tasks_rude() API and batching
// of concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
// This sends IPIs far and wide and induces otherwise unnecessary context
// switches on all online CPUs, whether idle or not.

// Empty function to allow workqueues to force a context switch.
static void rcu_tasks_be_rude(struct work_struct *work)
{
}

// Wait for one rude RCU-tasks grace period.
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
	rtp->n_ipis += cpumask_weight(cpu_online_mask);
	schedule_on_each_cpu(rcu_tasks_be_rude);
}

void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
		 "RCU Tasks Rude");

/**
 * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks_rude()
 * assumes that the read-side critical sections end at context switch,
 * cond_resched_rcu_qs(), or transition to usermode execution. As such,
 * there are no read-side primitives analogous to rcu_read_lock() and
 * rcu_read_unlock() because this primitive is intended to determine
 * that all tasks have passed through a safe state, not so much for
 * data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);

/**
 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
 *
 * Control will return to the caller some time after a rude rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed. These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), userspace execution, and (in theory,
 * anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks. The synchronize_rcu_tasks_rude() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_rude(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);

/**
 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_rude(void)
{
	/* There is only one callback queue, so this is easy. ;-) */
	synchronize_rcu_tasks_rude();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);

static int __init rcu_spawn_tasks_rude_kthread(void)
{
	rcu_tasks_rude.gp_sleep = HZ / 10;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
	return 0;
}
core_initcall(rcu_spawn_tasks_rude_kthread);

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_rude_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */

////////////////////////////////////////////////////////////////////////
//
// Tracing variant of Tasks RCU. This variant is designed to be used
// to protect tracing hooks, including those of BPF. This variant
// therefore:
//
// 1. Has explicit read-side markers to allow finite grace periods
//    in the face of in-kernel loops for PREEMPT=n builds.
//
// 2. Protects code in the idle loop, exception entry/exit, and
//    CPU-hotplug code paths, similar to the capabilities of SRCU.
//
// 3. Avoids expensive read-side instructions, having overhead similar
//    to that of Preemptible RCU.
//
// There are of course downsides. The grace-period code can send IPIs to
// CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
// It is necessary to scan the full tasklist, much as for Tasks RCU. There
// is a single callback queue guarded by a single lock, again, much as for
// Tasks RCU. If needed, these downsides can be at least partially remedied.
//
// Perhaps most important, this variant of RCU does not affect the vanilla
// flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace
// readers can operate from idle, offline, and exception entry/exit in no
// way allows rcu_preempt and rcu_sched readers to also do so.
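//
// For illustration only, a sketch of the intended usage pattern; the
// hook pointer name below is made up, but rcu_read_lock_trace(),
// rcu_read_unlock_trace() (see include/linux/rcupdate_trace.h), and
// synchronize_rcu_tasks_trace() are the real read- and update-side APIs:
//
//	// Reader (for example, a tracing hook invoking a BPF program):
//	rcu_read_lock_trace();
//	hook = READ_ONCE(my_hook);
//	if (hook)
//		hook->func(hook);
//	rcu_read_unlock_trace();
//
//	// Updater removing that hook:
//	old = my_hook;
//	WRITE_ONCE(my_hook, NULL);
//	synchronize_rcu_tasks_trace();	// Wait out all trace readers.
//	kfree(old);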

// The lockdep state must be outside of #ifdef to be useful.
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_trace_key;
struct lockdep_map rcu_trace_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TASKS_TRACE_RCU

static atomic_t trc_n_readers_need_end;		// Number of waited-for readers.
static DECLARE_WAIT_QUEUE_HEAD(trc_wait);	// List of holdout tasks.

// Record outstanding IPIs to each CPU. No point in sending two...
static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);

// The number of detections of task quiescent state relying on
// heavyweight readers executing explicit memory barriers.
static unsigned long n_heavy_reader_attempts;
static unsigned long n_heavy_reader_updates;
static unsigned long n_heavy_reader_ofl_updates;

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
		 "RCU Tasks Trace");

/*
 * This irq_work handler allows rcu_read_unlock_trace() to be invoked
 * while the scheduler locks are held.
 */
static void rcu_read_unlock_iw(struct irq_work *iwp)
{
	wake_up(&trc_wait);
}
static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);

/* If we are the last reader, wake up the grace-period kthread. */
void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
{
	int nq = t->trc_reader_special.b.need_qs;

	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
	    t->trc_reader_special.b.need_mb)
		smp_mb(); // Pairs with update-side barriers.
	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
	if (nq)
		WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
	WRITE_ONCE(t->trc_reader_nesting, nesting);
	if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
		irq_work_queue(&rcu_tasks_trace_iw);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);

/* Add a task to the holdout list, if it is not already on the list. */
static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
{
	if (list_empty(&t->trc_holdout_list)) {
		get_task_struct(t);
		list_add(&t->trc_holdout_list, bhp);
	}
}

/* Remove a task from the holdout list, if it is in fact present. */
static void trc_del_holdout(struct task_struct *t)
{
	if (!list_empty(&t->trc_holdout_list)) {
		list_del_init(&t->trc_holdout_list);
		put_task_struct(t);
	}
}

/* IPI handler to check task state. */
static void trc_read_check_handler(void *t_in)
{
	struct task_struct *t = current;
	struct task_struct *texp = t_in;

	// If the task is no longer running on this CPU, leave.
	if (unlikely(texp != t)) {
		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
			wake_up(&trc_wait);
		goto reset_ipi; // Already on holdout list, so will check later.
	}

	// If the task is not in a read-side critical section, and
	// if this is the last reader, awaken the grace-period kthread.
	if (likely(!t->trc_reader_nesting)) {
		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
			wake_up(&trc_wait);
		// Mark as checked after decrement to avoid false
		// positives on the above WARN_ON_ONCE().
		WRITE_ONCE(t->trc_reader_checked, true);
		goto reset_ipi;
	}
	// If we are racing with an rcu_read_unlock_trace(), try again later.
	if (unlikely(t->trc_reader_nesting < 0)) {
		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
			wake_up(&trc_wait);
		goto reset_ipi;
	}
	WRITE_ONCE(t->trc_reader_checked, true);

	// Get here if the task is in a read-side critical section. Set
	// its state so that it will awaken the grace-period kthread upon
	// exit from that critical section.
	WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);

reset_ipi:
	// Allow future IPIs to be sent on CPU and for task.
	// Also order this IPI handler against any later manipulations of
	// the intended task.
	smp_store_release(&per_cpu(trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
}

/* Callback function for scheduler to check locked-down task. */
static bool trc_inspect_reader(struct task_struct *t, void *arg)
{
	int cpu = task_cpu(t);
	bool in_qs = false;
	bool ofl = cpu_is_offline(cpu);

	if (task_curr(t)) {
		WARN_ON_ONCE(ofl && !is_idle_task(t));

		// If no chance of heavyweight readers, do it the hard way.
		if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
			return false;

		// If heavyweight readers are enabled on the remote task,
		// we can inspect its state despite it being currently running.
		// However, we cannot safely change its state.
		n_heavy_reader_attempts++;
		if (!ofl && // Check for "running" idle tasks on offline CPUs.
		    !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
			return false; // No quiescent state, do it the hard way.
		n_heavy_reader_updates++;
		if (ofl)
			n_heavy_reader_ofl_updates++;
		in_qs = true;
	} else {
		in_qs = likely(!t->trc_reader_nesting);
	}

	// Mark as checked. Because this is called from the grace-period
	// kthread, also remove the task from the holdout list.
	t->trc_reader_checked = true;
	trc_del_holdout(t);

	if (in_qs)
		return true; // Already in quiescent state, done!!!

	// The task is in a read-side critical section, so set up its
	// state so that it will awaken the grace-period kthread upon exit
	// from that critical section.
	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
	WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
	return true;
}

/* Attempt to extract the state for the specified task. */
static void trc_wait_for_one_reader(struct task_struct *t,
				    struct list_head *bhp)
{
	int cpu;

	// If a previous IPI is still in flight, let it complete.
	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
		return;

	// The current task had better be in a quiescent state.
	if (t == current) {
		t->trc_reader_checked = true;
		trc_del_holdout(t);
		WARN_ON_ONCE(t->trc_reader_nesting);
		return;
	}

	// Attempt to nail down the task for inspection.
	get_task_struct(t);
	if (try_invoke_on_locked_down_task(t, trc_inspect_reader, NULL)) {
		put_task_struct(t);
		return;
	}
	put_task_struct(t);

	// If currently running, send an IPI; either way, add to list.
	trc_add_holdout(t, bhp);
	if (task_curr(t) &&
	    time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
		// The task is currently running, so try IPIing it.
		cpu = task_cpu(t);

		// If there is already an IPI outstanding, let it happen.
		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
			return;

		atomic_inc(&trc_n_readers_need_end);
		per_cpu(trc_ipi_to_cpu, cpu) = true;
		t->trc_ipi_to_cpu = cpu;
		rcu_tasks_trace.n_ipis++;
		if (smp_call_function_single(cpu,
					     trc_read_check_handler, t, 0)) {
			// Just in case there is some other reason for
			// failure than the target CPU being offline.
			rcu_tasks_trace.n_ipis_fails++;
			per_cpu(trc_ipi_to_cpu, cpu) = false;
			t->trc_ipi_to_cpu = cpu;
			if (atomic_dec_and_test(&trc_n_readers_need_end)) {
				WARN_ON_ONCE(1);
				wake_up(&trc_wait);
			}
		}
	}
}

/* Initialize for a new RCU-tasks-trace grace period. */
static void rcu_tasks_trace_pregp_step(void)
{
	int cpu;

	// Allow for fast-acting IPIs.
	atomic_set(&trc_n_readers_need_end, 1);

	// There shouldn't be any old IPIs, but...
	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));

	// Disable CPU hotplug across the tasklist scan.
	// This also waits for all readers in CPU-hotplug code paths.
	cpus_read_lock();
}

/* Do first-round processing for the specified task. */
static void rcu_tasks_trace_pertask(struct task_struct *t,
				    struct list_head *hop)
{
	WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
	WRITE_ONCE(t->trc_reader_checked, false);
	t->trc_ipi_to_cpu = -1;
	trc_wait_for_one_reader(t, hop);
}

/*
 * Do intermediate processing between task and holdout scans and
 * pick up the idle tasks.
 */
static void rcu_tasks_trace_postscan(struct list_head *hop)
{
	int cpu;

	for_each_possible_cpu(cpu)
		rcu_tasks_trace_pertask(idle_task(cpu), hop);

	// Re-enable CPU hotplug now that the tasklist scan has completed.
	cpus_read_unlock();

	// Wait for late-stage exiting tasks to finish exiting.
	// These might have passed the call to exit_tasks_rcu_finish().
	synchronize_rcu();
	// Any tasks that exit after this point will set ->trc_reader_checked.
}

/* Show the state of a task stalling the current RCU tasks trace GP. */
static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
{
	int cpu;

	if (*firstreport) {
		pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
		*firstreport = false;
	}
	// FIXME: This should attempt to use try_invoke_on_nonrunning_task().
	cpu = task_cpu(t);
	pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n",
		 t->pid,
		 ".I"[READ_ONCE(t->trc_ipi_to_cpu) > 0],
		 ".i"[is_idle_task(t)],
		 ".N"[cpu > 0 && tick_nohz_full_cpu(cpu)],
		 t->trc_reader_nesting,
		 " N"[!!t->trc_reader_special.b.need_qs],
		 cpu);
	sched_show_task(t);
}

/* List stalled IPIs for RCU tasks trace. */
static void show_stalled_ipi_trace(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		if (per_cpu(trc_ipi_to_cpu, cpu))
			pr_alert("\tIPI outstanding to CPU %d\n", cpu);
}

/* Do one scan of the holdout list. */
static void check_all_holdout_tasks_trace(struct list_head *hop,
					  bool needreport, bool *firstreport)
{
	struct task_struct *g, *t;

	// Disable CPU hotplug across the holdout list scan.
	cpus_read_lock();

	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
		// If safe and needed, try to check the current task.
		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
		    !READ_ONCE(t->trc_reader_checked))
			trc_wait_for_one_reader(t, hop);

		// If check succeeded, remove this task from the list.
		if (READ_ONCE(t->trc_reader_checked))
			trc_del_holdout(t);
		else if (needreport)
			show_stalled_task_trace(t, firstreport);
	}

	// Re-enable CPU hotplug now that the holdout list scan has completed.
	cpus_read_unlock();

	if (needreport) {
		if (firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
		show_stalled_ipi_trace();
	}
}

/* Wait for grace period to complete and provide ordering. */
static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
{
	bool firstreport;
	struct task_struct *g, *t;
	LIST_HEAD(holdouts);
	long ret;

	// Remove the safety count.
	smp_mb__before_atomic(); // Order vs. earlier atomics
	atomic_dec(&trc_n_readers_need_end);
	smp_mb__after_atomic(); // Order vs. later atomics

	// Wait for readers.
	set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
	for (;;) {
		ret = wait_event_idle_exclusive_timeout(
				trc_wait,
				atomic_read(&trc_n_readers_need_end) == 0,
				READ_ONCE(rcu_task_stall_timeout));
		if (ret)
			break; // Count reached zero.
		// Stall warning time, so make a list of the offenders.
		rcu_read_lock();
		for_each_process_thread(g, t)
			if (READ_ONCE(t->trc_reader_special.b.need_qs))
				trc_add_holdout(t, &holdouts);
		rcu_read_unlock();
		firstreport = true;
		list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
			if (READ_ONCE(t->trc_reader_special.b.need_qs))
				show_stalled_task_trace(t, &firstreport);
			trc_del_holdout(t); // Release task_struct reference.
		}
		if (firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/taskslist mismatch?)\n");
		show_stalled_ipi_trace();
		pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
	}
	smp_mb(); // Caller's code must be ordered after wakeup.
		  // Pairs with pretty much every ordering primitive.
}

/* Report any needed quiescent state for this exiting task. */
static void exit_tasks_rcu_finish_trace(struct task_struct *t)
{
	WRITE_ONCE(t->trc_reader_checked, true);
	WARN_ON_ONCE(t->trc_reader_nesting);
	WRITE_ONCE(t->trc_reader_nesting, 0);
	if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
		rcu_read_unlock_trace_special(t, 0);
}

/**
 * call_rcu_tasks_trace() - Queue a callback for invocation after a trace task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks_trace()
 * assumes that the read-side critical sections end at context switch,
 * cond_resched_rcu_qs(), or transition to usermode execution. As such,
 * there are no read-side primitives analogous to rcu_read_lock() and
 * rcu_read_unlock() because this primitive is intended to determine
 * that all tasks have passed through a safe state, not so much for
 * data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);

/**
 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
 *
 * Control will return to the caller some time after a trace rcu-tasks
 * grace period has elapsed, in other words after all currently executing
 * rcu-tasks read-side critical sections have completed. These read-side
 * critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_trace(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);

/**
 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_trace(void)
{
	/* There is only one callback queue, so this is easy. ;-) */
	synchronize_rcu_tasks_trace();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);

static int __init rcu_spawn_tasks_trace_kthread(void)
{
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
		rcu_tasks_trace.gp_sleep = HZ / 10;
		rcu_tasks_trace.init_fract = HZ / 10;
	} else {
		rcu_tasks_trace.gp_sleep = HZ / 200;
		if (rcu_tasks_trace.gp_sleep <= 0)
			rcu_tasks_trace.gp_sleep = 1;
		rcu_tasks_trace.init_fract = HZ / 200;
		if (rcu_tasks_trace.init_fract <= 0)
			rcu_tasks_trace.init_fract = 1;
	}
	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
	rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
	return 0;
}
core_initcall(rcu_spawn_tasks_trace_kthread);

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_trace_gp_kthread(void)
{
	char buf[64];

	sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
		data_race(n_heavy_reader_ofl_updates),
		data_race(n_heavy_reader_updates),
		data_race(n_heavy_reader_attempts));
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */

#ifndef CONFIG_TINY_RCU
void show_rcu_tasks_gp_kthreads(void)
{
	show_rcu_tasks_classic_gp_kthread();
	show_rcu_tasks_rude_gp_kthread();
	show_rcu_tasks_trace_gp_kthread();
}
#endif /* #ifndef CONFIG_TINY_RCU */

#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void rcu_tasks_bootup_oddness(void) {}
void show_rcu_tasks_gp_kthreads(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */