/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Task-based RCU implementations.
 *
 * Copyright (C) 2020 Paul E. McKenney
 */


////////////////////////////////////////////////////////////////////////
//
// Generic data structures.

struct rcu_tasks;
typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(void);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(void);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(void);

/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_head: Head of callback list.
 * @cbs_tail: Tail pointer for callback list.
 * @cbs_wq: Wait queue allowing new callback to get kthread's attention.
 * @cbs_lock: Lock protecting callback list.
 * @gp_start: Most recent grace-period start in jiffies.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @gp_func: This flavor's grace-period-wait function.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */
struct rcu_tasks {
	struct rcu_head *cbs_head;
	struct rcu_head **cbs_tail;
	struct wait_queue_head cbs_wq;
	raw_spinlock_t cbs_lock;
	unsigned long gp_start;
	struct task_struct *kthread_ptr;
	rcu_tasks_gp_func_t gp_func;
	pregp_func_t pregp_func;
	pertask_func_t pertask_func;
	postscan_func_t postscan_func;
	holdouts_func_t holdouts_func;
	postgp_func_t postgp_func;
	call_rcu_func_t call_func;
	char *name;
	char *kname;
};

#define DEFINE_RCU_TASKS(rt_name, gp, call, n)				\
static struct rcu_tasks rt_name =					\
{									\
	.cbs_tail = &rt_name.cbs_head,					\
	.cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq),	\
	.cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_lock),		\
	.gp_func = gp,							\
	.call_func = call,						\
	.name = n,							\
	.kname = #rt_name,						\
}

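/*
 * Illustrative sketch only (hypothetical "foo" flavor): a new flavor of
 * Tasks RCU is typically wired up as follows; see the rcu_tasks,
 * rcu_tasks_rude, and rcu_tasks_trace instances below for real examples:
 *
 *	static void rcu_tasks_foo_wait_gp(struct rcu_tasks *rtp);
 *	void call_rcu_tasks_foo(struct rcu_head *rhp, rcu_callback_t func);
 *	DEFINE_RCU_TASKS(rcu_tasks_foo, rcu_tasks_foo_wait_gp,
 *			 call_rcu_tasks_foo, "RCU Tasks Foo");
 *
 *	void call_rcu_tasks_foo(struct rcu_head *rhp, rcu_callback_t func)
 *	{
 *		call_rcu_tasks_generic(rhp, func, &rcu_tasks_foo);
 *	}
 *
 *	static int __init rcu_spawn_tasks_foo_kthread(void)
 *	{
 *		rcu_tasks_foo.pregp_func = ...;	// Optional hooks.
 *		rcu_spawn_tasks_kthread_generic(&rcu_tasks_foo);
 *		return 0;
 *	}
 *	core_initcall(rcu_spawn_tasks_foo_kthread);
 */
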
/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Avoid IPIing CPUs early in the grace period. */
#define RCU_TASK_IPI_DELAY (HZ / 2)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);

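/*
 * For example, given that this file is included from kernel/rcu/update.c,
 * a ten-minute stall timeout (in jiffies, here assuming HZ=1000) may be
 * requested on the kernel boot command line with:
 *
 *	rcupdate.rcu_task_stall_timeout=600000
 */
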
////////////////////////////////////////////////////////////////////////
//
// Generic code.

// Enqueue a callback for the specified flavor of Tasks RCU.
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
				   struct rcu_tasks *rtp)
{
	unsigned long flags;
	bool needwake;

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
	needwake = !rtp->cbs_head;
	WRITE_ONCE(*rtp->cbs_tail, rhp);
	rtp->cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rtp->kthread_ptr))
		wake_up(&rtp->cbs_wq);
}

// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
	/* Complain if the scheduler has not started. */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(rtp->call_func);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct rcu_head *list;
	struct rcu_head *next;
	struct rcu_tasks *rtp = arg;

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current, HK_FLAG_RCU);
	WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
		smp_mb__after_unlock_lock(); // Order updates vs. GP.
		list = rtp->cbs_head;
		rtp->cbs_head = NULL;
		rtp->cbs_tail = &rtp->cbs_head;
		raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rtp->cbs_wq,
						 READ_ONCE(rtp->cbs_head));
			if (!rtp->cbs_head) {
				WARN_ON(signal_pending(current));
				schedule_timeout_interruptible(HZ/10);
			}
			continue;
		}

		// Wait for one grace period.
		rtp->gp_start = jiffies;	// Record start for, e.g., IPI holdoff.
		rtp->gp_func(rtp);

		/* Invoke the callbacks. */
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		/* Paranoid sleep to keep this from entering a tight loop */
		schedule_timeout_uninterruptible(HZ/10);
	}
}

/* Spawn RCU-tasks grace-period kthread, e.g., at core_initcall() time. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
		return;
	smp_mb(); /* Ensure others see full kthread. */
}

/* Do the srcu_read_lock() for the synchronize_srcu() in rcu_tasks_postscan(). */
void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
{
	preempt_disable();
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
	preempt_enable();
}

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

/* Do the srcu_read_unlock() for the synchronize_srcu() in rcu_tasks_postscan(). */
void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
{
	struct task_struct *t = current;

	preempt_disable();
	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
	preempt_enable();
	exit_tasks_rcu_finish_trace(t);
}

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU
	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_TASKS_RCU

////////////////////////////////////////////////////////////////////////
//
// Shared code between task-list-scanning variants of Tasks RCU.

/* Wait for one RCU-tasks grace period. */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
	struct task_struct *g, *t;
	unsigned long lastreport;
	LIST_HEAD(holdouts);
	int fract;

	rtp->pregp_func();

	/*
	 * There were callbacks, so we need to wait for an RCU-tasks
	 * grace period.  Start off by scanning the task list for tasks
	 * that are not already voluntarily blocked.  Mark these tasks
	 * and make a list of them in holdouts.
	 */
	rcu_read_lock();
	for_each_process_thread(g, t)
		rtp->pertask_func(t, &holdouts);
	rcu_read_unlock();

	rtp->postscan_func();

	/*
	 * Each pass through the following loop scans the list of holdout
	 * tasks, removing any that are no longer holdouts.  When the list
	 * is empty, we are done.
	 */
	lastreport = jiffies;

	/* Start off with HZ/10 wait and slowly back off to 1 HZ wait. */
	fract = 10;

	for (;;) {
		bool firstreport;
		bool needreport;
		int rtst;

		if (list_empty(&holdouts))
			break;

		/* Slowly back off waiting for holdouts */
		schedule_timeout_interruptible(HZ/fract);

		if (fract > 1)
			fract--;

		rtst = READ_ONCE(rcu_task_stall_timeout);
		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
		if (needreport)
			lastreport = jiffies;
		firstreport = true;
		WARN_ON(signal_pending(current));
		rtp->holdouts_func(&holdouts, needreport, &firstreport);
	}

	rtp->postgp_func();
}

////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time.  There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs.  If this is required, per-CPU callback lists
// will be needed.

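/*
 * Illustrative sketch (hypothetical helper names): a Tasks-RCU "reader"
 * is simply a region of kernel code containing no voluntary context
 * switch, such as execution of a trampoline:
 *
 *	// Preemption here is harmless, but no schedule() or other
 *	// voluntary context switch until the trampoline is exited.
 *	enter_trampoline();	// Hypothetical.
 *	...
 *	exit_trampoline();	// Hypothetical.
 *
 * Once no task can newly enter the trampoline, call_rcu_tasks() or
 * synchronize_rcu_tasks() waits out any such pre-existing readers.
 */
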
/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(void)
{
	/*
	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
	 * to complete.  Invoking synchronize_rcu() suffices because all
	 * these transitions occur with interrupts disabled.  Without this
	 * synchronize_rcu(), a read-side critical section that started
	 * before the grace period might be incorrectly seen as having
	 * started after the grace period.
	 *
	 * This synchronize_rcu() also dispenses with the need for a
	 * memory barrier on the first store to t->rcu_tasks_holdout,
	 * as it forces the store to happen after the beginning of the
	 * grace period.
	 */
	synchronize_rcu();
}

/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
	if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
		get_task_struct(t);
		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
		WRITE_ONCE(t->rcu_tasks_holdout, true);
		list_add(&t->rcu_tasks_holdout_list, hop);
	}
}

/* Processing between scanning the task list and draining the holdout list. */
static void rcu_tasks_postscan(void)
{
	/*
	 * Wait for tasks that are in the process of exiting.  This
	 * does only part of the job, ensuring that all tasks that were
	 * previously exiting reach the point where they have disabled
	 * preemption, allowing the later synchronize_rcu() to finish
	 * the job.
	 */
	synchronize_srcu(&tasks_rcu_exit_srcu);
}

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* Scan the holdout lists for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
				    bool needreport, bool *firstreport)
{
	struct task_struct *t, *t1;

	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
		check_holdout_task(t, needreport, firstreport);
		cond_resched();
	}
}

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(void)
{
	/*
	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
	 * memory barriers prior to them in the schedule() path, memory
	 * reordering on other CPUs could cause their RCU-tasks read-side
	 * critical sections to extend past the end of the grace period.
	 * However, because these ->nvcsw updates are carried out with
	 * interrupts disabled, we can use synchronize_rcu() to force the
	 * needed ordering on all such CPUs.
	 *
	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
	 * accesses to be within the grace period, avoiding the need for
	 * memory barriers for ->rcu_tasks_holdout accesses.
	 *
	 * In addition, this synchronize_rcu() waits for exiting tasks
	 * to complete their final preempt_disable() region of execution,
	 * cleaning up after the synchronize_srcu() in rcu_tasks_postscan().
	 */
	synchronize_rcu();
}

void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
 * or transition to usermode execution.  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);

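/*
 * Usage sketch (hypothetical types and names, tracer-style): queue
 * reclamation of an unlinked trampoline after a Tasks-RCU grace period:
 *
 *	struct my_tramp {
 *		struct rcu_head rh;
 *		// Executable instructions...
 *	};
 *
 *	static void my_tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		my_tramp_free(container_of(rhp, struct my_tramp, rh));
 *	}
 *
 *	// After removing all means of reaching the trampoline:
 *	call_rcu_tasks(&tramp->rh, my_tramp_free_cb);
 */
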
/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

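/*
 * Usage sketch (hypothetical names): the synchronous counterpart of the
 * call_rcu_tasks() example above:
 *
 *	my_tramp_unregister(tramp);	// No new tasks can enter it.
 *	synchronize_rcu_tasks();	// Wait out pre-existing users.
 *	my_tramp_free(tramp);		// Now safe to reclaim.
 */
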
/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

static int __init rcu_spawn_tasks_kthread(void)
{
	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
	rcu_tasks.pertask_func = rcu_tasks_pertask;
	rcu_tasks.postscan_func = rcu_tasks_postscan;
	rcu_tasks.holdouts_func = check_all_holdout_tasks;
	rcu_tasks.postgp_func = rcu_tasks_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
	return 0;
}
core_initcall(rcu_spawn_tasks_kthread);

#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_RUDE_RCU

////////////////////////////////////////////////////////////////////////
//
// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
// passing an empty function to schedule_on_each_cpu().  This approach
// provides an asynchronous call_rcu_tasks_rude() API and batching of
// concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
// This sends IPIs far and wide and induces otherwise unnecessary context
// switches on all online CPUs, whether idle or not.

// Empty function to allow workqueues to force a context switch.
static void rcu_tasks_be_rude(struct work_struct *work)
{
}

// Wait for one rude RCU-tasks grace period.
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
	schedule_on_each_cpu(rcu_tasks_be_rude);
}

void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
		 "RCU Tasks Rude");

/**
 * call_rcu_tasks_rude() - Queue an RCU callback for invocation after a rude task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks_rude()
 * assumes that the read-side critical sections end at context switch,
 * cond_resched_rcu_qs(), or transition to usermode execution.  As such,
 * there are no read-side primitives analogous to rcu_read_lock() and
 * rcu_read_unlock() because this primitive is intended to determine
 * that all tasks have passed through a safe state, not so much for
 * data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);

/**
 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
 *
 * Control will return to the caller some time after a rude rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), userspace execution, and (in theory,
 * anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_rude(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);

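/*
 * Usage sketch (hypothetical names): because every CPU is forced through
 * a context switch, a preemption-disabled region serves as the read-side
 * critical section:
 *
 *	preempt_disable();
 *	invoke_patched_code();		// Hypothetical.
 *	preempt_enable();
 *
 *	// Updater, after unhooking the patched code:
 *	synchronize_rcu_tasks_rude();
 */
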
/**
 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_rude(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks_rude();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);

static int __init rcu_spawn_tasks_rude_kthread(void)
{
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
	return 0;
}
core_initcall(rcu_spawn_tasks_rude_kthread);

#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */

////////////////////////////////////////////////////////////////////////
//
// Tracing variant of Tasks RCU.  This variant is designed to be used
// to protect tracing hooks, including those of BPF.  This variant
// therefore:
//
// 1.	Has explicit read-side markers to allow finite grace periods
//	in the face of in-kernel loops for PREEMPT=n builds.
//
// 2.	Protects code in the idle loop, exception entry/exit, and
//	CPU-hotplug code paths, similar to the capabilities of SRCU.
//
// 3.	Avoids expensive read-side instructions, having overhead similar
//	to that of Preemptible RCU.
//
// There are of course downsides.  The grace-period code can send IPIs to
// CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
// It is necessary to scan the full tasklist, much as for Tasks RCU.  There
// is a single callback queue guarded by a single lock, again, much as for
// Tasks RCU.  If needed, these downsides can be at least partially remedied.
//
// Perhaps most important, this variant of RCU does not affect the vanilla
// flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
// readers can operate from idle, offline, and exception entry/exit in no
// way allows rcu_preempt and rcu_sched readers to also do so.

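/*
 * Reader sketch: unlike the other Tasks-RCU flavors, this one has
 * explicit read-side markers (my_hook() being a hypothetical stand-in
 * for a BPF program or other tracing hook):
 *
 *	rcu_read_lock_trace();
 *	my_hook();
 *	rcu_read_unlock_trace();
 */
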
// The lockdep state must be outside of #ifdef to be useful.
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_trace_key;
struct lockdep_map rcu_trace_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TASKS_TRACE_RCU

atomic_t trc_n_readers_need_end;	// Number of waited-for readers.
DECLARE_WAIT_QUEUE_HEAD(trc_wait);	// Where the GP kthread waits for readers.

// Record outstanding IPIs to each CPU.  No point in sending two...
static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);

/* If we are the last reader, wake up the grace-period kthread. */
void rcu_read_unlock_trace_special(struct task_struct *t)
{
	WRITE_ONCE(t->trc_reader_need_end, false);
	if (atomic_dec_and_test(&trc_n_readers_need_end))
		wake_up(&trc_wait);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);

/* Add a task to the holdout list, if it is not already on the list. */
static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
{
	if (list_empty(&t->trc_holdout_list)) {
		get_task_struct(t);
		list_add(&t->trc_holdout_list, bhp);
	}
}

/* Remove a task from the holdout list, if it is in fact present. */
static void trc_del_holdout(struct task_struct *t)
{
	if (!list_empty(&t->trc_holdout_list)) {
		list_del_init(&t->trc_holdout_list);
		put_task_struct(t);
	}
}

/* IPI handler to check task state. */
static void trc_read_check_handler(void *t_in)
{
	struct task_struct *t = current;
	struct task_struct *texp = t_in;

	// If the task is no longer running on this CPU, leave.
	if (unlikely(texp != t)) {
		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
			wake_up(&trc_wait);
		goto reset_ipi; // Already on holdout list, so will check later.
	}

	// If the task is not in a read-side critical section, and
	// if this is the last reader, awaken the grace-period kthread.
	if (likely(!t->trc_reader_nesting)) {
		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
			wake_up(&trc_wait);
		// Mark as checked after decrement to avoid false
		// positives on the above WARN_ON_ONCE().
		WRITE_ONCE(t->trc_reader_checked, true);
		goto reset_ipi;
	}
	WRITE_ONCE(t->trc_reader_checked, true);

	// Get here if the task is in a read-side critical section.  Set
	// its state so that it will awaken the grace-period kthread upon
	// exit from that critical section.
	WARN_ON_ONCE(t->trc_reader_need_end);
	WRITE_ONCE(t->trc_reader_need_end, true);

reset_ipi:
	// Allow future IPIs to be sent on CPU and for task.
	// Also order this IPI handler against any later manipulations of
	// the intended task.
	smp_store_release(&per_cpu(trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
}

/* Callback function for scheduler to check locked-down task. */
static bool trc_inspect_reader(struct task_struct *t, void *arg)
{
	if (task_curr(t))
		return false; // It is running, so decline to inspect it.

	// Mark as checked.  Because this is called from the grace-period
	// kthread, also remove the task from the holdout list.
	t->trc_reader_checked = true;
	trc_del_holdout(t);

	// If the task is in a read-side critical section, set up its
	// state so that it will awaken the grace-period kthread upon
	// exit from that critical section.
	if (unlikely(t->trc_reader_nesting)) {
		atomic_inc(&trc_n_readers_need_end); // One more to wait on.
		WARN_ON_ONCE(t->trc_reader_need_end);
		WRITE_ONCE(t->trc_reader_need_end, true);
	}
	return true;
}

/* Attempt to extract the state for the specified task. */
static void trc_wait_for_one_reader(struct task_struct *t,
				    struct list_head *bhp)
{
	int cpu;

	// If a previous IPI is still in flight, let it complete.
	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
		return;

	// The current task had better be in a quiescent state.
	if (t == current) {
		t->trc_reader_checked = true;
		trc_del_holdout(t);
		WARN_ON_ONCE(t->trc_reader_nesting);
		return;
	}

	// Attempt to nail down the task for inspection.
	get_task_struct(t);
	if (try_invoke_on_locked_down_task(t, trc_inspect_reader, NULL)) {
		put_task_struct(t);
		return;
	}
	put_task_struct(t);

	// If currently running, send an IPI, either way, add to list.
	trc_add_holdout(t, bhp);
	if (task_curr(t) && time_after(jiffies, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
		// The task is currently running, so try IPIing it.
		cpu = task_cpu(t);

		// If there is already an IPI outstanding, let it happen.
		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
			return;

		atomic_inc(&trc_n_readers_need_end);
		per_cpu(trc_ipi_to_cpu, cpu) = true;
		t->trc_ipi_to_cpu = cpu;
		if (smp_call_function_single(cpu,
					     trc_read_check_handler, t, 0)) {
			// Just in case there is some other reason for
			// failure than the target CPU being offline.
			per_cpu(trc_ipi_to_cpu, cpu) = false;
			t->trc_ipi_to_cpu = -1; // Allow future IPI attempts.
			if (atomic_dec_and_test(&trc_n_readers_need_end)) {
				WARN_ON_ONCE(1);
				wake_up(&trc_wait);
			}
		}
	}
}

/* Initialize for a new RCU-tasks-trace grace period. */
static void rcu_tasks_trace_pregp_step(void)
{
	int cpu;

	// Wait for CPU-hotplug paths to complete.
	cpus_read_lock();
	cpus_read_unlock();

	// Allow for fast-acting IPIs.
	atomic_set(&trc_n_readers_need_end, 1);

	// There shouldn't be any old IPIs, but...
	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
}

/* Do first-round processing for the specified task. */
static void rcu_tasks_trace_pertask(struct task_struct *t,
				    struct list_head *hop)
{
	WRITE_ONCE(t->trc_reader_need_end, false);
	t->trc_reader_checked = false;
	t->trc_ipi_to_cpu = -1;
	trc_wait_for_one_reader(t, hop);
}

/* Do intermediate processing between task and holdout scans. */
static void rcu_tasks_trace_postscan(void)
{
	// Wait for late-stage exiting tasks to finish exiting.
	// These might have passed the call to exit_tasks_rcu_finish().
	synchronize_rcu();
	// Any tasks that exit after this point will set ->trc_reader_checked.
}

/* Do one scan of the holdout list. */
static void check_all_holdout_tasks_trace(struct list_head *hop,
					  bool ndrpt, bool *frptp)
{
	struct task_struct *g, *t;

	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
		// If safe and needed, try to check the current task.
		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
		    !READ_ONCE(t->trc_reader_checked))
			trc_wait_for_one_reader(t, hop);

		// If check succeeded, remove this task from the list.
		if (READ_ONCE(t->trc_reader_checked))
			trc_del_holdout(t);
	}
}

/* Wait for grace period to complete and provide ordering. */
static void rcu_tasks_trace_postgp(void)
{
	// Remove the safety count.
	smp_mb__before_atomic();  // Order vs. earlier atomics
	atomic_dec(&trc_n_readers_need_end);
	smp_mb__after_atomic();  // Order vs. later atomics

	// Wait for readers.
	wait_event_idle_exclusive(trc_wait,
				  atomic_read(&trc_n_readers_need_end) == 0);

	smp_mb(); // Caller's code must be ordered after wakeup.
}

/* Report any needed quiescent state for this exiting task. */
static void exit_tasks_rcu_finish_trace(struct task_struct *t)
{
	WRITE_ONCE(t->trc_reader_checked, true);
	WARN_ON_ONCE(t->trc_reader_nesting);
	WRITE_ONCE(t->trc_reader_nesting, 0);
	if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_need_end)))
		rcu_read_unlock_trace_special(t);
}

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
		 "RCU Tasks Trace");

/**
 * call_rcu_tasks_trace() - Queue a callback for invocation after a trace task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  These read-side critical
 * sections are delimited by calls to rcu_read_lock_trace() and
 * rcu_read_unlock_trace().
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);

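/*
 * Usage sketch (hypothetical type and names): free a structure that
 * rcu_read_lock_trace() readers might still be referencing only after
 * they have all completed:
 *
 *	static void my_prog_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_prog, rh));
 *	}
 *
 *	// After making the program unreachable to new readers:
 *	call_rcu_tasks_trace(&prog->rh, my_prog_free_cb);
 */
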
/**
 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
 *
 * Control will return to the caller some time after a trace rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.
 * These read-side critical sections are delimited by calls to
 * rcu_read_lock_trace() and rcu_read_unlock_trace().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_trace(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);

/**
 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_trace(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks_trace();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);

static int __init rcu_spawn_tasks_trace_kthread(void)
{
	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
	rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
	return 0;
}
core_initcall(rcu_spawn_tasks_trace_kthread);

#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */