/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/isolation.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
extern int rcu_expedited; /* from sysctl */
module_param(rcu_expedited, int, 0);
extern int rcu_normal; /* from sysctl */
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In the absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of
 * view (i.e., we are in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 * that are in such a section, considering them to be in an extended
 * quiescent state, so such a CPU is effectively never in an RCU read-side
 * critical section regardless of what RCU primitives it invokes.  This
 * state of affairs is required --- we need to keep an RCU-free window in
 * idle where the CPU may possibly enter into low-power mode.  This way,
 * other CPUs that have started a grace period can notice the extended
 * quiescent state.  Otherwise we would delay any grace period for as long
 * as we run in the idle task.
 *
 * Similarly, we avoid claiming an SRCU read lock held if the current
 * CPU is offline.
 */
int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
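
/*
 * Illustrative usage sketch (not part of this file): callers typically
 * feed rcu_read_lock_sched_held() to RCU_LOCKDEP_WARN() or to the
 * lockdep-checked accessors to flag accesses made outside an RCU-sched
 * read-side critical section.  The function my_handler() and pointer gp
 * below are hypothetical:
 *
 *	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
 *			 "my_handler() needs rcu_read_lock_sched()!");
 *	p = rcu_dereference_sched(gp);
 */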
#endif

#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during the period of boot from when the
 * first task is spawned until the rcu_set_runtime_mode() core_initcall()
 * is invoked, during which everything is expedited.)
 */
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal) &&
	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);

/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable and rcu_scheduler_active into account as well
 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
	       rcu_scheduler_active == RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited()
 * function had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
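
/*
 * Illustrative pairing sketch (not part of this file): a caller that
 * temporarily needs fast grace periods, for example around a
 * latency-sensitive reconfiguration, brackets that window as shown
 * below.  The function my_fast_reconfig() is hypothetical:
 *
 *	rcu_expedite_gp();
 *	my_fast_reconfig();	// synchronize_rcu() behaves as _expedited()
 *	rcu_unexpedite_gp();
 *
 * Calls may nest; expedited behavior persists until every
 * rcu_expedite_gp() has been matched by an rcu_unexpedite_gp() and
 * neither rcu_expedited nor early boot forces expediting.
 */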

/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
	rcu_unexpedite_gp();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
}

#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	synchronize_rcu();
	synchronize_rcu_bh();
	synchronize_sched();
	synchronize_rcu_expedited();
	synchronize_rcu_bh_expedited();
	synchronize_sched_expedited();
}

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)

/*
 * Switch to run-time mode once RCU has fully initialized.
 */
static int __init rcu_set_runtime_mode(void)
{
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	rcu_test_sync_prims();
	return 0;
}
core_initcall(rcu_set_runtime_mode);

#endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In the absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
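
/*
 * Illustrative usage sketch (not part of this file): lockdep-based
 * accessors such as rcu_dereference_check() combine rcu_read_lock_held()
 * with other conditions to document exactly which protections make an
 * access legal.  The names gp and my_lock below are hypothetical:
 *
 *	p = rcu_dereference_check(gp,
 *				  rcu_read_lock_held() ||
 *				  lockdep_is_held(&my_lock));
 *
 * The access is then valid either inside rcu_read_lock() or while
 * holding my_lock, and lockdep complains otherwise.
 */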

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and the !CONFIG_PROVE_RCU cases.  Note that if someone
 * uses rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;
	int j;

	/* Initialize and register callbacks for each flavor specified. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh)) {
			might_sleep();
			continue;
		}
		init_rcu_head_on_stack(&rs_array[i].head);
		init_completion(&rs_array[i].completion);
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i)
			(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
	}

	/* Wait for all callbacks to be invoked. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh))
			continue;
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i)
			wait_for_completion(&rs_array[i].completion);
		destroy_rcu_head_on_stack(&rs_array[i].head);
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);
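
/*
 * Illustrative note (not part of this file): the synchronous grace-period
 * primitives are thin wrappers around __wait_rcu_gp().  The wait_rcu_gp()
 * and synchronize_rcu_mult() helpers in <linux/rcupdate_wait.h> pass one
 * or more call_rcu-style functions plus on-stack rcu_synchronize
 * structures, roughly:
 *
 *	struct rcu_synchronize rs;
 *	call_rcu_func_t crf = call_rcu_tasks;
 *
 *	__wait_rcu_gp(false, 1, &crf, &rs);
 *
 * The duplicate-pointer scans above let a caller pass the same flavor
 * twice without queueing the same rcu_head twice.
 */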

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head);

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head);

static bool rcuhead_is_static_object(void *addr)
{
	return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that was previously initialized
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
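
/*
 * Illustrative usage sketch (not part of this file): code that embeds an
 * rcu_head in an on-stack structure, as __wait_rcu_gp() does above, must
 * bracket its use with these helpers so that debugobjects does not treat
 * the stack memory as a statically allocated tracked object:
 *
 *	struct rcu_synchronize rs;
 *
 *	init_rcu_head_on_stack(&rs.head);
 *	init_completion(&rs.completion);
 *	call_rcu(&rs.head, wakeme_after_rcu);
 *	wait_for_completion(&rs.completion);
 *	destroy_rcu_head_on_stack(&rs.head);
 */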

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);

int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
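
/*
 * Worked example (illustrative): the timeout is clamped to the range
 * [3, 300] seconds and then converted to jiffies.  With
 * rcu_cpu_stall_timeout = 21 and CONFIG_PROVE_RCU=y this returns
 * 21 * HZ + 5 * HZ, i.e. a 26-second stall-warning delay; with
 * CONFIG_PROVE_RCU unset it returns 21 * HZ.
 */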

void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_TASKS_RCU

/*
 * Simple variant of RCU whose quiescent states are voluntary context
 * switch, cond_resched_rcu_qs(), user-space execution, and idle.
 * As such, grace periods can take one good long time.  There are no
 * read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
 * because this implementation is intended to get the system into a safe
 * state for some of the manipulations involved in tracing and the like.
 * Finally, this implementation does not support high call_rcu_tasks()
 * rates from multiple CPUs.  If this is required, per-CPU callback lists
 * will be needed.
 */

/* Global list of callbacks and associated lock. */
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);

static struct task_struct *rcu_tasks_kthread_ptr;

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
 * or transition to usermode execution.  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	unsigned long flags;
	bool needwake;

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
	needwake = !rcu_tasks_cbs_head;
	*rcu_tasks_cbs_tail = rhp;
	rcu_tasks_cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rcu_tasks_kthread_ptr))
		wake_up(&rcu_tasks_cbs_wq);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
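
/*
 * Illustrative usage sketch (not part of this file): a tracing-style user
 * that must free a trampoline-like object only after every task has passed
 * through a voluntary quiescent state can embed an rcu_head in its
 * descriptor and queue the free from there.  The structure my_tramp and
 * function my_tramp_free() below are hypothetical:
 *
 *	struct my_tramp {
 *		struct rcu_head rh;
 *		void *text;
 *	};
 *
 *	static void my_tramp_free(struct rcu_head *rhp)
 *	{
 *		struct my_tramp *t = container_of(rhp, struct my_tramp, rh);
 *
 *		kfree(t);
 *	}
 *
 *	call_rcu_tasks(&t->rh, my_tramp_free);
 */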

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-tasks read-side critical section whose beginning
 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
 * having an RCU-tasks read-side critical section that extends beyond
 * the return from synchronize_rcu_tasks() is guaranteed to have executed
 * a full memory barrier after the beginning of synchronize_rcu_tasks()
 * and before the beginning of that RCU-tasks read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 * (but again only if the system has more than one CPU).
 */
void synchronize_rcu_tasks(void)
{
	/* Complain if the scheduler has not started. */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(call_rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
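
/*
 * Illustrative usage sketch (not part of this file): the synchronous form
 * suits teardown paths that may sleep.  All names below are hypothetical:
 *
 *	remove_all_references_to(my_old_tramp);
 *	synchronize_rcu_tasks();
 *	free_trampoline(my_old_tramp);
 *
 * Once synchronize_rcu_tasks() returns, no task can still be executing
 * in the old trampoline, because every task has since passed through a
 * voluntary context switch, usermode execution, or the idle loop.
 */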

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct task_struct *g, *t;
	unsigned long lastreport;
	struct rcu_head *list;
	struct rcu_head *next;
	LIST_HEAD(rcu_tasks_holdouts);
	int fract;

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current, HK_FLAG_RCU);

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
		list = rcu_tasks_cbs_head;
		rcu_tasks_cbs_head = NULL;
		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rcu_tasks_cbs_wq,
						 rcu_tasks_cbs_head);
			if (!rcu_tasks_cbs_head) {
				WARN_ON(signal_pending(current));
				schedule_timeout_interruptible(HZ/10);
			}
			continue;
		}

		/*
		 * Wait for all pre-existing t->on_rq and t->nvcsw
		 * transitions to complete.  Invoking synchronize_sched()
		 * suffices because all these transitions occur with
		 * interrupts disabled.  Without this synchronize_sched(),
		 * a read-side critical section that started before the
		 * grace period might be incorrectly seen as having started
		 * after the grace period.
		 *
		 * This synchronize_sched() also dispenses with the
		 * need for a memory barrier on the first store to
		 * ->rcu_tasks_holdout, as it forces the store to happen
		 * after the beginning of the grace period.
		 */
		synchronize_sched();

		/*
		 * There were callbacks, so we need to wait for an
		 * RCU-tasks grace period.  Start off by scanning
		 * the task list for tasks that are not already
		 * voluntarily blocked.  Mark these tasks and make
		 * a list of them in rcu_tasks_holdouts.
		 */
		rcu_read_lock();
		for_each_process_thread(g, t) {
			if (t != current && READ_ONCE(t->on_rq) &&
			    !is_idle_task(t)) {
				get_task_struct(t);
				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
				WRITE_ONCE(t->rcu_tasks_holdout, true);
				list_add(&t->rcu_tasks_holdout_list,
					 &rcu_tasks_holdouts);
			}
		}
		rcu_read_unlock();

		/*
		 * Wait for tasks that are in the process of exiting.
		 * This does only part of the job, ensuring that all
		 * tasks that were previously exiting reach the point
		 * where they have disabled preemption, allowing the
		 * later synchronize_sched() to finish the job.
		 */
		synchronize_srcu(&tasks_rcu_exit_srcu);

		/*
		 * Each pass through the following loop scans the list
		 * of holdout tasks, removing any that are no longer
		 * holdouts.  When the list is empty, we are done.
		 */
		lastreport = jiffies;

		/* Start off with a HZ/10 wait and slowly back off to a 1-second (HZ) wait. */
		fract = 10;

		for (;;) {
			bool firstreport;
			bool needreport;
			int rtst;
			struct task_struct *t1;

			if (list_empty(&rcu_tasks_holdouts))
				break;

			/* Slowly back off waiting for holdouts. */
			schedule_timeout_interruptible(HZ/fract);

			if (fract > 1)
				fract--;

			rtst = READ_ONCE(rcu_task_stall_timeout);
			needreport = rtst > 0 &&
				     time_after(jiffies, lastreport + rtst);
			if (needreport)
				lastreport = jiffies;
			firstreport = true;
			WARN_ON(signal_pending(current));
			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
						 rcu_tasks_holdout_list) {
				check_holdout_task(t, needreport, &firstreport);
				cond_resched();
			}
		}

		/*
		 * Because ->on_rq and ->nvcsw are not guaranteed
		 * to have full memory barriers prior to them in the
		 * schedule() path, memory reordering on other CPUs could
		 * cause their RCU-tasks read-side critical sections to
		 * extend past the end of the grace period.  However,
		 * because these ->nvcsw updates are carried out with
		 * interrupts disabled, we can use synchronize_sched()
		 * to force the needed ordering on all such CPUs.
		 *
		 * This synchronize_sched() also confines all
		 * ->rcu_tasks_holdout accesses to be within the grace
		 * period, avoiding the need for memory barriers for
		 * ->rcu_tasks_holdout accesses.
		 *
		 * In addition, this synchronize_sched() waits for exiting
		 * tasks to complete their final preempt_disable() region
		 * of execution, cleaning up after the synchronize_srcu()
		 * above.
		 */
		synchronize_sched();

		/* Invoke the callbacks. */
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		/* Paranoid sleep to keep this from entering a tight loop. */
		schedule_timeout_uninterruptible(HZ/10);
	}
}

/* Spawn rcu_tasks_kthread() at core_initcall() time. */
static int __init rcu_spawn_tasks_kthread(void)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
	BUG_ON(IS_ERR(t));
	smp_mb(); /* Ensure others see full kthread. */
	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
	return 0;
}
core_initcall(rcu_spawn_tasks_kthread);

/* Do the srcu_read_lock() for the above synchronize_srcu(). */
void exit_tasks_rcu_start(void)
{
	preempt_disable();
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
	preempt_enable();
}

/* Do the srcu_read_unlock() for the above synchronize_srcu(). */
void exit_tasks_rcu_finish(void)
{
	preempt_disable();
	__srcu_read_unlock(&tasks_rcu_exit_srcu, current->rcu_tasks_idx);
	preempt_enable();
}
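
/*
 * Illustrative note (not part of this file): these two hooks are meant to
 * bracket the task-exit path, roughly as sketched below; the exact
 * placement within do_exit() is an assumption, not a quote of
 * kernel/exit.c:
 *
 *	exit_tasks_rcu_start();
 *	// ... tear down the exiting task's state ...
 *	exit_tasks_rcu_finish();
 *
 * This is what allows rcu_tasks_kthread()'s
 * synchronize_srcu(&tasks_rcu_exit_srcu) to wait for tasks that are
 * part-way through exit.
 */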

#endif /* #ifdef CONFIG_TASKS_RCU */

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#ifdef CONFIG_TASKS_RCU
	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
	else
		pr_info("\tTasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters, one for each flavor
 */
static bool rcu_self_test;
static bool rcu_self_test_bh;
static bool rcu_self_test_sched;

module_param(rcu_self_test, bool, 0444);
module_param(rcu_self_test_bh, bool, 0444);
module_param(rcu_self_test_sched, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

DEFINE_STATIC_SRCU(early_srcu);

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;
	static struct rcu_head shead;

	call_rcu(&head, test_callback);
	if (IS_ENABLED(CONFIG_SRCU))
		call_srcu(&early_srcu, &shead, test_callback);
}

static void early_boot_test_call_rcu_bh(void)
{
	static struct rcu_head head;

	call_rcu_bh(&head, test_callback);
}

static void early_boot_test_call_rcu_sched(void)
{
	static struct rcu_head head;

	call_rcu_sched(&head, test_callback);
}

void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	if (rcu_self_test_bh)
		early_boot_test_call_rcu_bh();
	if (rcu_self_test_sched)
		early_boot_test_call_rcu_sched();
	rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
		if (IS_ENABLED(CONFIG_SRCU)) {
			early_boot_test_counter++;
			srcu_barrier(&early_srcu);
		}
	}
	if (rcu_self_test_bh) {
		early_boot_test_counter++;
		rcu_barrier_bh();
	}
	if (rcu_self_test_sched) {
		early_boot_test_counter++;
		rcu_barrier_sched();
	}

	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */

#ifndef CONFIG_TINY_RCU

/*
 * Print any significant non-default boot-time settings.
 */
void __init rcupdate_announce_bootup_oddness(void)
{
	if (rcu_normal)
		pr_info("\tNo expedited grace period (rcu_normal).\n");
	else if (rcu_normal_after_boot)
		pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
	else if (rcu_expedited)
		pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
	if (rcu_cpu_stall_suppress)
		pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
	if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
		pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
	rcu_tasks_bootup_oddness();
}

#endif /* #ifndef CONFIG_TINY_RCU */