// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/tick.h>
#include <linux/sysrq.h>
#include <linux/kprobes.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched/isolation.h>
#include <linux/sched/clock.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kasan.h>
#include "../time/tick-internal.h"

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

#ifndef data_race
#define data_race(expr)						\
	({							\
		expr;						\
	})
#endif
#ifndef ASSERT_EXCLUSIVE_WRITER
#define ASSERT_EXCLUSIVE_WRITER(var) do { } while (0)
#endif
#ifndef ASSERT_EXCLUSIVE_ACCESS
#define ASSERT_EXCLUSIVE_ACCESS(var) do { } while (0)
#endif
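
/*
 * Example (illustrative): with KCSAN enabled, a knowingly racy
 * diagnostic read can be written as data_race(READ_ONCE(rnp->qsmask))
 * so that KCSAN does not flag it; with the fallbacks above, data_race()
 * simply evaluates its argument and the ASSERT_EXCLUSIVE_*() macros
 * expand to nothing.
 */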

/* Data structures. */

/*
 * Steal a bit from the bottom of ->dynticks for idle entry/exit
 * control.  Initially this is for TLB flushing.
 */
#define RCU_DYNTICK_CTRL_MASK 0x1
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
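
/*
 * Worked example (illustrative): the counter proper occupies bit 1 and
 * above, advancing by RCU_DYNTICK_CTRL_CTR (0x2) on each EQS entry or
 * exit, while bit 0 (RCU_DYNTICK_CTRL_MASK) requests a special action.
 * So a ->dynticks value of 0x6 means "RCU is watching, nothing
 * pending", 0x4 means "extended quiescent state, nothing pending", and
 * 0x5 means "extended quiescent state with a special action (such as a
 * TLB flush) to be taken on the next EQS exit".
 */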

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
	.dynticks_nesting = 1,
	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
};
static struct rcu_state rcu_state = {
	.level = { &rcu_state.node[0] },
	.gp_state = RCU_GP_IDLE,
	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
	.name = RCU_NAME,
	.abbr = RCU_ABBR,
	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
	.ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
};

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
static bool use_softirq = true;
module_param(use_softirq, bool, 0444);
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */

/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the
 * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier().  When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods.  This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking.  Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
			      unsigned long gps, unsigned long flags);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void rcu_report_exp_rdp(struct rcu_data *rdp);
static void sync_sched_exp_online_cleanup(int cpu);
static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);

/* rcuc/rcub kthread realtime priority */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0444);

/* Delay in jiffies for grace-period initialization delays, debug only. */

static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);

/*
 * This rcu parameter is runtime-read-only. It reflects
 * a minimum allowed number of objects which can be cached
 * per-CPU. Object size is equal to one page. This value
 * can be changed at boot time.
 */
static int rcu_min_cached_objs = 2;
module_param(rcu_min_cached_objs, int, 0444);

/* Retrieve RCU kthreads priority for rcutorture */
int rcu_get_gp_kthreads_prio(void)
{
	return kthread_prio;
}
EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more the grace periods between
 * each delay.  The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay.  This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */
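
/*
 * Worked example (illustrative): a debug delay of d jiffies applied
 * once every PER_RCU_NODE_PERIOD = 3 grace periods slows grace periods
 * by an average of d/3 jiffies each, independent of the value of d,
 * which is the constant slowdown described above.
 */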

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(void)
{
	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
}

/*
 * Return the number of callbacks queued on the specified CPU.
 * Handles both the nocbs and normal cases.
 */
static long rcu_get_n_cbs_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	if (rcu_segcblist_is_enabled(&rdp->cblist))
		return rcu_segcblist_n_cbs(&rdp->cblist);
	return 0;
}

void rcu_softirq_qs(void)
{
	rcu_qs();
	rcu_preempt_deferred_qs(current);
}

/*
 * Record entry into an extended quiescent state.  This is only to be
 * called when not already in an extended quiescent state, that is,
 * RCU is watching prior to the call to this function and is no longer
 * watching upon return.
 */
static noinstr void rcu_dynticks_eqs_enter(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior RCU read-side
	 * critical sections, and we also must force ordering with the
	 * next idle sojourn.
	 */
	rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
	// RCU is no longer watching.  Better be in extended quiescent state!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_CTR));
	/* Better not have special action (TLB flush) pending! */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_MASK));
}

/*
 * Record exit from an extended quiescent state.  This is only to be
 * called from an extended quiescent state, that is, RCU is not watching
 * prior to the call to this function and is watching upon return.
 */
static noinstr void rcu_dynticks_eqs_exit(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
	 * and we also must force ordering with the next RCU read-side
	 * critical section.
	 */
	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
	// RCU is now watching.  Better not be in an extended quiescent state!
	rcu_dynticks_task_trace_exit();  // After ->dynticks update!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     !(seq & RCU_DYNTICK_CTRL_CTR));
	if (seq & RCU_DYNTICK_CTRL_MASK) {
		arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
		smp_mb__after_atomic(); /* _exit after clearing mask. */
	}
}

/*
 * Reset the current CPU's ->dynticks counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->dynticks counter are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_dynticks_eqs_online(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
		return;
	atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
}

/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
}

/*
 * Snapshot the ->dynticks counter with full ordering so as to allow
 * stable comparison of this counter with past and future snapshots.
 */
static int rcu_dynticks_snap(struct rcu_data *rdp)
{
	int snap = atomic_add_return(0, &rdp->dynticks);

	return snap & ~RCU_DYNTICK_CTRL_MASK;
}

/*
 * Return true if the snapshot returned from rcu_dynticks_snap()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_dynticks_in_eqs(int snap)
{
	return !(snap & RCU_DYNTICK_CTRL_CTR);
}

/*
 * Return true if the CPU corresponding to the specified rcu_data
 * structure has spent some time in an extended quiescent state since
 * rcu_dynticks_snap() returned the specified snapshot.
 */
static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
{
	return snap != rcu_dynticks_snap(rdp);
}

/*
 * Return true if the referenced integer is zero while the specified
 * CPU remains within a single extended quiescent state.
 */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	int snap;

	// If not quiescent, force back to earlier extended quiescent state.
	snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK |
					       RCU_DYNTICK_CTRL_CTR);

	smp_rmb(); // Order ->dynticks and *vp reads.
	if (READ_ONCE(*vp))
		return false;  // Non-zero, so report failure;
	smp_rmb(); // Order *vp read and ->dynticks re-read.

	// If still in the same extended quiescent state, we are good!
	return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK);
}

/*
 * Set the special (bottom) bit of the specified CPU so that it
 * will take special action (such as flushing its TLB) on the
 * next exit from an extended quiescent state.  Returns true if
 * the bit was successfully set, or false if the CPU was not in
 * an extended quiescent state.
 */
bool rcu_eqs_special_set(int cpu)
{
	int old;
	int new;
	int new_old;
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	new_old = atomic_read(&rdp->dynticks);
	do {
		old = new_old;
		if (old & RCU_DYNTICK_CTRL_CTR)
			return false;
		new = old | RCU_DYNTICK_CTRL_MASK;
		new_old = atomic_cmpxchg(&rdp->dynticks, old, new);
	} while (new_old != old);
	return true;
}
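
/*
 * Usage sketch (illustrative only; do_flush_fn is hypothetical and no
 * caller in this file does this): code wanting to defer per-CPU work
 * such as a TLB flush while the target CPU is idle might do:
 *
 *	if (!rcu_eqs_special_set(cpu))
 *		smp_call_function_single(cpu, do_flush_fn, NULL, 1);
 *
 * If the bit was set, rcu_dynticks_eqs_exit() above notices and clears
 * RCU_DYNTICK_CTRL_MASK on that CPU's next exit from the extended
 * quiescent state, at which point the deferred action can be taken.
 */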

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts and must not be idle.
 */
void rcu_momentary_dyntick_idle(void)
{
	int special;

	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
	special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
				    &this_cpu_ptr(&rcu_data)->dynticks);
	/* It is illegal to call this from idle state. */
	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
	rcu_preempt_deferred_qs(current);
}
EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);

/**
 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
 *
 * If the current CPU is idle and running at a first-level (not nested)
 * interrupt, or directly, from idle, return true.
 *
 * The caller must have at least disabled IRQs.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	long nesting;

	/*
	 * Usually called from the tick; but also used from smp_call_function()
	 * for expedited grace periods.  This latter can result in running from
	 * the idle task, instead of an actual IPI.
	 */
	lockdep_assert_irqs_disabled();

	/* Check for counter underflows */
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0,
			 "RCU dynticks_nesting counter underflow!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
			 "RCU dynticks_nmi_nesting counter underflow/zero!");

	/* Are we at first interrupt nesting level? */
	nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting);
	if (nesting > 1)
		return false;

	/*
	 * If we're not in an interrupt, we must be in the idle task!
	 */
	WARN_ON_ONCE(!nesting && !is_idle_task(current));

	/* Does CPU appear to be idle from an RCU standpoint? */
	return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
}

#define DEFAULT_RCU_BLIMIT 10     /* Maximum callbacks per rcu_do_batch ... */
#define DEFAULT_MAX_RCU_BLIMIT 10000 /* ... even during callback flood. */
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100   /* Once only this many pending, use blimit. */
static long qlowmark = DEFAULT_RCU_QLOMARK;
#define DEFAULT_RCU_QOVLD_MULT 2
#define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
static long qovld = DEFAULT_RCU_QOVLD; /* If this many pending, hammer QS. */
static long qovld_calc = -1;	  /* No pre-initialization lock acquisitions! */

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);
module_param(qovld, long, 0444);
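
/*
 * Example (illustrative): because MODULE_PARAM_PREFIX is "rcutree.",
 * these parameters may be set on the kernel command line, for instance:
 *
 *	rcutree.blimit=1000 rcutree.qhimark=20000 rcutree.qovld=40000
 */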

static ulong jiffies_till_first_fqs = ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;
static int rcu_divisor = 7;
module_param(rcu_divisor, int, 0644);

/* Force an exit from rcu_do_batch() after 3 milliseconds. */
static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
module_param(rcu_resched_ns, long, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = ULONG_MAX;
module_param(jiffies_till_sched_qs, ulong, 0444);
static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */

/*
 * Make sure that we give the grace-period kthread time to detect any
 * idle CPUs before taking active measures to force quiescent states.
 * However, don't go below 100 milliseconds, adjusted upwards for really
 * large systems.
 */
static void adjust_jiffies_till_sched_qs(void)
{
	unsigned long j;

	/* If jiffies_till_sched_qs was specified, respect the request. */
	if (jiffies_till_sched_qs != ULONG_MAX) {
		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
		return;
	}
	/* Otherwise, set to third fqs scan, but bound below on large system. */
	j = READ_ONCE(jiffies_till_first_fqs) +
		      2 * READ_ONCE(jiffies_till_next_fqs);
	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
	WRITE_ONCE(jiffies_to_sched_qs, j);
}
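
/*
 * Worked example (illustrative, assumed values): with HZ=1000,
 * jiffies_till_first_fqs=300 and jiffies_till_next_fqs=300, the above
 * computes j = 300 + 2 * 300 = 900 jiffies, which is then compared
 * against the HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV floor.
 */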

static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static struct kernel_param_ops first_fqs_jiffies_ops = {
	.set = param_set_first_fqs_jiffies,
	.get = param_get_ulong,
};

static struct kernel_param_ops next_fqs_jiffies_ops = {
	.set = param_set_next_fqs_jiffies,
	.get = param_get_ulong,
};

module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
module_param(rcu_kick_kthreads, bool, 0644);
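
/*
 * Example (illustrative): the two module_param_cb() parameters above
 * are mode 0644 and so may also be adjusted at runtime, for instance
 * via /sys/module/rcutree/parameters/jiffies_till_first_fqs, with the
 * ->set() callbacks clamping the value to HZ and recomputing
 * jiffies_to_sched_qs via adjust_jiffies_till_sched_qs().
 */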

static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
static int rcu_pending(int user);

/*
 * Return the number of RCU GPs completed thus far for debug & stats.
 */
unsigned long rcu_get_gp_seq(void)
{
	return READ_ONCE(rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_get_gp_seq);

/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats.  Odd numbers mean that a batch is in progress, even
 * numbers mean idle.  The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
	return rcu_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

/*
 * Return the root node of the rcu_state structure.
 */
static struct rcu_node *rcu_get_root(void)
{
	return &rcu_state.node[0];
}

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq)
{
	switch (test_type) {
	case RCU_FLAVOR:
		*flags = READ_ONCE(rcu_state.gp_flags);
		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
static noinstr void rcu_eqs_enter(bool user)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdp->dynticks_nesting == 0);
	if (rdp->dynticks_nesting != 1) {
		// RCU will still be watching, so just do accounting and leave.
		rdp->dynticks_nesting--;
		return;
	}

	lockdep_assert_irqs_disabled();
	instrumentation_begin();
	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	rdp = this_cpu_ptr(&rcu_data);
	do_nocb_deferred_wakeup(rdp);
	rcu_prepare_for_idle();
	rcu_preempt_deferred_qs(current);

	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

	instrumentation_end();
	WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
	// RCU is watching here ...
	rcu_dynticks_eqs_enter();
	// ... but is no longer watching here.
	rcu_dynticks_task_enter();
}

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * If you add or remove a call to rcu_idle_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_eqs_enter(false);
}

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 * is permitted between this call and rcu_user_exit(). This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
 *
 * If you add or remove a call to rcu_user_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_user_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_eqs_enter(true);
}
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 *
 * If you add or remove a call to rcu_nmi_exit(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_nmi_exit(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	instrumentation_begin();
	/*
	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
	 * (We are exiting an NMI handler, so RCU better be paying attention
	 * to us!)
	 */
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());

	/*
	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
	 * leave it in non-RCU-idle state.
	 */
	if (rdp->dynticks_nmi_nesting != 1) {
		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
				  atomic_read(&rdp->dynticks));
		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
			   rdp->dynticks_nmi_nesting - 2);
		instrumentation_end();
		return;
	}

	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */

	if (!in_nmi())
		rcu_prepare_for_idle();

	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
	instrumentation_end();

	// RCU is watching here ...
	rcu_dynticks_eqs_enter();
	// ... but is no longer watching here.

	if (!in_nmi())
		rcu_dynticks_task_enter();
}

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture's idle loop violates this assumption, RCU will give you what
 * you deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr rcu_irq_exit(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_exit();
}

/**
 * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq
 *			  towards in-kernel preemption
 *
 * Same as rcu_irq_exit() but has a sanity check that scheduling is safe
 * from RCU's point of view.  Invoked from return from interrupt before
 * kernel preemption.
 */
void rcu_irq_exit_preempt(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_exit();

	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
			 "RCU dynticks_nesting counter underflow/zero!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
			 DYNTICK_IRQ_NONIDLE,
			 "Bad RCU dynticks_nmi_nesting counter\n");
	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "RCU in extended quiescent state!");
}

#ifdef CONFIG_PROVE_RCU
/**
 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
 */
void rcu_irq_exit_check_preempt(void)
{
	lockdep_assert_irqs_disabled();

	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
			 "RCU dynticks_nesting counter underflow/zero!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
			 DYNTICK_IRQ_NONIDLE,
			 "Bad RCU dynticks_nmi_nesting counter\n");
	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "RCU in extended quiescent state!");
}
#endif /* #ifdef CONFIG_PROVE_RCU */

/*
 * Wrapper for rcu_irq_exit() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_exit_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_exit();
	local_irq_restore(flags);
}

/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
 * allow for the possibility of usermode upcalls messing up our count of
 * interrupt nesting level during the busy period that is just now starting.
 */
static void noinstr rcu_eqs_exit(bool user)
{
	struct rcu_data *rdp;
	long oldval;

	lockdep_assert_irqs_disabled();
	rdp = this_cpu_ptr(&rcu_data);
	oldval = rdp->dynticks_nesting;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
	if (oldval) {
		// RCU was already watching, so just do accounting and leave.
		rdp->dynticks_nesting++;
		return;
	}
	rcu_dynticks_task_exit();
	// RCU is not watching here ...
	rcu_dynticks_eqs_exit();
	// ... but is watching here.
	instrumentation_begin();

	// instrumentation for the noinstr rcu_dynticks_eqs_exit()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

	rcu_cleanup_after_idle();
	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	WRITE_ONCE(rdp->dynticks_nesting, 1);
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
	instrumentation_end();
}

/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * If you add or remove a call to rcu_idle_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_exit(false);
	local_irq_restore(flags);
}

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel because it can
 * run an RCU read-side critical section anytime.
 *
 * If you add or remove a call to rcu_user_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr rcu_user_exit(void)
{
	rcu_eqs_exit(1);
}

/**
 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
 *
 * The scheduler tick is not normally enabled when CPUs enter the kernel
 * from nohz_full userspace execution.  After all, nohz_full userspace
 * execution is an RCU quiescent state and the time executing in the kernel
 * is quite short.  Except of course when it isn't.  And it is not hard to
 * cause a large system to spend tens of seconds or even minutes looping
 * in the kernel, which can cause a number of problems, including RCU CPU
 * stall warnings.
 *
 * Therefore, if a nohz_full CPU fails to report a quiescent state
 * in a timely manner, the RCU grace-period kthread sets that CPU's
 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
 * exception will invoke this function, which will turn on the scheduler
 * tick, which will enable RCU to detect that CPU's quiescent states,
 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
 * The tick will be disabled once a quiescent state is reported for
 * this CPU.
 *
 * Of course, in carefully tuned systems, there might never be an
 * interrupt or exception.  In that case, the RCU grace-period kthread
 * will eventually cause one to happen.  However, in less carefully
 * controlled environments, this function allows RCU to get what it
 * needs without creating otherwise useless interruptions.
 */
void __rcu_irq_enter_check_tick(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	// Enabling the tick is unsafe in NMI handlers.
	if (WARN_ON_ONCE(in_nmi()))
		return;

	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");

	if (!tick_nohz_full_cpu(rdp->cpu) ||
	    !READ_ONCE(rdp->rcu_urgent_qs) ||
	    READ_ONCE(rdp->rcu_forced_tick)) {
		// RCU doesn't need nohz_full help from this CPU, or it is
		// already getting that help.
		return;
	}

	// We get here only when not in an extended quiescent state and
	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
	// already watching and (2) The fact that we are in an interrupt
	// handler and that the rcu_node lock is an irq-disabled lock
	// prevents self-deadlock.  So we can safely recheck under the lock.
	// Note that the nohz_full state currently cannot change.
	raw_spin_lock_rcu_node(rdp->mynode);
	if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
		// A nohz_full CPU is in the kernel and RCU needs a
		// quiescent state.  Turn on the tick!
		WRITE_ONCE(rdp->rcu_forced_tick, true);
		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
	}
	raw_spin_unlock_rcu_node(rdp->mynode);
}
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
 * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active.  This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int.  (You will probably
 * run out of stack space first.)
 *
 * If you add or remove a call to rcu_nmi_enter(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_nmi_enter(void)
{
	long incby = 2;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	/* Complain about underflow. */
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);

	/*
	 * If idle from RCU viewpoint, atomically increment ->dynticks
	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
	 * to be in the outermost NMI handler that interrupted an RCU-idle
	 * period (observation due to Andy Lutomirski).
	 */
	if (rcu_dynticks_curr_cpu_in_eqs()) {

		if (!in_nmi())
			rcu_dynticks_task_exit();

		// RCU is not watching here ...
		rcu_dynticks_eqs_exit();
		// ... but is watching here.

		if (!in_nmi()) {
			instrumentation_begin();
			rcu_cleanup_after_idle();
			instrumentation_end();
		}

		instrumentation_begin();
		// instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
		instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
		// instrumentation for the noinstr rcu_dynticks_eqs_exit()
		instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

		incby = 1;
Paul E. McKenney9ea366f2020-02-13 12:31:16 -08001017 } else if (!in_nmi()) {
Thomas Gleixnerff5c4f52020-03-13 17:32:17 +01001018 instrumentation_begin();
Paul E. McKenneyaaf2bc52020-05-21 22:05:15 +02001019 rcu_irq_enter_check_tick();
Thomas Gleixnerff5c4f52020-03-13 17:32:17 +01001020 instrumentation_end();
Peter Zijlstrab58e7332020-06-15 18:24:27 +02001021 } else {
1022 instrumentation_begin();
Paul E. McKenney734d1682014-11-21 14:45:12 -08001023 }
Peter Zijlstrab58e7332020-06-15 18:24:27 +02001024
Paul E. McKenneybd2b8792017-10-04 12:29:01 -07001025 trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
Paul E. McKenney4c5273b2018-08-03 21:00:38 -07001026 rdp->dynticks_nmi_nesting,
Marco Elver6cf539a2019-10-09 17:57:43 +02001027 rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
Thomas Gleixnerff5c4f52020-03-13 17:32:17 +01001028 instrumentation_end();
Paul E. McKenney4c5273b2018-08-03 21:00:38 -07001029 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
1030 rdp->dynticks_nmi_nesting + incby);
Paul E. McKenney734d1682014-11-21 14:45:12 -08001031 barrier();
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001032}
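/*
 * For illustration, one possible ->dynticks_nmi_nesting trace on a CPU
 * that was idle (counter 0) from RCU's viewpoint, following the incby
 * logic above; the matching exit path (not shown in this excerpt) is
 * assumed to unwind the count symmetrically:
 *
 *	idle, RCU not watching			 0
 *	irq or NMI interrupts idle		+1 -> 1   (eqs exit, incby == 1)
 *	NMI nested inside that handler		+2 -> 3   (already watching)
 *	another nested NMI			+2 -> 5
 *
 * As noted above, a value of exactly one identifies the outermost
 * handler that interrupted an RCU-idle period.
 */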
1033
1034/**
Paul E. McKenney34240692011-10-03 11:38:52 -07001035 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001036 *
Paul E. McKenney34240692011-10-03 11:38:52 -07001037 * Enter an interrupt handler, which might result in exiting
1038 * idle mode, in other words, entering the mode in which read-side critical
1039 * sections can occur. The caller must have disabled interrupts.
Paul E. McKenneyc0da3132017-09-22 09:58:47 -07001040 *
Paul E. McKenney9b2e4f12011-09-30 12:10:22 -07001041 * Note that the Linux kernel is fully capable of entering an interrupt
Paul E. McKenney58721f52017-10-03 10:42:22 -07001042 * handler that it never exits, for example when doing upcalls to user mode!
1043 * This code assumes that the idle loop never does upcalls to user mode.
1044 * If your architecture's idle loop does do upcalls to user mode (or does
1045 * anything else that results in unbalanced calls to the irq_enter() and
1046 * irq_exit() functions), RCU will give you what you deserve, good and hard.
1047 * But very infrequently and irreproducibly.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001048 *
1049 * Use things like work queues to work around this limitation.
1050 *
1051 * You have been warned.
1052 *
1053 * If you add or remove a call to rcu_irq_enter(), be sure to test with
1054 * CONFIG_RCU_EQS_DEBUG=y.
Paul E. McKenney23b5c8f2010-09-07 10:38:22 -07001055 */
Thomas Gleixnerff5c4f52020-03-13 17:32:17 +01001056noinstr void rcu_irq_enter(void)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001057{
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001058 lockdep_assert_irqs_disabled();
Paul E. McKenney9ea366f2020-02-13 12:31:16 -08001059 rcu_nmi_enter();
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001060}
Paul E. McKenney734d1682014-11-21 14:45:12 -08001061
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001062/*
1063 * Wrapper for rcu_irq_enter() where interrupts are enabled.
1064 *
1065 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
1066 * with CONFIG_RCU_EQS_DEBUG=y.
1067 */
1068void rcu_irq_enter_irqson(void)
1069{
1070 unsigned long flags;
Paul E. McKenney734d1682014-11-21 14:45:12 -08001071
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001072 local_irq_save(flags);
1073 rcu_irq_enter();
1074 local_irq_restore(flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001075}
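/*
 * A minimal usage sketch, assuming a hypothetical low-level path that
 * must make an RCU read-side access from a context where RCU might not
 * be watching (for example, deep in an architecture's idle path); the
 * RCU_NONIDLE() wrapper packages this same bracketing:
 *
 *	rcu_irq_enter_irqson();
 *	rcu_read_lock();
 *	// ... rcu_dereference() the protected pointer here ...
 *	rcu_read_unlock();
 *	rcu_irq_exit_irqson();
 */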
1076
Paul E. McKenney66e4c332019-08-12 16:14:00 -07001077/*
Joel Fernandes (Google)516e5ae2019-09-05 10:26:41 -07001078 * If any sort of urgency was applied to the current CPU (for example,
1079 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
1080 * to get to a quiescent state, disable it.
Paul E. McKenney66e4c332019-08-12 16:14:00 -07001081 */
Joel Fernandes (Google)516e5ae2019-09-05 10:26:41 -07001082static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
Paul E. McKenney66e4c332019-08-12 16:14:00 -07001083{
Paul E. McKenney5b145572019-11-26 18:05:45 -08001084 raw_lockdep_assert_held_rcu_node(rdp->mynode);
Joel Fernandes (Google)516e5ae2019-09-05 10:26:41 -07001085 WRITE_ONCE(rdp->rcu_urgent_qs, false);
1086 WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
Paul E. McKenney66e4c332019-08-12 16:14:00 -07001087 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
1088 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
Paul E. McKenney2a2ae872020-01-08 20:06:25 -08001089 WRITE_ONCE(rdp->rcu_forced_tick, false);
Paul E. McKenney66e4c332019-08-12 16:14:00 -07001090 }
1091}
1092
Thomas Gleixnerb1fcf9b2020-05-12 09:44:43 +02001093noinstr bool __rcu_is_watching(void)
1094{
1095 return !rcu_dynticks_curr_cpu_in_eqs();
1096}
1097
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001098/**
Zhouyi Zhou2320bda2018-10-08 06:50:41 +00001099 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001100 *
Paul E. McKenney791875d2017-05-03 11:06:05 -07001101 * Return true if RCU is watching the running CPU, which means that this
1102 * CPU can safely enter RCU read-side critical sections. In other words,
Zhouyi Zhou2320bda2018-10-08 06:50:41 +00001103 * if the current CPU is not in its idle loop or is in an interrupt or
1104 * NMI handler, return true.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001105 */
Thomas Gleixnerff5c4f52020-03-13 17:32:17 +01001106bool rcu_is_watching(void)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001107{
Pranith Kumarf534ed12014-07-08 18:26:11 -04001108 bool ret;
Paul E. McKenney9b2e4f12011-09-30 12:10:22 -07001109
Alexei Starovoitov46f00d12015-06-16 10:35:18 -07001110 preempt_disable_notrace();
Paul E. McKenney791875d2017-05-03 11:06:05 -07001111 ret = !rcu_dynticks_curr_cpu_in_eqs();
Alexei Starovoitov46f00d12015-06-16 10:35:18 -07001112 preempt_enable_notrace();
Paul E. McKenney9b2e4f12011-09-30 12:10:22 -07001113 return ret;
1114}
Paul E. McKenney5c173eb2013-09-13 17:20:11 -07001115EXPORT_SYMBOL_GPL(rcu_is_watching);
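/*
 * A minimal sketch of the usual guard pattern, assuming a hypothetical
 * instrumentation hook that can fire from the idle loop: skip RCU-based
 * work when RCU is not watching rather than opening an illegal read-side
 * critical section.
 *
 *	static void hypothetical_trace_hook(void)
 *	{
 *		if (!rcu_is_watching())
 *			return;		// Idle/EQS: RCU read-side unsafe.
 *		rcu_read_lock();
 *		// ... look something up under RCU protection ...
 *		rcu_read_unlock();
 *	}
 */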
Paul E. McKenney9b2e4f12011-09-30 12:10:22 -07001116
Paul E. McKenneybcbfdd02017-04-11 15:50:41 -07001117/*
1118 * If a holdout task is actually running, request an urgent quiescent
1119 * state from its CPU. This is unsynchronized, so migrations can cause
1120 * the request to go to the wrong CPU. Which is OK, all that will happen
1121 * is that the CPU's next context switch will be a bit slower and next
1122 * time around this task will generate another request.
1123 */
1124void rcu_request_urgent_qs_task(struct task_struct *t)
1125{
1126 int cpu;
1127
1128 barrier();
1129 cpu = task_cpu(t);
1130 if (!task_curr(t))
1131 return; /* This task is not running on that CPU. */
Paul E. McKenney2dba13f2018-08-03 21:00:38 -07001132 smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
Paul E. McKenneybcbfdd02017-04-11 15:50:41 -07001133}
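/*
 * A minimal sketch of the intended use, assuming a hypothetical list of
 * holdout tasks maintained by the caller (the real users live in the
 * RCU-tasks code): poke the CPU of each holdout in the hope that it is
 * still running there.
 *
 *	list_for_each_entry(t, &holdouts, hypothetical_holdout_entry)
 *		rcu_request_urgent_qs_task(t);
 *
 * The function itself tolerates tasks that are not currently running
 * and races with migration, so the caller need not check task_curr().
 */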
1134
Paul E. McKenney62fde6e2012-05-22 22:10:24 -07001135#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
Paul E. McKenneyc0d6d012012-01-23 12:41:26 -08001136
1137/*
Paul E. McKenney55547882018-05-15 10:14:34 -07001138 * Is the current CPU online as far as RCU is concerned?
Paul E. McKenney2036d942012-01-30 17:02:47 -08001139 *
Paul E. McKenney55547882018-05-15 10:14:34 -07001140 * Disable preemption to avoid false positives that could otherwise
1141 * happen due to the current CPU number being sampled, this task being
1142 * preempted, its old CPU being taken offline, resuming on some other CPU,
Paul E. McKenney49918a52018-07-07 18:12:26 -07001143 * then determining that its old CPU is now offline.
Paul E. McKenneyc0d6d012012-01-23 12:41:26 -08001144 *
Paul E. McKenney55547882018-05-15 10:14:34 -07001145 * Disable checking if in an NMI handler because we cannot safely
1146 * report errors from NMI handlers anyway. In addition, it is OK to use
1147 * RCU on an offline processor during initial boot, hence the check for
1148 * rcu_scheduler_fully_active.
Paul E. McKenneyc0d6d012012-01-23 12:41:26 -08001149 */
1150bool rcu_lockdep_current_cpu_online(void)
1151{
Paul E. McKenney2036d942012-01-30 17:02:47 -08001152 struct rcu_data *rdp;
1153 struct rcu_node *rnp;
Paul E. McKenneyb97d23c2018-07-04 15:35:00 -07001154 bool ret = false;
Paul E. McKenneyc0d6d012012-01-23 12:41:26 -08001155
Paul E. McKenney55547882018-05-15 10:14:34 -07001156 if (in_nmi() || !rcu_scheduler_fully_active)
Fengguang Wuf6f7ee92013-10-10 11:08:33 -07001157 return true;
Thomas Gleixnerff5c4f52020-03-13 17:32:17 +01001158 preempt_disable_notrace();
Paul E. McKenneyb97d23c2018-07-04 15:35:00 -07001159 rdp = this_cpu_ptr(&rcu_data);
1160 rnp = rdp->mynode;
1161 if (rdp->grpmask & rcu_rnp_online_cpus(rnp))
1162 ret = true;
Thomas Gleixnerff5c4f52020-03-13 17:32:17 +01001163 preempt_enable_notrace();
Paul E. McKenneyb97d23c2018-07-04 15:35:00 -07001164 return ret;
Paul E. McKenneyc0d6d012012-01-23 12:41:26 -08001165}
1166EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
1167
Paul E. McKenney62fde6e2012-05-22 22:10:24 -07001168#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
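/*
 * A minimal sketch of how this predicate is typically consumed, assuming
 * a hypothetical debug assertion in code that might race with CPU
 * hotplug; the real users are lockdep-based checks of this general form:
 *
 *	RCU_LOCKDEP_WARN(!rcu_lockdep_current_cpu_online(),
 *			 "RCU used on an offline CPU");
 */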
Paul E. McKenney9b2e4f12011-09-30 12:10:22 -07001169
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001170/*
Paul E. McKenney9b9500d2017-08-17 17:05:59 -07001171 * We are reporting a quiescent state on behalf of some other CPU, so
1172 * it is our responsibility to check for and handle potential overflow
Paul E. McKenneya66ae8a2018-04-27 18:06:08 -07001173 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
Paul E. McKenney9b9500d2017-08-17 17:05:59 -07001174 * After all, the CPU might be in deep idle state, and thus executing no
1175 * code whatsoever.
1176 */
1177static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
1178{
Matthew Wilcoxa32e01e2018-01-17 06:24:30 -08001179 raw_lockdep_assert_held_rcu_node(rnp);
Paul E. McKenneya66ae8a2018-04-27 18:06:08 -07001180 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
1181 rnp->gp_seq))
Paul E. McKenney9b9500d2017-08-17 17:05:59 -07001182 WRITE_ONCE(rdp->gpwrap, true);
Paul E. McKenney8aa670c2018-04-28 14:15:40 -07001183 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
1184 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
Paul E. McKenney9b9500d2017-08-17 17:05:59 -07001185}
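/*
 * For illustration, with the counter scaled down to 8 bits so the
 * numbers stay small: the quarter-range guard (ULONG_MAX / 4 above)
 * becomes 64, so a CPU whose rdp->gp_seq snapshot is 10 while its
 * rcu_node's ->gp_seq has advanced to 100 satisfies 10 + 64 < 100 and
 * has ->gpwrap set, preventing later comparisons from mistaking that
 * stale snapshot for a recent one.
 */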
1186
1187/*
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001188 * Snapshot the specified CPU's dynticks counter so that we can later
1189 * credit it with an implicit quiescent state. Return 1 if this CPU
Paul E. McKenney1eba8f82009-09-23 09:50:42 -07001190 * is in dynticks idle mode, which is an extended quiescent state.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001191 */
Paul E. McKenneyfe5ac722017-05-11 11:26:22 -07001192static int dyntick_save_progress_counter(struct rcu_data *rdp)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001193{
Paul E. McKenneydc5a4f22018-08-03 21:00:38 -07001194 rdp->dynticks_snap = rcu_dynticks_snap(rdp);
Paul E. McKenney02a5c5502016-11-02 17:25:06 -07001195 if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
Paul E. McKenney88d1bea2018-07-04 14:45:00 -07001196 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
Paul E. McKenney9b9500d2017-08-17 17:05:59 -07001197 rcu_gpnum_ovf(rdp->mynode, rdp);
Paul E. McKenney23a9bac2015-12-13 08:57:10 -08001198 return 1;
Andreea-Cristina Bernat7941dbd2014-03-17 18:33:28 +02001199 }
Paul E. McKenney23a9bac2015-12-13 08:57:10 -08001200 return 0;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001201}
1202
1203/*
1204 * Return true if the specified CPU has passed through a quiescent
1205 * state by virtue of being in or having passed through a dynticks
1206 * idle state since the last call to dyntick_save_progress_counter()
Paul E. McKenneya82dcc72012-08-01 14:29:20 -07001207 * for this same CPU, or by virtue of having been offline.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001208 */
Paul E. McKenneyfe5ac722017-05-11 11:26:22 -07001209static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001210{
Paul E. McKenney3a19b462016-11-30 11:21:21 -08001211 unsigned long jtsq;
Paul E. McKenney0f9be8c2017-01-27 13:17:02 -08001212 bool *rnhqp;
Paul E. McKenney9226b102017-01-27 14:17:50 -08001213 bool *ruqp;
Paul E. McKenney9b9500d2017-08-17 17:05:59 -07001214 struct rcu_node *rnp = rdp->mynode;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001215
1216 /*
1217 * If the CPU passed through or entered a dynticks idle phase with
1218 * no active irq/NMI handlers, then we can safely pretend that the CPU
1219 * already acknowledged the request to pass through a quiescent
1220 * state. Either way, that CPU cannot possibly be in an RCU
1221 * read-side critical section that started before the beginning
1222 * of the current RCU grace period.
1223 */
Paul E. McKenneydc5a4f22018-08-03 21:00:38 -07001224 if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
Paul E. McKenney88d1bea2018-07-04 14:45:00 -07001225 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
Paul E. McKenney9b9500d2017-08-17 17:05:59 -07001226 rcu_gpnum_ovf(rnp, rdp);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001227 return 1;
1228 }
1229
Paul E. McKenneyf2e2df52018-05-15 16:23:23 -07001230 /* If waiting too long on an offline CPU, complain. */
1231 if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
Paul E. McKenney88d1bea2018-07-04 14:45:00 -07001232 time_after(jiffies, rcu_state.gp_start + HZ)) {
Paul E. McKenneyf2e2df52018-05-15 16:23:23 -07001233 bool onl;
1234 struct rcu_node *rnp1;
1235
1236 WARN_ON(1); /* Offline CPUs are supposed to report QS! */
1237 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
1238 __func__, rnp->grplo, rnp->grphi, rnp->level,
1239 (long)rnp->gp_seq, (long)rnp->completedqs);
1240 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
1241 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
1242 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
1243 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
1244 pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
1245 __func__, rdp->cpu, ".o"[onl],
1246 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
1247 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
1248 return 1; /* Break things loose after complaining. */
1249 }
1250
Paul E. McKenney65d798f2013-04-12 16:19:10 -07001251 /*
Paul E. McKenney4a81e832014-06-20 16:49:01 -07001252 * A CPU running for an extended time within the kernel can
Paul E. McKenneyc06aed02018-07-25 11:25:23 -07001253 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
1254 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
Paul E. McKenney7e28c5a2018-07-11 08:09:28 -07001255 * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the
1256 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
1257 * variable are safe because the assignments are repeated if this
1258 * CPU failed to pass through a quiescent state. This code
Paul E. McKenneyc06aed02018-07-25 11:25:23 -07001259 * also checks .jiffies_resched in case jiffies_to_sched_qs
Paul E. McKenney7e28c5a2018-07-11 08:09:28 -07001260 * is set way high.
Paul E. McKenney65d798f2013-04-12 16:19:10 -07001261 */
Paul E. McKenneyc06aed02018-07-25 11:25:23 -07001262 jtsq = READ_ONCE(jiffies_to_sched_qs);
Paul E. McKenney2dba13f2018-08-03 21:00:38 -07001263 ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
1264 rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
Paul E. McKenney0f9be8c2017-01-27 13:17:02 -08001265 if (!READ_ONCE(*rnhqp) &&
Paul E. McKenney7e28c5a2018-07-11 08:09:28 -07001266 (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
Paul E. McKenneyb2b00dd2019-10-30 11:56:10 -07001267 time_after(jiffies, rcu_state.jiffies_resched) ||
1268 rcu_state.cbovld)) {
Paul E. McKenney0f9be8c2017-01-27 13:17:02 -08001269 WRITE_ONCE(*rnhqp, true);
Paul E. McKenney9226b102017-01-27 14:17:50 -08001270 /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
1271 smp_store_release(ruqp, true);
Paul E. McKenney7e28c5a2018-07-11 08:09:28 -07001272 } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
1273 WRITE_ONCE(*ruqp, true);
Paul E. McKenney6193c762013-09-23 13:57:18 -07001274 }
1275
Paul E. McKenney28053bc2016-12-01 11:31:31 -08001276 /*
Paul E. McKenneyc98cac62018-11-21 11:35:03 -08001277 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
Paul E. McKenneyd3052102018-07-25 11:49:47 -07001278 * The above code handles this, but only for straight cond_resched().
1279 * And some in-kernel loops check need_resched() before calling
1280 * cond_resched(), which defeats the above code for CPUs that are
1281 * running in-kernel with scheduling-clock interrupts disabled.
1282 * So hit them over the head with the resched_cpu() hammer!
Paul E. McKenney28053bc2016-12-01 11:31:31 -08001283 */
Paul E. McKenneyd3052102018-07-25 11:49:47 -07001284 if (tick_nohz_full_cpu(rdp->cpu) &&
Paul E. McKenneyb2b00dd2019-10-30 11:56:10 -07001285 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
1286 rcu_state.cbovld)) {
Joel Fernandes (Google)05ef9e92019-08-15 22:59:14 -04001287 WRITE_ONCE(*ruqp, true);
Paul E. McKenney28053bc2016-12-01 11:31:31 -08001288 resched_cpu(rdp->cpu);
Paul E. McKenneyd3052102018-07-25 11:49:47 -07001289 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1290 }
1291
1292 /*
1293 * If more than halfway to RCU CPU stall-warning time, invoke
1294 * resched_cpu() more frequently to try to loosen things up a bit.
1295 * Also check to see if the CPU is getting hammered with interrupts,
1296 * but only once per grace period, just to keep the IPIs down to
1297 * a dull roar.
Paul E. McKenney49149502015-12-11 13:48:43 -08001298 */
Paul E. McKenney7e28c5a2018-07-11 08:09:28 -07001299 if (time_after(jiffies, rcu_state.jiffies_resched)) {
Paul E. McKenneyd3052102018-07-25 11:49:47 -07001300 if (time_after(jiffies,
1301 READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
1302 resched_cpu(rdp->cpu);
1303 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1304 }
Paul E. McKenney9b9500d2017-08-17 17:05:59 -07001305 if (IS_ENABLED(CONFIG_IRQ_WORK) &&
Paul E. McKenney8aa670c2018-04-28 14:15:40 -07001306 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
Paul E. McKenney9b9500d2017-08-17 17:05:59 -07001307 (rnp->ffmask & rdp->grpmask)) {
1308 init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
Sebastian Andrzej Siewior49915ac2020-03-21 12:26:03 +01001309 atomic_set(&rdp->rcu_iw.flags, IRQ_WORK_HARD_IRQ);
Paul E. McKenney9b9500d2017-08-17 17:05:59 -07001310 rdp->rcu_iw_pending = true;
Paul E. McKenney8aa670c2018-04-28 14:15:40 -07001311 rdp->rcu_iw_gp_seq = rnp->gp_seq;
Paul E. McKenney9b9500d2017-08-17 17:05:59 -07001312 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
1313 }
1314 }
Paul E. McKenney49149502015-12-11 13:48:43 -08001315
Paul E. McKenneya82dcc72012-08-01 14:29:20 -07001316 return 0;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001317}
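/*
 * For illustration, the approximate escalation sequence implemented
 * above, with jtsq short for jiffies_to_sched_qs and ages measured from
 * the start of the current grace period:
 *
 *	age > jtsq			set .rcu_urgent_qs
 *	age > 2 * jtsq, past
 *	.jiffies_resched, or
 *	callback overload		also set .rcu_need_heavy_qs
 *	nohz_full CPU quiet for
 *	3 * jtsq (or overload)		resched_cpu() as well
 *	past .jiffies_resched		resched_cpu() every jtsq jiffies and
 *					at most one irq_work IPI per grace
 *					period
 */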
1318
Paul E. McKenney41e80592018-04-12 11:24:09 -07001319/* Trace-event wrapper function for trace_rcu_future_grace_period. */
1320static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
Joel Fernandesb73de912018-05-20 21:42:18 -07001321 unsigned long gp_seq_req, const char *s)
Paul E. McKenney0446be42012-12-30 15:21:01 -08001322{
Paul E. McKenney0937d042020-01-03 14:53:31 -08001323 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
1324 gp_seq_req, rnp->level,
1325 rnp->grplo, rnp->grphi, s);
Paul E. McKenney0446be42012-12-30 15:21:01 -08001326}
1327
1328/*
Joel Fernandesb73de912018-05-20 21:42:18 -07001329 * rcu_start_this_gp - Request the start of a particular grace period
Joel Fernandes (Google)df2bf8f2018-05-22 23:38:14 -07001330 * @rnp_start: The leaf node of the CPU from which to start.
Joel Fernandesb73de912018-05-20 21:42:18 -07001331 * @rdp: The rcu_data corresponding to the CPU from which to start.
1332 * @gp_seq_req: The gp_seq of the grace period to start.
1333 *
Paul E. McKenney41e80592018-04-12 11:24:09 -07001334 * Start the specified grace period, as needed to handle newly arrived
Paul E. McKenney0446be42012-12-30 15:21:01 -08001335 * callbacks. The required future grace periods are recorded in each
Paul E. McKenney7a1d0f22018-05-01 10:26:57 -07001336 * rcu_node structure's ->gp_seq_needed field. Returns true if there
Paul E. McKenney48a76392014-03-11 13:02:16 -07001337 * is reason to awaken the grace-period kthread.
Paul E. McKenney0446be42012-12-30 15:21:01 -08001338 *
Paul E. McKenneyd5cd9682018-04-12 10:45:06 -07001339 * The caller must hold the specified rcu_node structure's ->lock, which
1340 * is why the caller is responsible for waking the grace-period kthread.
Joel Fernandesb73de912018-05-20 21:42:18 -07001341 *
1342 * Returns true if the GP thread needs to be awakened else false.
Paul E. McKenney0446be42012-12-30 15:21:01 -08001343 */
Joel Fernandes (Google)df2bf8f2018-05-22 23:38:14 -07001344static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
Joel Fernandesb73de912018-05-20 21:42:18 -07001345 unsigned long gp_seq_req)
Paul E. McKenney0446be42012-12-30 15:21:01 -08001346{
Paul E. McKenney48a76392014-03-11 13:02:16 -07001347 bool ret = false;
Joel Fernandes (Google)df2bf8f2018-05-22 23:38:14 -07001348 struct rcu_node *rnp;
Paul E. McKenney0446be42012-12-30 15:21:01 -08001349
Paul E. McKenney360e0da2018-04-12 11:50:41 -07001350 /*
1351 * Use funnel locking to either acquire the root rcu_node
1352 * structure's lock or bail out if the need for this grace period
Joel Fernandes (Google)df2bf8f2018-05-22 23:38:14 -07001353 * has already been recorded -- or if that grace period has in
1354 * fact already started. If there is already a grace period in
1355 * progress in a non-leaf node, no recording is needed because the
1356 * end of the grace period will scan the leaf rcu_node structures.
1357 * Note that rnp_start->lock must not be released.
Paul E. McKenney360e0da2018-04-12 11:50:41 -07001358 */
Joel Fernandes (Google)df2bf8f2018-05-22 23:38:14 -07001359 raw_lockdep_assert_held_rcu_node(rnp_start);
1360 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
1361 for (rnp = rnp_start; 1; rnp = rnp->parent) {
1362 if (rnp != rnp_start)
1363 raw_spin_lock_rcu_node(rnp);
1364 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
1365 rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
1366 (rnp != rnp_start &&
1367 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
1368 trace_rcu_this_gp(rnp, rdp, gp_seq_req,
Joel Fernandesb73de912018-05-20 21:42:18 -07001369 TPS("Prestarted"));
Paul E. McKenney360e0da2018-04-12 11:50:41 -07001370 goto unlock_out;
1371 }
Paul E. McKenney8ff372902020-01-04 11:33:17 -08001372 WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
Joel Fernandes (Google)226ca5e2018-05-22 23:38:15 -07001373 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
Paul E. McKenneya2165e42018-05-12 07:42:20 -07001374 /*
Joel Fernandes (Google)226ca5e2018-05-22 23:38:15 -07001375 * We just marked the leaf or internal node, and a
1376 * grace period is in progress, which means that
1377 * rcu_gp_cleanup() will see the marking. Bail to
1378 * reduce contention.
Paul E. McKenneya2165e42018-05-12 07:42:20 -07001379 */
Joel Fernandes (Google)df2bf8f2018-05-22 23:38:14 -07001380 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
Joel Fernandesb73de912018-05-20 21:42:18 -07001381 TPS("Startedleaf"));
Paul E. McKenneya2165e42018-05-12 07:42:20 -07001382 goto unlock_out;
1383 }
Joel Fernandes (Google)df2bf8f2018-05-22 23:38:14 -07001384 if (rnp != rnp_start && rnp->parent != NULL)
1385 raw_spin_unlock_rcu_node(rnp);
1386 if (!rnp->parent)
Paul E. McKenney360e0da2018-04-12 11:50:41 -07001387 break; /* At root, and perhaps also leaf. */
Paul E. McKenney0446be42012-12-30 15:21:01 -08001388 }
1389
Paul E. McKenney360e0da2018-04-12 11:50:41 -07001390 /* If GP already in progress, just leave, otherwise start one. */
Paul E. McKenneyde8e8732018-07-03 17:22:34 -07001391 if (rcu_gp_in_progress()) {
Joel Fernandes (Google)df2bf8f2018-05-22 23:38:14 -07001392 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
Paul E. McKenney0446be42012-12-30 15:21:01 -08001393 goto unlock_out;
1394 }
Joel Fernandes (Google)df2bf8f2018-05-22 23:38:14 -07001395 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001396 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
Paul E. McKenney2906d212020-01-03 15:17:12 -08001397 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
Paul E. McKenney5648d652020-01-21 12:30:22 -08001398 if (!READ_ONCE(rcu_state.gp_kthread)) {
Joel Fernandes (Google)df2bf8f2018-05-22 23:38:14 -07001399 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
Paul E. McKenney360e0da2018-04-12 11:50:41 -07001400 goto unlock_out;
Paul E. McKenney0446be42012-12-30 15:21:01 -08001401 }
Paul E. McKenney62ae1952020-03-21 19:52:20 -07001402 trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
Paul E. McKenney360e0da2018-04-12 11:50:41 -07001403 ret = true; /* Caller must wake GP kthread. */
Paul E. McKenney0446be42012-12-30 15:21:01 -08001404unlock_out:
Paul E. McKenneyab5e8692018-05-01 11:07:23 -07001405 /* Push furthest requested GP to leaf node and rcu_data structure. */
Joel Fernandes (Google)df2bf8f2018-05-22 23:38:14 -07001406 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
Paul E. McKenney8ff372902020-01-04 11:33:17 -08001407 WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
1408 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
Paul E. McKenneyab5e8692018-05-01 11:07:23 -07001409 }
Joel Fernandes (Google)df2bf8f2018-05-22 23:38:14 -07001410 if (rnp != rnp_start)
1411 raw_spin_unlock_rcu_node(rnp);
Paul E. McKenney48a76392014-03-11 13:02:16 -07001412 return ret;
Paul E. McKenney0446be42012-12-30 15:21:01 -08001413}
1414
1415/*
1416 * Clean up any old requests for the just-ended grace period. Also return
Paul E. McKenneyd1e4f012017-02-08 14:58:41 -08001417 * whether any additional grace periods have been requested.
Paul E. McKenney0446be42012-12-30 15:21:01 -08001418 */
Paul E. McKenney3481f2e2018-07-03 17:22:34 -07001419static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
Paul E. McKenney0446be42012-12-30 15:21:01 -08001420{
Paul E. McKenneyfb313402018-04-12 07:20:30 -07001421 bool needmore;
Paul E. McKenneyda1df502018-07-03 15:37:16 -07001422 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
Paul E. McKenney0446be42012-12-30 15:21:01 -08001423
Paul E. McKenney7a1d0f22018-05-01 10:26:57 -07001424 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1425 if (!needmore)
1426 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
Joel Fernandesb73de912018-05-20 21:42:18 -07001427 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
Paul E. McKenney41e80592018-04-12 11:24:09 -07001428 needmore ? TPS("CleanupMore") : TPS("Cleanup"));
Paul E. McKenney0446be42012-12-30 15:21:01 -08001429 return needmore;
1430}
1431
1432/*
Paul E. McKenney5648d652020-01-21 12:30:22 -08001433 * Awaken the grace-period kthread. Don't do a self-awaken (unless in an
1434 * interrupt or softirq handler, in which case we just might immediately
1435 * sleep upon return, resulting in a grace-period hang), and don't bother
1436 * awakening when there is nothing for the grace-period kthread to do
1437 * (as in several CPUs raced to awaken, we lost), and finally don't try
1438 * to awaken a kthread that has not yet been created. If all those checks
1439 * are passed, track some debug information and awaken.
Zhang, Jun1d1f8982018-12-18 06:55:01 -08001440 *
1441 * So why do the self-wakeup when in an interrupt or softirq handler
1442 * in the grace-period kthread's context? Because the kthread might have
1443 * been interrupted just as it was going to sleep, and just after the final
1444 * pre-sleep check of the awaken condition. In this case, a wakeup really
1445 * is required, and is therefore supplied.
Paul E. McKenney48a76392014-03-11 13:02:16 -07001446 */
Paul E. McKenney532c00c2018-07-03 17:22:34 -07001447static void rcu_gp_kthread_wake(void)
Paul E. McKenney48a76392014-03-11 13:02:16 -07001448{
Paul E. McKenney5648d652020-01-21 12:30:22 -08001449 struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1450
1451 if ((current == t && !in_irq() && !in_serving_softirq()) ||
1452 !READ_ONCE(rcu_state.gp_flags) || !t)
Paul E. McKenney48a76392014-03-11 13:02:16 -07001453 return;
Paul E. McKenneyfd897572018-12-10 16:09:49 -08001454 WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1455 WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
Paul E. McKenney532c00c2018-07-03 17:22:34 -07001456 swake_up_one(&rcu_state.gp_wq);
Paul E. McKenney48a76392014-03-11 13:02:16 -07001457}
1458
1459/*
Paul E. McKenney29365e52018-04-30 10:57:36 -07001460 * If there is room, assign a ->gp_seq number to any callbacks on this
1461 * CPU that have not already been assigned. Also accelerate any callbacks
1462 * that were previously assigned a ->gp_seq number that has since proven
1463 * to be too conservative, which can happen if callbacks get assigned a
1464 * ->gp_seq number while RCU is idle, but with reference to a non-root
1465 * rcu_node structure. This function is idempotent, so it does not hurt
1466 * to call it repeatedly. Returns a flag saying that we should awaken
1467 * the RCU grace-period kthread.
Paul E. McKenneydc35c892012-12-03 13:52:00 -08001468 *
1469 * The caller must hold rnp->lock with interrupts disabled.
1470 */
Paul E. McKenney02f50142018-07-03 17:22:34 -07001471static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
Paul E. McKenneydc35c892012-12-03 13:52:00 -08001472{
Joel Fernandesb73de912018-05-20 21:42:18 -07001473 unsigned long gp_seq_req;
Paul E. McKenney15fecf82017-02-08 12:36:42 -08001474 bool ret = false;
Paul E. McKenneydc35c892012-12-03 13:52:00 -08001475
Paul E. McKenneyd1b222c2019-07-02 16:03:33 -07001476 rcu_lockdep_assert_cblist_protected(rdp);
Matthew Wilcoxa32e01e2018-01-17 06:24:30 -08001477 raw_lockdep_assert_held_rcu_node(rnp);
Paul E. McKenneyc0b334c2017-04-28 12:32:15 -07001478
Paul E. McKenney15fecf82017-02-08 12:36:42 -08001479 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1480 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
Paul E. McKenney48a76392014-03-11 13:02:16 -07001481 return false;
Paul E. McKenneydc35c892012-12-03 13:52:00 -08001482
1483 /*
Paul E. McKenney15fecf82017-02-08 12:36:42 -08001484 * Callbacks are often registered with incomplete grace-period
1485 * information. Something about the fact that getting exact
1486 * information requires acquiring a global lock... RCU therefore
1487 * makes a conservative estimate of the grace period number at which
1488 * a given callback will become ready to invoke. The following
1489 * code checks this estimate and improves it when possible, thus
1490 * accelerating callback invocation to an earlier grace-period
1491 * number.
Paul E. McKenneydc35c892012-12-03 13:52:00 -08001492 */
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001493 gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
Joel Fernandesb73de912018-05-20 21:42:18 -07001494 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1495 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
Paul E. McKenney6d4b4182012-11-27 16:55:44 -08001496
1497 /* Trace depending on how much we were able to accelerate. */
Paul E. McKenney15fecf82017-02-08 12:36:42 -08001498 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001499 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccWaitCB"));
Paul E. McKenney6d4b4182012-11-27 16:55:44 -08001500 else
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001501 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccReadyCB"));
Paul E. McKenney48a76392014-03-11 13:02:16 -07001502 return ret;
Paul E. McKenneydc35c892012-12-03 13:52:00 -08001503}
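/*
 * For illustration of the rcu_seq_snap() call above, assuming the
 * low-order two bits of ->gp_seq encode the grace-period phase (as the
 * rcu_seq_*() helpers elsewhere in RCU define it): if rcu_state.gp_seq
 * is 0x104 (no grace period in progress), a newly queued callback is
 * safe after the next full grace period, so the snapshot is 0x108; if
 * it is 0x105 (a grace period already running that might miss this
 * callback), the snapshot is 0x10c, one further grace period out.
 */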
1504
1505/*
Paul E. McKenneye44e73c2018-05-01 16:29:47 -07001506 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1507 * rcu_node structure's ->lock be held. It consults the cached value
1508 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1509 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1510 * while holding the leaf rcu_node structure's ->lock.
1511 */
Paul E. McKenneyc6e09b92018-07-03 17:22:34 -07001512static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
Paul E. McKenneye44e73c2018-05-01 16:29:47 -07001513 struct rcu_data *rdp)
1514{
1515 unsigned long c;
1516 bool needwake;
1517
Paul E. McKenneyd1b222c2019-07-02 16:03:33 -07001518 rcu_lockdep_assert_cblist_protected(rdp);
Paul E. McKenneyc6e09b92018-07-03 17:22:34 -07001519 c = rcu_seq_snap(&rcu_state.gp_seq);
Paul E. McKenneya5b89502020-01-07 15:48:39 -08001520 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
Paul E. McKenneye44e73c2018-05-01 16:29:47 -07001521 /* Old request still live, so mark recent callbacks. */
1522 (void)rcu_segcblist_accelerate(&rdp->cblist, c);
1523 return;
1524 }
1525 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
Paul E. McKenney02f50142018-07-03 17:22:34 -07001526 needwake = rcu_accelerate_cbs(rnp, rdp);
Paul E. McKenneye44e73c2018-05-01 16:29:47 -07001527 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1528 if (needwake)
Paul E. McKenney532c00c2018-07-03 17:22:34 -07001529 rcu_gp_kthread_wake();
Paul E. McKenneye44e73c2018-05-01 16:29:47 -07001530}
1531
1532/*
Paul E. McKenneydc35c892012-12-03 13:52:00 -08001533 * Move any callbacks whose grace period has completed to the
1534 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
Paul E. McKenney29365e52018-04-30 10:57:36 -07001535 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
Paul E. McKenneydc35c892012-12-03 13:52:00 -08001536 * sublist. This function is idempotent, so it does not hurt to
1537 * invoke it repeatedly. As long as it is not invoked -too- often...
Paul E. McKenney48a76392014-03-11 13:02:16 -07001538 * Returns true if the RCU grace-period kthread needs to be awakened.
Paul E. McKenneydc35c892012-12-03 13:52:00 -08001539 *
1540 * The caller must hold rnp->lock with interrupts disabled.
1541 */
Paul E. McKenney834f56b2018-07-03 17:22:34 -07001542static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
Paul E. McKenneydc35c892012-12-03 13:52:00 -08001543{
Paul E. McKenneyd1b222c2019-07-02 16:03:33 -07001544 rcu_lockdep_assert_cblist_protected(rdp);
Matthew Wilcoxa32e01e2018-01-17 06:24:30 -08001545 raw_lockdep_assert_held_rcu_node(rnp);
Paul E. McKenneyc0b334c2017-04-28 12:32:15 -07001546
Paul E. McKenney15fecf82017-02-08 12:36:42 -08001547 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1548 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
Paul E. McKenney48a76392014-03-11 13:02:16 -07001549 return false;
Paul E. McKenneydc35c892012-12-03 13:52:00 -08001550
1551 /*
Paul E. McKenney29365e52018-04-30 10:57:36 -07001552 * Find all callbacks whose ->gp_seq numbers indicate that they
Paul E. McKenneydc35c892012-12-03 13:52:00 -08001553 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1554 */
Paul E. McKenney29365e52018-04-30 10:57:36 -07001555 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
Paul E. McKenneydc35c892012-12-03 13:52:00 -08001556
1557 /* Classify any remaining callbacks. */
Paul E. McKenney02f50142018-07-03 17:22:34 -07001558 return rcu_accelerate_cbs(rnp, rdp);
Paul E. McKenneydc35c892012-12-03 13:52:00 -08001559}
1560
1561/*
Paul E. McKenney7f36ef82019-05-28 05:54:26 -07001562 * Move and classify callbacks, but only if doing so won't require
1563 * that the RCU grace-period kthread be awakened.
1564 */
1565static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1566 struct rcu_data *rdp)
1567{
Paul E. McKenneyd1b222c2019-07-02 16:03:33 -07001568 rcu_lockdep_assert_cblist_protected(rdp);
Paul E. McKenney6608c3a2019-06-01 06:16:38 -07001569 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) ||
1570 !raw_spin_trylock_rcu_node(rnp))
Paul E. McKenney7f36ef82019-05-28 05:54:26 -07001571 return;
1572 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
Paul E. McKenney6608c3a2019-06-01 06:16:38 -07001573 raw_spin_unlock_rcu_node(rnp);
Paul E. McKenney7f36ef82019-05-28 05:54:26 -07001574}
1575
1576/*
Paul E. McKenneyba9fbe92013-03-19 11:53:31 -07001577 * Update CPU-local rcu_data state to record the beginnings and ends of
1578 * grace periods. The caller must hold the ->lock of the leaf rcu_node
1579 * structure corresponding to the current CPU, and must have irqs disabled.
Paul E. McKenney48a76392014-03-11 13:02:16 -07001580 * Returns true if the grace-period kthread needs to be awakened.
Paul E. McKenneyd09b62d2009-11-02 13:52:28 -08001581 */
Paul E. McKenneyc7e48f72018-07-03 17:22:34 -07001582static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
Paul E. McKenneyd09b62d2009-11-02 13:52:28 -08001583{
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07001584 bool ret = false;
Paul E. McKenneyb5ea0372019-12-09 15:19:45 -08001585 bool need_qs;
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07001586 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
1587 rcu_segcblist_is_offloaded(&rdp->cblist);
Paul E. McKenney48a76392014-03-11 13:02:16 -07001588
Matthew Wilcoxa32e01e2018-01-17 06:24:30 -08001589 raw_lockdep_assert_held_rcu_node(rnp);
Paul E. McKenneyc0b334c2017-04-28 12:32:15 -07001590
Paul E. McKenney67e14c12018-04-27 16:01:46 -07001591 if (rdp->gp_seq == rnp->gp_seq)
1592 return false; /* Nothing to do. */
1593
Paul E. McKenneyba9fbe92013-03-19 11:53:31 -07001594 /* Handle the ends of any preceding grace periods first. */
Paul E. McKenney67e14c12018-04-27 16:01:46 -07001595 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1596 unlikely(READ_ONCE(rdp->gpwrap))) {
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07001597 if (!offloaded)
1598 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
Paul E. McKenneyb5ea0372019-12-09 15:19:45 -08001599 rdp->core_needs_qs = false;
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001600 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
Paul E. McKenney67e14c12018-04-27 16:01:46 -07001601 } else {
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07001602 if (!offloaded)
1603 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
Paul E. McKenneyb5ea0372019-12-09 15:19:45 -08001604 if (rdp->core_needs_qs)
1605 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
Paul E. McKenneyd09b62d2009-11-02 13:52:28 -08001606 }
Paul E. McKenney398ebe62013-03-19 10:53:14 -07001607
Paul E. McKenney67e14c12018-04-27 16:01:46 -07001608 /* Now handle the beginnings of any new-to-this-CPU grace periods. */
1609 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1610 unlikely(READ_ONCE(rdp->gpwrap))) {
Paul E. McKenney6eaef632013-03-19 10:08:37 -07001611 /*
1612 * If the current grace period is waiting for this CPU,
1613 * set up to detect a quiescent state, otherwise don't
1614 * go looking for one.
1615 */
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001616 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
Paul E. McKenneyb5ea0372019-12-09 15:19:45 -08001617 need_qs = !!(rnp->qsmask & rdp->grpmask);
1618 rdp->cpu_no_qs.b.norm = need_qs;
1619 rdp->core_needs_qs = need_qs;
Paul E. McKenney6eaef632013-03-19 10:08:37 -07001620 zero_cpu_stall_ticks(rdp);
1621 }
Paul E. McKenney67e14c12018-04-27 16:01:46 -07001622 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
Zhang, Jun13dc7d02018-12-19 10:37:34 -08001623 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
Paul E. McKenney8ff372902020-01-04 11:33:17 -08001624 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
Paul E. McKenney3d184692018-05-15 16:47:30 -07001625 WRITE_ONCE(rdp->gpwrap, false);
1626 rcu_gpnum_ovf(rnp, rdp);
Paul E. McKenney48a76392014-03-11 13:02:16 -07001627 return ret;
Paul E. McKenney6eaef632013-03-19 10:08:37 -07001628}
1629
Paul E. McKenney15cabdf2018-07-03 17:22:34 -07001630static void note_gp_changes(struct rcu_data *rdp)
Paul E. McKenney6eaef632013-03-19 10:08:37 -07001631{
1632 unsigned long flags;
Paul E. McKenney48a76392014-03-11 13:02:16 -07001633 bool needwake;
Paul E. McKenney6eaef632013-03-19 10:08:37 -07001634 struct rcu_node *rnp;
1635
1636 local_irq_save(flags);
1637 rnp = rdp->mynode;
Paul E. McKenney67e14c12018-04-27 16:01:46 -07001638 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
Paul E. McKenney7d0ae802015-03-03 14:57:58 -08001639 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
Peter Zijlstra2a67e742015-10-08 12:24:23 +02001640 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
Paul E. McKenney6eaef632013-03-19 10:08:37 -07001641 local_irq_restore(flags);
1642 return;
1643 }
Paul E. McKenneyc7e48f72018-07-03 17:22:34 -07001644 needwake = __note_gp_changes(rnp, rdp);
Boqun Feng67c583a72015-12-29 12:18:47 +08001645 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
Paul E. McKenney48a76392014-03-11 13:02:16 -07001646 if (needwake)
Paul E. McKenney532c00c2018-07-03 17:22:34 -07001647 rcu_gp_kthread_wake();
Paul E. McKenney6eaef632013-03-19 10:08:37 -07001648}
1649
Paul E. McKenney22212332018-07-03 17:22:34 -07001650static void rcu_gp_slow(int delay)
Paul E. McKenney0f41c0d2015-03-10 18:33:20 -07001651{
1652 if (delay > 0 &&
Paul E. McKenney22212332018-07-03 17:22:34 -07001653 !(rcu_seq_ctr(rcu_state.gp_seq) %
Paul E. McKenneydee4f422018-04-26 15:30:28 -07001654 (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
Paul E. McKenney77865de2020-05-07 15:44:46 -07001655 schedule_timeout_idle(delay);
Paul E. McKenney0f41c0d2015-03-10 18:33:20 -07001656}
1657
Paul E. McKenney55b2dcf2020-04-01 19:57:52 -07001658static unsigned long sleep_duration;
1659
1660/* Allow rcutorture to stall the grace-period kthread. */
1661void rcu_gp_set_torture_wait(int duration)
1662{
1663 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1664 WRITE_ONCE(sleep_duration, duration);
1665}
1666EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1667
1668/* Actually implement the aforementioned wait. */
1669static void rcu_gp_torture_wait(void)
1670{
1671 unsigned long duration;
1672
1673 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1674 return;
1675 duration = xchg(&sleep_duration, 0UL);
1676 if (duration > 0) {
1677 pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
Paul E. McKenney77865de2020-05-07 15:44:46 -07001678 schedule_timeout_idle(duration);
Paul E. McKenney55b2dcf2020-04-01 19:57:52 -07001679 pr_alert("%s: Wait complete\n", __func__);
1680 }
1681}
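/*
 * A minimal usage sketch, assuming a hypothetical rcutorture scenario
 * (these hooks are no-ops unless CONFIG_RCU_TORTURE_TEST is enabled):
 * stall the grace-period kthread at its next wait point for roughly a
 * quarter of a second.
 *
 *	rcu_gp_set_torture_wait(HZ / 4);
 *	// rcu_gp_fqs_loop() below honors this via rcu_gp_torture_wait().
 */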
1682
Paul E. McKenney6eaef632013-03-19 10:08:37 -07001683/*
Paul E. McKenney45fed3e2015-11-07 23:35:00 -08001684 * Initialize a new grace period. Return false if no grace period required.
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001685 */
Paul E. McKenney0854a05c2018-07-03 17:22:34 -07001686static bool rcu_gp_init(void)
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001687{
Paul E. McKenneyec2c2972018-05-07 09:34:17 -07001688 unsigned long flags;
Paul E. McKenney0aa04b02015-01-23 21:52:37 -08001689 unsigned long oldmask;
Paul E. McKenneyec2c2972018-05-07 09:34:17 -07001690 unsigned long mask;
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001691 struct rcu_data *rdp;
Paul E. McKenney336a4f62018-07-03 17:22:34 -07001692 struct rcu_node *rnp = rcu_get_root();
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001693
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001694 WRITE_ONCE(rcu_state.gp_activity, jiffies);
Peter Zijlstra2a67e742015-10-08 12:24:23 +02001695 raw_spin_lock_irq_rcu_node(rnp);
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001696 if (!READ_ONCE(rcu_state.gp_flags)) {
Paul E. McKenneyf7be8202013-08-08 18:27:52 -07001697 /* Spurious wakeup, tell caller to go back to sleep. */
Boqun Feng67c583a72015-12-29 12:18:47 +08001698 raw_spin_unlock_irq_rcu_node(rnp);
Paul E. McKenney45fed3e2015-11-07 23:35:00 -08001699 return false;
Paul E. McKenneyf7be8202013-08-08 18:27:52 -07001700 }
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001701 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001702
Paul E. McKenneyde8e8732018-07-03 17:22:34 -07001703 if (WARN_ON_ONCE(rcu_gp_in_progress())) {
Paul E. McKenneyf7be8202013-08-08 18:27:52 -07001704 /*
1705 * Grace period already in progress, don't start another.
1706 * Not supposed to be able to happen.
1707 */
Boqun Feng67c583a72015-12-29 12:18:47 +08001708 raw_spin_unlock_irq_rcu_node(rnp);
Paul E. McKenney45fed3e2015-11-07 23:35:00 -08001709 return false;
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001710 }
1711
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001712 /* Advance to a new grace period and initialize state. */
Paul E. McKenneyad3832e2018-07-03 17:22:34 -07001713 record_gp_stall_check_time();
Paul E. McKenneyff3bb6f2018-05-01 14:34:08 -07001714 /* Record GP times before starting GP, hence rcu_seq_start(). */
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001715 rcu_seq_start(&rcu_state.gp_seq);
Paul E. McKenney62ae1952020-03-21 19:52:20 -07001716 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001717 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
Boqun Feng67c583a72015-12-29 12:18:47 +08001718 raw_spin_unlock_irq_rcu_node(rnp);
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001719
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001720 /*
Paul E. McKenney0aa04b02015-01-23 21:52:37 -08001721 * Apply per-leaf buffered online and offline operations to the
1722 * rcu_node tree. Note that this new grace period need not wait
1723 * for subsequent online CPUs, and that quiescent-state forcing
1724 * will handle subsequent offline CPUs.
1725 */
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001726 rcu_state.gp_state = RCU_GP_ONOFF;
Paul E. McKenneyaedf4ba2018-07-04 14:33:59 -07001727 rcu_for_each_leaf_node(rnp) {
Mike Galbraith894d45b2018-08-15 09:05:29 -07001728 raw_spin_lock(&rcu_state.ofl_lock);
Peter Zijlstra2a67e742015-10-08 12:24:23 +02001729 raw_spin_lock_irq_rcu_node(rnp);
Paul E. McKenney0aa04b02015-01-23 21:52:37 -08001730 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1731 !rnp->wait_blkd_tasks) {
1732 /* Nothing to do on this leaf rcu_node structure. */
Boqun Feng67c583a72015-12-29 12:18:47 +08001733 raw_spin_unlock_irq_rcu_node(rnp);
Mike Galbraith894d45b2018-08-15 09:05:29 -07001734 raw_spin_unlock(&rcu_state.ofl_lock);
Paul E. McKenney0aa04b02015-01-23 21:52:37 -08001735 continue;
1736 }
1737
1738 /* Record old state, apply changes to ->qsmaskinit field. */
1739 oldmask = rnp->qsmaskinit;
1740 rnp->qsmaskinit = rnp->qsmaskinitnext;
1741
1742 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1743 if (!oldmask != !rnp->qsmaskinit) {
Paul E. McKenney962aff02018-05-02 12:49:21 -07001744 if (!oldmask) { /* First online CPU for rcu_node. */
1745 if (!rnp->wait_blkd_tasks) /* Ever offline? */
1746 rcu_init_new_rnp(rnp);
1747 } else if (rcu_preempt_has_tasks(rnp)) {
1748 rnp->wait_blkd_tasks = true; /* blocked tasks */
1749 } else { /* Last offline CPU and can propagate. */
Paul E. McKenney0aa04b02015-01-23 21:52:37 -08001750 rcu_cleanup_dead_rnp(rnp);
Paul E. McKenney962aff02018-05-02 12:49:21 -07001751 }
Paul E. McKenney0aa04b02015-01-23 21:52:37 -08001752 }
1753
1754 /*
1755 * If all waited-on tasks from prior grace period are
1756 * done, and if all this rcu_node structure's CPUs are
1757 * still offline, propagate up the rcu_node tree and
1758 * clear ->wait_blkd_tasks. Otherwise, if one of this
1759 * rcu_node structure's CPUs has since come back online,
Paul E. McKenney962aff02018-05-02 12:49:21 -07001760 * simply clear ->wait_blkd_tasks.
Paul E. McKenney0aa04b02015-01-23 21:52:37 -08001761 */
1762 if (rnp->wait_blkd_tasks &&
Paul E. McKenney962aff02018-05-02 12:49:21 -07001763 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
Paul E. McKenney0aa04b02015-01-23 21:52:37 -08001764 rnp->wait_blkd_tasks = false;
Paul E. McKenney962aff02018-05-02 12:49:21 -07001765 if (!rnp->qsmaskinit)
1766 rcu_cleanup_dead_rnp(rnp);
Paul E. McKenney0aa04b02015-01-23 21:52:37 -08001767 }
1768
Boqun Feng67c583a72015-12-29 12:18:47 +08001769 raw_spin_unlock_irq_rcu_node(rnp);
Mike Galbraith894d45b2018-08-15 09:05:29 -07001770 raw_spin_unlock(&rcu_state.ofl_lock);
Paul E. McKenney0aa04b02015-01-23 21:52:37 -08001771 }
Paul E. McKenney22212332018-07-03 17:22:34 -07001772 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001773
1774 /*
1775 * Set the quiescent-state-needed bits in all the rcu_node
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001776 * structures for all currently online CPUs in breadth-first
1777 * order, starting from the root rcu_node structure, relying on the
1778 * layout of the tree within the rcu_state.node[] array. Note that
1779 * other CPUs will access only the leaves of the hierarchy, thus
1780 * seeing that no grace period is in progress, at least until the
1781 * corresponding leaf node has been initialized.
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001782 *
1783 * The grace period cannot complete until the initialization
1784 * process finishes, because this kthread handles both.
1785 */
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001786 rcu_state.gp_state = RCU_GP_INIT;
Paul E. McKenneyaedf4ba2018-07-04 14:33:59 -07001787 rcu_for_each_node_breadth_first(rnp) {
Paul E. McKenney22212332018-07-03 17:22:34 -07001788 rcu_gp_slow(gp_init_delay);
Paul E. McKenneyec2c2972018-05-07 09:34:17 -07001789 raw_spin_lock_irqsave_rcu_node(rnp, flags);
Paul E. McKenneyda1df502018-07-03 15:37:16 -07001790 rdp = this_cpu_ptr(&rcu_data);
Paul E. McKenney81ab59a2018-07-03 17:22:34 -07001791 rcu_preempt_check_blocked_tasks(rnp);
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001792 rnp->qsmask = rnp->qsmaskinit;
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001793 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001794 if (rnp == rdp->mynode)
Paul E. McKenneyc7e48f72018-07-03 17:22:34 -07001795 (void)__note_gp_changes(rnp, rdp);
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001796 rcu_preempt_boost_start_gp(rnp);
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001797 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001798 rnp->level, rnp->grplo,
1799 rnp->grphi, rnp->qsmask);
Paul E. McKenneyec2c2972018-05-07 09:34:17 -07001800 /* Quiescent states for tasks on any now-offline CPUs. */
1801 mask = rnp->qsmask & ~rnp->qsmaskinitnext;
Paul E. McKenneyf2e2df52018-05-15 16:23:23 -07001802 rnp->rcu_gp_init_mask = mask;
Paul E. McKenneyec2c2972018-05-07 09:34:17 -07001803 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
Paul E. McKenneyb50912d2018-07-03 17:22:34 -07001804 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
Paul E. McKenneyec2c2972018-05-07 09:34:17 -07001805 else
1806 raw_spin_unlock_irq_rcu_node(rnp);
Paul E. McKenneycee43932018-03-02 16:35:27 -08001807 cond_resched_tasks_rcu_qs();
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001808 WRITE_ONCE(rcu_state.gp_activity, jiffies);
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001809 }
1810
Paul E. McKenney45fed3e2015-11-07 23:35:00 -08001811 return true;
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001812}
1813
1814/*
Peter Zijlstrab3dae102018-06-12 10:34:52 +02001815 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
Luis R. Rodriguezd5374222017-06-20 14:45:47 -07001816 * time.
Paul E. McKenneyb9a425c2015-07-01 13:50:28 -07001817 */
Paul E. McKenney0854a05c2018-07-03 17:22:34 -07001818static bool rcu_gp_fqs_check_wake(int *gfp)
Paul E. McKenneyb9a425c2015-07-01 13:50:28 -07001819{
Paul E. McKenney336a4f62018-07-03 17:22:34 -07001820 struct rcu_node *rnp = rcu_get_root();
Paul E. McKenneyb9a425c2015-07-01 13:50:28 -07001821
Paul E. McKenney1fca4d12020-02-22 20:07:09 -08001822 // If under overload conditions, force an immediate FQS scan.
1823 if (*gfp & RCU_GP_FLAG_OVLD)
1824 return true;
1825
1826 // Someone like call_rcu() requested a force-quiescent-state scan.
Paul E. McKenney0854a05c2018-07-03 17:22:34 -07001827 *gfp = READ_ONCE(rcu_state.gp_flags);
Paul E. McKenneyb9a425c2015-07-01 13:50:28 -07001828 if (*gfp & RCU_GP_FLAG_FQS)
1829 return true;
1830
Paul E. McKenney1fca4d12020-02-22 20:07:09 -08001831 // The current grace period has completed.
Paul E. McKenneyb9a425c2015-07-01 13:50:28 -07001832 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1833 return true;
1834
1835 return false;
1836}
1837
1838/*
Paul E. McKenney4cdfc1752012-06-22 17:06:26 -07001839 * Do one round of quiescent-state forcing.
1840 */
Paul E. McKenney0854a05c2018-07-03 17:22:34 -07001841static void rcu_gp_fqs(bool first_time)
Paul E. McKenney4cdfc1752012-06-22 17:06:26 -07001842{
Paul E. McKenney336a4f62018-07-03 17:22:34 -07001843 struct rcu_node *rnp = rcu_get_root();
Paul E. McKenney4cdfc1752012-06-22 17:06:26 -07001844
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001845 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1846 rcu_state.n_force_qs++;
Petr Mladek77f81fe2015-09-09 12:09:49 -07001847 if (first_time) {
Paul E. McKenney4cdfc1752012-06-22 17:06:26 -07001848 /* Collect dyntick-idle snapshots. */
Paul E. McKenneye9ecb782018-07-03 17:22:34 -07001849 force_qs_rnp(dyntick_save_progress_counter);
Paul E. McKenney4cdfc1752012-06-22 17:06:26 -07001850 } else {
1851 /* Handle dyntick-idle and offline CPUs. */
Paul E. McKenneye9ecb782018-07-03 17:22:34 -07001852 force_qs_rnp(rcu_implicit_dynticks_qs);
Paul E. McKenney4cdfc1752012-06-22 17:06:26 -07001853 }
1854 /* Clear flag to prevent immediate re-entry. */
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001855 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
Peter Zijlstra2a67e742015-10-08 12:24:23 +02001856 raw_spin_lock_irq_rcu_node(rnp);
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001857 WRITE_ONCE(rcu_state.gp_flags,
1858 READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
Boqun Feng67c583a72015-12-29 12:18:47 +08001859 raw_spin_unlock_irq_rcu_node(rnp);
Paul E. McKenney4cdfc1752012-06-22 17:06:26 -07001860 }
Paul E. McKenney4cdfc1752012-06-22 17:06:26 -07001861}
1862
1863/*
Paul E. McKenneyc3854a02018-07-05 18:23:23 -07001864 * Loop doing repeated quiescent-state forcing until the grace period ends.
1865 */
1866static void rcu_gp_fqs_loop(void)
1867{
1868 bool first_gp_fqs;
Paul E. McKenney1fca4d12020-02-22 20:07:09 -08001869 int gf = 0;
Paul E. McKenneyc3854a02018-07-05 18:23:23 -07001870 unsigned long j;
1871 int ret;
1872 struct rcu_node *rnp = rcu_get_root();
1873
1874 first_gp_fqs = true;
Paul E. McKenneyc06aed02018-07-25 11:25:23 -07001875 j = READ_ONCE(jiffies_till_first_fqs);
Paul E. McKenney1fca4d12020-02-22 20:07:09 -08001876 if (rcu_state.cbovld)
1877 gf = RCU_GP_FLAG_OVLD;
Paul E. McKenneyc3854a02018-07-05 18:23:23 -07001878 ret = 0;
1879 for (;;) {
1880 if (!ret) {
1881 rcu_state.jiffies_force_qs = jiffies + j;
1882 WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
Paul E. McKenney9cf422a2018-11-20 10:43:34 -08001883 jiffies + (j ? 3 * j : 2));
Paul E. McKenneyc3854a02018-07-05 18:23:23 -07001884 }
Paul E. McKenney0f11ad32020-02-10 09:58:37 -08001885 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
Paul E. McKenneyc3854a02018-07-05 18:23:23 -07001886 TPS("fqswait"));
1887 rcu_state.gp_state = RCU_GP_WAIT_FQS;
1888 ret = swait_event_idle_timeout_exclusive(
1889 rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
Paul E. McKenney55b2dcf2020-04-01 19:57:52 -07001890 rcu_gp_torture_wait();
Paul E. McKenneyc3854a02018-07-05 18:23:23 -07001891 rcu_state.gp_state = RCU_GP_DOING_FQS;
1892 /* Locking provides needed memory barriers. */
1893 /* If grace period done, leave loop. */
1894 if (!READ_ONCE(rnp->qsmask) &&
1895 !rcu_preempt_blocked_readers_cgp(rnp))
1896 break;
1897 /* If time for quiescent-state forcing, do it. */
Paul E. McKenney29ffebc2020-04-10 14:48:20 -07001898 if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
Paul E. McKenneyc3854a02018-07-05 18:23:23 -07001899 (gf & RCU_GP_FLAG_FQS)) {
Paul E. McKenney0f11ad32020-02-10 09:58:37 -08001900 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
Paul E. McKenneyc3854a02018-07-05 18:23:23 -07001901 TPS("fqsstart"));
1902 rcu_gp_fqs(first_gp_fqs);
Paul E. McKenney1fca4d12020-02-22 20:07:09 -08001903 gf = 0;
1904 if (first_gp_fqs) {
1905 first_gp_fqs = false;
1906 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
1907 }
Paul E. McKenney0f11ad32020-02-10 09:58:37 -08001908 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
Paul E. McKenneyc3854a02018-07-05 18:23:23 -07001909 TPS("fqsend"));
1910 cond_resched_tasks_rcu_qs();
1911 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1912 ret = 0; /* Force full wait till next FQS. */
Paul E. McKenneyc06aed02018-07-25 11:25:23 -07001913 j = READ_ONCE(jiffies_till_next_fqs);
Paul E. McKenneyc3854a02018-07-05 18:23:23 -07001914 } else {
1915 /* Deal with stray signal. */
1916 cond_resched_tasks_rcu_qs();
1917 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1918 WARN_ON(signal_pending(current));
Paul E. McKenney0f11ad32020-02-10 09:58:37 -08001919 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
Paul E. McKenneyc3854a02018-07-05 18:23:23 -07001920 TPS("fqswaitsig"));
1921 ret = 1; /* Keep old FQS timing. */
1922 j = jiffies;
1923 if (time_after(jiffies, rcu_state.jiffies_force_qs))
1924 j = 1;
1925 else
1926 j = rcu_state.jiffies_force_qs - j;
Paul E. McKenney1fca4d12020-02-22 20:07:09 -08001927 gf = 0;
Paul E. McKenneyc3854a02018-07-05 18:23:23 -07001928 }
1929 }
1930}
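
/*
 * Illustrative timing sketch for the loop above (the value of j is
 * assumed for illustration, not necessarily this build's default):
 * if j is 3 jiffies, each pass arms rcu_state.jiffies_force_qs at
 * jiffies + 3 and the kthread-starvation check at jiffies + 9 (3 * j).
 * A wakeup that finds RCU_GP_FLAG_FQS set forces the scan before the
 * timeout expires; otherwise the swait times out and quiescent-state
 * forcing proceeds on schedule.
 */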
1931
1932/*
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001933 * Clean up after the old grace period.
1934 */
Paul E. McKenney0854a05c2018-07-03 17:22:34 -07001935static void rcu_gp_cleanup(void)
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001936{
Paul E. McKenneyb2b00dd2019-10-30 11:56:10 -07001937 int cpu;
Paul E. McKenney48a76392014-03-11 13:02:16 -07001938 bool needgp = false;
Paul E. McKenneyb2b00dd2019-10-30 11:56:10 -07001939 unsigned long gp_duration;
Paul E. McKenneyde30ad52018-04-26 11:52:09 -07001940 unsigned long new_gp_seq;
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07001941 bool offloaded;
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001942 struct rcu_data *rdp;
Paul E. McKenney336a4f62018-07-03 17:22:34 -07001943 struct rcu_node *rnp = rcu_get_root();
Paul Gortmakerabedf8e2016-02-19 09:46:41 +01001944 struct swait_queue_head *sq;
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001945
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001946 WRITE_ONCE(rcu_state.gp_activity, jiffies);
Peter Zijlstra2a67e742015-10-08 12:24:23 +02001947 raw_spin_lock_irq_rcu_node(rnp);
Paul E. McKenneyc51d7b52018-10-03 17:25:33 -07001948 rcu_state.gp_end = jiffies;
1949 gp_duration = rcu_state.gp_end - rcu_state.gp_start;
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001950 if (gp_duration > rcu_state.gp_max)
1951 rcu_state.gp_max = gp_duration;
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001952
1953 /*
 1954	 * We know the grace period is complete, but to everyone else
 1955	 * it appears to still be ongoing.  However, it is also the
 1956	 * case that no other CPU can do anything to advance the
 1957	 * grace period, so it is safe for us to drop the lock in
 1958	 * order to mark the grace period as completed in all of
 1959	 * the rcu_node structures.
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001960 */
Boqun Feng67c583a72015-12-29 12:18:47 +08001961 raw_spin_unlock_irq_rcu_node(rnp);
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001962
Paul E. McKenney5d4b8652012-07-07 07:56:57 -07001963 /*
Paul E. McKenneyff3bb6f2018-05-01 14:34:08 -07001964 * Propagate new ->gp_seq value to rcu_node structures so that
1965 * other CPUs don't have to wait until the start of the next grace
1966 * period to process their callbacks. This also avoids some nasty
1967 * RCU grace-period initialization races by forcing the end of
1968 * the current grace period to be completely recorded in all of
1969 * the rcu_node structures before the beginning of the next grace
1970 * period is recorded in any of the rcu_node structures.
Paul E. McKenney5d4b8652012-07-07 07:56:57 -07001971 */
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001972 new_gp_seq = rcu_state.gp_seq;
Paul E. McKenneyde30ad52018-04-26 11:52:09 -07001973 rcu_seq_end(&new_gp_seq);
Paul E. McKenneyaedf4ba2018-07-04 14:33:59 -07001974 rcu_for_each_node_breadth_first(rnp) {
Peter Zijlstra2a67e742015-10-08 12:24:23 +02001975 raw_spin_lock_irq_rcu_node(rnp);
Paul E. McKenney4bc8d552017-11-27 15:13:56 -08001976 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
Paul E. McKenney81ab59a2018-07-03 17:22:34 -07001977 dump_blkd_tasks(rnp, 10);
Paul E. McKenney5c60d252015-02-09 05:37:47 -08001978 WARN_ON_ONCE(rnp->qsmask);
Paul E. McKenneyde30ad52018-04-26 11:52:09 -07001979 WRITE_ONCE(rnp->gp_seq, new_gp_seq);
Paul E. McKenneyda1df502018-07-03 15:37:16 -07001980 rdp = this_cpu_ptr(&rcu_data);
Paul E. McKenneyb11cc572012-12-17 14:21:14 -08001981 if (rnp == rdp->mynode)
Paul E. McKenneyc7e48f72018-07-03 17:22:34 -07001982 needgp = __note_gp_changes(rnp, rdp) || needgp;
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07001983 /* smp_mb() provided by prior unlock-lock pair. */
Paul E. McKenney3481f2e2018-07-03 17:22:34 -07001984 needgp = rcu_future_gp_cleanup(rnp) || needgp;
Paul E. McKenneyb2b00dd2019-10-30 11:56:10 -07001985 // Reset overload indication for CPUs no longer overloaded
1986 if (rcu_is_leaf_node(rnp))
1987 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
1988 rdp = per_cpu_ptr(&rcu_data, cpu);
1989 check_cb_ovld_locked(rdp, rnp);
1990 }
Daniel Wagner065bb782016-02-19 09:46:40 +01001991 sq = rcu_nocb_gp_get(rnp);
Boqun Feng67c583a72015-12-29 12:18:47 +08001992 raw_spin_unlock_irq_rcu_node(rnp);
Daniel Wagner065bb782016-02-19 09:46:40 +01001993 rcu_nocb_gp_cleanup(sq);
Paul E. McKenneycee43932018-03-02 16:35:27 -08001994 cond_resched_tasks_rcu_qs();
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001995 WRITE_ONCE(rcu_state.gp_activity, jiffies);
Paul E. McKenney22212332018-07-03 17:22:34 -07001996 rcu_gp_slow(gp_cleanup_delay);
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07001997 }
Paul E. McKenney336a4f62018-07-03 17:22:34 -07001998 rnp = rcu_get_root();
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07001999 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07002000
Paul E. McKenney0a89e5a2018-10-15 10:00:58 -07002001 /* Declare grace period done, trace first to use old GP number. */
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07002002 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
Paul E. McKenney0a89e5a2018-10-15 10:00:58 -07002003 rcu_seq_end(&rcu_state.gp_seq);
Paul E. McKenney62ae1952020-03-21 19:52:20 -07002004 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07002005 rcu_state.gp_state = RCU_GP_IDLE;
Paul E. McKenneyfb313402018-04-12 07:20:30 -07002006 /* Check for GP requests since above loop. */
Paul E. McKenneyda1df502018-07-03 15:37:16 -07002007 rdp = this_cpu_ptr(&rcu_data);
Joel Fernandes (Google)5b550722018-05-13 20:15:40 -07002008 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
Paul E. McKenneyabd13fd2018-05-01 13:08:46 -07002009 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
Paul E. McKenney41e80592018-04-12 11:24:09 -07002010 TPS("CleanupMore"));
Paul E. McKenneyfb313402018-04-12 07:20:30 -07002011 needgp = true;
2012 }
Paul E. McKenney48a76392014-03-11 13:02:16 -07002013 /* Advance CBs to reduce false positives below. */
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07002014 offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2015 rcu_segcblist_is_offloaded(&rdp->cblist);
2016 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07002017 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
Paul E. McKenney2906d212020-01-03 15:17:12 -08002018 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07002019 trace_rcu_grace_period(rcu_state.name,
Paul E. McKenney0f11ad32020-02-10 09:58:37 -08002020 rcu_state.gp_seq,
Paul E. McKenneybb311ec2013-08-09 16:02:09 -07002021 TPS("newreq"));
Paul E. McKenney18390ae2018-04-22 15:06:05 -07002022 } else {
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07002023 WRITE_ONCE(rcu_state.gp_flags,
2024 rcu_state.gp_flags & RCU_GP_FLAG_INIT);
Paul E. McKenneybb311ec2013-08-09 16:02:09 -07002025 }
Boqun Feng67c583a72015-12-29 12:18:47 +08002026 raw_spin_unlock_irq_rcu_node(rnp);
Paul E. McKenney7fdefc12012-06-22 11:08:41 -07002027}
2028
2029/*
Paul E. McKenneyb3dbec72012-06-18 18:36:08 -07002030 * Body of kthread that handles grace periods.
2031 */
Paul E. McKenney0854a05c2018-07-03 17:22:34 -07002032static int __noreturn rcu_gp_kthread(void *unused)
Paul E. McKenneyb3dbec72012-06-18 18:36:08 -07002033{
Paul E. McKenney58719682015-02-24 11:05:36 -08002034 rcu_bind_gp_kthread();
Paul E. McKenneyb3dbec72012-06-18 18:36:08 -07002035 for (;;) {
2036
2037 /* Handle grace-period start. */
Paul E. McKenneyb3dbec72012-06-18 18:36:08 -07002038 for (;;) {
Paul E. McKenney0f11ad32020-02-10 09:58:37 -08002039 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
Paul E. McKenney63c4db72013-08-09 12:19:29 -07002040 TPS("reqwait"));
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07002041 rcu_state.gp_state = RCU_GP_WAIT_GPS;
2042 swait_event_idle_exclusive(rcu_state.gp_wq,
2043 READ_ONCE(rcu_state.gp_flags) &
2044 RCU_GP_FLAG_INIT);
Paul E. McKenney55b2dcf2020-04-01 19:57:52 -07002045 rcu_gp_torture_wait();
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07002046 rcu_state.gp_state = RCU_GP_DONE_GPS;
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07002047 /* Locking provides needed memory barrier. */
Paul E. McKenney0854a05c2018-07-03 17:22:34 -07002048 if (rcu_gp_init())
Paul E. McKenneyb3dbec72012-06-18 18:36:08 -07002049 break;
Paul E. McKenneycee43932018-03-02 16:35:27 -08002050 cond_resched_tasks_rcu_qs();
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07002051 WRITE_ONCE(rcu_state.gp_activity, jiffies);
Paul E. McKenney73a860c2014-08-14 10:28:23 -07002052 WARN_ON(signal_pending(current));
Paul E. McKenney0f11ad32020-02-10 09:58:37 -08002053 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
Paul E. McKenney63c4db72013-08-09 12:19:29 -07002054 TPS("reqwaitsig"));
Paul E. McKenneyb3dbec72012-06-18 18:36:08 -07002055 }
Paul E. McKenneycabc49c2012-06-20 17:07:14 -07002056
Paul E. McKenney4cdfc1752012-06-22 17:06:26 -07002057 /* Handle quiescent-state forcing. */
Paul E. McKenneyc3854a02018-07-05 18:23:23 -07002058 rcu_gp_fqs_loop();
Paul E. McKenney4cdfc1752012-06-22 17:06:26 -07002059
2060 /* Handle grace-period end. */
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07002061 rcu_state.gp_state = RCU_GP_CLEANUP;
Paul E. McKenney0854a05c2018-07-03 17:22:34 -07002062 rcu_gp_cleanup();
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07002063 rcu_state.gp_state = RCU_GP_CLEANED;
Paul E. McKenneyb3dbec72012-06-18 18:36:08 -07002064 }
Paul E. McKenneyb3dbec72012-06-18 18:36:08 -07002065}
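
/*
 * Sketch of the state flow driven above, using the gp_state values that
 * appear in this function and its helpers: RCU_GP_WAIT_GPS (awaiting a
 * request) -> rcu_gp_init() -> RCU_GP_WAIT_FQS / RCU_GP_DOING_FQS
 * (repeated by rcu_gp_fqs_loop()) -> RCU_GP_CLEANUP -> RCU_GP_CLEANED,
 * and then back to waiting for the next RCU_GP_FLAG_INIT request.
 */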
2066
2067/*
Paul E. McKenney49918a52018-07-07 18:12:26 -07002068 * Report a full set of quiescent states to the rcu_state data structure.
2069 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2070 * another grace period is required. Whether we wake the grace-period
2071 * kthread or it awakens itself for the next round of quiescent-state
2072 * forcing, that kthread will clean up after the just-completed grace
2073 * period. Note that the caller must hold rnp->lock, which is released
2074 * before return.
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07002075 */
Paul E. McKenneyaff4e9e2018-07-03 17:22:34 -07002076static void rcu_report_qs_rsp(unsigned long flags)
Paul E. McKenney336a4f62018-07-03 17:22:34 -07002077 __releases(rcu_get_root()->lock)
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07002078{
Paul E. McKenney336a4f62018-07-03 17:22:34 -07002079 raw_lockdep_assert_held_rcu_node(rcu_get_root());
Paul E. McKenneyde8e8732018-07-03 17:22:34 -07002080 WARN_ON_ONCE(!rcu_gp_in_progress());
Paul E. McKenney9cbc5b92018-07-05 15:47:01 -07002081 WRITE_ONCE(rcu_state.gp_flags,
2082 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
Paul E. McKenney336a4f62018-07-03 17:22:34 -07002083 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
Paul E. McKenney532c00c2018-07-03 17:22:34 -07002084 rcu_gp_kthread_wake();
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07002085}
2086
2087/*
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -08002088 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2089 * Allows quiescent states for a group of CPUs to be reported at one go
2090 * to the specified rcu_node structure, though all the CPUs in the group
Paul E. McKenney654e9532015-03-15 09:19:35 -07002091 * must be represented by the same rcu_node structure (which need not be a
2092 * leaf rcu_node structure, though it often will be). The gps parameter
2093 * is the grace-period snapshot, which means that the quiescent states
Paul E. McKenneyc9a24e22018-04-27 14:54:46 -07002094 * are valid only if rnp->gp_seq is equal to gps. That structure's lock
Paul E. McKenney654e9532015-03-15 09:19:35 -07002095 * must be held upon entry, and it is released before return.
Paul E. McKenneyec2c2972018-05-07 09:34:17 -07002096 *
2097 * As a special case, if mask is zero, the bit-already-cleared check is
2098 * disabled. This allows propagating quiescent state due to resumed tasks
2099 * during grace-period initialization.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002100 */
Paul E. McKenneyb50912d2018-07-03 17:22:34 -07002101static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
2102 unsigned long gps, unsigned long flags)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002103 __releases(rnp->lock)
2104{
Paul E. McKenney654e9532015-03-15 09:19:35 -07002105 unsigned long oldmask = 0;
Paul E. McKenney28ecd582009-09-18 09:50:17 -07002106 struct rcu_node *rnp_c;
2107
Matthew Wilcoxa32e01e2018-01-17 06:24:30 -08002108 raw_lockdep_assert_held_rcu_node(rnp);
Paul E. McKenneyc0b334c2017-04-28 12:32:15 -07002109
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002110 /* Walk up the rcu_node hierarchy. */
2111 for (;;) {
Paul E. McKenneyec2c2972018-05-07 09:34:17 -07002112 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002113
Paul E. McKenney654e9532015-03-15 09:19:35 -07002114 /*
2115 * Our bit has already been cleared, or the
2116 * relevant grace period is already over, so done.
2117 */
Boqun Feng67c583a72015-12-29 12:18:47 +08002118 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002119 return;
2120 }
Paul E. McKenney654e9532015-03-15 09:19:35 -07002121 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
Paul E. McKenney5b4c11d2018-04-13 17:11:44 -07002122 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
Paul E. McKenney2dee9402017-07-11 21:52:31 -07002123 rcu_preempt_blocked_readers_cgp(rnp));
Paul E. McKenney7672d642020-01-03 11:38:51 -08002124 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
Paul E. McKenney67a0edb2018-07-05 16:15:38 -07002125 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
Paul E. McKenneyd4c08f22011-06-25 06:36:56 -07002126 mask, rnp->qsmask, rnp->level,
2127 rnp->grplo, rnp->grphi,
2128 !!rnp->gp_tasks);
Paul E. McKenney27f4d282011-02-07 12:47:15 -08002129 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002130
2131 /* Other bits still set at this level, so done. */
Boqun Feng67c583a72015-12-29 12:18:47 +08002132 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002133 return;
2134 }
Paul E. McKenneyd43a5d32018-04-28 18:50:06 -07002135 rnp->completedqs = rnp->gp_seq;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002136 mask = rnp->grpmask;
2137 if (rnp->parent == NULL) {
2138
2139 /* No more levels. Exit loop holding root lock. */
2140
2141 break;
2142 }
Boqun Feng67c583a72015-12-29 12:18:47 +08002143 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
Paul E. McKenney28ecd582009-09-18 09:50:17 -07002144 rnp_c = rnp;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002145 rnp = rnp->parent;
Peter Zijlstra2a67e742015-10-08 12:24:23 +02002146 raw_spin_lock_irqsave_rcu_node(rnp, flags);
Paul E. McKenney0937d042020-01-03 14:53:31 -08002147 oldmask = READ_ONCE(rnp_c->qsmask);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002148 }
2149
2150 /*
2151 * Get here if we are the last CPU to pass through a quiescent
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -08002152 * state for this grace period. Invoke rcu_report_qs_rsp()
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07002153 * to clean up and start the next grace period if one is needed.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002154 */
Paul E. McKenneyaff4e9e2018-07-03 17:22:34 -07002155 rcu_report_qs_rsp(flags); /* releases rnp->lock. */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002156}
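
/*
 * Example of the upward propagation above (tree shape assumed for
 * illustration): on a two-level tree, the last CPU of a leaf rcu_node
 * to report clears that leaf's final ->qsmask bit, so the loop ascends
 * and clears the leaf's ->grpmask bit in the root's ->qsmask.  Once the
 * root's ->qsmask reaches zero and no tasks are blocking the grace
 * period, rcu_report_qs_rsp() is invoked to wrap up the grace period.
 */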
2157
2158/*
Paul E. McKenneycc99a312015-02-23 08:59:29 -08002159 * Record a quiescent state for all tasks that were previously queued
2160 * on the specified rcu_node structure and that were blocking the current
Paul E. McKenney49918a52018-07-07 18:12:26 -07002161 * RCU grace period. The caller must hold the corresponding rnp->lock with
Paul E. McKenneycc99a312015-02-23 08:59:29 -08002162 * irqs disabled, and this lock is released upon return, but irqs remain
2163 * disabled.
2164 */
Paul E. McKenney17a82122018-05-03 14:30:02 -07002165static void __maybe_unused
Paul E. McKenney139ad4d2018-07-03 17:22:34 -07002166rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
Paul E. McKenneycc99a312015-02-23 08:59:29 -08002167 __releases(rnp->lock)
2168{
Paul E. McKenney654e9532015-03-15 09:19:35 -07002169 unsigned long gps;
Paul E. McKenneycc99a312015-02-23 08:59:29 -08002170 unsigned long mask;
2171 struct rcu_node *rnp_p;
2172
Matthew Wilcoxa32e01e2018-01-17 06:24:30 -08002173 raw_lockdep_assert_held_rcu_node(rnp);
Lai Jiangshanc130d2d2019-10-15 10:28:48 +00002174 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
Paul E. McKenneyc74859d2018-04-27 14:05:27 -07002175 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
2176 rnp->qsmask != 0) {
Boqun Feng67c583a72015-12-29 12:18:47 +08002177 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
Paul E. McKenneycc99a312015-02-23 08:59:29 -08002178 return; /* Still need more quiescent states! */
2179 }
2180
Paul E. McKenney77cfc7b2018-05-01 15:00:10 -07002181 rnp->completedqs = rnp->gp_seq;
Paul E. McKenneycc99a312015-02-23 08:59:29 -08002182 rnp_p = rnp->parent;
2183 if (rnp_p == NULL) {
2184 /*
Paul E. McKenneya77da142015-03-08 14:52:27 -07002185 * Only one rcu_node structure in the tree, so don't
2186 * try to report up to its nonexistent parent!
Paul E. McKenneycc99a312015-02-23 08:59:29 -08002187 */
Paul E. McKenneyaff4e9e2018-07-03 17:22:34 -07002188 rcu_report_qs_rsp(flags);
Paul E. McKenneycc99a312015-02-23 08:59:29 -08002189 return;
2190 }
2191
Paul E. McKenneyc9a24e22018-04-27 14:54:46 -07002192 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
2193 gps = rnp->gp_seq;
Paul E. McKenneycc99a312015-02-23 08:59:29 -08002194 mask = rnp->grpmask;
Boqun Feng67c583a72015-12-29 12:18:47 +08002195 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
Peter Zijlstra2a67e742015-10-08 12:24:23 +02002196 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */
Paul E. McKenneyb50912d2018-07-03 17:22:34 -07002197 rcu_report_qs_rnp(mask, rnp_p, gps, flags);
Paul E. McKenneycc99a312015-02-23 08:59:29 -08002198}
2199
2200/*
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -08002201 * Record a quiescent state for the specified CPU to that CPU's rcu_data
Paul E. McKenney4b455dc2016-01-27 22:44:45 -08002202 * structure. This must be called from the specified CPU.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002203 */
2204static void
Paul E. McKenney33085c42018-07-03 17:22:34 -07002205rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002206{
2207 unsigned long flags;
2208 unsigned long mask;
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07002209 bool needwake = false;
2210 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2211 rcu_segcblist_is_offloaded(&rdp->cblist);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002212 struct rcu_node *rnp;
2213
2214 rnp = rdp->mynode;
Peter Zijlstra2a67e742015-10-08 12:24:23 +02002215 raw_spin_lock_irqsave_rcu_node(rnp, flags);
Paul E. McKenneyc9a24e22018-04-27 14:54:46 -07002216 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2217 rdp->gpwrap) {
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002218
2219 /*
Paul E. McKenneye4cc1f22011-06-27 00:17:43 -07002220 * The grace period in which this quiescent state was
2221 * recorded has ended, so don't report it upwards.
2222 * We will instead need a new quiescent state that lies
2223 * within the current grace period.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002224 */
Paul E. McKenney5b74c452015-08-06 15:16:57 -07002225 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
Boqun Feng67c583a72015-12-29 12:18:47 +08002226 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002227 return;
2228 }
2229 mask = rdp->grpmask;
Paul E. McKenneyb5ea0372019-12-09 15:19:45 -08002230 if (rdp->cpu == smp_processor_id())
2231 rdp->core_needs_qs = false;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002232 if ((rnp->qsmask & mask) == 0) {
Boqun Feng67c583a72015-12-29 12:18:47 +08002233 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002234 } else {
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002235 /*
 2236	 * This GP can't end until this CPU checks in, so all of our
2237 * callbacks can be processed during the next GP.
2238 */
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07002239 if (!offloaded)
2240 needwake = rcu_accelerate_cbs(rnp, rdp);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002241
Joel Fernandes (Google)516e5ae2019-09-05 10:26:41 -07002242 rcu_disable_urgency_upon_qs(rdp);
Paul E. McKenneyb50912d2018-07-03 17:22:34 -07002243 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
Paul E. McKenney654e9532015-03-15 09:19:35 -07002244 /* ^^^ Released rnp->lock */
Paul E. McKenney48a76392014-03-11 13:02:16 -07002245 if (needwake)
Paul E. McKenney532c00c2018-07-03 17:22:34 -07002246 rcu_gp_kthread_wake();
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002247 }
2248}
2249
2250/*
2251 * Check to see if there is a new grace period of which this CPU
2252 * is not yet aware, and if so, set up local rcu_data state for it.
2253 * Otherwise, see if this CPU has just passed through its first
2254 * quiescent state for this grace period, and record that fact if so.
2255 */
2256static void
Paul E. McKenney8087d3e2018-07-03 17:22:34 -07002257rcu_check_quiescent_state(struct rcu_data *rdp)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002258{
Paul E. McKenney05eb5522013-03-19 12:38:24 -07002259 /* Check for grace-period ends and beginnings. */
Paul E. McKenney15cabdf2018-07-03 17:22:34 -07002260 note_gp_changes(rdp);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002261
2262 /*
2263 * Does this CPU still need to do its part for current grace period?
2264 * If no, return and let the other CPUs do their part as well.
2265 */
Paul E. McKenney97c668b2015-08-06 11:31:51 -07002266 if (!rdp->core_needs_qs)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002267 return;
2268
2269 /*
2270 * Was there a quiescent state since the beginning of the grace
2271 * period? If no, then exit and wait for the next call.
2272 */
Paul E. McKenney3a19b462016-11-30 11:21:21 -08002273 if (rdp->cpu_no_qs.b.norm)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002274 return;
2275
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -08002276 /*
2277 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2278 * judge of that).
2279 */
Paul E. McKenney33085c42018-07-03 17:22:34 -07002280 rcu_report_qs_rdp(rdp->cpu, rdp);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002281}
2282
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002283/*
Paul E. McKenney780cd592018-07-03 17:22:34 -07002284 * Near the end of the offline process. Trace the fact that this CPU
2285 * is going offline.
Paul E. McKenneyb1420f12012-03-01 13:18:08 -08002286 */
Paul E. McKenney780cd592018-07-03 17:22:34 -07002287int rcutree_dying_cpu(unsigned int cpu)
Paul E. McKenneyb1420f12012-03-01 13:18:08 -08002288{
Yafang Shao4f5fbd72019-03-26 20:13:11 +08002289 bool blkd;
2290 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
2291 struct rcu_node *rnp = rdp->mynode;
Paul E. McKenneyb1420f12012-03-01 13:18:08 -08002292
Paul E. McKenneyea463512015-03-03 14:05:26 -08002293 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
Paul E. McKenney780cd592018-07-03 17:22:34 -07002294 return 0;
Paul E. McKenneyea463512015-03-03 14:05:26 -08002295
Yafang Shao4f5fbd72019-03-26 20:13:11 +08002296 blkd = !!(rnp->qsmask & rdp->grpmask);
Paul E. McKenney0937d042020-01-03 14:53:31 -08002297 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
Paul E. McKenney477351f2018-05-01 12:54:11 -07002298 blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
Paul E. McKenney780cd592018-07-03 17:22:34 -07002299 return 0;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002300}
2301
2302/*
Paul E. McKenney8af3a5e2014-10-31 11:22:37 -07002303 * All CPUs for the specified rcu_node structure have gone offline,
2304 * and all tasks that were preempted within an RCU read-side critical
2305 * section while running on one of those CPUs have since exited their RCU
2306 * read-side critical section. Some other CPU is reporting this fact with
2307 * the specified rcu_node structure's ->lock held and interrupts disabled.
2308 * This function therefore goes up the tree of rcu_node structures,
2309 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
2310 * the leaf rcu_node structure's ->qsmaskinit field has already been
Paul E. McKenneyc50cbe52018-05-02 13:51:57 -07002311 * updated.
Paul E. McKenney8af3a5e2014-10-31 11:22:37 -07002312 *
2313 * This function does check that the specified rcu_node structure has
2314 * all CPUs offline and no blocked tasks, so it is OK to invoke it
2315 * prematurely. That said, invoking it after the fact will cost you
2316 * a needless lock acquisition. So once it has done its work, don't
2317 * invoke it again.
2318 */
2319static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2320{
2321 long mask;
2322 struct rcu_node *rnp = rnp_leaf;
2323
Paul E. McKenney962aff02018-05-02 12:49:21 -07002324 raw_lockdep_assert_held_rcu_node(rnp_leaf);
Paul E. McKenneyea463512015-03-03 14:05:26 -08002325 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
Paul E. McKenney962aff02018-05-02 12:49:21 -07002326 WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2327 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
Paul E. McKenney8af3a5e2014-10-31 11:22:37 -07002328 return;
2329 for (;;) {
2330 mask = rnp->grpmask;
2331 rnp = rnp->parent;
2332 if (!rnp)
2333 break;
Peter Zijlstra2a67e742015-10-08 12:24:23 +02002334 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
Paul E. McKenney8af3a5e2014-10-31 11:22:37 -07002335 rnp->qsmaskinit &= ~mask;
Paul E. McKenney962aff02018-05-02 12:49:21 -07002336 /* Between grace periods, so better already be zero! */
2337 WARN_ON_ONCE(rnp->qsmask);
Paul E. McKenney8af3a5e2014-10-31 11:22:37 -07002338 if (rnp->qsmaskinit) {
Boqun Feng67c583a72015-12-29 12:18:47 +08002339 raw_spin_unlock_rcu_node(rnp);
2340 /* irqs remain disabled. */
Paul E. McKenney8af3a5e2014-10-31 11:22:37 -07002341 return;
2342 }
Boqun Feng67c583a72015-12-29 12:18:47 +08002343 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
Paul E. McKenney8af3a5e2014-10-31 11:22:37 -07002344 }
2345}
2346
2347/*
Paul E. McKenneye5601402012-01-07 11:03:57 -08002348 * The CPU has been completely removed, and some other CPU is reporting
Paul E. McKenneya58163d2017-06-20 12:11:34 -07002349 * this fact from process context. Do the remainder of the cleanup.
2350 * There can only be one CPU hotplug operation at a time, so no need for
2351 * explicit locking.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002352 */
Paul E. McKenney780cd592018-07-03 17:22:34 -07002353int rcutree_dead_cpu(unsigned int cpu)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002354{
Paul E. McKenneyda1df502018-07-03 15:37:16 -07002355 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
Paul E. McKenneyb1420f12012-03-01 13:18:08 -08002356 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
Paul E. McKenneye5601402012-01-07 11:03:57 -08002357
Paul E. McKenneyea463512015-03-03 14:05:26 -08002358 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
Paul E. McKenney780cd592018-07-03 17:22:34 -07002359 return 0;
Paul E. McKenneyea463512015-03-03 14:05:26 -08002360
Paul E. McKenney2036d942012-01-30 17:02:47 -08002361 /* Adjust any no-longer-needed kthreads. */
Thomas Gleixner5d01bbd2012-07-16 10:42:35 +00002362 rcu_boost_kthread_setaffinity(rnp, -1);
Paul E. McKenney780cd592018-07-03 17:22:34 -07002363 /* Do any needed no-CB deferred wakeups from this CPU. */
2364 do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
Paul E. McKenney96926682019-08-02 15:12:47 -07002365
2366 // Stop-machine done, so allow nohz_full to disable tick.
2367 tick_dep_clear(TICK_DEP_BIT_RCU);
Paul E. McKenney780cd592018-07-03 17:22:34 -07002368 return 0;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002369}
2370
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002371/*
2372 * Invoke any RCU callbacks that have made it to the end of their grace
 2373	 * period. Throttle as specified by rdp->blimit.
2374 */
Paul E. McKenney5bb5d092018-07-03 17:22:34 -07002375static void rcu_do_batch(struct rcu_data *rdp)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002376{
2377 unsigned long flags;
Paul E. McKenneyec5ef872019-05-21 13:03:49 -07002378 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2379 rcu_segcblist_is_offloaded(&rdp->cblist);
Paul E. McKenney15fecf82017-02-08 12:36:42 -08002380 struct rcu_head *rhp;
2381 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2382 long bl, count;
Eric Dumazetcfcdef52019-07-24 18:07:52 -07002383 long pending, tlimit = 0;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002384
Paul E. McKenneydc35c892012-12-03 13:52:00 -08002385 /* If no callbacks are ready, just return. */
Paul E. McKenney15fecf82017-02-08 12:36:42 -08002386 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
Paul E. McKenney3c779df2018-07-05 15:54:02 -07002387 trace_rcu_batch_start(rcu_state.name,
Paul E. McKenney15fecf82017-02-08 12:36:42 -08002388 rcu_segcblist_n_cbs(&rdp->cblist), 0);
Paul E. McKenney3c779df2018-07-05 15:54:02 -07002389 trace_rcu_batch_end(rcu_state.name, 0,
Paul E. McKenney15fecf82017-02-08 12:36:42 -08002390 !rcu_segcblist_empty(&rdp->cblist),
Paul E. McKenney4968c302011-12-07 16:32:40 -08002391 need_resched(), is_idle_task(current),
2392 rcu_is_callbacks_kthread());
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002393 return;
Paul E. McKenney29c00b42011-06-17 15:53:19 -07002394 }
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002395
2396 /*
 2397	 * Extract the list of ready callbacks, disabling interrupts to prevent
Paul E. McKenney15fecf82017-02-08 12:36:42 -08002398 * races with call_rcu() from interrupt handlers. Leave the
2399 * callback counts, as rcu_barrier() needs to be conservative.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002400 */
2401 local_irq_save(flags);
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07002402 rcu_nocb_lock(rdp);
Paul E. McKenney8146c4e22012-01-10 14:23:29 -08002403 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
Eric Dumazetcfcdef52019-07-24 18:07:52 -07002404 pending = rcu_segcblist_n_cbs(&rdp->cblist);
2405 bl = max(rdp->blimit, pending >> rcu_divisor);
2406 if (unlikely(bl > 100))
2407 tlimit = local_clock() + rcu_resched_ns;
Paul E. McKenney3c779df2018-07-05 15:54:02 -07002408 trace_rcu_batch_start(rcu_state.name,
Paul E. McKenney15fecf82017-02-08 12:36:42 -08002409 rcu_segcblist_n_cbs(&rdp->cblist), bl);
2410 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
Paul E. McKenney7f36ef82019-05-28 05:54:26 -07002411 if (offloaded)
2412 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07002413 rcu_nocb_unlock_irqrestore(rdp, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002414
2415 /* Invoke callbacks. */
Paul E. McKenney6a949b72019-07-28 11:50:56 -07002416 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
Paul E. McKenney15fecf82017-02-08 12:36:42 -08002417 rhp = rcu_cblist_dequeue(&rcl);
2418 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
Joel Fernandes (Google)77a40f92019-08-30 12:36:32 -04002419 rcu_callback_t f;
2420
Paul E. McKenney15fecf82017-02-08 12:36:42 -08002421 debug_rcu_head_unqueue(rhp);
Joel Fernandes (Google)77a40f92019-08-30 12:36:32 -04002422
2423 rcu_lock_acquire(&rcu_callback_map);
2424 trace_rcu_invoke_callback(rcu_state.name, rhp);
2425
2426 f = rhp->func;
2427 WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2428 f(rhp);
2429
2430 rcu_lock_release(&rcu_callback_map);
2431
Paul E. McKenney15fecf82017-02-08 12:36:42 -08002432 /*
2433 * Stop only if limit reached and CPU has something to do.
2434 * Note: The rcl structure counts down from zero.
2435 */
Paul E. McKenneyec5ef872019-05-21 13:03:49 -07002436 if (-rcl.len >= bl && !offloaded &&
Paul E. McKenneydff16722011-11-29 15:57:13 -08002437 (need_resched() ||
2438 (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002439 break;
Eric Dumazetcfcdef52019-07-24 18:07:52 -07002440 if (unlikely(tlimit)) {
2441 /* only call local_clock() every 32 callbacks */
2442 if (likely((-rcl.len & 31) || local_clock() < tlimit))
2443 continue;
2444 /* Exceeded the time limit, so leave. */
2445 break;
2446 }
Paul E. McKenneyec5ef872019-05-21 13:03:49 -07002447 if (offloaded) {
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07002448 WARN_ON_ONCE(in_serving_softirq());
2449 local_bh_enable();
2450 lockdep_assert_irqs_enabled();
2451 cond_resched_tasks_rcu_qs();
2452 lockdep_assert_irqs_enabled();
2453 local_bh_disable();
2454 }
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002455 }
2456
2457 local_irq_save(flags);
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07002458 rcu_nocb_lock(rdp);
Paul E. McKenney4b27f202017-05-02 08:45:25 -07002459 count = -rcl.len;
Paul E. McKenneye816d562020-05-01 16:49:48 -07002460 rdp->n_cbs_invoked += count;
Paul E. McKenney3c779df2018-07-05 15:54:02 -07002461 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
Paul E. McKenney8ef0f372017-05-02 08:18:40 -07002462 is_idle_task(current), rcu_is_callbacks_kthread());
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002463
Paul E. McKenney15fecf82017-02-08 12:36:42 -08002464 /* Update counts and requeue any remaining callbacks. */
2465 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
Paul E. McKenneyb1420f12012-03-01 13:18:08 -08002466 smp_mb(); /* List handling before counting for rcu_barrier(). */
Paul E. McKenney15fecf82017-02-08 12:36:42 -08002467 rcu_segcblist_insert_count(&rdp->cblist, &rcl);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002468
2469 /* Reinstate batch limit if we have worked down the excess. */
Paul E. McKenney15fecf82017-02-08 12:36:42 -08002470 count = rcu_segcblist_n_cbs(&rdp->cblist);
Paul E. McKenneyd5a9a8c2019-04-10 17:01:39 -07002471 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002472 rdp->blimit = blimit;
2473
Paul E. McKenney37c72e52009-10-14 10:15:55 -07002474 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
Paul E. McKenney15fecf82017-02-08 12:36:42 -08002475 if (count == 0 && rdp->qlen_last_fqs_check != 0) {
Paul E. McKenney37c72e52009-10-14 10:15:55 -07002476 rdp->qlen_last_fqs_check = 0;
Paul E. McKenney3c779df2018-07-05 15:54:02 -07002477 rdp->n_force_qs_snap = rcu_state.n_force_qs;
Paul E. McKenney15fecf82017-02-08 12:36:42 -08002478 } else if (count < rdp->qlen_last_fqs_check - qhimark)
2479 rdp->qlen_last_fqs_check = count;
Paul E. McKenneyefd88b02017-10-19 14:52:41 -07002480
2481 /*
2482 * The following usually indicates a double call_rcu(). To track
2483 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2484 */
Paul E. McKenneyd1b222c2019-07-02 16:03:33 -07002485 WARN_ON_ONCE(count == 0 && !rcu_segcblist_empty(&rdp->cblist));
2486 WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2487 count != 0 && rcu_segcblist_empty(&rdp->cblist));
Paul E. McKenney37c72e52009-10-14 10:15:55 -07002488
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07002489 rcu_nocb_unlock_irqrestore(rdp, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002490
Paul E. McKenneye0f23062011-06-21 01:29:39 -07002491 /* Re-invoke RCU core processing if there are callbacks remaining. */
Paul E. McKenneyec5ef872019-05-21 13:03:49 -07002492 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist))
Paul E. McKenneya46e0892011-06-15 15:47:09 -07002493 invoke_rcu_core();
Paul E. McKenney6a949b72019-07-28 11:50:56 -07002494 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002495}
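
/*
 * Worked example of the batch-limit arithmetic above (the numbers are
 * illustrative only, not necessarily this build's defaults): with
 * rdp->blimit == 10 and rcu_divisor == 7, a backlog of 25,600 pending
 * callbacks yields bl = max(10, 25600 >> 7) = 200.  Because bl > 100,
 * tlimit is also armed, so callback invocation stops either after bl
 * callbacks (if the CPU has other work to do) or once local_clock()
 * passes the rcu_resched_ns deadline, whichever comes first.
 */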
2496
2497/*
Paul E. McKenneyc98cac62018-11-21 11:35:03 -08002498 * This function is invoked from each scheduling-clock interrupt,
2499 * and checks to see if this CPU is in a non-context-switch quiescent
2500 * state, for example, user mode or idle loop. It also schedules RCU
2501 * core processing. If the current grace period has gone on too long,
2502 * it will ask the scheduler to manufacture a context switch for the sole
 2503	 * purpose of providing the needed quiescent state.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002504 */
Paul E. McKenneyc98cac62018-11-21 11:35:03 -08002505void rcu_sched_clock_irq(int user)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002506{
Steven Rostedt (Red Hat)f7f7bac2013-07-12 17:18:47 -04002507 trace_rcu_utilization(TPS("Start scheduler-tick"));
Paul E. McKenney4e950202018-07-05 17:59:36 -07002508 raw_cpu_inc(rcu_data.ticks_this_gp);
Paul E. McKenney92aa39e2018-07-09 13:47:30 -07002509 /* The load-acquire pairs with the store-release setting to true. */
Paul E. McKenney2dba13f2018-08-03 21:00:38 -07002510 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
Paul E. McKenney92aa39e2018-07-09 13:47:30 -07002511 /* Idle and userspace execution already are quiescent states. */
Paul E. McKenneya0ef9ec2018-07-09 15:50:16 -07002512 if (!rcu_is_cpu_rrupt_from_idle() && !user) {
Paul E. McKenney92aa39e2018-07-09 13:47:30 -07002513 set_tsk_need_resched(current);
2514 set_preempt_need_resched();
2515 }
Paul E. McKenney2dba13f2018-08-03 21:00:38 -07002516 __this_cpu_write(rcu_data.rcu_urgent_qs, false);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002517 }
Paul E. McKenneyc98cac62018-11-21 11:35:03 -08002518 rcu_flavor_sched_clock_irq(user);
Paul E. McKenneydd7dafd2019-09-14 03:39:22 -07002519 if (rcu_pending(user))
Paul E. McKenneya46e0892011-06-15 15:47:09 -07002520 invoke_rcu_core();
Byungchul Park07f27572018-05-11 17:30:34 +09002521
Steven Rostedt (Red Hat)f7f7bac2013-07-12 17:18:47 -04002522 trace_rcu_utilization(TPS("End scheduler-tick"));
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002523}
2524
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002525/*
Zhouyi Zhou5d8a7522019-03-20 03:33:00 +00002526 * Scan the leaf rcu_node structures. For each structure on which all
2527 * CPUs have reported a quiescent state and on which there are tasks
2528 * blocking the current grace period, initiate RCU priority boosting.
2529 * Otherwise, invoke the specified function to check dyntick state for
2530 * each CPU that has not yet reported a quiescent state.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002531 */
Paul E. McKenney8ff0b902018-07-05 17:55:14 -07002532static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002533{
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002534 int cpu;
2535 unsigned long flags;
2536 unsigned long mask;
Paul E. McKenney66e4c332019-08-12 16:14:00 -07002537 struct rcu_data *rdp;
Paul E. McKenneya0b6c9a2009-09-28 07:46:33 -07002538 struct rcu_node *rnp;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002539
Paul E. McKenneyb2b00dd2019-10-30 11:56:10 -07002540 rcu_state.cbovld = rcu_state.cbovldnext;
2541 rcu_state.cbovldnext = false;
Paul E. McKenneyaedf4ba2018-07-04 14:33:59 -07002542 rcu_for_each_leaf_node(rnp) {
Paul E. McKenneycee43932018-03-02 16:35:27 -08002543 cond_resched_tasks_rcu_qs();
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002544 mask = 0;
Peter Zijlstra2a67e742015-10-08 12:24:23 +02002545 raw_spin_lock_irqsave_rcu_node(rnp, flags);
Paul E. McKenneyb2b00dd2019-10-30 11:56:10 -07002546 rcu_state.cbovldnext |= !!rnp->cbovldmask;
Paul E. McKenneya0b6c9a2009-09-28 07:46:33 -07002547 if (rnp->qsmask == 0) {
Lai Jiangshanc130d2d2019-10-15 10:28:48 +00002548 if (!IS_ENABLED(CONFIG_PREEMPT_RCU) ||
Paul E. McKenneya77da142015-03-08 14:52:27 -07002549 rcu_preempt_blocked_readers_cgp(rnp)) {
2550 /*
2551 * No point in scanning bits because they
2552 * are all zero. But we might need to
2553 * priority-boost blocked readers.
2554 */
2555 rcu_initiate_boost(rnp, flags);
2556 /* rcu_initiate_boost() releases rnp->lock */
2557 continue;
2558 }
Paul E. McKenney92816432018-05-02 11:07:02 -07002559 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2560 continue;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002561 }
Paul E. McKenney7441e762019-10-30 09:37:11 -07002562 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2563 rdp = per_cpu_ptr(&rcu_data, cpu);
2564 if (f(rdp)) {
2565 mask |= rdp->grpmask;
2566 rcu_disable_urgency_upon_qs(rdp);
Paul E. McKenney0edd1b12013-06-21 16:37:22 -07002567 }
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002568 }
Paul E. McKenney45f014c52e2010-01-04 15:09:08 -08002569 if (mask != 0) {
Paul E. McKenneyc9a24e22018-04-27 14:54:46 -07002570 /* Idle/offline CPUs, report (releases rnp->lock). */
Paul E. McKenneyb50912d2018-07-03 17:22:34 -07002571 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
Paul E. McKenney0aa04b02015-01-23 21:52:37 -08002572 } else {
2573 /* Nothing to do here, so just drop the lock. */
Boqun Feng67c583a72015-12-29 12:18:47 +08002574 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002575 }
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002576 }
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002577}
2578
2579/*
2580 * Force quiescent states on reluctant CPUs, and also detect which
2581 * CPUs are in dyntick-idle mode.
2582 */
Paul E. McKenneycd920e52018-11-28 16:57:54 -08002583void rcu_force_quiescent_state(void)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002584{
2585 unsigned long flags;
Paul E. McKenney394f2762012-06-26 17:00:35 -07002586 bool ret;
2587 struct rcu_node *rnp;
2588 struct rcu_node *rnp_old = NULL;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002589
Paul E. McKenney394f2762012-06-26 17:00:35 -07002590 /* Funnel through hierarchy to reduce memory contention. */
Paul E. McKenneyda1df502018-07-03 15:37:16 -07002591 rnp = __this_cpu_read(rcu_data.mynode);
Paul E. McKenney394f2762012-06-26 17:00:35 -07002592 for (; rnp != NULL; rnp = rnp->parent) {
Paul E. McKenney67a0edb2018-07-05 16:15:38 -07002593 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
Paul E. McKenney66e4c332019-08-12 16:14:00 -07002594 !raw_spin_trylock(&rnp->fqslock);
Paul E. McKenney394f2762012-06-26 17:00:35 -07002595 if (rnp_old != NULL)
2596 raw_spin_unlock(&rnp_old->fqslock);
Paul E. McKenneyd62df572018-01-10 13:10:49 -08002597 if (ret)
Paul E. McKenney394f2762012-06-26 17:00:35 -07002598 return;
Paul E. McKenney394f2762012-06-26 17:00:35 -07002599 rnp_old = rnp;
2600 }
Paul E. McKenney336a4f62018-07-03 17:22:34 -07002601 /* rnp_old == rcu_get_root(), rnp == NULL. */
Paul E. McKenney394f2762012-06-26 17:00:35 -07002602
2603 /* Reached the root of the rcu_node tree, acquire lock. */
Peter Zijlstra2a67e742015-10-08 12:24:23 +02002604 raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
Paul E. McKenney394f2762012-06-26 17:00:35 -07002605 raw_spin_unlock(&rnp_old->fqslock);
Paul E. McKenney67a0edb2018-07-05 16:15:38 -07002606 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
Boqun Feng67c583a72015-12-29 12:18:47 +08002607 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
Paul E. McKenney4cdfc1752012-06-22 17:06:26 -07002608 return; /* Someone beat us to it. */
Paul E. McKenney46a1e342010-01-04 15:09:09 -08002609 }
Paul E. McKenney67a0edb2018-07-05 16:15:38 -07002610 WRITE_ONCE(rcu_state.gp_flags,
2611 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
Boqun Feng67c583a72015-12-29 12:18:47 +08002612 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
Paul E. McKenney532c00c2018-07-03 17:22:34 -07002613 rcu_gp_kthread_wake();
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002614}
Paul E. McKenneycd920e52018-11-28 16:57:54 -08002615EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002616
Paul E. McKenneyfb60e532018-11-21 12:42:12 -08002617/* Perform RCU core processing work for the current CPU. */
Sebastian Andrzej Siewior48d07c02019-03-20 22:13:33 +01002618static __latent_entropy void rcu_core(void)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002619{
Shaohua Li09223372011-06-14 13:26:25 +08002620 unsigned long flags;
Paul E. McKenneyda1df502018-07-03 15:37:16 -07002621 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
Paul E. McKenneyb0f74032013-02-04 12:14:24 -08002622 struct rcu_node *rnp = rdp->mynode;
Paul E. McKenneyc1ab99d2019-05-21 13:39:15 -07002623 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2624 rcu_segcblist_is_offloaded(&rdp->cblist);
Paul E. McKenneya26ac242011-01-12 14:10:23 -08002625
2626 if (cpu_is_offline(smp_processor_id()))
Peter Zijlstra08bca602011-05-20 16:06:29 -07002627 return;
Paul E. McKenneya26ac242011-01-12 14:10:23 -08002628 trace_rcu_utilization(TPS("Start RCU core"));
Shaohua Li09223372011-06-14 13:26:25 +08002629 WARN_ON_ONCE(!rdp->beenonline);
Paul E. McKenney29154c52012-05-30 03:21:48 -07002630
Paul E. McKenney3e310092018-06-21 12:50:01 -07002631 /* Report any deferred quiescent states if preemption enabled. */
Paul E. McKenneyfced9c82018-07-26 13:44:00 -07002632 if (!(preempt_count() & PREEMPT_MASK)) {
Paul E. McKenney3e310092018-06-21 12:50:01 -07002633 rcu_preempt_deferred_qs(current);
Paul E. McKenneyfced9c82018-07-26 13:44:00 -07002634 } else if (rcu_preempt_need_deferred_qs(current)) {
2635 set_tsk_need_resched(current);
2636 set_preempt_need_resched();
2637 }
Paul E. McKenney3e310092018-06-21 12:50:01 -07002638
Paul E. McKenney29154c52012-05-30 03:21:48 -07002639 /* Update RCU state based on any recent quiescent states. */
Paul E. McKenney8087d3e2018-07-03 17:22:34 -07002640 rcu_check_quiescent_state(rdp);
Paul E. McKenney29154c52012-05-30 03:21:48 -07002641
2642 /* No grace period and unregistered callbacks? */
Paul E. McKenneyde8e8732018-07-03 17:22:34 -07002643 if (!rcu_gp_in_progress() &&
Paul E. McKenneyc1ab99d2019-05-21 13:39:15 -07002644 rcu_segcblist_is_enabled(&rdp->cblist) && !offloaded) {
Paul E. McKenney48a76392014-03-11 13:02:16 -07002645 local_irq_save(flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002646 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
Paul E. McKenneyc6e09b92018-07-03 17:22:34 -07002647 rcu_accelerate_cbs_unlocked(rnp, rdp);
Paul E. McKenney29154c52012-05-30 03:21:48 -07002648 local_irq_restore(flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002649 }
Yao Dongdong9910aff2015-02-25 17:09:46 +08002650
Paul E. McKenney791416c2018-10-01 15:42:44 -07002651 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002652
Paul E. McKenney29154c52012-05-30 03:21:48 -07002653 /* If there are callbacks ready, invoke them. */
Paul E. McKenneyc1ab99d2019-05-21 13:39:15 -07002654 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) &&
Paul E. McKenney43e903a2019-03-25 08:36:03 -07002655 likely(READ_ONCE(rcu_scheduler_fully_active)))
2656 rcu_do_batch(rdp);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002657
2658 /* Do any needed deferred wakeups of rcuo kthreads. */
2659 do_nocb_deferred_wakeup(rdp);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002660 trace_rcu_utilization(TPS("End RCU core"));
2661}
2662
Sebastian Andrzej Siewior48d07c02019-03-20 22:13:33 +01002663static void rcu_core_si(struct softirq_action *h)
2664{
2665 rcu_core();
2666}
2667
2668static void rcu_wake_cond(struct task_struct *t, int status)
2669{
2670 /*
2671 * If the thread is yielding, only wake it when this
 2672	 * is invoked from the idle task.
2673 */
2674 if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2675 wake_up_process(t);
2676}
2677
2678static void invoke_rcu_core_kthread(void)
2679{
2680 struct task_struct *t;
2681 unsigned long flags;
2682
2683 local_irq_save(flags);
2684 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2685 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2686 if (t != NULL && t != current)
2687 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2688 local_irq_restore(flags);
2689}
2690
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002691/*
Sebastian Andrzej Siewior48d07c02019-03-20 22:13:33 +01002692 * Wake up this CPU's rcuc kthread to do RCU core processing.
2693 */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002694static void invoke_rcu_core(void)
2695{
Sebastian Andrzej Siewior48d07c02019-03-20 22:13:33 +01002696 if (!cpu_online(smp_processor_id()))
2697 return;
2698 if (use_softirq)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002699 raise_softirq(RCU_SOFTIRQ);
Sebastian Andrzej Siewior48d07c02019-03-20 22:13:33 +01002700 else
2701 invoke_rcu_core_kthread();
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002702}
2703
Sebastian Andrzej Siewior48d07c02019-03-20 22:13:33 +01002704static void rcu_cpu_kthread_park(unsigned int cpu)
2705{
2706 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2707}
2708
2709static int rcu_cpu_kthread_should_run(unsigned int cpu)
2710{
2711 return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2712}
2713
2714/*
2715 * Per-CPU kernel thread that invokes RCU callbacks. This replaces
2716 * the RCU softirq used in configurations of RCU that do not support RCU
2717 * priority boosting.
2718 */
2719static void rcu_cpu_kthread(unsigned int cpu)
2720{
2721 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2722 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2723 int spincnt;
2724
Lai Jiangshan2488a5e2019-10-15 10:23:57 +00002725 trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
Sebastian Andrzej Siewior48d07c02019-03-20 22:13:33 +01002726 for (spincnt = 0; spincnt < 10; spincnt++) {
Sebastian Andrzej Siewior48d07c02019-03-20 22:13:33 +01002727 local_bh_disable();
2728 *statusp = RCU_KTHREAD_RUNNING;
2729 local_irq_disable();
2730 work = *workp;
2731 *workp = 0;
2732 local_irq_enable();
2733 if (work)
2734 rcu_core();
2735 local_bh_enable();
2736 if (*workp == 0) {
2737 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2738 *statusp = RCU_KTHREAD_WAITING;
2739 return;
2740 }
2741 }
2742 *statusp = RCU_KTHREAD_YIELDING;
2743 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
Paul E. McKenney77865de2020-05-07 15:44:46 -07002744 schedule_timeout_idle(2);
Sebastian Andrzej Siewior48d07c02019-03-20 22:13:33 +01002745 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2746 *statusp = RCU_KTHREAD_WAITING;
2747}
2748
2749static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2750 .store = &rcu_data.rcu_cpu_kthread_task,
2751 .thread_should_run = rcu_cpu_kthread_should_run,
2752 .thread_fn = rcu_cpu_kthread,
2753 .thread_comm = "rcuc/%u",
2754 .setup = rcu_cpu_kthread_setup,
2755 .park = rcu_cpu_kthread_park,
2756};
2757
2758/*
2759 * Spawn per-CPU RCU core processing kthreads.
2760 */
2761static int __init rcu_spawn_core_kthreads(void)
2762{
2763 int cpu;
2764
2765 for_each_possible_cpu(cpu)
2766 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2767 if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq)
2768 return 0;
2769 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2770 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2771 return 0;
2772}
2773early_initcall(rcu_spawn_core_kthreads);
2774
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002775/*
2776 * Handle any core-RCU processing required by a call_rcu() invocation.
Paul E. McKenney29154c52012-05-30 03:21:48 -07002777 */
Paul E. McKenney5c7d8962018-07-03 17:22:34 -07002778static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2779 unsigned long flags)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002780{
Paul E. McKenney37c72e52009-10-14 10:15:55 -07002781 /*
2782 * If called from an extended quiescent state, invoke the RCU
2783 * core in order to force a re-evaluation of RCU's idleness.
2784 */
2785 if (!rcu_is_watching())
Paul E. McKenney2655d572011-04-07 22:47:23 -07002786 invoke_rcu_core();
Paul E. McKenneyb52573d2010-12-14 17:36:02 -08002787
2788 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
Paul E. McKenney470716f2013-03-19 11:32:11 -07002789 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
Paul E. McKenneyb52573d2010-12-14 17:36:02 -08002790 return;
2791
2792 /*
Paul E. McKenneyb52573d2010-12-14 17:36:02 -08002793 * Force the grace period if too many callbacks or too long waiting.
Paul E. McKenneycd920e52018-11-28 16:57:54 -08002794 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002795 * if some other CPU has recently done so. Also, don't bother
Paul E. McKenneycd920e52018-11-28 16:57:54 -08002796 * invoking rcu_force_quiescent_state() if the newly enqueued callback
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002797 * is the only one waiting for a grace period to complete.
2798 */
Paul E. McKenney15fecf82017-02-08 12:36:42 -08002799 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2800 rdp->qlen_last_fqs_check + qhimark)) {
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002801
2802 /* Are we ignoring a completed grace period? */
Paul E. McKenney15cabdf2018-07-03 17:22:34 -07002803 note_gp_changes(rdp);
Paul E. McKenneyb52573d2010-12-14 17:36:02 -08002804
2805 /* Start a new grace period if one not already started. */
Paul E. McKenneyde8e8732018-07-03 17:22:34 -07002806 if (!rcu_gp_in_progress()) {
Paul E. McKenneyc6e09b92018-07-03 17:22:34 -07002807 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
Paul E. McKenneyb52573d2010-12-14 17:36:02 -08002808 } else {
2809 /* Give the grace period a kick. */
Paul E. McKenneyd5a9a8c2019-04-10 17:01:39 -07002810 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
Paul E. McKenney5c7d8962018-07-03 17:22:34 -07002811 if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
Paul E. McKenney15fecf82017-02-08 12:36:42 -08002812 rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
Paul E. McKenneycd920e52018-11-28 16:57:54 -08002813 rcu_force_quiescent_state();
Paul E. McKenney5c7d8962018-07-03 17:22:34 -07002814 rdp->n_force_qs_snap = rcu_state.n_force_qs;
Paul E. McKenney15fecf82017-02-08 12:36:42 -08002815 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
Paul E. McKenneyb52573d2010-12-14 17:36:02 -08002816 }
Paul E. McKenney4cdfc1752012-06-22 17:06:26 -07002817 }
Paul E. McKenney29154c52012-05-30 03:21:48 -07002818}
2819
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07002820/*
Paul E. McKenneyae150182013-04-23 13:20:57 -07002821 * RCU callback function to leak a callback.
2822 */
2823static void rcu_leak_callback(struct rcu_head *rhp)
2824{
2825}
2826
2827/*
Paul E. McKenneyb2b00dd2019-10-30 11:56:10 -07002828 * Check and if necessary update the leaf rcu_node structure's
2829 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2830 * number of queued RCU callbacks. The caller must hold the leaf rcu_node
2831 * structure's ->lock.
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07002832 */
Paul E. McKenneyb2b00dd2019-10-30 11:56:10 -07002833static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2834{
2835 raw_lockdep_assert_held_rcu_node(rnp);
2836 if (qovld_calc <= 0)
2837 return; // Early boot and wildcard value set.
2838 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2839 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2840 else
2841 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2842}
2843
2844/*
2845 * Check and if necessary update the leaf rcu_node structure's
2846 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2847 * number of queued RCU callbacks. No locks need be held, but the
2848 * caller must have disabled interrupts.
2849 *
2850 * Note that this function ignores the possibility that there are a lot
2851 * of callbacks all of which have already seen the end of their respective
2852 * grace periods. This omission is due to the need for no-CBs CPUs to
2853 * be holding ->nocb_lock to do this check, which is too heavy for a
2854 * common-case operation.
2855 */
2856static void check_cb_ovld(struct rcu_data *rdp)
2857{
2858 struct rcu_node *const rnp = rdp->mynode;
2859
2860 if (qovld_calc <= 0 ||
2861 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
2862 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
2863 return; // Early boot wildcard value or already set correctly.
2864 raw_spin_lock_rcu_node(rnp);
2865 check_cb_ovld_locked(rdp, rnp);
2866 raw_spin_unlock_rcu_node(rnp);
2867}
2868
Paul E. McKenneyb692dc42020-02-11 07:29:02 -08002869/* Helper function for call_rcu() and friends. */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002870static void
Joel Fernandes (Google)77a40f92019-08-30 12:36:32 -04002871__call_rcu(struct rcu_head *head, rcu_callback_t func)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002872{
2873 unsigned long flags;
2874 struct rcu_data *rdp;
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07002875 bool was_alldone;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002876
Paul E. McKenneyb8f2ed52016-08-23 06:51:47 -07002877 /* Misaligned rcu_head! */
2878 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2879
Paul E. McKenneyae150182013-04-23 13:20:57 -07002880 if (debug_rcu_head_queue(head)) {
Paul E. McKenneyfa3c6642017-05-03 11:38:55 -07002881 /*
2882 * Probable double call_rcu(), so leak the callback.
2883 * Use rcu:rcu_callback trace event to find the previous
2884 * time callback was passed to __call_rcu().
2885 */
Sakari Ailusd75f7732019-03-25 21:32:28 +02002886 WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n",
Paul E. McKenneyfa3c6642017-05-03 11:38:55 -07002887 head, head->func);
Paul E. McKenney7d0ae802015-03-03 14:57:58 -08002888 WRITE_ONCE(head->func, rcu_leak_callback);
Paul E. McKenneyae150182013-04-23 13:20:57 -07002889 return;
2890 }
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002891 head->func = func;
2892 head->next = NULL;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002893 local_irq_save(flags);
Walter Wu26e760c2020-08-06 23:24:35 -07002894 kasan_record_aux_stack(head);
Paul E. McKenneyda1df502018-07-03 15:37:16 -07002895 rdp = this_cpu_ptr(&rcu_data);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002896
2897 /* Add the callback to our list. */
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07002898 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
2899 // This can trigger due to call_rcu() from offline CPU:
2900 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
Paul E. McKenney34404ca2015-01-19 20:39:20 -08002901 WARN_ON_ONCE(!rcu_is_watching());
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07002902 // Very early boot, before rcu_init(). Initialize if needed
2903 // and then drop through to queue the callback.
Paul E. McKenney15fecf82017-02-08 12:36:42 -08002904 if (rcu_segcblist_empty(&rdp->cblist))
2905 rcu_segcblist_init(&rdp->cblist);
Paul E. McKenney0d8ee372012-08-03 13:16:15 -07002906 }
Joel Fernandes (Google)77a40f92019-08-30 12:36:32 -04002907
Paul E. McKenneyb2b00dd2019-10-30 11:56:10 -07002908 check_cb_ovld(rdp);
Paul E. McKenneyd1b222c2019-07-02 16:03:33 -07002909 if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
2910 return; // Enqueued onto ->nocb_bypass, so just leave.
Paul E. McKenneyb692dc42020-02-11 07:29:02 -08002911 // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
Joel Fernandes (Google)77a40f92019-08-30 12:36:32 -04002912 rcu_segcblist_enqueue(&rdp->cblist, head);
Uladzislau Rezki (Sony)c408b212020-05-25 23:47:55 +02002913 if (__is_kvfree_rcu_offset((unsigned long)func))
2914 trace_rcu_kvfree_callback(rcu_state.name, head,
Paul E. McKenney3c779df2018-07-05 15:54:02 -07002915 (unsigned long)func,
Paul E. McKenney15fecf82017-02-08 12:36:42 -08002916 rcu_segcblist_n_cbs(&rdp->cblist));
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002917 else
Paul E. McKenney3c779df2018-07-05 15:54:02 -07002918 trace_rcu_callback(rcu_state.name, head,
Paul E. McKenney15fecf82017-02-08 12:36:42 -08002919 rcu_segcblist_n_cbs(&rdp->cblist));
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002920
Paul E. McKenney29154c52012-05-30 03:21:48 -07002921 /* Go handle any RCU core processing required. */
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07002922 if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2923 unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
2924 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
2925 } else {
2926 __call_rcu_core(rdp, head, flags);
2927 local_irq_restore(flags);
2928 }
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002929}
2930
Paul E. McKenneya68a2bb2017-05-03 08:34:57 -07002931/**
Paul E. McKenney45975c72018-07-02 14:30:37 -07002932 * call_rcu() - Queue an RCU callback for invocation after a grace period.
Paul E. McKenneya68a2bb2017-05-03 08:34:57 -07002933 * @head: structure to be used for queueing the RCU updates.
2934 * @func: actual callback function to be invoked after the grace period
2935 *
2936 * The callback function will be invoked some time after a full grace
Paul E. McKenney45975c72018-07-02 14:30:37 -07002937 * period elapses, in other words after all pre-existing RCU read-side
2938 * critical sections have completed. However, the callback function
2939 * might well execute concurrently with RCU read-side critical sections
2940 * that started after call_rcu() was invoked. RCU read-side critical
2941 * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and
2942 * may be nested. In addition, regions of code across which interrupts,
2943 * preemption, or softirqs have been disabled also serve as RCU read-side
2944 * critical sections. This includes hardware interrupt handlers, softirq
2945 * handlers, and NMI handlers.
Paul E. McKenney27fdb352017-10-19 14:26:21 -07002946 *
Paul E. McKenney45975c72018-07-02 14:30:37 -07002947 * Note that all CPUs must agree that the grace period extended beyond
2948 * all pre-existing RCU read-side critical sections. On systems with more
2949 * than one CPU, this means that when "func()" is invoked, each CPU is
2950 * guaranteed to have executed a full memory barrier since the end of its
2951 * last RCU read-side critical section whose beginning preceded the call
2952 * to call_rcu(). It also means that each CPU executing an RCU read-side
2953 * critical section that continues beyond the start of "func()" must have
2954 * executed a memory barrier after the call_rcu() but before the beginning
2955 * of that RCU read-side critical section. Note that these guarantees
2956 * include CPUs that are offline, idle, or executing in user mode, as
2957 * well as CPUs that are executing in the kernel.
Paul E. McKenneya68a2bb2017-05-03 08:34:57 -07002958 *
Paul E. McKenney45975c72018-07-02 14:30:37 -07002959 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
2960 * resulting RCU callback function "func()", then both CPU A and CPU B are
2961 * guaranteed to execute a full memory barrier during the time interval
2962 * between the call to call_rcu() and the invocation of "func()" -- even
2963 * if CPU A and CPU B are the same CPU (but again only if the system has
2964 * more than one CPU).
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002965 */
Paul E. McKenney45975c72018-07-02 14:30:37 -07002966void call_rcu(struct rcu_head *head, rcu_callback_t func)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002967{
Joel Fernandes (Google)77a40f92019-08-30 12:36:32 -04002968 __call_rcu(head, func);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002969}
Paul E. McKenney45975c72018-07-02 14:30:37 -07002970EXPORT_SYMBOL_GPL(call_rcu);
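/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * source): a typical caller embeds an rcu_head in its own structure and
 * frees that structure from the callback. struct foo, foo_release(), and
 * remove_foo() are hypothetical names.
 *
 *	struct foo {
 *		struct list_head list;
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_release(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	static void remove_foo(struct foo *fp)
 *	{
 *		list_del_rcu(&fp->list);	 // Unpublish under the update-side lock.
 *		call_rcu(&fp->rcu, foo_release); // Freed only after a grace period.
 *	}
 */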
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01002971
Byungchul Parka35d1692019-08-05 18:22:27 -04002972
2973/* Maximum number of jiffies to wait before draining a batch. */
2974#define KFREE_DRAIN_JIFFIES (HZ / 50)
Joel Fernandes (Google)0392beb2019-09-19 14:58:26 -07002975#define KFREE_N_BATCHES 2
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02002976#define FREE_N_CHANNELS 2
Joel Fernandes (Google)0392beb2019-09-19 14:58:26 -07002977
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01002978/**
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02002979 * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01002980 * @nr_records: Number of active pointers in the array
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01002981 * @next: Next bulk object in the block chain
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02002982 * @records: Array of the kvfree_rcu() pointers
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01002983 */
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02002984struct kvfree_rcu_bulk_data {
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01002985 unsigned long nr_records;
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02002986 struct kvfree_rcu_bulk_data *next;
Uladzislau Rezki (Sony)3af84862020-05-25 23:47:49 +02002987 void *records[];
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01002988};
Joel Fernandes (Google)0392beb2019-09-19 14:58:26 -07002989
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01002990/*
2991 * This macro defines how many entries the "records" array
2992 * will contain. It is chosen so that the size of the
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02002993 * kvfree_rcu_bulk_data structure is exactly one page.
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01002994 */
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02002995#define KVFREE_BULK_MAX_ENTR \
2996 ((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
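/*
 * Worked example (added for exposition, not part of the original source):
 * assuming 4 KiB pages and a 64-bit kernel, the header above is 16 bytes
 * (an 8-byte nr_records plus an 8-byte next pointer), so each block holds
 * (4096 - 16) / 8 = 510 pointers.
 */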
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01002997
Joel Fernandes (Google)0392beb2019-09-19 14:58:26 -07002998/**
2999 * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
3000 * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
3001 * @head_free: List of kfree_rcu() objects waiting for a grace period
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003002 * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
Joel Fernandes (Google)0392beb2019-09-19 14:58:26 -07003003 * @krcp: Pointer to @kfree_rcu_cpu structure
3004 */
3005
3006struct kfree_rcu_cpu_work {
3007 struct rcu_work rcu_work;
3008 struct rcu_head *head_free;
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003009 struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS];
Joel Fernandes (Google)0392beb2019-09-19 14:58:26 -07003010 struct kfree_rcu_cpu *krcp;
3011};
Byungchul Parka35d1692019-08-05 18:22:27 -04003012
3013/**
3014 * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
Byungchul Parka35d1692019-08-05 18:22:27 -04003015 * @head: List of kfree_rcu() objects not yet waiting for a grace period
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003016 * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
Joel Fernandes (Google)0392beb2019-09-19 14:58:26 -07003017 * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
Byungchul Parka35d1692019-08-05 18:22:27 -04003018 * @lock: Synchronize access to this structure
3019 * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
3020 * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
Sebastian Andrzej Siewior69f08d32020-05-25 23:47:51 +02003021 * @initialized: The @rcu_work fields have been initialized
Mauro Carvalho Chehab8e116902020-05-04 14:35:00 +02003022 * @count: Number of objects for which GP not started
Byungchul Parka35d1692019-08-05 18:22:27 -04003023 *
3024 * This is a per-CPU structure. The reason that it is not included in
3025 * the rcu_data structure is to permit this code to be extracted from
3026 * the RCU files. Such extraction could allow further optimization of
3027 * the interactions with the slab allocators.
Andreea-Cristina Bernat495aa962014-03-18 20:48:48 +02003028 */
Byungchul Parka35d1692019-08-05 18:22:27 -04003029struct kfree_rcu_cpu {
Byungchul Parka35d1692019-08-05 18:22:27 -04003030 struct rcu_head *head;
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003031 struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS];
Joel Fernandes (Google)0392beb2019-09-19 14:58:26 -07003032 struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
Joel Fernandes (Google)8ac88f72020-05-25 23:47:45 +02003033 raw_spinlock_t lock;
Byungchul Parka35d1692019-08-05 18:22:27 -04003034 struct delayed_work monitor_work;
Joel Fernandes569d7672019-09-22 10:49:57 -07003035 bool monitor_todo;
Byungchul Parka35d1692019-08-05 18:22:27 -04003036 bool initialized;
Joel Fernandes (Google)91542442020-03-16 12:32:27 -04003037 int count;
Uladzislau Rezki (Sony)53c72b52020-05-25 23:47:52 +02003038
3039 /*
3040	 * A simple cache list that contains objects for
3041	 * reuse. In order to save some per-CPU space, a
3042	 * single list is used. Even though it is lockless,
3043	 * access to it has to be protected by the
3044	 * per-CPU lock.
3045 */
3046 struct llist_head bkvcache;
3047 int nr_bkv_objs;
Byungchul Parka35d1692019-08-05 18:22:27 -04003048};
3049
Sebastian Andrzej Siewior69f08d32020-05-25 23:47:51 +02003050static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
3051 .lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
3052};
Byungchul Parka35d1692019-08-05 18:22:27 -04003053
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003054static __always_inline void
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003055debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003056{
3057#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
Joel Fernandes (Google)446044e2020-05-25 23:47:48 +02003058 int i;
3059
3060 for (i = 0; i < bhead->nr_records; i++)
3061 debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003062#endif
3063}
3064
Uladzislau Rezki (Sony)952371d2020-05-25 23:47:50 +02003065static inline struct kfree_rcu_cpu *
3066krc_this_cpu_lock(unsigned long *flags)
3067{
3068 struct kfree_rcu_cpu *krcp;
3069
3070 local_irq_save(*flags); // For safely calling this_cpu_ptr().
3071 krcp = this_cpu_ptr(&krc);
Sebastian Andrzej Siewior69f08d32020-05-25 23:47:51 +02003072 raw_spin_lock(&krcp->lock);
Uladzislau Rezki (Sony)952371d2020-05-25 23:47:50 +02003073
3074 return krcp;
3075}
3076
3077static inline void
3078krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
3079{
Sebastian Andrzej Siewior69f08d32020-05-25 23:47:51 +02003080 raw_spin_unlock(&krcp->lock);
Uladzislau Rezki (Sony)952371d2020-05-25 23:47:50 +02003081 local_irq_restore(flags);
3082}
3083
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003084static inline struct kvfree_rcu_bulk_data *
Uladzislau Rezki (Sony)53c72b52020-05-25 23:47:52 +02003085get_cached_bnode(struct kfree_rcu_cpu *krcp)
3086{
3087 if (!krcp->nr_bkv_objs)
3088 return NULL;
3089
3090 krcp->nr_bkv_objs--;
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003091 return (struct kvfree_rcu_bulk_data *)
Uladzislau Rezki (Sony)53c72b52020-05-25 23:47:52 +02003092 llist_del_first(&krcp->bkvcache);
3093}
3094
3095static inline bool
3096put_cached_bnode(struct kfree_rcu_cpu *krcp,
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003097 struct kvfree_rcu_bulk_data *bnode)
Uladzislau Rezki (Sony)53c72b52020-05-25 23:47:52 +02003098{
3099 // Check the limit.
3100 if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
3101 return false;
3102
3103 llist_add((struct llist_node *) bnode, &krcp->bkvcache);
3104 krcp->nr_bkv_objs++;
3105 return true;
3106
3107}
3108
Paul E. McKenney6d813392012-02-23 13:30:16 -08003109/*
Byungchul Parka35d1692019-08-05 18:22:27 -04003110 * This function is invoked in workqueue context after a grace period.
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003111 * It frees all the objects queued on ->bkvhead_free or ->head_free.
Byungchul Parka35d1692019-08-05 18:22:27 -04003112 */
3113static void kfree_rcu_work(struct work_struct *work)
3114{
3115 unsigned long flags;
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003116 struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext;
Byungchul Parka35d1692019-08-05 18:22:27 -04003117 struct rcu_head *head, *next;
3118 struct kfree_rcu_cpu *krcp;
Joel Fernandes (Google)0392beb2019-09-19 14:58:26 -07003119 struct kfree_rcu_cpu_work *krwp;
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003120 int i, j;
Byungchul Parka35d1692019-08-05 18:22:27 -04003121
Joel Fernandes (Google)0392beb2019-09-19 14:58:26 -07003122 krwp = container_of(to_rcu_work(work),
3123 struct kfree_rcu_cpu_work, rcu_work);
3124 krcp = krwp->krcp;
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003125
Joel Fernandes (Google)8ac88f72020-05-25 23:47:45 +02003126 raw_spin_lock_irqsave(&krcp->lock, flags);
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003127 // Channels 1 and 2.
3128 for (i = 0; i < FREE_N_CHANNELS; i++) {
3129 bkvhead[i] = krwp->bkvhead_free[i];
3130 krwp->bkvhead_free[i] = NULL;
3131 }
3132
3133 // Channel 3.
Joel Fernandes (Google)0392beb2019-09-19 14:58:26 -07003134 head = krwp->head_free;
3135 krwp->head_free = NULL;
Joel Fernandes (Google)8ac88f72020-05-25 23:47:45 +02003136 raw_spin_unlock_irqrestore(&krcp->lock, flags);
Byungchul Parka35d1692019-08-05 18:22:27 -04003137
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003138	// Handle the first two channels.
3139 for (i = 0; i < FREE_N_CHANNELS; i++) {
3140 for (; bkvhead[i]; bkvhead[i] = bnext) {
3141 bnext = bkvhead[i]->next;
3142 debug_rcu_bhead_unqueue(bkvhead[i]);
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003143
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003144 rcu_lock_acquire(&rcu_callback_map);
3145 if (i == 0) { // kmalloc() / kfree().
3146 trace_rcu_invoke_kfree_bulk_callback(
3147 rcu_state.name, bkvhead[i]->nr_records,
3148 bkvhead[i]->records);
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003149
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003150 kfree_bulk(bkvhead[i]->nr_records,
3151 bkvhead[i]->records);
3152 } else { // vmalloc() / vfree().
3153 for (j = 0; j < bkvhead[i]->nr_records; j++) {
Uladzislau Rezki (Sony)c408b212020-05-25 23:47:55 +02003154 trace_rcu_invoke_kvfree_callback(
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003155 rcu_state.name,
3156 bkvhead[i]->records[j], 0);
Uladzislau Rezki (Sony)61370792020-01-20 15:42:26 +01003157
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003158 vfree(bkvhead[i]->records[j]);
3159 }
3160 }
3161 rcu_lock_release(&rcu_callback_map);
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003162
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003163 krcp = krc_this_cpu_lock(&flags);
3164 if (put_cached_bnode(krcp, bkvhead[i]))
3165 bkvhead[i] = NULL;
3166 krc_this_cpu_unlock(krcp, flags);
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003167
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003168 if (bkvhead[i])
3169 free_page((unsigned long) bkvhead[i]);
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003170
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003171 cond_resched_tasks_rcu_qs();
3172 }
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003173 }
3174
3175 /*
3176	 * Emergency case only. It can happen under low-memory
3177	 * conditions when an allocation fails, so the "bulk"
3178	 * path cannot be maintained temporarily.
3179 */
Byungchul Parka35d1692019-08-05 18:22:27 -04003180 for (; head; head = next) {
Joel Fernandes (Google)77a40f92019-08-30 12:36:32 -04003181 unsigned long offset = (unsigned long)head->func;
Joel Fernandes (Google)446044e2020-05-25 23:47:48 +02003182 void *ptr = (void *)head - offset;
Joel Fernandes (Google)77a40f92019-08-30 12:36:32 -04003183
Byungchul Parka35d1692019-08-05 18:22:27 -04003184 next = head->next;
Joel Fernandes (Google)446044e2020-05-25 23:47:48 +02003185 debug_rcu_head_unqueue((struct rcu_head *)ptr);
Joel Fernandes (Google)77a40f92019-08-30 12:36:32 -04003186 rcu_lock_acquire(&rcu_callback_map);
Uladzislau Rezki (Sony)c408b212020-05-25 23:47:55 +02003187 trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
Joel Fernandes (Google)77a40f92019-08-30 12:36:32 -04003188
Uladzislau Rezki (Sony)c408b212020-05-25 23:47:55 +02003189 if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003190 kvfree(ptr);
Joel Fernandes (Google)77a40f92019-08-30 12:36:32 -04003191
3192 rcu_lock_release(&rcu_callback_map);
Byungchul Parka35d1692019-08-05 18:22:27 -04003193 cond_resched_tasks_rcu_qs();
3194 }
3195}
3196
3197/*
3198 * Schedule the kfree batch RCU work to run in workqueue context after a GP.
3199 *
3200 * This function is invoked by kfree_rcu_monitor() when the KFREE_DRAIN_JIFFIES
3201 * timeout has been reached.
3202 */
3203static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp)
3204{
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003205 struct kfree_rcu_cpu_work *krwp;
Uladzislau Rezki (Sony)594aa592020-05-25 23:47:47 +02003206 bool repeat = false;
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003207 int i, j;
Joel Fernandes (Google)0392beb2019-09-19 14:58:26 -07003208
Byungchul Parka35d1692019-08-05 18:22:27 -04003209 lockdep_assert_held(&krcp->lock);
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003210
3211 for (i = 0; i < KFREE_N_BATCHES; i++) {
3212 krwp = &(krcp->krw_arr[i]);
3213
3214 /*
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003215		 * Try to detach bkvhead or head and attach it over any
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003216		 * available corresponding free channel. It can be that
3217		 * a previous RCU batch is still in progress, which means
3218		 * that another one cannot be queued immediately, so
3219		 * return false to tell the caller to retry.
3220 */
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003221 if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
3222 (krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003223 (krcp->head && !krwp->head_free)) {
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003224 // Channel 1 corresponds to SLAB ptrs.
3225 // Channel 2 corresponds to vmalloc ptrs.
3226 for (j = 0; j < FREE_N_CHANNELS; j++) {
3227 if (!krwp->bkvhead_free[j]) {
3228 krwp->bkvhead_free[j] = krcp->bkvhead[j];
3229 krcp->bkvhead[j] = NULL;
3230 }
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003231 }
3232
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003233 // Channel 3 corresponds to emergency path.
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003234 if (!krwp->head_free) {
3235 krwp->head_free = krcp->head;
3236 krcp->head = NULL;
3237 }
3238
Joel Fernandes (Google)a6a82ce2020-03-16 12:32:28 -04003239 WRITE_ONCE(krcp->count, 0);
Joel Fernandes (Google)91542442020-03-16 12:32:27 -04003240
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003241 /*
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003242			 * There is one work item per batch, so each batch
3243			 * can handle three "free channels". The work item
3244			 * can still be in the pending state when the
3245			 * channels have been detached one after the
3246			 * other.
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003247 */
3248 queue_rcu_work(system_wq, &krwp->rcu_work);
Joel Fernandes (Google)0392beb2019-09-19 14:58:26 -07003249 }
Uladzislau Rezki (Sony)594aa592020-05-25 23:47:47 +02003250
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003251 // Repeat if any "free" corresponding channel is still busy.
3252 if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head)
Uladzislau Rezki (Sony)594aa592020-05-25 23:47:47 +02003253 repeat = true;
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003254 }
Byungchul Parka35d1692019-08-05 18:22:27 -04003255
Uladzislau Rezki (Sony)594aa592020-05-25 23:47:47 +02003256 return !repeat;
Byungchul Parka35d1692019-08-05 18:22:27 -04003257}
3258
3259static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
3260 unsigned long flags)
3261{
3262 // Attempt to start a new batch.
Joel Fernandes569d7672019-09-22 10:49:57 -07003263 krcp->monitor_todo = false;
Byungchul Parka35d1692019-08-05 18:22:27 -04003264 if (queue_kfree_rcu_work(krcp)) {
3265 // Success! Our job is done here.
Joel Fernandes (Google)8ac88f72020-05-25 23:47:45 +02003266 raw_spin_unlock_irqrestore(&krcp->lock, flags);
Byungchul Parka35d1692019-08-05 18:22:27 -04003267 return;
3268 }
3269
3270 // Previous RCU batch still in progress, try again later.
Joel Fernandes569d7672019-09-22 10:49:57 -07003271 krcp->monitor_todo = true;
3272 schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
Joel Fernandes (Google)8ac88f72020-05-25 23:47:45 +02003273 raw_spin_unlock_irqrestore(&krcp->lock, flags);
Byungchul Parka35d1692019-08-05 18:22:27 -04003274}
3275
3276/*
3277 * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3278 * It invokes kfree_rcu_drain_unlock() to attempt to start another batch.
3279 */
3280static void kfree_rcu_monitor(struct work_struct *work)
3281{
3282 unsigned long flags;
3283 struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu,
3284 monitor_work.work);
3285
Joel Fernandes (Google)8ac88f72020-05-25 23:47:45 +02003286 raw_spin_lock_irqsave(&krcp->lock, flags);
Joel Fernandes569d7672019-09-22 10:49:57 -07003287 if (krcp->monitor_todo)
Byungchul Parka35d1692019-08-05 18:22:27 -04003288 kfree_rcu_drain_unlock(krcp, flags);
3289 else
Joel Fernandes (Google)8ac88f72020-05-25 23:47:45 +02003290 raw_spin_unlock_irqrestore(&krcp->lock, flags);
Byungchul Parka35d1692019-08-05 18:22:27 -04003291}
3292
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003293static inline bool
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003294kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003295{
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003296 struct kvfree_rcu_bulk_data *bnode;
3297 int idx;
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003298
3299 if (unlikely(!krcp->initialized))
3300 return false;
3301
3302 lockdep_assert_held(&krcp->lock);
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003303 idx = !!is_vmalloc_addr(ptr);
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003304
3305 /* Check if a new block is required. */
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003306 if (!krcp->bkvhead[idx] ||
3307 krcp->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
Uladzislau Rezki (Sony)53c72b52020-05-25 23:47:52 +02003308 bnode = get_cached_bnode(krcp);
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003309 if (!bnode) {
Joel Fernandes (Google)4d291942020-05-25 23:47:46 +02003310 /*
3311 * To keep this path working on raw non-preemptible
3312 * sections, prevent the optional entry into the
3313 * allocator as it uses sleeping locks. In fact, even
3314 * if the caller of kfree_rcu() is preemptible, this
3315 * path still is not, as krcp->lock is a raw spinlock.
3316 * With additional page pre-allocation in the works,
3317 * hitting this return is going to be much less likely.
3318 */
3319 if (IS_ENABLED(CONFIG_PREEMPT_RT))
3320 return false;
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003321
Uladzislau Rezki (Sony)3042f832020-05-25 23:47:58 +02003322 /*
3323			 * NOTE: For the single-argument kvfree_rcu() we could
3324			 * drop the lock and get the page in a sleepable
3325			 * context. That would allow us to maintain an array
3326			 * for CONFIG_PREEMPT_RT as well if no cached
3327			 * pages are available.
3328 */
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003329 bnode = (struct kvfree_rcu_bulk_data *)
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003330 __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
3331 }
3332
3333 /* Switch to emergency path. */
3334 if (unlikely(!bnode))
3335 return false;
3336
3337 /* Initialize the new block. */
3338 bnode->nr_records = 0;
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003339 bnode->next = krcp->bkvhead[idx];
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003340
3341 /* Attach it to the head. */
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003342 krcp->bkvhead[idx] = bnode;
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003343 }
3344
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003345 /* Finally insert. */
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003346 krcp->bkvhead[idx]->records
3347 [krcp->bkvhead[idx]->nr_records++] = ptr;
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003348
3349 return true;
3350}
3351
Byungchul Parka35d1692019-08-05 18:22:27 -04003352/*
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003353 * Queue a request for lazy invocation of appropriate free routine after a
3354 * grace period. Please note that three paths are maintained: two main ones
3355 * that use the array-of-pointers interface, and a third emergency one that is
3356 * used only when the main paths cannot be maintained temporarily due to
3357 * memory pressure.
Byungchul Parka35d1692019-08-05 18:22:27 -04003358 *
Uladzislau Rezki (Sony)c408b212020-05-25 23:47:55 +02003359 * Each kvfree_call_rcu() request is added to a batch. The batch will be drained
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003360 * every KFREE_DRAIN_JIFFIES jiffies. All the objects in the batch will
3361 * be freed in workqueue context. This allows us to batch requests together to
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02003362 * reduce the number of grace periods during heavy kfree_rcu()/kvfree_rcu() load.
Andreea-Cristina Bernat495aa962014-03-18 20:48:48 +02003363 */
Uladzislau Rezki (Sony)c408b212020-05-25 23:47:55 +02003364void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
Andreea-Cristina Bernat495aa962014-03-18 20:48:48 +02003365{
Byungchul Parka35d1692019-08-05 18:22:27 -04003366 unsigned long flags;
3367 struct kfree_rcu_cpu *krcp;
Uladzislau Rezki (Sony)3042f832020-05-25 23:47:58 +02003368 bool success;
Joel Fernandes (Google)446044e2020-05-25 23:47:48 +02003369 void *ptr;
Byungchul Parka35d1692019-08-05 18:22:27 -04003370
Uladzislau Rezki (Sony)3042f832020-05-25 23:47:58 +02003371 if (head) {
3372 ptr = (void *) head - (unsigned long) func;
3373 } else {
3374 /*
3375		 * Please note there is a limitation for the head-less
3376		 * variant, which is why there is a clear rule for such
3377		 * objects: they can be used from might_sleep() context
3378		 * only. For other places, please embed an rcu_head in
3379		 * your data.
3380 */
3381 might_sleep();
3382 ptr = (unsigned long *) func;
3383 }
3384
Uladzislau Rezki (Sony)952371d2020-05-25 23:47:50 +02003385 krcp = krc_this_cpu_lock(&flags);
Byungchul Parka35d1692019-08-05 18:22:27 -04003386
3387 // Queue the object but don't yet schedule the batch.
Joel Fernandes (Google)446044e2020-05-25 23:47:48 +02003388 if (debug_rcu_head_queue(ptr)) {
Joel Fernandes (Google)e99637b2019-09-22 13:03:17 -07003389 // Probable double kfree_rcu(), just leak.
3390 WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3391 __func__, head);
Uladzislau Rezki (Sony)3042f832020-05-25 23:47:58 +02003392
3393 // Mark as success and leave.
3394 success = true;
Joel Fernandes (Google)e99637b2019-09-22 13:03:17 -07003395 goto unlock_return;
3396 }
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003397
3398 /*
3399 * Under high memory pressure GFP_NOWAIT can fail,
3400 * in that case the emergency path is maintained.
3401 */
Uladzislau Rezki (Sony)3042f832020-05-25 23:47:58 +02003402 success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr);
3403 if (!success) {
3404 if (head == NULL)
3405 // Inline if kvfree_rcu(one_arg) call.
3406 goto unlock_return;
3407
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003408 head->func = func;
3409 head->next = krcp->head;
3410 krcp->head = head;
Uladzislau Rezki (Sony)3042f832020-05-25 23:47:58 +02003411 success = true;
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01003412 }
Byungchul Parka35d1692019-08-05 18:22:27 -04003413
Joel Fernandes (Google)a6a82ce2020-03-16 12:32:28 -04003414 WRITE_ONCE(krcp->count, krcp->count + 1);
Joel Fernandes (Google)91542442020-03-16 12:32:27 -04003415
Byungchul Parka35d1692019-08-05 18:22:27 -04003416 // Set timer to drain after KFREE_DRAIN_JIFFIES.
3417 if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
Joel Fernandes569d7672019-09-22 10:49:57 -07003418 !krcp->monitor_todo) {
3419 krcp->monitor_todo = true;
Byungchul Parka35d1692019-08-05 18:22:27 -04003420 schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
Joel Fernandes569d7672019-09-22 10:49:57 -07003421 }
Byungchul Parka35d1692019-08-05 18:22:27 -04003422
Joel Fernandes (Google)e99637b2019-09-22 13:03:17 -07003423unlock_return:
Uladzislau Rezki (Sony)952371d2020-05-25 23:47:50 +02003424 krc_this_cpu_unlock(krcp, flags);
Uladzislau Rezki (Sony)3042f832020-05-25 23:47:58 +02003425
3426 /*
3427	 * Inline kvfree() after synchronize_rcu(). We can do
3428	 * it from might_sleep() context only, so the current
3429	 * CPU can pass through a quiescent state.
3430 */
3431 if (!success) {
3432 debug_rcu_head_unqueue((struct rcu_head *) ptr);
3433 synchronize_rcu();
3434 kvfree(ptr);
3435 }
Andreea-Cristina Bernat495aa962014-03-18 20:48:48 +02003436}
Uladzislau Rezki (Sony)c408b212020-05-25 23:47:55 +02003437EXPORT_SYMBOL_GPL(kvfree_call_rcu);
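/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * source): callers normally reach kvfree_call_rcu() through the kfree_rcu()
 * and kvfree_rcu() macros rather than invoking it directly. struct foo and
 * the pointers below are hypothetical.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	// Double-argument form: the rcu_head offset is encoded in "func".
 *	kfree_rcu(fp, rcu);
 *
 *	// Single-argument ("head-less") form: allowed only from contexts
 *	// that can sleep, matching the might_sleep() check above.
 *	kvfree_rcu(ptr);
 */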
Andreea-Cristina Bernat495aa962014-03-18 20:48:48 +02003438
Joel Fernandes (Google)91542442020-03-16 12:32:27 -04003439static unsigned long
3440kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3441{
3442 int cpu;
Joel Fernandes (Google)a6a82ce2020-03-16 12:32:28 -04003443 unsigned long count = 0;
Joel Fernandes (Google)91542442020-03-16 12:32:27 -04003444
3445 /* Snapshot count of all CPUs */
3446 for_each_online_cpu(cpu) {
3447 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3448
Joel Fernandes (Google)a6a82ce2020-03-16 12:32:28 -04003449 count += READ_ONCE(krcp->count);
Joel Fernandes (Google)91542442020-03-16 12:32:27 -04003450 }
3451
3452 return count;
3453}
3454
3455static unsigned long
3456kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3457{
3458 int cpu, freed = 0;
3459 unsigned long flags;
3460
3461 for_each_online_cpu(cpu) {
3462 int count;
3463 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3464
3465 count = krcp->count;
Joel Fernandes (Google)8ac88f72020-05-25 23:47:45 +02003466 raw_spin_lock_irqsave(&krcp->lock, flags);
Joel Fernandes (Google)91542442020-03-16 12:32:27 -04003467 if (krcp->monitor_todo)
3468 kfree_rcu_drain_unlock(krcp, flags);
3469 else
Joel Fernandes (Google)8ac88f72020-05-25 23:47:45 +02003470 raw_spin_unlock_irqrestore(&krcp->lock, flags);
Joel Fernandes (Google)91542442020-03-16 12:32:27 -04003471
3472 sc->nr_to_scan -= count;
3473 freed += count;
3474
3475 if (sc->nr_to_scan <= 0)
3476 break;
3477 }
3478
Peter Enderborgc6dfd722020-06-04 12:23:20 +02003479 return freed == 0 ? SHRINK_STOP : freed;
Joel Fernandes (Google)91542442020-03-16 12:32:27 -04003480}
3481
3482static struct shrinker kfree_rcu_shrinker = {
3483 .count_objects = kfree_rcu_shrink_count,
3484 .scan_objects = kfree_rcu_shrink_scan,
3485 .batch = 0,
3486 .seeks = DEFAULT_SEEKS,
3487};
3488
Byungchul Parka35d1692019-08-05 18:22:27 -04003489void __init kfree_rcu_scheduler_running(void)
3490{
3491 int cpu;
3492 unsigned long flags;
3493
3494 for_each_online_cpu(cpu) {
3495 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3496
Joel Fernandes (Google)8ac88f72020-05-25 23:47:45 +02003497 raw_spin_lock_irqsave(&krcp->lock, flags);
Joel Fernandes569d7672019-09-22 10:49:57 -07003498 if (!krcp->head || krcp->monitor_todo) {
Joel Fernandes (Google)8ac88f72020-05-25 23:47:45 +02003499 raw_spin_unlock_irqrestore(&krcp->lock, flags);
Byungchul Parka35d1692019-08-05 18:22:27 -04003500 continue;
3501 }
Joel Fernandes569d7672019-09-22 10:49:57 -07003502 krcp->monitor_todo = true;
Joel Fernandes (Google)0392beb2019-09-19 14:58:26 -07003503 schedule_delayed_work_on(cpu, &krcp->monitor_work,
3504 KFREE_DRAIN_JIFFIES);
Joel Fernandes (Google)8ac88f72020-05-25 23:47:45 +02003505 raw_spin_unlock_irqrestore(&krcp->lock, flags);
Byungchul Parka35d1692019-08-05 18:22:27 -04003506 }
3507}
3508
Paul E. McKenneye5bc3af2018-11-29 10:42:06 -08003509/*
3510 * During early boot, any blocking grace-period wait automatically
Sebastian Andrzej Siewior90326f02019-10-15 21:18:14 +02003511 * implies a grace period. Later on, this is never the case for PREEMPTION.
Paul E. McKenneye5bc3af2018-11-29 10:42:06 -08003512 *
Sebastian Andrzej Siewior90326f02019-10-15 21:18:14 +02003513 * However, because a context switch is a grace period for !PREEMPTION, any
Paul E. McKenneye5bc3af2018-11-29 10:42:06 -08003514 * blocking grace-period wait automatically implies a grace period if
3515 * there is only one CPU online at any point time during execution of
3516 * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
3517 * occasionally incorrectly indicate that there are multiple CPUs online
3518 * when there was in fact only one the whole time, as this just adds some
3519 * overhead: RCU still operates correctly.
3520 */
3521static int rcu_blocking_is_gp(void)
3522{
3523 int ret;
3524
Thomas Gleixner01b1d882019-07-26 23:19:38 +02003525 if (IS_ENABLED(CONFIG_PREEMPTION))
Paul E. McKenneye5bc3af2018-11-29 10:42:06 -08003526 return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
3527 might_sleep(); /* Check for RCU read-side critical section. */
3528 preempt_disable();
3529 ret = num_online_cpus() <= 1;
3530 preempt_enable();
3531 return ret;
3532}
3533
3534/**
3535 * synchronize_rcu - wait until a grace period has elapsed.
3536 *
3537 * Control will return to the caller some time after a full grace
3538 * period has elapsed, in other words after all currently executing RCU
3539 * read-side critical sections have completed. Note, however, that
3540 * upon return from synchronize_rcu(), the caller might well be executing
3541 * concurrently with new RCU read-side critical sections that began while
3542 * synchronize_rcu() was waiting. RCU read-side critical sections are
3543 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
3544 * In addition, regions of code across which interrupts, preemption, or
3545 * softirqs have been disabled also serve as RCU read-side critical
3546 * sections. This includes hardware interrupt handlers, softirq handlers,
3547 * and NMI handlers.
3548 *
3549 * Note that this guarantee implies further memory-ordering guarantees.
3550 * On systems with more than one CPU, when synchronize_rcu() returns,
3551 * each CPU is guaranteed to have executed a full memory barrier since
3552 * the end of its last RCU read-side critical section whose beginning
3553 * preceded the call to synchronize_rcu(). In addition, each CPU having
3554 * an RCU read-side critical section that extends beyond the return from
3555 * synchronize_rcu() is guaranteed to have executed a full memory barrier
3556 * after the beginning of synchronize_rcu() and before the beginning of
3557 * that RCU read-side critical section. Note that these guarantees include
3558 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3559 * that are executing in the kernel.
3560 *
3561 * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3562 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3563 * to have executed a full memory barrier during the execution of
3564 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3565 * again only if the system has more than one CPU).
3566 */
3567void synchronize_rcu(void)
3568{
3569 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3570 lock_is_held(&rcu_lock_map) ||
3571 lock_is_held(&rcu_sched_lock_map),
3572 "Illegal synchronize_rcu() in RCU read-side critical section");
3573 if (rcu_blocking_is_gp())
3574 return;
3575 if (rcu_gp_is_expedited())
3576 synchronize_rcu_expedited();
3577 else
3578 wait_rcu_gp(call_rcu);
3579}
3580EXPORT_SYMBOL_GPL(synchronize_rcu);
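/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * source): replace an RCU-protected pointer, wait for pre-existing readers,
 * and only then free the old version. gp, gp_lock, and new_fp are
 * hypothetical.
 *
 *	old_fp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, new_fp);	// Publish the new version.
 *	synchronize_rcu();		// Wait for pre-existing readers.
 *	kfree(old_fp);			// Now safe to free the old version.
 */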
3581
Paul E. McKenney765a3f42014-03-14 16:37:08 -07003582/**
3583 * get_state_synchronize_rcu - Snapshot current RCU state
3584 *
3585 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3586 * to determine whether or not a full grace period has elapsed in the
3587 * meantime.
3588 */
3589unsigned long get_state_synchronize_rcu(void)
3590{
3591 /*
3592 * Any prior manipulation of RCU-protected data must happen
Paul E. McKenneye4be81a2018-04-27 15:16:50 -07003593 * before the load from ->gp_seq.
Paul E. McKenney765a3f42014-03-14 16:37:08 -07003594 */
3595 smp_mb(); /* ^^^ */
Paul E. McKenney16fc9c62018-07-03 15:54:39 -07003596 return rcu_seq_snap(&rcu_state.gp_seq);
Paul E. McKenney765a3f42014-03-14 16:37:08 -07003597}
3598EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3599
3600/**
3601 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3602 *
3603 * @oldstate: return value from earlier call to get_state_synchronize_rcu()
3604 *
3605 * If a full RCU grace period has elapsed since the earlier call to
3606 * get_state_synchronize_rcu(), just return. Otherwise, invoke
3607 * synchronize_rcu() to wait for a full grace period.
3608 *
3609 * Yes, this function does not take counter wrap into account. But
3610 * counter wrap is harmless. If the counter wraps, we have waited for
3611 * more than 2 billion grace periods (and way more on a 64-bit system!),
3612 * so waiting for one additional grace period should be just fine.
3613 */
3614void cond_synchronize_rcu(unsigned long oldstate)
3615{
Paul E. McKenney16fc9c62018-07-03 15:54:39 -07003616 if (!rcu_seq_done(&rcu_state.gp_seq, oldstate))
Paul E. McKenney765a3f42014-03-14 16:37:08 -07003617 synchronize_rcu();
Paul E. McKenneye4be81a2018-04-27 15:16:50 -07003618 else
3619 smp_mb(); /* Ensure GP ends before subsequent accesses. */
Paul E. McKenney765a3f42014-03-14 16:37:08 -07003620}
3621EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
3622
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01003623/*
Paul E. McKenney98ece502018-07-03 17:22:34 -07003624 * Check to see if there is any immediate RCU-related work to be done by
Paul E. McKenney49918a52018-07-07 18:12:26 -07003625 * the current CPU, returning 1 if so and zero otherwise. The checks are
3626 * in order of increasing expense: checks that can be carried out against
3627 * CPU-local state are performed first. However, we must check for CPU
3628 * stalls first, else we might not get a chance.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01003629 */
Paul E. McKenneydd7dafd2019-09-14 03:39:22 -07003630static int rcu_pending(int user)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01003631{
Paul E. McKenneyed93dfc2019-09-13 14:09:56 -07003632 bool gp_in_progress;
Paul E. McKenney98ece502018-07-03 17:22:34 -07003633 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
Paul E. McKenney2f51f982009-11-13 19:51:39 -08003634 struct rcu_node *rnp = rdp->mynode;
3635
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01003636 /* Check for CPU stalls, if enabled. */
Paul E. McKenneyea12ff22018-07-03 17:22:34 -07003637 check_cpu_stall(rdp);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01003638
Paul E. McKenney85f69b32019-04-16 14:48:28 -07003639 /* Does this CPU need a deferred NOCB wakeup? */
3640 if (rcu_nocb_need_deferred_wakeup(rdp))
3641 return 1;
3642
Paul E. McKenneydd7dafd2019-09-14 03:39:22 -07003643 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */
3644 if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
Paul E. McKenneya0969322013-11-08 09:03:10 -08003645 return 0;
3646
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01003647 /* Is the RCU core waiting for a quiescent state from this CPU? */
Paul E. McKenneyed93dfc2019-09-13 14:09:56 -07003648 gp_in_progress = rcu_gp_in_progress();
3649 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01003650 return 1;
3651
3652 /* Does this CPU have callbacks ready to invoke? */
Paul E. McKenney01c495f2018-01-10 12:36:00 -08003653 if (rcu_segcblist_ready_cbs(&rdp->cblist))
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01003654 return 1;
3655
3656 /* Has RCU gone idle with this CPU needing another grace period? */
Paul E. McKenneyed93dfc2019-09-13 14:09:56 -07003657 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
Paul E. McKenney921bb5f2019-05-21 13:53:28 -07003658 (!IS_ENABLED(CONFIG_RCU_NOCB_CPU) ||
3659 !rcu_segcblist_is_offloaded(&rdp->cblist)) &&
Paul E. McKenneyc1935202018-04-12 16:29:13 -07003660 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01003661 return 1;
3662
Paul E. McKenney67e14c12018-04-27 16:01:46 -07003663 /* Have RCU grace period completed or started? */
3664 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
Paul E. McKenney01c495f2018-01-10 12:36:00 -08003665 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01003666 return 1;
3667
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01003668 /* nothing to do */
3669 return 0;
3670}
3671
3672/*
Paul E. McKenneydd46a782018-07-10 18:37:30 -07003673 * Helper function for rcu_barrier() tracing. If tracing is disabled,
Paul E. McKenneya83eff02012-05-23 18:47:05 -07003674 * the compiler is expected to optimize this away.
3675 */
Paul E. McKenneydd46a782018-07-10 18:37:30 -07003676static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
Paul E. McKenneya83eff02012-05-23 18:47:05 -07003677{
Paul E. McKenney8344b872018-07-03 17:22:34 -07003678 trace_rcu_barrier(rcu_state.name, s, cpu,
3679 atomic_read(&rcu_state.barrier_cpu_count), done);
Paul E. McKenneya83eff02012-05-23 18:47:05 -07003680}
3681
3682/*
Paul E. McKenneydd46a782018-07-10 18:37:30 -07003683 * RCU callback function for rcu_barrier(). If we are last, wake
3684 * up the task executing rcu_barrier().
Paul E. McKenneyaa24f932020-01-20 15:43:45 -08003685 *
3686 * Note that the value of rcu_state.barrier_sequence must be captured
3687 * before the atomic_dec_and_test(). Otherwise, if this CPU is not last,
3688 * other CPUs might count the value down to zero before this CPU gets
3689 * around to invoking rcu_barrier_trace(), which might result in bogus
3690 * data from the next instance of rcu_barrier().
Paul E. McKenneyb1420f12012-03-01 13:18:08 -08003691 */
Paul E. McKenney24ebbca2012-05-29 00:34:56 -07003692static void rcu_barrier_callback(struct rcu_head *rhp)
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07003693{
Paul E. McKenneyaa24f932020-01-20 15:43:45 -08003694 unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3695
Paul E. McKenneyec9f5832018-07-05 16:26:12 -07003696 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
Paul E. McKenneyaa24f932020-01-20 15:43:45 -08003697 rcu_barrier_trace(TPS("LastCB"), -1, s);
Paul E. McKenneyec9f5832018-07-05 16:26:12 -07003698 complete(&rcu_state.barrier_completion);
Paul E. McKenneya83eff02012-05-23 18:47:05 -07003699 } else {
Paul E. McKenneyaa24f932020-01-20 15:43:45 -08003700 rcu_barrier_trace(TPS("CB"), -1, s);
Paul E. McKenneya83eff02012-05-23 18:47:05 -07003701 }
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07003702}
3703
3704/*
3705 * Called with preemption disabled, and from cross-cpu IRQ context.
3706 */
Paul E. McKenney127e2982020-02-11 06:17:33 -08003707static void rcu_barrier_func(void *cpu_in)
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07003708{
Paul E. McKenney127e2982020-02-11 06:17:33 -08003709 uintptr_t cpu = (uintptr_t)cpu_in;
3710 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07003711
Paul E. McKenneydd46a782018-07-10 18:37:30 -07003712 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
Paul E. McKenneyf92c7342017-04-10 15:40:35 -07003713 rdp->barrier_head.func = rcu_barrier_callback;
3714 debug_rcu_head_queue(&rdp->barrier_head);
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07003715 rcu_nocb_lock(rdp);
Paul E. McKenneyd1b222c2019-07-02 16:03:33 -07003716 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
Joel Fernandes (Google)77a40f92019-08-30 12:36:32 -04003717 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
Paul E. McKenneyec9f5832018-07-05 16:26:12 -07003718 atomic_inc(&rcu_state.barrier_cpu_count);
Paul E. McKenneyf92c7342017-04-10 15:40:35 -07003719 } else {
3720 debug_rcu_head_unqueue(&rdp->barrier_head);
Paul E. McKenneydd46a782018-07-10 18:37:30 -07003721 rcu_barrier_trace(TPS("IRQNQ"), -1,
Paul E. McKenney66e4c332019-08-12 16:14:00 -07003722 rcu_state.barrier_sequence);
Paul E. McKenneyf92c7342017-04-10 15:40:35 -07003723 }
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07003724 rcu_nocb_unlock(rdp);
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07003725}
3726
Paul E. McKenneydd46a782018-07-10 18:37:30 -07003727/**
3728 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3729 *
3730 * Note that this primitive does not necessarily wait for an RCU grace period
3731 * to complete. For example, if there are no RCU callbacks queued anywhere
3732 * in the system, then rcu_barrier() is within its rights to return
3733 * immediately, without waiting for anything, much less an RCU grace period.
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07003734 */
Paul E. McKenneydd46a782018-07-10 18:37:30 -07003735void rcu_barrier(void)
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07003736{
Paul E. McKenney127e2982020-02-11 06:17:33 -08003737 uintptr_t cpu;
Paul E. McKenneyb1420f12012-03-01 13:18:08 -08003738 struct rcu_data *rdp;
Paul E. McKenneyec9f5832018-07-05 16:26:12 -07003739 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
Paul E. McKenneyb1420f12012-03-01 13:18:08 -08003740
Paul E. McKenneydd46a782018-07-10 18:37:30 -07003741 rcu_barrier_trace(TPS("Begin"), -1, s);
Paul E. McKenneyb1420f12012-03-01 13:18:08 -08003742
Paul E. McKenneye74f4c42009-10-06 21:48:17 -07003743 /* Take mutex to serialize concurrent rcu_barrier() requests. */
Paul E. McKenneyec9f5832018-07-05 16:26:12 -07003744 mutex_lock(&rcu_state.barrier_mutex);
Paul E. McKenneyb1420f12012-03-01 13:18:08 -08003745
Paul E. McKenney4f525a52015-06-26 11:20:00 -07003746 /* Did someone else do our work for us? */
Paul E. McKenneyec9f5832018-07-05 16:26:12 -07003747 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
Paul E. McKenneydd46a782018-07-10 18:37:30 -07003748 rcu_barrier_trace(TPS("EarlyExit"), -1,
Paul E. McKenney66e4c332019-08-12 16:14:00 -07003749 rcu_state.barrier_sequence);
Paul E. McKenneycf3a9c42012-05-29 14:56:46 -07003750 smp_mb(); /* caller's subsequent code after above check. */
Paul E. McKenneyec9f5832018-07-05 16:26:12 -07003751 mutex_unlock(&rcu_state.barrier_mutex);
Paul E. McKenneycf3a9c42012-05-29 14:56:46 -07003752 return;
3753 }
3754
Paul E. McKenney4f525a52015-06-26 11:20:00 -07003755 /* Mark the start of the barrier operation. */
Paul E. McKenneyec9f5832018-07-05 16:26:12 -07003756 rcu_seq_start(&rcu_state.barrier_sequence);
Paul E. McKenneydd46a782018-07-10 18:37:30 -07003757 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
Paul E. McKenneyb1420f12012-03-01 13:18:08 -08003758
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07003759 /*
Paul E. McKenney127e2982020-02-11 06:17:33 -08003760 * Initialize the count to two rather than to zero in order
3761 * to avoid a too-soon return to zero in case of an immediate
3762 * invocation of the just-enqueued callback (or preemption of
3763 * this task). Exclude CPU-hotplug operations to ensure that no
3764 * offline non-offloaded CPU has callbacks queued.
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07003765 */
Paul E. McKenneyec9f5832018-07-05 16:26:12 -07003766 init_completion(&rcu_state.barrier_completion);
Paul E. McKenney127e2982020-02-11 06:17:33 -08003767 atomic_set(&rcu_state.barrier_cpu_count, 2);
Paul E. McKenney1331e7a2012-08-02 17:43:50 -07003768 get_online_cpus();
Paul E. McKenneyb1420f12012-03-01 13:18:08 -08003769
3770 /*
Paul E. McKenney1331e7a2012-08-02 17:43:50 -07003771 * Force each CPU with callbacks to register a new callback.
3772 * When that callback is invoked, we will know that all of the
3773 * corresponding CPU's preceding callbacks have been invoked.
Paul E. McKenneyb1420f12012-03-01 13:18:08 -08003774 */
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07003775 for_each_possible_cpu(cpu) {
Paul E. McKenneyda1df502018-07-03 15:37:16 -07003776 rdp = per_cpu_ptr(&rcu_data, cpu);
Paul E. McKenney127e2982020-02-11 06:17:33 -08003777 if (cpu_is_offline(cpu) &&
Paul E. McKenneyce5215c2019-04-12 15:58:34 -07003778 !rcu_segcblist_is_offloaded(&rdp->cblist))
3779 continue;
Paul E. McKenney127e2982020-02-11 06:17:33 -08003780 if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) {
Paul E. McKenneydd46a782018-07-10 18:37:30 -07003781 rcu_barrier_trace(TPS("OnlineQ"), cpu,
Paul E. McKenney66e4c332019-08-12 16:14:00 -07003782 rcu_state.barrier_sequence);
Paul E. McKenney127e2982020-02-11 06:17:33 -08003783 smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
3784 } else if (rcu_segcblist_n_cbs(&rdp->cblist) &&
3785 cpu_is_offline(cpu)) {
3786 rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu,
3787 rcu_state.barrier_sequence);
3788 local_irq_disable();
3789 rcu_barrier_func((void *)cpu);
3790 local_irq_enable();
3791 } else if (cpu_is_offline(cpu)) {
3792 rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu,
3793 rcu_state.barrier_sequence);
Paul E. McKenneyb1420f12012-03-01 13:18:08 -08003794 } else {
Paul E. McKenneydd46a782018-07-10 18:37:30 -07003795 rcu_barrier_trace(TPS("OnlineNQ"), cpu,
Paul E. McKenney66e4c332019-08-12 16:14:00 -07003796 rcu_state.barrier_sequence);
Paul E. McKenneyb1420f12012-03-01 13:18:08 -08003797 }
3798 }
Paul E. McKenney1331e7a2012-08-02 17:43:50 -07003799 put_online_cpus();
Paul E. McKenneyb1420f12012-03-01 13:18:08 -08003800
3801 /*
3802 * Now that we have an rcu_barrier_callback() callback on each
3803 * CPU, and thus each counted, remove the initial count.
3804 */
Paul E. McKenney127e2982020-02-11 06:17:33 -08003805 if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
Paul E. McKenneyec9f5832018-07-05 16:26:12 -07003806 complete(&rcu_state.barrier_completion);
Paul E. McKenneyb1420f12012-03-01 13:18:08 -08003807
3808 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
Paul E. McKenneyec9f5832018-07-05 16:26:12 -07003809 wait_for_completion(&rcu_state.barrier_completion);
Paul E. McKenneyb1420f12012-03-01 13:18:08 -08003810
Paul E. McKenney4f525a52015-06-26 11:20:00 -07003811 /* Mark the end of the barrier operation. */
Paul E. McKenneydd46a782018-07-10 18:37:30 -07003812 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
Paul E. McKenneyec9f5832018-07-05 16:26:12 -07003813 rcu_seq_end(&rcu_state.barrier_sequence);
Paul E. McKenney4f525a52015-06-26 11:20:00 -07003814
Paul E. McKenneyb1420f12012-03-01 13:18:08 -08003815 /* Other rcu_barrier() invocations can now safely proceed. */
Paul E. McKenneyec9f5832018-07-05 16:26:12 -07003816 mutex_unlock(&rcu_state.barrier_mutex);
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07003817}
Paul E. McKenney45975c72018-07-02 14:30:37 -07003818EXPORT_SYMBOL_GPL(rcu_barrier);
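/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * source): a typical module-exit sequence first stops posting new callbacks,
 * then waits for the already-queued ones before tearing down their memory.
 * foo_exit(), unregister_foo(), and foo_cache are hypothetical.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		unregister_foo();		// No new call_rcu() invocations.
 *		rcu_barrier();			// Wait for queued callbacks.
 *		kmem_cache_destroy(foo_cache);	// Now safe to destroy.
 *	}
 */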
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07003819
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01003820/*
Paul E. McKenney0aa04b02015-01-23 21:52:37 -08003821 * Propagate ->qsinitmask bits up the rcu_node tree to account for the
3822 * first CPU in a given leaf rcu_node structure coming online. The caller
3823 * must hold the corresponding leaf rcu_node ->lock with interrupts
3824 * disabled.
3825 */
3826static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
3827{
3828 long mask;
Paul E. McKenney8d672fa2018-05-02 14:46:43 -07003829 long oldmask;
Paul E. McKenney0aa04b02015-01-23 21:52:37 -08003830 struct rcu_node *rnp = rnp_leaf;
3831
Paul E. McKenney8d672fa2018-05-02 14:46:43 -07003832 raw_lockdep_assert_held_rcu_node(rnp_leaf);
Paul E. McKenney962aff02018-05-02 12:49:21 -07003833 WARN_ON_ONCE(rnp->wait_blkd_tasks);
Paul E. McKenney0aa04b02015-01-23 21:52:37 -08003834 for (;;) {
3835 mask = rnp->grpmask;
3836 rnp = rnp->parent;
3837 if (rnp == NULL)
3838 return;
Paul E. McKenney6cf10082015-10-08 15:36:54 -07003839 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
Paul E. McKenney8d672fa2018-05-02 14:46:43 -07003840 oldmask = rnp->qsmaskinit;
Paul E. McKenney0aa04b02015-01-23 21:52:37 -08003841 rnp->qsmaskinit |= mask;
Boqun Feng67c583a72015-12-29 12:18:47 +08003842 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
Paul E. McKenney8d672fa2018-05-02 14:46:43 -07003843 if (oldmask)
3844 return;
Paul E. McKenney0aa04b02015-01-23 21:52:37 -08003845 }
3846}
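/*
 * Worked example, derived from the loop above: in a two-level tree where
 * leaf rnp L has ->grpmask == 0x4 within root R, the first CPU of L coming
 * online causes 0x4 to be ORed into R->qsmaskinit.  The walk then stops,
 * either because R has no parent or because R->qsmaskinit was already
 * nonzero (oldmask != 0), meaning some other leaf has already propagated
 * its bit and the rest of the path to the root is already initialized.
 */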
3847
3848/*
Paul E. McKenney27569622009-08-15 09:53:46 -07003849 * Do boot-time initialization of a CPU's per-CPU RCU data.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01003850 */
Paul E. McKenney27569622009-08-15 09:53:46 -07003851static void __init
Paul E. McKenney53b46302018-07-03 17:22:34 -07003852rcu_boot_init_percpu_data(int cpu)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01003853{
Paul E. McKenneyda1df502018-07-03 15:37:16 -07003854 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
Paul E. McKenney27569622009-08-15 09:53:46 -07003855
3856 /* Set up local state, ensuring consistent view of global state. */
Mark Rutlandbc75e992016-06-03 15:20:04 +01003857 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
Paul E. McKenney4c5273b2018-08-03 21:00:38 -07003858 WARN_ON_ONCE(rdp->dynticks_nesting != 1);
Paul E. McKenneydc5a4f22018-08-03 21:00:38 -07003859 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
Paul E. McKenney53b46302018-07-03 17:22:34 -07003860 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
Paul E. McKenney57738942018-05-08 14:18:57 -07003861 rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
Paul E. McKenney53b46302018-07-03 17:22:34 -07003862 rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
Paul E. McKenney57738942018-05-08 14:18:57 -07003863 rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
Paul E. McKenney27569622009-08-15 09:53:46 -07003864 rdp->cpu = cpu;
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07003865 rcu_boot_init_nocb_percpu_data(rdp);
Paul E. McKenney27569622009-08-15 09:53:46 -07003866}
3867
3868/*
Paul E. McKenney53b46302018-07-03 17:22:34 -07003869 * Invoked early in the CPU-online process, when pretty much all services
3870 * are available. The incoming CPU is not present.
3871 *
3872 * Initializes a CPU's per-CPU RCU data. Note that only one online or
Paul E. McKenneyff3bb6f2018-05-01 14:34:08 -07003873 * offline event can be happening at a given time. Note also that we can
 3874 * accept some slop in the rcu_state.gp_seq access due to the fact that this
Paul E. McKenneye83e73f2019-05-14 09:50:49 -07003875 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
3876 * And any offloaded callbacks are being numbered elsewhere.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01003877 */
Paul E. McKenney53b46302018-07-03 17:22:34 -07003878int rcutree_prepare_cpu(unsigned int cpu)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01003879{
3880 unsigned long flags;
Paul E. McKenneyda1df502018-07-03 15:37:16 -07003881 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
Paul E. McKenney336a4f62018-07-03 17:22:34 -07003882 struct rcu_node *rnp = rcu_get_root();
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01003883
3884 /* Set up local state, ensuring consistent view of global state. */
Paul E. McKenney6cf10082015-10-08 15:36:54 -07003885 raw_spin_lock_irqsave_rcu_node(rnp, flags);
Paul E. McKenney37c72e52009-10-14 10:15:55 -07003886 rdp->qlen_last_fqs_check = 0;
Paul E. McKenney53b46302018-07-03 17:22:34 -07003887 rdp->n_force_qs_snap = rcu_state.n_force_qs;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01003888 rdp->blimit = blimit;
Paul E. McKenney15fecf82017-02-08 12:36:42 -08003889 if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
Paul E. McKenneye83e73f2019-05-14 09:50:49 -07003890 !rcu_segcblist_is_offloaded(&rdp->cblist))
Paul E. McKenney15fecf82017-02-08 12:36:42 -08003891 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
Paul E. McKenney4c5273b2018-08-03 21:00:38 -07003892 rdp->dynticks_nesting = 1; /* CPU not up, no tearing. */
Paul E. McKenney2625d462016-11-02 14:23:30 -07003893 rcu_dynticks_eqs_online();
Boqun Feng67c583a72015-12-29 12:18:47 +08003894 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01003895
Paul E. McKenney0aa04b02015-01-23 21:52:37 -08003896 /*
3897 * Add CPU to leaf rcu_node pending-online bitmask. Any needed
3898 * propagation up the rcu_node tree will happen at the beginning
3899 * of the next grace period.
3900 */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01003901 rnp = rdp->mynode;
Peter Zijlstra2a67e742015-10-08 12:24:23 +02003902 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
Paul E. McKenneyb9585e92015-07-31 16:04:45 -07003903 rdp->beenonline = true; /* We have now been online. */
Paul E. McKenney8ff372902020-01-04 11:33:17 -08003904 rdp->gp_seq = READ_ONCE(rnp->gp_seq);
3905 rdp->gp_seq_needed = rdp->gp_seq;
Paul E. McKenney5b74c452015-08-06 15:16:57 -07003906 rdp->cpu_no_qs.b.norm = true;
Paul E. McKenney97c668b2015-08-06 11:31:51 -07003907 rdp->core_needs_qs = false;
Paul E. McKenney9b9500d2017-08-17 17:05:59 -07003908 rdp->rcu_iw_pending = false;
Paul E. McKenney8ff372902020-01-04 11:33:17 -08003909 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
Paul E. McKenney53b46302018-07-03 17:22:34 -07003910 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
Boqun Feng67c583a72015-12-29 12:18:47 +08003911 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
Thomas Gleixner4df83742016-07-13 17:17:03 +00003912 rcu_prepare_kthreads(cpu);
Paul E. McKenneyad368d12018-11-27 13:55:53 -08003913 rcu_spawn_cpu_nocb_kthread(cpu);
Thomas Gleixner4df83742016-07-13 17:17:03 +00003914
3915 return 0;
3916}
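/*
 * Context sketch (simplified assumption): rcutree_prepare_cpu() runs as a
 * CPU-hotplug "prepare" callback.  RCU's callbacks are wired up by the
 * cpuhp core itself, but an ordinary subsystem registering an equivalent
 * prepare/teardown pair (hypothetical subsys_* names) would do roughly:
 *
 *	ret = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "subsys:prepare",
 *				subsys_prepare_cpu,	// like rcutree_prepare_cpu()
 *				subsys_dead_cpu);	// teardown counterpart
 *	if (ret < 0)
 *		pr_err("subsys: cpuhp registration failed: %d\n", ret);
 */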
3917
Paul E. McKenneydeb34f32017-03-23 13:21:30 -07003918/*
3919 * Update RCU priority boot kthread affinity for CPU-hotplug changes.
3920 */
Thomas Gleixner4df83742016-07-13 17:17:03 +00003921static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
3922{
Paul E. McKenneyda1df502018-07-03 15:37:16 -07003923 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
Thomas Gleixner4df83742016-07-13 17:17:03 +00003924
3925 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
3926}
3927
Paul E. McKenneydeb34f32017-03-23 13:21:30 -07003928/*
3929 * Near the end of the CPU-online process. Pretty much all services
3930 * enabled, and the CPU is now very much alive.
3931 */
Thomas Gleixner4df83742016-07-13 17:17:03 +00003932int rcutree_online_cpu(unsigned int cpu)
3933{
Paul E. McKenney9b9500d2017-08-17 17:05:59 -07003934 unsigned long flags;
3935 struct rcu_data *rdp;
3936 struct rcu_node *rnp;
Paul E. McKenney9b9500d2017-08-17 17:05:59 -07003937
Paul E. McKenneyb97d23c2018-07-04 15:35:00 -07003938 rdp = per_cpu_ptr(&rcu_data, cpu);
3939 rnp = rdp->mynode;
3940 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3941 rnp->ffmask |= rdp->grpmask;
3942 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
Paul E. McKenney9b9500d2017-08-17 17:05:59 -07003943 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
3944 return 0; /* Too early in boot for scheduler work. */
3945 sync_sched_exp_online_cleanup(cpu);
3946 rcutree_affinity_setting(cpu, -1);
Paul E. McKenney96926682019-08-02 15:12:47 -07003947
3948 // Stop-machine done, so allow nohz_full to disable tick.
3949 tick_dep_clear(TICK_DEP_BIT_RCU);
Thomas Gleixner4df83742016-07-13 17:17:03 +00003950 return 0;
3951}
3952
Paul E. McKenneydeb34f32017-03-23 13:21:30 -07003953/*
 3954 * Near the beginning of the CPU-offline process.  The CPU is still very much alive
3955 * with pretty much all services enabled.
3956 */
Thomas Gleixner4df83742016-07-13 17:17:03 +00003957int rcutree_offline_cpu(unsigned int cpu)
3958{
Paul E. McKenney9b9500d2017-08-17 17:05:59 -07003959 unsigned long flags;
3960 struct rcu_data *rdp;
3961 struct rcu_node *rnp;
Paul E. McKenney9b9500d2017-08-17 17:05:59 -07003962
Paul E. McKenneyb97d23c2018-07-04 15:35:00 -07003963 rdp = per_cpu_ptr(&rcu_data, cpu);
3964 rnp = rdp->mynode;
3965 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3966 rnp->ffmask &= ~rdp->grpmask;
3967 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
Paul E. McKenney9b9500d2017-08-17 17:05:59 -07003968
Thomas Gleixner4df83742016-07-13 17:17:03 +00003969 rcutree_affinity_setting(cpu, cpu);
Paul E. McKenney96926682019-08-02 15:12:47 -07003970
3971 // nohz_full CPUs need the tick for stop-machine to work quickly
3972 tick_dep_set(TICK_DEP_BIT_RCU);
Thomas Gleixner4df83742016-07-13 17:17:03 +00003973 return 0;
3974}
3975
Peter Zijlstraf64c6012018-05-22 09:50:53 -07003976static DEFINE_PER_CPU(int, rcu_cpu_started);
3977
Paul E. McKenney7ec99de2016-06-30 13:58:26 -07003978/*
3979 * Mark the specified CPU as being online so that subsequent grace periods
3980 * (both expedited and normal) will wait on it. Note that this means that
3981 * incoming CPUs are not allowed to use RCU read-side critical sections
3982 * until this function is called. Failing to observe this restriction
3983 * will result in lockdep splats.
Paul E. McKenneydeb34f32017-03-23 13:21:30 -07003984 *
3985 * Note that this function is special in that it is invoked directly
3986 * from the incoming CPU rather than from the cpuhp_step mechanism.
3987 * This is because this function must be invoked at a precise location.
Paul E. McKenney7ec99de2016-06-30 13:58:26 -07003988 */
3989void rcu_cpu_starting(unsigned int cpu)
3990{
3991 unsigned long flags;
3992 unsigned long mask;
3993 struct rcu_data *rdp;
3994 struct rcu_node *rnp;
Wei Yangabfce042020-04-19 21:57:15 +00003995 bool newcpu;
Paul E. McKenney7ec99de2016-06-30 13:58:26 -07003996
Peter Zijlstraf64c6012018-05-22 09:50:53 -07003997 if (per_cpu(rcu_cpu_started, cpu))
3998 return;
3999
4000 per_cpu(rcu_cpu_started, cpu) = 1;
4001
Paul E. McKenneyb97d23c2018-07-04 15:35:00 -07004002 rdp = per_cpu_ptr(&rcu_data, cpu);
4003 rnp = rdp->mynode;
4004 mask = rdp->grpmask;
4005 raw_spin_lock_irqsave_rcu_node(rnp, flags);
Paul E. McKenney105abf82020-01-03 15:44:23 -08004006 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
Wei Yangabfce042020-04-19 21:57:15 +00004007 newcpu = !(rnp->expmaskinitnext & mask);
Paul E. McKenneyb97d23c2018-07-04 15:35:00 -07004008 rnp->expmaskinitnext |= mask;
Paul E. McKenneyb97d23c2018-07-04 15:35:00 -07004009 /* Allow lockless access for expedited grace periods. */
Wei Yangabfce042020-04-19 21:57:15 +00004010 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
Paul E. McKenney2f084692020-02-10 05:29:58 -08004011 ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
Paul E. McKenneyb97d23c2018-07-04 15:35:00 -07004012 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
Paul E. McKenneyeb7a6652018-07-05 17:47:45 -07004013 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4014 rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
Paul E. McKenneyb97d23c2018-07-04 15:35:00 -07004015 if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
Joel Fernandes (Google)516e5ae2019-09-05 10:26:41 -07004016 rcu_disable_urgency_upon_qs(rdp);
Paul E. McKenneyb97d23c2018-07-04 15:35:00 -07004017 /* Report QS -after- changing ->qsmaskinitnext! */
4018 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4019 } else {
4020 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
Paul E. McKenney7ec99de2016-06-30 13:58:26 -07004021 }
Paul E. McKenney313517fc2017-06-08 16:55:40 -07004022 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
Paul E. McKenney7ec99de2016-06-30 13:58:26 -07004023}
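/*
 * Memory-ordering sketch (assumption about the reader side): the
 * smp_store_release() of rcu_state.ncpus above is intended to pair with an
 * acquire load in the expedited grace-period code, conceptually:
 *
 *	int ncpus = smp_load_acquire(&rcu_state.ncpus);  // pairs with ^^^
 *
 * so that a reader observing the incremented ->ncpus also observes the
 * ->qsmaskinitnext and ->expmaskinitnext bits set just before the release.
 */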
4024
Thomas Gleixner27d50c72016-02-26 18:43:44 +00004025#ifdef CONFIG_HOTPLUG_CPU
4026/*
Paul E. McKenneydeb34f32017-03-23 13:21:30 -07004027 * The outgoing CPU has no further need of RCU, so remove it from
Paul E. McKenney53b46302018-07-03 17:22:34 -07004028 * the rcu_node tree's ->qsmaskinitnext bit masks.
Paul E. McKenneydeb34f32017-03-23 13:21:30 -07004029 *
4030 * Note that this function is special in that it is invoked directly
4031 * from the outgoing CPU rather than from the cpuhp_step mechanism.
4032 * This is because this function must be invoked at a precise location.
4033 */
Thomas Gleixner27d50c72016-02-26 18:43:44 +00004034void rcu_report_dead(unsigned int cpu)
4035{
Paul E. McKenney53b46302018-07-03 17:22:34 -07004036 unsigned long flags;
4037 unsigned long mask;
4038 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4039 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
Thomas Gleixner27d50c72016-02-26 18:43:44 +00004040
Paul E. McKenney49918a52018-07-07 18:12:26 -07004041 /* QS for any half-done expedited grace period. */
Thomas Gleixner27d50c72016-02-26 18:43:44 +00004042 preempt_disable();
Paul E. McKenney63d4c8c2018-07-03 17:22:34 -07004043 rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
Thomas Gleixner27d50c72016-02-26 18:43:44 +00004044 preempt_enable();
Paul E. McKenney3e310092018-06-21 12:50:01 -07004045 rcu_preempt_deferred_qs(current);
Paul E. McKenney53b46302018-07-03 17:22:34 -07004046
4047 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4048 mask = rdp->grpmask;
Mike Galbraith894d45b2018-08-15 09:05:29 -07004049 raw_spin_lock(&rcu_state.ofl_lock);
Paul E. McKenney53b46302018-07-03 17:22:34 -07004050 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4051 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4052 rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4053 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4054 /* Report quiescent state -before- changing ->qsmaskinitnext! */
4055 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4056 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4057 }
Paul E. McKenney105abf82020-01-03 15:44:23 -08004058 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
Paul E. McKenney53b46302018-07-03 17:22:34 -07004059 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
Mike Galbraith894d45b2018-08-15 09:05:29 -07004060 raw_spin_unlock(&rcu_state.ofl_lock);
Peter Zijlstraf64c6012018-05-22 09:50:53 -07004061
4062 per_cpu(rcu_cpu_started, cpu) = 0;
Thomas Gleixner27d50c72016-02-26 18:43:44 +00004063}
Paul E. McKenneya58163d2017-06-20 12:11:34 -07004064
Paul E. McKenney53b46302018-07-03 17:22:34 -07004065/*
4066 * The outgoing CPU has just passed through the dying-idle state, and we
4067 * are being invoked from the CPU that was IPIed to continue the offline
4068 * operation. Migrate the outgoing CPU's callbacks to the current CPU.
4069 */
4070void rcutree_migrate_callbacks(int cpu)
Paul E. McKenneya58163d2017-06-20 12:11:34 -07004071{
4072 unsigned long flags;
Paul E. McKenneyb1a2d792017-06-26 12:23:46 -07004073 struct rcu_data *my_rdp;
Paul E. McKenneyc00045be2019-04-16 14:09:15 -07004074 struct rcu_node *my_rnp;
Paul E. McKenneyda1df502018-07-03 15:37:16 -07004075 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
Paul E. McKenneyec4eacc2018-04-22 08:49:24 -07004076 bool needwake;
Paul E. McKenneya58163d2017-06-20 12:11:34 -07004077
Paul E. McKenneyce5215c2019-04-12 15:58:34 -07004078 if (rcu_segcblist_is_offloaded(&rdp->cblist) ||
4079 rcu_segcblist_empty(&rdp->cblist))
Paul E. McKenney95335c02017-06-26 10:49:50 -07004080 return; /* No callbacks to migrate. */
4081
Paul E. McKenneyb1a2d792017-06-26 12:23:46 -07004082 local_irq_save(flags);
Paul E. McKenneyda1df502018-07-03 15:37:16 -07004083 my_rdp = this_cpu_ptr(&rcu_data);
Paul E. McKenneyc00045be2019-04-16 14:09:15 -07004084 my_rnp = my_rdp->mynode;
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07004085 rcu_nocb_lock(my_rdp); /* irqs already disabled. */
Paul E. McKenneyd1b222c2019-07-02 16:03:33 -07004086 WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies));
Paul E. McKenneyc00045be2019-04-16 14:09:15 -07004087 raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
Paul E. McKenneyec4eacc2018-04-22 08:49:24 -07004088 /* Leverage recent GPs and set GP for new callbacks. */
Paul E. McKenneyc00045be2019-04-16 14:09:15 -07004089 needwake = rcu_advance_cbs(my_rnp, rdp) ||
4090 rcu_advance_cbs(my_rnp, my_rdp);
Paul E. McKenneyf2dbe4a2017-06-27 07:44:06 -07004091 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
Paul E. McKenney23651d92019-07-10 12:54:56 -07004092 needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
Paul E. McKenneyc0352802019-05-21 08:28:41 -07004093 rcu_segcblist_disable(&rdp->cblist);
Paul E. McKenney09efeee2017-07-19 10:56:46 -07004094 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
4095 !rcu_segcblist_n_cbs(&my_rdp->cblist));
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07004096 if (rcu_segcblist_is_offloaded(&my_rdp->cblist)) {
4097 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4098 __call_rcu_nocb_wake(my_rdp, true, flags);
4099 } else {
4100 rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4101 raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4102 }
Paul E. McKenneyec4eacc2018-04-22 08:49:24 -07004103 if (needwake)
Paul E. McKenney532c00c2018-07-03 17:22:34 -07004104 rcu_gp_kthread_wake();
Paul E. McKenney5d6742b2019-05-15 09:56:40 -07004105 lockdep_assert_irqs_enabled();
Paul E. McKenneya58163d2017-06-20 12:11:34 -07004106 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4107 !rcu_segcblist_empty(&rdp->cblist),
 4108 "rcutree_migrate_callbacks: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4109 cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4110 rcu_segcblist_first_cb(&rdp->cblist));
4111}
Thomas Gleixner27d50c72016-02-26 18:43:44 +00004112#endif
4113
Paul E. McKenneydeb34f32017-03-23 13:21:30 -07004114/*
4115 * On non-huge systems, use expedited RCU grace periods to make suspend
4116 * and hibernation run faster.
4117 */
Borislav Petkovd1d74d12013-04-22 00:12:42 +02004118static int rcu_pm_notify(struct notifier_block *self,
4119 unsigned long action, void *hcpu)
4120{
4121 switch (action) {
4122 case PM_HIBERNATION_PREPARE:
4123 case PM_SUSPEND_PREPARE:
Paul E. McKenneye85e6a22019-01-10 15:30:15 -08004124 rcu_expedite_gp();
Borislav Petkovd1d74d12013-04-22 00:12:42 +02004125 break;
4126 case PM_POST_HIBERNATION:
4127 case PM_POST_SUSPEND:
Paul E. McKenneye85e6a22019-01-10 15:30:15 -08004128 rcu_unexpedite_gp();
Borislav Petkovd1d74d12013-04-22 00:12:42 +02004129 break;
4130 default:
4131 break;
4132 }
4133 return NOTIFY_OK;
4134}
4135
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004136/*
Paul E. McKenney49918a52018-07-07 18:12:26 -07004137 * Spawn the kthreads that handle RCU's grace periods.
Paul E. McKenneyb3dbec72012-06-18 18:36:08 -07004138 */
4139static int __init rcu_spawn_gp_kthread(void)
4140{
4141 unsigned long flags;
Paul E. McKenneya94844b2014-12-12 07:37:48 -08004142 int kthread_prio_in = kthread_prio;
Paul E. McKenneyb3dbec72012-06-18 18:36:08 -07004143 struct rcu_node *rnp;
Paul E. McKenneya94844b2014-12-12 07:37:48 -08004144 struct sched_param sp;
Paul E. McKenneyb3dbec72012-06-18 18:36:08 -07004145 struct task_struct *t;
4146
Paul E. McKenneya94844b2014-12-12 07:37:48 -08004147 /* Force priority into range. */
Joel Fernandes (Google)c7cd1612018-06-19 15:14:17 -07004148 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4149 && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4150 kthread_prio = 2;
4151 else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
Paul E. McKenneya94844b2014-12-12 07:37:48 -08004152 kthread_prio = 1;
4153 else if (kthread_prio < 0)
4154 kthread_prio = 0;
4155 else if (kthread_prio > 99)
4156 kthread_prio = 99;
Joel Fernandes (Google)c7cd1612018-06-19 15:14:17 -07004157
Paul E. McKenneya94844b2014-12-12 07:37:48 -08004158 if (kthread_prio != kthread_prio_in)
4159 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
4160 kthread_prio, kthread_prio_in);
4161
Paul E. McKenney9386c0b2014-07-13 12:00:53 -07004162 rcu_scheduler_fully_active = 1;
Paul E. McKenneyb97d23c2018-07-04 15:35:00 -07004163 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
Paul E. McKenney08543bd2018-10-22 08:04:03 -07004164 if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4165 return 0;
Paul E. McKenneyb97d23c2018-07-04 15:35:00 -07004166 if (kthread_prio) {
4167 sp.sched_priority = kthread_prio;
4168 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
Paul E. McKenneyb3dbec72012-06-18 18:36:08 -07004169 }
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004170 rnp = rcu_get_root();
Lai Jiangshan0c340292010-03-28 11:12:30 +08004171 raw_spin_lock_irqsave_rcu_node(rnp, flags);
Paul E. McKenney5648d652020-01-21 12:30:22 -08004172 WRITE_ONCE(rcu_state.gp_activity, jiffies);
4173 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4174 // Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4175 smp_store_release(&rcu_state.gp_kthread, t); /* ^^^ */
Paul E. McKenneyb97d23c2018-07-04 15:35:00 -07004176 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4177 wake_up_process(t);
Lai Jiangshan0c340292010-03-28 11:12:30 +08004178 rcu_spawn_nocb_kthreads();
Paul E. McKenney4a90a062010-04-14 16:48:11 -07004179 rcu_spawn_boost_kthreads();
Lai Jiangshan0c340292010-03-28 11:12:30 +08004180 return 0;
Lai Jiangshan394f99a2010-06-28 16:25:04 +08004181}
Lai Jiangshan0c340292010-03-28 11:12:30 +08004182early_initcall(rcu_spawn_gp_kthread);
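/*
 * Usage sketch: the kthread_prio value clamped above is normally set on the
 * kernel command line, for example:
 *
 *	rcutree.kthread_prio=2
 *
 * which runs the RCU grace-period kthread (and, with CONFIG_RCU_BOOST=y,
 * the boost kthreads) at SCHED_FIFO priority 2 via the
 * sched_setscheduler_nocheck() call above.
 */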
4183
Paul E. McKenney6ce75a22012-06-12 11:01:13 -07004184/*
Paul E. McKenney52d7e482017-01-10 02:28:26 -08004185 * This function is invoked towards the end of the scheduler's
4186 * initialization process. Before this is called, the idle task might
 4187 * invoke synchronous grace-period primitives (during which time, this idle
4188 * task is booting the system, and such primitives are no-ops). After this
4189 * function is called, any synchronous grace-period primitives are run as
4190 * expedited, with the requesting task driving the grace period forward.
Paul E. McKenney900b1022017-02-10 14:32:54 -08004191 * A later core_initcall() rcu_set_runtime_mode() will switch to full
Paul E. McKenney52d7e482017-01-10 02:28:26 -08004192 * runtime RCU functionality.
Paul E. McKenneyf885b7f2012-04-23 15:52:53 -07004193 */
4194void rcu_scheduler_starting(void)
4195{
4196 WARN_ON(num_online_cpus() != 1);
4197 WARN_ON(nr_context_switches() > 0);
Paul E. McKenney52d7e482017-01-10 02:28:26 -08004198 rcu_test_sync_prims();
4199 rcu_scheduler_active = RCU_SCHEDULER_INIT;
4200 rcu_test_sync_prims();
Paul E. McKenneyf885b7f2012-04-23 15:52:53 -07004201}
4202
4203/*
Paul E. McKenney49918a52018-07-07 18:12:26 -07004204 * Helper function for rcu_init() that initializes the rcu_state structure.
Paul E. McKenney4102ada2013-10-08 20:23:47 -07004205 */
Paul E. McKenneyb8bb1f62018-07-03 17:22:34 -07004206static void __init rcu_init_one(void)
Paul E. McKenneyf885b7f2012-04-23 15:52:53 -07004207{
Alexander Gordeevcb007102015-06-03 08:18:30 +02004208 static const char * const buf[] = RCU_NODE_NAME_INIT;
4209 static const char * const fqs[] = RCU_FQS_NAME_INIT;
Paul E. McKenney3dc5dbe2015-09-26 14:51:24 -07004210 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4211 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
Alexander Gordeev199977b2015-06-03 08:18:29 +02004212
Alexander Gordeev199977b2015-06-03 08:18:29 +02004213 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
Paul E. McKenney026ad282013-04-03 22:14:11 -07004214 int cpustride = 1;
4215 int i;
4216 int j;
4217 struct rcu_node *rnp;
4218
Alexander Gordeev05b84ae2015-06-03 08:18:28 +02004219 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
Paul E. McKenney026ad282013-04-03 22:14:11 -07004220
Paul E. McKenney3eaaaf6c2015-03-09 16:51:17 -07004221 /* Silence gcc 4.8 false positive about array index out of range. */
4222 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4223 panic("rcu_init_one: rcu_num_lvls out of range");
Paul E. McKenney026ad282013-04-03 22:14:11 -07004224
Paul E. McKenneyf885b7f2012-04-23 15:52:53 -07004225 /* Initialize the level-tracking arrays. */
Paul E. McKenneyb17c7032012-09-06 15:38:02 -07004226
Paul E. McKenney39479092013-10-09 15:20:33 -07004227 for (i = 1; i < rcu_num_lvls; i++)
Paul E. McKenneyeb7a6652018-07-05 17:47:45 -07004228 rcu_state.level[i] =
4229 rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
Paul E. McKenney41f5c632017-03-15 12:59:17 -07004230 rcu_init_levelspread(levelspread, num_rcu_lvl);
Paul E. McKenneyf885b7f2012-04-23 15:52:53 -07004231
Paul E. McKenney9f680ab2009-11-22 08:53:49 -08004232 /* Initialize the elements themselves, starting from the leaves. */
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07004233
Paul E. McKenney017c4262010-01-14 16:10:58 -08004234 for (i = rcu_num_lvls - 1; i >= 0; i--) {
Alexander Gordeev199977b2015-06-03 08:18:29 +02004235 cpustride *= levelspread[i];
Paul E. McKenneyeb7a6652018-07-05 17:47:45 -07004236 rnp = rcu_state.level[i];
Paul E. McKenney41f5c632017-03-15 12:59:17 -07004237 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
Boqun Feng67c583a72015-12-29 12:18:47 +08004238 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4239 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07004240 &rcu_node_class[i], buf[i]);
Jiang Fangb5b39362013-02-02 14:13:42 -08004241 raw_spin_lock_init(&rnp->fqslock);
Paul E. McKenney9f680ab2009-11-22 08:53:49 -08004242 lockdep_set_class_and_name(&rnp->fqslock,
4243 &rcu_fqs_class[i], fqs[i]);
Paul E. McKenneyeb7a6652018-07-05 17:47:45 -07004244 rnp->gp_seq = rcu_state.gp_seq;
4245 rnp->gp_seq_needed = rcu_state.gp_seq;
4246 rnp->completedqs = rcu_state.gp_seq;
Paul E. McKenney9f680ab2009-11-22 08:53:49 -08004247 rnp->qsmask = 0;
4248 rnp->qsmaskinit = 0;
4249 rnp->grplo = j * cpustride;
Borislav Petkovd1d74d12013-04-22 00:12:42 +02004250 rnp->grphi = (j + 1) * cpustride - 1;
Paul E. McKenney017c4262010-01-14 16:10:58 -08004251 if (rnp->grphi >= nr_cpu_ids)
4252 rnp->grphi = nr_cpu_ids - 1;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004253 if (i == 0) {
4254 rnp->grpnum = 0;
Paul E. McKenney4102ada2013-10-08 20:23:47 -07004255 rnp->grpmask = 0;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004256 rnp->parent = NULL;
4257 } else {
Alexander Gordeev199977b2015-06-03 08:18:29 +02004258 rnp->grpnum = j % levelspread[i - 1];
Paul E. McKenneydf63fa5b2018-07-31 09:49:20 -07004259 rnp->grpmask = BIT(rnp->grpnum);
Paul E. McKenneyeb7a6652018-07-05 17:47:45 -07004260 rnp->parent = rcu_state.level[i - 1] +
Alexander Gordeev199977b2015-06-03 08:18:29 +02004261 j / levelspread[i - 1];
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004262 }
4263 rnp->level = i;
4264 INIT_LIST_HEAD(&rnp->blkd_tasks);
4265 rcu_init_one_nocb(rnp);
Paul E. McKenneyf6a12f32016-01-30 17:57:35 -08004266 init_waitqueue_head(&rnp->exp_wq[0]);
4267 init_waitqueue_head(&rnp->exp_wq[1]);
Paul E. McKenney3b5f6682016-03-16 16:47:55 -07004268 init_waitqueue_head(&rnp->exp_wq[2]);
4269 init_waitqueue_head(&rnp->exp_wq[3]);
Paul E. McKenneyf6a12f32016-01-30 17:57:35 -08004270 spin_lock_init(&rnp->exp_lock);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004271 }
4272 }
4273
Paul E. McKenneyeb7a6652018-07-05 17:47:45 -07004274 init_swait_queue_head(&rcu_state.gp_wq);
4275 init_swait_queue_head(&rcu_state.expedited_wq);
Paul E. McKenneyaedf4ba2018-07-04 14:33:59 -07004276 rnp = rcu_first_leaf_node();
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004277 for_each_possible_cpu(i) {
4278 while (i > rnp->grphi)
4279 rnp++;
Paul E. McKenneyda1df502018-07-03 15:37:16 -07004280 per_cpu_ptr(&rcu_data, i)->mynode = rnp;
Paul E. McKenney53b46302018-07-03 17:22:34 -07004281 rcu_boot_init_percpu_data(i);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004282 }
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004283}
4284
4285/*
4286 * Compute the rcu_node tree geometry from kernel parameters. This cannot
4287 * replace the definitions in tree.h because those are needed to size
4288 * the ->node array in the rcu_state structure.
4289 */
4290static void __init rcu_init_geometry(void)
4291{
4292 ulong d;
4293 int i;
Alexander Gordeev05b84ae2015-06-03 08:18:28 +02004294 int rcu_capacity[RCU_NUM_LVLS];
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004295
4296 /*
4297 * Initialize any unspecified boot parameters.
4298 * The default values of jiffies_till_first_fqs and
4299 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4300 * value, which is a function of HZ, then adding one for each
4301 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4302 */
4303 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4304 if (jiffies_till_first_fqs == ULONG_MAX)
4305 jiffies_till_first_fqs = d;
4306 if (jiffies_till_next_fqs == ULONG_MAX)
4307 jiffies_till_next_fqs = d;
Neeraj Upadhyay69730322019-03-11 15:16:11 +05304308 adjust_jiffies_till_sched_qs();
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004309
4310 /* If the compile-time values are accurate, just leave. */
Paul E. McKenney47d631a2015-04-21 09:12:13 -07004311 if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004312 nr_cpu_ids == NR_CPUS)
4313 return;
Joe Perchesa7538352018-05-14 13:27:33 -07004314 pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004315 rcu_fanout_leaf, nr_cpu_ids);
4316
4317 /*
Paul E. McKenneyee968ac2015-07-31 08:28:35 -07004318 * The boot-time rcu_fanout_leaf parameter must be at least two
4319 * and cannot exceed the number of bits in the rcu_node masks.
 4320 * Complain and fall back to the compile-time values if either
 4321 * limit is violated.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004322 */
Paul E. McKenneyee968ac2015-07-31 08:28:35 -07004323 if (rcu_fanout_leaf < 2 ||
Alexander Gordeev75cf15a2015-06-03 08:18:23 +02004324 rcu_fanout_leaf > sizeof(unsigned long) * 8) {
Paul E. McKenney13bd64942015-06-04 10:06:01 -07004325 rcu_fanout_leaf = RCU_FANOUT_LEAF;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004326 WARN_ON(1);
4327 return;
4328 }
4329
Alexander Gordeev75cf15a2015-06-03 08:18:23 +02004330 /*
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004331 * Compute number of nodes that can be handled by an rcu_node tree
Alexander Gordeev96181382015-06-03 08:18:26 +02004332 * with the given number of levels.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004333 */
Alexander Gordeev96181382015-06-03 08:18:26 +02004334 rcu_capacity[0] = rcu_fanout_leaf;
Alexander Gordeev05b84ae2015-06-03 08:18:28 +02004335 for (i = 1; i < RCU_NUM_LVLS; i++)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004336 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4337
4338 /*
Alexander Gordeev75cf15a2015-06-03 08:18:23 +02004339 * The tree must be able to accommodate the configured number of CPUs.
Paul E. McKenneyee968ac2015-07-31 08:28:35 -07004340 * If this limit is exceeded, fall back to the compile-time values.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004341 */
Paul E. McKenneyee968ac2015-07-31 08:28:35 -07004342 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4343 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4344 WARN_ON(1);
4345 return;
4346 }
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004347
Alexander Gordeev679f9852015-06-03 08:18:25 +02004348 /* Calculate the number of levels in the tree. */
Alexander Gordeev96181382015-06-03 08:18:26 +02004349 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
Alexander Gordeev679f9852015-06-03 08:18:25 +02004350 }
Alexander Gordeev96181382015-06-03 08:18:26 +02004351 rcu_num_lvls = i + 1;
Alexander Gordeev679f9852015-06-03 08:18:25 +02004352
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004353 /* Calculate the number of rcu_nodes at each level of the tree. */
Alexander Gordeev679f9852015-06-03 08:18:25 +02004354 for (i = 0; i < rcu_num_lvls; i++) {
Alexander Gordeev96181382015-06-03 08:18:26 +02004355 int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
Alexander Gordeev679f9852015-06-03 08:18:25 +02004356 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4357 }
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004358
4359 /* Calculate the total number of rcu_node structures. */
4360 rcu_num_nodes = 0;
Alexander Gordeev679f9852015-06-03 08:18:25 +02004361 for (i = 0; i < rcu_num_lvls; i++)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004362 rcu_num_nodes += num_rcu_lvl[i];
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004363}
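/*
 * Worked example of the computation above (assuming rcu_fanout_leaf == 16,
 * RCU_FANOUT == 64, and nr_cpu_ids == 100):
 *
 *	rcu_capacity[0] = 16, rcu_capacity[1] = 16 * 64 = 1024
 *	100 > 16 but 100 <= 1024              =>  rcu_num_lvls = 2
 *	num_rcu_lvl[0] = DIV_ROUND_UP(100, 1024) = 1   (root)
 *	num_rcu_lvl[1] = DIV_ROUND_UP(100, 16)   = 7   (leaves)
 *	rcu_num_nodes  = 1 + 7 = 8
 */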
4364
Paul E. McKenneya3dc2942015-04-20 11:40:50 -07004365/*
4366 * Dump out the structure of the rcu_node combining tree associated
Paul E. McKenney49918a52018-07-07 18:12:26 -07004367 * with the rcu_state structure.
Paul E. McKenneya3dc2942015-04-20 11:40:50 -07004368 */
Paul E. McKenneyb8bb1f62018-07-03 17:22:34 -07004369static void __init rcu_dump_rcu_node_tree(void)
Paul E. McKenneya3dc2942015-04-20 11:40:50 -07004370{
4371 int level = 0;
4372 struct rcu_node *rnp;
4373
4374 pr_info("rcu_node tree layout dump\n");
4375 pr_info(" ");
Paul E. McKenneyaedf4ba2018-07-04 14:33:59 -07004376 rcu_for_each_node_breadth_first(rnp) {
Paul E. McKenneya3dc2942015-04-20 11:40:50 -07004377 if (rnp->level != level) {
4378 pr_cont("\n");
4379 pr_info(" ");
4380 level = rnp->level;
4381 }
4382 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
4383 }
4384 pr_cont("\n");
4385}
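/*
 * Output format sketch: each node is printed as "grplo:grphi ^grpnum", one
 * indented line per tree level, root first.  A hypothetical two-level,
 * 64-CPU tree with 16-CPU leaves might dump as:
 *
 *	rcu_node tree layout dump
 *	 0:63 ^0
 *	 0:15 ^0 16:31 ^1 32:47 ^2 48:63 ^3
 */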
4386
Paul E. McKenneyad7c9462018-01-08 14:35:52 -08004387struct workqueue_struct *rcu_gp_wq;
Paul E. McKenney25f3d7e2018-02-01 22:05:38 -08004388struct workqueue_struct *rcu_par_gp_wq;
Paul E. McKenneyad7c9462018-01-08 14:35:52 -08004389
Byungchul Parka35d1692019-08-05 18:22:27 -04004390static void __init kfree_rcu_batch_init(void)
4391{
4392 int cpu;
Joel Fernandes (Google)0392beb2019-09-19 14:58:26 -07004393 int i;
Byungchul Parka35d1692019-08-05 18:22:27 -04004394
4395 for_each_possible_cpu(cpu) {
4396 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02004397 struct kvfree_rcu_bulk_data *bnode;
Byungchul Parka35d1692019-08-05 18:22:27 -04004398
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01004399 for (i = 0; i < KFREE_N_BATCHES; i++) {
4400 INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
Joel Fernandes (Google)0392beb2019-09-19 14:58:26 -07004401 krcp->krw_arr[i].krcp = krcp;
Uladzislau Rezki (Sony)34c88172020-01-20 15:42:25 +01004402 }
4403
Uladzislau Rezki (Sony)53c72b52020-05-25 23:47:52 +02004404 for (i = 0; i < rcu_min_cached_objs; i++) {
Uladzislau Rezki (Sony)5f3c8d62020-05-25 23:47:53 +02004405 bnode = (struct kvfree_rcu_bulk_data *)
Uladzislau Rezki (Sony)53c72b52020-05-25 23:47:52 +02004406 __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
4407
4408 if (bnode)
4409 put_cached_bnode(krcp, bnode);
4410 else
4411 pr_err("Failed to preallocate for %d CPU!\n", cpu);
4412 }
4413
Byungchul Parka35d1692019-08-05 18:22:27 -04004414 INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
4415 krcp->initialized = true;
4416 }
Joel Fernandes (Google)91542442020-03-16 12:32:27 -04004417 if (register_shrinker(&kfree_rcu_shrinker))
4418 pr_err("Failed to register kfree_rcu() shrinker!\n");
Byungchul Parka35d1692019-08-05 18:22:27 -04004419}
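/*
 * Usage sketch (hypothetical "foo" type): the per-CPU caches initialized
 * above back the kfree_rcu() fast path, which callers use like this:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void foo_release(struct foo *p)
 *	{
 *		// Frees p after a grace period, batched through kfree_rcu_cpu.
 *		kfree_rcu(p, rh);
 *	}
 */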
4420
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004421void __init rcu_init(void)
4422{
4423 int cpu;
4424
Paul E. McKenney47627672015-01-19 21:10:21 -08004425 rcu_early_boot_tests();
4426
Byungchul Parka35d1692019-08-05 18:22:27 -04004427 kfree_rcu_batch_init();
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004428 rcu_bootup_announce();
4429 rcu_init_geometry();
Paul E. McKenneyb8bb1f62018-07-03 17:22:34 -07004430 rcu_init_one();
Paul E. McKenneya3dc2942015-04-20 11:40:50 -07004431 if (dump_tree)
Paul E. McKenneyb8bb1f62018-07-03 17:22:34 -07004432 rcu_dump_rcu_node_tree();
Sebastian Andrzej Siewior48d07c02019-03-20 22:13:33 +01004433 if (use_softirq)
4434 open_softirq(RCU_SOFTIRQ, rcu_core_si);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004435
4436 /*
4437 * We don't need protection against CPU-hotplug here because
4438 * this is called early in boot, before either interrupts
4439 * or the scheduler are operational.
4440 */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004441 pm_notifier(rcu_pm_notify, 0);
Paul E. McKenney7ec99de2016-06-30 13:58:26 -07004442 for_each_online_cpu(cpu) {
Thomas Gleixner4df83742016-07-13 17:17:03 +00004443 rcutree_prepare_cpu(cpu);
Paul E. McKenney7ec99de2016-06-30 13:58:26 -07004444 rcu_cpu_starting(cpu);
Paul E. McKenney9b9500d2017-08-17 17:05:59 -07004445 rcutree_online_cpu(cpu);
Paul E. McKenney7ec99de2016-06-30 13:58:26 -07004446 }
Paul E. McKenneyad7c9462018-01-08 14:35:52 -08004447
4448 /* Create workqueue for expedited GPs and for Tree SRCU. */
4449 rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4450 WARN_ON(!rcu_gp_wq);
Paul E. McKenney25f3d7e2018-02-01 22:05:38 -08004451 rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4452 WARN_ON(!rcu_par_gp_wq);
Paul E. McKenneye0fcba92018-08-14 08:45:54 -07004453 srcu_init();
Paul E. McKenneyb2b00dd2019-10-30 11:56:10 -07004454
4455 /* Fill in default value for rcutree.qovld boot parameter. */
4456 /* -After- the rcu_node ->lock fields are initialized! */
4457 if (qovld < 0)
4458 qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
4459 else
4460 qovld_calc = qovld;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004461}
4462
Paul E. McKenney10462d62019-01-11 16:10:57 -08004463#include "tree_stall.h"
Paul E. McKenney3549c2b2016-04-15 16:35:29 -07004464#include "tree_exp.h"
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01004465#include "tree_plugin.h"