/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/rtmutex.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/swait.h>
#include <linux/rcu_node_tree.h>

#include "rcu_segcblist.h"

/* Communicate arguments to a workqueue handler. */
struct rcu_exp_work {
	unsigned long rew_s;
	struct work_struct rew_work;
};
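
/*
 * Usage sketch (illustrative only, not part of this header): a workqueue
 * handler receives just its work_struct, so the enclosing rcu_exp_work is
 * recovered with container_of():
 *
 *	static void example_exp_handler(struct work_struct *wp)
 *	{
 *		struct rcu_exp_work *rewp =
 *			container_of(wp, struct rcu_exp_work, rew_work);
 *
 *		use_sequence_snapshot(rewp->rew_s); // hypothetical consumer
 *	}
 */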

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
					/*  some rcu_state fields as well as */
					/*  following. */
	unsigned long gp_seq;	/* Track rsp->gp_seq. */
	unsigned long gp_seq_needed; /* Track furthest future GP request. */
	unsigned long completedqs; /* All QSes done for this node. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed.*/
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure, otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long rcu_gp_init_mask;	/* Mask of offline CPUs at GP init. */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask. */
				/*  Initialized from ->qsmaskinitnext at the */
				/*  beginning of each grace period. */
	unsigned long qsmaskinitnext;
	unsigned long ofl_seq;	/* CPU-hotplug operation sequence count. */
				/* Online CPUs for next grace period. */
	unsigned long expmask;	/* CPUs or groups that need to check in */
				/*  to allow the current expedited GP */
				/*  to complete. */
	unsigned long expmaskinit;
				/* Per-GP initial values for expmask. */
				/*  Initialized from ->expmaskinitnext at the */
				/*  beginning of each expedited GP. */
	unsigned long expmaskinitnext;
				/* Online CPUs for next expedited GP. */
				/*  Any CPU that has ever been online will */
				/*  have its bit set. */
	unsigned long cbovldmask;
				/* CPUs experiencing callback overload. */
	unsigned long ffmask;	/* Fully functional CPUs. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU here. */
	int	grphi;		/* highest-numbered CPU here. */
	u8	grpnum;		/* group number for next level up. */
	u8	level;		/* root is at level 0. */
	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
				/*  exit RCU read-side critical sections */
				/*  before propagating offline up the */
				/*  rcu_node tree? */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure.  If there are no tasks */
				/*  queued on this rcu_node structure that */
				/*  are blocking the current grace period, */
				/*  there can be no such task. */
	struct rt_mutex boost_mtx;
				/* Used only for the priority-boosting */
				/*  side effect, not as a lock. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_boosts;	/* Number of boosts for this rcu_node structure. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_gp_wq[2];
				/* Place for rcu_nocb_kthread() to wait GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
	unsigned long exp_seq_rq;
	wait_queue_head_t exp_wq[4];
	struct rcu_exp_work rew;
	bool exp_need_flush;	/* Need to flush workitem? */
} ____cacheline_internodealigned_in_smp;

/*
 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
 * are indexed relative to this interval rather than the global CPU ID space.
 * This generates the bit for a CPU in node-local masks.
 */
#define leaf_node_cpu_bit(rnp, cpu)	(BIT((cpu) - (rnp)->grplo))
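
/*
 * Worked example (illustrative): for a leaf rcu_node covering CPUs 16-31
 * (->grplo == 16), leaf_node_cpu_bit(rnp, 19) expands to BIT(19 - 16),
 * that is, 0x8: the node-local bit for CPU 19 in masks such as ->qsmask.
 */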

/*
 * Union to allow "aggregate OR" operation on the need for a quiescent
 * state by the normal and expedited grace periods.
 */
union rcu_noqs {
	struct {
		u8 norm;
		u8 exp;
	} b; /* Bits. */
	u16 s; /* Set of bits, aggregate OR here. */
};
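
/*
 * Illustrative use (a sketch, assuming an rcu_data pointer rdp): because
 * ->b.norm and ->b.exp alias ->s, a single load of ->s answers "does
 * either GP type still need a quiescent state from this CPU?":
 *
 *	if (rdp->cpu_no_qs.s)			// norm and/or exp pending
 *		...
 *	rdp->cpu_no_qs.b.norm = false;		// report only the normal QS
 */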

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long	gp_seq;		/* Track rsp->gp_seq counter. */
	unsigned long	gp_seq_needed;	/* Track furthest future GP request. */
	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
	bool		core_needs_qs;	/* Core waits for quiescent state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		gpwrap;		/* Possible ->gp_seq wrap. */
	bool		cpu_started;	/* RCU watching this onlining CPU. */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy. */
	unsigned long	grpmask;	/* Mask to apply to leaf qsmask. */
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
					/*  ticks this CPU has handled */
					/*  during and after the last grace */
					/*  period it is aware of. */
	struct irq_work defer_qs_iw;	/* Obtain later scheduler attention. */
	bool defer_qs_iw_pending;	/* Scheduler attention pending? */
	struct work_struct strict_work;	/* Schedule readers for strict GPs. */

	/* 2) batch handling */
	struct rcu_segcblist cblist;	/* Segmented callback list, with */
					/*  different callbacks waiting for */
					/*  different grace periods. */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* # callbacks invoked since boot. */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */
	long dynticks_nesting;		/* Track process nesting level. */
	long dynticks_nmi_nesting;	/* Track irq/NMI nesting level. */
	atomic_t dynticks;		/* Even value for idle, else odd. */
	bool rcu_need_heavy_qs;		/* GP old, so heavy quiescent state! */
	bool rcu_urgent_qs;		/* GP old need light quiescent state. */
	bool rcu_forced_tick;		/* Forced tick to provide QS. */
	bool rcu_forced_tick_exp;	/*  ... provide QS to expedited GP. */

	/* 4) rcu_barrier(), OOM callbacks, and expediting. */
	struct rcu_head barrier_head;
	int exp_dynticks_snap;		/* Double-check need for IPI. */

	/* 5) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */
	struct swait_queue_head nocb_state_wq; /* For offloading state changes */
	struct task_struct *nocb_gp_kthread;
	raw_spinlock_t nocb_lock;	/* Guard following pair of fields. */
	atomic_t nocb_lock_contended;	/* Contention experienced. */
	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
	struct timer_list nocb_timer;	/* Enforce finite deferral. */
	unsigned long nocb_gp_adv_time;	/* Last call_rcu() CB adv (jiffies). */

	/* The following fields are used by call_rcu, hence own cacheline. */
	raw_spinlock_t nocb_bypass_lock ____cacheline_internodealigned_in_smp;
	struct rcu_cblist nocb_bypass;	/* Lock-contention-bypass CB list. */
	unsigned long nocb_bypass_first; /* Time (jiffies) of first enqueue. */
	unsigned long nocb_nobypass_last; /* Last ->cblist enqueue (jiffies). */
	int nocb_nobypass_count;	/* # ->cblist enqueues at ^^^ time. */

	/* The following fields are used by GP kthread, hence own cacheline. */
	raw_spinlock_t nocb_gp_lock ____cacheline_internodealigned_in_smp;
	u8 nocb_gp_sleep;		/* Is the nocb GP thread asleep? */
	u8 nocb_gp_bypass;		/* Found a bypass on last scan? */
	u8 nocb_gp_gp;			/* GP to wait for on last scan? */
	unsigned long nocb_gp_seq;	/*  If so, ->gp_seq to wait for. */
	unsigned long nocb_gp_loops;	/* # passes through wait code. */
	struct swait_queue_head nocb_gp_wq; /* For nocb kthreads to sleep on. */
	bool nocb_cb_sleep;		/* Is the nocb CB thread asleep? */
	struct task_struct *nocb_cb_kthread;
	struct list_head nocb_head_rdp; /*
					 * Head of rcu_data list in wakeup chain,
					 * if rdp_gp.
					 */
	struct list_head nocb_entry_rdp; /* rcu_data node in wakeup chain. */

	/* The following fields are used by CB kthread, hence new cacheline. */
	struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp;
					/* GP rdp takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 6) RCU priority boosting. */
	struct task_struct *rcu_cpu_kthread_task;
					/* rcuc per-CPU kthread or NULL. */
	unsigned int rcu_cpu_kthread_status;
	char rcu_cpu_has_work;

	/* 7) Diagnostic data, including RCU CPU stall warnings. */
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
	/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
	struct irq_work rcu_iw;		/* Check for non-irq activity. */
	bool rcu_iw_pending;		/* Is ->rcu_iw pending? */
	unsigned long rcu_iw_gp_seq;	/* ->gp_seq associated with ->rcu_iw. */
	unsigned long rcu_ofl_gp_seq;	/* ->gp_seq at last offline. */
	short rcu_ofl_gp_flags;		/* ->gp_flags at last offline. */
	unsigned long rcu_onl_gp_seq;	/* ->gp_seq at last online. */
	short rcu_onl_gp_flags;		/* ->gp_flags at last online. */
	unsigned long last_fqs_resched;	/* Time of last rcu_resched(). */

	int cpu;
};

/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOCB_WAKE_NOT	0
#define RCU_NOCB_WAKE_BYPASS	1
#define RCU_NOCB_WAKE		2
#define RCU_NOCB_WAKE_FORCE	3

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs and */
					/*  jiffies_till_next_fqs. */

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/*  delay between bouts of */
					/*  quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/*  at least one scheduling clock */
					/*  irq before ratting on them. */

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
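
/*
 * Usage sketch (illustrative): an RCU kthread can park itself until a
 * condition becomes true; "cond" is re-evaluated after every wakeup, so
 * spurious wakeups are harmless:
 *
 *	rcu_wait(READ_ONCE(my_kthread_has_work));   // hypothetical flag
 */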

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
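/*
 * For example (illustrative configuration): with CONFIG_RCU_FANOUT=16 on
 * a 256-CPU system there are two levels: ->node[0] is the root (referenced
 * by ->level[0]) and ->node[1] through ->node[16] are the sixteen leaves
 * (referenced by ->level[1]), each covering sixteen CPUs.
 */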
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS + 1];
						/* Hierarchy levels (+1 to */
						/*  shut bogus gcc warning) */
	int ncpus;				/* # CPUs seen so far. */
	int n_online_cpus;			/* # CPUs online for RCU. */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8	boost ____cacheline_internodealigned_in_smp;
						/* Subject to priority boost. */
	unsigned long gp_seq;			/* Grace-period sequence #. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	struct swait_queue_head gp_wq;		/* Where GP task waits. */
	short gp_flags;				/* Commands for GP task. */
	short gp_state;				/* GP kthread sleep state. */
	unsigned long gp_wake_time;		/* Last GP kthread wake. */
	unsigned long gp_wake_seq;		/* ->gp_seq at ^^^. */

	/* End of fields guarded by root rcu_node's lock. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long barrier_sequence;		/* ++ at start and end of */
						/*  rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	struct mutex exp_mutex;			/* Serialize expedited GP. */
	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
	unsigned long expedited_sequence;	/* Take a ticket. */
	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
	int ncpus_snap;				/* # CPUs seen last time. */
	u8 cbovld;				/* Callback overload now? */
	u8 cbovldnext;				/* ^        ^  next time? */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
						/*  kthreads, if configured. */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  but in jiffies. */
	unsigned long gp_end;			/* Time last GP ended, again */
						/*  in jiffies. */
	unsigned long gp_activity;		/* Time of last GP kthread */
						/*  activity in jiffies. */
	unsigned long gp_req_activity;		/* Time of last GP request */
						/*  in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	unsigned long jiffies_resched;		/* Time at which to resched */
						/*  a reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/*  GP start. */
	const char *name;			/* Name of structure. */
	char abbr;				/* Abbreviated name. */

	raw_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
						/* Synchronize offline with */
						/*  GP pre-initialization. */
};

/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */
#define RCU_GP_FLAG_OVLD 0x4	/* Experiencing callback overload. */

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
#define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
#define RCU_GP_DONE_GPS  2	/* Wait done for grace-period start. */
#define RCU_GP_ONOFF     3	/* Grace-period initialization hotplug. */
#define RCU_GP_INIT      4	/* Grace-period initialization. */
#define RCU_GP_WAIT_FQS  5	/* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 6	/* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP   7	/* Grace-period cleanup started. */
#define RCU_GP_CLEANED   8	/* Grace-period cleanup complete. */

/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added in the __tracepoint_string section.
 * This requires defining a separate variable tp_<sname>_varname
 * that points to the string being used, and this will allow
 * the tracing userspace tools to be able to decipher the string
 * address to the matching string.
 */
#ifdef CONFIG_PREEMPT_RCU
#define RCU_ABBR 'p'
#define RCU_NAME_RAW "rcu_preempt"
#else /* #ifdef CONFIG_PREEMPT_RCU */
#define RCU_ABBR 's'
#define RCU_NAME_RAW "rcu_sched"
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
#ifndef CONFIG_TRACING
#define RCU_NAME RCU_NAME_RAW
#else /* #ifdef CONFIG_TRACING */
static char rcu_name[] = RCU_NAME_RAW;
static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
#define RCU_NAME rcu_name
#endif /* #else #ifdef CONFIG_TRACING */

/* Forward declarations for tree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_qs(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_flavor_sched_clock_irq(int user);
static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static bool rcu_is_callbacks_kthread(void);
static void rcu_cpu_kthread_setup(unsigned int cpu);
static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp);
static void __init rcu_spawn_boost_kthreads(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
static void rcu_preempt_deferred_qs(struct task_struct *t);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j);
static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				bool *was_alldone, unsigned long flags);
static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
				 unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level);
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_cpu_nocb_kthread(int cpu);
static void __init rcu_spawn_nocb_kthreads(void);
static void show_rcu_nocb_state(struct rcu_data *rdp);
static void rcu_nocb_lock(struct rcu_data *rdp);
static void rcu_nocb_unlock(struct rcu_data *rdp);
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags);
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(void);

/*
 * Disable IRQs before checking offloaded state so that local
 * locking is safe against concurrent de-offloading.
 */
#define rcu_nocb_lock_irqsave(rdp, flags)			\
do {								\
	local_irq_save(flags);					\
	if (rcu_segcblist_is_offloaded(&(rdp)->cblist))		\
		raw_spin_lock(&(rdp)->nocb_lock);		\
} while (0)
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
#define rcu_nocb_lock_irqsave(rdp, flags) local_irq_save(flags)
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
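
/*
 * Usage sketch (illustrative): the macro above pairs with
 * rcu_nocb_unlock_irqrestore(), declared earlier in this file:
 *
 *	unsigned long flags;
 *
 *	rcu_nocb_lock_irqsave(rdp, flags);
 *	// ... manipulate rdp->cblist, locked only if offloaded ...
 *	rcu_nocb_unlock_irqrestore(rdp, flags);
 */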

static void rcu_bind_gp_kthread(void);
static bool rcu_nohz_full_cpu(void);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);
static void rcu_dynticks_task_trace_enter(void);
static void rcu_dynticks_task_trace_exit(void);

/* Forward declarations for tree_stall.h */
static void record_gp_stall_check_time(void);
static void rcu_iw_handler(struct irq_work *iwp);
static void check_cpu_stall(struct rcu_data *rdp);
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay);