/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Task-based RCU implementations.
 *
 * Copyright (C) 2020 Paul E. McKenney
 */

#ifdef CONFIG_TASKS_RCU_GENERIC

////////////////////////////////////////////////////////////////////////
//
// Generic data structures.

struct rcu_tasks;
typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(void);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(struct list_head *hop);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(struct rcu_tasks *rtp);

/**
 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
 * @cbs_head: Head of callback list.
 * @cbs_tail: Tail pointer for callback list.
 * @cbs_pcpu_lock: Lock protecting per-CPU callback list.
 */
struct rcu_tasks_percpu {
	struct rcu_head *cbs_head;
	struct rcu_head **cbs_tail;
	raw_spinlock_t cbs_pcpu_lock;
};

/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_wq: Wait queue allowing new callback to get kthread's attention.
 * @cbs_gbl_lock: Lock protecting callback list.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @gp_func: This flavor's grace-period-wait function.
 * @gp_state: Grace period's most recent state transition (debugging).
 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
 * @init_fract: Initial backoff sleep interval.
 * @gp_jiffies: Time of last @gp_state transition.
 * @gp_start: Most recent grace-period start in jiffies.
 * @tasks_gp_seq: Number of grace periods completed since boot.
 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
 * @n_ipis_fails: Number of IPI-send failures.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */
struct rcu_tasks {
	struct wait_queue_head cbs_wq;
	raw_spinlock_t cbs_gbl_lock;
	int gp_state;
	int gp_sleep;
	int init_fract;
	unsigned long gp_jiffies;
	unsigned long gp_start;
	unsigned long tasks_gp_seq;
	unsigned long n_ipis;
	unsigned long n_ipis_fails;
	struct task_struct *kthread_ptr;
	rcu_tasks_gp_func_t gp_func;
	pregp_func_t pregp_func;
	pertask_func_t pertask_func;
	postscan_func_t postscan_func;
	holdouts_func_t holdouts_func;
	postgp_func_t postgp_func;
	call_rcu_func_t call_func;
	struct rcu_tasks_percpu __percpu *rtpcpu;
	int percpu_enqueue_shift;
	char *name;
	char *kname;
};

#define DEFINE_RCU_TASKS(rt_name, gp, call, n)						\
static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = {		\
	.cbs_pcpu_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock),	\
};											\
static struct rcu_tasks rt_name =							\
{											\
	.cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq),			\
	.cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock),			\
	.gp_func = gp,									\
	.call_func = call,								\
	.rtpcpu = &rt_name ## __percpu,							\
	.name = n,									\
	.percpu_enqueue_shift = ilog2(CONFIG_NR_CPUS),					\
	.kname = #rt_name,								\
}

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Avoid IPIing CPUs early in the grace period. */
#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);

/* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);

/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT		 0
#define RTGS_WAIT_WAIT_CBS	 1
#define RTGS_WAIT_GP		 2
#define RTGS_PRE_WAIT_GP	 3
#define RTGS_SCAN_TASKLIST	 4
#define RTGS_POST_SCAN_TASKLIST	 5
#define RTGS_WAIT_SCAN_HOLDOUTS	 6
#define RTGS_SCAN_HOLDOUTS	 7
#define RTGS_POST_GP		 8
#define RTGS_WAIT_READERS	 9
#define RTGS_INVOKE_CBS		10
#define RTGS_WAIT_CBS		11
#ifndef CONFIG_TINY_RCU
static const char * const rcu_tasks_gp_state_names[] = {
	"RTGS_INIT",
	"RTGS_WAIT_WAIT_CBS",
	"RTGS_WAIT_GP",
	"RTGS_PRE_WAIT_GP",
	"RTGS_SCAN_TASKLIST",
	"RTGS_POST_SCAN_TASKLIST",
	"RTGS_WAIT_SCAN_HOLDOUTS",
	"RTGS_SCAN_HOLDOUTS",
	"RTGS_POST_GP",
	"RTGS_WAIT_READERS",
	"RTGS_INVOKE_CBS",
	"RTGS_WAIT_CBS",
};
#endif /* #ifndef CONFIG_TINY_RCU */

////////////////////////////////////////////////////////////////////////
//
// Generic code.

/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{
	rtp->gp_state = newstate;
	rtp->gp_jiffies = jiffies;
}

#ifndef CONFIG_TINY_RCU
/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{
	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
	int j = READ_ONCE(i); // Prevent the compiler from reading twice

	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
		return "???";
	return rcu_tasks_gp_state_names[j];
}
#endif /* #ifndef CONFIG_TINY_RCU */

// Initialize per-CPU callback lists for the specified flavor of
// Tasks RCU.
static void cblist_init_generic(struct rcu_tasks *rtp)
{
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
	rtp->percpu_enqueue_shift = ilog2(nr_cpu_ids);
	for_each_possible_cpu(cpu) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		WARN_ON_ONCE(!rtpcp);
		if (cpu)
			raw_spin_lock_init(&rtpcp->cbs_pcpu_lock);
		raw_spin_lock(&rtpcp->cbs_pcpu_lock); // irqs already disabled.
		if (!WARN_ON_ONCE(rtpcp->cbs_tail))
			rtpcp->cbs_tail = &rtpcp->cbs_head;
		raw_spin_unlock(&rtpcp->cbs_pcpu_lock); // irqs remain disabled.
	}
	raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);

}

// Enqueue a callback for the specified flavor of Tasks RCU.
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
				   struct rcu_tasks *rtp)
{
	unsigned long flags;
	bool needwake;
	struct rcu_tasks_percpu *rtpcp;

	rhp->next = NULL;
	rhp->func = func;
	local_irq_save(flags);
	rtpcp = per_cpu_ptr(rtp->rtpcpu,
			    smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift));
	raw_spin_lock(&rtpcp->cbs_pcpu_lock);
	if (!rtpcp->cbs_tail) {
		raw_spin_unlock(&rtpcp->cbs_pcpu_lock); // irqs remain disabled.
		cblist_init_generic(rtp);
		raw_spin_lock(&rtpcp->cbs_pcpu_lock); // irqs already disabled.
	}
	needwake = !rtpcp->cbs_head;
	WRITE_ONCE(*rtpcp->cbs_tail, rhp);
	rtpcp->cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rtpcp->cbs_pcpu_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rtp->kthread_ptr))
		wake_up(&rtp->cbs_wq);
}
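
/*
 * Illustrative arithmetic, not part of the implementation: with the
 * default .percpu_enqueue_shift of ilog2(CONFIG_NR_CPUS) (or, after
 * cblist_init_generic(), ilog2(nr_cpu_ids)), and assuming a power-of-two
 * CPU count such as CONFIG_NR_CPUS=64, every CPU ID shifts down to zero
 * (63 >> 6 == 0), so all callbacks are funneled onto CPU 0's
 * rcu_tasks_percpu list.  A hypothetical shift of zero would instead
 * give each CPU its own callback list.
 */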

// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
	/* Complain if the scheduler has not started. */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(rtp->call_func);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct rcu_head *list;
	struct rcu_head *next;
	struct rcu_tasks *rtp = arg;

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current, HK_FLAG_RCU);
	WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, 0); // for_each...

		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rtpcp->cbs_pcpu_lock, flags);
		smp_mb__after_spinlock(); // Order updates vs. GP.
		list = rtpcp->cbs_head;
		rtpcp->cbs_head = NULL;
		rtpcp->cbs_tail = &rtpcp->cbs_head;
		raw_spin_unlock_irqrestore(&rtpcp->cbs_pcpu_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rtp->cbs_wq,
						 READ_ONCE(rtpcp->cbs_head));
			if (!rtpcp->cbs_head) {
				WARN_ON(signal_pending(current));
				set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS);
				schedule_timeout_idle(HZ/10);
			}
			continue;
		}

		// Wait for one grace period.
		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
		rtp->gp_start = jiffies;
		rcu_seq_start(&rtp->tasks_gp_seq);
		rtp->gp_func(rtp);
		rcu_seq_end(&rtp->tasks_gp_seq);

		/* Invoke the callbacks. */
		set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		/* Paranoid sleep to keep this from entering a tight loop */
		schedule_timeout_idle(rtp->gp_sleep);
	}
}

/* Spawn RCU-tasks grace-period kthread. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
		return;
	smp_mb(); /* Ensure others see full kthread. */
}

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU
	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifndef CONFIG_TINY_RCU
/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
	struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, 0); // for_each...

	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
		rtp->kname,
		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
		jiffies - data_race(rtp->gp_jiffies),
		data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
		".k"[!!data_race(rtp->kthread_ptr)],
		".C"[!!data_race(rtpcp->cbs_head)],
		s);
}
#endif // #ifndef CONFIG_TINY_RCU

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)

////////////////////////////////////////////////////////////////////////
//
// Shared code between task-list-scanning variants of Tasks RCU.

/* Wait for one RCU-tasks grace period. */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
	struct task_struct *g, *t;
	unsigned long lastreport;
	LIST_HEAD(holdouts);
	int fract;

	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
	rtp->pregp_func();

	/*
	 * There were callbacks, so we need to wait for an RCU-tasks
	 * grace period.  Start off by scanning the task list for tasks
	 * that are not already voluntarily blocked.  Mark these tasks
	 * and make a list of them in holdouts.
	 */
	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
	rcu_read_lock();
	for_each_process_thread(g, t)
		rtp->pertask_func(t, &holdouts);
	rcu_read_unlock();

	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
	rtp->postscan_func(&holdouts);

	/*
	 * Each pass through the following loop scans the list of holdout
	 * tasks, removing any that are no longer holdouts.  When the list
	 * is empty, we are done.
	 */
	lastreport = jiffies;

	// Start off with initial wait and slowly back off to 1 HZ wait.
	fract = rtp->init_fract;

	while (!list_empty(&holdouts)) {
		bool firstreport;
		bool needreport;
		int rtst;

		/* Slowly back off waiting for holdouts */
		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
		schedule_timeout_idle(fract);

		if (fract < HZ)
			fract++;

		rtst = READ_ONCE(rcu_task_stall_timeout);
		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
		if (needreport)
			lastreport = jiffies;
		firstreport = true;
		WARN_ON(signal_pending(current));
		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
		rtp->holdouts_func(&holdouts, needreport, &firstreport);
	}

	set_tasks_gp_state(rtp, RTGS_POST_GP);
	rtp->postgp_func(rtp);
}

#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */

#ifdef CONFIG_TASKS_RCU

////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time.  There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs.  If this is required, per-CPU callback lists
// will be needed.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_pregp_step():
//	Invokes synchronize_rcu() in order to wait for all in-flight
//	t->on_rq and t->nvcsw transitions to complete.  This works because
//	all such transitions are carried out with interrupts disabled.
// rcu_tasks_pertask(), invoked on every non-idle task:
//	For every runnable non-idle task other than the current one, use
//	get_task_struct() to pin down that task, snapshot that task's
//	number of voluntary context switches, and add that task to the
//	holdout list.
// rcu_tasks_postscan():
//	Invoke synchronize_srcu() to ensure that all tasks that were
//	in the process of exiting (and which thus might not know to
//	synchronize with this RCU Tasks grace period) have completed
//	exiting.
// check_all_holdout_tasks(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_postgp():
//	Invokes synchronize_rcu() in order to ensure that all prior
//	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
//	to have happened before the end of this RCU Tasks grace period.
//	Again, this works because all such transitions are carried out
//	with interrupts disabled.
//
// For each exiting task, the exit_tasks_rcu_start() and
// exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
// read-side critical sections waited for by rcu_tasks_postscan().
//
// Pre-grace-period update-side code is ordered before the grace period
// via the ->cbs_pcpu_lock and the smp_mb__after_spinlock().  Pre-grace-period
// read-side code is ordered before the grace period via the synchronize_rcu()
// call in rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
// disabling.

/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(void)
{
	/*
	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
	 * to complete.  Invoking synchronize_rcu() suffices because all
	 * these transitions occur with interrupts disabled.  Without this
	 * synchronize_rcu(), a read-side critical section that started
	 * before the grace period might be incorrectly seen as having
	 * started after the grace period.
	 *
	 * This synchronize_rcu() also dispenses with the need for a
	 * memory barrier on the first store to t->rcu_tasks_holdout,
	 * as it forces the store to happen after the beginning of the
	 * grace period.
	 */
	synchronize_rcu();
}

/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
	if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
		get_task_struct(t);
		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
		WRITE_ONCE(t->rcu_tasks_holdout, true);
		list_add(&t->rcu_tasks_holdout_list, hop);
	}
}

/* Processing between scanning the task list and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{
	/*
	 * Wait for tasks that are in the process of exiting.  This
	 * does only part of the job, ensuring that all tasks that were
	 * previously exiting reach the point where they have disabled
	 * preemption, allowing the later synchronize_rcu() to finish
	 * the job.
	 */
	synchronize_srcu(&tasks_rcu_exit_srcu);
}

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* Scan the holdout lists for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
				    bool needreport, bool *firstreport)
{
	struct task_struct *t, *t1;

	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
		check_holdout_task(t, needreport, firstreport);
		cond_resched();
	}
}

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{
	/*
	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
	 * memory barriers prior to them in the schedule() path, memory
	 * reordering on other CPUs could cause their RCU-tasks read-side
	 * critical sections to extend past the end of the grace period.
	 * However, because these ->nvcsw updates are carried out with
	 * interrupts disabled, we can use synchronize_rcu() to force the
	 * needed ordering on all such CPUs.
	 *
	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
	 * accesses to be within the grace period, avoiding the need for
	 * memory barriers for ->rcu_tasks_holdout accesses.
	 *
	 * In addition, this synchronize_rcu() waits for exiting tasks
	 * to complete their final preempt_disable() region of execution,
	 * cleaning up after the synchronize_srcu() above.
	 */
	synchronize_rcu();
}

void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
 * or transition to usermode execution.  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
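
/*
 * Illustrative (and hypothetical) usage sketch, not part of this file: a
 * synchronous updater can unhook a trampoline, wait, and then free it
 * directly, trading grace-period latency for not needing an rcu_head.
 * Here unregister_my_tramp() and "tp" stand in for caller-supplied code
 * and data:
 *
 *	unregister_my_tramp(tp);	// No task can newly enter the trampoline.
 *	synchronize_rcu_tasks();	// All tasks have now left it.
 *	kfree(tp);
 */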

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

static int __init rcu_spawn_tasks_kthread(void)
{
	cblist_init_generic(&rcu_tasks);
	rcu_tasks.gp_sleep = HZ / 10;
	rcu_tasks.init_fract = HZ / 10;
	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
	rcu_tasks.pertask_func = rcu_tasks_pertask;
	rcu_tasks.postscan_func = rcu_tasks_postscan;
	rcu_tasks.holdouts_func = check_all_holdout_tasks;
	rcu_tasks.postgp_func = rcu_tasks_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_classic_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

/* Do the srcu_read_lock() for the above synchronize_srcu(). */
void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
{
	preempt_disable();
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
	preempt_enable();
}

/* Do the srcu_read_unlock() for the above synchronize_srcu(). */
void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
{
	struct task_struct *t = current;

	preempt_disable();
	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
	preempt_enable();
	exit_tasks_rcu_finish_trace(t);
}

#else /* #ifdef CONFIG_TASKS_RCU */
void exit_tasks_rcu_start(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_RUDE_RCU

////////////////////////////////////////////////////////////////////////
//
// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
// passing an empty function to schedule_on_each_cpu().  This approach
// provides an asynchronous call_rcu_tasks_rude() API and batching of
// concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
// This invokes schedule_on_each_cpu() in order to send IPIs far and wide
// and induces otherwise unnecessary context switches on all online CPUs,
// whether idle or not.
//
// Callback handling is provided by the rcu_tasks_kthread() function.
//
// Ordering is provided by the scheduler's context-switch code.

// Empty function to allow workqueues to force a context switch.
static void rcu_tasks_be_rude(struct work_struct *work)
{
}

// Wait for one rude RCU-tasks grace period.
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
	rtp->n_ipis += cpumask_weight(cpu_online_mask);
	schedule_on_each_cpu(rcu_tasks_be_rude);
}

void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
		 "RCU Tasks Rude");

/**
 * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks_rude()
 * assumes that the read-side critical sections end at context switch,
 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
 * usermode execution is schedulable).  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
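
/*
 * Illustrative reader-side sketch, not part of this file: rude grace
 * periods wait only for each CPU to pass through a context switch, so a
 * region of code that runs entirely with preemption disabled serves as
 * the (implicit, unmarked) read-side critical section.  Assuming a
 * hypothetical handler that an updater patches in and out:
 *
 *	preempt_disable();
 *	my_patched_handler(regs);	// Hypothetical caller-supplied code.
 *	preempt_enable();		// A later context switch is the QS.
 *
 * There is no rcu_read_lock()-style marker; the guarantee comes from the
 * scheduler's context-switch code and the schedule_on_each_cpu() above.
 */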

/**
 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
 *
 * Control will return to the caller some time after a rude rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
 * context), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_rude(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);

/**
 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_rude(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks_rude();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);

static int __init rcu_spawn_tasks_rude_kthread(void)
{
	cblist_init_generic(&rcu_tasks_rude);
	rcu_tasks_rude.gp_sleep = HZ / 10;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_rude_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */

////////////////////////////////////////////////////////////////////////
//
// Tracing variant of Tasks RCU.  This variant is designed to be used
// to protect tracing hooks, including those of BPF.  This variant
// therefore:
//
// 1.	Has explicit read-side markers to allow finite grace periods
//	in the face of in-kernel loops for PREEMPT=n builds.
//
// 2.	Protects code in the idle loop, exception entry/exit, and
//	CPU-hotplug code paths, similar to the capabilities of SRCU.
//
// 3.	Avoids expensive read-side instructions, having overhead similar
//	to that of Preemptible RCU.
//
// There are of course downsides.  The grace-period code can send IPIs to
// CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
// It is necessary to scan the full tasklist, much as for Tasks RCU.  There
// is a single callback queue guarded by a single lock, again, much as for
// Tasks RCU.  If needed, these downsides can be at least partially remedied.
//
// Perhaps most important, this variant of RCU does not affect the vanilla
// flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
// readers can operate from idle, offline, and exception entry/exit in no
// way allows rcu_preempt and rcu_sched readers to also do so.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_trace_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_trace_pregp_step():
//	Initialize the count of readers and block CPU-hotplug operations.
// rcu_tasks_trace_pertask(), invoked on every non-idle task:
//	Initialize per-task state and attempt to identify an immediate
//	quiescent state for that task, or, failing that, attempt to
//	set that task's .need_qs flag so that task's next outermost
//	rcu_read_unlock_trace() will report the quiescent state (in which
//	case the count of readers is incremented).  If both attempts fail,
//	the task is added to a "holdout" list.  Note that IPIs are used
//	to invoke trc_read_check_handler() in the context of running tasks
//	in order to avoid ordering overhead on common-case shared-variable
//	accesses.
// rcu_tasks_trace_postscan():
//	Initialize state and attempt to identify an immediate quiescent
//	state as above (but only for idle tasks), unblock CPU-hotplug
//	operations, and wait for an RCU grace period to avoid races with
//	tasks that are in the process of exiting.
// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_trace_postgp():
//	Wait for the count of readers to drop to zero, reporting any stalls.
//	Also execute full memory barriers to maintain ordering with code
//	executing after the grace period.
//
// The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
//
// Pre-grace-period update-side code is ordered before the grace
// period via the ->cbs_pcpu_lock and barriers in rcu_tasks_kthread().
// Pre-grace-period read-side code is ordered before the grace period by
// atomic_dec_and_test() of the count of readers (for IPIed readers) and by
// scheduler context-switch ordering (for locked-down non-running readers).

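/*
 * Illustrative (and hypothetical) usage sketch, not part of this file:
 * unlike the other flavors, Tasks Trace RCU has explicit read-side
 * markers, declared in include/linux/rcupdate_trace.h.  A tracing-hook
 * reader and a matching updater might look as follows, where "gp" (an
 * RCU-protected pointer published elsewhere with rcu_assign_pointer())
 * and do_hook() stand in for caller-supplied data and code:
 *
 *	// Reader, possibly running in the idle loop or exception entry/exit:
 *	rcu_read_lock_trace();
 *	p = rcu_dereference_raw(gp);
 *	if (p)
 *		do_hook(p);
 *	rcu_read_unlock_trace();
 *
 *	// Updater:
 *	old = rcu_replace_pointer(gp, new, true);
 *	synchronize_rcu_tasks_trace();	// Or call_rcu_tasks_trace().
 *	kfree(old);
 *
 * The grace period ends only after every task has exited any
 * rcu_read_lock_trace() critical section it was within at the start of
 * that grace period.
 */
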
868// The lockdep state must be outside of #ifdef to be useful.
869#ifdef CONFIG_DEBUG_LOCK_ALLOC
870static struct lock_class_key rcu_lock_trace_key;
871struct lockdep_map rcu_trace_lock_map =
872 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
873EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
874#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
875
876#ifdef CONFIG_TASKS_TRACE_RCU
877
Paul E. McKenney30d8aa52020-06-09 09:24:51 -0700878static atomic_t trc_n_readers_need_end; // Number of waited-for readers.
879static DECLARE_WAIT_QUEUE_HEAD(trc_wait); // List of holdout tasks.
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -0700880
881// Record outstanding IPIs to each CPU. No point in sending two...
882static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
883
Paul E. McKenney40471502020-03-22 13:34:34 -0700884// The number of detections of task quiescent state relying on
885// heavyweight readers executing explicit memory barriers.
Paul E. McKenney6731da92020-09-09 14:14:34 -0700886static unsigned long n_heavy_reader_attempts;
887static unsigned long n_heavy_reader_updates;
888static unsigned long n_heavy_reader_ofl_updates;
Paul E. McKenney40471502020-03-22 13:34:34 -0700889
Paul E. McKenneyb0afa0f2020-03-17 11:39:26 -0700890void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
891DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
892 "RCU Tasks Trace");
893
Paul E. McKenneyb38f57c2020-03-20 14:29:08 -0700894/*
895 * This irq_work handler allows rcu_read_unlock_trace() to be invoked
896 * while the scheduler locks are held.
897 */
898static void rcu_read_unlock_iw(struct irq_work *iwp)
899{
900 wake_up(&trc_wait);
901}
902static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);
903
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -0700904/* If we are the last reader, wake up the grace-period kthread. */
Paul E. McKenneya5c071c2021-07-28 12:28:27 -0700905void rcu_read_unlock_trace_special(struct task_struct *t)
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -0700906{
Paul E. McKenneyf8ab3fa2021-05-24 15:36:37 -0700907 int nq = READ_ONCE(t->trc_reader_special.b.need_qs);
Paul E. McKenney276c4102020-03-17 16:02:06 -0700908
Paul E. McKenney9ae58d72020-03-18 17:16:37 -0700909 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
910 t->trc_reader_special.b.need_mb)
Paul E. McKenney276c4102020-03-17 16:02:06 -0700911 smp_mb(); // Pairs with update-side barriers.
912 // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
913 if (nq)
914 WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
Paul E. McKenneya5c071c2021-07-28 12:28:27 -0700915 WRITE_ONCE(t->trc_reader_nesting, 0);
Paul E. McKenney276c4102020-03-17 16:02:06 -0700916 if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
Paul E. McKenneyb38f57c2020-03-20 14:29:08 -0700917 irq_work_queue(&rcu_tasks_trace_iw);
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -0700918}
919EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
920
921/* Add a task to the holdout list, if it is not already on the list. */
922static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
923{
924 if (list_empty(&t->trc_holdout_list)) {
925 get_task_struct(t);
926 list_add(&t->trc_holdout_list, bhp);
927 }
928}
929
930/* Remove a task from the holdout list, if it is in fact present. */
931static void trc_del_holdout(struct task_struct *t)
932{
933 if (!list_empty(&t->trc_holdout_list)) {
934 list_del_init(&t->trc_holdout_list);
935 put_task_struct(t);
936 }
937}
938
939/* IPI handler to check task state. */
940static void trc_read_check_handler(void *t_in)
941{
942 struct task_struct *t = current;
943 struct task_struct *texp = t_in;
944
945 // If the task is no longer running on this CPU, leave.
946 if (unlikely(texp != t)) {
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -0700947 goto reset_ipi; // Already on holdout list, so will check later.
948 }
949
950 // If the task is not in a read-side critical section, and
951 // if this is the last reader, awaken the grace-period kthread.
Paul E. McKenneybdb0cca2021-05-24 12:48:18 -0700952 if (likely(!READ_ONCE(t->trc_reader_nesting))) {
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -0700953 WRITE_ONCE(t->trc_reader_checked, true);
954 goto reset_ipi;
955 }
Paul E. McKenneyba3a86e2020-09-14 15:44:37 -0700956 // If we are racing with an rcu_read_unlock_trace(), try again later.
Paul E. McKenney96017bf2021-07-28 10:53:41 -0700957 if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0))
Paul E. McKenneyba3a86e2020-09-14 15:44:37 -0700958 goto reset_ipi;
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -0700959 WRITE_ONCE(t->trc_reader_checked, true);
960
961 // Get here if the task is in a read-side critical section. Set
962 // its state so that it will awaken the grace-period kthread upon
963 // exit from that critical section.
Paul E. McKenney96017bf2021-07-28 10:53:41 -0700964 atomic_inc(&trc_n_readers_need_end); // One more to wait on.
Paul E. McKenneyf8ab3fa2021-05-24 15:36:37 -0700965 WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
Paul E. McKenney276c4102020-03-17 16:02:06 -0700966 WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -0700967
968reset_ipi:
969 // Allow future IPIs to be sent on CPU and for task.
970 // Also order this IPI handler against any later manipulations of
971 // the intended task.
Liu Song8211e922021-06-30 22:08:02 +0800972 smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -0700973 smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
974}
975
976/* Callback function for scheduler to check locked-down task. */
Peter Zijlstra9b3c4ab2021-09-21 21:54:32 +0200977static int trc_inspect_reader(struct task_struct *t, void *arg)
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -0700978{
Paul E. McKenney7d0c9c52020-03-19 15:33:12 -0700979 int cpu = task_cpu(t);
Paul E. McKenney18f08e72021-07-28 11:32:28 -0700980 int nesting;
Paul E. McKenney7e3b70e2020-03-22 11:24:58 -0700981 bool ofl = cpu_is_offline(cpu);
Paul E. McKenney7d0c9c52020-03-19 15:33:12 -0700982
983 if (task_curr(t)) {
Paul E. McKenney30d8aa52020-06-09 09:24:51 -0700984 WARN_ON_ONCE(ofl && !is_idle_task(t));
Paul E. McKenney7e3b70e2020-03-22 11:24:58 -0700985
Paul E. McKenney7d0c9c52020-03-19 15:33:12 -0700986 // If no chance of heavyweight readers, do it the hard way.
Paul E. McKenney7e3b70e2020-03-22 11:24:58 -0700987 if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
Peter Zijlstra9b3c4ab2021-09-21 21:54:32 +0200988 return -EINVAL;
Paul E. McKenney7d0c9c52020-03-19 15:33:12 -0700989
990 // If heavyweight readers are enabled on the remote task,
991 // we can inspect its state despite its currently running.
992 // However, we cannot safely change its state.
Paul E. McKenney40471502020-03-22 13:34:34 -0700993 n_heavy_reader_attempts++;
Paul E. McKenney7e3b70e2020-03-22 11:24:58 -0700994 if (!ofl && // Check for "running" idle tasks on offline CPUs.
995 !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
Peter Zijlstra9b3c4ab2021-09-21 21:54:32 +0200996 return -EINVAL; // No quiescent state, do it the hard way.
Paul E. McKenney40471502020-03-22 13:34:34 -0700997 n_heavy_reader_updates++;
Paul E. McKenneyedf37752020-03-22 14:09:45 -0700998 if (ofl)
999 n_heavy_reader_ofl_updates++;
Paul E. McKenney18f08e72021-07-28 11:32:28 -07001000 nesting = 0;
Paul E. McKenney7d0c9c52020-03-19 15:33:12 -07001001 } else {
Paul E. McKenneybdb0cca2021-05-24 12:48:18 -07001002 // The task is not running, so C-language access is safe.
Paul E. McKenney18f08e72021-07-28 11:32:28 -07001003 nesting = t->trc_reader_nesting;
Paul E. McKenney7d0c9c52020-03-19 15:33:12 -07001004 }
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001005
Paul E. McKenney18f08e72021-07-28 11:32:28 -07001006 // If not exiting a read-side critical section, mark as checked
1007 // so that the grace-period kthread will remove it from the
1008 // holdout list.
1009 t->trc_reader_checked = nesting >= 0;
1010 if (nesting <= 0)
Linus Torvalds6fedc282021-11-01 20:25:38 -07001011 return nesting ? -EINVAL : 0; // If in QS, done, otherwise try again later.
Paul E. McKenney7d0c9c52020-03-19 15:33:12 -07001012
1013 // The task is in a read-side critical section, so set up its
1014 // state so that it will awaken the grace-period kthread upon exit
1015 // from that critical section.
1016 atomic_inc(&trc_n_readers_need_end); // One more to wait on.
Paul E. McKenneyf8ab3fa2021-05-24 15:36:37 -07001017 WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
Paul E. McKenney7d0c9c52020-03-19 15:33:12 -07001018 WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
Peter Zijlstra9b3c4ab2021-09-21 21:54:32 +02001019 return 0;
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001020}
1021
1022/* Attempt to extract the state for the specified task. */
1023static void trc_wait_for_one_reader(struct task_struct *t,
1024 struct list_head *bhp)
1025{
1026 int cpu;
1027
1028 // If a previous IPI is still in flight, let it complete.
1029 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
1030 return;
1031
1032 // The current task had better be in a quiescent state.
1033 if (t == current) {
1034 t->trc_reader_checked = true;
Paul E. McKenneybdb0cca2021-05-24 12:48:18 -07001035 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001036 return;
1037 }
1038
1039 // Attempt to nail down the task for inspection.
1040 get_task_struct(t);
Peter Zijlstra9b3c4ab2021-09-21 21:54:32 +02001041 if (!task_call_func(t, trc_inspect_reader, NULL)) {
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001042 put_task_struct(t);
1043 return;
1044 }
1045 put_task_struct(t);
1046
Paul E. McKenney45f4b4a2021-05-24 11:26:53 -07001047 // If this task is not yet on the holdout list, then we are in
1048 // an RCU read-side critical section. Otherwise, the invocation of
Neeraj Upadhyayd0a85852021-08-18 12:58:39 +05301049 // trc_add_holdout() that added it to the list did the necessary
Paul E. McKenney45f4b4a2021-05-24 11:26:53 -07001050 // get_task_struct(). Either way, the task cannot be freed out
1051 // from under this code.
1052
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001053 // If currently running, send an IPI, either way, add to list.
1054 trc_add_holdout(t, bhp);
Paul E. McKenney574de872020-09-09 21:51:09 -07001055 if (task_curr(t) &&
1056 time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001057 // The task is currently running, so try IPIing it.
1058 cpu = task_cpu(t);
1059
1060 // If there is already an IPI outstanding, let it happen.
1061 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
1062 return;
1063
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001064 per_cpu(trc_ipi_to_cpu, cpu) = true;
1065 t->trc_ipi_to_cpu = cpu;
Paul E. McKenney238dbce2020-03-18 10:54:05 -07001066 rcu_tasks_trace.n_ipis++;
Paul E. McKenney96017bf2021-07-28 10:53:41 -07001067 if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001068 // Just in case there is some other reason for
1069 // failure than the target CPU being offline.
Neeraj Upadhyay46aa8862021-08-27 13:43:35 +05301070 WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
1071 __func__, cpu);
Paul E. McKenney7e0669c2020-03-25 14:36:05 -07001072 rcu_tasks_trace.n_ipis_fails++;
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001073 per_cpu(trc_ipi_to_cpu, cpu) = false;
Neeraj Upadhyay46aa8862021-08-27 13:43:35 +05301074 t->trc_ipi_to_cpu = -1;
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001075 }
1076 }
1077}
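/*
 * Reading aid (added commentary, not from the original source): the
 * function above resolves each scanned task in one of three ways:
 *
 *	1. The task is the current task, which must already be in a
 *	   quiescent state, so it is simply marked as checked.
 *	2. task_call_func() ran trc_inspect_reader() and got back zero,
 *	   meaning that the task either was in a quiescent state or now
 *	   has ->trc_reader_special.b.need_qs set and will report its
 *	   own quiescent state when it exits its current read-side
 *	   critical section.  Nothing more need be done.
 *	3. Otherwise, the task is added to the holdout list and, if it
 *	   is currently running and enough time has elapsed since the
 *	   start of the grace period, it is sent an IPI so that
 *	   trc_read_check_handler() can recheck it on its own CPU.
 */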
1078
1079/* Initialize for a new RCU-tasks-trace grace period. */
1080static void rcu_tasks_trace_pregp_step(void)
1081{
1082 int cpu;
1083
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001084	// Start the count at one so that fast-acting IPIs cannot drive it to zero prematurely.
1085 atomic_set(&trc_n_readers_need_end, 1);
1086
1087 // There shouldn't be any old IPIs, but...
1088 for_each_possible_cpu(cpu)
1089 WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
Paul E. McKenney81b4a7b2020-03-22 10:10:07 -07001090
1091 // Disable CPU hotplug across the tasklist scan.
1092 // This also waits for all readers in CPU-hotplug code paths.
1093 cpus_read_lock();
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001094}
1095
1096/* Do first-round processing for the specified task. */
1097static void rcu_tasks_trace_pertask(struct task_struct *t,
1098 struct list_head *hop)
1099{
Uladzislau Rezki (Sony)1b04fa92020-12-09 21:27:31 +01001100 // During early boot when there is only the one boot CPU, there
1101 // is no idle task for the other CPUs. Just return.
1102 if (unlikely(t == NULL))
1103 return;
1104
Paul E. McKenney276c4102020-03-17 16:02:06 -07001105 WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
Paul E. McKenney43766c32020-03-16 20:38:29 -07001106 WRITE_ONCE(t->trc_reader_checked, false);
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001107 t->trc_ipi_to_cpu = -1;
1108 trc_wait_for_one_reader(t, hop);
1109}
1110
Paul E. McKenney9796e1a2020-03-22 13:18:54 -07001111/*
1112 * Do intermediate processing between task and holdout scans and
1113 * pick up the idle tasks.
1114 */
1115static void rcu_tasks_trace_postscan(struct list_head *hop)
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001116{
Paul E. McKenney9796e1a2020-03-22 13:18:54 -07001117 int cpu;
1118
1119 for_each_possible_cpu(cpu)
1120 rcu_tasks_trace_pertask(idle_task(cpu), hop);
1121
Paul E. McKenney81b4a7b2020-03-22 10:10:07 -07001122 // Re-enable CPU hotplug now that the tasklist scan has completed.
1123 cpus_read_unlock();
1124
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001125 // Wait for late-stage exiting tasks to finish exiting.
1126 // These might have passed the call to exit_tasks_rcu_finish().
1127 synchronize_rcu();
1128 // Any tasks that exit after this point will set ->trc_reader_checked.
1129}
1130
Paul E. McKenney4593e772020-03-10 12:13:53 -07001131/* Show the state of a task stalling the current RCU tasks trace GP. */
1132static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
1133{
1134 int cpu;
1135
1136 if (*firstreport) {
1137 pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
1138 *firstreport = false;
1139 }
1140 // FIXME: This should attempt to use try_invoke_on_nonrunning_task().
1141 cpu = task_cpu(t);
1142 pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n",
1143 t->pid,
Neeraj Upadhyayd39ec8f2021-08-18 12:58:41 +05301144 ".I"[READ_ONCE(t->trc_ipi_to_cpu) >= 0],
Paul E. McKenney4593e772020-03-10 12:13:53 -07001145 ".i"[is_idle_task(t)],
Neeraj Upadhyayd39ec8f2021-08-18 12:58:41 +05301146 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
Paul E. McKenneybdb0cca2021-05-24 12:48:18 -07001147 READ_ONCE(t->trc_reader_nesting),
Paul E. McKenneyf8ab3fa2021-05-24 15:36:37 -07001148 " N"[!!READ_ONCE(t->trc_reader_special.b.need_qs)],
Paul E. McKenney4593e772020-03-10 12:13:53 -07001149 cpu);
1150 sched_show_task(t);
1151}
1152
1153/* List stalled IPIs for RCU tasks trace. */
1154static void show_stalled_ipi_trace(void)
1155{
1156 int cpu;
1157
1158 for_each_possible_cpu(cpu)
1159 if (per_cpu(trc_ipi_to_cpu, cpu))
1160 pr_alert("\tIPI outstanding to CPU %d\n", cpu);
1161}
1162
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001163/* Do one scan of the holdout list. */
1164static void check_all_holdout_tasks_trace(struct list_head *hop,
Paul E. McKenney4593e772020-03-10 12:13:53 -07001165 bool needreport, bool *firstreport)
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001166{
1167 struct task_struct *g, *t;
1168
Paul E. McKenney81b4a7b2020-03-22 10:10:07 -07001169 // Disable CPU hotplug across the holdout list scan.
1170 cpus_read_lock();
1171
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001172 list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
1173 // If safe and needed, try to check the current task.
1174 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
1175 !READ_ONCE(t->trc_reader_checked))
1176 trc_wait_for_one_reader(t, hop);
1177
1178 // If check succeeded, remove this task from the list.
Paul E. McKenneyf5dbc592021-09-18 20:40:48 -07001179 if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
1180 READ_ONCE(t->trc_reader_checked))
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001181 trc_del_holdout(t);
Paul E. McKenney4593e772020-03-10 12:13:53 -07001182 else if (needreport)
1183 show_stalled_task_trace(t, firstreport);
1184 }
Paul E. McKenney81b4a7b2020-03-22 10:10:07 -07001185
1186 // Re-enable CPU hotplug now that the holdout list scan has completed.
1187 cpus_read_unlock();
1188
Paul E. McKenney4593e772020-03-10 12:13:53 -07001189 if (needreport) {
Neeraj Upadhyay89401172021-08-18 12:58:40 +05301190 if (*firstreport)
Paul E. McKenney4593e772020-03-10 12:13:53 -07001191 pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
1192 show_stalled_ipi_trace();
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001193 }
1194}
1195
Paul E. McKenneycbe0d8d2021-07-30 12:17:59 -07001196static void rcu_tasks_trace_empty_fn(void *unused)
1197{
1198}
1199
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001200/* Wait for grace period to complete and provide ordering. */
Paul E. McKenneyaf051ca2020-03-16 12:13:33 -07001201static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001202{
Paul E. McKenneycbe0d8d2021-07-30 12:17:59 -07001203 int cpu;
Paul E. McKenney4593e772020-03-10 12:13:53 -07001204 bool firstreport;
1205 struct task_struct *g, *t;
1206 LIST_HEAD(holdouts);
1207 long ret;
1208
Paul E. McKenneycbe0d8d2021-07-30 12:17:59 -07001209 // Wait for any lingering IPI handlers to complete. Note that
1210 // if a CPU has gone offline or transitioned to userspace in the
1211 // meantime, all IPI handlers should have been drained beforehand.
1212 // Yes, this assumes that CPUs process IPIs in order. If that ever
1213 // changes, there will need to be a recheck and/or timed wait.
1214 for_each_online_cpu(cpu)
Paul E. McKenneyf5dbc592021-09-18 20:40:48 -07001215 if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
Paul E. McKenneycbe0d8d2021-07-30 12:17:59 -07001216 smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
1217
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001218 // Remove the safety count.
1219 smp_mb__before_atomic(); // Order vs. earlier atomics
1220 atomic_dec(&trc_n_readers_need_end);
1221 smp_mb__after_atomic(); // Order vs. later atomics
1222
1223 // Wait for readers.
Paul E. McKenneyaf051ca2020-03-16 12:13:33 -07001224 set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
Paul E. McKenney4593e772020-03-10 12:13:53 -07001225 for (;;) {
1226 ret = wait_event_idle_exclusive_timeout(
1227 trc_wait,
1228 atomic_read(&trc_n_readers_need_end) == 0,
1229 READ_ONCE(rcu_task_stall_timeout));
1230 if (ret)
1231 break; // Count reached zero.
Paul E. McKenneyaf051ca2020-03-16 12:13:33 -07001232 // Stall warning time, so make a list of the offenders.
Paul E. McKenneyf747c7e2020-09-15 14:27:38 -07001233 rcu_read_lock();
Paul E. McKenney4593e772020-03-10 12:13:53 -07001234 for_each_process_thread(g, t)
Paul E. McKenney276c4102020-03-17 16:02:06 -07001235 if (READ_ONCE(t->trc_reader_special.b.need_qs))
Paul E. McKenney4593e772020-03-10 12:13:53 -07001236 trc_add_holdout(t, &holdouts);
Paul E. McKenneyf747c7e2020-09-15 14:27:38 -07001237 rcu_read_unlock();
Paul E. McKenney4593e772020-03-10 12:13:53 -07001238 firstreport = true;
Paul E. McKenney592031c2020-09-15 14:03:34 -07001239 list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
1240 if (READ_ONCE(t->trc_reader_special.b.need_qs))
Paul E. McKenney4593e772020-03-10 12:13:53 -07001241 show_stalled_task_trace(t, &firstreport);
Paul E. McKenney592031c2020-09-15 14:03:34 -07001242 trc_del_holdout(t); // Release task_struct reference.
1243 }
Paul E. McKenney4593e772020-03-10 12:13:53 -07001244 if (firstreport)
1245			pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/tasklist mismatch?)\n");
1246 show_stalled_ipi_trace();
1247 pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
1248 }
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001249 smp_mb(); // Caller's code must be ordered after wakeup.
Paul E. McKenney43766c32020-03-16 20:38:29 -07001250 // Pairs with pretty much every ordering primitive.
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001251}
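/*
 * Commentary (added, not from the original source): the
 * trc_n_readers_need_end counter ties the pieces above together.  The
 * pre-grace-period step initializes it to one, trc_inspect_reader()
 * adds one for each task that must report its own quiescent state,
 * each such task subtracts one when it leaves its read-side critical
 * section, and rcu_tasks_trace_postgp() subtracts the initial one.
 * For example, if the scan finds two blocking readers, the counter
 * reads 1 + 2 - 1 = 2 once the safety count has been removed, and the
 * wait loop above sleeps until both readers drive it to zero.
 */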
1252
1253/* Report any needed quiescent state for this exiting task. */
Paul E. McKenney25246fc2020-04-05 20:49:13 -07001254static void exit_tasks_rcu_finish_trace(struct task_struct *t)
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001255{
1256 WRITE_ONCE(t->trc_reader_checked, true);
Paul E. McKenneybdb0cca2021-05-24 12:48:18 -07001257 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001258 WRITE_ONCE(t->trc_reader_nesting, 0);
Paul E. McKenney276c4102020-03-17 16:02:06 -07001259 if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
Paul E. McKenneya5c071c2021-07-28 12:28:27 -07001260 rcu_read_unlock_trace_special(t);
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001261}
1262
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001263/**
1264 * call_rcu_tasks_trace() - Queue a callback for a trace rcu-tasks grace period
1265 * @rhp: structure to be used for queueing the RCU updates.
1266 * @func: actual callback function to be invoked after the grace period
1267 *
Neeraj Upadhyayed42c382021-08-25 12:40:50 +05301268 * The callback function will be invoked some time after a trace rcu-tasks
1269 * grace period elapses, in other words after all currently executing
1270 * trace rcu-tasks read-side critical sections have completed. These
1271 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
1272 * and rcu_read_unlock_trace().
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001273 *
1274 * See the description of call_rcu() for more detailed information on
1275 * memory ordering guarantees.
1276 */
1277void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
1278{
1279 call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
1280}
1281EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
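/*
 * Hypothetical usage sketch (not part of this file): freeing a
 * structure once all rcu_read_lock_trace() readers that might still
 * reference it have finished.  The foo structure, foo_reclaim()
 * callback, and fp pointer are illustrative names only.
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	// After unpublishing all pointers to the structure:
 *	call_rcu_tasks_trace(&fp->rh, foo_reclaim);
 */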
1282
1283/**
1284 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1285 *
1286 * Control will return to the caller some time after a trace rcu-tasks
Paul E. McKenneyc7dcf812020-06-12 13:11:29 -07001287 * grace period has elapsed, in other words after all currently executing
Neeraj Upadhyayed42c382021-08-25 12:40:50 +05301288 * trace rcu-tasks read-side critical sections have completed. These read-side
Paul E. McKenneyc7dcf812020-06-12 13:11:29 -07001289 * critical sections are delimited by calls to rcu_read_lock_trace()
1290 * and rcu_read_unlock_trace().
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001291 *
1292 * This is a very specialized primitive, intended only for a few uses in
1293 * tracing and other situations requiring manipulation of function preambles
1294 * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
1295 * (yet) intended for heavy use from multiple CPUs.
1296 *
1297 * See the description of synchronize_rcu() for more detailed information
1298 * on memory ordering guarantees.
1299 */
1300void synchronize_rcu_tasks_trace(void)
1301{
1302 RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
1303 synchronize_rcu_tasks_generic(&rcu_tasks_trace);
1304}
1305EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
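/*
 * Hypothetical usage sketch (not part of this file): an updater that
 * unpublishes a handler and waits for all pre-existing
 * rcu_read_lock_trace() readers before freeing it.  The active_handler
 * pointer and handler_mutex are illustrative names only.
 *
 *	old = rcu_replace_pointer(active_handler, new_handler,
 *				  lockdep_is_held(&handler_mutex));
 *	synchronize_rcu_tasks_trace();	// Wait for pre-existing trace readers.
 *	kfree(old);
 */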
1306
1307/**
1308 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1309 *
1310 * Although the current implementation is guaranteed to wait, it is not
1311 * obligated to, for example, if there are no pending callbacks.
1312 */
1313void rcu_barrier_tasks_trace(void)
1314{
1315 /* There is only one callback queue, so this is easy. ;-) */
1316 synchronize_rcu_tasks_trace();
1317}
1318EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
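/*
 * Hypothetical usage sketch (not part of this file): a module that has
 * queued callbacks via call_rcu_tasks_trace() should wait for all of
 * them to be invoked before allowing its exit handler to return.  The
 * foo_exit() and unregister_foo_hooks() names are illustrative only.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		unregister_foo_hooks();		// Stop queueing new callbacks.
 *		rcu_barrier_tasks_trace();	// Wait for already-queued callbacks.
 *	}
 */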
1319
1320static int __init rcu_spawn_tasks_trace_kthread(void)
1321{
Paul E. McKenneycafafd62021-11-05 21:52:00 -07001322 cblist_init_generic(&rcu_tasks_trace);
Paul E. McKenney2393a612020-09-09 21:36:34 -07001323 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
Paul E. McKenney4fe192d2020-09-09 22:05:41 -07001324 rcu_tasks_trace.gp_sleep = HZ / 10;
Paul E. McKenney75dc2da2020-09-17 16:17:17 -07001325 rcu_tasks_trace.init_fract = HZ / 10;
Paul E. McKenney2393a612020-09-09 21:36:34 -07001326 } else {
Paul E. McKenney4fe192d2020-09-09 22:05:41 -07001327 rcu_tasks_trace.gp_sleep = HZ / 200;
1328 if (rcu_tasks_trace.gp_sleep <= 0)
1329 rcu_tasks_trace.gp_sleep = 1;
Paul E. McKenney75dc2da2020-09-17 16:17:17 -07001330 rcu_tasks_trace.init_fract = HZ / 200;
Paul E. McKenney2393a612020-09-09 21:36:34 -07001331 if (rcu_tasks_trace.init_fract <= 0)
1332 rcu_tasks_trace.init_fract = 1;
1333 }
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001334 rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
1335 rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
1336 rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
1337 rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
1338 rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
1339 rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
1340 return 0;
1341}
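/*
 * Commentary (added, not from the original source): the hooks wired up
 * above are invoked by the generic Tasks-RCU machinery in this order
 * for each grace period: pregp_func once, pertask_func for every task
 * on the tasklist, postscan_func once, holdouts_func repeatedly until
 * the holdout list drains, and finally postgp_func.
 */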
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001342
Paul E. McKenney27c0f142020-09-15 17:08:03 -07001343#if !defined(CONFIG_TINY_RCU)
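/*
 * Reading aid (added commentary): in the status line built below, "N"
 * is the number of readers still holding up the current grace period,
 * and the "h:" triple gives the heavyweight-reader offline-update,
 * update, and attempt counts, in that order.
 */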
1344void show_rcu_tasks_trace_gp_kthread(void)
Paul E. McKenneye21408c2020-03-16 11:01:55 -07001345{
Paul E. McKenney40471502020-03-22 13:34:34 -07001346 char buf[64];
Paul E. McKenneye21408c2020-03-16 11:01:55 -07001347
Paul E. McKenneyedf37752020-03-22 14:09:45 -07001348 sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
1349 data_race(n_heavy_reader_ofl_updates),
Paul E. McKenney40471502020-03-22 13:34:34 -07001350 data_race(n_heavy_reader_updates),
1351 data_race(n_heavy_reader_attempts));
Paul E. McKenneye21408c2020-03-16 11:01:55 -07001352 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
1353}
Paul E. McKenney27c0f142020-09-15 17:08:03 -07001354EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
1355#endif // !defined(CONFIG_TINY_RCU)
Paul E. McKenneye21408c2020-03-16 11:01:55 -07001356
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001357#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
Paul E. McKenney25246fc2020-04-05 20:49:13 -07001358static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
Paul E. McKenneyd5f177d2020-03-09 19:56:53 -07001359#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
Paul E. McKenney8fd8ca32020-03-15 14:51:20 -07001360
Paul E. McKenney83444962020-05-28 20:03:48 -07001361#ifndef CONFIG_TINY_RCU
Paul E. McKenneye21408c2020-03-16 11:01:55 -07001362void show_rcu_tasks_gp_kthreads(void)
1363{
1364 show_rcu_tasks_classic_gp_kthread();
1365 show_rcu_tasks_rude_gp_kthread();
1366 show_rcu_tasks_trace_gp_kthread();
1367}
Paul E. McKenney83444962020-05-28 20:03:48 -07001368#endif /* #ifndef CONFIG_TINY_RCU */
Paul E. McKenneye21408c2020-03-16 11:01:55 -07001369
Uladzislau Rezki (Sony)bfba7ed2020-12-09 21:27:32 +01001370#ifdef CONFIG_PROVE_RCU
1371struct rcu_tasks_test_desc {
1372 struct rcu_head rh;
1373 const char *name;
1374 bool notrun;
1375};
1376
1377static struct rcu_tasks_test_desc tests[] = {
1378 {
1379 .name = "call_rcu_tasks()",
1380 /* If not defined, the test is skipped. */
1381 .notrun = !IS_ENABLED(CONFIG_TASKS_RCU),
1382 },
1383 {
1384 .name = "call_rcu_tasks_rude()",
1385 /* If not defined, the test is skipped. */
1386 .notrun = !IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
1387 },
1388 {
1389 .name = "call_rcu_tasks_trace()",
1390 /* If not defined, the test is skipped. */
1391 .notrun = !IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
1392 }
1393};
1394
1395static void test_rcu_tasks_callback(struct rcu_head *rhp)
1396{
1397 struct rcu_tasks_test_desc *rttd =
1398 container_of(rhp, struct rcu_tasks_test_desc, rh);
1399
1400 pr_info("Callback from %s invoked.\n", rttd->name);
1401
1402 rttd->notrun = true;
1403}
1404
1405static void rcu_tasks_initiate_self_tests(void)
1406{
1407 pr_info("Running RCU-tasks wait API self tests\n");
1408#ifdef CONFIG_TASKS_RCU
1409 synchronize_rcu_tasks();
1410 call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
1411#endif
1412
1413#ifdef CONFIG_TASKS_RUDE_RCU
1414 synchronize_rcu_tasks_rude();
1415 call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
1416#endif
1417
1418#ifdef CONFIG_TASKS_TRACE_RCU
1419 synchronize_rcu_tasks_trace();
1420 call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
1421#endif
1422}
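/*
 * Commentary (added, not from the original source): for each flavor
 * that is configured in, the function above waits for one synchronous
 * grace period and then queues a single callback.  The late_initcall()
 * below verifies that every queued callback was eventually invoked,
 * complaining if any of them is still outstanding at that point.
 */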
1423
1424static int rcu_tasks_verify_self_tests(void)
1425{
1426 int ret = 0;
1427 int i;
1428
1429 for (i = 0; i < ARRAY_SIZE(tests); i++) {
1430		if (!tests[i].notrun) {	// Still hanging, so the callback never ran.
1431			pr_err("%s has failed.\n", tests[i].name);
1432 ret = -1;
1433 }
1434 }
1435
1436 if (ret)
1437 WARN_ON(1);
1438
1439 return ret;
1440}
1441late_initcall(rcu_tasks_verify_self_tests);
1442#else /* #ifdef CONFIG_PROVE_RCU */
1443static void rcu_tasks_initiate_self_tests(void) { }
1444#endif /* #else #ifdef CONFIG_PROVE_RCU */
1445
Uladzislau Rezki (Sony)1b04fa92020-12-09 21:27:31 +01001446void __init rcu_init_tasks_generic(void)
1447{
1448#ifdef CONFIG_TASKS_RCU
1449 rcu_spawn_tasks_kthread();
1450#endif
1451
1452#ifdef CONFIG_TASKS_RUDE_RCU
1453 rcu_spawn_tasks_rude_kthread();
1454#endif
1455
1456#ifdef CONFIG_TASKS_TRACE_RCU
1457 rcu_spawn_tasks_trace_kthread();
1458#endif
Uladzislau Rezki (Sony)bfba7ed2020-12-09 21:27:32 +01001459
1460 // Run the self-tests.
1461 rcu_tasks_initiate_self_tests();
Uladzislau Rezki (Sony)1b04fa92020-12-09 21:27:31 +01001462}
1463
Paul E. McKenney8fd8ca32020-03-15 14:51:20 -07001464#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
1465static inline void rcu_tasks_bootup_oddness(void) {}
1466#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */