// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	  Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include <linux/nmi.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT_1	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_1	 (1 << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2	 9	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_2	 (1 << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU_1	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_RCU_2	 0x40	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 7	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, read_exit_delay, 13,
	      "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16,
	      "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false,
	      "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0,
	      "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

static int nrealnocbers;
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **fwd_prog_tasks;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;

#define RCU_TORTURE_PIPE_LEN 10

// Mailbox-like structure to check RCU global memory ordering.
struct rcu_torture_reader_check {
	unsigned long rtc_myloops;
	int rtc_chkrdr;
	unsigned long rtc_chkloops;
	int rtc_ready;
	struct rcu_torture_reader_check *rtc_assigner;
} ____cacheline_internodealigned_in_smp;

// Update-side data structure used to check RCU readers.
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
	struct rcu_torture_reader_check *rtort_chkp;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_mbchk_fail;
static atomic_t n_rcu_torture_mbchk_tries;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
static atomic_long_t n_nocb_offload;
static atomic_long_t n_nocb_deoffload;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_POLL_GET		7
#define RTWS_POLL_WAIT		8
#define RTWS_SYNC		9
#define RTWS_STUTTER		10
#define RTWS_STOPPING		11
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_POLL_GET",
	"RTWS_POLL_WAIT",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

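/*
 * Timestamp helper for read-side event tracing: microseconds from the
 * local trace clock, or a zero stub when CONFIG_RCU_TRACE is disabled.
 */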
#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	int (*readlock_held)(void);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_gp_state)(void);
	unsigned long (*start_gp_poll)(void);
	bool (*poll_gp_state)(unsigned long oldstate);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
	int (*stall_dur)(void);
	long cbflood_max;
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	int no_pi_lock;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int torture_readlock_not_held(void)
{
	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
}

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = READ_ONCE(rp->rtort_pipe_count);
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

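/*
 * RCU callback invoked once a grace period has elapsed: advance the
 * element through the pipe, freeing it if it has reached the end and
 * otherwise reposting it for another grace period.
 */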
static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

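/* Grace-period sequence stub for flavors that lack a GP counter. */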
static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype = RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.deferred_free = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.get_gp_state = get_state_synchronize_rcu,
	.start_gp_poll = start_poll_synchronize_rcu,
	.poll_gp_state = poll_state_synchronize_rcu,
	.cond_sync = cond_synchronize_rcu,
	.call = call_rcu,
	.cb_barrier = rcu_barrier,
	.fqs = rcu_force_quiescent_state,
	.stats = NULL,
	.gp_kthread_dbg = show_rcu_gp_kthreads,
	.check_boost_failed = rcu_check_boost_fail,
	.stall_dur = rcu_jiffies_till_stall_check,
	.irq_capable = 1,
	.can_boost = IS_ENABLED(CONFIG_RCU_BOOST),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype = INVALID_RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_busted_torture_deferred_free,
	.sync = synchronize_rcu_busted,
	.exp_sync = synchronize_rcu_busted,
	.call = call_rcu_busted,
	.cb_barrier = NULL,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static int torture_srcu_read_lock_held(void)
{
	return srcu_read_lock_held(srcu_ctlp);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_get_gp_state(void)
{
	return get_state_synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_start_gp_poll(void)
{
	return start_poll_synchronize_srcu(srcu_ctlp);
}

static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype = SRCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.get_gp_state = srcu_torture_get_gp_state,
	.start_gp_poll = srcu_torture_start_gp_poll,
	.poll_gp_state = srcu_torture_poll_gp_state,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.name = "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.name = "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "busted_srcud"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

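/*
 * RCU-tasks read-side critical sections are any stretch of code that
 * does not do a voluntary context switch, so the read-side lock and
 * unlock helpers below are intentionally empty.
 */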
static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = tasks_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_torture_deferred_free,
	.sync = synchronize_rcu_tasks,
	.exp_sync = synchronize_rcu_mult_test,
	.call = call_rcu_tasks,
	.cb_barrier = rcu_barrier_tasks,
	.gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

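/*
 * Wait for a trivial-RCU grace period by scheduling the current task
 * onto each online CPU in turn: once it has run on a given CPU, any
 * preemption-disabled reader previously running there has completed.
 */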
static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype = RCU_TRIVIAL_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_no_completed,
	.sync = synchronize_rcu_trivial,
	.exp_sync = synchronize_rcu_trivial,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "trivial"
};

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype = RCU_TASKS_RUDE_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_rude_torture_deferred_free,
	.sync = synchronize_rcu_tasks_rude,
	.exp_sync = synchronize_rcu_tasks_rude,
	.call = call_rcu_tasks_rude,
	.cb_barrier = rcu_barrier_tasks_rude,
	.gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread,
	.cbflood_max = 50000,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "tasks-rude"
};

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype = RCU_TASKS_TRACING_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_tracing_torture_read_lock,
	.read_delay = srcu_read_delay,	/* just reuse srcu's version. */
	.readunlock = tasks_tracing_torture_read_unlock,
	.readlock_held = rcu_read_lock_trace_held,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_tracing_torture_deferred_free,
	.sync = synchronize_rcu_tasks_trace,
	.exp_sync = synchronize_rcu_tasks_trace,
	.call = call_rcu_tasks_trace,
	.cb_barrier = rcu_barrier_tasks_trace,
	.gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread,
	.cbflood_max = 50000,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks-tracing"
};

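/*
 * Compute the difference between two grace-period sequence numbers,
 * falling back to simple subtraction for flavors without a ->gp_diff().
 */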
static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly starting grace periods and waiting
 * for them to complete.  If a given grace period takes too long, we assume
 * that priority inversion has occurred.
 */

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled.  Only possible if rcutorture is built-in; otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

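/*
 * Has the boost-test grace period that started at *start run past the
 * allowed duration without completing?  If so, complain and return true,
 * otherwise return false.
 */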
static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
{
	int cpu;
	static int dbg_done;
	unsigned long end = jiffies;
	bool gp_done;
	unsigned long j;
	static unsigned long last_persist;
	unsigned long lp;
	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;

	if (end - *start > mininterval) {
		// Recheck after checking time to avoid false positives.
		smp_mb(); // Time check before grace-period check.
		if (cur_ops->poll_gp_state(gp_state))
			return false; // passed, though perhaps just barely
		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
			// At most one persisted message per boost test.
			j = jiffies;
			lp = READ_ONCE(last_persist);
			if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
				pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
			return false; // passed on a technicality
		}
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;
		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
				current->rt_priority, gp_state, end - *start);
			cur_ops->gp_kthread_dbg();
			// Recheck after print to flag grace period ending during splat.
			gp_done = cur_ops->poll_gp_state(gp_state);
			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
				gp_done ? "ended already" : "still pending");

		}

		return true; // failed
	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
		*start = jiffies;
	}

	return false; // passed
}

static int rcu_torture_boost(void *arg)
{
	unsigned long endtime;
	unsigned long gp_state;
	unsigned long gp_state_time;
	unsigned long oldstarttime;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sched_set_fifo_low(current);

	/* Each pass through the following loop does one boost-test cycle. */
	do {
		bool failed = false; // Test failed already in this test interval
		bool gp_initiated = false;

		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		// Do one boost-test interval.
		endtime = oldstarttime + test_boost_duration * HZ;
		while (time_before(jiffies, endtime)) {
			// Has current GP gone too long?
			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
			// If we don't have a grace period in flight, start one.
			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
				gp_state = cur_ops->start_gp_poll();
				gp_initiated = true;
				gp_state_time = jiffies;
			}
			if (stutter_wait("rcu_torture_boost")) {
				sched_set_fifo_low(current);
				// If the grace period already ended,
				// we don't know when that happened, so
				// start over.
				if (cur_ops->poll_gp_state(gp_state))
					gp_initiated = false;
			}
			if (torture_must_stop())
				goto checkwait;
		}

		// In case the grace period extended beyond the end of the loop.
		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
			rcu_torture_boost_failed(gp_state, &gp_state_time);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime && !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				if (oldstarttime == boost_starttime) {
					boost_starttime = jiffies + test_boost_interval * HZ;
					n_rcu_torture_boosts++;
				}
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	if (stutter_wait("rcu_torture_boost"))
			sched_set_fifo_low(current);
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop()) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;
	int oldnice = task_nice(current);

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		if (stutter_wait("rcu_torture_fqs"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

// Used by writers to randomly choose from the available grace-period
// primitives.  The only purpose of the initialization is to size the array.
static int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, RTWS_COND_GET, RTWS_POLL_GET, RTWS_SYNC };
static int nsynctypes;

/*
 * Determine which grace-period primitives are available.
 */
static void rcu_torture_write_types(void)
{
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_poll1 = gp_poll, gp_sync1 = gp_sync;

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_poll1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_poll1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_poll1 && cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
		synctype[nsynctypes++] = RTWS_POLL_GET;
		pr_info("%s: Testing polling GPs.\n", __func__);
	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
1141 pr_alert("%s: gp_poll without primitives.\n", __func__);
1142 }
Paul E. McKenneydb0c1a82017-12-08 12:23:10 -08001143 if (gp_sync1 && cur_ops->sync) {
Paul E. McKenneyf0bf8fa2014-03-21 16:17:56 -07001144 synctype[nsynctypes++] = RTWS_SYNC;
Paul E. McKenneydb0c1a82017-12-08 12:23:10 -08001145 pr_info("%s: Testing normal GPs.\n", __func__);
1146 } else if (gp_sync && !cur_ops->sync) {
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08001147 pr_alert("%s: gp_sync without primitives.\n", __func__);
Paul E. McKenneydb0c1a82017-12-08 12:23:10 -08001148 }
Paul E. McKenney18fbf302020-11-16 16:46:06 -08001149}
1150
1151/*
1152 * RCU torture writer kthread. Repeatedly substitutes a new structure
1153 * for that pointed to by rcu_torture_current, freeing the old structure
1154 * after a series of grace periods (the "pipeline").
1155 */
1156static int
1157rcu_torture_writer(void *arg)
1158{
1159 bool boot_ended;
1160 bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
1161 unsigned long cookie;
1162 int expediting = 0;
1163 unsigned long gp_snap;
1164 int i;
1165 int idx;
1166 int oldnice = task_nice(current);
1167 struct rcu_torture *rp;
1168 struct rcu_torture *old_rp;
1169 static DEFINE_TORTURE_RANDOM(rand);
1170 bool stutter_waited;
1171
1172 VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
1173 if (!can_expedite)
1174 pr_alert("%s" TORTURE_FLAG
1175 " GP expediting controlled from boot/sysfs for %s.\n",
1176 torture_type, cur_ops->name);
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001177 if (WARN_ONCE(nsynctypes == 0,
1178 "rcu_torture_writer: No update-side primitives.\n")) {
Paul E. McKenneyf0bf8fa2014-03-21 16:17:56 -07001179 /*
1180	 * No update-side primitives, so don't try updating.
1181 * The resulting test won't be testing much, hence the
1182 * above WARN_ONCE().
1183 */
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001184 rcu_torture_writer_state = RTWS_STOPPING;
1185 torture_kthread_stopping("rcu_torture_writer");
1186 }
1187
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001188 do {
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001189 rcu_torture_writer_state = RTWS_FIXED_DELAY;
Paul E. McKenney1eba0ef2020-11-17 14:12:24 -08001190 torture_hrtimeout_us(500, 1000, &rand);
Paul E. McKenneya71fca52009-09-18 10:28:19 -07001191 rp = rcu_torture_alloc();
1192 if (rp == NULL)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001193 continue;
1194 rp->rtort_pipe_count = 0;
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001195 rcu_torture_writer_state = RTWS_DELAY;
Paul E. McKenney51b11302014-01-27 11:49:39 -08001196 udelay(torture_random(&rand) & 0x3ff);
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001197 rcu_torture_writer_state = RTWS_REPLACE;
Paul E. McKenney0ddea0e2010-09-19 21:06:14 -07001198 old_rp = rcu_dereference_check(rcu_torture_current,
1199 current == writer_task);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001200 rp->rtort_mbtest = 1;
1201 rcu_assign_pointer(rcu_torture_current, rp);
Paul E. McKenney9b2619a2009-09-23 09:50:43 -07001202 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
Josh Triplettc8e5b162007-05-08 00:33:20 -07001203 if (old_rp) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001204 i = old_rp->rtort_pipe_count;
1205 if (i > RCU_TORTURE_PIPE_LEN)
1206 i = RCU_TORTURE_PIPE_LEN;
1207 atomic_inc(&rcu_torture_wcount[i]);
Paul E. McKenney20248912019-12-21 10:41:48 -08001208 WRITE_ONCE(old_rp->rtort_pipe_count,
1209 old_rp->rtort_pipe_count + 1);
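			/*
			 * Sample a polled grace-period cookie inside this reader and
			 * complain (Cookie check 1) if it unexpectedly reports an
			 * already-completed grace period.
			 */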
Paul E. McKenney0fd05482020-11-13 20:43:59 -08001210 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
1211 idx = cur_ops->readlock();
1212 cookie = cur_ops->get_gp_state();
1213 WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE &&
1214 cur_ops->poll_gp_state(cookie),
1215 "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
1216 __func__,
1217 rcu_torture_writer_state_getname(),
1218 rcu_torture_writer_state,
1219 cookie, cur_ops->get_gp_state());
1220 cur_ops->readunlock(idx);
1221 }
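			/* Randomly select among the available grace-period primitives. */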
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001222 switch (synctype[torture_random(&rand) % nsynctypes]) {
1223 case RTWS_DEF_FREE:
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001224 rcu_torture_writer_state = RTWS_DEF_FREE;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001225 cur_ops->deferred_free(old_rp);
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001226 break;
1227 case RTWS_EXP_SYNC:
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001228 rcu_torture_writer_state = RTWS_EXP_SYNC;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001229 cur_ops->exp_sync();
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001230 rcu_torture_pipe_update(old_rp);
1231 break;
1232 case RTWS_COND_GET:
1233 rcu_torture_writer_state = RTWS_COND_GET;
Paul E. McKenneyfd56f64b2020-11-13 20:14:27 -08001234 gp_snap = cur_ops->get_gp_state();
Paul E. McKenneyea31fd92020-11-17 11:32:54 -08001235 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001236 rcu_torture_writer_state = RTWS_COND_SYNC;
1237 cur_ops->cond_sync(gp_snap);
1238 rcu_torture_pipe_update(old_rp);
1239 break;
Paul E. McKenney0fd05482020-11-13 20:43:59 -08001240 case RTWS_POLL_GET:
1241 rcu_torture_writer_state = RTWS_POLL_GET;
1242 gp_snap = cur_ops->start_gp_poll();
1243 rcu_torture_writer_state = RTWS_POLL_WAIT;
Paul E. McKenneyea31fd92020-11-17 11:32:54 -08001244 while (!cur_ops->poll_gp_state(gp_snap))
1245 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1246 &rand);
Paul E. McKenney0fd05482020-11-13 20:43:59 -08001247 rcu_torture_pipe_update(old_rp);
1248 break;
Paul E. McKenneyf0bf8fa2014-03-21 16:17:56 -07001249 case RTWS_SYNC:
1250 rcu_torture_writer_state = RTWS_SYNC;
1251 cur_ops->sync();
1252 rcu_torture_pipe_update(old_rp);
1253 break;
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001254 default:
1255 WARN_ON_ONCE(1);
1256 break;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001257 }
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001258 }
Paul E. McKenney1b272912018-07-18 14:32:31 -07001259 WRITE_ONCE(rcu_torture_current_version,
1260 rcu_torture_current_version + 1);
Paul E. McKenney4bb3c5f2015-02-18 16:31:29 -08001261 /* Cycle through nesting levels of rcu_expedite_gp() calls. */
1262 if (can_expedite &&
1263 !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
1264 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
1265 if (expediting >= 0)
1266 rcu_expedite_gp();
1267 else
1268 rcu_unexpedite_gp();
1269 if (++expediting > 3)
1270 expediting = -expediting;
Paul E. McKenneyf7c0e6a2017-12-08 11:37:24 -08001271 } else if (!can_expedite) { /* Disabled during boot, recheck. */
1272 can_expedite = !rcu_gp_is_expedited() &&
1273 !rcu_gp_is_normal();
Paul E. McKenney4bb3c5f2015-02-18 16:31:29 -08001274 }
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001275 rcu_torture_writer_state = RTWS_STUTTER;
Paul E. McKenney12a910e2020-11-16 16:01:50 -08001276 boot_ended = rcu_inkernel_boot_has_ended();
Paul E. McKenneyab1b7882020-09-22 16:42:42 -07001277 stutter_waited = stutter_wait("rcu_torture_writer");
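		/*
		 * After a completed stutter pause with no known source of
		 * grace-period delay, every rcu_tortures[] element other than
		 * the current one should have returned to the free list;
		 * complain about any that have not.
		 */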
1278 if (stutter_waited &&
Paul E. McKenney5eabea52019-04-12 09:02:46 -07001279 !READ_ONCE(rcu_fwd_cb_nodelay) &&
Paul E. McKenney3432d762019-04-15 14:50:05 -07001280 !cur_ops->slow_gps &&
Paul E. McKenney59ee0322019-11-28 18:54:06 -08001281 !torture_must_stop() &&
Paul E. McKenney12a910e2020-11-16 16:01:50 -08001282 boot_ended)
Paul E. McKenney474e59b2018-08-07 14:34:44 -07001283 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
Paul E. McKenneye8516c62019-04-09 11:06:32 -07001284 if (list_empty(&rcu_tortures[i].rtort_free) &&
1285 rcu_access_pointer(rcu_torture_current) !=
Paul E. McKenney34aa34b2019-05-16 16:15:16 -07001286 &rcu_tortures[i]) {
1287 rcu_ftrace_dump(DUMP_ALL);
Paul E. McKenneye8516c62019-04-09 11:06:32 -07001288 WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
Paul E. McKenney34aa34b2019-05-16 16:15:16 -07001289 }
Paul E. McKenneyab1b7882020-09-22 16:42:42 -07001290 if (stutter_waited)
1291 sched_set_normal(current, oldnice);
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001292 } while (!torture_must_stop());
Paul E. McKenneycae7cc62020-04-26 19:20:37 -07001293 rcu_torture_current = NULL; // Let stats task know that we are done.
Paul E. McKenney4bb3c5f2015-02-18 16:31:29 -08001294 /* Reset expediting back to unexpedited. */
1295 if (expediting > 0)
1296 expediting = -expediting;
1297 while (can_expedite && expediting++ < 0)
1298 rcu_unexpedite_gp();
1299 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
Paul E. McKenneyf7c0e6a2017-12-08 11:37:24 -08001300 if (!can_expedite)
1301 pr_alert("%s" TORTURE_FLAG
1302 " Dynamic grace-period expediting was disabled.\n",
1303 torture_type);
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001304 rcu_torture_writer_state = RTWS_STOPPING;
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001305 torture_kthread_stopping("rcu_torture_writer");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001306 return 0;
1307}
1308
1309/*
Josh Triplettb772e1d2006-10-04 02:17:13 -07001310 * RCU torture fake writer kthread. Repeatedly calls sync, with a random
1311 * delay between calls.
1312 */
1313static int
1314rcu_torture_fakewriter(void *arg)
1315{
Paul E. McKenney682189a2020-11-16 17:10:39 -08001316 unsigned long gp_snap;
Paul E. McKenney51b11302014-01-27 11:49:39 -08001317 DEFINE_TORTURE_RANDOM(rand);
Josh Triplettb772e1d2006-10-04 02:17:13 -07001318
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001319 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
Linus Torvalds971eae72014-03-31 11:21:19 -07001320 set_user_nice(current, MAX_NICE);
Josh Triplettb772e1d2006-10-04 02:17:13 -07001321
1322 do {
Paul E. McKenney1eba0ef2020-11-17 14:12:24 -08001323 torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
Paul E. McKenney72472a02012-05-29 17:50:51 -07001324 if (cur_ops->cb_barrier != NULL &&
Paul E. McKenney51b11302014-01-27 11:49:39 -08001325 torture_random(&rand) % (nfakewriters * 8) == 0) {
Paul E. McKenney72472a02012-05-29 17:50:51 -07001326 cur_ops->cb_barrier();
Paul E. McKenney682189a2020-11-16 17:10:39 -08001327 } else {
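			/* Exercise one of the available grace-period-wait primitives. */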
1328 switch (synctype[torture_random(&rand) % nsynctypes]) {
1329 case RTWS_DEF_FREE:
1330 break;
1331 case RTWS_EXP_SYNC:
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001332 cur_ops->exp_sync();
Paul E. McKenney682189a2020-11-16 17:10:39 -08001333 break;
1334 case RTWS_COND_GET:
1335 gp_snap = cur_ops->get_gp_state();
Paul E. McKenneyea31fd92020-11-17 11:32:54 -08001336 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
Paul E. McKenney682189a2020-11-16 17:10:39 -08001337 cur_ops->cond_sync(gp_snap);
1338 break;
1339 case RTWS_POLL_GET:
1340 gp_snap = cur_ops->start_gp_poll();
1341 while (!cur_ops->poll_gp_state(gp_snap)) {
Paul E. McKenneyea31fd92020-11-17 11:32:54 -08001342 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1343 &rand);
Paul E. McKenney682189a2020-11-16 17:10:39 -08001344 }
1345 break;
1346 case RTWS_SYNC:
1347 cur_ops->sync();
1348 break;
1349 default:
1350 WARN_ON_ONCE(1);
1351 break;
1352 }
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001353 }
Paul E. McKenney628edaa2014-01-31 11:57:43 -08001354 stutter_wait("rcu_torture_fakewriter");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001355 } while (!torture_must_stop());
Josh Triplettb772e1d2006-10-04 02:17:13 -07001356
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001357 torture_kthread_stopping("rcu_torture_fakewriter");
Josh Triplettb772e1d2006-10-04 02:17:13 -07001358 return 0;
1359}
1360
Paul E. McKenneyf34c85852017-07-20 15:27:32 -07001361static void rcu_torture_timer_cb(struct rcu_head *rhp)
1362{
1363 kfree(rhp);
1364}
1365
Paul E. McKenney00504532020-10-29 15:08:57 -07001366// Set up and carry out testing of RCU's global memory ordering
1367static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
1368 struct torture_random_state *trsp)
1369{
1370 unsigned long loops;
Paul E. McKenney1afb95f2020-12-19 07:34:35 -08001371 int noc = torture_num_online_cpus();
Paul E. McKenney00504532020-10-29 15:08:57 -07001372 int rdrchked;
1373 int rdrchker;
1374 struct rcu_torture_reader_check *rtrcp; // Me.
1375 struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
1376 struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
1377 struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.
1378
1379 if (myid < 0)
1380 return; // Don't try this from timer handlers.
1381
1382 // Increment my counter.
1383 rtrcp = &rcu_torture_reader_mbchk[myid];
1384 WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);
1385
1386 // Attempt to assign someone else some checking work.
1387 rdrchked = torture_random(trsp) % nrealreaders;
1388 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1389 rdrchker = torture_random(trsp) % nrealreaders;
1390 rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
1391 if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
1392 smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
1393 !READ_ONCE(rtp->rtort_chkp) &&
1394 !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
1395 rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
1396 WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
1397 rtrcp->rtc_chkrdr = rdrchked;
1398 WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
1399 if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
1400 cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
1401 (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
1402 }
1403
1404 // If assigned some completed work, do it!
1405 rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
1406 if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
1407 return; // No work or work not yet ready.
1408 rdrchked = rtrcp_assigner->rtc_chkrdr;
1409 if (WARN_ON_ONCE(rdrchked < 0))
1410 return;
1411 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1412 loops = READ_ONCE(rtrcp_chked->rtc_myloops);
1413 atomic_inc(&n_rcu_torture_mbchk_tries);
1414 if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
1415 atomic_inc(&n_rcu_torture_mbchk_fail);
1416 rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
1417 rtrcp_assigner->rtc_ready = 0;
1418 smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
1419 smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
1420}
1421
Josh Triplettb772e1d2006-10-04 02:17:13 -07001422/*
Paul E. McKenney2397d072018-05-25 07:29:25 -07001423 * Do one extension of an RCU read-side critical section using the
1424 * current reader state in readstate (set to zero for initial entry
1425 * to extended critical section), set the new state as specified by
1426 * newstate (set to zero for final exit from extended critical section),
1427 * and random-number-generator state in trsp. If this is neither the
1428	 * beginning nor the end of the critical section and if there was actually a
1429 * change, do a ->read_delay().
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001430 */
Paul E. McKenney2397d072018-05-25 07:29:25 -07001431static void rcutorture_one_extend(int *readstate, int newstate,
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001432 struct torture_random_state *trsp,
1433 struct rt_read_seg *rtrsp)
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001434{
Paul E. McKenney52b1fc32020-03-28 18:53:25 -07001435 unsigned long flags;
Paul E. McKenney1c3d5392021-09-22 20:49:12 -07001436 int idxnew1 = -1;
1437 int idxnew2 = -1;
1438 int idxold1 = *readstate;
1439 int idxold2 = idxold1;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001440 int statesnew = ~*readstate & newstate;
1441 int statesold = *readstate & ~newstate;
1442
Paul E. McKenney1c3d5392021-09-22 20:49:12 -07001443 WARN_ON_ONCE(idxold2 < 0);
1444 WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001445 rtrsp->rt_readstate = newstate;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001446
1447 /* First, put new protection in place to avoid critical-section gap. */
1448 if (statesnew & RCUTORTURE_RDR_BH)
1449 local_bh_disable();
Scott Wood71921a92021-08-20 09:42:36 +02001450 if (statesnew & RCUTORTURE_RDR_RBH)
1451 rcu_read_lock_bh();
Paul E. McKenney2397d072018-05-25 07:29:25 -07001452 if (statesnew & RCUTORTURE_RDR_IRQ)
1453 local_irq_disable();
1454 if (statesnew & RCUTORTURE_RDR_PREEMPT)
1455 preempt_disable();
Paul E. McKenney2ceebc02018-07-06 15:16:12 -07001456 if (statesnew & RCUTORTURE_RDR_SCHED)
1457 rcu_read_lock_sched();
Paul E. McKenney1c3d5392021-09-22 20:49:12 -07001458 if (statesnew & RCUTORTURE_RDR_RCU_1)
1459 idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1;
1460 if (statesnew & RCUTORTURE_RDR_RCU_2)
1461 idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001462
Scott Wood71921a92021-08-20 09:42:36 +02001463 /*
1464 * Next, remove old protection, in decreasing order of strength
1465 * to avoid unlock paths that aren't safe in the stronger
1466	 * context. Namely: BH cannot be enabled with interrupts disabled.
1467	 * Additionally, PREEMPT_RT requires that BH be enabled in preemptible
1468 * context.
1469 */
Paul E. McKenney2397d072018-05-25 07:29:25 -07001470 if (statesold & RCUTORTURE_RDR_IRQ)
1471 local_irq_enable();
Paul E. McKenney2397d072018-05-25 07:29:25 -07001472 if (statesold & RCUTORTURE_RDR_PREEMPT)
1473 preempt_enable();
Paul E. McKenney2ceebc02018-07-06 15:16:12 -07001474 if (statesold & RCUTORTURE_RDR_SCHED)
1475 rcu_read_unlock_sched();
Scott Wood71921a92021-08-20 09:42:36 +02001476 if (statesold & RCUTORTURE_RDR_BH)
1477 local_bh_enable();
1478 if (statesold & RCUTORTURE_RDR_RBH)
1479 rcu_read_unlock_bh();
Paul E. McKenney1c3d5392021-09-22 20:49:12 -07001480 if (statesold & RCUTORTURE_RDR_RCU_2) {
1481 cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1);
1482 WARN_ON_ONCE(idxnew2 != -1);
1483 idxold2 = 0;
1484 }
1485 if (statesold & RCUTORTURE_RDR_RCU_1) {
Paul E. McKenney340170f2021-09-24 21:30:26 -07001486 bool lockit;
Paul E. McKenney52b1fc32020-03-28 18:53:25 -07001487
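		/*
		 * Roughly one time in 64K, and only when no new protection is
		 * being added, hold the current task's ->pi_lock across the
		 * outermost readunlock to exercise unlock paths invoked while
		 * that lock is held.
		 */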
Paul E. McKenney340170f2021-09-24 21:30:26 -07001488 lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
Paul E. McKenney52b1fc32020-03-28 18:53:25 -07001489 if (lockit)
1490 raw_spin_lock_irqsave(&current->pi_lock, flags);
Paul E. McKenney1c3d5392021-09-22 20:49:12 -07001491 cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1);
1492 WARN_ON_ONCE(idxnew1 != -1);
1493 idxold1 = 0;
Paul E. McKenney52b1fc32020-03-28 18:53:25 -07001494 if (lockit)
1495 raw_spin_unlock_irqrestore(&current->pi_lock, flags);
1496 }
Paul E. McKenney2397d072018-05-25 07:29:25 -07001497
1498 /* Delay if neither beginning nor end and there was a change. */
1499 if ((statesnew || statesold) && *readstate && newstate)
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001500 cur_ops->read_delay(trsp, rtrsp);
Paul E. McKenney2397d072018-05-25 07:29:25 -07001501
1502 /* Update the reader state. */
Paul E. McKenney1c3d5392021-09-22 20:49:12 -07001503 if (idxnew1 == -1)
1504 idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1;
1505 WARN_ON_ONCE(idxnew1 < 0);
1506 if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1))
1507 pr_info("Unexpected idxnew1 value of %#x\n", idxnew1);
1508 if (idxnew2 == -1)
1509 idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2;
1510 WARN_ON_ONCE(idxnew2 < 0);
1511 WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
1512 *readstate = idxnew1 | idxnew2 | newstate;
1513 WARN_ON_ONCE(*readstate < 0);
1514 if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1))
1515 pr_info("Unexpected idxnew2 value of %#x\n", idxnew2);
Paul E. McKenney2397d072018-05-25 07:29:25 -07001516}
1517
1518/* Return the biggest extendables mask given current RCU and boot parameters. */
1519static int rcutorture_extend_mask_max(void)
1520{
1521 int mask;
1522
1523 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
1524 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
Paul E. McKenney1c3d5392021-09-22 20:49:12 -07001525 mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001526 return mask;
1527}
1528
1529/* Return a random protection state mask, but with at least one bit set. */
1530static int
1531rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
1532{
1533 int mask = rcutorture_extend_mask_max();
Paul E. McKenneybf1bef52018-06-10 08:50:09 -07001534 unsigned long randmask1 = torture_random(trsp) >> 8;
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001535 unsigned long randmask2 = randmask1 >> 3;
Scott Wood71921a92021-08-20 09:42:36 +02001536 unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
1537 unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
1538 unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001539
Paul E. McKenney1c3d5392021-09-22 20:49:12 -07001540 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1);
Paul E. McKenneya3b0e1e52019-02-28 15:06:13 -08001541 /* Mostly only one bit (need preemption!), sometimes lots of bits. */
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001542 if (!(randmask1 & 0x7))
Paul E. McKenneybf1bef52018-06-10 08:50:09 -07001543 mask = mask & randmask2;
1544 else
1545 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
Scott Wood71921a92021-08-20 09:42:36 +02001546
Paul E. McKenney1c3d5392021-09-22 20:49:12 -07001547 // Can't have nested RCU reader without outer RCU reader.
1548 if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) {
1549 if (oldmask & RCUTORTURE_RDR_RCU_1)
1550 mask &= ~RCUTORTURE_RDR_RCU_2;
1551 else
1552 mask |= RCUTORTURE_RDR_RCU_1;
1553 }
1554
Scott Wood71921a92021-08-20 09:42:36 +02001555 /*
1556 * Can't enable bh w/irq disabled.
1557 */
1558 if (mask & RCUTORTURE_RDR_IRQ)
1559 mask |= oldmask & bhs;
1560
1561 /*
1562 * Ideally these sequences would be detected in debug builds
1563 * (regardless of RT), but until then don't stop testing
1564 * them on non-RT.
1565 */
1566 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1567 /* Can't modify BH in atomic context */
1568 if (oldmask & preempts_irq)
1569 mask &= ~bhs;
1570 if ((oldmask | mask) & preempts_irq)
1571 mask |= oldmask & bhs;
1572 }
1573
Paul E. McKenney1c3d5392021-09-22 20:49:12 -07001574 return mask ?: RCUTORTURE_RDR_RCU_1;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001575}
1576
1577/*
1578 * Do a randomly selected number of extensions of an existing RCU read-side
1579 * critical section.
1580 */
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001581static struct rt_read_seg *
1582rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
1583 struct rt_read_seg *rtrsp)
Paul E. McKenney2397d072018-05-25 07:29:25 -07001584{
1585 int i;
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001586 int j;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001587 int mask = rcutorture_extend_mask_max();
1588
1589 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
1590 if (!((mask - 1) & mask))
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001591 return rtrsp; /* Current RCU reader not extendable. */
1592 /* Bias towards larger numbers of loops. */
1593 i = (torture_random(trsp) >> 3);
1594 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
1595 for (j = 0; j < i; j++) {
Paul E. McKenney2397d072018-05-25 07:29:25 -07001596 mask = rcutorture_extend_mask(*readstate, trsp);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001597 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
Paul E. McKenney2397d072018-05-25 07:29:25 -07001598 }
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001599 return &rtrsp[j];
Paul E. McKenney2397d072018-05-25 07:29:25 -07001600}
1601
1602/*
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001603 * Do one read-side critical section, returning false if there was
1604 * no data to read. Can be invoked both from process context and
1605 * from a timer handler.
1606 */
Paul E. McKenney00504532020-10-29 15:08:57 -07001607static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001608{
Paul E. McKenneybc480a62020-11-15 12:45:57 -08001609 unsigned long cookie;
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001610 int i;
Paul E. McKenney917963d2014-11-21 17:10:16 -08001611 unsigned long started;
Paul E. McKenney6b80da42014-11-21 14:19:26 -08001612 unsigned long completed;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001613 int newstate;
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001614 struct rcu_torture *p;
1615 int pipe_count;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001616 int readstate = 0;
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001617 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
1618 struct rt_read_seg *rtrsp = &rtseg[0];
1619 struct rt_read_seg *rtrsp1;
Paul E. McKenney52494532012-11-14 16:26:40 -08001620 unsigned long long ts;
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001621
Paul E. McKenney77522752020-06-11 16:43:14 -07001622 WARN_ON_ONCE(!rcu_is_watching());
Paul E. McKenney2397d072018-05-25 07:29:25 -07001623 newstate = rcutorture_extend_mask(readstate, trsp);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001624 rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
Paul E. McKenneybc480a62020-11-15 12:45:57 -08001625 if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1626 cookie = cur_ops->get_gp_state();
Paul E. McKenney17ef2fe2018-04-27 11:39:34 -07001627 started = cur_ops->get_gp_seq();
Steven Rostedte4aa0da2013-02-04 13:36:13 -05001628 ts = rcu_trace_clock_local();
Paul E. McKenney632ee202010-02-22 17:04:45 -08001629 p = rcu_dereference_check(rcu_torture_current,
Paul E. McKenneya5c095e2021-03-13 20:05:31 -08001630 !cur_ops->readlock_held || cur_ops->readlock_held());
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001631 if (p == NULL) {
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001632 /* Wait for rcu_torture_writer to get underway */
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001633 rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001634 return false;
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001635 }
1636 if (p->rtort_mbtest == 0)
1637 atomic_inc(&n_rcu_torture_mberror);
Paul E. McKenney00504532020-10-29 15:08:57 -07001638 rcu_torture_reader_do_mbchk(myid, p, trsp);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001639 rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001640 preempt_disable();
Paul E. McKenney20248912019-12-21 10:41:48 -08001641 pipe_count = READ_ONCE(p->rtort_pipe_count);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001642 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1643 /* Should not happen, but... */
1644 pipe_count = RCU_TORTURE_PIPE_LEN;
1645 }
Paul E. McKenney17ef2fe2018-04-27 11:39:34 -07001646 completed = cur_ops->get_gp_seq();
Paul E. McKenney52494532012-11-14 16:26:40 -08001647 if (pipe_count > 1) {
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001648 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1649 ts, started, completed);
Paul E. McKenney274529b2016-03-21 19:46:04 -07001650 rcu_ftrace_dump(DUMP_ALL);
Paul E. McKenney52494532012-11-14 16:26:40 -08001651 }
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001652 __this_cpu_inc(rcu_torture_count[pipe_count]);
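	/* Convert start/end grace-period sequence numbers to a count of elapsed grace periods. */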
Paul E. McKenneyd72193122018-05-15 15:24:41 -07001653 completed = rcutorture_seq_diff(completed, started);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001654 if (completed > RCU_TORTURE_PIPE_LEN) {
1655 /* Should not happen, but... */
1656 completed = RCU_TORTURE_PIPE_LEN;
1657 }
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001658 __this_cpu_inc(rcu_torture_batch[completed]);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001659 preempt_enable();
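	/*
	 * Complain (Cookie check 2) if a full grace period is seen to have
	 * elapsed since the cookie was obtained within this still-active
	 * read-side critical section.
	 */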
Paul E. McKenneybc480a62020-11-15 12:45:57 -08001660 if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1661 WARN_ONCE(cur_ops->poll_gp_state(cookie),
Paul E. McKenney7ac3fdf2021-02-25 20:56:10 -08001662 "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
Paul E. McKenneybc480a62020-11-15 12:45:57 -08001663 __func__,
1664 rcu_torture_writer_state_getname(),
1665 rcu_torture_writer_state,
1666 cookie, cur_ops->get_gp_state());
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001667 rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
Paul E. McKenney902d82e62021-09-22 20:31:44 -07001668 WARN_ON_ONCE(readstate);
Paul E. McKenneyd6855142020-08-11 10:33:39 -07001669 // This next splat is expected behavior if leakpointer, especially
1670 // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
1671 WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001672
1673 /* If error or close call, record the sequence of reader protections. */
1674 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
1675 i = 0;
1676 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
1677 err_segs[i++] = *rtrsp1;
1678 rt_read_nsegs = i;
1679 }
1680
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001681 return true;
1682}
1683
Paul E. McKenney3025520e2018-05-22 11:38:47 -07001684static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
1685
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001686/*
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001687 * RCU torture reader from timer handler. Dereferences rcu_torture_current,
1688 * incrementing the corresponding element of the pipeline array. The
1689 * counter in the element should never be greater than 1, otherwise, the
1690 * RCU implementation is broken.
1691 */
1692static void rcu_torture_timer(struct timer_list *unused)
1693{
Paul E. McKenney8da9a592018-05-22 11:17:51 -07001694 atomic_long_inc(&n_rcu_torture_timers);
Paul E. McKenney00504532020-10-29 15:08:57 -07001695 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);
Paul E. McKenneyf34c85852017-07-20 15:27:32 -07001696
1697 /* Test call_rcu() invocation from interrupt handler. */
1698 if (cur_ops->call) {
1699 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
1700
1701 if (rhp)
1702 cur_ops->call(rhp, rcu_torture_timer_cb);
1703 }
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001704}
1705
1706/*
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001707 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
1708 * incrementing the corresponding element of the pipeline array. The
1709 * counter in the element should never be greater than 1, otherwise, the
1710 * RCU implementation is broken.
1711 */
1712static int
1713rcu_torture_reader(void *arg)
1714{
Paul E. McKenney444da512018-07-04 14:14:42 -07001715 unsigned long lastsleep = jiffies;
Paul E. McKenneyc04dd092018-07-23 14:16:47 -07001716 long myid = (long)arg;
1717 int mynumonline = myid;
Paul E. McKenney51b11302014-01-27 11:49:39 -08001718 DEFINE_TORTURE_RANDOM(rand);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001719 struct timer_list t;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001720
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001721 VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
Linus Torvalds971eae72014-03-31 11:21:19 -07001722 set_user_nice(current, MAX_NICE);
Paul E. McKenney0acc5122009-06-25 09:08:17 -07001723 if (irqreader && cur_ops->irq_capable)
Kees Cookfd30b712017-10-22 17:58:54 -07001724 timer_setup_on_stack(&t, rcu_torture_timer, 0);
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07001725 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001726 do {
Paul E. McKenney0acc5122009-06-25 09:08:17 -07001727 if (irqreader && cur_ops->irq_capable) {
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001728 if (!timer_pending(&t))
Paul E. McKenney6155fec2010-02-22 17:05:04 -08001729 mod_timer(&t, jiffies + 1);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001730 }
Paul E. McKenney00504532020-10-29 15:08:57 -07001731 if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001732 schedule_timeout_interruptible(HZ);
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07001733 if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
Paul E. McKenney1eba0ef2020-11-17 14:12:24 -08001734 torture_hrtimeout_us(500, 1000, &rand);
Paul E. McKenney444da512018-07-04 14:14:42 -07001735 lastsleep = jiffies + 10;
1736 }
Paul E. McKenney1afb95f2020-12-19 07:34:35 -08001737 while (torture_num_online_cpus() < mynumonline && !torture_must_stop())
Paul E. McKenneyc04dd092018-07-23 14:16:47 -07001738 schedule_timeout_interruptible(HZ / 5);
Paul E. McKenney628edaa2014-01-31 11:57:43 -08001739 stutter_wait("rcu_torture_reader");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001740 } while (!torture_must_stop());
Thomas Gleixner424c1b62014-03-23 08:58:27 -07001741 if (irqreader && cur_ops->irq_capable) {
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001742 del_timer_sync(&t);
Thomas Gleixner424c1b62014-03-23 08:58:27 -07001743 destroy_timer_on_stack(&t);
1744 }
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07001745 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001746 torture_kthread_stopping("rcu_torture_reader");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001747 return 0;
1748}
1749
1750/*
Paul E. McKenney2c4319b2020-09-23 17:39:46 -07001751 * Randomly toggle CPUs' callback-offload state.  This uses hrtimers to
1752 * increase race probabilities and fuzzes the interval between toggling.
1753 */
1754static int rcu_nocb_toggle(void *arg)
1755{
1756 int cpu;
1757 int maxcpu = -1;
1758 int oldnice = task_nice(current);
1759 long r;
1760 DEFINE_TORTURE_RANDOM(rand);
1761 ktime_t toggle_delay;
1762 unsigned long toggle_fuzz;
1763 ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);
1764
1765 VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
1766 while (!rcu_inkernel_boot_has_ended())
1767 schedule_timeout_interruptible(HZ / 10);
1768 for_each_online_cpu(cpu)
1769 maxcpu = cpu;
1770 WARN_ON(maxcpu < 0);
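	/* Fuzz each toggle delay by up to one-eighth of the interval (fuzz range at least one microsecond). */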
1771 if (toggle_interval > ULONG_MAX)
1772 toggle_fuzz = ULONG_MAX >> 3;
1773 else
1774 toggle_fuzz = toggle_interval >> 3;
1775 if (toggle_fuzz <= 0)
1776 toggle_fuzz = NSEC_PER_USEC;
1777 do {
1778 r = torture_random(&rand);
1779 cpu = (r >> 4) % (maxcpu + 1);
1780 if (r & 0x1) {
1781 rcu_nocb_cpu_offload(cpu);
1782 atomic_long_inc(&n_nocb_offload);
1783 } else {
1784 rcu_nocb_cpu_deoffload(cpu);
1785 atomic_long_inc(&n_nocb_deoffload);
1786 }
1787 toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
1788 set_current_state(TASK_INTERRUPTIBLE);
1789 schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
1790 if (stutter_wait("rcu_nocb_toggle"))
1791 sched_set_normal(current, oldnice);
1792 } while (!torture_must_stop());
1793 torture_kthread_stopping("rcu_nocb_toggle");
1794 return 0;
1795}
1796
1797/*
Joe Percheseea203f2014-07-14 09:16:15 -04001798 * Print torture statistics. Caller must ensure that there is only
1799 * one call to this function at a given time!!! This is normally
1800 * accomplished by relying on the module system to only have one copy
1801 * of the module loaded, and then by giving the rcu_torture_stats
1802 * kthread full control (or the init/cleanup functions when rcu_torture_stats
1803 * thread is not running).
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001804 */
Chen Gangd1008952013-11-07 10:30:25 +08001805static void
Joe Percheseea203f2014-07-14 09:16:15 -04001806rcu_torture_stats_print(void)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001807{
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001808 int cpu;
1809 int i;
1810 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1811 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
Paul E. McKenney5396d312020-01-08 19:58:13 -08001812 struct rcu_torture *rtcp;
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001813 static unsigned long rtcv_snap = ULONG_MAX;
Paul E. McKenney0032f4e2017-08-30 10:40:17 -07001814 static bool splatted;
Paul E. McKenney4ffa6692016-06-30 11:56:38 -07001815 struct task_struct *wtp;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001816
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08001817 for_each_possible_cpu(cpu) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001818 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
Paul E. McKenneyf042a432020-01-03 16:27:00 -08001819 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
1820 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001821 }
1822 }
1823 for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
1824 if (pipesummary[i] != 0)
1825 break;
1826 }
Joe Percheseea203f2014-07-14 09:16:15 -04001827
1828 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
Paul E. McKenney5396d312020-01-08 19:58:13 -08001829 rtcp = rcu_access_pointer(rcu_torture_current);
Paul E. McKenney354ea052019-05-25 12:36:53 -07001830 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
Paul E. McKenney5396d312020-01-08 19:58:13 -08001831 rtcp,
1832 rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
Joe Percheseea203f2014-07-14 09:16:15 -04001833 rcu_torture_current_version,
1834 list_empty(&rcu_torture_freelist),
1835 atomic_read(&n_rcu_torture_alloc),
1836 atomic_read(&n_rcu_torture_alloc_fail),
1837 atomic_read(&n_rcu_torture_free));
Paul E. McKenney00504532020-10-29 15:08:57 -07001838 pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld rtbre: %ld ",
Joe Percheseea203f2014-07-14 09:16:15 -04001839 atomic_read(&n_rcu_torture_mberror),
Paul E. McKenney00504532020-10-29 15:08:57 -07001840 atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries),
SeongJae Park472213a2016-08-13 15:54:35 +09001841 n_rcu_torture_barrier_error,
Joe Percheseea203f2014-07-14 09:16:15 -04001842 n_rcu_torture_boost_ktrerror,
1843 n_rcu_torture_boost_rterror);
1844 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
1845 n_rcu_torture_boost_failure,
1846 n_rcu_torture_boosts,
Paul E. McKenney8da9a592018-05-22 11:17:51 -07001847 atomic_long_read(&n_rcu_torture_timers));
Joe Percheseea203f2014-07-14 09:16:15 -04001848 torture_onoff_stats();
Paul E. McKenney4a5f1332020-04-24 11:21:40 -07001849 pr_cont("barrier: %ld/%ld:%ld ",
Paul E. McKenneyc9527be2020-02-18 13:41:02 -08001850 data_race(n_barrier_successes),
1851 data_race(n_barrier_attempts),
1852 data_race(n_rcu_torture_barrier_error));
Paul E. McKenneyf7590812020-12-21 11:17:16 -08001853 pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
Paul E. McKenney2c4319b2020-09-23 17:39:46 -07001854 pr_cont("nocb-toggles: %ld:%ld\n",
1855 atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));
Joe Percheseea203f2014-07-14 09:16:15 -04001856
1857 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
Paul E. McKenney8b5ddf82019-08-14 12:02:40 -07001858 if (atomic_read(&n_rcu_torture_mberror) ||
Paul E. McKenney00504532020-10-29 15:08:57 -07001859 atomic_read(&n_rcu_torture_mbchk_fail) ||
Paul E. McKenney8b5ddf82019-08-14 12:02:40 -07001860 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
1861 n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001862 i > 1) {
Joe Percheseea203f2014-07-14 09:16:15 -04001863 pr_cont("%s", "!!! ");
Paul E. McKenney996417d2005-11-18 01:10:50 -08001864 atomic_inc(&n_rcu_torture_error);
Paul E. McKenney8b5ddf82019-08-14 12:02:40 -07001865 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
Paul E. McKenney00504532020-10-29 15:08:57 -07001866 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail));
Paul E. McKenney8b5ddf82019-08-14 12:02:40 -07001867 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier()
1868 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
1869 WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
Paul E. McKenney5e59fba2021-01-15 13:30:38 -08001870 WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?)
Paul E. McKenney8b5ddf82019-08-14 12:02:40 -07001871 WARN_ON_ONCE(i > 1); // Too-short grace period
Paul E. McKenney996417d2005-11-18 01:10:50 -08001872 }
Joe Percheseea203f2014-07-14 09:16:15 -04001873 pr_cont("Reader Pipe: ");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001874 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
Joe Percheseea203f2014-07-14 09:16:15 -04001875 pr_cont(" %ld", pipesummary[i]);
1876 pr_cont("\n");
1877
1878 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1879 pr_cont("Reader Batch: ");
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07001880 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
Joe Percheseea203f2014-07-14 09:16:15 -04001881 pr_cont(" %ld", batchsummary[i]);
1882 pr_cont("\n");
1883
1884 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1885 pr_cont("Free-Block Circulation: ");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001886 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
Joe Percheseea203f2014-07-14 09:16:15 -04001887 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001888 }
Joe Percheseea203f2014-07-14 09:16:15 -04001889 pr_cont("\n");
1890
Josh Triplettc8e5b162007-05-08 00:33:20 -07001891 if (cur_ops->stats)
Joe Percheseea203f2014-07-14 09:16:15 -04001892 cur_ops->stats();
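	/*
	 * If the writer has made no progress since the previous stats pass
	 * and stalls are not being suppressed, dump writer-state diagnostics.
	 */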
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001893 if (rtcv_snap == rcu_torture_current_version &&
Paul E. McKenney5396d312020-01-08 19:58:13 -08001894 rcu_access_pointer(rcu_torture_current) &&
1895 !rcu_stall_is_suppressed()) {
Paul E. McKenney7f6733c2017-04-18 17:17:35 -07001896 int __maybe_unused flags = 0;
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001897 unsigned long __maybe_unused gp_seq = 0;
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001898
1899 rcutorture_get_gp_data(cur_ops->ttype,
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001900 &flags, &gp_seq);
Paul E. McKenney7f6733c2017-04-18 17:17:35 -07001901 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001902 &flags, &gp_seq);
Paul E. McKenney4ffa6692016-06-30 11:56:38 -07001903 wtp = READ_ONCE(writer_task);
Peter Zijlstra2f064a52021-06-11 10:28:17 +02001904 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n",
Paul E. McKenney18aff332015-11-17 13:35:28 -08001905 rcu_torture_writer_state_getname(),
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001906 rcu_torture_writer_state, gp_seq, flags,
Peter Zijlstra2f064a52021-06-11 10:28:17 +02001907 wtp == NULL ? ~0U : wtp->__state,
Paul E. McKenney808de392017-06-19 10:03:22 -07001908 wtp == NULL ? -1 : (int)task_cpu(wtp));
Paul E. McKenney0032f4e2017-08-30 10:40:17 -07001909 if (!splatted && wtp) {
1910 sched_show_task(wtp);
1911 splatted = true;
1912 }
Paul E. McKenney27c0f142020-09-15 17:08:03 -07001913 if (cur_ops->gp_kthread_dbg)
1914 cur_ops->gp_kthread_dbg();
Paul E. McKenney274529b2016-03-21 19:46:04 -07001915 rcu_ftrace_dump(DUMP_ALL);
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001916 }
1917 rtcv_snap = rcu_torture_current_version;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001918}
1919
1920/*
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001921 * Periodically prints torture statistics, if periodic statistics printing
1922 * was specified via the stat_interval module parameter.
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001923 */
1924static int
1925rcu_torture_stats(void *arg)
1926{
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001927 VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001928 do {
1929 schedule_timeout_interruptible(stat_interval * HZ);
1930 rcu_torture_stats_print();
Paul E. McKenneyf67a3352014-01-29 07:40:27 -08001931 torture_shutdown_absorb("rcu_torture_stats");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001932 } while (!torture_must_stop());
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001933 torture_kthread_stopping("rcu_torture_stats");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001934 return 0;
1935}
1936
Paul E. McKenney7ab2bd32021-05-02 19:56:05 -07001937/* Test mem_dump_obj() and friends. */
1938static void rcu_torture_mem_dump_obj(void)
1939{
1940 struct rcu_head *rhp;
1941 struct kmem_cache *kcp;
1942 static int z;
1943
1944 kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL);
1945 rhp = kmem_cache_alloc(kcp, GFP_KERNEL);
1946 pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z);
1947 pr_alert("mem_dump_obj(ZERO_SIZE_PTR):");
1948 mem_dump_obj(ZERO_SIZE_PTR);
1949 pr_alert("mem_dump_obj(NULL):");
1950 mem_dump_obj(NULL);
1951 pr_alert("mem_dump_obj(%px):", &rhp);
1952 mem_dump_obj(&rhp);
1953 pr_alert("mem_dump_obj(%px):", rhp);
1954 mem_dump_obj(rhp);
1955 pr_alert("mem_dump_obj(%px):", &rhp->func);
1956 mem_dump_obj(&rhp->func);
1957 pr_alert("mem_dump_obj(%px):", &z);
1958 mem_dump_obj(&z);
1959 kmem_cache_free(kcp, rhp);
1960 kmem_cache_destroy(kcp);
1961 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
1962 pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
1963 pr_alert("mem_dump_obj(kmalloc %px):", rhp);
1964 mem_dump_obj(rhp);
1965 pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
1966 mem_dump_obj(&rhp->func);
1967 kfree(rhp);
1968 rhp = vmalloc(4096);
1969 pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
1970 pr_alert("mem_dump_obj(vmalloc %px):", rhp);
1971 mem_dump_obj(rhp);
1972 pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func);
1973 mem_dump_obj(&rhp->func);
1974 vfree(rhp);
1975}
1976
Paul E. McKenneyeac45e52018-05-17 11:33:17 -07001977static void
Steven Rostedt (Red Hat)e66c33d2013-07-12 16:50:28 -04001978rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
Paul E. McKenney95c38322006-03-24 03:15:58 -08001979{
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001980 pr_alert("%s" TORTURE_FLAG
1981 "--- %s: nreaders=%d nfakewriters=%d "
1982 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1983 "shuffle_interval=%d stutter=%d irqreader=%d "
1984 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1985 "test_boost=%d/%d test_boost_interval=%d "
1986 "test_boost_duration=%d shutdown_secs=%d "
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001987 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
Paul E. McKenney19a8ff92020-03-11 17:39:12 -07001988 "stall_cpu_block=%d "
Paul E. McKenney67afeed2012-10-20 12:56:06 -07001989 "n_barrier_cbs=%d "
Paul E. McKenney4a5f1332020-04-24 11:21:40 -07001990 "onoff_interval=%d onoff_holdoff=%d "
Paul E. McKenney2c4319b2020-09-23 17:39:46 -07001991 "read_exit_delay=%d read_exit_burst=%d "
1992 "nocbs_nthreads=%d nocbs_toggle=%d\n",
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001993 torture_type, tag, nrealreaders, nfakewriters,
1994 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1995 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1996 test_boost, cur_ops->can_boost,
1997 test_boost_interval, test_boost_duration, shutdown_secs,
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001998 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
Paul E. McKenney19a8ff92020-03-11 17:39:12 -07001999 stall_cpu_block,
Paul E. McKenney67afeed2012-10-20 12:56:06 -07002000 n_barrier_cbs,
Paul E. McKenney4a5f1332020-04-24 11:21:40 -07002001 onoff_interval, onoff_holdoff,
Paul E. McKenney2c4319b2020-09-23 17:39:46 -07002002 read_exit_delay, read_exit_burst,
2003 nocbs_nthreads, nocbs_toggle);
Paul E. McKenney95c38322006-03-24 03:15:58 -08002004}
2005
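/* Stop and clean up the boost kthread for the given CPU, if one exists. */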
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02002006static int rcutorture_booster_cleanup(unsigned int cpu)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002007{
2008 struct task_struct *t;
2009
2010 if (boost_tasks[cpu] == NULL)
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02002011 return 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002012 mutex_lock(&boost_mutex);
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002013 t = boost_tasks[cpu];
2014 boost_tasks[cpu] = NULL;
Joel Fernandes (Google)450efca2018-06-10 16:45:43 -07002015 rcu_torture_enable_rt_throttle();
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002016 mutex_unlock(&boost_mutex);
2017
2018 /* This must be outside of the mutex, otherwise deadlock! */
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002019 torture_stop_kthread(rcu_torture_boost, t);
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02002020 return 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002021}
2022
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02002023static int rcutorture_booster_init(unsigned int cpu)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002024{
2025 int retval;
2026
2027 if (boost_tasks[cpu] != NULL)
2028 return 0; /* Already created, nothing more to do. */
2029
2030 /* Don't allow time recalculation while creating a new task. */
2031 mutex_lock(&boost_mutex);
Joel Fernandes (Google)450efca2018-06-10 16:45:43 -07002032 rcu_torture_disable_rt_throttle();
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08002033 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
Eric Dumazet1f288092011-06-16 15:53:18 -07002034 boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
2035 cpu_to_node(cpu),
2036 "rcu_torture_boost");
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002037 if (IS_ERR(boost_tasks[cpu])) {
2038 retval = PTR_ERR(boost_tasks[cpu]);
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08002039 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002040 n_rcu_torture_boost_ktrerror++;
2041 boost_tasks[cpu] = NULL;
2042 mutex_unlock(&boost_mutex);
2043 return retval;
2044 }
2045 kthread_bind(boost_tasks[cpu], cpu);
2046 wake_up_process(boost_tasks[cpu]);
2047 mutex_unlock(&boost_mutex);
2048 return 0;
2049}
2050
Paul E. McKenneyd5f546d2011-11-04 11:44:12 -07002051/*
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08002052 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
2053 * induces a CPU stall for the time specified by stall_cpu.
2054 */
Paul Gortmaker49fb4c62013-06-19 14:52:21 -04002055static int rcu_torture_stall(void *args)
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08002056{
Paul E. McKenney19a8ff92020-03-11 17:39:12 -07002057 int idx;
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08002058 unsigned long stop_at;
2059
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08002060 VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08002061 if (stall_cpu_holdoff > 0) {
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08002062 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08002063 schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08002064 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08002065 }
Paul E. McKenney55b2dcf2020-04-01 19:57:52 -07002066 if (!kthread_should_stop() && stall_gp_kthread > 0) {
2067 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
2068 rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
2069 for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
2070 if (kthread_should_stop())
2071 break;
2072 schedule_timeout_uninterruptible(HZ);
2073 }
2074 }
2075 if (!kthread_should_stop() && stall_cpu > 0) {
2076 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
Arnd Bergmann622be33f2018-06-18 16:47:34 +02002077 stop_at = ktime_get_seconds() + stall_cpu;
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08002078 /* RCU CPU stall is expected behavior in following code. */
Paul E. McKenney19a8ff92020-03-11 17:39:12 -07002079 idx = cur_ops->readlock();
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07002080 if (stall_cpu_irqsoff)
2081 local_irq_disable();
Paul E. McKenney19a8ff92020-03-11 17:39:12 -07002082 else if (!stall_cpu_block)
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07002083 preempt_disable();
Stephen Zhang0a27fff2021-01-23 17:54:17 +08002084 pr_alert("%s start on CPU %d.\n",
2085 __func__, raw_smp_processor_id());
Arnd Bergmann622be33f2018-06-18 16:47:34 +02002086 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
2087 stop_at))
Paul E. McKenney59e83662021-05-16 21:17:27 -07002088 if (stall_cpu_block) {
2089#ifdef CONFIG_PREEMPTION
2090 preempt_schedule();
2091#else
Paul E. McKenney19a8ff92020-03-11 17:39:12 -07002092 schedule_timeout_uninterruptible(HZ);
Paul E. McKenney59e83662021-05-16 21:17:27 -07002093#endif
Wander Lairson Costa5ff7c9f2021-11-10 11:37:45 -03002094 } else if (stall_no_softlockup) {
2095 touch_softlockup_watchdog();
Paul E. McKenney59e83662021-05-16 21:17:27 -07002096 }
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07002097 if (stall_cpu_irqsoff)
2098 local_irq_enable();
Paul E. McKenney19a8ff92020-03-11 17:39:12 -07002099 else if (!stall_cpu_block)
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07002100 preempt_enable();
Paul E. McKenney19a8ff92020-03-11 17:39:12 -07002101 cur_ops->readunlock(idx);
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08002102 }
Stephen Zhang0a27fff2021-01-23 17:54:17 +08002103 pr_alert("%s end.\n", __func__);
Paul E. McKenneyf67a3352014-01-29 07:40:27 -08002104 torture_shutdown_absorb("rcu_torture_stall");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08002105 while (!kthread_should_stop())
2106 schedule_timeout_interruptible(10 * HZ);
2107 return 0;
2108}
2109
2110/* Spawn CPU-stall kthread, if stall_cpu specified. */
2111static int __init rcu_torture_stall_init(void)
2112{
Paul E. McKenney55b2dcf2020-04-01 19:57:52 -07002113 if (stall_cpu <= 0 && stall_gp_kthread <= 0)
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08002114 return 0;
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002115 return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08002116}
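
/*
 * Example (hypothetical values): when rcutorture is built as a module, the
 * stall test above can be requested at load time, e.g.:
 *
 *	modprobe rcutorture stall_cpu_holdoff=30 stall_cpu=22 stall_cpu_irqsoff=1
 *
 * The parameter names are those consumed by rcu_torture_stall(); the numeric
 * values here are illustrative only.
 */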
2117
Paul E. McKenney9fdcb9a2018-07-19 13:36:00 -07002118/* State structure for forward-progress self-propagating RCU callback. */
2119struct fwd_cb_state {
2120 struct rcu_head rh;
2121 int stop;
2122};
2123
2124/*
2125 * Forward-progress self-propagating RCU callback function. Because
2126 * callbacks run from softirq, this function is an implicit RCU read-side
2127 * critical section.
2128 */
2129static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
2130{
2131 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
2132
2133 if (READ_ONCE(fcsp->stop)) {
2134 WRITE_ONCE(fcsp->stop, 2);
2135 return;
2136 }
2137 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
2138}
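
/*
 * Minimal usage sketch (not part of rcutorture) for vanilla RCU, mirroring
 * the selfpropcb handling in rcu_torture_fwd_prog_nr() below.  The
 * fwd_cb_demo* names are illustrative only.
 */
static void fwd_cb_demo_cb(struct rcu_head *rhp)
{
	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);

	if (READ_ONCE(fcsp->stop)) {
		WRITE_ONCE(fcsp->stop, 2);	/* Acknowledge the stop request. */
		return;
	}
	call_rcu(&fcsp->rh, fwd_cb_demo_cb);	/* Re-post: self-propagating. */
}

static void fwd_cb_demo_run(void)
{
	struct fwd_cb_state fcs;

	init_rcu_head_on_stack(&fcs.rh);
	WRITE_ONCE(fcs.stop, 0);
	call_rcu(&fcs.rh, fwd_cb_demo_cb);	/* Start the self-propagating chain. */

	/* ... let the callback launder through grace periods ... */

	WRITE_ONCE(fcs.stop, 1);	/* Request stop. */
	synchronize_rcu();		/* Wait for running CB to complete. */
	rcu_barrier();			/* Wait for queued callbacks. */
	WARN_ON(READ_ONCE(fcs.stop) != 2);
	destroy_rcu_head_on_stack(&fcs.rh);
}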
2139
Paul E. McKenney48718482018-08-15 15:32:51 -07002140/* State for continuous-flood RCU callbacks. */
2141struct rcu_fwd_cb {
2142 struct rcu_head rh;
2143 struct rcu_fwd_cb *rfc_next;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002144 struct rcu_fwd *rfc_rfp;
Paul E. McKenney48718482018-08-15 15:32:51 -07002145 int rfc_gps;
2146};
Paul E. McKenneya289e602019-11-05 08:31:56 -08002147
Paul E. McKenney48718482018-08-15 15:32:51 -07002148#define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */
2149#define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */
2150#define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */
Paul E. McKenney2e57bf92018-10-05 16:43:09 -07002151#define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */
Paul E. McKenneya289e602019-11-05 08:31:56 -08002152#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
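
/*
 * With the values above, the bucket count is HZ-independent:
 * N_LAUNDERS_HIST = 2 * (8 * HZ) / (HZ / 10) = 160 buckets, each covering
 * one tenth of a second, or twice the MAX_FWD_CB_JIFFIES test duration.
 */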
2153
Paul E. McKenneycd618d12019-01-08 13:41:26 -08002154struct rcu_launder_hist {
2155 long n_launders;
2156 unsigned long launder_gp_seq;
2157};
Paul E. McKenney48718482018-08-15 15:32:51 -07002158
Paul E. McKenneya289e602019-11-05 08:31:56 -08002159struct rcu_fwd {
2160 spinlock_t rcu_fwd_lock;
2161 struct rcu_fwd_cb *rcu_fwd_cb_head;
2162 struct rcu_fwd_cb **rcu_fwd_cb_tail;
2163 long n_launders_cb;
2164 unsigned long rcu_fwd_startat;
2165 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
2166 unsigned long rcu_launder_gp_seq_start;
Paul E. McKenney82e31002021-11-22 20:55:18 -08002167 int rcu_fwd_id;
Paul E. McKenneya289e602019-11-05 08:31:56 -08002168};
2169
Paul E. McKenney57f60202020-07-20 08:34:07 -07002170static DEFINE_MUTEX(rcu_fwd_mutex);
Jason Yanafbc1572020-04-09 19:42:38 +08002171static struct rcu_fwd *rcu_fwds;
Paul E. McKenney82e31002021-11-22 20:55:18 -08002172static unsigned long rcu_fwd_seq;
Paul E. McKenney53b541f2021-11-23 13:51:11 -08002173static atomic_long_t rcu_fwd_max_cbs;
Jason Yanafbc1572020-04-09 19:42:38 +08002174static bool rcu_fwd_emergency_stop;
Paul E. McKenney48718482018-08-15 15:32:51 -07002175
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002176static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
Paul E. McKenney1a682752018-10-03 12:33:41 -07002177{
Paul E. McKenneycd618d12019-01-08 13:41:26 -08002178 unsigned long gps;
2179 unsigned long gps_old;
Paul E. McKenney1a682752018-10-03 12:33:41 -07002180 int i;
2181 int j;
2182
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002183 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
2184 if (rfp->n_launders_hist[i].n_launders > 0)
Paul E. McKenney1a682752018-10-03 12:33:41 -07002185 break;
Paul E. McKenney82e31002021-11-22 20:55:18 -08002186 mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
2187 pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):",
2188 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat);
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002189 gps_old = rfp->rcu_launder_gp_seq_start;
Paul E. McKenneycd618d12019-01-08 13:41:26 -08002190 for (j = 0; j <= i; j++) {
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002191 gps = rfp->n_launders_hist[j].launder_gp_seq;
Paul E. McKenneycd618d12019-01-08 13:41:26 -08002192 pr_cont(" %ds/%d: %ld:%ld",
Paul E. McKenneya289e602019-11-05 08:31:56 -08002193 j + 1, FWD_CBS_HIST_DIV,
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002194 rfp->n_launders_hist[j].n_launders,
Paul E. McKenneycd618d12019-01-08 13:41:26 -08002195 rcutorture_seq_diff(gps, gps_old));
2196 gps_old = gps;
2197 }
Paul E. McKenney1a682752018-10-03 12:33:41 -07002198 pr_cont("\n");
Paul E. McKenney82e31002021-11-22 20:55:18 -08002199 mutex_unlock(&rcu_fwd_mutex);
Paul E. McKenney1a682752018-10-03 12:33:41 -07002200}
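
/*
 * Each histogram entry printed above has the form
 * "<bucket>s/<FWD_CBS_HIST_DIV>: <invocations>:<grace periods>".  As a
 * hypothetical example, "3s/10: 1024:2" would mean that during the third
 * tenth-of-a-second interval of the callback flood, 1024 callbacks were
 * invoked and roughly two grace periods elapsed since the previous bucket.
 */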
2201
Paul E. McKenney48718482018-08-15 15:32:51 -07002202/* Callback function for continuous-flood RCU callbacks. */
2203static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
2204{
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002205 unsigned long flags;
Paul E. McKenney48718482018-08-15 15:32:51 -07002206 int i;
2207 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
2208 struct rcu_fwd_cb **rfcpp;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002209 struct rcu_fwd *rfp = rfcp->rfc_rfp;
Paul E. McKenney48718482018-08-15 15:32:51 -07002210
2211 rfcp->rfc_next = NULL;
2212 rfcp->rfc_gps++;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002213 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2214 rfcpp = rfp->rcu_fwd_cb_tail;
2215 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
Paul E. McKenney48718482018-08-15 15:32:51 -07002216 WRITE_ONCE(*rfcpp, rfcp);
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002217 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
2218 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
2219 if (i >= ARRAY_SIZE(rfp->n_launders_hist))
2220 i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
2221 rfp->n_launders_hist[i].n_launders++;
2222 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
2223 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002224}
2225
Paul E. McKenneyab21f602019-04-14 18:30:22 -07002226// Give the scheduler a chance, even on nohz_full CPUs.
Paul E. McKenneybd1bfc52019-06-22 14:35:59 -07002227static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
Paul E. McKenneyab21f602019-04-14 18:30:22 -07002228{
Sebastian Andrzej Siewior90326f02019-10-15 21:18:14 +02002229 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
Paul E. McKenneybd1bfc52019-06-22 14:35:59 -07002230 // Real call_rcu() floods hit userspace, so emulate that.
2231 if (need_resched() || (iter & 0xfff))
Paul E. McKenneyab21f602019-04-14 18:30:22 -07002232 schedule();
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07002233 return;
Paul E. McKenneyab21f602019-04-14 18:30:22 -07002234 }
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07002235 // No userspace emulation: CB invocation throttles call_rcu()
2236 cond_resched();
Paul E. McKenneyab21f602019-04-14 18:30:22 -07002237}
2238
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002239/*
2240 * Free all callbacks on the rcu_fwd_cb_head list, either because the
2241 * test is over or because we hit an OOM event.
2242 */
Paul E. McKenney67641002019-11-06 08:20:20 -08002243static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002244{
2245 unsigned long flags;
2246 unsigned long freed = 0;
2247 struct rcu_fwd_cb *rfcp;
2248
2249 for (;;) {
Paul E. McKenney67641002019-11-06 08:20:20 -08002250 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2251 rfcp = rfp->rcu_fwd_cb_head;
Paul E. McKenney140e53f2019-04-09 10:08:18 -07002252 if (!rfcp) {
Paul E. McKenney67641002019-11-06 08:20:20 -08002253 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002254 break;
Paul E. McKenney140e53f2019-04-09 10:08:18 -07002255 }
Paul E. McKenney67641002019-11-06 08:20:20 -08002256 rfp->rcu_fwd_cb_head = rfcp->rfc_next;
2257 if (!rfp->rcu_fwd_cb_head)
2258 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
2259 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002260 kfree(rfcp);
2261 freed++;
Paul E. McKenneybd1bfc52019-06-22 14:35:59 -07002262 rcu_torture_fwd_prog_cond_resched(freed);
Paul E. McKenney79ba7ff2019-08-04 13:17:35 -07002263 if (tick_nohz_full_enabled()) {
2264 local_irq_save(flags);
2265 rcu_momentary_dyntick_idle();
2266 local_irq_restore(flags);
2267 }
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002268 }
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002269 return freed;
Paul E. McKenney48718482018-08-15 15:32:51 -07002270}
2271
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002272/* Carry out need_resched()/cond_resched() forward-progress testing. */
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002273static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
2274 int *tested, int *tested_tries)
Paul E. McKenney1b272912018-07-18 14:32:31 -07002275{
Paul E. McKenney119248b2018-07-18 15:39:37 -07002276 unsigned long cver;
Paul E. McKenneyf4de46e2018-07-24 20:50:40 -07002277 unsigned long dur;
Paul E. McKenney7c590fc2018-08-07 16:42:42 -07002278 struct fwd_cb_state fcs;
Paul E. McKenney119248b2018-07-18 15:39:37 -07002279 unsigned long gps;
Paul E. McKenney1b272912018-07-18 14:32:31 -07002280 int idx;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002281 int sd;
2282 int sd4;
2283 bool selfpropcb = false;
2284 unsigned long stopat;
2285 static DEFINE_TORTURE_RANDOM(trs);
2286
Paul E. McKenneya7eb9372020-10-09 19:51:55 -07002287 if (!cur_ops->sync)
2288 return; // Cannot do need_resched() forward progress testing without ->sync.
2289 if (cur_ops->call && cur_ops->cb_barrier) {
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002290 init_rcu_head_on_stack(&fcs.rh);
2291 selfpropcb = true;
2292 }
2293
2294 /* Tight loop containing cond_resched(). */
Paul E. McKenneye8516c62019-04-09 11:06:32 -07002295 WRITE_ONCE(rcu_fwd_cb_nodelay, true);
2296 cur_ops->sync(); /* Later readers see above write. */
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002297 if (selfpropcb) {
2298 WRITE_ONCE(fcs.stop, 0);
2299 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
2300 }
2301 cver = READ_ONCE(rcu_torture_current_version);
2302 gps = cur_ops->get_gp_seq();
2303 sd = cur_ops->stall_dur() + 1;
2304 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
2305 dur = sd4 + torture_random(&trs) % (sd - sd4);
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002306 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2307 stopat = rfp->rcu_fwd_startat + dur;
Paul E. McKenneye0aff972018-10-01 17:40:54 -07002308 while (time_before(jiffies, stopat) &&
Paul E. McKenney60013d52019-07-10 08:30:00 -07002309 !shutdown_time_arrived() &&
Paul E. McKenneye0aff972018-10-01 17:40:54 -07002310 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002311 idx = cur_ops->readlock();
2312 udelay(10);
2313 cur_ops->readunlock(idx);
2314 if (!fwd_progress_need_resched || need_resched())
Paul E. McKenneyfbbd5e32019-08-15 11:43:53 -07002315 cond_resched();
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002316 }
2317 (*tested_tries)++;
Paul E. McKenneye0aff972018-10-01 17:40:54 -07002318 if (!time_before(jiffies, stopat) &&
Paul E. McKenney60013d52019-07-10 08:30:00 -07002319 !shutdown_time_arrived() &&
Paul E. McKenneye0aff972018-10-01 17:40:54 -07002320 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002321 (*tested)++;
2322 cver = READ_ONCE(rcu_torture_current_version) - cver;
2323 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2324 WARN_ON(!cver && gps < 2);
Paul E. McKenney82e31002021-11-22 20:55:18 -08002325 pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__,
2326 rfp->rcu_fwd_id, dur, cver, gps);
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002327 }
2328 if (selfpropcb) {
2329 WRITE_ONCE(fcs.stop, 1);
2330 cur_ops->sync(); /* Wait for running CB to complete. */
2331 cur_ops->cb_barrier(); /* Wait for queued callbacks. */
2332 }
2333
2334 if (selfpropcb) {
2335 WARN_ON(READ_ONCE(fcs.stop) != 2);
2336 destroy_rcu_head_on_stack(&fcs.rh);
2337 }
Paul E. McKenneye8516c62019-04-09 11:06:32 -07002338 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
2339 WRITE_ONCE(rcu_fwd_cb_nodelay, false);
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002340}
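
/*
 * Worked example for the duration computed above, assuming the default
 * 21-second RCU CPU stall timeout: sd is about 21 * HZ + 1 and, with the
 * default fwd_progress_div of 4, sd4 is about 5.25 * HZ, so each attempt
 * runs the udelay(10)/cond_resched() reader loop for a random duration
 * between roughly one quarter of and just under the full stall timeout.
 */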
2341
2342/* Carry out call_rcu() forward-progress testing. */
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002343static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002344{
2345 unsigned long cver;
Paul E. McKenney79ba7ff2019-08-04 13:17:35 -07002346 unsigned long flags;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002347 unsigned long gps;
2348 int i;
Paul E. McKenney48718482018-08-15 15:32:51 -07002349 long n_launders;
2350 long n_launders_cb_snap;
2351 long n_launders_sa;
2352 long n_max_cbs;
2353 long n_max_gps;
2354 struct rcu_fwd_cb *rfcp;
2355 struct rcu_fwd_cb *rfcpn;
Paul E. McKenney1b272912018-07-18 14:32:31 -07002356 unsigned long stopat;
Paul E. McKenney48718482018-08-15 15:32:51 -07002357 unsigned long stoppedat;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002358
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002359 if (READ_ONCE(rcu_fwd_emergency_stop))
2360 return; /* Get out of the way quickly, no GP wait! */
Paul E. McKenneyc682db52019-04-19 07:38:27 -07002361 if (!cur_ops->call)
2362 return; /* Can't do call_rcu() fwd prog without ->call. */
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002363
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002364 /* Loop continuously posting RCU callbacks. */
2365 WRITE_ONCE(rcu_fwd_cb_nodelay, true);
2366 cur_ops->sync(); /* Later readers see above write. */
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002367 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2368 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002369 n_launders = 0;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002370 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002371 n_launders_sa = 0;
2372 n_max_cbs = 0;
2373 n_max_gps = 0;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002374 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
2375 rfp->n_launders_hist[i].n_launders = 0;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002376 cver = READ_ONCE(rcu_torture_current_version);
2377 gps = cur_ops->get_gp_seq();
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002378 rfp->rcu_launder_gp_seq_start = gps;
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07002379 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
Paul E. McKenneye0aff972018-10-01 17:40:54 -07002380 while (time_before(jiffies, stopat) &&
Paul E. McKenney60013d52019-07-10 08:30:00 -07002381 !shutdown_time_arrived() &&
Paul E. McKenneye0aff972018-10-01 17:40:54 -07002382 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002383 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002384 rfcpn = NULL;
2385 if (rfcp)
2386 rfcpn = READ_ONCE(rfcp->rfc_next);
2387 if (rfcpn) {
2388 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
2389 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
2390 break;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002391 rfp->rcu_fwd_cb_head = rfcpn;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002392 n_launders++;
2393 n_launders_sa++;
Paul E. McKenney613b00f2021-11-23 11:53:52 -08002394 } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) {
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002395 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
2396 if (WARN_ON_ONCE(!rfcp)) {
2397 schedule_timeout_interruptible(1);
2398 continue;
2399 }
2400 n_max_cbs++;
2401 n_launders_sa = 0;
2402 rfcp->rfc_gps = 0;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002403 rfcp->rfc_rfp = rfp;
Paul E. McKenney613b00f2021-11-23 11:53:52 -08002404 } else {
2405 rfcp = NULL;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002406 }
Paul E. McKenney613b00f2021-11-23 11:53:52 -08002407 if (rfcp)
2408 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
Paul E. McKenneybd1bfc52019-06-22 14:35:59 -07002409 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
Paul E. McKenney79ba7ff2019-08-04 13:17:35 -07002410 if (tick_nohz_full_enabled()) {
2411 local_irq_save(flags);
2412 rcu_momentary_dyntick_idle();
2413 local_irq_restore(flags);
2414 }
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002415 }
2416 stoppedat = jiffies;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002417 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002418 cver = READ_ONCE(rcu_torture_current_version) - cver;
2419 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2420 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
Paul E. McKenney67641002019-11-06 08:20:20 -08002421 (void)rcu_torture_fwd_prog_cbfree(rfp);
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002422
Paul E. McKenney60013d52019-07-10 08:30:00 -07002423 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
2424 !shutdown_time_arrived()) {
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002425 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
2426 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
2427 __func__,
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002428 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002429 n_launders + n_max_cbs - n_launders_cb_snap,
2430 n_launders, n_launders_sa,
2431 n_max_gps, n_max_cbs, cver, gps);
Paul E. McKenney53b541f2021-11-23 13:51:11 -08002432 atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs);
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002433 rcu_torture_fwd_cb_hist(rfp);
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002434 }
Paul E. McKenneye8516c62019-04-09 11:06:32 -07002435 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07002436 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
Paul E. McKenneye8516c62019-04-09 11:06:32 -07002437 WRITE_ONCE(rcu_fwd_cb_nodelay, false);
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002438}
2439
Paul E. McKenneye0aff972018-10-01 17:40:54 -07002440
2441/*
2442 * OOM notifier, registered only during forward-progress testing: it dumps
2443 * diagnostics, stops the callback flood, and frees the flood's callbacks.
2444 */
2445static int rcutorture_oom_notify(struct notifier_block *self,
2446 unsigned long notused, void *nfreed)
2447{
Paul E. McKenney82e31002021-11-22 20:55:18 -08002448 int i;
2449 long ncbs;
Paul E. McKenney57f60202020-07-20 08:34:07 -07002450 struct rcu_fwd *rfp;
Paul E. McKenney67641002019-11-06 08:20:20 -08002451
Paul E. McKenney57f60202020-07-20 08:34:07 -07002452 mutex_lock(&rcu_fwd_mutex);
2453 rfp = rcu_fwds;
2454 if (!rfp) {
2455 mutex_unlock(&rcu_fwd_mutex);
2456 return NOTIFY_OK;
2457 }
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002458 WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
2459 __func__);
Paul E. McKenney82e31002021-11-22 20:55:18 -08002460 for (i = 0; i < fwd_progress; i++) {
2461 rcu_torture_fwd_cb_hist(&rfp[i]);
2462 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2);
2463 }
Paul E. McKenneye0aff972018-10-01 17:40:54 -07002464 WRITE_ONCE(rcu_fwd_emergency_stop, true);
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002465 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
Paul E. McKenney82e31002021-11-22 20:55:18 -08002466 ncbs = 0;
2467 for (i = 0; i < fwd_progress; i++)
2468 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2469 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002470 rcu_barrier();
Paul E. McKenney82e31002021-11-22 20:55:18 -08002471 ncbs = 0;
2472 for (i = 0; i < fwd_progress; i++)
2473 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2474 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002475 rcu_barrier();
Paul E. McKenney82e31002021-11-22 20:55:18 -08002476 ncbs = 0;
2477 for (i = 0; i < fwd_progress; i++)
2478 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2479 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002480 smp_mb(); /* Frees before return to avoid redoing OOM. */
2481 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
2482 pr_info("%s returning after OOM processing.\n", __func__);
Paul E. McKenney57f60202020-07-20 08:34:07 -07002483 mutex_unlock(&rcu_fwd_mutex);
Paul E. McKenneye0aff972018-10-01 17:40:54 -07002484 return NOTIFY_OK;
2485}
2486
2487static struct notifier_block rcutorture_oom_nb = {
2488 .notifier_call = rcutorture_oom_notify
2489};
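
/*
 * Sketch (not part of rcutorture) of the registration pattern used for the
 * notifier above; see rcu_torture_fwd_prog_init() and _cleanup() below for
 * the real register_oom_notifier()/unregister_oom_notifier() calls.  The
 * demo_oom_* names are illustrative only.
 */
static int demo_oom_notify(struct notifier_block *self,
			   unsigned long notused, void *nfreed)
{
	/* Try to free memory here, then account for what was freed. */
	(*(unsigned long *)nfreed)++;
	return NOTIFY_OK;
}

static struct notifier_block demo_oom_nb = {
	.notifier_call = demo_oom_notify,
};

static int __init demo_oom_init(void)
{
	return register_oom_notifier(&demo_oom_nb);	/* Needs <linux/oom.h>. */
}

static void __exit demo_oom_exit(void)
{
	unregister_oom_notifier(&demo_oom_nb);
}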
2490
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002491/* Carry out grace-period forward-progress testing. */
2492static int rcu_torture_fwd_prog(void *args)
2493{
Paul E. McKenney53b541f2021-11-23 13:51:11 -08002494 bool firsttime = true;
2495 long max_cbs;
Paul E. McKenneyab1b7882020-09-22 16:42:42 -07002496 int oldnice = task_nice(current);
Paul E. McKenney82e31002021-11-22 20:55:18 -08002497 unsigned long oldseq = READ_ONCE(rcu_fwd_seq);
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002498 struct rcu_fwd *rfp = args;
Paul E. McKenneyf4de46e2018-07-24 20:50:40 -07002499 int tested = 0;
Paul E. McKenney152f4af2018-07-19 10:57:58 -07002500 int tested_tries = 0;
Paul E. McKenney1b272912018-07-18 14:32:31 -07002501
2502 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
Paul E. McKenney5ab7ab82018-09-21 18:08:09 -07002503 rcu_bind_current_to_nocb();
Paul E. McKenneyfecad502018-07-20 12:18:11 -07002504 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
2505 set_user_nice(current, MAX_NICE);
Paul E. McKenney1b272912018-07-18 14:32:31 -07002506 do {
Paul E. McKenney82e31002021-11-22 20:55:18 -08002507 if (!rfp->rcu_fwd_id) {
2508 schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
2509 WRITE_ONCE(rcu_fwd_emergency_stop, false);
Paul E. McKenney53b541f2021-11-23 13:51:11 -08002510 if (!firsttime) {
2511 max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0);
2512 pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs);
2513 }
2514 firsttime = false;
Paul E. McKenney82e31002021-11-22 20:55:18 -08002515 WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1);
2516 } else {
2517 while (READ_ONCE(rcu_fwd_seq) == oldseq)
2518 schedule_timeout_interruptible(1);
2519 oldseq = READ_ONCE(rcu_fwd_seq);
2520 }
2521 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2522 if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id)
Paul E. McKenney43550802019-12-04 15:58:41 -08002523 rcu_torture_fwd_prog_cr(rfp);
Paul E. McKenney613b00f2021-11-23 11:53:52 -08002524 if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) &&
2525 (!IS_ENABLED(CONFIG_TINY_RCU) ||
2526 (rcu_inkernel_boot_has_ended() &&
2527 torture_num_online_cpus() > rfp->rcu_fwd_id)))
Paul E. McKenney82e31002021-11-22 20:55:18 -08002528 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
Paul E. McKenney48718482018-08-15 15:32:51 -07002529
Paul E. McKenney1b272912018-07-18 14:32:31 -07002530 /* Avoid slow periods, better to test when busy. */
Paul E. McKenneyab1b7882020-09-22 16:42:42 -07002531 if (stutter_wait("rcu_torture_fwd_prog"))
2532 sched_set_normal(current, oldnice);
Paul E. McKenney1b272912018-07-18 14:32:31 -07002533 } while (!torture_must_stop());
Paul E. McKenney152f4af2018-07-19 10:57:58 -07002534 /* Short runs might not contain a valid forward-progress attempt. */
Paul E. McKenney82e31002021-11-22 20:55:18 -08002535 if (!rfp->rcu_fwd_id) {
2536 WARN_ON(!tested && tested_tries >= 5);
2537 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
2538 }
Paul E. McKenney1b272912018-07-18 14:32:31 -07002539 torture_kthread_stopping("rcu_torture_fwd_prog");
2540 return 0;
2541}
2542
2543/* If forward-progress checking is requested and feasible, spawn the thread. */
2544static int __init rcu_torture_fwd_prog_init(void)
2545{
Paul E. McKenney82e31002021-11-22 20:55:18 -08002546 int i;
2547 int ret = 0;
Paul E. McKenney5155be92019-11-06 08:35:08 -08002548 struct rcu_fwd *rfp;
Paul E. McKenney67641002019-11-06 08:20:20 -08002549
Paul E. McKenney1b272912018-07-18 14:32:31 -07002550 if (!fwd_progress)
2551 return 0; /* Not requested, so don't do it. */
Paul E. McKenney82e31002021-11-22 20:55:18 -08002552 if (fwd_progress >= nr_cpu_ids) {
2553 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.");
2554 fwd_progress = nr_cpu_ids;
2555 } else if (fwd_progress < 0) {
2556 fwd_progress = nr_cpu_ids;
2557 }
Paul E. McKenneya7eb9372020-10-09 19:51:55 -07002558 if ((!cur_ops->sync && !cur_ops->call) ||
Paul E. McKenney613b00f2021-11-23 11:53:52 -08002559 (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) ||
2560 cur_ops == &rcu_busted_ops) {
Paul E. McKenney1b272912018-07-18 14:32:31 -07002561 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
Paul E. McKenney82e31002021-11-22 20:55:18 -08002562 fwd_progress = 0;
Paul E. McKenney1b272912018-07-18 14:32:31 -07002563 return 0;
2564 }
2565 if (stall_cpu > 0) {
2566 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
Paul E. McKenney82e31002021-11-22 20:55:18 -08002567 fwd_progress = 0;
Zhouyi Zhou3ac85872021-07-26 05:43:33 +08002568 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
Paul E. McKenney1b272912018-07-18 14:32:31 -07002569 return -EINVAL; /* In module, can fail back to user. */
2570 WARN_ON(1); /* Make sure rcutorture notices conflict. */
2571 return 0;
2572 }
2573 if (fwd_progress_holdoff <= 0)
2574 fwd_progress_holdoff = 1;
2575 if (fwd_progress_div <= 0)
2576 fwd_progress_div = 4;
Paul E. McKenney82e31002021-11-22 20:55:18 -08002577 rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL);
2578 fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL);
2579 if (!rfp || !fwd_prog_tasks) {
2580 kfree(rfp);
2581 kfree(fwd_prog_tasks);
2582 fwd_prog_tasks = NULL;
2583 fwd_progress = 0;
Paul E. McKenney5155be92019-11-06 08:35:08 -08002584 return -ENOMEM;
Paul E. McKenney82e31002021-11-22 20:55:18 -08002585 }
2586 for (i = 0; i < fwd_progress; i++) {
2587 spin_lock_init(&rfp[i].rcu_fwd_lock);
2588 rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head;
2589 rfp[i].rcu_fwd_id = i;
2590 }
Paul E. McKenney57f60202020-07-20 08:34:07 -07002591 mutex_lock(&rcu_fwd_mutex);
Paul E. McKenneyc8fa6372020-07-19 14:40:31 -07002592 rcu_fwds = rfp;
Paul E. McKenney57f60202020-07-20 08:34:07 -07002593 mutex_unlock(&rcu_fwd_mutex);
Paul E. McKenney299c7d92020-07-22 10:45:12 -07002594 register_oom_notifier(&rcutorture_oom_nb);
Paul E. McKenney82e31002021-11-22 20:55:18 -08002595 for (i = 0; i < fwd_progress; i++) {
2596 ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]);
2597 if (ret) {
2598 fwd_progress = i;
2599 return ret;
2600 }
2601 }
2602 return 0;
Paul E. McKenney1b272912018-07-18 14:32:31 -07002603}
2604
Paul E. McKenneyc8fa6372020-07-19 14:40:31 -07002605static void rcu_torture_fwd_prog_cleanup(void)
2606{
Paul E. McKenney82e31002021-11-22 20:55:18 -08002607 int i;
Paul E. McKenneyc8fa6372020-07-19 14:40:31 -07002608 struct rcu_fwd *rfp;
2609
Paul E. McKenney82e31002021-11-22 20:55:18 -08002610 if (!rcu_fwds || !fwd_prog_tasks)
2611 return;
2612 for (i = 0; i < fwd_progress; i++)
2613 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]);
2614 unregister_oom_notifier(&rcutorture_oom_nb);
Paul E. McKenney57f60202020-07-20 08:34:07 -07002615 mutex_lock(&rcu_fwd_mutex);
Paul E. McKenney82e31002021-11-22 20:55:18 -08002616 rfp = rcu_fwds;
Paul E. McKenneyc8fa6372020-07-19 14:40:31 -07002617 rcu_fwds = NULL;
Paul E. McKenney57f60202020-07-20 08:34:07 -07002618 mutex_unlock(&rcu_fwd_mutex);
Paul E. McKenneyc8fa6372020-07-19 14:40:31 -07002619 kfree(rfp);
Paul E. McKenney82e31002021-11-22 20:55:18 -08002620 kfree(fwd_prog_tasks);
2621 fwd_prog_tasks = NULL;
Paul E. McKenneyc8fa6372020-07-19 14:40:31 -07002622}
2623
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002624/* Callback function for RCU barrier testing. */
Rashika Kheriab3b8a4d2014-02-27 17:16:57 +05302625static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002626{
2627 atomic_inc(&barrier_cbs_invoked);
2628}
2629
Paul E. McKenney50d4b622020-02-04 15:00:56 -08002630/* IPI handler to get callback posted on desired CPU, if online. */
2631static void rcu_torture_barrier1cb(void *rcu_void)
2632{
2633 struct rcu_head *rhp = rcu_void;
2634
2635 cur_ops->call(rhp, rcu_torture_barrier_cbf);
2636}
2637
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002638/* kthread function to register callbacks used to test RCU barriers. */
2639static int rcu_torture_barrier_cbs(void *arg)
2640{
2641 long myid = (long)arg;
Jules Irenge8f43d592020-06-01 19:45:48 +01002642 bool lastphase = false;
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07002643 bool newphase;
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002644 struct rcu_head rcu;
2645
2646 init_rcu_head_on_stack(&rcu);
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08002647 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
Linus Torvalds971eae72014-03-31 11:21:19 -07002648 set_user_nice(current, MAX_NICE);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002649 do {
2650 wait_event(barrier_cbs_wq[myid],
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07002651 (newphase =
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07002652 smp_load_acquire(&barrier_phase)) != lastphase ||
Paul E. McKenney36970bb2014-01-30 15:49:29 -08002653 torture_must_stop());
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07002654 lastphase = newphase;
Paul E. McKenney36970bb2014-01-30 15:49:29 -08002655 if (torture_must_stop())
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002656 break;
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07002657 /*
2658 * The above smp_load_acquire() ensures barrier_phase load
Paul E. McKenneyaab05732016-05-02 12:20:51 -07002659 * is ordered before the following ->call().
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07002660 */
Paul E. McKenney50d4b622020-02-04 15:00:56 -08002661 if (smp_call_function_single(myid, rcu_torture_barrier1cb,
2662 &rcu, 1)) {
2663 // IPI failed, so use direct call from current CPU.
2664 cur_ops->call(&rcu, rcu_torture_barrier_cbf);
2665 }
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002666 if (atomic_dec_and_test(&barrier_cbs_count))
2667 wake_up(&barrier_wq);
Paul E. McKenney36970bb2014-01-30 15:49:29 -08002668 } while (!torture_must_stop());
Paul E. McKenney69c60452014-07-01 11:59:36 -07002669 if (cur_ops->cb_barrier != NULL)
2670 cur_ops->cb_barrier();
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002671 destroy_rcu_head_on_stack(&rcu);
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08002672 torture_kthread_stopping("rcu_torture_barrier_cbs");
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002673 return 0;
2674}
2675
2676/* kthread function to drive and coordinate RCU barrier testing. */
2677static int rcu_torture_barrier(void *arg)
2678{
2679 int i;
2680
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08002681 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002682 do {
2683 atomic_set(&barrier_cbs_invoked, 0);
2684 atomic_set(&barrier_cbs_count, n_barrier_cbs);
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07002685 /* Ensure barrier_phase ordered after prior assignments. */
2686 smp_store_release(&barrier_phase, !barrier_phase);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002687 for (i = 0; i < n_barrier_cbs; i++)
2688 wake_up(&barrier_cbs_wq[i]);
2689 wait_event(barrier_wq,
2690 atomic_read(&barrier_cbs_count) == 0 ||
Paul E. McKenney36970bb2014-01-30 15:49:29 -08002691 torture_must_stop());
2692 if (torture_must_stop())
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002693 break;
2694 n_barrier_attempts++;
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07002695 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002696 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
2697 n_rcu_torture_barrier_error++;
Paul E. McKenney7602de4a2014-12-17 18:39:54 -08002698 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
2699 atomic_read(&barrier_cbs_invoked),
2700 n_barrier_cbs);
Paul E. McKenney9470a182020-02-05 12:54:34 -08002701 WARN_ON(1);
2702 // Wait manually for the remaining callbacks
2703 i = 0;
2704 do {
2705 if (WARN_ON(i++ > HZ))
2706 i = INT_MIN;
2707 schedule_timeout_interruptible(1);
2708 cur_ops->cb_barrier();
2709 } while (atomic_read(&barrier_cbs_invoked) !=
2710 n_barrier_cbs &&
2711 !torture_must_stop());
2712 smp_mb(); // Can't trust ordering if broken.
2713 if (!torture_must_stop())
2714 pr_err("Recovered: barrier_cbs_invoked = %d\n",
2715 atomic_read(&barrier_cbs_invoked));
Joel Fernandes (Google)bf5b6432018-06-19 15:14:19 -07002716 } else {
2717 n_barrier_successes++;
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002718 }
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002719 schedule_timeout_interruptible(HZ / 10);
Paul E. McKenney36970bb2014-01-30 15:49:29 -08002720 } while (!torture_must_stop());
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08002721 torture_kthread_stopping("rcu_torture_barrier");
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002722 return 0;
2723}
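
/*
 * Distilled sketch (not part of rcutorture) of the smp_store_release()/
 * smp_load_acquire() pairing on barrier_phase used above: state written
 * before the release is guaranteed visible to a helper whose acquire load
 * observes the flipped phase.  The demo_* names are illustrative only.
 */
static bool demo_phase;
static int demo_data;
static DECLARE_WAIT_QUEUE_HEAD(demo_phase_wq);

/* Coordinator: publish new state, then flip the phase and wake helpers. */
static void demo_start_round(int round)
{
	demo_data = round;				/* State helpers must see... */
	smp_store_release(&demo_phase, !demo_phase);	/* ...before seeing this flip. */
	wake_up(&demo_phase_wq);
}

/* Helper: wait for a phase flip; the acquire pairs with the release above. */
static int demo_helper_wait(bool *lastphase)
{
	bool newphase;

	wait_event(demo_phase_wq,
		   (newphase = smp_load_acquire(&demo_phase)) != *lastphase);
	*lastphase = newphase;
	return demo_data;	/* The value published before the observed flip. */
}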
2724
2725/* Initialize RCU barrier testing. */
2726static int rcu_torture_barrier_init(void)
2727{
2728 int i;
2729 int ret;
2730
Paul E. McKenneyd9eba7682015-05-14 15:35:43 -07002731 if (n_barrier_cbs <= 0)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002732 return 0;
2733 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07002734 pr_alert("%s" TORTURE_FLAG
2735 " Call or barrier ops missing for %s,\n",
2736 torture_type, cur_ops->name);
2737 pr_alert("%s" TORTURE_FLAG
2738 " RCU barrier testing omitted from run.\n",
2739 torture_type);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002740 return 0;
2741 }
2742 atomic_set(&barrier_cbs_count, 0);
2743 atomic_set(&barrier_cbs_invoked, 0);
2744 barrier_cbs_tasks =
Paul E. McKenney68a675d2017-12-01 14:26:56 -08002745 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002746 GFP_KERNEL);
2747 barrier_cbs_wq =
Paul E. McKenney68a675d2017-12-01 14:26:56 -08002748 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
Sasha Levinde5e6432012-12-20 14:11:28 -05002749 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002750 return -ENOMEM;
2751 for (i = 0; i < n_barrier_cbs; i++) {
2752 init_waitqueue_head(&barrier_cbs_wq[i]);
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002753 ret = torture_create_kthread(rcu_torture_barrier_cbs,
2754 (void *)(long)i,
2755 barrier_cbs_tasks[i]);
2756 if (ret)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002757 return ret;
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002758 }
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002759 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002760}
2761
2762/* Clean up after RCU barrier testing. */
2763static void rcu_torture_barrier_cleanup(void)
2764{
2765 int i;
2766
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002767 torture_stop_kthread(rcu_torture_barrier, barrier_task);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002768 if (barrier_cbs_tasks != NULL) {
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002769 for (i = 0; i < n_barrier_cbs; i++)
2770 torture_stop_kthread(rcu_torture_barrier_cbs,
2771 barrier_cbs_tasks[i]);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002772 kfree(barrier_cbs_tasks);
2773 barrier_cbs_tasks = NULL;
2774 }
2775 if (barrier_cbs_wq != NULL) {
2776 kfree(barrier_cbs_wq);
2777 barrier_cbs_wq = NULL;
2778 }
2779}
2780
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002781static bool rcu_torture_can_boost(void)
2782{
2783 static int boost_warn_once;
2784 int prio;
2785
2786 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
2787 return false;
Paul E. McKenneyea6d9622021-03-30 16:30:32 -07002788 if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)
Paul E. McKenney5e59fba2021-01-15 13:30:38 -08002789 return false;
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002790
2791 prio = rcu_get_gp_kthreads_prio();
2792 if (!prio)
2793 return false;
2794
2795 if (prio < 2) {
Paul E. McKenneyea6d9622021-03-30 16:30:32 -07002796 if (boost_warn_once == 1)
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002797 return false;
2798
Joel Fernandes (Google)bf5b6432018-06-19 15:14:19 -07002799 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002800 boost_warn_once = 1;
2801 return false;
2802 }
2803
2804 return true;
2805}
2806
Paul E. McKenney4a5f1332020-04-24 11:21:40 -07002807static bool read_exit_child_stop;
2808static bool read_exit_child_stopped;
2809static wait_queue_head_t read_exit_wq;
2810
2811// Child kthread which just does an rcutorture reader and exits.
2812static int rcu_torture_read_exit_child(void *trsp_in)
2813{
2814 struct torture_random_state *trsp = trsp_in;
2815
2816 set_user_nice(current, MAX_NICE);
2817 // Minimize time between reading and exiting.
2818 while (!kthread_should_stop())
2819 schedule_timeout_uninterruptible(1);
Paul E. McKenney00504532020-10-29 15:08:57 -07002820 (void)rcu_torture_one_read(trsp, -1);
Paul E. McKenney4a5f1332020-04-24 11:21:40 -07002821 return 0;
2822}
2823
2824// Parent kthread which creates and destroys read-exit child kthreads.
2825static int rcu_torture_read_exit(void *unused)
2826{
2827 int count = 0;
2828 bool errexit = false;
2829 int i;
2830 struct task_struct *tsp;
2831 DEFINE_TORTURE_RANDOM(trs);
2832
2833 // Allocate and initialize.
2834 set_user_nice(current, MAX_NICE);
2835 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");
2836
2837 // Each pass through this loop does one read-exit episode.
2838 do {
2839 if (++count > read_exit_burst) {
2840 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
2841 rcu_barrier(); // Wait for task_struct free, avoid OOM.
2842 for (i = 0; i < read_exit_delay; i++) {
2843 schedule_timeout_uninterruptible(HZ);
2844 if (READ_ONCE(read_exit_child_stop))
2845 break;
2846 }
2847 if (!READ_ONCE(read_exit_child_stop))
2848 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
2849 count = 0;
2850 }
2851 if (READ_ONCE(read_exit_child_stop))
2852 break;
2853 // Spawn child.
2854 tsp = kthread_run(rcu_torture_read_exit_child,
2855 &trs, "%s",
2856 "rcu_torture_read_exit_child");
2857 if (IS_ERR(tsp)) {
Li Zhijian81faa4f2021-11-03 16:30:28 +08002858 TOROUT_ERRSTRING("out of memory");
Paul E. McKenney4a5f1332020-04-24 11:21:40 -07002859 errexit = true;
2860 tsp = NULL;
2861 break;
2862 }
2863 cond_resched();
2864 kthread_stop(tsp);
2865 n_read_exits++;
2866 stutter_wait("rcu_torture_read_exit");
2867 } while (!errexit && !READ_ONCE(read_exit_child_stop));
2868
2869 // Clean up and exit.
2870 smp_store_release(&read_exit_child_stopped, true); // After reaping.
2871 smp_mb(); // Store before wakeup.
2872 wake_up(&read_exit_wq);
2873 while (!torture_must_stop())
2874 schedule_timeout_uninterruptible(1);
2875 torture_kthread_stopping("rcu_torture_read_exit");
2876 return 0;
2877}
2878
2879static int rcu_torture_read_exit_init(void)
2880{
2881 if (read_exit_burst <= 0)
Paul E. McKenneyfda84862021-08-03 17:42:25 -07002882 return 0;
Paul E. McKenney4a5f1332020-04-24 11:21:40 -07002883 init_waitqueue_head(&read_exit_wq);
2884 read_exit_child_stop = false;
2885 read_exit_child_stopped = false;
2886 return torture_create_kthread(rcu_torture_read_exit, NULL,
2887 read_exit_task);
2888}
2889
2890static void rcu_torture_read_exit_cleanup(void)
2891{
2892 if (!read_exit_task)
2893 return;
2894 WRITE_ONCE(read_exit_child_stop, true);
2895 smp_mb(); // Above write before wait.
2896 wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
2897 torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
2898}
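
/*
 * Generic sketch (not part of rcutorture) of the stop/acknowledge handshake
 * used above between rcu_torture_read_exit() and its cleanup function.  The
 * demo_* names are illustrative only.
 */
static bool demo_stop;
static bool demo_stopped;
static DECLARE_WAIT_QUEUE_HEAD(demo_stop_wq);

/* Worker kthread: poll for the stop request, clean up, then acknowledge. */
static int demo_worker(void *unused)
{
	while (!READ_ONCE(demo_stop))
		schedule_timeout_uninterruptible(1);
	/* ... reap children, free resources ... */
	smp_store_release(&demo_stopped, true);	/* After cleanup. */
	smp_mb();				/* Store before wakeup. */
	wake_up(&demo_stop_wq);
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/* Cleanup side: request the stop, then wait for the acknowledgment. */
static void demo_worker_cleanup(struct task_struct *tsk)
{
	WRITE_ONCE(demo_stop, true);
	smp_mb();	/* Above write before wait. */
	wait_event(demo_stop_wq, smp_load_acquire(&demo_stopped));
	kthread_stop(tsk);
}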
2899
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02002900static enum cpuhp_state rcutor_hp;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002901
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002902static void
2903rcu_torture_cleanup(void)
2904{
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07002905 int firsttime;
Paul E. McKenney034777d2018-04-19 08:43:11 -07002906 int flags = 0;
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07002907 unsigned long gp_seq = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002908 int i;
2909
Davidlohr Buesod36a7a02014-09-11 20:40:21 -07002910 if (torture_cleanup_begin()) {
Paul E. McKenney343e9092008-12-15 16:13:07 -08002911 if (cur_ops->cb_barrier != NULL)
2912 cur_ops->cb_barrier();
2913 return;
2914 }
Paul E. McKenneyb813afa2019-03-21 09:27:28 -07002915 if (!cur_ops) {
2916 torture_cleanup_end();
2917 return;
2918 }
Paul E. McKenney3808dc92014-01-28 15:29:21 -08002919
Paul E. McKenney27c0f142020-09-15 17:08:03 -07002920 if (cur_ops->gp_kthread_dbg)
2921 cur_ops->gp_kthread_dbg();
Paul E. McKenney4a5f1332020-04-24 11:21:40 -07002922 rcu_torture_read_exit_cleanup();
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002923 rcu_torture_barrier_cleanup();
Paul E. McKenneyc8fa6372020-07-19 14:40:31 -07002924 rcu_torture_fwd_prog_cleanup();
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002925 torture_stop_kthread(rcu_torture_stall, stall_task);
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002926 torture_stop_kthread(rcu_torture_writer, writer_task);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002927
Paul E. McKenney2c4319b2020-09-23 17:39:46 -07002928 if (nocb_tasks) {
2929 for (i = 0; i < nrealnocbers; i++)
2930 torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
2931 kfree(nocb_tasks);
2932 nocb_tasks = NULL;
2933 }
2934
Josh Triplettc8e5b162007-05-08 00:33:20 -07002935 if (reader_tasks) {
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002936 for (i = 0; i < nrealreaders; i++)
2937 torture_stop_kthread(rcu_torture_reader,
2938 reader_tasks[i]);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002939 kfree(reader_tasks);
Paul E. McKenney293b93d2020-09-23 16:46:36 -07002940 reader_tasks = NULL;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002941 }
Paul E. McKenney00504532020-10-29 15:08:57 -07002942 kfree(rcu_torture_reader_mbchk);
2943 rcu_torture_reader_mbchk = NULL;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002944
Josh Triplettc8e5b162007-05-08 00:33:20 -07002945 if (fakewriter_tasks) {
Paul E. McKenney293b93d2020-09-23 16:46:36 -07002946 for (i = 0; i < nfakewriters; i++)
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002947 torture_stop_kthread(rcu_torture_fakewriter,
2948 fakewriter_tasks[i]);
Josh Triplettb772e1d2006-10-04 02:17:13 -07002949 kfree(fakewriter_tasks);
2950 fakewriter_tasks = NULL;
2951 }
2952
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07002953 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
2954 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
Joel Fernandes (Google)959954d2020-06-18 16:29:55 -04002955 pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n",
2956 cur_ops->name, (long)gp_seq, flags,
2957 rcutorture_seq_diff(gp_seq, start_gp_seq));
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002958 torture_stop_kthread(rcu_torture_stats, stats_task);
2959 torture_stop_kthread(rcu_torture_fqs, fqs_task);
Paul E. McKenneyfd13fe12021-08-06 08:57:26 -07002960 if (rcu_torture_can_boost() && rcutor_hp >= 0)
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02002961 cpuhp_remove_state(rcutor_hp);
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002962
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07002963 /*
Paul E. McKenney62a1a942018-07-07 18:12:26 -07002964 * Wait for all RCU callbacks to fire, then do torture-type-specific
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07002965 * cleanup operations.
2966 */
Paul E. McKenney23269742008-05-12 21:21:05 +02002967 if (cur_ops->cb_barrier != NULL)
2968 cur_ops->cb_barrier();
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07002969 if (cur_ops->cleanup != NULL)
2970 cur_ops->cleanup();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002971
Paul E. McKenney7ab2bd32021-05-02 19:56:05 -07002972 rcu_torture_mem_dump_obj();
2973
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002974 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002975
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07002976 if (err_segs_recorded) {
2977 pr_alert("Failure/close-call rcutorture reader segments:\n");
2978 if (rt_read_nsegs == 0)
2979 pr_alert("\t: No segments recorded!!!\n");
2980 firsttime = 1;
2981 for (i = 0; i < rt_read_nsegs; i++) {
2982 pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
2983 if (err_segs[i].rt_delay_jiffies != 0) {
2984 pr_cont("%s%ldjiffies", firsttime ? "" : "+",
2985 err_segs[i].rt_delay_jiffies);
2986 firsttime = 0;
2987 }
2988 if (err_segs[i].rt_delay_ms != 0) {
2989 pr_cont("%s%ldms", firsttime ? "" : "+",
2990 err_segs[i].rt_delay_ms);
2991 firsttime = 0;
2992 }
2993 if (err_segs[i].rt_delay_us != 0) {
2994 pr_cont("%s%ldus", firsttime ? "" : "+",
2995 err_segs[i].rt_delay_us);
2996 firsttime = 0;
2997 }
2998 pr_cont("%s\n",
2999 err_segs[i].rt_preempted ? "preempted" : "");
3000
3001 }
3002 }
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08003003 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07003004 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
Paul E. McKenney2e9e8082014-01-28 15:58:22 -08003005 else if (torture_onoff_failures())
Paul E. McKenney091541b2012-01-10 12:51:14 -08003006 rcu_torture_print_module_parms(cur_ops,
3007 "End of test: RCU_HOTPLUG");
Paul E. McKenney95c38322006-03-24 03:15:58 -08003008 else
Paul E. McKenney8e8be452010-09-02 16:16:14 -07003009 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
Davidlohr Buesod36a7a02014-09-11 20:40:21 -07003010 torture_cleanup_end();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08003011}
3012
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07003013#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3014static void rcu_torture_leak_cb(struct rcu_head *rhp)
3015{
3016}
3017
3018static void rcu_torture_err_cb(struct rcu_head *rhp)
3019{
3020 /*
3021 * This -might- happen due to race conditions, but is unlikely.
3022 * The scenario that leads to this happening is that the
3023 * first of the pair of duplicate callbacks is queued,
3024 * someone else starts a grace period that includes that
3025 * callback, then the second of the pair must wait for the
3026 * next grace period. Unlikely, but can happen. If it
3027 * does happen, the debug-objects subsystem won't have splatted.
3028 */
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08003029 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07003030}
3031#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
3032
3033/*
3034 * Verify that double-free causes debug-objects to complain, but only
3035 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test
3036 * cannot be carried out.
3037 */
3038static void rcu_test_debug_objects(void)
3039{
3040#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3041 struct rcu_head rh1;
3042 struct rcu_head rh2;
Paul E. McKenneyedf7b842020-12-02 17:52:07 -08003043 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07003044
3045 init_rcu_head_on_stack(&rh1);
3046 init_rcu_head_on_stack(&rh2);
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08003047 pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07003048
3049 /* Try to queue the rh2 pair of callbacks for the same grace period. */
3050 preempt_disable(); /* Prevent preemption from interrupting test. */
3051 rcu_read_lock(); /* Make it impossible to finish a grace period. */
3052 call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
3053 local_irq_disable(); /* Make it harder to start a new grace period. */
3054 call_rcu(&rh2, rcu_torture_leak_cb);
3055 call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
Paul E. McKenneyedf7b842020-12-02 17:52:07 -08003056 if (rhp) {
3057 call_rcu(rhp, rcu_torture_leak_cb);
3058 call_rcu(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
3059 }
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07003060 local_irq_enable();
3061 rcu_read_unlock();
3062 preempt_enable();
3063
3064 /* Wait for them all to get done so we can safely return. */
3065 rcu_barrier();
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08003066 pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07003067 destroy_rcu_head_on_stack(&rh1);
3068 destroy_rcu_head_on_stack(&rh2);
3069#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08003070 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07003071#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
3072}
3073
Paul E. McKenney3a6cb582018-12-10 09:44:52 -08003074static void rcutorture_sync(void)
3075{
3076 static unsigned long n;
3077
3078 if (cur_ops->sync && !(++n & 0xfff))
3079 cur_ops->sync();
3080}
3081
Josh Triplett6f8bc5002007-05-08 00:25:24 -07003082static int __init
Paul E. McKenneya241ec62005-10-30 15:03:12 -08003083rcu_torture_init(void)
3084{
Paul E. McKenneyc04dd092018-07-23 14:16:47 -07003085 long i;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08003086 int cpu;
3087 int firsterr = 0;
Joel Fernandes (Google)959954d2020-06-18 16:29:55 -04003088 int flags = 0;
3089 unsigned long gp_seq = 0;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07003090 static struct rcu_torture_ops *torture_ops[] = {
Paul E. McKenneyc770c822018-07-07 10:28:07 -07003091 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
Paul E. McKenneyc1a76c02020-03-10 10:32:30 -07003092 &busted_srcud_ops, &tasks_ops, &tasks_rude_ops,
3093 &tasks_tracing_ops, &trivial_ops,
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07003094 };
Paul E. McKenneya241ec62005-10-30 15:03:12 -08003095
Paul E. McKenneya2f25772017-11-21 20:19:17 -08003096 if (!torture_init_begin(torture_type, verbose))
Paul E. McKenney52280842014-04-07 09:14:11 -07003097 return -EBUSY;
Paul E. McKenney343e9092008-12-15 16:13:07 -08003098
Paul E. McKenneya241ec62005-10-30 15:03:12 -08003099 /* Process args and tell the world that the torturer is on the job. */
Josh Triplettade5fb82007-05-08 00:33:22 -07003100 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07003101 cur_ops = torture_ops[i];
Josh Triplettade5fb82007-05-08 00:33:22 -07003102 if (strcmp(torture_type, cur_ops->name) == 0)
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07003103 break;
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07003104 }
Josh Triplettade5fb82007-05-08 00:33:22 -07003105 if (i == ARRAY_SIZE(torture_ops)) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07003106 pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
3107 torture_type);
3108 pr_alert("rcu-torture types:");
Paul E. McKenneycf886c42009-10-25 19:03:54 -07003109 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
Joe Perchesa7538352018-05-14 13:27:33 -07003110 pr_cont(" %s", torture_ops[i]->name);
3111 pr_cont("\n");
Paul E. McKenney889d4872015-08-24 11:37:58 -07003112 firsterr = -EINVAL;
Paul E. McKenneyb813afa2019-03-21 09:27:28 -07003113 cur_ops = NULL;
Paul E. McKenney889d4872015-08-24 11:37:58 -07003114 goto unwind;
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07003115 }
Paul E. McKenneybf66f182010-01-04 15:09:10 -08003116 if (cur_ops->fqs == NULL && fqs_duration != 0) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07003117 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
Paul E. McKenneybf66f182010-01-04 15:09:10 -08003118 fqs_duration = 0;
3119 }
Josh Triplettc8e5b162007-05-08 00:33:20 -07003120 if (cur_ops->init)
Paul E. McKenney889d4872015-08-24 11:37:58 -07003121 cur_ops->init();
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07003122
Paul E. McKenney64e4b432014-03-12 10:26:35 -07003123 if (nreaders >= 0) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08003124 nrealreaders = nreaders;
Paul E. McKenney64e4b432014-03-12 10:26:35 -07003125 } else {
Paul E. McKenney3838cc12015-03-12 13:55:48 -07003126 nrealreaders = num_online_cpus() - 2 - nreaders;
Paul E. McKenney64e4b432014-03-12 10:26:35 -07003127 if (nrealreaders <= 0)
3128 nrealreaders = 1;
3129 }
Paul E. McKenney8e8be452010-09-02 16:16:14 -07003130 rcu_torture_print_module_parms(cur_ops, "Start of test");
Joel Fernandes (Google)959954d2020-06-18 16:29:55 -04003131 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
3132 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
3133 start_gp_seq = gp_seq;
3134 pr_alert("%s: Start-test grace-period state: g%ld f%#x\n",
3135 cur_ops->name, (long)gp_seq, flags);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08003136
3137 /* Set up the freelist. */
3138
3139 INIT_LIST_HEAD(&rcu_torture_freelist);
Ahmed S. Darwish788e7702007-05-08 00:33:14 -07003140 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
Paul E. McKenney996417d2005-11-18 01:10:50 -08003141 rcu_tortures[i].rtort_mbtest = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08003142 list_add_tail(&rcu_tortures[i].rtort_free,
3143 &rcu_torture_freelist);
3144 }
3145
3146 /* Initialize the statistics so that each run gets its own numbers. */
3147
3148 rcu_torture_current = NULL;
3149 rcu_torture_current_version = 0;
3150 atomic_set(&n_rcu_torture_alloc, 0);
3151 atomic_set(&n_rcu_torture_alloc_fail, 0);
3152 atomic_set(&n_rcu_torture_free, 0);
Paul E. McKenney996417d2005-11-18 01:10:50 -08003153 atomic_set(&n_rcu_torture_mberror, 0);
Paul E. McKenney00504532020-10-29 15:08:57 -07003154 atomic_set(&n_rcu_torture_mbchk_fail, 0);
3155 atomic_set(&n_rcu_torture_mbchk_tries, 0);
Paul E. McKenney996417d2005-11-18 01:10:50 -08003156 atomic_set(&n_rcu_torture_error, 0);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08003157 n_rcu_torture_barrier_error = 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07003158 n_rcu_torture_boost_ktrerror = 0;
3159 n_rcu_torture_boost_rterror = 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07003160 n_rcu_torture_boost_failure = 0;
3161 n_rcu_torture_boosts = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08003162 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
3163 atomic_set(&rcu_torture_wcount[i], 0);
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08003164 for_each_possible_cpu(cpu) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08003165 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
3166 per_cpu(rcu_torture_count, cpu)[i] = 0;
3167 per_cpu(rcu_torture_batch, cpu)[i] = 0;
3168 }
3169 }
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07003170 err_segs_recorded = 0;
3171 rt_read_nsegs = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08003172
	/* Start up the kthreads. */

	rcu_torture_write_types();
	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (torture_init_error(firsterr))
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
					   GFP_KERNEL);
	if (!reader_tasks || !rcu_torture_reader_mbchk) {
		TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
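	/*
	 * Optionally spawn kthreads that repeatedly toggle the CPUs'
	 * RCU callback handling between offloaded (rcu_nocbs) and
	 * de-offloaded modes.
	 */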
	nrealnocbers = nocbs_nthreads;
	if (WARN_ON(nrealnocbers < 0))
		nrealnocbers = 1;
	if (WARN_ON(nocbs_toggle < 0))
		nocbs_toggle = HZ;
	if (nrealnocbers > 0) {
		nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
		if (nocb_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	} else {
		nocb_tasks = NULL;
	}
	for (i = 0; i < nrealnocbers; i++) {
		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
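	/* Optionally spawn a kthread that periodically prints torture statistics. */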
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
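	/*
	 * When testing dyntick-idle handling, periodically shuffle the
	 * torture kthreads among the online CPUs so that each CPU in
	 * turn is left with nothing to do.
	 */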
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (torture_init_error(firsterr))
			goto unwind;
	}
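	/*
	 * Optionally stutter the test: periodically pause all torture
	 * activity, then let it resume.  The gap between pauses is sized
	 * from the stall duration when the torture type provides one, so
	 * that stall-related testing fits between pauses.
	 */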
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		int t;

		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
		firsterr = torture_stutter_init(stutter * HZ, t);
		if (torture_init_error(firsterr))
			goto unwind;
	}
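	/*
	 * Optionally spawn a kthread that exercises the force-quiescent-state
	 * code by repeatedly invoking the current torture type's fqs()
	 * method in short bursts.
	 */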
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
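	/*
	 * Enforce sane minimums for the boost-test interval and duration,
	 * then, if this torture type supports priority boosting, register
	 * the CPU-hotplug state that creates a boost-test kthread per CPU.
	 */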
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {

		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		rcutor_hp = firsterr;
		if (torture_init_error(firsterr))
			goto unwind;

		// Testing RCU priority boosting requires rcutorture do
		// some serious abuse.  Counter this by running ksoftirqd
		// at higher priority.
		if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) {
			for_each_online_cpu(cpu) {
				struct sched_param sp;
				struct task_struct *t;

				t = per_cpu(ksoftirqd, cpu);
				WARN_ON_ONCE(!t);
				sp.sched_priority = 2;
				sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
			}
		}
	}
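	/*
	 * Register shutdown handling and the remaining optional torture
	 * components: CPU-hotplug (onoff) testing, stall-warning testing,
	 * grace-period forward-progress testing, rcu_barrier() testing,
	 * and read-then-exit testing.
	 */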
	shutdown_jiffies = jiffies + shutdown_secs * HZ;
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
				      rcutorture_sync);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_read_exit_init();
	if (torture_init_error(firsterr))
		goto unwind;
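	/*
	 * If requested, exercise the debug-objects code that detects
	 * duplicate invocations of call_rcu() on the same rcu_head.
	 */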
	if (object_debug)
		rcu_test_debug_objects();
	torture_init_end();
	return 0;

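	/* Initialization failed: unwind whatever has been set up so far. */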
unwind:
	torture_init_end();
	rcu_torture_cleanup();
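	/*
	 * If a shutdown was requested, power off even on failed init so
	 * that automated test runs are not left hanging.
	 */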
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);