// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	  Josh Triplett <josh@joshtriplett.org>
 *
 * See also: Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
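
/*
 * Illustrative composition of a reader-state word (this example is not
 * part of the original source): the low-order RCUTORTURE_RDR_* bits
 * record which protections the reader currently holds, and the SRCU
 * index, when there is one, lives above RCUTORTURE_RDR_SHIFT.  So a
 * reader holding rcu_read_lock_bh() and rcu_read_lock_sched() with
 * SRCU index 1 could be encoded as:
 *
 *	int state = (1 << RCUTORTURE_RDR_SHIFT) |
 *		    RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED;
 */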

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, read_exit_delay, 13,
	      "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16,
	      "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0,
	      "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
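
/*
 * Example invocation (the parameter values here are purely illustrative,
 * not recommendations):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 */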

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/* and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe. This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = READ_ONCE(rp->rtort_pipe_count);
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}
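
/*
 * Explanatory note: ->rtort_pipe_count is the number of grace periods
 * that have elapsed since this structure was replaced, and each call
 * above adds one tick to the rcu_torture_wcount[] histogram at that
 * age.  The structure is reclaimed (return value true) only after it
 * has aged through the entire pipeline.
 */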

/*
 * Update all callbacks in the pipe. Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype		= RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.get_state	= get_state_synchronize_rcu,
	.cond_sync	= cond_synchronize_rcu,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.stall_dur	= rcu_jiffies_till_stall_check,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_mult_test,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}
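
/*
 * Explanatory note: under CONFIG_PREEMPT=n, this flavor's readers are
 * simply preempt_disable()/preempt_enable() sections, so merely running
 * on a given CPU guarantees that any reader previously running there has
 * completed.  Binding to each online CPU in turn therefore waits out all
 * pre-existing readers, which is all a grace period requires.
 */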

static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype		= RCU_TRIVIAL_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.sync		= synchronize_rcu_trivial,
	.exp_sync	= synchronize_rcu_trivial,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "trivial"
};

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype		= RCU_TASKS_RUDE_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_rude_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_rude,
	.exp_sync	= synchronize_rcu_tasks_rude,
	.call		= call_rcu_tasks_rude,
	.cb_barrier	= rcu_barrier_tasks_rude,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks-rude"
};

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype		= RCU_TASKS_TRACING_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_tracing_torture_read_lock,
	.read_delay	= srcu_read_delay,	/* just reuse srcu's version. */
	.readunlock	= tasks_tracing_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_tracing_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_trace,
	.exp_sync	= synchronize_rcu_tasks_trace,
	.call		= call_rcu_tasks_trace,
	.cb_barrier	= rcu_barrier_tasks_trace,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks-tracing"
};

static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}
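
/*
 * Note: flavors that leave ->gp_diff NULL get plain subtraction, which
 * suffices when ->get_gp_seq() returns a simple free-running counter
 * (or the constant 0 from rcu_no_completed()).
 */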

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops || cur_ops == &tasks_rude_ops;
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in; otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true; /* failed */
	}

	return false; /* passed */
}
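
/*
 * Note on the threshold: the callback is allowed nearly the full
 * test_boost_duration interval (less HZ/2 jiffies of slack) to be
 * invoked.  With effective priority boosting it should run well within
 * that window, so exceeding it is treated as a boost failure.
 */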

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sched_set_fifo_low(current);

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Track whether the test already failed in this test interval. */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (time_before(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								  jiffies);
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boost never happened, then inflight will always be 1;
		 * in that case the boost check would never happen in the
		 * above loop, so do another one here.
		 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}
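
/*
 * Worked example (hypothetical parameter values): with fqs_duration=100
 * and fqs_holdoff=10, each burst issues ten cur_ops->fqs() calls spaced
 * ten microseconds apart, and bursts recur every fqs_stutter seconds.
 */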

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}
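
	/*
	 * Explanatory note: each pass through the loop below randomly
	 * selects one of the synctype[] entries populated above, so over
	 * time every grace-period primitive supported by the current
	 * flavor is exercised against the concurrent readers.
	 */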
1075
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001076 do {
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001077 rcu_torture_writer_state = RTWS_FIXED_DELAY;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001078 schedule_timeout_uninterruptible(1);
Paul E. McKenneya71fca52009-09-18 10:28:19 -07001079 rp = rcu_torture_alloc();
1080 if (rp == NULL)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001081 continue;
1082 rp->rtort_pipe_count = 0;
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001083 rcu_torture_writer_state = RTWS_DELAY;
Paul E. McKenney51b11302014-01-27 11:49:39 -08001084 udelay(torture_random(&rand) & 0x3ff);
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001085 rcu_torture_writer_state = RTWS_REPLACE;
Paul E. McKenney0ddea0e2010-09-19 21:06:14 -07001086 old_rp = rcu_dereference_check(rcu_torture_current,
1087 current == writer_task);
Paul E. McKenney996417d2005-11-18 01:10:50 -08001088 rp->rtort_mbtest = 1;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001089 rcu_assign_pointer(rcu_torture_current, rp);
Paul E. McKenney9b2619a2009-09-23 09:50:43 -07001090 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
Josh Triplettc8e5b162007-05-08 00:33:20 -07001091 if (old_rp) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001092 i = old_rp->rtort_pipe_count;
1093 if (i > RCU_TORTURE_PIPE_LEN)
1094 i = RCU_TORTURE_PIPE_LEN;
1095 atomic_inc(&rcu_torture_wcount[i]);
Paul E. McKenney20248912019-12-21 10:41:48 -08001096 WRITE_ONCE(old_rp->rtort_pipe_count,
1097 old_rp->rtort_pipe_count + 1);
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001098 switch (synctype[torture_random(&rand) % nsynctypes]) {
1099 case RTWS_DEF_FREE:
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001100 rcu_torture_writer_state = RTWS_DEF_FREE;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001101 cur_ops->deferred_free(old_rp);
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001102 break;
1103 case RTWS_EXP_SYNC:
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001104 rcu_torture_writer_state = RTWS_EXP_SYNC;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001105 cur_ops->exp_sync();
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001106 rcu_torture_pipe_update(old_rp);
1107 break;
1108 case RTWS_COND_GET:
1109 rcu_torture_writer_state = RTWS_COND_GET;
1110 gp_snap = cur_ops->get_state();
1111 i = torture_random(&rand) % 16;
1112 if (i != 0)
1113 schedule_timeout_interruptible(i);
1114 udelay(torture_random(&rand) % 1000);
1115 rcu_torture_writer_state = RTWS_COND_SYNC;
1116 cur_ops->cond_sync(gp_snap);
1117 rcu_torture_pipe_update(old_rp);
1118 break;
Paul E. McKenneyf0bf8fa2014-03-21 16:17:56 -07001119 case RTWS_SYNC:
1120 rcu_torture_writer_state = RTWS_SYNC;
1121 cur_ops->sync();
1122 rcu_torture_pipe_update(old_rp);
1123 break;
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001124 default:
1125 WARN_ON_ONCE(1);
1126 break;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001127 }
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001128 }
Paul E. McKenney1b272912018-07-18 14:32:31 -07001129 WRITE_ONCE(rcu_torture_current_version,
1130 rcu_torture_current_version + 1);
Paul E. McKenney4bb3c5f2015-02-18 16:31:29 -08001131 /* Cycle through nesting levels of rcu_expedite_gp() calls. */
1132 if (can_expedite &&
1133 !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
1134 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
1135 if (expediting >= 0)
1136 rcu_expedite_gp();
1137 else
1138 rcu_unexpedite_gp();
1139 if (++expediting > 3)
1140 expediting = -expediting;
Paul E. McKenneyf7c0e6a2017-12-08 11:37:24 -08001141 } else if (!can_expedite) { /* Disabled during boot, recheck. */
1142 can_expedite = !rcu_gp_is_expedited() &&
1143 !rcu_gp_is_normal();
Paul E. McKenney4bb3c5f2015-02-18 16:31:29 -08001144 }
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001145 rcu_torture_writer_state = RTWS_STUTTER;
Paul E. McKenneye8516c62019-04-09 11:06:32 -07001146 if (stutter_wait("rcu_torture_writer") &&
Paul E. McKenney5eabea52019-04-12 09:02:46 -07001147 !READ_ONCE(rcu_fwd_cb_nodelay) &&
Paul E. McKenney3432d762019-04-15 14:50:05 -07001148 !cur_ops->slow_gps &&
Paul E. McKenney59ee0322019-11-28 18:54:06 -08001149 !torture_must_stop() &&
1150 rcu_inkernel_boot_has_ended())
Paul E. McKenney474e59b2018-08-07 14:34:44 -07001151 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
Paul E. McKenneye8516c62019-04-09 11:06:32 -07001152 if (list_empty(&rcu_tortures[i].rtort_free) &&
1153 rcu_access_pointer(rcu_torture_current) !=
Paul E. McKenney34aa34b2019-05-16 16:15:16 -07001154 &rcu_tortures[i]) {
1155 rcu_ftrace_dump(DUMP_ALL);
Paul E. McKenneye8516c62019-04-09 11:06:32 -07001156 WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
Paul E. McKenney34aa34b2019-05-16 16:15:16 -07001157 }
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001158 } while (!torture_must_stop());
Paul E. McKenneycae7cc62020-04-26 19:20:37 -07001159 rcu_torture_current = NULL; // Let stats task know that we are done.
Paul E. McKenney4bb3c5f2015-02-18 16:31:29 -08001160 /* Reset expediting back to unexpedited. */
1161 if (expediting > 0)
1162 expediting = -expediting;
1163 while (can_expedite && expediting++ < 0)
1164 rcu_unexpedite_gp();
1165 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
Paul E. McKenneyf7c0e6a2017-12-08 11:37:24 -08001166 if (!can_expedite)
1167 pr_alert("%s" TORTURE_FLAG
1168 " Dynamic grace-period expediting was disabled.\n",
1169 torture_type);
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001170 rcu_torture_writer_state = RTWS_STOPPING;
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001171 torture_kthread_stopping("rcu_torture_writer");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001172 return 0;
1173}
1174
1175/*
Josh Triplettb772e1d2006-10-04 02:17:13 -07001176 * RCU torture fake writer kthread.  Repeatedly invokes a grace-period-wait
1177 * primitive (sync, exp_sync, or cb_barrier) with a random delay between calls.
1178 */
1179static int
1180rcu_torture_fakewriter(void *arg)
1181{
Paul E. McKenney51b11302014-01-27 11:49:39 -08001182 DEFINE_TORTURE_RANDOM(rand);
Josh Triplettb772e1d2006-10-04 02:17:13 -07001183
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001184 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
Linus Torvalds971eae72014-03-31 11:21:19 -07001185 set_user_nice(current, MAX_NICE);
Josh Triplettb772e1d2006-10-04 02:17:13 -07001186
1187 do {
Paul E. McKenney51b11302014-01-27 11:49:39 -08001188 schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
1189 udelay(torture_random(&rand) & 0x3ff);
Paul E. McKenney72472a02012-05-29 17:50:51 -07001190 if (cur_ops->cb_barrier != NULL &&
Paul E. McKenney51b11302014-01-27 11:49:39 -08001191 torture_random(&rand) % (nfakewriters * 8) == 0) {
Paul E. McKenney72472a02012-05-29 17:50:51 -07001192 cur_ops->cb_barrier();
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001193 } else if (gp_normal == gp_exp) {
Paul E. McKenneyeb033992017-12-08 10:48:41 -08001194 if (cur_ops->sync && torture_random(&rand) & 0x80)
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001195 cur_ops->sync();
Paul E. McKenneyeb033992017-12-08 10:48:41 -08001196 else if (cur_ops->exp_sync)
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001197 cur_ops->exp_sync();
Paul E. McKenneyeb033992017-12-08 10:48:41 -08001198 } else if (gp_normal && cur_ops->sync) {
Paul E. McKenney72472a02012-05-29 17:50:51 -07001199 cur_ops->sync();
Paul E. McKenneyeb033992017-12-08 10:48:41 -08001200 } else if (cur_ops->exp_sync) {
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001201 cur_ops->exp_sync();
1202 }
Paul E. McKenney628edaa2014-01-31 11:57:43 -08001203 stutter_wait("rcu_torture_fakewriter");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001204 } while (!torture_must_stop());
Josh Triplettb772e1d2006-10-04 02:17:13 -07001205
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001206 torture_kthread_stopping("rcu_torture_fakewriter");
Josh Triplettb772e1d2006-10-04 02:17:13 -07001207 return 0;
1208}
1209
Paul E. McKenneyf34c85852017-07-20 15:27:32 -07001210static void rcu_torture_timer_cb(struct rcu_head *rhp)
1211{
1212 kfree(rhp);
1213}
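
/*
 * Illustrative sketch (not invoked by the test): posting a dynamically
 * allocated callback that rcu_torture_timer_cb() will free, the same
 * pattern used from the timer handler below.  Assumes cur_ops->call is
 * non-NULL for the flavor under test.
 */
static void __maybe_unused rcu_torture_example_post_cb(void)
{
	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

	if (rhp && cur_ops->call)
		cur_ops->call(rhp, rcu_torture_timer_cb);
}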
1214
Josh Triplettb772e1d2006-10-04 02:17:13 -07001215/*
Paul E. McKenney2397d072018-05-25 07:29:25 -07001216 * Do one extension of an RCU read-side critical section using the
1217 * current reader state in readstate (set to zero for initial entry
1218 * to extended critical section), set the new state as specified by
1219 * newstate (set to zero for final exit from extended critical section),
1220 * and random-number-generator state in trsp. If this is neither the
1221 * beginning nor the end of the critical section and if there was actually a
1222 * change, do a ->read_delay().
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001223 */
Paul E. McKenney2397d072018-05-25 07:29:25 -07001224static void rcutorture_one_extend(int *readstate, int newstate,
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001225 struct torture_random_state *trsp,
1226 struct rt_read_seg *rtrsp)
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001227{
Paul E. McKenney52b1fc32020-03-28 18:53:25 -07001228 unsigned long flags;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001229 int idxnew = -1;
1230 int idxold = *readstate;
1231 int statesnew = ~*readstate & newstate;
1232 int statesold = *readstate & ~newstate;
1233
1234 WARN_ON_ONCE(idxold < 0);
1235 WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001236 rtrsp->rt_readstate = newstate;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001237
1238 /* First, put new protection in place to avoid critical-section gap. */
1239 if (statesnew & RCUTORTURE_RDR_BH)
1240 local_bh_disable();
1241 if (statesnew & RCUTORTURE_RDR_IRQ)
1242 local_irq_disable();
1243 if (statesnew & RCUTORTURE_RDR_PREEMPT)
1244 preempt_disable();
Paul E. McKenney2ceebc02018-07-06 15:16:12 -07001245 if (statesnew & RCUTORTURE_RDR_RBH)
1246 rcu_read_lock_bh();
1247 if (statesnew & RCUTORTURE_RDR_SCHED)
1248 rcu_read_lock_sched();
Paul E. McKenney2397d072018-05-25 07:29:25 -07001249 if (statesnew & RCUTORTURE_RDR_RCU)
1250 idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
1251
1252 /* Next, remove old protection, irq first due to bh conflict. */
1253 if (statesold & RCUTORTURE_RDR_IRQ)
1254 local_irq_enable();
1255 if (statesold & RCUTORTURE_RDR_BH)
1256 local_bh_enable();
1257 if (statesold & RCUTORTURE_RDR_PREEMPT)
1258 preempt_enable();
Paul E. McKenney2ceebc02018-07-06 15:16:12 -07001259 if (statesold & RCUTORTURE_RDR_RBH)
1260 rcu_read_unlock_bh();
1261 if (statesold & RCUTORTURE_RDR_SCHED)
1262 rcu_read_unlock_sched();
Paul E. McKenney52b1fc32020-03-28 18:53:25 -07001263 if (statesold & RCUTORTURE_RDR_RCU) {
1264 bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);
1265
1266 if (lockit)
1267 raw_spin_lock_irqsave(&current->pi_lock, flags);
Paul E. McKenney2397d072018-05-25 07:29:25 -07001268 cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
Paul E. McKenney52b1fc32020-03-28 18:53:25 -07001269 if (lockit)
1270 raw_spin_unlock_irqrestore(&current->pi_lock, flags);
1271 }
Paul E. McKenney2397d072018-05-25 07:29:25 -07001272
1273 /* Delay if neither beginning nor end and there was a change. */
1274 if ((statesnew || statesold) && *readstate && newstate)
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001275 cur_ops->read_delay(trsp, rtrsp);
Paul E. McKenney2397d072018-05-25 07:29:25 -07001276
1277 /* Update the reader state. */
1278 if (idxnew == -1)
1279 idxnew = idxold & ~RCUTORTURE_RDR_MASK;
1280 WARN_ON_ONCE(idxnew < 0);
1281 WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
1282 *readstate = idxnew | newstate;
1283 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
1284 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
1285}
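
/*
 * Illustrative sketch (unused): how a reader-state word packs the
 * ->readlock() return value above RCUTORTURE_RDR_SHIFT alongside the
 * RCUTORTURE_RDR_* protection bits below it, as maintained by
 * rcutorture_one_extend() above.
 */
static inline int rcutorture_example_pack_readstate(int idx, int protbits)
{
	WARN_ON_ONCE(protbits & ~RCUTORTURE_RDR_MASK);
	return (idx << RCUTORTURE_RDR_SHIFT) | protbits;
}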
1286
1287/* Return the biggest extendables mask given current RCU and boot parameters. */
1288static int rcutorture_extend_mask_max(void)
1289{
1290 int mask;
1291
1292 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
1293 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
1294 mask = mask | RCUTORTURE_RDR_RCU;
1295 return mask;
1296}
1297
1298/* Return a random protection state mask, but with at least one bit set. */
1299static int
1300rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
1301{
1302 int mask = rcutorture_extend_mask_max();
Paul E. McKenneybf1bef52018-06-10 08:50:09 -07001303 unsigned long randmask1 = torture_random(trsp) >> 8;
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001304 unsigned long randmask2 = randmask1 >> 3;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001305
1306 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
Paul E. McKenneya3b0e1e52019-02-28 15:06:13 -08001307 /* Mostly only one bit (need preemption!), sometimes lots of bits. */
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001308 if (!(randmask1 & 0x7))
Paul E. McKenneybf1bef52018-06-10 08:50:09 -07001309 mask = mask & randmask2;
1310 else
1311 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
Paul E. McKenney2ceebc02018-07-06 15:16:12 -07001312 /* Can't enable bh w/irq disabled. */
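	/*
	 * E.g., if the candidate mask drops RCUTORTURE_RDR_BH while keeping
	 * RCUTORTURE_RDR_IRQ, the exit path would invoke local_bh_enable()
	 * with interrupts disabled, so retain both BH flavors instead.
	 */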
Paul E. McKenney2397d072018-05-25 07:29:25 -07001313 if ((mask & RCUTORTURE_RDR_IRQ) &&
Paul E. McKenney2ceebc02018-07-06 15:16:12 -07001314 ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
1315 (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
1316 mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001317 return mask ?: RCUTORTURE_RDR_RCU;
1318}
1319
1320/*
1321 * Do a randomly selected number of extensions of an existing RCU read-side
1322 * critical section.
1323 */
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001324static struct rt_read_seg *
1325rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
1326 struct rt_read_seg *rtrsp)
Paul E. McKenney2397d072018-05-25 07:29:25 -07001327{
1328 int i;
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001329 int j;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001330 int mask = rcutorture_extend_mask_max();
1331
1332 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
1333 if (!((mask - 1) & mask))
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001334 return rtrsp; /* Current RCU reader not extendable. */
1335 /* Bias towards larger numbers of loops. */
1336 i = (torture_random(trsp) >> 3);
1337 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
1338 for (j = 0; j < i; j++) {
Paul E. McKenney2397d072018-05-25 07:29:25 -07001339 mask = rcutorture_extend_mask(*readstate, trsp);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001340 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
Paul E. McKenney2397d072018-05-25 07:29:25 -07001341 }
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001342 return &rtrsp[j];
Paul E. McKenney2397d072018-05-25 07:29:25 -07001343}
1344
1345/*
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001346 * Do one read-side critical section, returning false if there was
1347 * no data to read. Can be invoked both from process context and
1348 * from a timer handler.
1349 */
1350static bool rcu_torture_one_read(struct torture_random_state *trsp)
1351{
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001352 int i;
Paul E. McKenney917963d2014-11-21 17:10:16 -08001353 unsigned long started;
Paul E. McKenney6b80da42014-11-21 14:19:26 -08001354 unsigned long completed;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001355 int newstate;
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001356 struct rcu_torture *p;
1357 int pipe_count;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001358 int readstate = 0;
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001359 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
1360 struct rt_read_seg *rtrsp = &rtseg[0];
1361 struct rt_read_seg *rtrsp1;
Paul E. McKenney52494532012-11-14 16:26:40 -08001362 unsigned long long ts;
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001363
Paul E. McKenney77522752020-06-11 16:43:14 -07001364 WARN_ON_ONCE(!rcu_is_watching());
Paul E. McKenney2397d072018-05-25 07:29:25 -07001365 newstate = rcutorture_extend_mask(readstate, trsp);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001366 rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
Paul E. McKenney17ef2fe2018-04-27 11:39:34 -07001367 started = cur_ops->get_gp_seq();
Steven Rostedte4aa0da2013-02-04 13:36:13 -05001368 ts = rcu_trace_clock_local();
Paul E. McKenney632ee202010-02-22 17:04:45 -08001369 p = rcu_dereference_check(rcu_torture_current,
Paul E. McKenney632ee202010-02-22 17:04:45 -08001370 rcu_read_lock_bh_held() ||
1371 rcu_read_lock_sched_held() ||
Paul E. McKenney5be5d1a2015-06-30 08:57:57 -07001372 srcu_read_lock_held(srcu_ctlp) ||
Paul E. McKenneyc1a76c02020-03-10 10:32:30 -07001373 rcu_read_lock_trace_held() ||
Paul E. McKenney5be5d1a2015-06-30 08:57:57 -07001374 torturing_tasks());
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001375 if (p == NULL) {
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001376 /* Wait for rcu_torture_writer to get underway */
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001377 rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001378 return false;
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001379 }
1380 if (p->rtort_mbtest == 0)
1381 atomic_inc(&n_rcu_torture_mberror);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001382 rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001383 preempt_disable();
Paul E. McKenney20248912019-12-21 10:41:48 -08001384 pipe_count = READ_ONCE(p->rtort_pipe_count);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001385 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1386 /* Should not happen, but... */
1387 pipe_count = RCU_TORTURE_PIPE_LEN;
1388 }
Paul E. McKenney17ef2fe2018-04-27 11:39:34 -07001389 completed = cur_ops->get_gp_seq();
Paul E. McKenney52494532012-11-14 16:26:40 -08001390 if (pipe_count > 1) {
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001391 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1392 ts, started, completed);
Paul E. McKenney274529b2016-03-21 19:46:04 -07001393 rcu_ftrace_dump(DUMP_ALL);
Paul E. McKenney52494532012-11-14 16:26:40 -08001394 }
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001395 __this_cpu_inc(rcu_torture_count[pipe_count]);
Paul E. McKenneyd72193122018-05-15 15:24:41 -07001396 completed = rcutorture_seq_diff(completed, started);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001397 if (completed > RCU_TORTURE_PIPE_LEN) {
1398 /* Should not happen, but... */
1399 completed = RCU_TORTURE_PIPE_LEN;
1400 }
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001401 __this_cpu_inc(rcu_torture_batch[completed]);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001402 preempt_enable();
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001403 rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
Paul E. McKenney2397d072018-05-25 07:29:25 -07001404 WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
Paul E. McKenneyd6855142020-08-11 10:33:39 -07001405 // This next splat is expected behavior if leakpointer, especially
1406 // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
1407 WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001408
1409 /* If error or close call, record the sequence of reader protections. */
1410 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
1411 i = 0;
1412 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
1413 err_segs[i++] = *rtrsp1;
1414 rt_read_nsegs = i;
1415 }
1416
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001417 return true;
1418}
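
/*
 * Minimal sketch (unused) of the reader pattern exercised above, using
 * vanilla RCU directly rather than the cur_ops indirection and without
 * the segment bookkeeping.
 */
static bool __maybe_unused rcu_torture_example_reader(void)
{
	struct rcu_torture *p;
	bool ret;

	rcu_read_lock();
	p = rcu_dereference(rcu_torture_current);
	ret = p && READ_ONCE(p->rtort_pipe_count) <= RCU_TORTURE_PIPE_LEN;
	rcu_read_unlock();
	return ret;
}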
1419
Paul E. McKenney3025520e2018-05-22 11:38:47 -07001420static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
1421
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001422/*
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001423 * RCU torture reader from timer handler. Dereferences rcu_torture_current,
1424 * incrementing the corresponding element of the pipeline array. The
1425 * counter in the element should never be greater than 1; otherwise, the
1426 * RCU implementation is broken.
1427 */
1428static void rcu_torture_timer(struct timer_list *unused)
1429{
Paul E. McKenney8da9a592018-05-22 11:17:51 -07001430 atomic_long_inc(&n_rcu_torture_timers);
Paul E. McKenney241b4252018-05-22 11:59:31 -07001431 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));
Paul E. McKenneyf34c85852017-07-20 15:27:32 -07001432
1433 /* Test call_rcu() invocation from interrupt handler. */
1434 if (cur_ops->call) {
1435 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
1436
1437 if (rhp)
1438 cur_ops->call(rhp, rcu_torture_timer_cb);
1439 }
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001440}
1441
1442/*
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001443 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
1444 * incrementing the corresponding element of the pipeline array. The
1445 * counter in the element should never be greater than 1; otherwise, the
1446 * RCU implementation is broken.
1447 */
1448static int
1449rcu_torture_reader(void *arg)
1450{
Paul E. McKenney444da512018-07-04 14:14:42 -07001451 unsigned long lastsleep = jiffies;
Paul E. McKenneyc04dd092018-07-23 14:16:47 -07001452 long myid = (long)arg;
1453 int mynumonline = myid;
Paul E. McKenney51b11302014-01-27 11:49:39 -08001454 DEFINE_TORTURE_RANDOM(rand);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001455 struct timer_list t;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001456
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001457 VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
Linus Torvalds971eae72014-03-31 11:21:19 -07001458 set_user_nice(current, MAX_NICE);
Paul E. McKenney0acc5122009-06-25 09:08:17 -07001459 if (irqreader && cur_ops->irq_capable)
Kees Cookfd30b712017-10-22 17:58:54 -07001460 timer_setup_on_stack(&t, rcu_torture_timer, 0);
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07001461 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001462 do {
Paul E. McKenney0acc5122009-06-25 09:08:17 -07001463 if (irqreader && cur_ops->irq_capable) {
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001464 if (!timer_pending(&t))
Paul E. McKenney6155fec2010-02-22 17:05:04 -08001465 mod_timer(&t, jiffies + 1);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001466 }
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07001467 if (!rcu_torture_one_read(&rand) && !torture_must_stop())
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001468 schedule_timeout_interruptible(HZ);
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07001469 if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
Paul E. McKenney444da512018-07-04 14:14:42 -07001470 schedule_timeout_interruptible(1);
1471 lastsleep = jiffies + 10;
1472 }
Paul E. McKenneyc04dd092018-07-23 14:16:47 -07001473 while (num_online_cpus() < mynumonline && !torture_must_stop())
1474 schedule_timeout_interruptible(HZ / 5);
Paul E. McKenney628edaa2014-01-31 11:57:43 -08001475 stutter_wait("rcu_torture_reader");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001476 } while (!torture_must_stop());
Thomas Gleixner424c1b62014-03-23 08:58:27 -07001477 if (irqreader && cur_ops->irq_capable) {
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001478 del_timer_sync(&t);
Thomas Gleixner424c1b62014-03-23 08:58:27 -07001479 destroy_timer_on_stack(&t);
1480 }
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07001481 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001482 torture_kthread_stopping("rcu_torture_reader");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001483 return 0;
1484}
1485
1486/*
Joe Percheseea203f2014-07-14 09:16:15 -04001487 * Print torture statistics. Caller must ensure that there is only
1488 * one call to this function at a given time!!! This is normally
1489 * accomplished by relying on the module system to only have one copy
1490 * of the module loaded, and then by giving the rcu_torture_stats
1491 * kthread full control (or the init/cleanup functions when the rcu_torture_stats
1492 * thread is not running).
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001493 */
Chen Gangd1008952013-11-07 10:30:25 +08001494static void
Joe Percheseea203f2014-07-14 09:16:15 -04001495rcu_torture_stats_print(void)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001496{
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001497 int cpu;
1498 int i;
1499 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1500 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
Paul E. McKenney5396d312020-01-08 19:58:13 -08001501 struct rcu_torture *rtcp;
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001502 static unsigned long rtcv_snap = ULONG_MAX;
Paul E. McKenney0032f4e2017-08-30 10:40:17 -07001503 static bool splatted;
Paul E. McKenney4ffa6692016-06-30 11:56:38 -07001504 struct task_struct *wtp;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001505
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08001506 for_each_possible_cpu(cpu) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001507 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
Paul E. McKenneyf042a432020-01-03 16:27:00 -08001508 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
1509 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001510 }
1511 }
1512 for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
1513 if (pipesummary[i] != 0)
1514 break;
1515 }
Joe Percheseea203f2014-07-14 09:16:15 -04001516
1517 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
Paul E. McKenney5396d312020-01-08 19:58:13 -08001518 rtcp = rcu_access_pointer(rcu_torture_current);
Paul E. McKenney354ea052019-05-25 12:36:53 -07001519 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
Paul E. McKenney5396d312020-01-08 19:58:13 -08001520 rtcp,
1521 rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
Joe Percheseea203f2014-07-14 09:16:15 -04001522 rcu_torture_current_version,
1523 list_empty(&rcu_torture_freelist),
1524 atomic_read(&n_rcu_torture_alloc),
1525 atomic_read(&n_rcu_torture_alloc_fail),
1526 atomic_read(&n_rcu_torture_free));
SeongJae Park472213a2016-08-13 15:54:35 +09001527 pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
Joe Percheseea203f2014-07-14 09:16:15 -04001528 atomic_read(&n_rcu_torture_mberror),
SeongJae Park472213a2016-08-13 15:54:35 +09001529 n_rcu_torture_barrier_error,
Joe Percheseea203f2014-07-14 09:16:15 -04001530 n_rcu_torture_boost_ktrerror,
1531 n_rcu_torture_boost_rterror);
1532 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
1533 n_rcu_torture_boost_failure,
1534 n_rcu_torture_boosts,
Paul E. McKenney8da9a592018-05-22 11:17:51 -07001535 atomic_long_read(&n_rcu_torture_timers));
Joe Percheseea203f2014-07-14 09:16:15 -04001536 torture_onoff_stats();
Paul E. McKenney4a5f1332020-04-24 11:21:40 -07001537 pr_cont("barrier: %ld/%ld:%ld ",
Paul E. McKenneyc9527be2020-02-18 13:41:02 -08001538 data_race(n_barrier_successes),
1539 data_race(n_barrier_attempts),
1540 data_race(n_rcu_torture_barrier_error));
Paul E. McKenney4a5f1332020-04-24 11:21:40 -07001541 pr_cont("read-exits: %ld\n", data_race(n_read_exits));
Joe Percheseea203f2014-07-14 09:16:15 -04001542
1543 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
Paul E. McKenney8b5ddf82019-08-14 12:02:40 -07001544 if (atomic_read(&n_rcu_torture_mberror) ||
1545 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
1546 n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001547 i > 1) {
Joe Percheseea203f2014-07-14 09:16:15 -04001548 pr_cont("%s", "!!! ");
Paul E. McKenney996417d2005-11-18 01:10:50 -08001549 atomic_inc(&n_rcu_torture_error);
Paul E. McKenney8b5ddf82019-08-14 12:02:40 -07001550 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
1551 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier()
1552 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
1553 WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
1554 WARN_ON_ONCE(n_rcu_torture_boost_failure); // RCU boost failed
1555 WARN_ON_ONCE(i > 1); // Too-short grace period
Paul E. McKenney996417d2005-11-18 01:10:50 -08001556 }
Joe Percheseea203f2014-07-14 09:16:15 -04001557 pr_cont("Reader Pipe: ");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001558 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
Joe Percheseea203f2014-07-14 09:16:15 -04001559 pr_cont(" %ld", pipesummary[i]);
1560 pr_cont("\n");
1561
1562 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1563 pr_cont("Reader Batch: ");
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07001564 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
Joe Percheseea203f2014-07-14 09:16:15 -04001565 pr_cont(" %ld", batchsummary[i]);
1566 pr_cont("\n");
1567
1568 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1569 pr_cont("Free-Block Circulation: ");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001570 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
Joe Percheseea203f2014-07-14 09:16:15 -04001571 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001572 }
Joe Percheseea203f2014-07-14 09:16:15 -04001573 pr_cont("\n");
1574
Josh Triplettc8e5b162007-05-08 00:33:20 -07001575 if (cur_ops->stats)
Joe Percheseea203f2014-07-14 09:16:15 -04001576 cur_ops->stats();
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001577 if (rtcv_snap == rcu_torture_current_version &&
Paul E. McKenney5396d312020-01-08 19:58:13 -08001578 rcu_access_pointer(rcu_torture_current) &&
1579 !rcu_stall_is_suppressed()) {
Paul E. McKenney7f6733c2017-04-18 17:17:35 -07001580 int __maybe_unused flags = 0;
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001581 unsigned long __maybe_unused gp_seq = 0;
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001582
1583 rcutorture_get_gp_data(cur_ops->ttype,
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001584 &flags, &gp_seq);
Paul E. McKenney7f6733c2017-04-18 17:17:35 -07001585 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001586 &flags, &gp_seq);
Paul E. McKenney4ffa6692016-06-30 11:56:38 -07001587 wtp = READ_ONCE(writer_task);
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001588 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
Paul E. McKenney18aff332015-11-17 13:35:28 -08001589 rcu_torture_writer_state_getname(),
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001590 rcu_torture_writer_state, gp_seq, flags,
Paul E. McKenney808de392017-06-19 10:03:22 -07001591 wtp == NULL ? ~0UL : wtp->state,
1592 wtp == NULL ? -1 : (int)task_cpu(wtp));
Paul E. McKenney0032f4e2017-08-30 10:40:17 -07001593 if (!splatted && wtp) {
1594 sched_show_task(wtp);
1595 splatted = true;
1596 }
Paul E. McKenneyafea2272014-03-12 07:10:41 -07001597 show_rcu_gp_kthreads();
Paul E. McKenney274529b2016-03-21 19:46:04 -07001598 rcu_ftrace_dump(DUMP_ALL);
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001599 }
1600 rtcv_snap = rcu_torture_current_version;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001601}
1602
1603/*
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001604 * Periodically prints torture statistics, if periodic statistics printing
1605 * was specified via the stat_interval module parameter.
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001606 */
1607static int
1608rcu_torture_stats(void *arg)
1609{
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001610 VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001611 do {
1612 schedule_timeout_interruptible(stat_interval * HZ);
1613 rcu_torture_stats_print();
Paul E. McKenneyf67a3352014-01-29 07:40:27 -08001614 torture_shutdown_absorb("rcu_torture_stats");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001615 } while (!torture_must_stop());
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001616 torture_kthread_stopping("rcu_torture_stats");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001617 return 0;
1618}
1619
Paul E. McKenneyeac45e52018-05-17 11:33:17 -07001620static void
Steven Rostedt (Red Hat)e66c33d2013-07-12 16:50:28 -04001621rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
Paul E. McKenney95c38322006-03-24 03:15:58 -08001622{
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001623 pr_alert("%s" TORTURE_FLAG
1624 "--- %s: nreaders=%d nfakewriters=%d "
1625 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1626 "shuffle_interval=%d stutter=%d irqreader=%d "
1627 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1628 "test_boost=%d/%d test_boost_interval=%d "
1629 "test_boost_duration=%d shutdown_secs=%d "
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001630 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
Paul E. McKenney19a8ff92020-03-11 17:39:12 -07001631 "stall_cpu_block=%d "
Paul E. McKenney67afeed2012-10-20 12:56:06 -07001632 "n_barrier_cbs=%d "
Paul E. McKenney4a5f1332020-04-24 11:21:40 -07001633 "onoff_interval=%d onoff_holdoff=%d "
1634 "read_exit_delay=%d read_exit_burst=%d\n",
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001635 torture_type, tag, nrealreaders, nfakewriters,
1636 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1637 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1638 test_boost, cur_ops->can_boost,
1639 test_boost_interval, test_boost_duration, shutdown_secs,
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001640 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
Paul E. McKenney19a8ff92020-03-11 17:39:12 -07001641 stall_cpu_block,
Paul E. McKenney67afeed2012-10-20 12:56:06 -07001642 n_barrier_cbs,
Paul E. McKenney4a5f1332020-04-24 11:21:40 -07001643 onoff_interval, onoff_holdoff,
1644 read_exit_delay, read_exit_burst);
Paul E. McKenney95c38322006-03-24 03:15:58 -08001645}
1646
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001647static int rcutorture_booster_cleanup(unsigned int cpu)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001648{
1649 struct task_struct *t;
1650
1651 if (boost_tasks[cpu] == NULL)
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001652 return 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001653 mutex_lock(&boost_mutex);
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001654 t = boost_tasks[cpu];
1655 boost_tasks[cpu] = NULL;
Joel Fernandes (Google)450efca2018-06-10 16:45:43 -07001656 rcu_torture_enable_rt_throttle();
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001657 mutex_unlock(&boost_mutex);
1658
1659 /* This must be outside of the mutex, otherwise deadlock! */
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001660 torture_stop_kthread(rcu_torture_boost, t);
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001661 return 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001662}
1663
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001664static int rcutorture_booster_init(unsigned int cpu)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001665{
1666 int retval;
1667
1668 if (boost_tasks[cpu] != NULL)
1669 return 0; /* Already created, nothing more to do. */
1670
1671 /* Don't allow time recalculation while creating a new task. */
1672 mutex_lock(&boost_mutex);
Joel Fernandes (Google)450efca2018-06-10 16:45:43 -07001673 rcu_torture_disable_rt_throttle();
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001674 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
Eric Dumazet1f288092011-06-16 15:53:18 -07001675 boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
1676 cpu_to_node(cpu),
1677 "rcu_torture_boost");
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001678 if (IS_ERR(boost_tasks[cpu])) {
1679 retval = PTR_ERR(boost_tasks[cpu]);
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001680 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001681 n_rcu_torture_boost_ktrerror++;
1682 boost_tasks[cpu] = NULL;
1683 mutex_unlock(&boost_mutex);
1684 return retval;
1685 }
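	/* Bind before first wakeup so the booster always runs on its CPU. */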
1686 kthread_bind(boost_tasks[cpu], cpu);
1687 wake_up_process(boost_tasks[cpu]);
1688 mutex_unlock(&boost_mutex);
1689 return 0;
1690}
1691
Paul E. McKenneyd5f546d2011-11-04 11:44:12 -07001692/*
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001693 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
1694 * induces a CPU stall for the time specified by stall_cpu.
1695 */
Paul Gortmaker49fb4c62013-06-19 14:52:21 -04001696static int rcu_torture_stall(void *args)
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001697{
Paul E. McKenney19a8ff92020-03-11 17:39:12 -07001698 int idx;
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001699 unsigned long stop_at;
1700
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001701 VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001702 if (stall_cpu_holdoff > 0) {
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001703 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001704 schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001705 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001706 }
Paul E. McKenney55b2dcf2020-04-01 19:57:52 -07001707 if (!kthread_should_stop() && stall_gp_kthread > 0) {
1708 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
1709 rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
1710 for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
1711 if (kthread_should_stop())
1712 break;
1713 schedule_timeout_uninterruptible(HZ);
1714 }
1715 }
1716 if (!kthread_should_stop() && stall_cpu > 0) {
1717 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
Arnd Bergmann622be33f2018-06-18 16:47:34 +02001718 stop_at = ktime_get_seconds() + stall_cpu;
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001719 /* RCU CPU stall is expected behavior in following code. */
Paul E. McKenney19a8ff92020-03-11 17:39:12 -07001720 idx = cur_ops->readlock();
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001721 if (stall_cpu_irqsoff)
1722 local_irq_disable();
Paul E. McKenney19a8ff92020-03-11 17:39:12 -07001723 else if (!stall_cpu_block)
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001724 preempt_disable();
1725 pr_alert("rcu_torture_stall start on CPU %d.\n",
Paul E. McKenney19a8ff92020-03-11 17:39:12 -07001726 raw_smp_processor_id());
Arnd Bergmann622be33f2018-06-18 16:47:34 +02001727 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
1728 stop_at))
Paul E. McKenney19a8ff92020-03-11 17:39:12 -07001729 if (stall_cpu_block)
1730 schedule_timeout_uninterruptible(HZ);
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001731 if (stall_cpu_irqsoff)
1732 local_irq_enable();
Paul E. McKenney19a8ff92020-03-11 17:39:12 -07001733 else if (!stall_cpu_block)
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001734 preempt_enable();
Paul E. McKenney19a8ff92020-03-11 17:39:12 -07001735 cur_ops->readunlock(idx);
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001736 }
Paul E. McKenney55b2dcf2020-04-01 19:57:52 -07001737 pr_alert("rcu_torture_stall end.\n");
Paul E. McKenneyf67a3352014-01-29 07:40:27 -08001738 torture_shutdown_absorb("rcu_torture_stall");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001739 while (!kthread_should_stop())
1740 schedule_timeout_interruptible(10 * HZ);
1741 return 0;
1742}
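
/*
 * Example (illustrative parameter values): booting with
 * rcutorture.stall_cpu=22 rcutorture.stall_cpu_holdoff=30 makes the
 * above kthread wait 30 seconds, then spin for 22 seconds within an
 * RCU read-side critical section; adding rcutorture.stall_cpu_block=1
 * makes it sleep there instead, and rcutorture.stall_gp_kthread=N
 * stalls the grace-period kthread for N seconds.
 */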
1743
1744/* Spawn CPU-stall kthread, if stall_cpu or stall_gp_kthread specified. */
1745static int __init rcu_torture_stall_init(void)
1746{
Paul E. McKenney55b2dcf2020-04-01 19:57:52 -07001747 if (stall_cpu <= 0 && stall_gp_kthread <= 0)
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001748 return 0;
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08001749 return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001750}
1751
Paul E. McKenney9fdcb9a2018-07-19 13:36:00 -07001752/* State structure for forward-progress self-propagating RCU callback. */
1753struct fwd_cb_state {
1754 struct rcu_head rh;
1755 int stop;
1756};
1757
1758/*
1759 * Forward-progress self-propagating RCU callback function. Because
1760 * callbacks run from softirq, this function is an implicit RCU read-side
1761 * critical section.
1762 */
1763static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
1764{
1765 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
1766
1767 if (READ_ONCE(fcsp->stop)) {
1768 WRITE_ONCE(fcsp->stop, 2);
1769 return;
1770 }
1771 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
1772}
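
/*
 * Illustrative sketch (unused): launching and later stopping a
 * self-propagating callback such as the one above, mirroring the usage
 * in rcu_torture_fwd_prog_nr().  Assumes cur_ops->call, ->sync, and
 * ->cb_barrier are all non-NULL.
 */
static void __maybe_unused rcu_torture_example_selfprop(struct fwd_cb_state *fcsp)
{
	WRITE_ONCE(fcsp->stop, 0);
	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
	/* ... let the callback requeue itself for a while ... */
	WRITE_ONCE(fcsp->stop, 1);
	cur_ops->sync();	/* Wait for the running callback to finish. */
	cur_ops->cb_barrier();	/* Wait for the final invocation. */
}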
1773
Paul E. McKenney48718482018-08-15 15:32:51 -07001774/* State for continuous-flood RCU callbacks. */
1775struct rcu_fwd_cb {
1776 struct rcu_head rh;
1777 struct rcu_fwd_cb *rfc_next;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001778 struct rcu_fwd *rfc_rfp;
Paul E. McKenney48718482018-08-15 15:32:51 -07001779 int rfc_gps;
1780};
Paul E. McKenneya289e602019-11-05 08:31:56 -08001781
Paul E. McKenney48718482018-08-15 15:32:51 -07001782#define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */
1783#define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */
1784#define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */
Paul E. McKenney2e57bf92018-10-05 16:43:09 -07001785#define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */
Paul E. McKenneya289e602019-11-05 08:31:56 -08001786#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
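/*
 * E.g., an 8-second maximum test duration, doubled for slack, at
 * FWD_CBS_HIST_DIV = 10 buckets per second gives 2 * 8 * 10 = 160
 * buckets, each HZ / 10 jiffies wide.
 */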
1787
Paul E. McKenneycd618d12019-01-08 13:41:26 -08001788struct rcu_launder_hist {
1789 long n_launders;
1790 unsigned long launder_gp_seq;
1791};
Paul E. McKenney48718482018-08-15 15:32:51 -07001792
Paul E. McKenneya289e602019-11-05 08:31:56 -08001793struct rcu_fwd {
1794 spinlock_t rcu_fwd_lock;
1795 struct rcu_fwd_cb *rcu_fwd_cb_head;
1796 struct rcu_fwd_cb **rcu_fwd_cb_tail;
1797 long n_launders_cb;
1798 unsigned long rcu_fwd_startat;
1799 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
1800 unsigned long rcu_launder_gp_seq_start;
1801};
1802
Paul E. McKenney57f60202020-07-20 08:34:07 -07001803static DEFINE_MUTEX(rcu_fwd_mutex);
Jason Yanafbc1572020-04-09 19:42:38 +08001804static struct rcu_fwd *rcu_fwds;
1805static bool rcu_fwd_emergency_stop;
Paul E. McKenney48718482018-08-15 15:32:51 -07001806
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001807static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
Paul E. McKenney1a682752018-10-03 12:33:41 -07001808{
Paul E. McKenneycd618d12019-01-08 13:41:26 -08001809 unsigned long gps;
1810 unsigned long gps_old;
Paul E. McKenney1a682752018-10-03 12:33:41 -07001811 int i;
1812 int j;
1813
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001814 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
1815 if (rfp->n_launders_hist[i].n_launders > 0)
Paul E. McKenney1a682752018-10-03 12:33:41 -07001816 break;
Paul E. McKenney73d665b2018-10-04 10:54:22 -07001817 pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001818 __func__, jiffies - rfp->rcu_fwd_startat);
1819 gps_old = rfp->rcu_launder_gp_seq_start;
Paul E. McKenneycd618d12019-01-08 13:41:26 -08001820 for (j = 0; j <= i; j++) {
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001821 gps = rfp->n_launders_hist[j].launder_gp_seq;
Paul E. McKenneycd618d12019-01-08 13:41:26 -08001822 pr_cont(" %ds/%d: %ld:%ld",
Paul E. McKenneya289e602019-11-05 08:31:56 -08001823 j + 1, FWD_CBS_HIST_DIV,
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001824 rfp->n_launders_hist[j].n_launders,
Paul E. McKenneycd618d12019-01-08 13:41:26 -08001825 rcutorture_seq_diff(gps, gps_old));
1826 gps_old = gps;
1827 }
Paul E. McKenney1a682752018-10-03 12:33:41 -07001828 pr_cont("\n");
1829}
1830
Paul E. McKenney48718482018-08-15 15:32:51 -07001831/* Callback function for continuous-flood RCU callbacks. */
1832static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
1833{
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001834 unsigned long flags;
Paul E. McKenney48718482018-08-15 15:32:51 -07001835 int i;
1836 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
1837 struct rcu_fwd_cb **rfcpp;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001838 struct rcu_fwd *rfp = rfcp->rfc_rfp;
Paul E. McKenney48718482018-08-15 15:32:51 -07001839
1840 rfcp->rfc_next = NULL;
1841 rfcp->rfc_gps++;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001842 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
1843 rfcpp = rfp->rcu_fwd_cb_tail;
1844 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
Paul E. McKenney48718482018-08-15 15:32:51 -07001845 WRITE_ONCE(*rfcpp, rfcp);
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001846 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
1847 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
1848 if (i >= ARRAY_SIZE(rfp->n_launders_hist))
1849 i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
1850 rfp->n_launders_hist[i].n_launders++;
1851 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
1852 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001853}
1854
Paul E. McKenneyab21f602019-04-14 18:30:22 -07001855// Give the scheduler a chance, even on nohz_full CPUs.
Paul E. McKenneybd1bfc52019-06-22 14:35:59 -07001856static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
Paul E. McKenneyab21f602019-04-14 18:30:22 -07001857{
Sebastian Andrzej Siewior90326f02019-10-15 21:18:14 +02001858 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
Paul E. McKenneybd1bfc52019-06-22 14:35:59 -07001859 // Real call_rcu() floods hit userspace, so emulate that.
1860 if (need_resched() || (iter & 0xfff))
Paul E. McKenneyab21f602019-04-14 18:30:22 -07001861 schedule();
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07001862 return;
Paul E. McKenneyab21f602019-04-14 18:30:22 -07001863 }
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07001864 // No userspace emulation: CB invocation throttles call_rcu()
1865 cond_resched();
Paul E. McKenneyab21f602019-04-14 18:30:22 -07001866}
1867
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001868/*
1869 * Free all callbacks on the rcu_fwd_cb_head list, either because the
1870 * test is over or because we hit an OOM event.
1871 */
Paul E. McKenney67641002019-11-06 08:20:20 -08001872static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001873{
1874 unsigned long flags;
1875 unsigned long freed = 0;
1876 struct rcu_fwd_cb *rfcp;
1877
1878 for (;;) {
Paul E. McKenney67641002019-11-06 08:20:20 -08001879 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
1880 rfcp = rfp->rcu_fwd_cb_head;
Paul E. McKenney140e53f2019-04-09 10:08:18 -07001881 if (!rfcp) {
Paul E. McKenney67641002019-11-06 08:20:20 -08001882 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001883 break;
Paul E. McKenney140e53f2019-04-09 10:08:18 -07001884 }
Paul E. McKenney67641002019-11-06 08:20:20 -08001885 rfp->rcu_fwd_cb_head = rfcp->rfc_next;
1886 if (!rfp->rcu_fwd_cb_head)
1887 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
1888 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001889 kfree(rfcp);
1890 freed++;
Paul E. McKenneybd1bfc52019-06-22 14:35:59 -07001891 rcu_torture_fwd_prog_cond_resched(freed);
Paul E. McKenney79ba7ff2019-08-04 13:17:35 -07001892 if (tick_nohz_full_enabled()) {
1893 local_irq_save(flags);
1894 rcu_momentary_dyntick_idle();
1895 local_irq_restore(flags);
1896 }
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001897 }
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001898 return freed;
Paul E. McKenney48718482018-08-15 15:32:51 -07001899}
1900
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001901/* Carry out need_resched()/cond_resched() forward-progress testing. */
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001902static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
1903 int *tested, int *tested_tries)
Paul E. McKenney1b272912018-07-18 14:32:31 -07001904{
Paul E. McKenney119248b2018-07-18 15:39:37 -07001905 unsigned long cver;
Paul E. McKenneyf4de46e2018-07-24 20:50:40 -07001906 unsigned long dur;
Paul E. McKenney7c590fc2018-08-07 16:42:42 -07001907 struct fwd_cb_state fcs;
Paul E. McKenney119248b2018-07-18 15:39:37 -07001908 unsigned long gps;
Paul E. McKenney1b272912018-07-18 14:32:31 -07001909 int idx;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001910 int sd;
1911 int sd4;
1912 bool selfpropcb = false;
1913 unsigned long stopat;
1914 static DEFINE_TORTURE_RANDOM(trs);
1915
1916 if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
1917 init_rcu_head_on_stack(&fcs.rh);
1918 selfpropcb = true;
1919 }
1920
1921 /* Tight loop containing cond_resched(). */
Paul E. McKenneye8516c62019-04-09 11:06:32 -07001922 WRITE_ONCE(rcu_fwd_cb_nodelay, true);
1923 cur_ops->sync(); /* Later readers see above write. */
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001924 if (selfpropcb) {
1925 WRITE_ONCE(fcs.stop, 0);
1926 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
1927 }
1928 cver = READ_ONCE(rcu_torture_current_version);
1929 gps = cur_ops->get_gp_seq();
1930 sd = cur_ops->stall_dur() + 1;
1931 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
1932 dur = sd4 + torture_random(&trs) % (sd - sd4);
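	/* So dur lies in [sd4, sd): long enough to show forward progress, but short of the RCU CPU stall timeout. */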
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001933 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
1934 stopat = rfp->rcu_fwd_startat + dur;
Paul E. McKenneye0aff972018-10-01 17:40:54 -07001935 while (time_before(jiffies, stopat) &&
Paul E. McKenney60013d52019-07-10 08:30:00 -07001936 !shutdown_time_arrived() &&
Paul E. McKenneye0aff972018-10-01 17:40:54 -07001937 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001938 idx = cur_ops->readlock();
1939 udelay(10);
1940 cur_ops->readunlock(idx);
1941 if (!fwd_progress_need_resched || need_resched())
Paul E. McKenneyfbbd5e32019-08-15 11:43:53 -07001942 cond_resched();
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001943 }
1944 (*tested_tries)++;
Paul E. McKenneye0aff972018-10-01 17:40:54 -07001945 if (!time_before(jiffies, stopat) &&
Paul E. McKenney60013d52019-07-10 08:30:00 -07001946 !shutdown_time_arrived() &&
Paul E. McKenneye0aff972018-10-01 17:40:54 -07001947 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001948 (*tested)++;
1949 cver = READ_ONCE(rcu_torture_current_version) - cver;
1950 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
1951 WARN_ON(!cver && gps < 2);
1952 pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
1953 }
1954 if (selfpropcb) {
1955 WRITE_ONCE(fcs.stop, 1);
1956 cur_ops->sync(); /* Wait for running CB to complete. */
1957 cur_ops->cb_barrier(); /* Wait for queued callbacks. */
1958 }
1959
1960 if (selfpropcb) {
1961 WARN_ON(READ_ONCE(fcs.stop) != 2);
1962 destroy_rcu_head_on_stack(&fcs.rh);
1963 }
Paul E. McKenneye8516c62019-04-09 11:06:32 -07001964 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
1965 WRITE_ONCE(rcu_fwd_cb_nodelay, false);
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001966}
1967
1968/* Carry out call_rcu() forward-progress testing. */
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001969static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001970{
1971 unsigned long cver;
Paul E. McKenney79ba7ff2019-08-04 13:17:35 -07001972 unsigned long flags;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001973 unsigned long gps;
1974 int i;
Paul E. McKenney48718482018-08-15 15:32:51 -07001975 long n_launders;
1976 long n_launders_cb_snap;
1977 long n_launders_sa;
1978 long n_max_cbs;
1979 long n_max_gps;
1980 struct rcu_fwd_cb *rfcp;
1981 struct rcu_fwd_cb *rfcpn;
Paul E. McKenney1b272912018-07-18 14:32:31 -07001982 unsigned long stopat;
Paul E. McKenney48718482018-08-15 15:32:51 -07001983 unsigned long stoppedat;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001984
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001985 if (READ_ONCE(rcu_fwd_emergency_stop))
1986 return; /* Get out of the way quickly, no GP wait! */
Paul E. McKenneyc682db52019-04-19 07:38:27 -07001987 if (!cur_ops->call)
1988 return; /* Can't do call_rcu() fwd prog without ->call. */
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001989
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001990 /* Loop continuously posting RCU callbacks. */
1991 WRITE_ONCE(rcu_fwd_cb_nodelay, true);
1992 cur_ops->sync(); /* Later readers see above write. */
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001993 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
1994 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001995 n_launders = 0;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001996 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001997 n_launders_sa = 0;
1998 n_max_cbs = 0;
1999 n_max_gps = 0;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002000 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
2001 rfp->n_launders_hist[i].n_launders = 0;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002002 cver = READ_ONCE(rcu_torture_current_version);
2003 gps = cur_ops->get_gp_seq();
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002004 rfp->rcu_launder_gp_seq_start = gps;
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07002005 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
Paul E. McKenneye0aff972018-10-01 17:40:54 -07002006 while (time_before(jiffies, stopat) &&
Paul E. McKenney60013d52019-07-10 08:30:00 -07002007 !shutdown_time_arrived() &&
Paul E. McKenneye0aff972018-10-01 17:40:54 -07002008 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002009 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002010 rfcpn = NULL;
2011 if (rfcp)
2012 rfcpn = READ_ONCE(rfcp->rfc_next);
2013 if (rfcpn) {
2014 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
2015 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
2016 break;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002017 rfp->rcu_fwd_cb_head = rfcpn;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002018 n_launders++;
2019 n_launders_sa++;
2020 } else {
2021 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
2022 if (WARN_ON_ONCE(!rfcp)) {
2023 schedule_timeout_interruptible(1);
2024 continue;
2025 }
2026 n_max_cbs++;
2027 n_launders_sa = 0;
2028 rfcp->rfc_gps = 0;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002029 rfcp->rfc_rfp = rfp;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002030 }
2031 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
Paul E. McKenneybd1bfc52019-06-22 14:35:59 -07002032 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
Paul E. McKenney79ba7ff2019-08-04 13:17:35 -07002033 if (tick_nohz_full_enabled()) {
2034 local_irq_save(flags);
2035 rcu_momentary_dyntick_idle();
2036 local_irq_restore(flags);
2037 }
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002038 }
2039 stoppedat = jiffies;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002040 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002041 cver = READ_ONCE(rcu_torture_current_version) - cver;
2042 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2043 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
Paul E. McKenney67641002019-11-06 08:20:20 -08002044 (void)rcu_torture_fwd_prog_cbfree(rfp);
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002045
Paul E. McKenney60013d52019-07-10 08:30:00 -07002046 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
2047 !shutdown_time_arrived()) {
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002048 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
2049 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
2050 __func__,
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002051 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002052 n_launders + n_max_cbs - n_launders_cb_snap,
2053 n_launders, n_launders_sa,
2054 n_max_gps, n_max_cbs, cver, gps);
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08002055 rcu_torture_fwd_cb_hist(rfp);
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002056 }
Paul E. McKenneye8516c62019-04-09 11:06:32 -07002057 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07002058 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
Paul E. McKenneye8516c62019-04-09 11:06:32 -07002059 WRITE_ONCE(rcu_fwd_cb_nodelay, false);
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07002060}
2061
Paul E. McKenneye0aff972018-10-01 17:40:54 -07002062
2063/*
2064 * OOM notifier, but this only prints diagnostic information for the
2065 * current forward-progress test.
2066 */
2067static int rcutorture_oom_notify(struct notifier_block *self,
2068 unsigned long notused, void *nfreed)
2069{
Paul E. McKenney57f60202020-07-20 08:34:07 -07002070 struct rcu_fwd *rfp;
Paul E. McKenney67641002019-11-06 08:20:20 -08002071
Paul E. McKenney57f60202020-07-20 08:34:07 -07002072 mutex_lock(&rcu_fwd_mutex);
2073 rfp = rcu_fwds;
2074 if (!rfp) {
2075 mutex_unlock(&rcu_fwd_mutex);
2076 return NOTIFY_OK;
2077 }
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002078 WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
2079 __func__);
Paul E. McKenney67641002019-11-06 08:20:20 -08002080 rcu_torture_fwd_cb_hist(rfp);
2081 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2);
Paul E. McKenneye0aff972018-10-01 17:40:54 -07002082 WRITE_ONCE(rcu_fwd_emergency_stop, true);
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002083 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
2084 pr_info("%s: Freed %lu RCU callbacks.\n",
Paul E. McKenney67641002019-11-06 08:20:20 -08002085 __func__, rcu_torture_fwd_prog_cbfree(rfp));
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002086 rcu_barrier();
2087 pr_info("%s: Freed %lu RCU callbacks.\n",
Paul E. McKenney67641002019-11-06 08:20:20 -08002088 __func__, rcu_torture_fwd_prog_cbfree(rfp));
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002089 rcu_barrier();
2090 pr_info("%s: Freed %lu RCU callbacks.\n",
Paul E. McKenney67641002019-11-06 08:20:20 -08002091 __func__, rcu_torture_fwd_prog_cbfree(rfp));
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07002092 smp_mb(); /* Frees before return to avoid redoing OOM. */
2093 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
2094 pr_info("%s returning after OOM processing.\n", __func__);
Paul E. McKenney57f60202020-07-20 08:34:07 -07002095 mutex_unlock(&rcu_fwd_mutex);
Paul E. McKenneye0aff972018-10-01 17:40:54 -07002096 return NOTIFY_OK;
2097}
2098
2099static struct notifier_block rcutorture_oom_nb = {
2100 .notifier_call = rcutorture_oom_notify
2101};

/* Carry out grace-period forward-progress testing. */
static int rcu_torture_fwd_prog(void *args)
{
	struct rcu_fwd *rfp = args;
	int tested = 0;
	int tested_tries = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
	rcu_bind_current_to_nocb();
	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
		set_user_nice(current, MAX_NICE);
	do {
		schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
		WRITE_ONCE(rcu_fwd_emergency_stop, false);
		if (!IS_ENABLED(CONFIG_TINY_RCU) ||
		    rcu_inkernel_boot_has_ended())
			rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
		if (rcu_inkernel_boot_has_ended())
			rcu_torture_fwd_prog_cr(rfp);

		/* Avoid slow periods, better to test when busy. */
		stutter_wait("rcu_torture_fwd_prog");
	} while (!torture_must_stop());
	/* Short runs might not contain a valid forward-progress attempt. */
	WARN_ON(!tested && tested_tries >= 5);
	pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
	torture_kthread_stopping("rcu_torture_fwd_prog");
	return 0;
}
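
/*
 * Note on the two checks above: rcu_torture_fwd_prog_nr() verifies
 * that grace periods complete in reasonable time despite a kthread
 * looping in the kernel without sleeping, while
 * rcu_torture_fwd_prog_cr() floods call_rcu() with callbacks to
 * verify that callback processing keeps up (hence the n_launders and
 * n_max_cbs statistics printed by that function).
 */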

/* If forward-progress checking is requested and feasible, spawn the thread. */
static int __init rcu_torture_fwd_prog_init(void)
{
	struct rcu_fwd *rfp;

	if (!fwd_progress)
		return 0; /* Not requested, so don't do it. */
	if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 ||
	    cur_ops == &rcu_busted_ops) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
		return 0;
	}
	if (stall_cpu > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
			return -EINVAL; /* In module, can fail back to user. */
		WARN_ON(1); /* Make sure rcutorture notices conflict. */
		return 0;
	}
	if (fwd_progress_holdoff <= 0)
		fwd_progress_holdoff = 1;
	if (fwd_progress_div <= 0)
		fwd_progress_div = 4;
	rfp = kzalloc(sizeof(*rfp), GFP_KERNEL);
	if (!rfp)
		return -ENOMEM;
	spin_lock_init(&rfp->rcu_fwd_lock);
	rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
	mutex_lock(&rcu_fwd_mutex);
	rcu_fwds = rfp;
	mutex_unlock(&rcu_fwd_mutex);
	register_oom_notifier(&rcutorture_oom_nb);
	return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
}
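
/*
 * Forward-progress testing is controlled by module parameters:
 * fwd_progress enables it, fwd_progress_holdoff sets the seconds
 * between tests, and fwd_progress_div bounds each test's duration to
 * a fraction of the RCU CPU stall timeout, e.g.,
 * "modprobe rcutorture fwd_progress=1 fwd_progress_holdoff=60".
 */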

static void rcu_torture_fwd_prog_cleanup(void)
{
	struct rcu_fwd *rfp;

	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
	rfp = rcu_fwds;
	mutex_lock(&rcu_fwd_mutex);
	rcu_fwds = NULL;
	mutex_unlock(&rcu_fwd_mutex);
	unregister_oom_notifier(&rcutorture_oom_nb);
	kfree(rfp);
}
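
/*
 * Note the teardown ordering above: the kthread is stopped first, and
 * rcu_fwds is NULLed under rcu_fwd_mutex so that a concurrent OOM
 * notification either sees NULL or completes before the notifier is
 * unregistered and rfp is freed.
 */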

/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* IPI handler to get callback posted on desired CPU, if online. */
static void rcu_torture_barrier1cb(void *rcu_void)
{
	struct rcu_head *rhp = rcu_void;

	cur_ops->call(rhp, rcu_torture_barrier_cbf);
}
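
/*
 * Posting the callback via smp_call_function_single() queues it on the
 * specified CPU, so the barrier test exercises rcu_barrier() against
 * callbacks distributed across all online CPUs rather than just the
 * CPUs where the rcu_torture_barrier_cbs() kthreads happen to run.
 */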

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = false;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, MAX_NICE);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    smp_load_acquire(&barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		if (torture_must_stop())
			break;
		/*
		 * The above smp_load_acquire() ensures barrier_phase load
		 * is ordered before the following ->call().
		 */
		if (smp_call_function_single(myid, rcu_torture_barrier1cb,
					     &rcu, 1)) {
			// IPI failed, so use direct call from current CPU.
			cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		}
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}
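
/*
 * The handshake above pairs with rcu_torture_barrier(): the parent
 * flips barrier_phase with smp_store_release(), each callback kthread
 * observes the flip via smp_load_acquire(), posts one callback, and
 * decrements barrier_cbs_count, with the final decrement waking the
 * parent on barrier_wq.
 */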

/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		/* Ensure barrier_phase ordered after prior assignments. */
		smp_store_release(&barrier_phase, !barrier_phase);
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   torture_must_stop());
		if (torture_must_stop())
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
			       atomic_read(&barrier_cbs_invoked),
			       n_barrier_cbs);
			WARN_ON(1);
			// Wait manually for the remaining callbacks
			i = 0;
			do {
				if (WARN_ON(i++ > HZ))
					i = INT_MIN;
				schedule_timeout_interruptible(1);
				cur_ops->cb_barrier();
			} while (atomic_read(&barrier_cbs_invoked) !=
				 n_barrier_cbs &&
				 !torture_must_stop());
			smp_mb(); // Can't trust ordering if broken.
			if (!torture_must_stop())
				pr_err("Recovered: barrier_cbs_invoked = %d\n",
				       atomic_read(&barrier_cbs_invoked));
		} else {
			n_barrier_successes++;
		}
		schedule_timeout_interruptible(HZ / 10);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_barrier");
	return 0;
}

/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs <= 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
	if (!barrier_cbs_tasks || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		ret = torture_create_kthread(rcu_torture_barrier_cbs,
					     (void *)(long)i,
					     barrier_cbs_tasks[i]);
		if (ret)
			return ret;
	}
	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}

/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	torture_stop_kthread(rcu_torture_barrier, barrier_task);
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++)
			torture_stop_kthread(rcu_torture_barrier_cbs,
					     barrier_cbs_tasks[i]);
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}
2337
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002338static bool rcu_torture_can_boost(void)
2339{
2340 static int boost_warn_once;
2341 int prio;
2342
2343 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
2344 return false;
2345
2346 prio = rcu_get_gp_kthreads_prio();
2347 if (!prio)
2348 return false;
2349
2350 if (prio < 2) {
2351 if (boost_warn_once == 1)
2352 return false;
2353
Joel Fernandes (Google)bf5b6432018-06-19 15:14:19 -07002354 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002355 boost_warn_once = 1;
2356 return false;
2357 }
2358
2359 return true;
2360}
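
/*
 * For example (illustrative), booting with "rcutree.kthread_prio=2"
 * and loading rcutorture with "test_boost=2" satisfies the above
 * checks, enabling boost testing even for flavors that do not set
 * ->can_boost.
 */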

static bool read_exit_child_stop;
static bool read_exit_child_stopped;
static wait_queue_head_t read_exit_wq;

// Child kthread which just does an rcutorture reader and exits.
static int rcu_torture_read_exit_child(void *trsp_in)
{
	struct torture_random_state *trsp = trsp_in;

	set_user_nice(current, MAX_NICE);
	// Minimize time between reading and exiting.
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	(void)rcu_torture_one_read(trsp);
	return 0;
}

// Parent kthread which creates and destroys read-exit child kthreads.
static int rcu_torture_read_exit(void *unused)
{
	int count = 0;
	bool errexit = false;
	int i;
	struct task_struct *tsp;
	DEFINE_TORTURE_RANDOM(trs);

	// Allocate and initialize.
	set_user_nice(current, MAX_NICE);
	VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");

	// Each pass through this loop does one read-exit episode.
	do {
		if (++count > read_exit_burst) {
			VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
			rcu_barrier(); // Wait for task_struct free, avoid OOM.
			for (i = 0; i < read_exit_delay; i++) {
				schedule_timeout_uninterruptible(HZ);
				if (READ_ONCE(read_exit_child_stop))
					break;
			}
			if (!READ_ONCE(read_exit_child_stop))
				VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
			count = 0;
		}
		if (READ_ONCE(read_exit_child_stop))
			break;
		// Spawn child.
		tsp = kthread_run(rcu_torture_read_exit_child,
				  &trs, "%s",
				  "rcu_torture_read_exit_child");
		if (IS_ERR(tsp)) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			errexit = true;
			tsp = NULL;
			break;
		}
		cond_resched();
		kthread_stop(tsp);
		n_read_exits++;
		stutter_wait("rcu_torture_read_exit");
	} while (!errexit && !READ_ONCE(read_exit_child_stop));

	// Clean up and exit.
	smp_store_release(&read_exit_child_stopped, true); // After reaping.
	smp_mb(); // Store before wakeup.
	wake_up(&read_exit_wq);
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
	torture_kthread_stopping("rcu_torture_read_exit");
	return 0;
}
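
// Each episode spawns and reaps read_exit_burst children back to back,
// then sleeps for read_exit_delay seconds, so the task-exit stress
// arrives in bursts sized and spaced by those two module parameters.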

static int rcu_torture_read_exit_init(void)
{
	if (read_exit_burst <= 0)
		return -EINVAL;
	init_waitqueue_head(&read_exit_wq);
	read_exit_child_stop = false;
	read_exit_child_stopped = false;
	return torture_create_kthread(rcu_torture_read_exit, NULL,
				      read_exit_task);
}

static void rcu_torture_read_exit_cleanup(void)
{
	if (!read_exit_task)
		return;
	WRITE_ONCE(read_exit_child_stop, true);
	smp_mb(); // Above write before wait.
	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
}

static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
	int firsttime;
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	show_rcu_gp_kthreads();
	rcu_torture_read_exit_cleanup();
	rcu_torture_barrier_cleanup();
	rcu_torture_fwd_prog_cleanup();
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n",
		 cur_ops->name, (long)gp_seq, flags,
		 rcutorture_seq_diff(gp_seq, start_gp_seq));
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	if (rcu_torture_can_boost())
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do torture-type-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1;
		for (i = 0; i < rt_read_nsegs; i++) {
			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont("%s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont("%s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s\n",
				err_segs[i].rt_preempted ? "preempted" : "");
		}
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
}

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}
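
/*
 * This test runs only when the object_debug module parameter is set
 * (see rcu_torture_init() below).  The expected outcome is a
 * debug-objects splat on the second call_rcu() of rh2; a message from
 * rcu_torture_err_cb() instead indicates the unlikely race described
 * in that function's comment.
 */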

static void rcutorture_sync(void)
{
	static unsigned long n;

	if (cur_ops->sync && !(++n & 0xfff))
		cur_ops->sync();
}
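
/*
 * rcutorture_sync() is handed to torture_onoff_init() below so that
 * CPU-hotplug operations occasionally wait for a grace period, but
 * only on every 0x1000th invocation to avoid slowing hotplug testing
 * to a crawl.
 */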

static int __init
rcu_torture_init(void)
{
	long i;
	int cpu;
	int firsterr = 0;
	int flags = 0;
	unsigned long gp_seq = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
		&busted_srcud_ops, &tasks_ops, &tasks_rude_ops,
		&tasks_tracing_ops, &trivial_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
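
	/*
	 * The loop above matches torture_type against each entry's ->name,
	 * so, for example, "modprobe rcutorture torture_type=srcud" selects
	 * &srcud_ops; the default torture_type is "rcu".
	 */
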
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");
	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	start_gp_seq = gp_seq;
	pr_alert("%s: Start-test grace-period state: g%ld f%#x\n",
		 cur_ops->name, (long)gp_seq, flags);

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;

	/* Start up the kthreads. */

	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (firsterr)
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		int t;

		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
		firsterr = torture_stutter_init(stutter * HZ, t);
		if (firsterr)
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (firsterr)
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {
		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		if (firsterr < 0)
			goto unwind;
		rcutor_hp = firsterr;
	}
	shutdown_jiffies = jiffies + shutdown_secs * HZ;
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (firsterr)
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
				      rcutorture_sync);
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_read_exit_init();
	if (firsterr)
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);