// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *          Josh Triplett <josh@joshtriplett.org>
 *
 * See also: Documentation/RCU/torture.txt
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");


/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
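/*
 * For example, a reader segment that has both bh and preemption disabled
 * is represented by (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_PREEMPT), that is,
 * by the value 0x05.
 */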

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
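
/*
 * Illustrative (hypothetical) invocation when rcutorture is built as a
 * module; the parameter names match the torture_param() declarations above:
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 *
 * This would torture SRCU with eight reader kthreads and print statistics
 * every 30 seconds.
 */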

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

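/* Map the current writer state to a human-readable name for diagnostics. */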
static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

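/*
 * Coarse trace-clock helper: return trace_clock_local() scaled to
 * microseconds, or zero when CONFIG_RCU_TRACE is not set.
 */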
#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule();  /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = READ_ONCE(rp->rtort_pipe_count);
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

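/*
 * RCU callback invoked at the end of a grace period: recycle the element
 * if it has aged through the full pipeline, otherwise re-post it via the
 * current flavor's ->deferred_free() function.
 */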
static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

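/* Stub grace-period sequence number for flavors that do not provide one. */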
static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

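/*
 * Shared ->init function: (re)initialize the list of elements removed from
 * the current pointer and awaiting a grace period.
 */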
static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype = RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.deferred_free = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.get_state = get_state_synchronize_rcu,
	.cond_sync = cond_synchronize_rcu,
	.call = call_rcu,
	.cb_barrier = rcu_barrier,
	.fqs = rcu_force_quiescent_state,
	.stats = NULL,
	.stall_dur = rcu_jiffies_till_stall_check,
	.irq_capable = 1,
	.can_boost = rcu_can_boost(),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype = INVALID_RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_busted_torture_deferred_free,
	.sync = synchronize_rcu_busted,
	.exp_sync = synchronize_rcu_busted,
	.call = call_rcu_busted,
	.cb_barrier = NULL,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype = SRCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.name = "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.name = "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "busted_srcud"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = tasks_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_torture_deferred_free,
	.sync = synchronize_rcu_tasks,
	.exp_sync = synchronize_rcu_tasks,
	.call = call_rcu_tasks,
	.cb_barrier = rcu_barrier_tasks,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype = RCU_TRIVIAL_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.get_gp_seq = rcu_no_completed,
	.sync = synchronize_rcu_trivial,
	.exp_sync = synchronize_rcu_trivial,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "trivial"
};

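/*
 * Compute the grace-period-sequence difference, using the flavor's own
 * ->gp_diff helper when one is provided.
 */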
static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled.  This is only possible if rcutorture is built in;
	 * otherwise the user should do this manually by setting the
	 * sched_rt_period_us and sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

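/*
 * Did the callback take longer than the boost-test interval (minus a
 * half-second of slop)?  If so, count it as a priority-boosting failure.
 */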
static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true; /* failed */
	}

	return false; /* passed */
}

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Track whether the test has already failed in this interval. */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								  jiffies);
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boosting never happened, then ->inflight will still be 1;
		 * in that case the boost check would never have happened in
		 * the above loop, so do another one here.
		 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			WRITE_ONCE(old_rp->rtort_pipe_count,
				   old_rp->rtort_pipe_count + 1);
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_state();
				i = torture_random(&rand) % 16;
				if (i != 0)
					schedule_timeout_interruptible(i);
				udelay(torture_random(&rand) % 1000);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		if (stutter_wait("rcu_torture_writer") &&
		    !READ_ONCE(rcu_fwd_cb_nodelay) &&
		    !cur_ops->slow_gps &&
		    !torture_must_stop() &&
		    rcu_inkernel_boot_has_ended())
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free) &&
				    rcu_access_pointer(rcu_torture_current) !=
				    &rcu_tortures[i]) {
					rcu_ftrace_dump(DUMP_ALL);
					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
				}
	} while (!torture_must_stop());
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (cur_ops->sync && torture_random(&rand) & 0x80)
				cur_ops->sync();
			else if (cur_ops->exp_sync)
				cur_ops->exp_sync();
		} else if (gp_normal && cur_ops->sync) {
			cur_ops->sync();
		} else if (cur_ops->exp_sync) {
			cur_ops->exp_sync();
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}

Paul E. McKenneyf34c85852017-07-20 15:27:32 -07001132static void rcu_torture_timer_cb(struct rcu_head *rhp)
1133{
1134 kfree(rhp);
1135}
1136
Josh Triplettb772e1d2006-10-04 02:17:13 -07001137/*
Paul E. McKenney2397d072018-05-25 07:29:25 -07001138 * Do one extension of an RCU read-side critical section using the
1139 * current reader state in readstate (set to zero for initial entry
1140 * to extended critical section), set the new state as specified by
1141 * newstate (set to zero for final exit from extended critical section),
1142 * and random-number-generator state in trsp. If this is neither the
1143 * beginning or end of the critical section and if there was actually a
1144 * change, do a ->read_delay().
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001145 */
Paul E. McKenney2397d072018-05-25 07:29:25 -07001146static void rcutorture_one_extend(int *readstate, int newstate,
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001147 struct torture_random_state *trsp,
1148 struct rt_read_seg *rtrsp)
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001149{
Paul E. McKenney2397d072018-05-25 07:29:25 -07001150 int idxnew = -1;
1151 int idxold = *readstate;
1152 int statesnew = ~*readstate & newstate;
1153 int statesold = *readstate & ~newstate;
1154
1155 WARN_ON_ONCE(idxold < 0);
1156 WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001157 rtrsp->rt_readstate = newstate;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001158
1159 /* First, put new protection in place to avoid critical-section gap. */
1160 if (statesnew & RCUTORTURE_RDR_BH)
1161 local_bh_disable();
1162 if (statesnew & RCUTORTURE_RDR_IRQ)
1163 local_irq_disable();
1164 if (statesnew & RCUTORTURE_RDR_PREEMPT)
1165 preempt_disable();
Paul E. McKenney2ceebc02018-07-06 15:16:12 -07001166 if (statesnew & RCUTORTURE_RDR_RBH)
1167 rcu_read_lock_bh();
1168 if (statesnew & RCUTORTURE_RDR_SCHED)
1169 rcu_read_lock_sched();
Paul E. McKenney2397d072018-05-25 07:29:25 -07001170 if (statesnew & RCUTORTURE_RDR_RCU)
1171 idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
1172
1173 /* Next, remove old protection, irq first due to bh conflict. */
1174 if (statesold & RCUTORTURE_RDR_IRQ)
1175 local_irq_enable();
1176 if (statesold & RCUTORTURE_RDR_BH)
1177 local_bh_enable();
1178 if (statesold & RCUTORTURE_RDR_PREEMPT)
1179 preempt_enable();
Paul E. McKenney2ceebc02018-07-06 15:16:12 -07001180 if (statesold & RCUTORTURE_RDR_RBH)
1181 rcu_read_unlock_bh();
1182 if (statesold & RCUTORTURE_RDR_SCHED)
1183 rcu_read_unlock_sched();
Paul E. McKenney2397d072018-05-25 07:29:25 -07001184 if (statesold & RCUTORTURE_RDR_RCU)
1185 cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
1186
1187 /* Delay if neither beginning nor end and there was a change. */
1188 if ((statesnew || statesold) && *readstate && newstate)
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001189 cur_ops->read_delay(trsp, rtrsp);
Paul E. McKenney2397d072018-05-25 07:29:25 -07001190
1191 /* Update the reader state. */
1192 if (idxnew == -1)
1193 idxnew = idxold & ~RCUTORTURE_RDR_MASK;
1194 WARN_ON_ONCE(idxnew < 0);
1195 WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
1196 *readstate = idxnew | newstate;
1197 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
1198 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
1199}
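/*
 * Illustrative sketch (not used by the test itself): a readstate value packs
 * the index returned by ->readlock() at and above RCUTORTURE_RDR_SHIFT and
 * the protection bits below it.  Assuming ->readlock() returned index 1 and
 * bh plus RCU protection were requested, the encoded state would be:
 *
 *	int example_state = (1 << RCUTORTURE_RDR_SHIFT) |
 *			    RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RCU;
 *
 * rcutorture_one_extend() then recovers the index as
 * example_state >> RCUTORTURE_RDR_SHIFT and the protection bits as
 * example_state & RCUTORTURE_RDR_MASK.
 */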
1200
1201/* Return the biggest extendables mask given current RCU and boot parameters. */
1202static int rcutorture_extend_mask_max(void)
1203{
1204 int mask;
1205
1206 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
1207 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
1208 mask = mask | RCUTORTURE_RDR_RCU;
1209 return mask;
1210}
1211
1212/* Return a random protection state mask, but with at least one bit set. */
1213static int
1214rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
1215{
1216 int mask = rcutorture_extend_mask_max();
Paul E. McKenneybf1bef52018-06-10 08:50:09 -07001217 unsigned long randmask1 = torture_random(trsp) >> 8;
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001218 unsigned long randmask2 = randmask1 >> 3;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001219
1220 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
Paul E. McKenneya3b0e1e52019-02-28 15:06:13 -08001221 /* Mostly only one bit (need preemption!), sometimes lots of bits. */
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001222 if (!(randmask1 & 0x7))
Paul E. McKenneybf1bef52018-06-10 08:50:09 -07001223 mask = mask & randmask2;
1224 else
1225 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
Paul E. McKenney2ceebc02018-07-06 15:16:12 -07001226	/* Can't enable bh with irqs disabled. */
Paul E. McKenney2397d072018-05-25 07:29:25 -07001227 if ((mask & RCUTORTURE_RDR_IRQ) &&
Paul E. McKenney2ceebc02018-07-06 15:16:12 -07001228 ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
1229 (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
1230 mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001231 return mask ?: RCUTORTURE_RDR_RCU;
1232}
1233
1234/*
1235 * Do a randomly selected number of extensions of an existing RCU read-side
1236 * critical section.
1237 */
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001238static struct rt_read_seg *
1239rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
1240 struct rt_read_seg *rtrsp)
Paul E. McKenney2397d072018-05-25 07:29:25 -07001241{
1242 int i;
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001243 int j;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001244 int mask = rcutorture_extend_mask_max();
1245
1246 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
1247 if (!((mask - 1) & mask))
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001248 return rtrsp; /* Current RCU reader not extendable. */
1249 /* Bias towards larger numbers of loops. */
1250 i = (torture_random(trsp) >> 3);
1251 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
1252 for (j = 0; j < i; j++) {
Paul E. McKenney2397d072018-05-25 07:29:25 -07001253 mask = rcutorture_extend_mask(*readstate, trsp);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001254 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
Paul E. McKenney2397d072018-05-25 07:29:25 -07001255 }
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001256 return &rtrsp[j];
Paul E. McKenney2397d072018-05-25 07:29:25 -07001257}
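/*
 * Note on the loop-count bias above: OR-ing the random value with a
 * right-shifted copy of itself makes each masked bit set with probability
 * roughly 3/4 rather than 1/2, so (assuming RCUTORTURE_RDR_MAX_LOOPS is a
 * small low-order bit mask, as its use above suggests) the chosen number of
 * extensions tends toward the maximum rather than being uniform.
 */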
1258
1259/*
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001260 * Do one read-side critical section, returning false if there was
1261 * no data to read. Can be invoked both from process context and
1262 * from a timer handler.
1263 */
1264static bool rcu_torture_one_read(struct torture_random_state *trsp)
1265{
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001266 int i;
Paul E. McKenney917963d2014-11-21 17:10:16 -08001267 unsigned long started;
Paul E. McKenney6b80da42014-11-21 14:19:26 -08001268 unsigned long completed;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001269 int newstate;
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001270 struct rcu_torture *p;
1271 int pipe_count;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001272 int readstate = 0;
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001273 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
1274 struct rt_read_seg *rtrsp = &rtseg[0];
1275 struct rt_read_seg *rtrsp1;
Paul E. McKenney52494532012-11-14 16:26:40 -08001276 unsigned long long ts;
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001277
Paul E. McKenney2397d072018-05-25 07:29:25 -07001278 newstate = rcutorture_extend_mask(readstate, trsp);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001279 rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
Paul E. McKenney17ef2fe2018-04-27 11:39:34 -07001280 started = cur_ops->get_gp_seq();
Steven Rostedte4aa0da2013-02-04 13:36:13 -05001281 ts = rcu_trace_clock_local();
Paul E. McKenney632ee202010-02-22 17:04:45 -08001282 p = rcu_dereference_check(rcu_torture_current,
Paul E. McKenney632ee202010-02-22 17:04:45 -08001283 rcu_read_lock_bh_held() ||
1284 rcu_read_lock_sched_held() ||
Paul E. McKenney5be5d1a2015-06-30 08:57:57 -07001285 srcu_read_lock_held(srcu_ctlp) ||
1286 torturing_tasks());
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001287 if (p == NULL) {
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001288	/* Wait for rcu_torture_writer to get underway. */
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001289 rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001290 return false;
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001291 }
1292 if (p->rtort_mbtest == 0)
1293 atomic_inc(&n_rcu_torture_mberror);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001294 rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001295 preempt_disable();
Paul E. McKenney20248912019-12-21 10:41:48 -08001296 pipe_count = READ_ONCE(p->rtort_pipe_count);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001297 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1298 /* Should not happen, but... */
1299 pipe_count = RCU_TORTURE_PIPE_LEN;
1300 }
Paul E. McKenney17ef2fe2018-04-27 11:39:34 -07001301 completed = cur_ops->get_gp_seq();
Paul E. McKenney52494532012-11-14 16:26:40 -08001302 if (pipe_count > 1) {
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001303 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1304 ts, started, completed);
Paul E. McKenney274529b2016-03-21 19:46:04 -07001305 rcu_ftrace_dump(DUMP_ALL);
Paul E. McKenney52494532012-11-14 16:26:40 -08001306 }
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001307 __this_cpu_inc(rcu_torture_count[pipe_count]);
Paul E. McKenneyd72193122018-05-15 15:24:41 -07001308 completed = rcutorture_seq_diff(completed, started);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001309 if (completed > RCU_TORTURE_PIPE_LEN) {
1310 /* Should not happen, but... */
1311 completed = RCU_TORTURE_PIPE_LEN;
1312 }
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001313 __this_cpu_inc(rcu_torture_batch[completed]);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001314 preempt_enable();
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001315 rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
Paul E. McKenney2397d072018-05-25 07:29:25 -07001316 WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001317
1318 /* If error or close call, record the sequence of reader protections. */
1319 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
1320 i = 0;
1321 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
1322 err_segs[i++] = *rtrsp1;
1323 rt_read_nsegs = i;
1324 }
1325
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001326 return true;
1327}
1328
Paul E. McKenney3025520e2018-05-22 11:38:47 -07001329static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
1330
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001331/*
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001332 * RCU torture reader from timer handler. Dereferences rcu_torture_current,
1333 * incrementing the corresponding element of the pipeline array. The
1334 * counter in the element should never be greater than 1; otherwise, the
1335 * RCU implementation is broken.
1336 */
1337static void rcu_torture_timer(struct timer_list *unused)
1338{
Paul E. McKenney8da9a592018-05-22 11:17:51 -07001339 atomic_long_inc(&n_rcu_torture_timers);
Paul E. McKenney241b4252018-05-22 11:59:31 -07001340 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));
Paul E. McKenneyf34c85852017-07-20 15:27:32 -07001341
1342 /* Test call_rcu() invocation from interrupt handler. */
1343 if (cur_ops->call) {
1344 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
1345
1346 if (rhp)
1347 cur_ops->call(rhp, rcu_torture_timer_cb);
1348 }
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001349}
1350
1351/*
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001352 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
1353 * incrementing the corresponding element of the pipeline array. The
1354 * counter in the element should never be greater than 1; otherwise, the
1355 * RCU implementation is broken.
1356 */
1357static int
1358rcu_torture_reader(void *arg)
1359{
Paul E. McKenney444da512018-07-04 14:14:42 -07001360 unsigned long lastsleep = jiffies;
Paul E. McKenneyc04dd092018-07-23 14:16:47 -07001361 long myid = (long)arg;
1362 int mynumonline = myid;
Paul E. McKenney51b11302014-01-27 11:49:39 -08001363 DEFINE_TORTURE_RANDOM(rand);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001364 struct timer_list t;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001365
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001366 VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
Linus Torvalds971eae72014-03-31 11:21:19 -07001367 set_user_nice(current, MAX_NICE);
Paul E. McKenney0acc5122009-06-25 09:08:17 -07001368 if (irqreader && cur_ops->irq_capable)
Kees Cookfd30b712017-10-22 17:58:54 -07001369 timer_setup_on_stack(&t, rcu_torture_timer, 0);
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07001370 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001371 do {
Paul E. McKenney0acc5122009-06-25 09:08:17 -07001372 if (irqreader && cur_ops->irq_capable) {
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001373 if (!timer_pending(&t))
Paul E. McKenney6155fec2010-02-22 17:05:04 -08001374 mod_timer(&t, jiffies + 1);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001375 }
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07001376 if (!rcu_torture_one_read(&rand) && !torture_must_stop())
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001377 schedule_timeout_interruptible(HZ);
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07001378 if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
Paul E. McKenney444da512018-07-04 14:14:42 -07001379 schedule_timeout_interruptible(1);
1380 lastsleep = jiffies + 10;
1381 }
Paul E. McKenneyc04dd092018-07-23 14:16:47 -07001382 while (num_online_cpus() < mynumonline && !torture_must_stop())
1383 schedule_timeout_interruptible(HZ / 5);
Paul E. McKenney628edaa2014-01-31 11:57:43 -08001384 stutter_wait("rcu_torture_reader");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001385 } while (!torture_must_stop());
Thomas Gleixner424c1b62014-03-23 08:58:27 -07001386 if (irqreader && cur_ops->irq_capable) {
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001387 del_timer_sync(&t);
Thomas Gleixner424c1b62014-03-23 08:58:27 -07001388 destroy_timer_on_stack(&t);
1389 }
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07001390 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001391 torture_kthread_stopping("rcu_torture_reader");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001392 return 0;
1393}
1394
1395/*
Joe Percheseea203f2014-07-14 09:16:15 -04001396 * Print torture statistics. Caller must ensure that there is only
1397 * one call to this function at a given time!!! This is normally
1398 * accomplished by relying on the module system to only have one copy
1399 * of the module loaded, and then by giving the rcu_torture_stats
1400 * kthread full control (or the init/cleanup functions when the rcu_torture_stats
1401 * thread is not running).
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001402 */
Chen Gangd1008952013-11-07 10:30:25 +08001403static void
Joe Percheseea203f2014-07-14 09:16:15 -04001404rcu_torture_stats_print(void)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001405{
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001406 int cpu;
1407 int i;
1408 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1409 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001410 static unsigned long rtcv_snap = ULONG_MAX;
Paul E. McKenney0032f4e2017-08-30 10:40:17 -07001411 static bool splatted;
Paul E. McKenney4ffa6692016-06-30 11:56:38 -07001412 struct task_struct *wtp;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001413
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08001414 for_each_possible_cpu(cpu) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001415 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
Paul E. McKenneyf042a432020-01-03 16:27:00 -08001416 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
1417 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001418 }
1419 }
1420 for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
1421 if (pipesummary[i] != 0)
1422 break;
1423 }
Joe Percheseea203f2014-07-14 09:16:15 -04001424
1425 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
Paul E. McKenney354ea052019-05-25 12:36:53 -07001426 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
Joe Percheseea203f2014-07-14 09:16:15 -04001427 rcu_torture_current,
Paul E. McKenney4ab00bd2019-12-05 15:53:28 -08001428 rcu_torture_current && !rcu_stall_is_suppressed_at_boot()
1429 ? "ver" : "VER",
Joe Percheseea203f2014-07-14 09:16:15 -04001430 rcu_torture_current_version,
1431 list_empty(&rcu_torture_freelist),
1432 atomic_read(&n_rcu_torture_alloc),
1433 atomic_read(&n_rcu_torture_alloc_fail),
1434 atomic_read(&n_rcu_torture_free));
SeongJae Park472213a2016-08-13 15:54:35 +09001435 pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
Joe Percheseea203f2014-07-14 09:16:15 -04001436 atomic_read(&n_rcu_torture_mberror),
SeongJae Park472213a2016-08-13 15:54:35 +09001437 n_rcu_torture_barrier_error,
Joe Percheseea203f2014-07-14 09:16:15 -04001438 n_rcu_torture_boost_ktrerror,
1439 n_rcu_torture_boost_rterror);
1440 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
1441 n_rcu_torture_boost_failure,
1442 n_rcu_torture_boosts,
Paul E. McKenney8da9a592018-05-22 11:17:51 -07001443 atomic_long_read(&n_rcu_torture_timers));
Joe Percheseea203f2014-07-14 09:16:15 -04001444 torture_onoff_stats();
Paul E. McKenneyfc6f9c52018-08-27 14:43:05 -07001445 pr_cont("barrier: %ld/%ld:%ld\n",
Joe Percheseea203f2014-07-14 09:16:15 -04001446 n_barrier_successes,
1447 n_barrier_attempts,
1448 n_rcu_torture_barrier_error);
1449
1450 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
Paul E. McKenney8b5ddf82019-08-14 12:02:40 -07001451 if (atomic_read(&n_rcu_torture_mberror) ||
1452 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
1453 n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001454 i > 1) {
Joe Percheseea203f2014-07-14 09:16:15 -04001455 pr_cont("%s", "!!! ");
Paul E. McKenney996417d2005-11-18 01:10:50 -08001456 atomic_inc(&n_rcu_torture_error);
Paul E. McKenney8b5ddf82019-08-14 12:02:40 -07001457 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
1458 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier()
1459 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
1460 WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
1461 WARN_ON_ONCE(n_rcu_torture_boost_failure); // RCU boost failed
1462 WARN_ON_ONCE(i > 1); // Too-short grace period
Paul E. McKenney996417d2005-11-18 01:10:50 -08001463 }
Joe Percheseea203f2014-07-14 09:16:15 -04001464 pr_cont("Reader Pipe: ");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001465 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
Joe Percheseea203f2014-07-14 09:16:15 -04001466 pr_cont(" %ld", pipesummary[i]);
1467 pr_cont("\n");
1468
1469 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1470 pr_cont("Reader Batch: ");
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07001471 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
Joe Percheseea203f2014-07-14 09:16:15 -04001472 pr_cont(" %ld", batchsummary[i]);
1473 pr_cont("\n");
1474
1475 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1476 pr_cont("Free-Block Circulation: ");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001477 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
Joe Percheseea203f2014-07-14 09:16:15 -04001478 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001479 }
Joe Percheseea203f2014-07-14 09:16:15 -04001480 pr_cont("\n");
1481
Josh Triplettc8e5b162007-05-08 00:33:20 -07001482 if (cur_ops->stats)
Joe Percheseea203f2014-07-14 09:16:15 -04001483 cur_ops->stats();
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001484 if (rtcv_snap == rcu_torture_current_version &&
Paul E. McKenney58c53362019-12-05 11:29:01 -08001485 rcu_torture_current != NULL && !rcu_stall_is_suppressed()) {
Paul E. McKenney7f6733c2017-04-18 17:17:35 -07001486 int __maybe_unused flags = 0;
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001487 unsigned long __maybe_unused gp_seq = 0;
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001488
1489 rcutorture_get_gp_data(cur_ops->ttype,
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001490 &flags, &gp_seq);
Paul E. McKenney7f6733c2017-04-18 17:17:35 -07001491 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001492 &flags, &gp_seq);
Paul E. McKenney4ffa6692016-06-30 11:56:38 -07001493 wtp = READ_ONCE(writer_task);
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001494 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
Paul E. McKenney18aff332015-11-17 13:35:28 -08001495 rcu_torture_writer_state_getname(),
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001496 rcu_torture_writer_state, gp_seq, flags,
Paul E. McKenney808de392017-06-19 10:03:22 -07001497 wtp == NULL ? ~0UL : wtp->state,
1498 wtp == NULL ? -1 : (int)task_cpu(wtp));
Paul E. McKenney0032f4e2017-08-30 10:40:17 -07001499 if (!splatted && wtp) {
1500 sched_show_task(wtp);
1501 splatted = true;
1502 }
Paul E. McKenneyafea2272014-03-12 07:10:41 -07001503 show_rcu_gp_kthreads();
Paul E. McKenney274529b2016-03-21 19:46:04 -07001504 rcu_ftrace_dump(DUMP_ALL);
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001505 }
1506 rtcv_snap = rcu_torture_current_version;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001507}
1508
1509/*
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001510 * Periodically prints torture statistics, if periodic statistics printing
1511 * was specified via the stat_interval module parameter.
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001512 */
1513static int
1514rcu_torture_stats(void *arg)
1515{
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001516 VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001517 do {
1518 schedule_timeout_interruptible(stat_interval * HZ);
1519 rcu_torture_stats_print();
Paul E. McKenneyf67a3352014-01-29 07:40:27 -08001520 torture_shutdown_absorb("rcu_torture_stats");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001521 } while (!torture_must_stop());
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001522 torture_kthread_stopping("rcu_torture_stats");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001523 return 0;
1524}
1525
Paul E. McKenneyeac45e52018-05-17 11:33:17 -07001526static void
Steven Rostedt (Red Hat)e66c33d2013-07-12 16:50:28 -04001527rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
Paul E. McKenney95c38322006-03-24 03:15:58 -08001528{
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001529 pr_alert("%s" TORTURE_FLAG
1530 "--- %s: nreaders=%d nfakewriters=%d "
1531 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1532 "shuffle_interval=%d stutter=%d irqreader=%d "
1533 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1534 "test_boost=%d/%d test_boost_interval=%d "
1535 "test_boost_duration=%d shutdown_secs=%d "
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001536 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
Paul E. McKenney67afeed2012-10-20 12:56:06 -07001537 "n_barrier_cbs=%d "
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001538 "onoff_interval=%d onoff_holdoff=%d\n",
1539 torture_type, tag, nrealreaders, nfakewriters,
1540 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1541 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1542 test_boost, cur_ops->can_boost,
1543 test_boost_interval, test_boost_duration, shutdown_secs,
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001544 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
Paul E. McKenney67afeed2012-10-20 12:56:06 -07001545 n_barrier_cbs,
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001546 onoff_interval, onoff_holdoff);
Paul E. McKenney95c38322006-03-24 03:15:58 -08001547}
1548
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001549static int rcutorture_booster_cleanup(unsigned int cpu)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001550{
1551 struct task_struct *t;
1552
1553 if (boost_tasks[cpu] == NULL)
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001554 return 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001555 mutex_lock(&boost_mutex);
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001556 t = boost_tasks[cpu];
1557 boost_tasks[cpu] = NULL;
Joel Fernandes (Google)450efca2018-06-10 16:45:43 -07001558 rcu_torture_enable_rt_throttle();
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001559 mutex_unlock(&boost_mutex);
1560
1561 /* This must be outside of the mutex, otherwise deadlock! */
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001562 torture_stop_kthread(rcu_torture_boost, t);
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001563 return 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001564}
1565
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001566static int rcutorture_booster_init(unsigned int cpu)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001567{
1568 int retval;
1569
1570 if (boost_tasks[cpu] != NULL)
1571 return 0; /* Already created, nothing more to do. */
1572
1573 /* Don't allow time recalculation while creating a new task. */
1574 mutex_lock(&boost_mutex);
Joel Fernandes (Google)450efca2018-06-10 16:45:43 -07001575 rcu_torture_disable_rt_throttle();
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001576 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
Eric Dumazet1f288092011-06-16 15:53:18 -07001577 boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
1578 cpu_to_node(cpu),
1579 "rcu_torture_boost");
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001580 if (IS_ERR(boost_tasks[cpu])) {
1581 retval = PTR_ERR(boost_tasks[cpu]);
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001582 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001583 n_rcu_torture_boost_ktrerror++;
1584 boost_tasks[cpu] = NULL;
1585 mutex_unlock(&boost_mutex);
1586 return retval;
1587 }
1588 kthread_bind(boost_tasks[cpu], cpu);
1589 wake_up_process(boost_tasks[cpu]);
1590 mutex_unlock(&boost_mutex);
1591 return 0;
1592}
1593
Paul E. McKenneyd5f546d2011-11-04 11:44:12 -07001594/*
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001595 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
1596 * induces a CPU stall for the time specified by stall_cpu.
1597 */
Paul Gortmaker49fb4c62013-06-19 14:52:21 -04001598static int rcu_torture_stall(void *args)
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001599{
1600 unsigned long stop_at;
1601
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001602 VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001603 if (stall_cpu_holdoff > 0) {
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001604 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001605 schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001606 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001607 }
1608 if (!kthread_should_stop()) {
Arnd Bergmann622be33f2018-06-18 16:47:34 +02001609 stop_at = ktime_get_seconds() + stall_cpu;
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001610	/* RCU CPU stall is expected behavior in the following code. */
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001611 rcu_read_lock();
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001612 if (stall_cpu_irqsoff)
1613 local_irq_disable();
1614 else
1615 preempt_disable();
1616 pr_alert("rcu_torture_stall start on CPU %d.\n",
1617 smp_processor_id());
Arnd Bergmann622be33f2018-06-18 16:47:34 +02001618 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
1619 stop_at))
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001620 continue; /* Induce RCU CPU stall warning. */
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001621 if (stall_cpu_irqsoff)
1622 local_irq_enable();
1623 else
1624 preempt_enable();
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001625 rcu_read_unlock();
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001626 pr_alert("rcu_torture_stall end.\n");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001627 }
Paul E. McKenneyf67a3352014-01-29 07:40:27 -08001628 torture_shutdown_absorb("rcu_torture_stall");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001629 while (!kthread_should_stop())
1630 schedule_timeout_interruptible(10 * HZ);
1631 return 0;
1632}
1633
1634/* Spawn CPU-stall kthread, if stall_cpu specified. */
1635static int __init rcu_torture_stall_init(void)
1636{
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001637 if (stall_cpu <= 0)
1638 return 0;
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08001639 return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001640}
1641
Paul E. McKenney9fdcb9a2018-07-19 13:36:00 -07001642/* State structure for forward-progress self-propagating RCU callback. */
1643struct fwd_cb_state {
1644 struct rcu_head rh;
1645 int stop;
1646};
1647
1648/*
1649 * Forward-progress self-propagating RCU callback function. Because
1650 * callbacks run from softirq, this function is an implicit RCU read-side
1651 * critical section.
1652 */
1653static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
1654{
1655 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
1656
1657 if (READ_ONCE(fcsp->stop)) {
1658 WRITE_ONCE(fcsp->stop, 2);
1659 return;
1660 }
1661 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
1662}
1663
Paul E. McKenney48718482018-08-15 15:32:51 -07001664/* State for continuous-flood RCU callbacks. */
1665struct rcu_fwd_cb {
1666 struct rcu_head rh;
1667 struct rcu_fwd_cb *rfc_next;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001668 struct rcu_fwd *rfc_rfp;
Paul E. McKenney48718482018-08-15 15:32:51 -07001669 int rfc_gps;
1670};
Paul E. McKenneya289e602019-11-05 08:31:56 -08001671
Paul E. McKenney48718482018-08-15 15:32:51 -07001672#define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */
1673#define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */
1674#define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */
Paul E. McKenney2e57bf92018-10-05 16:43:09 -07001675#define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */
Paul E. McKenneya289e602019-11-05 08:31:56 -08001676#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
1677
Paul E. McKenneycd618d12019-01-08 13:41:26 -08001678struct rcu_launder_hist {
1679 long n_launders;
1680 unsigned long launder_gp_seq;
1681};
Paul E. McKenney48718482018-08-15 15:32:51 -07001682
Paul E. McKenneya289e602019-11-05 08:31:56 -08001683struct rcu_fwd {
1684 spinlock_t rcu_fwd_lock;
1685 struct rcu_fwd_cb *rcu_fwd_cb_head;
1686 struct rcu_fwd_cb **rcu_fwd_cb_tail;
1687 long n_launders_cb;
1688 unsigned long rcu_fwd_startat;
1689 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
1690 unsigned long rcu_launder_gp_seq_start;
1691};
1692
Paul E. McKenney5155be92019-11-06 08:35:08 -08001693struct rcu_fwd *rcu_fwds;
Paul E. McKenneya289e602019-11-05 08:31:56 -08001694bool rcu_fwd_emergency_stop;
Paul E. McKenney48718482018-08-15 15:32:51 -07001695
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001696static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
Paul E. McKenney1a682752018-10-03 12:33:41 -07001697{
Paul E. McKenneycd618d12019-01-08 13:41:26 -08001698 unsigned long gps;
1699 unsigned long gps_old;
Paul E. McKenney1a682752018-10-03 12:33:41 -07001700 int i;
1701 int j;
1702
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001703 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
1704 if (rfp->n_launders_hist[i].n_launders > 0)
Paul E. McKenney1a682752018-10-03 12:33:41 -07001705 break;
Paul E. McKenney73d665b2018-10-04 10:54:22 -07001706 pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001707 __func__, jiffies - rfp->rcu_fwd_startat);
1708 gps_old = rfp->rcu_launder_gp_seq_start;
Paul E. McKenneycd618d12019-01-08 13:41:26 -08001709 for (j = 0; j <= i; j++) {
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001710 gps = rfp->n_launders_hist[j].launder_gp_seq;
Paul E. McKenneycd618d12019-01-08 13:41:26 -08001711 pr_cont(" %ds/%d: %ld:%ld",
Paul E. McKenneya289e602019-11-05 08:31:56 -08001712 j + 1, FWD_CBS_HIST_DIV,
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001713 rfp->n_launders_hist[j].n_launders,
Paul E. McKenneycd618d12019-01-08 13:41:26 -08001714 rcutorture_seq_diff(gps, gps_old));
1715 gps_old = gps;
1716 }
Paul E. McKenney1a682752018-10-03 12:33:41 -07001717 pr_cont("\n");
1718}
1719
Paul E. McKenney48718482018-08-15 15:32:51 -07001720/* Callback function for continuous-flood RCU callbacks. */
1721static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
1722{
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001723 unsigned long flags;
Paul E. McKenney48718482018-08-15 15:32:51 -07001724 int i;
1725 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
1726 struct rcu_fwd_cb **rfcpp;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001727 struct rcu_fwd *rfp = rfcp->rfc_rfp;
Paul E. McKenney48718482018-08-15 15:32:51 -07001728
1729 rfcp->rfc_next = NULL;
1730 rfcp->rfc_gps++;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001731 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
1732 rfcpp = rfp->rcu_fwd_cb_tail;
1733 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
Paul E. McKenney48718482018-08-15 15:32:51 -07001734 WRITE_ONCE(*rfcpp, rfcp);
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001735 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
1736 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
1737 if (i >= ARRAY_SIZE(rfp->n_launders_hist))
1738 i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
1739 rfp->n_launders_hist[i].n_launders++;
1740 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
1741 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001742}
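/*
 * Worked example of the histogram indexing above: with FWD_CBS_HIST_DIV of
 * 10, each bucket covers HZ/10 jiffies (0.1 s), and N_LAUNDERS_HIST works
 * out to 2 * 8 * HZ / (HZ / 10) = 160 buckets.  A callback invoked 0.25 s
 * after rcu_fwd_startat therefore lands in bucket 2 (the third 0.1 s
 * interval), subject to the clamp to the final bucket.
 */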
1743
Paul E. McKenneyab21f602019-04-14 18:30:22 -07001744// Give the scheduler a chance, even on nohz_full CPUs.
Paul E. McKenneybd1bfc52019-06-22 14:35:59 -07001745static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
Paul E. McKenneyab21f602019-04-14 18:30:22 -07001746{
Sebastian Andrzej Siewior90326f02019-10-15 21:18:14 +02001747 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
Paul E. McKenneybd1bfc52019-06-22 14:35:59 -07001748 // Real call_rcu() floods hit userspace, so emulate that.
1749 if (need_resched() || (iter & 0xfff))
Paul E. McKenneyab21f602019-04-14 18:30:22 -07001750 schedule();
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07001751 return;
Paul E. McKenneyab21f602019-04-14 18:30:22 -07001752 }
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07001753 // No userspace emulation: CB invocation throttles call_rcu()
1754 cond_resched();
Paul E. McKenneyab21f602019-04-14 18:30:22 -07001755}
1756
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001757/*
1758 * Free all callbacks on the rcu_fwd_cb_head list, either because the
1759 * test is over or because we hit an OOM event.
1760 */
Paul E. McKenney67641002019-11-06 08:20:20 -08001761static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001762{
1763 unsigned long flags;
1764 unsigned long freed = 0;
1765 struct rcu_fwd_cb *rfcp;
1766
1767 for (;;) {
Paul E. McKenney67641002019-11-06 08:20:20 -08001768 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
1769 rfcp = rfp->rcu_fwd_cb_head;
Paul E. McKenney140e53f2019-04-09 10:08:18 -07001770 if (!rfcp) {
Paul E. McKenney67641002019-11-06 08:20:20 -08001771 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001772 break;
Paul E. McKenney140e53f2019-04-09 10:08:18 -07001773 }
Paul E. McKenney67641002019-11-06 08:20:20 -08001774 rfp->rcu_fwd_cb_head = rfcp->rfc_next;
1775 if (!rfp->rcu_fwd_cb_head)
1776 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
1777 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001778 kfree(rfcp);
1779 freed++;
Paul E. McKenneybd1bfc52019-06-22 14:35:59 -07001780 rcu_torture_fwd_prog_cond_resched(freed);
Paul E. McKenney79ba7ff2019-08-04 13:17:35 -07001781 if (tick_nohz_full_enabled()) {
1782 local_irq_save(flags);
1783 rcu_momentary_dyntick_idle();
1784 local_irq_restore(flags);
1785 }
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001786 }
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001787 return freed;
Paul E. McKenney48718482018-08-15 15:32:51 -07001788}
1789
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001790/* Carry out need_resched()/cond_resched() forward-progress testing. */
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001791static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
1792 int *tested, int *tested_tries)
Paul E. McKenney1b272912018-07-18 14:32:31 -07001793{
Paul E. McKenney119248b2018-07-18 15:39:37 -07001794 unsigned long cver;
Paul E. McKenneyf4de46e2018-07-24 20:50:40 -07001795 unsigned long dur;
Paul E. McKenney7c590fc2018-08-07 16:42:42 -07001796 struct fwd_cb_state fcs;
Paul E. McKenney119248b2018-07-18 15:39:37 -07001797 unsigned long gps;
Paul E. McKenney1b272912018-07-18 14:32:31 -07001798 int idx;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001799 int sd;
1800 int sd4;
1801 bool selfpropcb = false;
1802 unsigned long stopat;
1803 static DEFINE_TORTURE_RANDOM(trs);
1804
1805 if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
1806 init_rcu_head_on_stack(&fcs.rh);
1807 selfpropcb = true;
1808 }
1809
1810 /* Tight loop containing cond_resched(). */
Paul E. McKenneye8516c62019-04-09 11:06:32 -07001811 WRITE_ONCE(rcu_fwd_cb_nodelay, true);
1812 cur_ops->sync(); /* Later readers see above write. */
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001813 if (selfpropcb) {
1814 WRITE_ONCE(fcs.stop, 0);
1815 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
1816 }
1817 cver = READ_ONCE(rcu_torture_current_version);
1818 gps = cur_ops->get_gp_seq();
1819 sd = cur_ops->stall_dur() + 1;
1820 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
1821 dur = sd4 + torture_random(&trs) % (sd - sd4);
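	/*
	 * Hypothetical example of the above: if cur_ops->stall_dur() returned
	 * 2100 jiffies and fwd_progress_div were 4, then sd would be 2101,
	 * sd4 would be 526, and dur would be drawn uniformly from
	 * [526, 2100] jiffies, i.e., between about a quarter of and just
	 * under the full stall duration.
	 */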
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001822 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
1823 stopat = rfp->rcu_fwd_startat + dur;
Paul E. McKenneye0aff972018-10-01 17:40:54 -07001824 while (time_before(jiffies, stopat) &&
Paul E. McKenney60013d52019-07-10 08:30:00 -07001825 !shutdown_time_arrived() &&
Paul E. McKenneye0aff972018-10-01 17:40:54 -07001826 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001827 idx = cur_ops->readlock();
1828 udelay(10);
1829 cur_ops->readunlock(idx);
1830 if (!fwd_progress_need_resched || need_resched())
Paul E. McKenneyfbbd5e32019-08-15 11:43:53 -07001831 cond_resched();
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001832 }
1833 (*tested_tries)++;
Paul E. McKenneye0aff972018-10-01 17:40:54 -07001834 if (!time_before(jiffies, stopat) &&
Paul E. McKenney60013d52019-07-10 08:30:00 -07001835 !shutdown_time_arrived() &&
Paul E. McKenneye0aff972018-10-01 17:40:54 -07001836 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001837 (*tested)++;
1838 cver = READ_ONCE(rcu_torture_current_version) - cver;
1839 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
1840 WARN_ON(!cver && gps < 2);
1841 pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
1842 }
1843 if (selfpropcb) {
1844 WRITE_ONCE(fcs.stop, 1);
1845 cur_ops->sync(); /* Wait for running CB to complete. */
1846 cur_ops->cb_barrier(); /* Wait for queued callbacks. */
1847 }
1848
1849 if (selfpropcb) {
1850 WARN_ON(READ_ONCE(fcs.stop) != 2);
1851 destroy_rcu_head_on_stack(&fcs.rh);
1852 }
Paul E. McKenneye8516c62019-04-09 11:06:32 -07001853 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
1854 WRITE_ONCE(rcu_fwd_cb_nodelay, false);
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001855}
1856
1857/* Carry out call_rcu() forward-progress testing. */
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001858static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001859{
1860 unsigned long cver;
Paul E. McKenney79ba7ff2019-08-04 13:17:35 -07001861 unsigned long flags;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001862 unsigned long gps;
1863 int i;
Paul E. McKenney48718482018-08-15 15:32:51 -07001864 long n_launders;
1865 long n_launders_cb_snap;
1866 long n_launders_sa;
1867 long n_max_cbs;
1868 long n_max_gps;
1869 struct rcu_fwd_cb *rfcp;
1870 struct rcu_fwd_cb *rfcpn;
Paul E. McKenney1b272912018-07-18 14:32:31 -07001871 unsigned long stopat;
Paul E. McKenney48718482018-08-15 15:32:51 -07001872 unsigned long stoppedat;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001873
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001874 if (READ_ONCE(rcu_fwd_emergency_stop))
1875 return; /* Get out of the way quickly, no GP wait! */
Paul E. McKenneyc682db52019-04-19 07:38:27 -07001876 if (!cur_ops->call)
1877 return; /* Can't do call_rcu() fwd prog without ->call. */
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001878
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001879 /* Loop continuously posting RCU callbacks. */
1880 WRITE_ONCE(rcu_fwd_cb_nodelay, true);
1881 cur_ops->sync(); /* Later readers see above write. */
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001882 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
1883 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001884 n_launders = 0;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001885 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001886 n_launders_sa = 0;
1887 n_max_cbs = 0;
1888 n_max_gps = 0;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001889 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
1890 rfp->n_launders_hist[i].n_launders = 0;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001891 cver = READ_ONCE(rcu_torture_current_version);
1892 gps = cur_ops->get_gp_seq();
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001893 rfp->rcu_launder_gp_seq_start = gps;
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07001894 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
Paul E. McKenneye0aff972018-10-01 17:40:54 -07001895 while (time_before(jiffies, stopat) &&
Paul E. McKenney60013d52019-07-10 08:30:00 -07001896 !shutdown_time_arrived() &&
Paul E. McKenneye0aff972018-10-01 17:40:54 -07001897 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001898 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001899 rfcpn = NULL;
1900 if (rfcp)
1901 rfcpn = READ_ONCE(rfcp->rfc_next);
1902 if (rfcpn) {
1903 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
1904 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
1905 break;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001906 rfp->rcu_fwd_cb_head = rfcpn;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001907 n_launders++;
1908 n_launders_sa++;
1909 } else {
1910 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
1911 if (WARN_ON_ONCE(!rfcp)) {
1912 schedule_timeout_interruptible(1);
1913 continue;
1914 }
1915 n_max_cbs++;
1916 n_launders_sa = 0;
1917 rfcp->rfc_gps = 0;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001918 rfcp->rfc_rfp = rfp;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001919 }
1920 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
Paul E. McKenneybd1bfc52019-06-22 14:35:59 -07001921 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
Paul E. McKenney79ba7ff2019-08-04 13:17:35 -07001922 if (tick_nohz_full_enabled()) {
1923 local_irq_save(flags);
1924 rcu_momentary_dyntick_idle();
1925 local_irq_restore(flags);
1926 }
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001927 }
1928 stoppedat = jiffies;
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001929 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001930 cver = READ_ONCE(rcu_torture_current_version) - cver;
1931 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
1932 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
Paul E. McKenney67641002019-11-06 08:20:20 -08001933 (void)rcu_torture_fwd_prog_cbfree(rfp);
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001934
Paul E. McKenney60013d52019-07-10 08:30:00 -07001935 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
1936 !shutdown_time_arrived()) {
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001937 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
1938 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
1939 __func__,
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001940 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001941 n_launders + n_max_cbs - n_launders_cb_snap,
1942 n_launders, n_launders_sa,
1943 n_max_gps, n_max_cbs, cver, gps);
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001944 rcu_torture_fwd_cb_hist(rfp);
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001945 }
Paul E. McKenneye8516c62019-04-09 11:06:32 -07001946 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
Paul E. McKenneyd38e6dc2019-07-28 12:00:48 -07001947 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
Paul E. McKenneye8516c62019-04-09 11:06:32 -07001948 WRITE_ONCE(rcu_fwd_cb_nodelay, false);
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001949}
1950
Paul E. McKenneye0aff972018-10-01 17:40:54 -07001951
1952/*
1953 * OOM notifier, but this only prints diagnostic information for the
1954 * current forward-progress test.
1955 */
1956static int rcutorture_oom_notify(struct notifier_block *self,
1957 unsigned long notused, void *nfreed)
1958{
Paul E. McKenney5155be92019-11-06 08:35:08 -08001959 struct rcu_fwd *rfp = rcu_fwds;
Paul E. McKenney67641002019-11-06 08:20:20 -08001960
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001961 WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
1962 __func__);
Paul E. McKenney67641002019-11-06 08:20:20 -08001963 rcu_torture_fwd_cb_hist(rfp);
1964 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2);
Paul E. McKenneye0aff972018-10-01 17:40:54 -07001965 WRITE_ONCE(rcu_fwd_emergency_stop, true);
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001966 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
1967 pr_info("%s: Freed %lu RCU callbacks.\n",
Paul E. McKenney67641002019-11-06 08:20:20 -08001968 __func__, rcu_torture_fwd_prog_cbfree(rfp));
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001969 rcu_barrier();
1970 pr_info("%s: Freed %lu RCU callbacks.\n",
Paul E. McKenney67641002019-11-06 08:20:20 -08001971 __func__, rcu_torture_fwd_prog_cbfree(rfp));
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001972 rcu_barrier();
1973 pr_info("%s: Freed %lu RCU callbacks.\n",
Paul E. McKenney67641002019-11-06 08:20:20 -08001974 __func__, rcu_torture_fwd_prog_cbfree(rfp));
Paul E. McKenney2667ccc2018-10-05 09:09:49 -07001975 smp_mb(); /* Frees before return to avoid redoing OOM. */
1976 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
1977 pr_info("%s returning after OOM processing.\n", __func__);
Paul E. McKenneye0aff972018-10-01 17:40:54 -07001978 return NOTIFY_OK;
1979}
1980
1981static struct notifier_block rcutorture_oom_nb = {
1982 .notifier_call = rcutorture_oom_notify
1983};
1984
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001985/* Carry out grace-period forward-progress testing. */
1986static int rcu_torture_fwd_prog(void *args)
1987{
Paul E. McKenney6b1b8322019-11-05 09:08:58 -08001988 struct rcu_fwd *rfp = args;
Paul E. McKenneyf4de46e2018-07-24 20:50:40 -07001989 int tested = 0;
Paul E. McKenney152f4af2018-07-19 10:57:58 -07001990 int tested_tries = 0;
Paul E. McKenney1b272912018-07-18 14:32:31 -07001991
1992 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
Paul E. McKenney5ab7ab82018-09-21 18:08:09 -07001993 rcu_bind_current_to_nocb();
Paul E. McKenneyfecad502018-07-20 12:18:11 -07001994 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
1995 set_user_nice(current, MAX_NICE);
Paul E. McKenney1b272912018-07-18 14:32:31 -07001996 do {
1997 schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
Paul E. McKenneye0aff972018-10-01 17:40:54 -07001998 WRITE_ONCE(rcu_fwd_emergency_stop, false);
1999 register_oom_notifier(&rcutorture_oom_nb);
Paul E. McKenney43550802019-12-04 15:58:41 -08002000 if (!IS_ENABLED(CONFIG_TINY_RCU) ||
2001 rcu_inkernel_boot_has_ended())
2002 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
2003 if (rcu_inkernel_boot_has_ended())
2004 rcu_torture_fwd_prog_cr(rfp);
Paul E. McKenneye0aff972018-10-01 17:40:54 -07002005 unregister_oom_notifier(&rcutorture_oom_nb);
Paul E. McKenney48718482018-08-15 15:32:51 -07002006
Paul E. McKenney1b272912018-07-18 14:32:31 -07002007 /* Avoid slow periods, better to test when busy. */
2008 stutter_wait("rcu_torture_fwd_prog");
2009 } while (!torture_must_stop());
Paul E. McKenney152f4af2018-07-19 10:57:58 -07002010 /* Short runs might not contain a valid forward-progress attempt. */
2011 WARN_ON(!tested && tested_tries >= 5);
Paul E. McKenneyf4de46e2018-07-24 20:50:40 -07002012 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
Paul E. McKenney1b272912018-07-18 14:32:31 -07002013 torture_kthread_stopping("rcu_torture_fwd_prog");
2014 return 0;
2015}
2016
2017/* If forward-progress checking is requested and feasible, spawn the thread. */
2018static int __init rcu_torture_fwd_prog_init(void)
2019{
Paul E. McKenney5155be92019-11-06 08:35:08 -08002020 struct rcu_fwd *rfp;
Paul E. McKenney67641002019-11-06 08:20:20 -08002021
Paul E. McKenney1b272912018-07-18 14:32:31 -07002022 if (!fwd_progress)
2023 return 0; /* Not requested, so don't do it. */
Paul E. McKenney5ac7cdc2018-10-16 05:46:58 -07002024 if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 ||
2025 cur_ops == &rcu_busted_ops) {
Paul E. McKenney1b272912018-07-18 14:32:31 -07002026 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
2027 return 0;
2028 }
2029 if (stall_cpu > 0) {
2030 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
2031 if (IS_MODULE(CONFIG_RCU_TORTURE_TESTS))
2032 return -EINVAL; /* In module, can fail back to user. */
2033 WARN_ON(1); /* Make sure rcutorture notices conflict. */
2034 return 0;
2035 }
2036 if (fwd_progress_holdoff <= 0)
2037 fwd_progress_holdoff = 1;
2038 if (fwd_progress_div <= 0)
2039 fwd_progress_div = 4;
Paul E. McKenney5155be92019-11-06 08:35:08 -08002040 rfp = kzalloc(sizeof(*rfp), GFP_KERNEL);
2041 if (!rfp)
2042 return -ENOMEM;
2043 spin_lock_init(&rfp->rcu_fwd_lock);
2044 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
Paul E. McKenney67641002019-11-06 08:20:20 -08002045 return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
Paul E. McKenney1b272912018-07-18 14:32:31 -07002046}
2047
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002048/* Callback function for RCU barrier testing. */
Rashika Kheriab3b8a4d2014-02-27 17:16:57 +05302049static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002050{
2051 atomic_inc(&barrier_cbs_invoked);
2052}
2053
2054/* kthread function to register callbacks used to test RCU barriers. */
2055static int rcu_torture_barrier_cbs(void *arg)
2056{
2057 long myid = (long)arg;
Paul E. McKenneyc6ebcbb2012-05-28 19:21:41 -07002058	bool lastphase = false;
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07002059 bool newphase;
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002060 struct rcu_head rcu;
2061
2062 init_rcu_head_on_stack(&rcu);
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08002063 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
Linus Torvalds971eae72014-03-31 11:21:19 -07002064 set_user_nice(current, MAX_NICE);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002065 do {
2066 wait_event(barrier_cbs_wq[myid],
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07002067 (newphase =
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07002068 smp_load_acquire(&barrier_phase)) != lastphase ||
Paul E. McKenney36970bb2014-01-30 15:49:29 -08002069 torture_must_stop());
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07002070 lastphase = newphase;
Paul E. McKenney36970bb2014-01-30 15:49:29 -08002071 if (torture_must_stop())
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002072 break;
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07002073 /*
2074 * The above smp_load_acquire() ensures barrier_phase load
Paul E. McKenneyaab05732016-05-02 12:20:51 -07002075 * is ordered before the following ->call().
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07002076 */
Paul E. McKenney0aa67e72016-03-30 11:40:44 -07002077 local_irq_disable(); /* Just to test no-irq call_rcu(). */
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002078 cur_ops->call(&rcu, rcu_torture_barrier_cbf);
Paul E. McKenney0aa67e72016-03-30 11:40:44 -07002079 local_irq_enable();
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002080 if (atomic_dec_and_test(&barrier_cbs_count))
2081 wake_up(&barrier_wq);
Paul E. McKenney36970bb2014-01-30 15:49:29 -08002082 } while (!torture_must_stop());
Paul E. McKenney69c60452014-07-01 11:59:36 -07002083 if (cur_ops->cb_barrier != NULL)
2084 cur_ops->cb_barrier();
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002085 destroy_rcu_head_on_stack(&rcu);
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08002086 torture_kthread_stopping("rcu_torture_barrier_cbs");
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002087 return 0;
2088}
2089
2090/* kthread function to drive and coordinate RCU barrier testing. */
2091static int rcu_torture_barrier(void *arg)
2092{
2093 int i;
2094
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08002095 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002096 do {
2097 atomic_set(&barrier_cbs_invoked, 0);
2098 atomic_set(&barrier_cbs_count, n_barrier_cbs);
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07002099 /* Ensure barrier_phase ordered after prior assignments. */
2100 smp_store_release(&barrier_phase, !barrier_phase);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002101 for (i = 0; i < n_barrier_cbs; i++)
2102 wake_up(&barrier_cbs_wq[i]);
2103 wait_event(barrier_wq,
2104 atomic_read(&barrier_cbs_count) == 0 ||
Paul E. McKenney36970bb2014-01-30 15:49:29 -08002105 torture_must_stop());
2106 if (torture_must_stop())
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002107 break;
2108 n_barrier_attempts++;
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07002109 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002110 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
2111 n_rcu_torture_barrier_error++;
Paul E. McKenney7602de4a2014-12-17 18:39:54 -08002112 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
2113 atomic_read(&barrier_cbs_invoked),
2114 n_barrier_cbs);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002115 WARN_ON_ONCE(1);
Joel Fernandes (Google)bf5b6432018-06-19 15:14:19 -07002116 } else {
2117 n_barrier_successes++;
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002118 }
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002119 schedule_timeout_interruptible(HZ / 10);
Paul E. McKenney36970bb2014-01-30 15:49:29 -08002120 } while (!torture_must_stop());
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08002121 torture_kthread_stopping("rcu_torture_barrier");
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002122 return 0;
2123}
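
/*
 * A minimal way to exercise the code above, assuming a modular build and
 * that n_barrier_cbs is exposed as a module parameter like the other
 * tunables in this file:
 *
 *	modprobe rcutorture n_barrier_cbs=4
 *
 * Any value of n_barrier_cbs less than or equal to zero causes
 * rcu_torture_barrier_init() below to skip barrier testing entirely.
 */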
2124
2125/* Initialize RCU barrier testing. */
2126static int rcu_torture_barrier_init(void)
2127{
2128 int i;
2129 int ret;
2130
Paul E. McKenneyd9eba7682015-05-14 15:35:43 -07002131 if (n_barrier_cbs <= 0)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002132 return 0;
2133 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07002134 pr_alert("%s" TORTURE_FLAG
2135 " Call or barrier ops missing for %s,\n",
2136 torture_type, cur_ops->name);
2137 pr_alert("%s" TORTURE_FLAG
2138 " RCU barrier testing omitted from run.\n",
2139 torture_type);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002140 return 0;
2141 }
2142 atomic_set(&barrier_cbs_count, 0);
2143 atomic_set(&barrier_cbs_invoked, 0);
2144 barrier_cbs_tasks =
Paul E. McKenney68a675d2017-12-01 14:26:56 -08002145 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002146 GFP_KERNEL);
2147 barrier_cbs_wq =
Paul E. McKenney68a675d2017-12-01 14:26:56 -08002148 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
Sasha Levinde5e6432012-12-20 14:11:28 -05002149	if (barrier_cbs_tasks == NULL || barrier_cbs_wq == NULL)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002150 return -ENOMEM;
2151 for (i = 0; i < n_barrier_cbs; i++) {
2152 init_waitqueue_head(&barrier_cbs_wq[i]);
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002153 ret = torture_create_kthread(rcu_torture_barrier_cbs,
2154 (void *)(long)i,
2155 barrier_cbs_tasks[i]);
2156 if (ret)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002157 return ret;
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002158 }
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002159 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002160}
2161
2162/* Clean up after RCU barrier testing. */
2163static void rcu_torture_barrier_cleanup(void)
2164{
2165 int i;
2166
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002167 torture_stop_kthread(rcu_torture_barrier, barrier_task);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002168 if (barrier_cbs_tasks != NULL) {
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002169 for (i = 0; i < n_barrier_cbs; i++)
2170 torture_stop_kthread(rcu_torture_barrier_cbs,
2171 barrier_cbs_tasks[i]);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002172 kfree(barrier_cbs_tasks);
2173 barrier_cbs_tasks = NULL;
2174 }
2175 if (barrier_cbs_wq != NULL) {
2176 kfree(barrier_cbs_wq);
2177 barrier_cbs_wq = NULL;
2178 }
2179}
2180
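/*
 * Is RCU priority-boost testing both requested and possible?  This
 * requires either test_boost=2 or test_boost=1 combined with an ops
 * vector that sets ->can_boost, and it further requires
 * rcu_get_gp_kthreads_prio() to report a real-time priority of at
 * least 2.  Otherwise boost testing is skipped, with a one-time
 * warning when the grace-period kthread priority is too low.
 */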
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002181static bool rcu_torture_can_boost(void)
2182{
2183 static int boost_warn_once;
2184 int prio;
2185
2186 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
2187 return false;
2188
2189 prio = rcu_get_gp_kthreads_prio();
2190 if (!prio)
2191 return false;
2192
2193 if (prio < 2) {
2194 if (boost_warn_once == 1)
2195 return false;
2196
Joel Fernandes (Google)bf5b6432018-06-19 15:14:19 -07002197 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002198 boost_warn_once = 1;
2199 return false;
2200 }
2201
2202 return true;
2203}
2204
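/*
 * Dynamically allocated CPU-hotplug state returned by cpuhp_setup_state()
 * in rcu_torture_init() when boost testing is enabled, and later handed
 * back to cpuhp_remove_state() by rcu_torture_cleanup().
 */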
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02002205static enum cpuhp_state rcutor_hp;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002206
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002207static void
2208rcu_torture_cleanup(void)
2209{
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07002210 int firsttime;
Paul E. McKenney034777d2018-04-19 08:43:11 -07002211 int flags = 0;
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07002212 unsigned long gp_seq = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002213 int i;
2214
Davidlohr Buesod36a7a02014-09-11 20:40:21 -07002215 if (torture_cleanup_begin()) {
Paul E. McKenney343e9092008-12-15 16:13:07 -08002216 if (cur_ops->cb_barrier != NULL)
2217 cur_ops->cb_barrier();
2218 return;
2219 }
Paul E. McKenneyb813afa2019-03-21 09:27:28 -07002220 if (!cur_ops) {
2221 torture_cleanup_end();
2222 return;
2223 }
Paul E. McKenney3808dc92014-01-28 15:29:21 -08002224
Paul E. McKenneyf7a81b12019-06-25 13:32:51 -07002225 show_rcu_gp_kthreads();
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002226 rcu_torture_barrier_cleanup();
Paul E. McKenney1b272912018-07-18 14:32:31 -07002227 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002228 torture_stop_kthread(rcu_torture_stall, stall_task);
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002229 torture_stop_kthread(rcu_torture_writer, writer_task);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002230
Josh Triplettc8e5b162007-05-08 00:33:20 -07002231 if (reader_tasks) {
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002232 for (i = 0; i < nrealreaders; i++)
2233 torture_stop_kthread(rcu_torture_reader,
2234 reader_tasks[i]);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002235 kfree(reader_tasks);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002236 }
2237 rcu_torture_current = NULL;
2238
Josh Triplettc8e5b162007-05-08 00:33:20 -07002239 if (fakewriter_tasks) {
Josh Triplettb772e1d2006-10-04 02:17:13 -07002240 for (i = 0; i < nfakewriters; i++) {
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002241 torture_stop_kthread(rcu_torture_fakewriter,
2242 fakewriter_tasks[i]);
Josh Triplettb772e1d2006-10-04 02:17:13 -07002243 }
2244 kfree(fakewriter_tasks);
2245 fakewriter_tasks = NULL;
2246 }
2247
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07002248 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
2249 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
2250 pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
2251 cur_ops->name, gp_seq, flags);
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002252 torture_stop_kthread(rcu_torture_stats, stats_task);
2253 torture_stop_kthread(rcu_torture_fqs, fqs_task);
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002254 if (rcu_torture_can_boost())
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02002255 cpuhp_remove_state(rcutor_hp);
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002256
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07002257 /*
Paul E. McKenney62a1a942018-07-07 18:12:26 -07002258 * Wait for all RCU callbacks to fire, then do torture-type-specific
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07002259 * cleanup operations.
2260 */
Paul E. McKenney23269742008-05-12 21:21:05 +02002261 if (cur_ops->cb_barrier != NULL)
2262 cur_ops->cb_barrier();
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07002263 if (cur_ops->cleanup != NULL)
2264 cur_ops->cleanup();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002265
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002266 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002267
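	/*
	 * Each recorded reader segment prints as its readstate flags
	 * followed by its delay components and preemption status, for
	 * example (purely hypothetical values):
	 *
	 *	0: 0x1 1jiffies+500us
	 *	1: 0x9 2ms preempted
	 */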
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07002268 if (err_segs_recorded) {
2269 pr_alert("Failure/close-call rcutorture reader segments:\n");
2270 if (rt_read_nsegs == 0)
2271 pr_alert("\t: No segments recorded!!!\n");
2272		for (i = 0; i < rt_read_nsegs; i++) {
2273			firsttime = 1;
2274 pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
2275 if (err_segs[i].rt_delay_jiffies != 0) {
2276 pr_cont("%s%ldjiffies", firsttime ? "" : "+",
2277 err_segs[i].rt_delay_jiffies);
2278 firsttime = 0;
2279 }
2280 if (err_segs[i].rt_delay_ms != 0) {
2281 pr_cont("%s%ldms", firsttime ? "" : "+",
2282 err_segs[i].rt_delay_ms);
2283 firsttime = 0;
2284 }
2285 if (err_segs[i].rt_delay_us != 0) {
2286 pr_cont("%s%ldus", firsttime ? "" : "+",
2287 err_segs[i].rt_delay_us);
2288 firsttime = 0;
2289 }
2290 pr_cont("%s\n",
2291 err_segs[i].rt_preempted ? "preempted" : "");
2292
2293 }
2294 }
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002295 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002296 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
Paul E. McKenney2e9e8082014-01-28 15:58:22 -08002297 else if (torture_onoff_failures())
Paul E. McKenney091541b2012-01-10 12:51:14 -08002298 rcu_torture_print_module_parms(cur_ops,
2299 "End of test: RCU_HOTPLUG");
Paul E. McKenney95c38322006-03-24 03:15:58 -08002300 else
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002301 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
Davidlohr Buesod36a7a02014-09-11 20:40:21 -07002302 torture_cleanup_end();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002303}
2304
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002305#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2306static void rcu_torture_leak_cb(struct rcu_head *rhp)
2307{
2308}
2309
2310static void rcu_torture_err_cb(struct rcu_head *rhp)
2311{
2312 /*
2313 * This -might- happen due to race conditions, but is unlikely.
2314 * The scenario that leads to this happening is that the
2315 * first of the pair of duplicate callbacks is queued,
2316 * someone else starts a grace period that includes that
2317 * callback, then the second of the pair must wait for the
2318 * next grace period. Unlikely, but can happen. If it
2319 * does happen, the debug-objects subsystem won't have splatted.
2320 */
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08002321 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002322}
2323#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2324
2325/*
2326 * Verify that double-free causes debug-objects to complain, but only
2327 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test
2328 * cannot be carried out.
2329 */
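/*
 * One way to run this check, assuming a modular build, a kernel built
 * with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y, and object_debug exposed as a
 * module parameter like the other tunables in this file:
 *
 *	modprobe rcutorture object_debug=1
 *
 * rcu_torture_init() invokes rcu_test_debug_objects() only when
 * object_debug is set.
 */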
2330static void rcu_test_debug_objects(void)
2331{
2332#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2333 struct rcu_head rh1;
2334 struct rcu_head rh2;
2335
2336 init_rcu_head_on_stack(&rh1);
2337 init_rcu_head_on_stack(&rh2);
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08002338 pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002339
2340 /* Try to queue the rh2 pair of callbacks for the same grace period. */
2341 preempt_disable(); /* Prevent preemption from interrupting test. */
2342 rcu_read_lock(); /* Make it impossible to finish a grace period. */
2343 call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
2344 local_irq_disable(); /* Make it harder to start a new grace period. */
2345 call_rcu(&rh2, rcu_torture_leak_cb);
2346 call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
2347 local_irq_enable();
2348 rcu_read_unlock();
2349 preempt_enable();
2350
2351 /* Wait for them all to get done so we can safely return. */
2352 rcu_barrier();
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08002353 pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002354 destroy_rcu_head_on_stack(&rh1);
2355 destroy_rcu_head_on_stack(&rh2);
2356#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08002357 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002358#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2359}
2360
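/*
 * Passed to torture_onoff_init() as its sync function: every 4096th
 * invocation, run the current torture type's synchronous grace-period
 * primitive so that CPU-hotplug torture occasionally waits for a full
 * grace period.
 */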
Paul E. McKenney3a6cb582018-12-10 09:44:52 -08002361static void rcutorture_sync(void)
2362{
2363 static unsigned long n;
2364
2365 if (cur_ops->sync && !(++n & 0xfff))
2366 cur_ops->sync();
2367}
2368
Josh Triplett6f8bc5002007-05-08 00:25:24 -07002369static int __init
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002370rcu_torture_init(void)
2371{
Paul E. McKenneyc04dd092018-07-23 14:16:47 -07002372 long i;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002373 int cpu;
2374 int firsterr = 0;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07002375 static struct rcu_torture_ops *torture_ops[] = {
Paul E. McKenneyc770c822018-07-07 10:28:07 -07002376 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
Paul E. McKenneyc682db52019-04-19 07:38:27 -07002377 &busted_srcud_ops, &tasks_ops, &trivial_ops,
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07002378 };
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002379
Paul E. McKenneya2f25772017-11-21 20:19:17 -08002380 if (!torture_init_begin(torture_type, verbose))
Paul E. McKenney52280842014-04-07 09:14:11 -07002381 return -EBUSY;
Paul E. McKenney343e9092008-12-15 16:13:07 -08002382
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002383 /* Process args and tell the world that the torturer is on the job. */
Josh Triplettade5fb82007-05-08 00:33:22 -07002384 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002385 cur_ops = torture_ops[i];
Josh Triplettade5fb82007-05-08 00:33:22 -07002386 if (strcmp(torture_type, cur_ops->name) == 0)
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002387 break;
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002388 }
Josh Triplettade5fb82007-05-08 00:33:22 -07002389 if (i == ARRAY_SIZE(torture_ops)) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07002390 pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
2391 torture_type);
2392 pr_alert("rcu-torture types:");
Paul E. McKenneycf886c42009-10-25 19:03:54 -07002393 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
Joe Perchesa7538352018-05-14 13:27:33 -07002394 pr_cont(" %s", torture_ops[i]->name);
2395 pr_cont("\n");
Paul E. McKenneye746b552018-07-07 17:35:22 -07002396 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
Paul E. McKenney889d4872015-08-24 11:37:58 -07002397 firsterr = -EINVAL;
Paul E. McKenneyb813afa2019-03-21 09:27:28 -07002398 cur_ops = NULL;
Paul E. McKenney889d4872015-08-24 11:37:58 -07002399 goto unwind;
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002400 }
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002401 if (cur_ops->fqs == NULL && fqs_duration != 0) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07002402 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002403 fqs_duration = 0;
2404 }
Josh Triplettc8e5b162007-05-08 00:33:20 -07002405 if (cur_ops->init)
Paul E. McKenney889d4872015-08-24 11:37:58 -07002406 cur_ops->init();
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002407
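	/*
	 * Note that a negative nreaders is subtracted below, so the
	 * usual negative default ends up requesting roughly one reader
	 * kthread per online CPU (for example, -1 yields
	 * num_online_cpus() - 1 readers).
	 */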
Paul E. McKenney64e4b432014-03-12 10:26:35 -07002408 if (nreaders >= 0) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002409 nrealreaders = nreaders;
Paul E. McKenney64e4b432014-03-12 10:26:35 -07002410 } else {
Paul E. McKenney3838cc12015-03-12 13:55:48 -07002411 nrealreaders = num_online_cpus() - 2 - nreaders;
Paul E. McKenney64e4b432014-03-12 10:26:35 -07002412 if (nrealreaders <= 0)
2413 nrealreaders = 1;
2414 }
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002415 rcu_torture_print_module_parms(cur_ops, "Start of test");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002416
2417 /* Set up the freelist. */
2418
2419 INIT_LIST_HEAD(&rcu_torture_freelist);
Ahmed S. Darwish788e7702007-05-08 00:33:14 -07002420 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
Paul E. McKenney996417d2005-11-18 01:10:50 -08002421 rcu_tortures[i].rtort_mbtest = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002422 list_add_tail(&rcu_tortures[i].rtort_free,
2423 &rcu_torture_freelist);
2424 }
2425
2426 /* Initialize the statistics so that each run gets its own numbers. */
2427
2428 rcu_torture_current = NULL;
2429 rcu_torture_current_version = 0;
2430 atomic_set(&n_rcu_torture_alloc, 0);
2431 atomic_set(&n_rcu_torture_alloc_fail, 0);
2432 atomic_set(&n_rcu_torture_free, 0);
Paul E. McKenney996417d2005-11-18 01:10:50 -08002433 atomic_set(&n_rcu_torture_mberror, 0);
2434 atomic_set(&n_rcu_torture_error, 0);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002435 n_rcu_torture_barrier_error = 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002436 n_rcu_torture_boost_ktrerror = 0;
2437 n_rcu_torture_boost_rterror = 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002438 n_rcu_torture_boost_failure = 0;
2439 n_rcu_torture_boosts = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002440 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2441 atomic_set(&rcu_torture_wcount[i], 0);
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08002442 for_each_possible_cpu(cpu) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002443 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2444 per_cpu(rcu_torture_count, cpu)[i] = 0;
2445 per_cpu(rcu_torture_batch, cpu)[i] = 0;
2446 }
2447 }
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07002448 err_segs_recorded = 0;
2449 rt_read_nsegs = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002450
2451 /* Start up the kthreads. */
2452
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002453 firsterr = torture_create_kthread(rcu_torture_writer, NULL,
2454 writer_task);
2455 if (firsterr)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002456 goto unwind;
Paul E. McKenney4444d852015-05-14 15:42:40 -07002457 if (nfakewriters > 0) {
Paul E. McKenney68a675d2017-12-01 14:26:56 -08002458 fakewriter_tasks = kcalloc(nfakewriters,
Paul E. McKenney4444d852015-05-14 15:42:40 -07002459 sizeof(fakewriter_tasks[0]),
2460 GFP_KERNEL);
2461 if (fakewriter_tasks == NULL) {
2462 VERBOSE_TOROUT_ERRSTRING("out of memory");
2463 firsterr = -ENOMEM;
2464 goto unwind;
2465 }
Josh Triplettb772e1d2006-10-04 02:17:13 -07002466 }
2467 for (i = 0; i < nfakewriters; i++) {
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002468 firsterr = torture_create_kthread(rcu_torture_fakewriter,
2469 NULL, fakewriter_tasks[i]);
2470 if (firsterr)
Josh Triplettb772e1d2006-10-04 02:17:13 -07002471 goto unwind;
Josh Triplettb772e1d2006-10-04 02:17:13 -07002472 }
Paul E. McKenney68a675d2017-12-01 14:26:56 -08002473 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002474 GFP_KERNEL);
2475 if (reader_tasks == NULL) {
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08002476 VERBOSE_TOROUT_ERRSTRING("out of memory");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002477 firsterr = -ENOMEM;
2478 goto unwind;
2479 }
2480 for (i = 0; i < nrealreaders; i++) {
Paul E. McKenneyc04dd092018-07-23 14:16:47 -07002481 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002482 reader_tasks[i]);
2483 if (firsterr)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002484 goto unwind;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002485 }
2486 if (stat_interval > 0) {
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002487 firsterr = torture_create_kthread(rcu_torture_stats, NULL,
2488 stats_task);
2489 if (firsterr)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002490 goto unwind;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002491 }
Paul E. McKenneye8e255f2015-05-14 16:55:45 -07002492 if (test_no_idle_hz && shuffle_interval > 0) {
Paul E. McKenney3808dc92014-01-28 15:29:21 -08002493 firsterr = torture_shuffle_init(shuffle_interval * HZ);
2494 if (firsterr)
Rusty Russell73d0a4b2009-03-30 22:05:16 -06002495 goto unwind;
Srivatsa Vaddagirid84f5202006-01-08 01:03:42 -08002496 }
Paul E. McKenneyd120f652008-06-18 05:21:44 -07002497 if (stutter < 0)
2498 stutter = 0;
2499 if (stutter) {
Paul E. McKenneyff3bf922019-04-09 14:44:49 -07002500 int t;
2501
2502 t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
2503 firsterr = torture_stutter_init(stutter * HZ, t);
Paul E. McKenney628edaa2014-01-31 11:57:43 -08002504 if (firsterr)
Paul E. McKenneyd120f652008-06-18 05:21:44 -07002505 goto unwind;
Paul E. McKenneyd120f652008-06-18 05:21:44 -07002506 }
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002507 if (fqs_duration < 0)
2508 fqs_duration = 0;
2509 if (fqs_duration) {
Paul E. McKenney628edaa2014-01-31 11:57:43 -08002510 /* Create the fqs thread */
Paul E. McKenneyd0d06062014-03-17 20:56:45 -07002511 firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
2512 fqs_task);
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002513 if (firsterr)
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002514 goto unwind;
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002515 }
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002516 if (test_boost_interval < 1)
2517 test_boost_interval = 1;
2518 if (test_boost_duration < 2)
2519 test_boost_duration = 2;
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002520 if (rcu_torture_can_boost()) {
2522 boost_starttime = jiffies + test_boost_interval * HZ;
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02002523
2524 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
2525 rcutorture_booster_init,
2526 rcutorture_booster_cleanup);
2527 if (firsterr < 0)
2528 goto unwind;
2529 rcutor_hp = firsterr;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002530 }
Paul E. McKenney60013d52019-07-10 08:30:00 -07002531 shutdown_jiffies = jiffies + shutdown_secs * HZ;
Paul E. McKenney01025eb2014-01-31 15:15:02 -08002532 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
2533 if (firsterr)
Paul E. McKenneye991dbc2014-01-31 14:52:13 -08002534 goto unwind;
Paul E. McKenney3a6cb582018-12-10 09:44:52 -08002535 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
2536 rcutorture_sync);
Paul E. McKenney01025eb2014-01-31 15:15:02 -08002537 if (firsterr)
Paul E. McKenney37e377d2012-02-17 22:12:18 -08002538 goto unwind;
Paul E. McKenney01025eb2014-01-31 15:15:02 -08002539 firsterr = rcu_torture_stall_init();
2540 if (firsterr)
Paul E. McKenney37e377d2012-02-17 22:12:18 -08002541 goto unwind;
Paul E. McKenney1b272912018-07-18 14:32:31 -07002542 firsterr = rcu_torture_fwd_prog_init();
2543 if (firsterr)
2544 goto unwind;
Paul E. McKenney01025eb2014-01-31 15:15:02 -08002545 firsterr = rcu_torture_barrier_init();
2546 if (firsterr)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002547 goto unwind;
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002548 if (object_debug)
2549 rcu_test_debug_objects();
Paul E. McKenneyb5daa8f2014-01-30 13:38:09 -08002550 torture_init_end();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002551 return 0;
2552
2553unwind:
Paul E. McKenneyb5daa8f2014-01-30 13:38:09 -08002554 torture_init_end();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002555 rcu_torture_cleanup();
2556 return firsterr;
2557}
2558
2559module_init(rcu_torture_init);
2560module_exit(rcu_torture_cleanup);
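
/*
 * Example invocation, assuming a modular build and that the tunables used
 * above (torture_type, nreaders, stat_interval, and so on) are exposed as
 * module parameters elsewhere in this file:
 *
 *	modprobe rcutorture torture_type=rcu nreaders=4 stat_interval=30
 */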