/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");


/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x1	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x2	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x4	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RCU	 0x8	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 4	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | \
				  RCUTORTURE_RDR_PREEMPT)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
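/*
 * Illustrative sketch (not part of the test logic): the extendables bits
 * above combine as a plain bitmask, so a reader-state word can be built
 * and queried along these lines, using hypothetical local variables
 * "statesnew" and "oldstate":
 *
 *	unsigned long statesnew = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_PREEMPT;
 *
 *	if (statesnew & RCUTORTURE_RDR_BH)
 *		local_bh_disable();
 *	if (statesnew & RCUTORTURE_RDR_PREEMPT)
 *		preempt_disable();
 *
 * The SRCU index, when one is needed, rides in the bits above
 * RCUTORTURE_RDR_SHIFT, for example idx = oldstate >> RCUTORTURE_RDR_SHIFT.
 */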

torture_param(int, cbflood_inter_holdoff, HZ,
	      "Holdoff between floods (jiffies)");
torture_param(int, cbflood_intra_holdoff, 1,
	      "Holdoff between bursts (jiffies)");
torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
torture_param(int, cbflood_n_per_burst, 20000,
	      "# callbacks per burst in flood");
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");
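/*
 * Each torture_param() above becomes an ordinary module parameter in the
 * "rcutorture." namespace.  For example (values here are purely
 * illustrative), the test can be tuned at modprobe time or, for a
 * built-in rcutorture, on the kernel boot line:
 *
 *	modprobe rcutorture torture_type=srcud fwd_progress=0 stall_cpu=22
 *
 *	rcutorture.torture_type=rcu_bh rcutorture.onoff_interval=200
 */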

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");

static int nrealreaders;
static int ncbflooders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct **cbflood_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static atomic_long_t n_cbfloods;
static struct list_head rcu_torture_removed;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */
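/*
 * Note on the conversion above: do_div() divides its first argument in
 * place and returns the remainder, so after do_div(ts, NSEC_PER_USEC) the
 * timestamp "ts" holds microseconds.  A minimal sketch with made-up
 * values (not part of the test):
 *
 *	u64 ns = 1234567;
 *	u32 rem = do_div(ns, NSEC_PER_USEC);	// now ns == 1234, rem == 567
 */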

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/* and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
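/*
 * Minimal usage sketch for the pool above (hypothetical caller, not part
 * of the test): allocate an element, use it, and return it to the
 * freelist once no reader can still reference it.
 *
 *	struct rcu_torture *rp = rcu_torture_alloc();
 *
 *	if (rp) {
 *		rp->rtort_pipe_count = 0;
 *		rp->rtort_mbtest = 1;
 *		// ... publish rp to readers, wait out a grace period ...
 *		rcu_torture_free(rp);
 *	}
 */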

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int ext_irq_conflict;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
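/*
 * Everything downstream dispatches through cur_ops, so a single reader
 * body serves all RCU flavors.  A simplified sketch of how the read-side
 * hooks compose (the real readers add statistics and reader-extension
 * logic; "rand" here stands for a torture_random_state):
 *
 *	int idx;
 *
 *	idx = cur_ops->readlock();
 *	cur_ops->read_delay(&rand);
 *	cur_ops->readunlock(idx);
 */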

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct torture_random_state *rrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	const unsigned long longdelay_ms = 50;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		mdelay(longdelay_ms);
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500)))
		torture_preempt_schedule();  /* QS only if preemptible. */
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}
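/*
 * Illustrative note on the pipeline accounting above: rtort_pipe_count
 * records how many grace periods have elapsed since an element was
 * replaced, and each update bumps the matching rcu_torture_wcount[]
 * histogram bucket.  For example, an element that has survived three
 * updates has contributed to buckets 0, 1, and 2; only after
 * RCU_TORTURE_PIPE_LEN (10) stages is it returned to the freelist, with
 * rtort_mbtest cleared so that any later reader access can be flagged.
 */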

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype		= RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.get_state	= get_state_synchronize_rcu,
	.cond_sync	= cond_synchronize_rcu,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.stall_dur	= rcu_jiffies_till_stall_check,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_bh_ops = {
	.ttype		= RCU_BH_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.get_gp_seq	= rcu_bh_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_bh_torture_deferred_free,
	.sync		= synchronize_rcu_bh,
	.exp_sync	= synchronize_rcu_bh_expedited,
	.call		= call_rcu_bh,
	.cb_barrier	= rcu_barrier_bh,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.extendables	= (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ),
	.ext_irq_conflict = RCUTORTURE_RDR_RCU,
	.name		= "rcu_bh"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_read_delay(struct torture_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task())
		schedule_timeout_interruptible(longdelay);
	else
		rcu_read_delay(rrsp);
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	static DEFINE_TORTURE_RANDOM(rand);

	if (torture_random(&rand) & 0x800)
		cleanup_srcu_struct(&srcu_ctld);
	else
		cleanup_srcu_struct_quiesced(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops sched_ops = {
	.ttype		= RCU_SCHED_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.get_gp_seq	= rcu_sched_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_sched_torture_deferred_free,
	.sync		= synchronize_sched,
	.exp_sync	= synchronize_sched_expedited,
	.get_state	= get_state_synchronize_sched,
	.cond_sync	= cond_synchronize_sched,
	.call		= call_rcu_sched,
	.cb_barrier	= rcu_barrier_sched,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "sched"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks"
};

static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}
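/*
 * Why the indirection above: flavors whose grace-period sequence numbers
 * keep state flags in the low-order bits would overstate the number of
 * elapsed grace periods under a plain "new - old", so they supply a
 * ->gp_diff hook (rcu_seq_diff()) to compensate; flavors without one use
 * ordinary subtraction.  Hypothetical usage sketch:
 *
 *	unsigned long start = cur_ops->get_gp_seq();
 *	// ... wait for some grace periods ...
 *	pr_alert("GPs elapsed: %lu\n",
 *		 rcutorture_seq_diff(cur_ops->get_gp_seq(), start));
 */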

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled.  This is only possible if rcutorture is built-in;
	 * otherwise the user should manually do this by setting the
	 * sched_rt_period_us and sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}
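/*
 * When rcutorture is built as a module, the manual step the comment above
 * refers to amounts to (illustrative shell commands, run as root):
 *
 *	# note the current value, then disable RT throttling entirely
 *	cat /proc/sys/kernel/sched_rt_runtime_us
 *	echo -1 > /proc/sys/kernel/sched_rt_runtime_us
 *
 * restoring the saved value once the test run is finished.
 */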

static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true; /* failed */
	}

	return false; /* passed */
}
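/*
 * Worked example of the failure test above (illustrative numbers only):
 * with the default test_boost_duration of 4 seconds and HZ=1000, a
 * callback posted at the start of a boost interval must be invoked within
 * 4 * 1000 - 500 = 3500 jiffies (3.5 s); anything slower is counted as a
 * priority-boosting failure.
 */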

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Track whether the test failed already in this test interval. */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								  jiffies);
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boost never happened, then inflight will always be 1;
		 * in that case the boost check would never happen in the
		 * above loop, so do another one here.
		 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

static void rcu_torture_cbflood_cb(struct rcu_head *rhp)
{
}

/*
 * RCU torture callback-flood kthread.  Repeatedly induces bursts of calls
 * to call_rcu() or analogous, increasing the probability of occurrence
 * of callback-overflow corner cases.
 */
static int
rcu_torture_cbflood(void *arg)
{
	int err = 1;
	int i;
	int j;
	struct rcu_head *rhp;

	if (cbflood_n_per_burst > 0 &&
	    cbflood_inter_holdoff > 0 &&
	    cbflood_intra_holdoff > 0 &&
	    cur_ops->call &&
	    cur_ops->cb_barrier) {
		rhp = vmalloc(array3_size(cbflood_n_burst,
					  cbflood_n_per_burst,
					  sizeof(*rhp)));
		err = !rhp;
	}
	if (err) {
		VERBOSE_TOROUT_STRING("rcu_torture_cbflood disabled: Bad args or OOM");
		goto wait_for_stop;
	}
	VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
	do {
		schedule_timeout_interruptible(cbflood_inter_holdoff);
		atomic_long_inc(&n_cbfloods);
		WARN_ON(signal_pending(current));
		for (i = 0; i < cbflood_n_burst; i++) {
			for (j = 0; j < cbflood_n_per_burst; j++) {
				cur_ops->call(&rhp[i * cbflood_n_per_burst + j],
					      rcu_torture_cbflood_cb);
			}
			schedule_timeout_interruptible(cbflood_intra_holdoff);
			WARN_ON(signal_pending(current));
		}
		cur_ops->cb_barrier();
		stutter_wait("rcu_torture_cbflood");
	} while (!torture_must_stop());
	vfree(rhp);
wait_for_stop:
	torture_kthread_stopping("rcu_torture_cbflood");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives, so don't try updating.
1069 * The resulting test won't be testing much, hence the
1070 * above WARN_ONCE().
1071 */
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001072 rcu_torture_writer_state = RTWS_STOPPING;
1073 torture_kthread_stopping("rcu_torture_writer");
1074 }
1075
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001076 do {
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001077 rcu_torture_writer_state = RTWS_FIXED_DELAY;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001078 schedule_timeout_uninterruptible(1);
Paul E. McKenneya71fca52009-09-18 10:28:19 -07001079 rp = rcu_torture_alloc();
1080 if (rp == NULL)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001081 continue;
1082 rp->rtort_pipe_count = 0;
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001083 rcu_torture_writer_state = RTWS_DELAY;
Paul E. McKenney51b11302014-01-27 11:49:39 -08001084 udelay(torture_random(&rand) & 0x3ff);
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001085 rcu_torture_writer_state = RTWS_REPLACE;
Paul E. McKenney0ddea0e2010-09-19 21:06:14 -07001086 old_rp = rcu_dereference_check(rcu_torture_current,
1087 current == writer_task);
Paul E. McKenney996417d2005-11-18 01:10:50 -08001088 rp->rtort_mbtest = 1;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001089 rcu_assign_pointer(rcu_torture_current, rp);
Paul E. McKenney9b2619a2009-09-23 09:50:43 -07001090 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
Josh Triplettc8e5b162007-05-08 00:33:20 -07001091 if (old_rp) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001092 i = old_rp->rtort_pipe_count;
1093 if (i > RCU_TORTURE_PIPE_LEN)
1094 i = RCU_TORTURE_PIPE_LEN;
1095 atomic_inc(&rcu_torture_wcount[i]);
1096 old_rp->rtort_pipe_count++;
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001097 switch (synctype[torture_random(&rand) % nsynctypes]) {
1098 case RTWS_DEF_FREE:
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001099 rcu_torture_writer_state = RTWS_DEF_FREE;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001100 cur_ops->deferred_free(old_rp);
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001101 break;
1102 case RTWS_EXP_SYNC:
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001103 rcu_torture_writer_state = RTWS_EXP_SYNC;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001104 cur_ops->exp_sync();
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001105 rcu_torture_pipe_update(old_rp);
1106 break;
1107 case RTWS_COND_GET:
1108 rcu_torture_writer_state = RTWS_COND_GET;
1109 gp_snap = cur_ops->get_state();
1110 i = torture_random(&rand) % 16;
1111 if (i != 0)
1112 schedule_timeout_interruptible(i);
1113 udelay(torture_random(&rand) % 1000);
1114 rcu_torture_writer_state = RTWS_COND_SYNC;
1115 cur_ops->cond_sync(gp_snap);
1116 rcu_torture_pipe_update(old_rp);
1117 break;
Paul E. McKenneyf0bf8fa2014-03-21 16:17:56 -07001118 case RTWS_SYNC:
1119 rcu_torture_writer_state = RTWS_SYNC;
1120 cur_ops->sync();
1121 rcu_torture_pipe_update(old_rp);
1122 break;
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001123 default:
1124 WARN_ON_ONCE(1);
1125 break;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001126 }
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001127 }
Paul E. McKenney1b272912018-07-18 14:32:31 -07001128 WRITE_ONCE(rcu_torture_current_version,
1129 rcu_torture_current_version + 1);
Paul E. McKenney4bb3c5f2015-02-18 16:31:29 -08001130 /* Cycle through nesting levels of rcu_expedite_gp() calls. */
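		/*
		 * When expediting == 0, (!!expediting - 1) is all ones, so the
		 * test below fires only about once in 256 passes.  Once a
		 * cycle begins, that term is zero, so every pass nests one
		 * more rcu_expedite_gp() until the count exceeds 3, at which
		 * point the sign flips and the nesting is unwound one
		 * rcu_unexpedite_gp() per pass back down to zero.
		 */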
1131 if (can_expedite &&
1132 !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
1133 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
1134 if (expediting >= 0)
1135 rcu_expedite_gp();
1136 else
1137 rcu_unexpedite_gp();
1138 if (++expediting > 3)
1139 expediting = -expediting;
Paul E. McKenneyf7c0e6a2017-12-08 11:37:24 -08001140 } else if (!can_expedite) { /* Disabled during boot, recheck. */
1141 can_expedite = !rcu_gp_is_expedited() &&
1142 !rcu_gp_is_normal();
Paul E. McKenney4bb3c5f2015-02-18 16:31:29 -08001143 }
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001144 rcu_torture_writer_state = RTWS_STUTTER;
Paul E. McKenney628edaa2014-01-31 11:57:43 -08001145 stutter_wait("rcu_torture_writer");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001146 } while (!torture_must_stop());
Paul E. McKenney4bb3c5f2015-02-18 16:31:29 -08001147 /* Reset expediting back to unexpedited. */
1148 if (expediting > 0)
1149 expediting = -expediting;
1150 while (can_expedite && expediting++ < 0)
1151 rcu_unexpedite_gp();
1152 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
Paul E. McKenneyf7c0e6a2017-12-08 11:37:24 -08001153 if (!can_expedite)
1154 pr_alert("%s" TORTURE_FLAG
1155 " Dynamic grace-period expediting was disabled.\n",
1156 torture_type);
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001157 rcu_torture_writer_state = RTWS_STOPPING;
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001158 torture_kthread_stopping("rcu_torture_writer");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001159 return 0;
1160}
1161
1162/*
Josh Triplettb772e1d2006-10-04 02:17:13 -07001163 * RCU torture fake writer kthread. Repeatedly calls sync, with a random
1164 * delay between calls.
1165 */
1166static int
1167rcu_torture_fakewriter(void *arg)
1168{
Paul E. McKenney51b11302014-01-27 11:49:39 -08001169 DEFINE_TORTURE_RANDOM(rand);
Josh Triplettb772e1d2006-10-04 02:17:13 -07001170
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001171 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
Linus Torvalds971eae72014-03-31 11:21:19 -07001172 set_user_nice(current, MAX_NICE);
Josh Triplettb772e1d2006-10-04 02:17:13 -07001173
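	/*
	 * Each pass sleeps for a short random interval, then either waits
	 * for all outstanding callbacks via ->cb_barrier() (rarely), or
	 * issues a normal or expedited synchronous grace period as selected
	 * by gp_normal and gp_exp.
	 */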
1174 do {
Paul E. McKenney51b11302014-01-27 11:49:39 -08001175 schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
1176 udelay(torture_random(&rand) & 0x3ff);
Paul E. McKenney72472a02012-05-29 17:50:51 -07001177 if (cur_ops->cb_barrier != NULL &&
Paul E. McKenney51b11302014-01-27 11:49:39 -08001178 torture_random(&rand) % (nfakewriters * 8) == 0) {
Paul E. McKenney72472a02012-05-29 17:50:51 -07001179 cur_ops->cb_barrier();
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001180 } else if (gp_normal == gp_exp) {
Paul E. McKenneyeb033992017-12-08 10:48:41 -08001181 if (cur_ops->sync && torture_random(&rand) & 0x80)
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001182 cur_ops->sync();
Paul E. McKenneyeb033992017-12-08 10:48:41 -08001183 else if (cur_ops->exp_sync)
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001184 cur_ops->exp_sync();
Paul E. McKenneyeb033992017-12-08 10:48:41 -08001185 } else if (gp_normal && cur_ops->sync) {
Paul E. McKenney72472a02012-05-29 17:50:51 -07001186 cur_ops->sync();
Paul E. McKenneyeb033992017-12-08 10:48:41 -08001187 } else if (cur_ops->exp_sync) {
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001188 cur_ops->exp_sync();
1189 }
Paul E. McKenney628edaa2014-01-31 11:57:43 -08001190 stutter_wait("rcu_torture_fakewriter");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001191 } while (!torture_must_stop());
Josh Triplettb772e1d2006-10-04 02:17:13 -07001192
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001193 torture_kthread_stopping("rcu_torture_fakewriter");
Josh Triplettb772e1d2006-10-04 02:17:13 -07001194 return 0;
1195}
1196
Paul E. McKenneyf34c85852017-07-20 15:27:32 -07001197static void rcu_torture_timer_cb(struct rcu_head *rhp)
1198{
1199 kfree(rhp);
1200}
1201
Josh Triplettb772e1d2006-10-04 02:17:13 -07001202/*
Paul E. McKenney2397d072018-05-25 07:29:25 -07001203 * Do one extension of an RCU read-side critical section using the
1204 * current reader state in readstate (set to zero for initial entry
1205 * to extended critical section), set the new state as specified by
1206 * newstate (set to zero for final exit from extended critical section),
1207 * and random-number-generator state in trsp. If this is neither the
1208	 * beginning nor end of the critical section and if there was actually a
1209 * change, do a ->read_delay().
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001210 */
Paul E. McKenney2397d072018-05-25 07:29:25 -07001211static void rcutorture_one_extend(int *readstate, int newstate,
1212 struct torture_random_state *trsp)
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001213{
Paul E. McKenney2397d072018-05-25 07:29:25 -07001214 int idxnew = -1;
1215 int idxold = *readstate;
1216 int statesnew = ~*readstate & newstate;
1217 int statesold = *readstate & ~newstate;
1218
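	/*
	 * The readstate word packs the RCUTORTURE_RDR_* protection flags into
	 * its low-order bits; the index returned by ->readlock() lives above
	 * RCUTORTURE_RDR_SHIFT so it can later be handed back to
	 * ->readunlock() when RCUTORTURE_RDR_RCU protection is dropped.
	 */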
1219 WARN_ON_ONCE(idxold < 0);
1220 WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
1221
1222 /* First, put new protection in place to avoid critical-section gap. */
1223 if (statesnew & RCUTORTURE_RDR_BH)
1224 local_bh_disable();
1225 if (statesnew & RCUTORTURE_RDR_IRQ)
1226 local_irq_disable();
1227 if (statesnew & RCUTORTURE_RDR_PREEMPT)
1228 preempt_disable();
1229 if (statesnew & RCUTORTURE_RDR_RCU)
1230 idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
1231
1232 /* Next, remove old protection, irq first due to bh conflict. */
1233 if (statesold & RCUTORTURE_RDR_IRQ)
1234 local_irq_enable();
1235 if (statesold & RCUTORTURE_RDR_BH)
1236 local_bh_enable();
1237 if (statesold & RCUTORTURE_RDR_PREEMPT)
1238 preempt_enable();
1239 if (statesold & RCUTORTURE_RDR_RCU)
1240 cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
1241
1242 /* Delay if neither beginning nor end and there was a change. */
1243 if ((statesnew || statesold) && *readstate && newstate)
1244 cur_ops->read_delay(trsp);
1245
1246 /* Update the reader state. */
1247 if (idxnew == -1)
1248 idxnew = idxold & ~RCUTORTURE_RDR_MASK;
1249 WARN_ON_ONCE(idxnew < 0);
1250 WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
1251 *readstate = idxnew | newstate;
1252 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
1253 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
1254}
1255
1256/* Return the biggest extendables mask given current RCU and boot parameters. */
1257static int rcutorture_extend_mask_max(void)
1258{
1259 int mask;
1260
1261 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
1262 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
1263 mask = mask | RCUTORTURE_RDR_RCU;
1264 return mask;
1265}
1266
1267/* Return a random protection state mask, but with at least one bit set. */
1268static int
1269rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
1270{
1271 int mask = rcutorture_extend_mask_max();
Paul E. McKenneybf1bef52018-06-10 08:50:09 -07001272 unsigned long randmask1 = torture_random(trsp) >> 8;
1273 unsigned long randmask2 = randmask1 >> 1;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001274
1275 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
Paul E. McKenneybf1bef52018-06-10 08:50:09 -07001276 /* Half the time lots of bits, half the time only one bit. */
1277 if (randmask1 & 0x1)
1278 mask = mask & randmask2;
1279 else
1280 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
Paul E. McKenney2397d072018-05-25 07:29:25 -07001281 if ((mask & RCUTORTURE_RDR_IRQ) &&
1282 !(mask & RCUTORTURE_RDR_BH) &&
1283 (oldmask & RCUTORTURE_RDR_BH))
1284 mask |= RCUTORTURE_RDR_BH; /* Can't enable bh w/irq disabled. */
1285 if ((mask & RCUTORTURE_RDR_IRQ) &&
1286 !(mask & cur_ops->ext_irq_conflict) &&
1287 (oldmask & cur_ops->ext_irq_conflict))
1288 mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */
1289 return mask ?: RCUTORTURE_RDR_RCU;
1290}
1291
1292/*
1293 * Do a randomly selected number of extensions of an existing RCU read-side
1294 * critical section.
1295 */
1296static void rcutorture_loop_extend(int *readstate,
1297 struct torture_random_state *trsp)
1298{
1299 int i;
1300 int mask = rcutorture_extend_mask_max();
1301
1302 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
1303 if (!((mask - 1) & mask))
1304 return; /* Current RCU flavor not extendable. */
1305 i = (torture_random(trsp) >> 3) & RCUTORTURE_RDR_MAX_LOOPS;
1306 while (i--) {
1307 mask = rcutorture_extend_mask(*readstate, trsp);
1308 rcutorture_one_extend(readstate, mask, trsp);
1309 }
1310}
1311
1312/*
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001313 * Do one read-side critical section, returning false if there was
1314 * no data to read. Can be invoked both from process context and
1315 * from a timer handler.
1316 */
1317static bool rcu_torture_one_read(struct torture_random_state *trsp)
1318{
Paul E. McKenney917963d2014-11-21 17:10:16 -08001319 unsigned long started;
Paul E. McKenney6b80da42014-11-21 14:19:26 -08001320 unsigned long completed;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001321 int newstate;
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001322 struct rcu_torture *p;
1323 int pipe_count;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001324 int readstate = 0;
Paul E. McKenney52494532012-11-14 16:26:40 -08001325 unsigned long long ts;
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001326
Paul E. McKenney2397d072018-05-25 07:29:25 -07001327 newstate = rcutorture_extend_mask(readstate, trsp);
1328 rcutorture_one_extend(&readstate, newstate, trsp);
Paul E. McKenney17ef2fe2018-04-27 11:39:34 -07001329 started = cur_ops->get_gp_seq();
Steven Rostedte4aa0da2013-02-04 13:36:13 -05001330 ts = rcu_trace_clock_local();
Paul E. McKenney632ee202010-02-22 17:04:45 -08001331 p = rcu_dereference_check(rcu_torture_current,
Paul E. McKenney632ee202010-02-22 17:04:45 -08001332 rcu_read_lock_bh_held() ||
1333 rcu_read_lock_sched_held() ||
Paul E. McKenney5be5d1a2015-06-30 08:57:57 -07001334 srcu_read_lock_held(srcu_ctlp) ||
1335 torturing_tasks());
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001336 if (p == NULL) {
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001337 /* Wait for rcu_torture_writer to get underway */
Paul E. McKenney2397d072018-05-25 07:29:25 -07001338 rcutorture_one_extend(&readstate, 0, trsp);
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001339 return false;
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001340 }
1341 if (p->rtort_mbtest == 0)
1342 atomic_inc(&n_rcu_torture_mberror);
Paul E. McKenney2397d072018-05-25 07:29:25 -07001343 rcutorture_loop_extend(&readstate, trsp);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001344 preempt_disable();
1345 pipe_count = p->rtort_pipe_count;
1346 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1347 /* Should not happen, but... */
1348 pipe_count = RCU_TORTURE_PIPE_LEN;
1349 }
Paul E. McKenney17ef2fe2018-04-27 11:39:34 -07001350 completed = cur_ops->get_gp_seq();
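	/*
	 * A pipe count greater than one means this element was still visible
	 * to a reader after the update side had already pushed it through at
	 * least one grace period, so dump diagnostics for the apparently
	 * too-short grace period.
	 */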
Paul E. McKenney52494532012-11-14 16:26:40 -08001351 if (pipe_count > 1) {
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001352 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1353 ts, started, completed);
Paul E. McKenney274529b2016-03-21 19:46:04 -07001354 rcu_ftrace_dump(DUMP_ALL);
Paul E. McKenney52494532012-11-14 16:26:40 -08001355 }
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001356 __this_cpu_inc(rcu_torture_count[pipe_count]);
Paul E. McKenneyd72193122018-05-15 15:24:41 -07001357 completed = rcutorture_seq_diff(completed, started);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001358 if (completed > RCU_TORTURE_PIPE_LEN) {
1359 /* Should not happen, but... */
1360 completed = RCU_TORTURE_PIPE_LEN;
1361 }
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001362 __this_cpu_inc(rcu_torture_batch[completed]);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001363 preempt_enable();
Paul E. McKenney2397d072018-05-25 07:29:25 -07001364 rcutorture_one_extend(&readstate, 0, trsp);
1365 WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001366 return true;
1367}
1368
Paul E. McKenney3025520e2018-05-22 11:38:47 -07001369static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
1370
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001371/*
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001372 * RCU torture reader from timer handler. Dereferences rcu_torture_current,
1373 * incrementing the corresponding element of the pipeline array. The
1374 * counter in the element should never be greater than 1, otherwise, the
1375 * RCU implementation is broken.
1376 */
1377static void rcu_torture_timer(struct timer_list *unused)
1378{
Paul E. McKenney8da9a592018-05-22 11:17:51 -07001379 atomic_long_inc(&n_rcu_torture_timers);
Paul E. McKenney241b4252018-05-22 11:59:31 -07001380 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));
Paul E. McKenneyf34c85852017-07-20 15:27:32 -07001381
1382 /* Test call_rcu() invocation from interrupt handler. */
1383 if (cur_ops->call) {
1384 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
1385
1386 if (rhp)
1387 cur_ops->call(rhp, rcu_torture_timer_cb);
1388 }
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001389}
1390
1391/*
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001392 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
1393 * incrementing the corresponding element of the pipeline array. The
1394 * counter in the element should never be greater than 1, otherwise, the
1395 * RCU implementation is broken.
1396 */
1397static int
1398rcu_torture_reader(void *arg)
1399{
Paul E. McKenney444da512018-07-04 14:14:42 -07001400 unsigned long lastsleep = jiffies;
Paul E. McKenney51b11302014-01-27 11:49:39 -08001401 DEFINE_TORTURE_RANDOM(rand);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001402 struct timer_list t;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001403
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001404 VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
Linus Torvalds971eae72014-03-31 11:21:19 -07001405 set_user_nice(current, MAX_NICE);
Paul E. McKenney0acc5122009-06-25 09:08:17 -07001406 if (irqreader && cur_ops->irq_capable)
Kees Cookfd30b712017-10-22 17:58:54 -07001407 timer_setup_on_stack(&t, rcu_torture_timer, 0);
Ingo Molnardbdf65b2005-11-13 16:07:22 -08001408
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001409 do {
Paul E. McKenney0acc5122009-06-25 09:08:17 -07001410 if (irqreader && cur_ops->irq_capable) {
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001411 if (!timer_pending(&t))
Paul E. McKenney6155fec2010-02-22 17:05:04 -08001412 mod_timer(&t, jiffies + 1);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001413 }
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001414 if (!rcu_torture_one_read(&rand))
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001415 schedule_timeout_interruptible(HZ);
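		/*
		 * Sleep for at least one jiffy every ten jiffies or so, so
		 * that this kthread does not monopolize its CPU.
		 */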
Paul E. McKenney444da512018-07-04 14:14:42 -07001416 if (time_after(jiffies, lastsleep)) {
1417 schedule_timeout_interruptible(1);
1418 lastsleep = jiffies + 10;
1419 }
Paul E. McKenney628edaa2014-01-31 11:57:43 -08001420 stutter_wait("rcu_torture_reader");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001421 } while (!torture_must_stop());
Thomas Gleixner424c1b62014-03-23 08:58:27 -07001422 if (irqreader && cur_ops->irq_capable) {
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001423 del_timer_sync(&t);
Thomas Gleixner424c1b62014-03-23 08:58:27 -07001424 destroy_timer_on_stack(&t);
1425 }
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001426 torture_kthread_stopping("rcu_torture_reader");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001427 return 0;
1428}
1429
1430/*
Joe Percheseea203f2014-07-14 09:16:15 -04001431 * Print torture statistics. Caller must ensure that there is only
1432 * one call to this function at a given time!!! This is normally
1433 * accomplished by relying on the module system to only have one copy
1434 * of the module loaded, and then by giving the rcu_torture_stats
1435 * kthread full control (or the init/cleanup functions when rcu_torture_stats
1436 * thread is not running).
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001437 */
Chen Gangd1008952013-11-07 10:30:25 +08001438static void
Joe Percheseea203f2014-07-14 09:16:15 -04001439rcu_torture_stats_print(void)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001440{
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001441 int cpu;
1442 int i;
1443 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1444 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001445 static unsigned long rtcv_snap = ULONG_MAX;
Paul E. McKenney0032f4e2017-08-30 10:40:17 -07001446 static bool splatted;
Paul E. McKenney4ffa6692016-06-30 11:56:38 -07001447 struct task_struct *wtp;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001448
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08001449 for_each_possible_cpu(cpu) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001450 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1451 pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
1452 batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
1453 }
1454 }
1455 for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
1456 if (pipesummary[i] != 0)
1457 break;
1458 }
Joe Percheseea203f2014-07-14 09:16:15 -04001459
1460 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1461 pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
1462 rcu_torture_current,
1463 rcu_torture_current_version,
1464 list_empty(&rcu_torture_freelist),
1465 atomic_read(&n_rcu_torture_alloc),
1466 atomic_read(&n_rcu_torture_alloc_fail),
1467 atomic_read(&n_rcu_torture_free));
SeongJae Park472213a2016-08-13 15:54:35 +09001468 pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
Joe Percheseea203f2014-07-14 09:16:15 -04001469 atomic_read(&n_rcu_torture_mberror),
SeongJae Park472213a2016-08-13 15:54:35 +09001470 n_rcu_torture_barrier_error,
Joe Percheseea203f2014-07-14 09:16:15 -04001471 n_rcu_torture_boost_ktrerror,
1472 n_rcu_torture_boost_rterror);
1473 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
1474 n_rcu_torture_boost_failure,
1475 n_rcu_torture_boosts,
Paul E. McKenney8da9a592018-05-22 11:17:51 -07001476 atomic_long_read(&n_rcu_torture_timers));
Joe Percheseea203f2014-07-14 09:16:15 -04001477 torture_onoff_stats();
Paul E. McKenney38706bc2014-08-18 21:12:17 -07001478 pr_cont("barrier: %ld/%ld:%ld ",
Joe Percheseea203f2014-07-14 09:16:15 -04001479 n_barrier_successes,
1480 n_barrier_attempts,
1481 n_rcu_torture_barrier_error);
Paul E. McKenney38706bc2014-08-18 21:12:17 -07001482 pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
Joe Percheseea203f2014-07-14 09:16:15 -04001483
1484 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001485 if (atomic_read(&n_rcu_torture_mberror) != 0 ||
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001486 n_rcu_torture_barrier_error != 0 ||
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001487 n_rcu_torture_boost_ktrerror != 0 ||
1488 n_rcu_torture_boost_rterror != 0 ||
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001489 n_rcu_torture_boost_failure != 0 ||
1490 i > 1) {
Joe Percheseea203f2014-07-14 09:16:15 -04001491 pr_cont("%s", "!!! ");
Paul E. McKenney996417d2005-11-18 01:10:50 -08001492 atomic_inc(&n_rcu_torture_error);
Ingo Molnar5af970a2008-06-18 10:09:48 +02001493 WARN_ON_ONCE(1);
Paul E. McKenney996417d2005-11-18 01:10:50 -08001494 }
Joe Percheseea203f2014-07-14 09:16:15 -04001495 pr_cont("Reader Pipe: ");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001496 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
Joe Percheseea203f2014-07-14 09:16:15 -04001497 pr_cont(" %ld", pipesummary[i]);
1498 pr_cont("\n");
1499
1500 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1501 pr_cont("Reader Batch: ");
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07001502 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
Joe Percheseea203f2014-07-14 09:16:15 -04001503 pr_cont(" %ld", batchsummary[i]);
1504 pr_cont("\n");
1505
1506 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1507 pr_cont("Free-Block Circulation: ");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001508 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
Joe Percheseea203f2014-07-14 09:16:15 -04001509 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001510 }
Joe Percheseea203f2014-07-14 09:16:15 -04001511 pr_cont("\n");
1512
Josh Triplettc8e5b162007-05-08 00:33:20 -07001513 if (cur_ops->stats)
Joe Percheseea203f2014-07-14 09:16:15 -04001514 cur_ops->stats();
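	/*
	 * If the writer's version number has not advanced since the previous
	 * statistics pass, the writer kthread may be stalled, so print its
	 * state along with the grace-period kthread state and a trace dump.
	 */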
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001515 if (rtcv_snap == rcu_torture_current_version &&
1516 rcu_torture_current != NULL) {
Paul E. McKenney7f6733c2017-04-18 17:17:35 -07001517 int __maybe_unused flags = 0;
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001518 unsigned long __maybe_unused gp_seq = 0;
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001519
1520 rcutorture_get_gp_data(cur_ops->ttype,
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001521 &flags, &gp_seq);
Paul E. McKenney7f6733c2017-04-18 17:17:35 -07001522 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001523 &flags, &gp_seq);
Paul E. McKenney4ffa6692016-06-30 11:56:38 -07001524 wtp = READ_ONCE(writer_task);
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001525 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
Paul E. McKenney18aff332015-11-17 13:35:28 -08001526 rcu_torture_writer_state_getname(),
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001527 rcu_torture_writer_state, gp_seq, flags,
Paul E. McKenney808de392017-06-19 10:03:22 -07001528 wtp == NULL ? ~0UL : wtp->state,
1529 wtp == NULL ? -1 : (int)task_cpu(wtp));
Paul E. McKenney0032f4e2017-08-30 10:40:17 -07001530 if (!splatted && wtp) {
1531 sched_show_task(wtp);
1532 splatted = true;
1533 }
Paul E. McKenneyafea2272014-03-12 07:10:41 -07001534 show_rcu_gp_kthreads();
Paul E. McKenney274529b2016-03-21 19:46:04 -07001535 rcu_ftrace_dump(DUMP_ALL);
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001536 }
1537 rtcv_snap = rcu_torture_current_version;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001538}
1539
1540/*
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001541 * Periodically prints torture statistics, if periodic statistics printing
1542 * was specified via the stat_interval module parameter.
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001543 */
1544static int
1545rcu_torture_stats(void *arg)
1546{
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001547 VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001548 do {
1549 schedule_timeout_interruptible(stat_interval * HZ);
1550 rcu_torture_stats_print();
Paul E. McKenneyf67a3352014-01-29 07:40:27 -08001551 torture_shutdown_absorb("rcu_torture_stats");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001552 } while (!torture_must_stop());
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001553 torture_kthread_stopping("rcu_torture_stats");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001554 return 0;
1555}
1556
Paul E. McKenneyeac45e52018-05-17 11:33:17 -07001557static void
Steven Rostedt (Red Hat)e66c33d2013-07-12 16:50:28 -04001558rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
Paul E. McKenney95c38322006-03-24 03:15:58 -08001559{
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001560 pr_alert("%s" TORTURE_FLAG
1561 "--- %s: nreaders=%d nfakewriters=%d "
1562 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1563 "shuffle_interval=%d stutter=%d irqreader=%d "
1564 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1565 "test_boost=%d/%d test_boost_interval=%d "
1566 "test_boost_duration=%d shutdown_secs=%d "
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001567 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
Paul E. McKenney67afeed2012-10-20 12:56:06 -07001568 "n_barrier_cbs=%d "
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001569 "onoff_interval=%d onoff_holdoff=%d\n",
1570 torture_type, tag, nrealreaders, nfakewriters,
1571 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1572 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1573 test_boost, cur_ops->can_boost,
1574 test_boost_interval, test_boost_duration, shutdown_secs,
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001575 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
Paul E. McKenney67afeed2012-10-20 12:56:06 -07001576 n_barrier_cbs,
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001577 onoff_interval, onoff_holdoff);
Paul E. McKenney95c38322006-03-24 03:15:58 -08001578}
1579
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001580static int rcutorture_booster_cleanup(unsigned int cpu)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001581{
1582 struct task_struct *t;
1583
1584 if (boost_tasks[cpu] == NULL)
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001585 return 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001586 mutex_lock(&boost_mutex);
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001587 t = boost_tasks[cpu];
1588 boost_tasks[cpu] = NULL;
Joel Fernandes (Google)450efca2018-06-10 16:45:43 -07001589 rcu_torture_enable_rt_throttle();
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001590 mutex_unlock(&boost_mutex);
1591
1592 /* This must be outside of the mutex, otherwise deadlock! */
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001593 torture_stop_kthread(rcu_torture_boost, t);
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001594 return 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001595}
1596
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001597static int rcutorture_booster_init(unsigned int cpu)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001598{
1599 int retval;
1600
1601 if (boost_tasks[cpu] != NULL)
1602 return 0; /* Already created, nothing more to do. */
1603
1604 /* Don't allow time recalculation while creating a new task. */
1605 mutex_lock(&boost_mutex);
Joel Fernandes (Google)450efca2018-06-10 16:45:43 -07001606 rcu_torture_disable_rt_throttle();
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001607 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
Eric Dumazet1f288092011-06-16 15:53:18 -07001608 boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
1609 cpu_to_node(cpu),
1610 "rcu_torture_boost");
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001611 if (IS_ERR(boost_tasks[cpu])) {
1612 retval = PTR_ERR(boost_tasks[cpu]);
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001613 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001614 n_rcu_torture_boost_ktrerror++;
1615 boost_tasks[cpu] = NULL;
1616 mutex_unlock(&boost_mutex);
1617 return retval;
1618 }
1619 kthread_bind(boost_tasks[cpu], cpu);
1620 wake_up_process(boost_tasks[cpu]);
1621 mutex_unlock(&boost_mutex);
1622 return 0;
1623}
1624
Paul E. McKenneyd5f546d2011-11-04 11:44:12 -07001625/*
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001626 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
1627 * induces a CPU stall for the time specified by stall_cpu.
1628 */
Paul Gortmaker49fb4c62013-06-19 14:52:21 -04001629static int rcu_torture_stall(void *args)
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001630{
1631 unsigned long stop_at;
1632
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001633 VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001634 if (stall_cpu_holdoff > 0) {
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001635 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001636 schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001637 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001638 }
1639 if (!kthread_should_stop()) {
Arnd Bergmann622be33f2018-06-18 16:47:34 +02001640 stop_at = ktime_get_seconds() + stall_cpu;
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001641 /* RCU CPU stall is expected behavior in following code. */
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001642 rcu_read_lock();
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001643 if (stall_cpu_irqsoff)
1644 local_irq_disable();
1645 else
1646 preempt_disable();
1647 pr_alert("rcu_torture_stall start on CPU %d.\n",
1648 smp_processor_id());
Arnd Bergmann622be33f2018-06-18 16:47:34 +02001649 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
1650 stop_at))
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001651 continue; /* Induce RCU CPU stall warning. */
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001652 if (stall_cpu_irqsoff)
1653 local_irq_enable();
1654 else
1655 preempt_enable();
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001656 rcu_read_unlock();
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001657 pr_alert("rcu_torture_stall end.\n");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001658 }
Paul E. McKenneyf67a3352014-01-29 07:40:27 -08001659 torture_shutdown_absorb("rcu_torture_stall");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001660 while (!kthread_should_stop())
1661 schedule_timeout_interruptible(10 * HZ);
1662 return 0;
1663}
1664
1665/* Spawn CPU-stall kthread, if stall_cpu specified. */
1666static int __init rcu_torture_stall_init(void)
1667{
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001668 if (stall_cpu <= 0)
1669 return 0;
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08001670 return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001671}
1672
Paul E. McKenney1b272912018-07-18 14:32:31 -07001673/* Carry out grace-period forward-progress testing. */
1674static int rcu_torture_fwd_prog(void *args)
1675{
Paul E. McKenney119248b2018-07-18 15:39:37 -07001676 unsigned long cver;
1677 unsigned long gps;
Paul E. McKenney1b272912018-07-18 14:32:31 -07001678 int idx;
1679 unsigned long stopat;
1680 bool tested = false;
1681
1682 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
1683 do {
1684 schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
Paul E. McKenney119248b2018-07-18 15:39:37 -07001685 cver = READ_ONCE(rcu_torture_current_version);
1686 gps = cur_ops->get_gp_seq();
Paul E. McKenney1b272912018-07-18 14:32:31 -07001687 stopat = jiffies + cur_ops->stall_dur() / fwd_progress_div;
1688 while (time_before(jiffies, stopat) && !torture_must_stop()) {
1689 idx = cur_ops->readlock();
1690 udelay(10);
1691 cur_ops->readunlock(idx);
1692 if (!fwd_progress_need_resched || need_resched())
1693 cond_resched();
1694 }
1695 if (!time_before(jiffies, stopat) && !torture_must_stop()) {
1696 tested = true;
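			/*
			 * cver ends up true only if the writer's version
			 * number failed to advance while the readers ran;
			 * warn if, in addition, fewer than two grace-period
			 * sequence increments were observed.
			 */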
Paul E. McKenney119248b2018-07-18 15:39:37 -07001697 cver = cver == READ_ONCE(rcu_torture_current_version);
1698 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
1699 WARN_ON_ONCE(cver && gps < 2);
Paul E. McKenney1b272912018-07-18 14:32:31 -07001700 }
1701 /* Avoid slow periods, better to test when busy. */
1702 stutter_wait("rcu_torture_fwd_prog");
1703 } while (!torture_must_stop());
1704 WARN_ON(!tested);
1705 torture_kthread_stopping("rcu_torture_fwd_prog");
1706 return 0;
1707}
1708
1709/* If forward-progress checking is requested and feasible, spawn the thread. */
1710static int __init rcu_torture_fwd_prog_init(void)
1711{
1712 if (!fwd_progress)
1713 return 0; /* Not requested, so don't do it. */
1714 if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0) {
1715 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
1716 return 0;
1717 }
1718 if (stall_cpu > 0) {
1719 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
1720		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
1721 return -EINVAL; /* In module, can fail back to user. */
1722 WARN_ON(1); /* Make sure rcutorture notices conflict. */
1723 return 0;
1724 }
1725 if (fwd_progress_holdoff <= 0)
1726 fwd_progress_holdoff = 1;
1727 if (fwd_progress_div <= 0)
1728 fwd_progress_div = 4;
1729 return torture_create_kthread(rcu_torture_fwd_prog,
1730 NULL, fwd_prog_task);
1731}
1732
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001733/* Callback function for RCU barrier testing. */
Rashika Kheriab3b8a4d2014-02-27 17:16:57 +05301734static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001735{
1736 atomic_inc(&barrier_cbs_invoked);
1737}
1738
1739/* kthread function to register callbacks used to test RCU barriers. */
1740static int rcu_torture_barrier_cbs(void *arg)
1741{
1742 long myid = (long)arg;
Paul E. McKenneyc6ebcbb2012-05-28 19:21:41 -07001743	bool lastphase = false;
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07001744 bool newphase;
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001745 struct rcu_head rcu;
1746
1747 init_rcu_head_on_stack(&rcu);
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001748 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
Linus Torvalds971eae72014-03-31 11:21:19 -07001749 set_user_nice(current, MAX_NICE);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001750 do {
1751 wait_event(barrier_cbs_wq[myid],
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07001752 (newphase =
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07001753 smp_load_acquire(&barrier_phase)) != lastphase ||
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001754 torture_must_stop());
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07001755 lastphase = newphase;
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001756 if (torture_must_stop())
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001757 break;
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07001758 /*
1759 * The above smp_load_acquire() ensures barrier_phase load
Paul E. McKenneyaab05732016-05-02 12:20:51 -07001760 * is ordered before the following ->call().
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07001761 */
Paul E. McKenney0aa67e72016-03-30 11:40:44 -07001762 local_irq_disable(); /* Just to test no-irq call_rcu(). */
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001763 cur_ops->call(&rcu, rcu_torture_barrier_cbf);
Paul E. McKenney0aa67e72016-03-30 11:40:44 -07001764 local_irq_enable();
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001765 if (atomic_dec_and_test(&barrier_cbs_count))
1766 wake_up(&barrier_wq);
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001767 } while (!torture_must_stop());
Paul E. McKenney69c60452014-07-01 11:59:36 -07001768 if (cur_ops->cb_barrier != NULL)
1769 cur_ops->cb_barrier();
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001770 destroy_rcu_head_on_stack(&rcu);
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001771 torture_kthread_stopping("rcu_torture_barrier_cbs");
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001772 return 0;
1773}
1774
1775/* kthread function to drive and coordinate RCU barrier testing. */
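/*
 * Each pass flips barrier_phase to release the rcu_torture_barrier_cbs
 * kthreads, each of which posts exactly one callback, waits until all of
 * them have done so, invokes ->cb_barrier(), and then checks that every
 * one of those callbacks has already been invoked.
 */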
1776static int rcu_torture_barrier(void *arg)
1777{
1778 int i;
1779
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001780 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001781 do {
1782 atomic_set(&barrier_cbs_invoked, 0);
1783 atomic_set(&barrier_cbs_count, n_barrier_cbs);
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07001784 /* Ensure barrier_phase ordered after prior assignments. */
1785 smp_store_release(&barrier_phase, !barrier_phase);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001786 for (i = 0; i < n_barrier_cbs; i++)
1787 wake_up(&barrier_cbs_wq[i]);
1788 wait_event(barrier_wq,
1789 atomic_read(&barrier_cbs_count) == 0 ||
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001790 torture_must_stop());
1791 if (torture_must_stop())
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001792 break;
1793 n_barrier_attempts++;
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07001794 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001795 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
1796 n_rcu_torture_barrier_error++;
Paul E. McKenney7602de4a2014-12-17 18:39:54 -08001797 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
1798 atomic_read(&barrier_cbs_invoked),
1799 n_barrier_cbs);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001800 WARN_ON_ONCE(1);
Joel Fernandes (Google)bf5b6432018-06-19 15:14:19 -07001801 } else {
1802 n_barrier_successes++;
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001803 }
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001804 schedule_timeout_interruptible(HZ / 10);
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001805 } while (!torture_must_stop());
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001806 torture_kthread_stopping("rcu_torture_barrier");
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001807 return 0;
1808}
1809
1810/* Initialize RCU barrier testing. */
1811static int rcu_torture_barrier_init(void)
1812{
1813 int i;
1814 int ret;
1815
Paul E. McKenneyd9eba7682015-05-14 15:35:43 -07001816 if (n_barrier_cbs <= 0)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001817 return 0;
1818 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001819 pr_alert("%s" TORTURE_FLAG
1820 " Call or barrier ops missing for %s,\n",
1821 torture_type, cur_ops->name);
1822 pr_alert("%s" TORTURE_FLAG
1823 " RCU barrier testing omitted from run.\n",
1824 torture_type);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001825 return 0;
1826 }
1827 atomic_set(&barrier_cbs_count, 0);
1828 atomic_set(&barrier_cbs_invoked, 0);
1829 barrier_cbs_tasks =
Paul E. McKenney68a675d2017-12-01 14:26:56 -08001830 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001831 GFP_KERNEL);
1832 barrier_cbs_wq =
Paul E. McKenney68a675d2017-12-01 14:26:56 -08001833 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
Sasha Levinde5e6432012-12-20 14:11:28 -05001834 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001835 return -ENOMEM;
1836 for (i = 0; i < n_barrier_cbs; i++) {
1837 init_waitqueue_head(&barrier_cbs_wq[i]);
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08001838 ret = torture_create_kthread(rcu_torture_barrier_cbs,
1839 (void *)(long)i,
1840 barrier_cbs_tasks[i]);
1841 if (ret)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001842 return ret;
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001843 }
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08001844 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001845}
1846
1847/* Clean up after RCU barrier testing. */
1848static void rcu_torture_barrier_cleanup(void)
1849{
1850 int i;
1851
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001852 torture_stop_kthread(rcu_torture_barrier, barrier_task);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001853 if (barrier_cbs_tasks != NULL) {
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001854 for (i = 0; i < n_barrier_cbs; i++)
1855 torture_stop_kthread(rcu_torture_barrier_cbs,
1856 barrier_cbs_tasks[i]);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001857 kfree(barrier_cbs_tasks);
1858 barrier_cbs_tasks = NULL;
1859 }
1860 if (barrier_cbs_wq != NULL) {
1861 kfree(barrier_cbs_wq);
1862 barrier_cbs_wq = NULL;
1863 }
1864}
1865
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07001866static bool rcu_torture_can_boost(void)
1867{
1868 static int boost_warn_once;
1869 int prio;
1870
1871 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
1872 return false;
1873
1874 prio = rcu_get_gp_kthreads_prio();
1875 if (!prio)
1876 return false;
1877
1878 if (prio < 2) {
1879 if (boost_warn_once == 1)
1880 return false;
1881
Joel Fernandes (Google)bf5b6432018-06-19 15:14:19 -07001882 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07001883 boost_warn_once = 1;
1884 return false;
1885 }
1886
1887 return true;
1888}
1889
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001890static enum cpuhp_state rcutor_hp;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001891
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001892static void
1893rcu_torture_cleanup(void)
1894{
Paul E. McKenney034777d2018-04-19 08:43:11 -07001895 int flags = 0;
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001896 unsigned long gp_seq = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001897 int i;
1898
Davidlohr Buesod36a7a02014-09-11 20:40:21 -07001899 if (torture_cleanup_begin()) {
Paul E. McKenney343e9092008-12-15 16:13:07 -08001900 if (cur_ops->cb_barrier != NULL)
1901 cur_ops->cb_barrier();
1902 return;
1903 }
Paul E. McKenney3808dc92014-01-28 15:29:21 -08001904
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001905 rcu_torture_barrier_cleanup();
Paul E. McKenney1b272912018-07-18 14:32:31 -07001906 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001907 torture_stop_kthread(rcu_torture_stall, stall_task);
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001908 torture_stop_kthread(rcu_torture_writer, writer_task);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001909
Josh Triplettc8e5b162007-05-08 00:33:20 -07001910 if (reader_tasks) {
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001911 for (i = 0; i < nrealreaders; i++)
1912 torture_stop_kthread(rcu_torture_reader,
1913 reader_tasks[i]);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001914 kfree(reader_tasks);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001915 }
1916 rcu_torture_current = NULL;
1917
Josh Triplettc8e5b162007-05-08 00:33:20 -07001918 if (fakewriter_tasks) {
Josh Triplettb772e1d2006-10-04 02:17:13 -07001919 for (i = 0; i < nfakewriters; i++) {
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001920 torture_stop_kthread(rcu_torture_fakewriter,
1921 fakewriter_tasks[i]);
Josh Triplettb772e1d2006-10-04 02:17:13 -07001922 }
1923 kfree(fakewriter_tasks);
1924 fakewriter_tasks = NULL;
1925 }
1926
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001927 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
1928 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
1929 pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
1930 cur_ops->name, gp_seq, flags);
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001931 torture_stop_kthread(rcu_torture_stats, stats_task);
1932 torture_stop_kthread(rcu_torture_fqs, fqs_task);
Paul E. McKenney38706bc2014-08-18 21:12:17 -07001933 for (i = 0; i < ncbflooders; i++)
1934 torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07001935 if (rcu_torture_can_boost())
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001936 cpuhp_remove_state(rcutor_hp);
Paul E. McKenneybf66f182010-01-04 15:09:10 -08001937
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07001938 /*
1939 * Wait for all RCU callbacks to fire, then do flavor-specific
1940 * cleanup operations.
1941 */
Paul E. McKenney23269742008-05-12 21:21:05 +02001942 if (cur_ops->cb_barrier != NULL)
1943 cur_ops->cb_barrier();
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07001944 if (cur_ops->cleanup != NULL)
1945 cur_ops->cleanup();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001946
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001947 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07001948
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001949 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001950 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
Paul E. McKenney2e9e8082014-01-28 15:58:22 -08001951 else if (torture_onoff_failures())
Paul E. McKenney091541b2012-01-10 12:51:14 -08001952 rcu_torture_print_module_parms(cur_ops,
1953 "End of test: RCU_HOTPLUG");
Paul E. McKenney95c38322006-03-24 03:15:58 -08001954 else
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001955 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
Davidlohr Buesod36a7a02014-09-11 20:40:21 -07001956 torture_cleanup_end();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001957}
1958
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07001959#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
1960static void rcu_torture_leak_cb(struct rcu_head *rhp)
1961{
1962}
1963
1964static void rcu_torture_err_cb(struct rcu_head *rhp)
1965{
1966 /*
1967 * This -might- happen due to race conditions, but is unlikely.
1968 * The scenario that leads to this happening is that the
1969 * first of the pair of duplicate callbacks is queued,
1970 * someone else starts a grace period that includes that
1971 * callback, then the second of the pair must wait for the
1972 * next grace period. Unlikely, but can happen. If it
1973 * does happen, the debug-objects subsystem won't have splatted.
1974 */
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08001975 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07001976}
1977#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
1978
1979/*
1980 * Verify that double-free causes debug-objects to complain, but only
1981 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test
1982 * cannot be carried out.
1983 */
1984static void rcu_test_debug_objects(void)
1985{
1986#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
1987 struct rcu_head rh1;
1988 struct rcu_head rh2;
1989
1990 init_rcu_head_on_stack(&rh1);
1991 init_rcu_head_on_stack(&rh2);
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08001992 pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07001993
1994 /* Try to queue the rh2 pair of callbacks for the same grace period. */
1995 preempt_disable(); /* Prevent preemption from interrupting test. */
1996 rcu_read_lock(); /* Make it impossible to finish a grace period. */
1997 call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
1998 local_irq_disable(); /* Make it harder to start a new grace period. */
1999 call_rcu(&rh2, rcu_torture_leak_cb);
2000 call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
2001 local_irq_enable();
2002 rcu_read_unlock();
2003 preempt_enable();
2004
2005 /* Wait for them all to get done so we can safely return. */
2006 rcu_barrier();
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08002007 pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002008 destroy_rcu_head_on_stack(&rh1);
2009 destroy_rcu_head_on_stack(&rh2);
2010#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08002011 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002012#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2013}
2014
Josh Triplett6f8bc5002007-05-08 00:25:24 -07002015static int __init
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002016rcu_torture_init(void)
2017{
2018 int i;
2019 int cpu;
2020 int firsterr = 0;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07002021 static struct rcu_torture_ops *torture_ops[] = {
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07002022 &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
Paul E. McKenney2397d072018-05-25 07:29:25 -07002023 &busted_srcud_ops, &sched_ops, &tasks_ops,
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07002024 };
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002025
Paul E. McKenneya2f25772017-11-21 20:19:17 -08002026 if (!torture_init_begin(torture_type, verbose))
Paul E. McKenney52280842014-04-07 09:14:11 -07002027 return -EBUSY;
Paul E. McKenney343e9092008-12-15 16:13:07 -08002028
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002029 /* Process args and tell the world that the torturer is on the job. */
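	/*
	 * The torture_type module parameter selects the rcu_torture_ops
	 * entry by name, so a typical (illustrative) invocation when built
	 * as a module might be:  modprobe rcutorture torture_type=srcu
	 */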
Josh Triplettade5fb82007-05-08 00:33:22 -07002030 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002031 cur_ops = torture_ops[i];
Josh Triplettade5fb82007-05-08 00:33:22 -07002032 if (strcmp(torture_type, cur_ops->name) == 0)
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002033 break;
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002034 }
Josh Triplettade5fb82007-05-08 00:33:22 -07002035 if (i == ARRAY_SIZE(torture_ops)) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07002036 pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
2037 torture_type);
2038 pr_alert("rcu-torture types:");
Paul E. McKenneycf886c42009-10-25 19:03:54 -07002039 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
Joe Perchesa7538352018-05-14 13:27:33 -07002040 pr_cont(" %s", torture_ops[i]->name);
2041 pr_cont("\n");
Paul E. McKenneye746b552018-07-07 17:35:22 -07002042 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
Paul E. McKenney889d4872015-08-24 11:37:58 -07002043 firsterr = -EINVAL;
2044 goto unwind;
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002045 }
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002046 if (cur_ops->fqs == NULL && fqs_duration != 0) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07002047 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002048 fqs_duration = 0;
2049 }
Josh Triplettc8e5b162007-05-08 00:33:20 -07002050 if (cur_ops->init)
Paul E. McKenney889d4872015-08-24 11:37:58 -07002051 cur_ops->init();
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002052
Paul E. McKenney64e4b432014-03-12 10:26:35 -07002053 if (nreaders >= 0) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002054 nrealreaders = nreaders;
Paul E. McKenney64e4b432014-03-12 10:26:35 -07002055 } else {
Paul E. McKenney3838cc12015-03-12 13:55:48 -07002056 nrealreaders = num_online_cpus() - 2 - nreaders;
Paul E. McKenney64e4b432014-03-12 10:26:35 -07002057 if (nrealreaders <= 0)
2058 nrealreaders = 1;
2059 }
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002060 rcu_torture_print_module_parms(cur_ops, "Start of test");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002061
2062 /* Set up the freelist. */
2063
2064 INIT_LIST_HEAD(&rcu_torture_freelist);
Ahmed S. Darwish788e7702007-05-08 00:33:14 -07002065 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
Paul E. McKenney996417d2005-11-18 01:10:50 -08002066 rcu_tortures[i].rtort_mbtest = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002067 list_add_tail(&rcu_tortures[i].rtort_free,
2068 &rcu_torture_freelist);
2069 }
2070
2071 /* Initialize the statistics so that each run gets its own numbers. */
2072
2073 rcu_torture_current = NULL;
2074 rcu_torture_current_version = 0;
2075 atomic_set(&n_rcu_torture_alloc, 0);
2076 atomic_set(&n_rcu_torture_alloc_fail, 0);
2077 atomic_set(&n_rcu_torture_free, 0);
Paul E. McKenney996417d2005-11-18 01:10:50 -08002078 atomic_set(&n_rcu_torture_mberror, 0);
2079 atomic_set(&n_rcu_torture_error, 0);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002080 n_rcu_torture_barrier_error = 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002081 n_rcu_torture_boost_ktrerror = 0;
2082 n_rcu_torture_boost_rterror = 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002083 n_rcu_torture_boost_failure = 0;
2084 n_rcu_torture_boosts = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002085 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2086 atomic_set(&rcu_torture_wcount[i], 0);
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08002087 for_each_possible_cpu(cpu) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002088 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2089 per_cpu(rcu_torture_count, cpu)[i] = 0;
2090 per_cpu(rcu_torture_batch, cpu)[i] = 0;
2091 }
2092 }
2093
2094 /* Start up the kthreads. */
2095
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002096 firsterr = torture_create_kthread(rcu_torture_writer, NULL,
2097 writer_task);
2098 if (firsterr)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002099 goto unwind;
Paul E. McKenney4444d852015-05-14 15:42:40 -07002100 if (nfakewriters > 0) {
Paul E. McKenney68a675d2017-12-01 14:26:56 -08002101 fakewriter_tasks = kcalloc(nfakewriters,
Paul E. McKenney4444d852015-05-14 15:42:40 -07002102 sizeof(fakewriter_tasks[0]),
2103 GFP_KERNEL);
2104 if (fakewriter_tasks == NULL) {
2105 VERBOSE_TOROUT_ERRSTRING("out of memory");
2106 firsterr = -ENOMEM;
2107 goto unwind;
2108 }
Josh Triplettb772e1d2006-10-04 02:17:13 -07002109 }
2110 for (i = 0; i < nfakewriters; i++) {
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002111 firsterr = torture_create_kthread(rcu_torture_fakewriter,
2112 NULL, fakewriter_tasks[i]);
2113 if (firsterr)
Josh Triplettb772e1d2006-10-04 02:17:13 -07002114 goto unwind;
Josh Triplettb772e1d2006-10-04 02:17:13 -07002115 }
Paul E. McKenney68a675d2017-12-01 14:26:56 -08002116 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002117 GFP_KERNEL);
2118 if (reader_tasks == NULL) {
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08002119 VERBOSE_TOROUT_ERRSTRING("out of memory");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002120 firsterr = -ENOMEM;
2121 goto unwind;
2122 }
2123 for (i = 0; i < nrealreaders; i++) {
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002124 firsterr = torture_create_kthread(rcu_torture_reader, NULL,
2125 reader_tasks[i]);
2126 if (firsterr)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002127 goto unwind;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002128 }
2129 if (stat_interval > 0) {
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002130 firsterr = torture_create_kthread(rcu_torture_stats, NULL,
2131 stats_task);
2132 if (firsterr)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002133 goto unwind;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002134 }
Paul E. McKenneye8e255f2015-05-14 16:55:45 -07002135 if (test_no_idle_hz && shuffle_interval > 0) {
Paul E. McKenney3808dc92014-01-28 15:29:21 -08002136 firsterr = torture_shuffle_init(shuffle_interval * HZ);
2137 if (firsterr)
Rusty Russell73d0a4b2009-03-30 22:05:16 -06002138 goto unwind;
Srivatsa Vaddagirid84f5202006-01-08 01:03:42 -08002139 }
Paul E. McKenneyd120f652008-06-18 05:21:44 -07002140 if (stutter < 0)
2141 stutter = 0;
2142 if (stutter) {
Paul E. McKenney628edaa2014-01-31 11:57:43 -08002143 firsterr = torture_stutter_init(stutter * HZ);
2144 if (firsterr)
Paul E. McKenneyd120f652008-06-18 05:21:44 -07002145 goto unwind;
Paul E. McKenneyd120f652008-06-18 05:21:44 -07002146 }
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002147 if (fqs_duration < 0)
2148 fqs_duration = 0;
2149 if (fqs_duration) {
Paul E. McKenney628edaa2014-01-31 11:57:43 -08002150 /* Create the fqs thread */
Paul E. McKenneyd0d06062014-03-17 20:56:45 -07002151 firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
2152 fqs_task);
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002153 if (firsterr)
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002154 goto unwind;
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002155 }
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002156 if (test_boost_interval < 1)
2157 test_boost_interval = 1;
2158 if (test_boost_duration < 2)
2159 test_boost_duration = 2;
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002160 if (rcu_torture_can_boost()) {
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002161
2162 boost_starttime = jiffies + test_boost_interval * HZ;
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02002163
2164 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
2165 rcutorture_booster_init,
2166 rcutorture_booster_cleanup);
2167 if (firsterr < 0)
2168 goto unwind;
2169 rcutor_hp = firsterr;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002170 }
Paul E. McKenney01025eb2014-01-31 15:15:02 -08002171 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
2172 if (firsterr)
Paul E. McKenneye991dbc2014-01-31 14:52:13 -08002173 goto unwind;
Paul E. McKenney028be122018-05-08 09:20:34 -07002174 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval);
Paul E. McKenney01025eb2014-01-31 15:15:02 -08002175 if (firsterr)
Paul E. McKenney37e377d2012-02-17 22:12:18 -08002176 goto unwind;
Paul E. McKenney01025eb2014-01-31 15:15:02 -08002177 firsterr = rcu_torture_stall_init();
2178 if (firsterr)
Paul E. McKenney37e377d2012-02-17 22:12:18 -08002179 goto unwind;
Paul E. McKenney1b272912018-07-18 14:32:31 -07002180 firsterr = rcu_torture_fwd_prog_init();
2181 if (firsterr)
2182 goto unwind;
Paul E. McKenney01025eb2014-01-31 15:15:02 -08002183 firsterr = rcu_torture_barrier_init();
2184 if (firsterr)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002185 goto unwind;
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002186 if (object_debug)
2187 rcu_test_debug_objects();
Paul E. McKenney38706bc2014-08-18 21:12:17 -07002188 if (cbflood_n_burst > 0) {
2189 /* Create the cbflood threads */
2190 ncbflooders = (num_online_cpus() + 3) / 4;
2191 cbflood_task = kcalloc(ncbflooders, sizeof(*cbflood_task),
2192 GFP_KERNEL);
2193 if (!cbflood_task) {
2194 VERBOSE_TOROUT_ERRSTRING("out of memory");
2195 firsterr = -ENOMEM;
2196 goto unwind;
2197 }
2198 for (i = 0; i < ncbflooders; i++) {
2199 firsterr = torture_create_kthread(rcu_torture_cbflood,
2200 NULL,
2201 cbflood_task[i]);
2202 if (firsterr)
2203 goto unwind;
2204 }
2205 }
Paul E. McKenneyb5daa8f2014-01-30 13:38:09 -08002206 torture_init_end();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002207 return 0;
2208
2209unwind:
Paul E. McKenneyb5daa8f2014-01-30 13:38:09 -08002210 torture_init_end();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002211 rcu_torture_cleanup();
2212 return firsterr;
2213}
2214
2215module_init(rcu_torture_init);
2216module_exit(rcu_torture_cleanup);