/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");


/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x1	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x2	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x4	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RCU	 0x8	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 4	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | \
				  RCUTORTURE_RDR_PREEMPT)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */

torture_param(int, cbflood_inter_holdoff, HZ,
	      "Holdoff between floods (jiffies)");
torture_param(int, cbflood_intra_holdoff, 1,
	      "Holdoff between bursts (jiffies)");
torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
torture_param(int, cbflood_n_per_burst, 20000,
	      "# callbacks per burst in flood");
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");

static int nrealreaders;
static int ncbflooders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct **cbflood_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

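/*
 * One element of the "torture pipeline": ->rtort_rcu links it to the flavor
 * under test, ->rtort_pipe_count counts the grace periods it has passed
 * through, ->rtort_free chains it on the freelist, and ->rtort_mbtest is
 * cleared just before the element is recycled (see
 * rcu_torture_pipe_update_one()).
 */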
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static atomic_long_t n_cbfloods;
static struct list_head rcu_torture_removed;

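/* Writer-kthread progress state, decoded by rcu_torture_writer_state_getname(). */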
static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

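/* Map the writer kthread's current state to a human-readable name. */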
static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int ext_irq_conflict;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct torture_random_state *rrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	const unsigned long longdelay_ms = 50;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		mdelay(longdelay_ms);
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500)))
		torture_preempt_schedule();  /* QS only if preemptible. */
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

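/* Grace-period-sequence stub for flavors that do not provide a counter. */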
static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

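/* (Re)initialize the list of elements waiting for a grace period. */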
static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype		= RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.get_state	= get_state_synchronize_rcu,
	.cond_sync	= cond_synchronize_rcu,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_bh_ops = {
	.ttype		= RCU_BH_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.get_gp_seq	= rcu_bh_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_bh_torture_deferred_free,
	.sync		= synchronize_rcu_bh,
	.exp_sync	= synchronize_rcu_bh_expedited,
	.call		= call_rcu_bh,
	.cb_barrier	= rcu_barrier_bh,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.extendables	= (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ),
	.ext_irq_conflict = RCUTORTURE_RDR_RCU,
	.name		= "rcu_bh"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_read_delay(struct torture_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task())
		schedule_timeout_interruptible(longdelay);
	else
		rcu_read_delay(rrsp);
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcu"
};

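/*
 * Set up and tear down the dynamically allocated srcu_struct used by the
 * "srcud" and "busted_srcud" flavors below.
 */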
static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	static DEFINE_TORTURE_RANDOM(rand);

	if (torture_random(&rand) & 0x800)
		cleanup_srcu_struct(&srcu_ctld);
	else
		cleanup_srcu_struct_quiesced(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};

/*
 * Definitions for sched torture testing.
 */

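/*
 * For RCU-sched, any region with preemption disabled is a read-side
 * critical section, so preempt_disable()/preempt_enable() serve as this
 * flavor's read-side markers.
 */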
static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops sched_ops = {
	.ttype		= RCU_SCHED_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.get_gp_seq	= rcu_sched_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_sched_torture_deferred_free,
	.sync		= synchronize_sched,
	.exp_sync	= synchronize_sched_expedited,
	.get_state	= get_state_synchronize_sched,
	.cond_sync	= cond_synchronize_sched,
	.call		= call_rcu_sched,
	.cb_barrier	= rcu_barrier_sched,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "sched"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

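/*
 * RCU-tasks readers are delimited only by voluntary context switches, so
 * the read-side markers below need not do anything.
 */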
static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks"
};

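/* Grace-period-sequence delta, using the flavor's own helper if provided. */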
static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

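/* Is the current test torturing RCU-tasks? */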
static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in; otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

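/*
 * Did a boost-test callback take longer than one boost interval (less half
 * a second of slack) to be invoked?  If so, count a boost failure.
 */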
static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true; /* failed */
	}

	return false; /* passed */
}

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Track whether the test has already failed in this interval. */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								  jiffies);
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boost never happened, then inflight will always be 1;
		 * in that case the boost check would never happen in the
		 * above loop, so do another one here.
		 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

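/* Do-nothing callback posted in bulk by the callback-flood kthread. */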
static void rcu_torture_cbflood_cb(struct rcu_head *rhp)
{
}

/*
 * RCU torture callback-flood kthread.  Repeatedly induces bursts of calls
 * to call_rcu() or analogous, increasing the probability of occurrence
 * of callback-overflow corner cases.
 */
static int
rcu_torture_cbflood(void *arg)
{
	int err = 1;
	int i;
	int j;
	struct rcu_head *rhp;

	if (cbflood_n_per_burst > 0 &&
	    cbflood_inter_holdoff > 0 &&
	    cbflood_intra_holdoff > 0 &&
	    cur_ops->call &&
	    cur_ops->cb_barrier) {
		rhp = vmalloc(array3_size(cbflood_n_burst,
					  cbflood_n_per_burst,
					  sizeof(*rhp)));
		err = !rhp;
	}
	if (err) {
		VERBOSE_TOROUT_STRING("rcu_torture_cbflood disabled: Bad args or OOM");
		goto wait_for_stop;
	}
	VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
	do {
		schedule_timeout_interruptible(cbflood_inter_holdoff);
		atomic_long_inc(&n_cbfloods);
		WARN_ON(signal_pending(current));
		for (i = 0; i < cbflood_n_burst; i++) {
			for (j = 0; j < cbflood_n_per_burst; j++) {
				cur_ops->call(&rhp[i * cbflood_n_per_burst + j],
					      rcu_torture_cbflood_cb);
			}
			schedule_timeout_interruptible(cbflood_intra_holdoff);
			WARN_ON(signal_pending(current));
		}
		cur_ops->cb_barrier();
		stutter_wait("rcu_torture_cbflood");
	} while (!torture_must_stop());
	vfree(rhp);
wait_for_stop:
	torture_kthread_stopping("rcu_torture_cbflood");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001063 rcu_torture_writer_state = RTWS_STOPPING;
1064 torture_kthread_stopping("rcu_torture_writer");
1065 }
1066
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001067 do {
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001068 rcu_torture_writer_state = RTWS_FIXED_DELAY;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001069 schedule_timeout_uninterruptible(1);
Paul E. McKenneya71fca52009-09-18 10:28:19 -07001070 rp = rcu_torture_alloc();
1071 if (rp == NULL)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001072 continue;
1073 rp->rtort_pipe_count = 0;
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001074 rcu_torture_writer_state = RTWS_DELAY;
Paul E. McKenney51b11302014-01-27 11:49:39 -08001075 udelay(torture_random(&rand) & 0x3ff);
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001076 rcu_torture_writer_state = RTWS_REPLACE;
Paul E. McKenney0ddea0e2010-09-19 21:06:14 -07001077 old_rp = rcu_dereference_check(rcu_torture_current,
1078 current == writer_task);
Paul E. McKenney996417d2005-11-18 01:10:50 -08001079 rp->rtort_mbtest = 1;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001080 rcu_assign_pointer(rcu_torture_current, rp);
Paul E. McKenney9b2619a2009-09-23 09:50:43 -07001081 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
Josh Triplettc8e5b162007-05-08 00:33:20 -07001082 if (old_rp) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001083 i = old_rp->rtort_pipe_count;
1084 if (i > RCU_TORTURE_PIPE_LEN)
1085 i = RCU_TORTURE_PIPE_LEN;
1086 atomic_inc(&rcu_torture_wcount[i]);
1087 old_rp->rtort_pipe_count++;
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001088 switch (synctype[torture_random(&rand) % nsynctypes]) {
1089 case RTWS_DEF_FREE:
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001090 rcu_torture_writer_state = RTWS_DEF_FREE;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001091 cur_ops->deferred_free(old_rp);
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001092 break;
1093 case RTWS_EXP_SYNC:
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001094 rcu_torture_writer_state = RTWS_EXP_SYNC;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001095 cur_ops->exp_sync();
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001096 rcu_torture_pipe_update(old_rp);
1097 break;
1098 case RTWS_COND_GET:
1099 rcu_torture_writer_state = RTWS_COND_GET;
1100 gp_snap = cur_ops->get_state();
1101 i = torture_random(&rand) % 16;
1102 if (i != 0)
1103 schedule_timeout_interruptible(i);
1104 udelay(torture_random(&rand) % 1000);
1105 rcu_torture_writer_state = RTWS_COND_SYNC;
1106 cur_ops->cond_sync(gp_snap);
1107 rcu_torture_pipe_update(old_rp);
1108 break;
Paul E. McKenneyf0bf8fa2014-03-21 16:17:56 -07001109 case RTWS_SYNC:
1110 rcu_torture_writer_state = RTWS_SYNC;
1111 cur_ops->sync();
1112 rcu_torture_pipe_update(old_rp);
1113 break;
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001114 default:
1115 WARN_ON_ONCE(1);
1116 break;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001117 }
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001118 }
Paul E. McKenney6bea2cc2018-05-16 15:30:36 -07001119 rcu_torture_current_version++;
Paul E. McKenney4bb3c5f2015-02-18 16:31:29 -08001120 /* Cycle through nesting levels of rcu_expedite_gp() calls. */
1121 if (can_expedite &&
1122 !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
1123 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
1124 if (expediting >= 0)
1125 rcu_expedite_gp();
1126 else
1127 rcu_unexpedite_gp();
1128 if (++expediting > 3)
1129 expediting = -expediting;
Paul E. McKenneyf7c0e6a2017-12-08 11:37:24 -08001130 } else if (!can_expedite) { /* Disabled during boot, recheck. */
1131 can_expedite = !rcu_gp_is_expedited() &&
1132 !rcu_gp_is_normal();
Paul E. McKenney4bb3c5f2015-02-18 16:31:29 -08001133 }
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001134 rcu_torture_writer_state = RTWS_STUTTER;
Paul E. McKenney628edaa2014-01-31 11:57:43 -08001135 stutter_wait("rcu_torture_writer");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001136 } while (!torture_must_stop());
Paul E. McKenney4bb3c5f2015-02-18 16:31:29 -08001137 /* Reset expediting back to unexpedited. */
1138 if (expediting > 0)
1139 expediting = -expediting;
1140 while (can_expedite && expediting++ < 0)
1141 rcu_unexpedite_gp();
1142 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
Paul E. McKenneyf7c0e6a2017-12-08 11:37:24 -08001143 if (!can_expedite)
1144 pr_alert("%s" TORTURE_FLAG
1145 " Dynamic grace-period expediting was disabled.\n",
1146 torture_type);
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001147 rcu_torture_writer_state = RTWS_STOPPING;
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001148 torture_kthread_stopping("rcu_torture_writer");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001149 return 0;
1150}
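/*
 * For reference, a minimal sketch of the update-side pattern that the
 * writer above keeps exercising, here written against the classic RCU
 * flavor.  The names struct foo, foo_ptr, foo_lock, foo_replace(), and
 * foo_free_cb() are invented for this illustration only; the torture
 * test itself drives whichever primitives cur_ops supplies.
 *
 *	struct foo { struct rcu_head rh; int data; };
 *	static struct foo __rcu *foo_ptr;	-- protected by foo_lock
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	static void foo_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	static void foo_replace(struct foo *newp)
 *	{
 *		struct foo *oldp;
 *
 *		spin_lock(&foo_lock);
 *		oldp = rcu_dereference_protected(foo_ptr,
 *						 lockdep_is_held(&foo_lock));
 *		rcu_assign_pointer(foo_ptr, newp);
 *		spin_unlock(&foo_lock);
 *		if (!oldp)
 *			return;
 *		call_rcu(&oldp->rh, foo_free_cb);	-- the RTWS_DEF_FREE case
 *	}
 *
 * The RTWS_SYNC, RTWS_EXP_SYNC, and RTWS_COND_GET cases replace the
 * call_rcu() above with synchronize_rcu(), synchronize_rcu_expedited(),
 * or a get_state_synchronize_rcu()/cond_synchronize_rcu() pair followed
 * by a direct kfree(oldp).
 */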
1151
1152/*
Josh Triplettb772e1d2006-10-04 02:17:13 -07001153 * RCU torture fake writer kthread. Repeatedly calls sync, with a random
1154 * delay between calls.
1155 */
1156static int
1157rcu_torture_fakewriter(void *arg)
1158{
Paul E. McKenney51b11302014-01-27 11:49:39 -08001159 DEFINE_TORTURE_RANDOM(rand);
Josh Triplettb772e1d2006-10-04 02:17:13 -07001160
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001161 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
Linus Torvalds971eae72014-03-31 11:21:19 -07001162 set_user_nice(current, MAX_NICE);
Josh Triplettb772e1d2006-10-04 02:17:13 -07001163
1164 do {
Paul E. McKenney51b11302014-01-27 11:49:39 -08001165 schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
1166 udelay(torture_random(&rand) & 0x3ff);
Paul E. McKenney72472a02012-05-29 17:50:51 -07001167 if (cur_ops->cb_barrier != NULL &&
Paul E. McKenney51b11302014-01-27 11:49:39 -08001168 torture_random(&rand) % (nfakewriters * 8) == 0) {
Paul E. McKenney72472a02012-05-29 17:50:51 -07001169 cur_ops->cb_barrier();
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001170 } else if (gp_normal == gp_exp) {
Paul E. McKenneyeb033992017-12-08 10:48:41 -08001171 if (cur_ops->sync && torture_random(&rand) & 0x80)
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001172 cur_ops->sync();
Paul E. McKenneyeb033992017-12-08 10:48:41 -08001173 else if (cur_ops->exp_sync)
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001174 cur_ops->exp_sync();
Paul E. McKenneyeb033992017-12-08 10:48:41 -08001175 } else if (gp_normal && cur_ops->sync) {
Paul E. McKenney72472a02012-05-29 17:50:51 -07001176 cur_ops->sync();
Paul E. McKenneyeb033992017-12-08 10:48:41 -08001177 } else if (cur_ops->exp_sync) {
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001178 cur_ops->exp_sync();
1179 }
Paul E. McKenney628edaa2014-01-31 11:57:43 -08001180 stutter_wait("rcu_torture_fakewriter");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001181 } while (!torture_must_stop());
Josh Triplettb772e1d2006-10-04 02:17:13 -07001182
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001183 torture_kthread_stopping("rcu_torture_fakewriter");
Josh Triplettb772e1d2006-10-04 02:17:13 -07001184 return 0;
1185}
1186
Paul E. McKenneyf34c85852017-07-20 15:27:32 -07001187static void rcu_torture_timer_cb(struct rcu_head *rhp)
1188{
1189 kfree(rhp);
1190}
1191
Josh Triplettb772e1d2006-10-04 02:17:13 -07001192/*
Paul E. McKenney2397d072018-05-25 07:29:25 -07001193 * Do one extension of an RCU read-side critical section using the
1194 * current reader state in readstate (set to zero for initial entry
1195 * to extended critical section), set the new state as specified by
1196 * newstate (set to zero for final exit from extended critical section),
1197 * and random-number-generator state in trsp. If this is neither the
1198	 * beginning nor the end of the critical section and if there was actually a
1199 * change, do a ->read_delay().
1200 */
1201static void rcutorture_one_extend(int *readstate, int newstate,
1202 struct torture_random_state *trsp)
1203{
1204 int idxnew = -1;
1205 int idxold = *readstate;
1206 int statesnew = ~*readstate & newstate;
1207 int statesold = *readstate & ~newstate;
1208
1209 WARN_ON_ONCE(idxold < 0);
1210 WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
1211
1212 /* First, put new protection in place to avoid critical-section gap. */
1213 if (statesnew & RCUTORTURE_RDR_BH)
1214 local_bh_disable();
1215 if (statesnew & RCUTORTURE_RDR_IRQ)
1216 local_irq_disable();
1217 if (statesnew & RCUTORTURE_RDR_PREEMPT)
1218 preempt_disable();
1219 if (statesnew & RCUTORTURE_RDR_RCU)
1220 idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
1221
1222 /* Next, remove old protection, irq first due to bh conflict. */
1223 if (statesold & RCUTORTURE_RDR_IRQ)
1224 local_irq_enable();
1225 if (statesold & RCUTORTURE_RDR_BH)
1226 local_bh_enable();
1227 if (statesold & RCUTORTURE_RDR_PREEMPT)
1228 preempt_enable();
1229 if (statesold & RCUTORTURE_RDR_RCU)
1230 cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
1231
1232 /* Delay if neither beginning nor end and there was a change. */
1233 if ((statesnew || statesold) && *readstate && newstate)
1234 cur_ops->read_delay(trsp);
1235
1236 /* Update the reader state. */
1237 if (idxnew == -1)
1238 idxnew = idxold & ~RCUTORTURE_RDR_MASK;
1239 WARN_ON_ONCE(idxnew < 0);
1240 WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
1241 *readstate = idxnew | newstate;
1242 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
1243 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
1244}
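/*
 * A worked example of the readstate encoding handled above (a sketch
 * only; the actual bit positions come from the RCUTORTURE_RDR_*
 * definitions earlier in this file):
 *
 *	int rs = 0;				-- no protection yet
 *	rcutorture_one_extend(&rs, RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RCU, trsp);
 *		-- disables bh and enters ->readlock(); the value returned
 *		   by ->readlock() (0 or 1, e.g. an SRCU array index) is
 *		   parked in the bits at RCUTORTURE_RDR_SHIFT and above.
 *	rcutorture_one_extend(&rs, RCUTORTURE_RDR_RCU, trsp);
 *		-- re-enables bh only (with a ->read_delay(), since this is
 *		   a mid-section change); the parked ->readlock() index is
 *		   carried along so the eventual ->readunlock() matches.
 *	rcutorture_one_extend(&rs, 0, trsp);
 *		-- final exit: ->readunlock(index) runs and all protection
 *		   flags in RCUTORTURE_RDR_MASK are cleared, which is what
 *		   the WARN_ON_ONCE() in rcu_torture_one_read() checks.
 */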
1245
1246/* Return the biggest extendables mask given current RCU and boot parameters. */
1247static int rcutorture_extend_mask_max(void)
1248{
1249 int mask;
1250
1251 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
1252 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
1253 mask = mask | RCUTORTURE_RDR_RCU;
1254 return mask;
1255}
1256
1257/* Return a random protection state mask, but with at least one bit set. */
1258static int
1259rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
1260{
1261 int mask = rcutorture_extend_mask_max();
Paul E. McKenneybf1bef52018-06-10 08:50:09 -07001262 unsigned long randmask1 = torture_random(trsp) >> 8;
1263 unsigned long randmask2 = randmask1 >> 1;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001264
1265 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
Paul E. McKenneybf1bef52018-06-10 08:50:09 -07001266 /* Half the time lots of bits, half the time only one bit. */
1267 if (randmask1 & 0x1)
1268 mask = mask & randmask2;
1269 else
1270 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
Paul E. McKenney2397d072018-05-25 07:29:25 -07001271 if ((mask & RCUTORTURE_RDR_IRQ) &&
1272 !(mask & RCUTORTURE_RDR_BH) &&
1273 (oldmask & RCUTORTURE_RDR_BH))
1274 mask |= RCUTORTURE_RDR_BH; /* Can't enable bh w/irq disabled. */
1275 if ((mask & RCUTORTURE_RDR_IRQ) &&
1276 !(mask & cur_ops->ext_irq_conflict) &&
1277 (oldmask & cur_ops->ext_irq_conflict))
1278 mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */
1279 return mask ?: RCUTORTURE_RDR_RCU;
1280}
1281
1282/*
1283 * Do a randomly selected number of extensions of an existing RCU read-side
1284 * critical section.
1285 */
1286static void rcutorture_loop_extend(int *readstate,
1287 struct torture_random_state *trsp)
1288{
1289 int i;
1290 int mask = rcutorture_extend_mask_max();
1291
1292 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
1293 if (!((mask - 1) & mask))
1294 return; /* Current RCU flavor not extendable. */
1295 i = (torture_random(trsp) >> 3) & RCUTORTURE_RDR_MAX_LOOPS;
1296 while (i--) {
1297 mask = rcutorture_extend_mask(*readstate, trsp);
1298 rcutorture_one_extend(readstate, mask, trsp);
1299 }
1300}
1301
1302/*
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001303 * Do one read-side critical section, returning false if there was
1304 * no data to read. Can be invoked both from process context and
1305 * from a timer handler.
1306 */
1307static bool rcu_torture_one_read(struct torture_random_state *trsp)
1308{
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001309 unsigned long started;
1310 unsigned long completed;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001311 int newstate;
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001312 struct rcu_torture *p;
1313 int pipe_count;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001314 int readstate = 0;
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001315 unsigned long long ts;
1316
Paul E. McKenney2397d072018-05-25 07:29:25 -07001317 newstate = rcutorture_extend_mask(readstate, trsp);
1318 rcutorture_one_extend(&readstate, newstate, trsp);
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001319 started = cur_ops->get_gp_seq();
1320 ts = rcu_trace_clock_local();
1321 p = rcu_dereference_check(rcu_torture_current,
1322 rcu_read_lock_bh_held() ||
1323 rcu_read_lock_sched_held() ||
1324 srcu_read_lock_held(srcu_ctlp) ||
1325 torturing_tasks());
1326 if (p == NULL) {
1327 /* Wait for rcu_torture_writer to get underway */
Paul E. McKenney2397d072018-05-25 07:29:25 -07001328 rcutorture_one_extend(&readstate, 0, trsp);
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001329 return false;
1330 }
1331 if (p->rtort_mbtest == 0)
1332 atomic_inc(&n_rcu_torture_mberror);
Paul E. McKenney2397d072018-05-25 07:29:25 -07001333 rcutorture_loop_extend(&readstate, trsp);
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001334 preempt_disable();
1335 pipe_count = p->rtort_pipe_count;
1336 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1337 /* Should not happen, but... */
1338 pipe_count = RCU_TORTURE_PIPE_LEN;
1339 }
1340 completed = cur_ops->get_gp_seq();
1341 if (pipe_count > 1) {
1342 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1343 ts, started, completed);
1344 rcu_ftrace_dump(DUMP_ALL);
1345 }
1346 __this_cpu_inc(rcu_torture_count[pipe_count]);
1347 completed = rcutorture_seq_diff(completed, started);
1348 if (completed > RCU_TORTURE_PIPE_LEN) {
1349 /* Should not happen, but... */
1350 completed = RCU_TORTURE_PIPE_LEN;
1351 }
1352 __this_cpu_inc(rcu_torture_batch[completed]);
1353 preempt_enable();
Paul E. McKenney2397d072018-05-25 07:29:25 -07001354 rcutorture_one_extend(&readstate, 0, trsp);
1355 WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001356 return true;
1357}
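/*
 * For the classic RCU flavor, the protection juggled above reduces to
 * the usual read-side idiom (sketch only; do_something_with() is an
 * invented name and foo_ptr reuses the writer-side sketch earlier in
 * this file):
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(foo_ptr);
 *	if (p)
 *		do_something_with(p->data);	-- no sleeping in here
 *	rcu_read_unlock();
 *
 * On top of that idiom, rcu_torture_one_read() checks ->rtort_mbtest
 * and ->rtort_pipe_count: a pipe count greater than 1 means the reader
 * could still reach an element after the grace period that should have
 * retired it, which is exactly the breakage this test exists to catch.
 */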
1358
Paul E. McKenney3025520e2018-05-22 11:38:47 -07001359static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
1360
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001361/*
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001362 * RCU torture reader from timer handler. Dereferences rcu_torture_current,
1363 * incrementing the corresponding element of the pipeline array. The
1364	 * counter in the element should never be greater than 1; otherwise, the
1365 * RCU implementation is broken.
1366 */
Kees Cookfd30b712017-10-22 17:58:54 -07001367static void rcu_torture_timer(struct timer_list *unused)
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001368{
Paul E. McKenney8da9a592018-05-22 11:17:51 -07001369 atomic_long_inc(&n_rcu_torture_timers);
Paul E. McKenney241b4252018-05-22 11:59:31 -07001370 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));
Paul E. McKenneyf34c85852017-07-20 15:27:32 -07001371
1372 /* Test call_rcu() invocation from interrupt handler. */
1373 if (cur_ops->call) {
1374 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
1375
1376 if (rhp)
1377 cur_ops->call(rhp, rcu_torture_timer_cb);
1378 }
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001379}
1380
1381/*
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001382 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
1383 * incrementing the corresponding element of the pipeline array. The
1384	 * counter in the element should never be greater than 1; otherwise, the
1385 * RCU implementation is broken.
1386 */
1387static int
1388rcu_torture_reader(void *arg)
1389{
Paul E. McKenney51b11302014-01-27 11:49:39 -08001390 DEFINE_TORTURE_RANDOM(rand);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001391 struct timer_list t;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001392
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001393 VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
Linus Torvalds971eae72014-03-31 11:21:19 -07001394 set_user_nice(current, MAX_NICE);
Paul E. McKenney0acc5122009-06-25 09:08:17 -07001395 if (irqreader && cur_ops->irq_capable)
Kees Cookfd30b712017-10-22 17:58:54 -07001396 timer_setup_on_stack(&t, rcu_torture_timer, 0);
Ingo Molnardbdf65b2005-11-13 16:07:22 -08001397
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001398 do {
Paul E. McKenney0acc5122009-06-25 09:08:17 -07001399 if (irqreader && cur_ops->irq_capable) {
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001400 if (!timer_pending(&t))
Paul E. McKenney6155fec2010-02-22 17:05:04 -08001401 mod_timer(&t, jiffies + 1);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001402 }
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001403 if (!rcu_torture_one_read(&rand))
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001404 schedule_timeout_interruptible(HZ);
Paul E. McKenney628edaa2014-01-31 11:57:43 -08001405 stutter_wait("rcu_torture_reader");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001406 } while (!torture_must_stop());
Thomas Gleixner424c1b62014-03-23 08:58:27 -07001407 if (irqreader && cur_ops->irq_capable) {
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001408 del_timer_sync(&t);
Thomas Gleixner424c1b62014-03-23 08:58:27 -07001409 destroy_timer_on_stack(&t);
1410 }
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001411 torture_kthread_stopping("rcu_torture_reader");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001412 return 0;
1413}
1414
1415/*
Joe Percheseea203f2014-07-14 09:16:15 -04001416 * Print torture statistics. Caller must ensure that there is only
1417 * one call to this function at a given time!!! This is normally
1418 * accomplished by relying on the module system to only have one copy
1419 * of the module loaded, and then by giving the rcu_torture_stats
1420 * kthread full control (or the init/cleanup functions when rcu_torture_stats
1421 * thread is not running).
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001422 */
Chen Gangd1008952013-11-07 10:30:25 +08001423static void
Joe Percheseea203f2014-07-14 09:16:15 -04001424rcu_torture_stats_print(void)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001425{
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001426 int cpu;
1427 int i;
1428 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1429 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001430 static unsigned long rtcv_snap = ULONG_MAX;
Paul E. McKenney0032f4e2017-08-30 10:40:17 -07001431 static bool splatted;
Paul E. McKenney4ffa6692016-06-30 11:56:38 -07001432 struct task_struct *wtp;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001433
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08001434 for_each_possible_cpu(cpu) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001435 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1436 pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
1437 batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
1438 }
1439 }
1440 for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
1441 if (pipesummary[i] != 0)
1442 break;
1443 }
Joe Percheseea203f2014-07-14 09:16:15 -04001444
1445 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1446 pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
1447 rcu_torture_current,
1448 rcu_torture_current_version,
1449 list_empty(&rcu_torture_freelist),
1450 atomic_read(&n_rcu_torture_alloc),
1451 atomic_read(&n_rcu_torture_alloc_fail),
1452 atomic_read(&n_rcu_torture_free));
SeongJae Park472213a2016-08-13 15:54:35 +09001453 pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
Joe Percheseea203f2014-07-14 09:16:15 -04001454 atomic_read(&n_rcu_torture_mberror),
SeongJae Park472213a2016-08-13 15:54:35 +09001455 n_rcu_torture_barrier_error,
Joe Percheseea203f2014-07-14 09:16:15 -04001456 n_rcu_torture_boost_ktrerror,
1457 n_rcu_torture_boost_rterror);
1458 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
1459 n_rcu_torture_boost_failure,
1460 n_rcu_torture_boosts,
Paul E. McKenney8da9a592018-05-22 11:17:51 -07001461 atomic_long_read(&n_rcu_torture_timers));
Joe Percheseea203f2014-07-14 09:16:15 -04001462 torture_onoff_stats();
Paul E. McKenney38706bc2014-08-18 21:12:17 -07001463 pr_cont("barrier: %ld/%ld:%ld ",
Joe Percheseea203f2014-07-14 09:16:15 -04001464 n_barrier_successes,
1465 n_barrier_attempts,
1466 n_rcu_torture_barrier_error);
Paul E. McKenney38706bc2014-08-18 21:12:17 -07001467 pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
Joe Percheseea203f2014-07-14 09:16:15 -04001468
1469 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001470 if (atomic_read(&n_rcu_torture_mberror) != 0 ||
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001471 n_rcu_torture_barrier_error != 0 ||
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001472 n_rcu_torture_boost_ktrerror != 0 ||
1473 n_rcu_torture_boost_rterror != 0 ||
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001474 n_rcu_torture_boost_failure != 0 ||
1475 i > 1) {
Joe Percheseea203f2014-07-14 09:16:15 -04001476 pr_cont("%s", "!!! ");
Paul E. McKenney996417d2005-11-18 01:10:50 -08001477 atomic_inc(&n_rcu_torture_error);
Ingo Molnar5af970a2008-06-18 10:09:48 +02001478 WARN_ON_ONCE(1);
Paul E. McKenney996417d2005-11-18 01:10:50 -08001479 }
Joe Percheseea203f2014-07-14 09:16:15 -04001480 pr_cont("Reader Pipe: ");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001481 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
Joe Percheseea203f2014-07-14 09:16:15 -04001482 pr_cont(" %ld", pipesummary[i]);
1483 pr_cont("\n");
1484
1485 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1486 pr_cont("Reader Batch: ");
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07001487 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
Joe Percheseea203f2014-07-14 09:16:15 -04001488 pr_cont(" %ld", batchsummary[i]);
1489 pr_cont("\n");
1490
1491 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1492 pr_cont("Free-Block Circulation: ");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001493 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
Joe Percheseea203f2014-07-14 09:16:15 -04001494 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001495 }
Joe Percheseea203f2014-07-14 09:16:15 -04001496 pr_cont("\n");
1497
Josh Triplettc8e5b162007-05-08 00:33:20 -07001498 if (cur_ops->stats)
Joe Percheseea203f2014-07-14 09:16:15 -04001499 cur_ops->stats();
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001500 if (rtcv_snap == rcu_torture_current_version &&
1501 rcu_torture_current != NULL) {
Paul E. McKenney7f6733c2017-04-18 17:17:35 -07001502 int __maybe_unused flags = 0;
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001503 unsigned long __maybe_unused gp_seq = 0;
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001504
1505 rcutorture_get_gp_data(cur_ops->ttype,
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001506 &flags, &gp_seq);
Paul E. McKenney7f6733c2017-04-18 17:17:35 -07001507 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001508 &flags, &gp_seq);
Paul E. McKenney4ffa6692016-06-30 11:56:38 -07001509 wtp = READ_ONCE(writer_task);
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001510 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
Paul E. McKenney18aff332015-11-17 13:35:28 -08001511 rcu_torture_writer_state_getname(),
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001512 rcu_torture_writer_state, gp_seq, flags,
Paul E. McKenney808de392017-06-19 10:03:22 -07001513 wtp == NULL ? ~0UL : wtp->state,
1514 wtp == NULL ? -1 : (int)task_cpu(wtp));
Paul E. McKenney0032f4e2017-08-30 10:40:17 -07001515 if (!splatted && wtp) {
1516 sched_show_task(wtp);
1517 splatted = true;
1518 }
Paul E. McKenneyafea2272014-03-12 07:10:41 -07001519 show_rcu_gp_kthreads();
Paul E. McKenney274529b2016-03-21 19:46:04 -07001520 rcu_ftrace_dump(DUMP_ALL);
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001521 }
1522 rtcv_snap = rcu_torture_current_version;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001523}
1524
1525/*
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001526 * Periodically prints torture statistics, if periodic statistics printing
1527 * was specified via the stat_interval module parameter.
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001528 */
1529static int
1530rcu_torture_stats(void *arg)
1531{
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001532 VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001533 do {
1534 schedule_timeout_interruptible(stat_interval * HZ);
1535 rcu_torture_stats_print();
Paul E. McKenneyf67a3352014-01-29 07:40:27 -08001536 torture_shutdown_absorb("rcu_torture_stats");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001537 } while (!torture_must_stop());
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001538 torture_kthread_stopping("rcu_torture_stats");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001539 return 0;
1540}
1541
Paul E. McKenney95c38322006-03-24 03:15:58 -08001542static inline void
Steven Rostedt (Red Hat)e66c33d2013-07-12 16:50:28 -04001543rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
Paul E. McKenney95c38322006-03-24 03:15:58 -08001544{
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001545 pr_alert("%s" TORTURE_FLAG
1546 "--- %s: nreaders=%d nfakewriters=%d "
1547 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1548 "shuffle_interval=%d stutter=%d irqreader=%d "
1549 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1550 "test_boost=%d/%d test_boost_interval=%d "
1551 "test_boost_duration=%d shutdown_secs=%d "
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001552 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
Paul E. McKenney67afeed2012-10-20 12:56:06 -07001553 "n_barrier_cbs=%d "
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001554 "onoff_interval=%d onoff_holdoff=%d\n",
1555 torture_type, tag, nrealreaders, nfakewriters,
1556 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1557 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1558 test_boost, cur_ops->can_boost,
1559 test_boost_interval, test_boost_duration, shutdown_secs,
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001560 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
Paul E. McKenney67afeed2012-10-20 12:56:06 -07001561 n_barrier_cbs,
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001562 onoff_interval, onoff_holdoff);
Paul E. McKenney95c38322006-03-24 03:15:58 -08001563}
1564
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001565static int rcutorture_booster_cleanup(unsigned int cpu)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001566{
1567 struct task_struct *t;
1568
1569 if (boost_tasks[cpu] == NULL)
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001570 return 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001571 mutex_lock(&boost_mutex);
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001572 t = boost_tasks[cpu];
1573 boost_tasks[cpu] = NULL;
Joel Fernandes (Google)450efca2018-06-10 16:45:43 -07001574 rcu_torture_enable_rt_throttle();
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001575 mutex_unlock(&boost_mutex);
1576
1577 /* This must be outside of the mutex, otherwise deadlock! */
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001578 torture_stop_kthread(rcu_torture_boost, t);
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001579 return 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001580}
1581
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001582static int rcutorture_booster_init(unsigned int cpu)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001583{
1584 int retval;
1585
1586 if (boost_tasks[cpu] != NULL)
1587 return 0; /* Already created, nothing more to do. */
1588
1589 /* Don't allow time recalculation while creating a new task. */
1590 mutex_lock(&boost_mutex);
Joel Fernandes (Google)450efca2018-06-10 16:45:43 -07001591 rcu_torture_disable_rt_throttle();
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001592 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
Eric Dumazet1f288092011-06-16 15:53:18 -07001593 boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
1594 cpu_to_node(cpu),
1595 "rcu_torture_boost");
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001596 if (IS_ERR(boost_tasks[cpu])) {
1597 retval = PTR_ERR(boost_tasks[cpu]);
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001598 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001599 n_rcu_torture_boost_ktrerror++;
1600 boost_tasks[cpu] = NULL;
1601 mutex_unlock(&boost_mutex);
1602 return retval;
1603 }
1604 kthread_bind(boost_tasks[cpu], cpu);
1605 wake_up_process(boost_tasks[cpu]);
1606 mutex_unlock(&boost_mutex);
1607 return 0;
1608}
1609
Paul E. McKenneyd5f546d2011-11-04 11:44:12 -07001610/*
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001611 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
1612 * induces a CPU stall for the time specified by stall_cpu.
1613 */
Paul Gortmaker49fb4c62013-06-19 14:52:21 -04001614static int rcu_torture_stall(void *args)
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001615{
1616 unsigned long stop_at;
1617
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001618 VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001619 if (stall_cpu_holdoff > 0) {
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001620 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001621 schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001622 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001623 }
1624 if (!kthread_should_stop()) {
Arnd Bergmann622be33f2018-06-18 16:47:34 +02001625 stop_at = ktime_get_seconds() + stall_cpu;
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001626 /* RCU CPU stall is expected behavior in following code. */
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001627 rcu_read_lock();
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001628 if (stall_cpu_irqsoff)
1629 local_irq_disable();
1630 else
1631 preempt_disable();
1632 pr_alert("rcu_torture_stall start on CPU %d.\n",
1633 smp_processor_id());
Arnd Bergmann622be33f2018-06-18 16:47:34 +02001634 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
1635 stop_at))
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001636 continue; /* Induce RCU CPU stall warning. */
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001637 if (stall_cpu_irqsoff)
1638 local_irq_enable();
1639 else
1640 preempt_enable();
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001641 rcu_read_unlock();
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001642 pr_alert("rcu_torture_stall end.\n");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001643 }
Paul E. McKenneyf67a3352014-01-29 07:40:27 -08001644 torture_shutdown_absorb("rcu_torture_stall");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001645 while (!kthread_should_stop())
1646 schedule_timeout_interruptible(10 * HZ);
1647 return 0;
1648}
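/*
 * Illustrative use of the stall kthread above: loading the module with,
 * say, stall_cpu=30 stall_cpu_holdoff=60 waits one minute and then
 * spins for 30 seconds inside an RCU read-side critical section with
 * preemption (or, with stall_cpu_irqsoff=1, interrupts) disabled.
 * Provided stall_cpu exceeds the kernel's configured RCU CPU stall
 * timeout, the flavor under test should emit a stall warning.
 */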
1649
1650/* Spawn CPU-stall kthread, if stall_cpu specified. */
1651static int __init rcu_torture_stall_init(void)
1652{
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001653 if (stall_cpu <= 0)
1654 return 0;
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08001655 return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001656}
1657
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001658/* Callback function for RCU barrier testing. */
Rashika Kheriab3b8a4d2014-02-27 17:16:57 +05301659static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001660{
1661 atomic_inc(&barrier_cbs_invoked);
1662}
1663
1664/* kthread function to register callbacks used to test RCU barriers. */
1665static int rcu_torture_barrier_cbs(void *arg)
1666{
1667 long myid = (long)arg;
Paul E. McKenneyc6ebcbb2012-05-28 19:21:41 -07001668 bool lastphase = 0;
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07001669 bool newphase;
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001670 struct rcu_head rcu;
1671
1672 init_rcu_head_on_stack(&rcu);
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001673 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
Linus Torvalds971eae72014-03-31 11:21:19 -07001674 set_user_nice(current, MAX_NICE);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001675 do {
1676 wait_event(barrier_cbs_wq[myid],
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07001677 (newphase =
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07001678 smp_load_acquire(&barrier_phase)) != lastphase ||
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001679 torture_must_stop());
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07001680 lastphase = newphase;
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001681 if (torture_must_stop())
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001682 break;
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07001683 /*
1684 * The above smp_load_acquire() ensures barrier_phase load
Paul E. McKenneyaab05732016-05-02 12:20:51 -07001685 * is ordered before the following ->call().
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07001686 */
Paul E. McKenney0aa67e72016-03-30 11:40:44 -07001687 local_irq_disable(); /* Just to test no-irq call_rcu(). */
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001688 cur_ops->call(&rcu, rcu_torture_barrier_cbf);
Paul E. McKenney0aa67e72016-03-30 11:40:44 -07001689 local_irq_enable();
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001690 if (atomic_dec_and_test(&barrier_cbs_count))
1691 wake_up(&barrier_wq);
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001692 } while (!torture_must_stop());
Paul E. McKenney69c60452014-07-01 11:59:36 -07001693 if (cur_ops->cb_barrier != NULL)
1694 cur_ops->cb_barrier();
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001695 destroy_rcu_head_on_stack(&rcu);
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001696 torture_kthread_stopping("rcu_torture_barrier_cbs");
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001697 return 0;
1698}
1699
1700/* kthread function to drive and coordinate RCU barrier testing. */
1701static int rcu_torture_barrier(void *arg)
1702{
1703 int i;
1704
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001705 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001706 do {
1707 atomic_set(&barrier_cbs_invoked, 0);
1708 atomic_set(&barrier_cbs_count, n_barrier_cbs);
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07001709 /* Ensure barrier_phase ordered after prior assignments. */
1710 smp_store_release(&barrier_phase, !barrier_phase);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001711 for (i = 0; i < n_barrier_cbs; i++)
1712 wake_up(&barrier_cbs_wq[i]);
1713 wait_event(barrier_wq,
1714 atomic_read(&barrier_cbs_count) == 0 ||
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001715 torture_must_stop());
1716 if (torture_must_stop())
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001717 break;
1718 n_barrier_attempts++;
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07001719 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001720 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
1721 n_rcu_torture_barrier_error++;
Paul E. McKenney7602de4a2014-12-17 18:39:54 -08001722 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
1723 atomic_read(&barrier_cbs_invoked),
1724 n_barrier_cbs);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001725 WARN_ON_ONCE(1);
Joel Fernandes (Google)bf5b6432018-06-19 15:14:19 -07001726 } else {
1727 n_barrier_successes++;
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001728 }
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001729 schedule_timeout_interruptible(HZ / 10);
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001730 } while (!torture_must_stop());
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001731 torture_kthread_stopping("rcu_torture_barrier");
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001732 return 0;
1733}
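/*
 * The barrier kthreads above are checking the core rcu_barrier()-family
 * guarantee: once ->cb_barrier() returns, every callback posted by an
 * earlier ->call() has been invoked.  A minimal sketch of the pattern
 * being validated (classic RCU flavor; free_it_cb() and teardown() are
 * invented names):
 *
 *	call_rcu(&p->rh, free_it_cb);	-- posted from any CPU
 *	...
 *	rcu_barrier();			-- waits for free_it_cb() and every
 *					   other pending callback to finish
 *	teardown();			-- now safe, e.g. at module unload
 *
 * The test drives this concurrently from n_barrier_cbs kthreads and
 * counts invocations in barrier_cbs_invoked; any shortfall after the
 * barrier is reported as an error.
 */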
1734
1735/* Initialize RCU barrier testing. */
1736static int rcu_torture_barrier_init(void)
1737{
1738 int i;
1739 int ret;
1740
Paul E. McKenneyd9eba7682015-05-14 15:35:43 -07001741 if (n_barrier_cbs <= 0)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001742 return 0;
1743 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001744 pr_alert("%s" TORTURE_FLAG
1745 " Call or barrier ops missing for %s,\n",
1746 torture_type, cur_ops->name);
1747 pr_alert("%s" TORTURE_FLAG
1748 " RCU barrier testing omitted from run.\n",
1749 torture_type);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001750 return 0;
1751 }
1752 atomic_set(&barrier_cbs_count, 0);
1753 atomic_set(&barrier_cbs_invoked, 0);
1754 barrier_cbs_tasks =
Paul E. McKenney68a675d2017-12-01 14:26:56 -08001755 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001756 GFP_KERNEL);
1757 barrier_cbs_wq =
Paul E. McKenney68a675d2017-12-01 14:26:56 -08001758 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
Sasha Levinde5e6432012-12-20 14:11:28 -05001759 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001760 return -ENOMEM;
1761 for (i = 0; i < n_barrier_cbs; i++) {
1762 init_waitqueue_head(&barrier_cbs_wq[i]);
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08001763 ret = torture_create_kthread(rcu_torture_barrier_cbs,
1764 (void *)(long)i,
1765 barrier_cbs_tasks[i]);
1766 if (ret)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001767 return ret;
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001768 }
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08001769 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001770}
1771
1772/* Clean up after RCU barrier testing. */
1773static void rcu_torture_barrier_cleanup(void)
1774{
1775 int i;
1776
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001777 torture_stop_kthread(rcu_torture_barrier, barrier_task);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001778 if (barrier_cbs_tasks != NULL) {
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001779 for (i = 0; i < n_barrier_cbs; i++)
1780 torture_stop_kthread(rcu_torture_barrier_cbs,
1781 barrier_cbs_tasks[i]);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001782 kfree(barrier_cbs_tasks);
1783 barrier_cbs_tasks = NULL;
1784 }
1785 if (barrier_cbs_wq != NULL) {
1786 kfree(barrier_cbs_wq);
1787 barrier_cbs_wq = NULL;
1788 }
1789}
1790
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07001791static bool rcu_torture_can_boost(void)
1792{
1793 static int boost_warn_once;
1794 int prio;
1795
1796 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
1797 return false;
1798
1799 prio = rcu_get_gp_kthreads_prio();
1800 if (!prio)
1801 return false;
1802
1803 if (prio < 2) {
1804 if (boost_warn_once == 1)
1805 return false;
1806
Joel Fernandes (Google)bf5b6432018-06-19 15:14:19 -07001807 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07001808 boost_warn_once = 1;
1809 return false;
1810 }
1811
1812 return true;
1813}
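/*
 * For example (illustrative only), booting with rcutree.kthread_prio=2
 * and loading rcutorture with test_boost=2 satisfies both checks above:
 * test_boost=2 forces the boost test on regardless of ->can_boost, and
 * kthread_prio=2 gives the grace-period kthreads the priority of at
 * least 2 that rcu_torture_can_boost() requires.
 */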
1814
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001815static enum cpuhp_state rcutor_hp;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001816
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001817static void
1818rcu_torture_cleanup(void)
1819{
Paul E. McKenney034777d2018-04-19 08:43:11 -07001820 int flags = 0;
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001821 unsigned long gp_seq = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001822 int i;
1823
Davidlohr Buesod36a7a02014-09-11 20:40:21 -07001824 if (torture_cleanup_begin()) {
Paul E. McKenney343e9092008-12-15 16:13:07 -08001825 if (cur_ops->cb_barrier != NULL)
1826 cur_ops->cb_barrier();
1827 return;
1828 }
Paul E. McKenney3808dc92014-01-28 15:29:21 -08001829
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001830 rcu_torture_barrier_cleanup();
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001831 torture_stop_kthread(rcu_torture_stall, stall_task);
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001832 torture_stop_kthread(rcu_torture_writer, writer_task);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001833
Josh Triplettc8e5b162007-05-08 00:33:20 -07001834 if (reader_tasks) {
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001835 for (i = 0; i < nrealreaders; i++)
1836 torture_stop_kthread(rcu_torture_reader,
1837 reader_tasks[i]);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001838 kfree(reader_tasks);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001839 }
1840 rcu_torture_current = NULL;
1841
Josh Triplettc8e5b162007-05-08 00:33:20 -07001842 if (fakewriter_tasks) {
Josh Triplettb772e1d2006-10-04 02:17:13 -07001843 for (i = 0; i < nfakewriters; i++) {
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001844 torture_stop_kthread(rcu_torture_fakewriter,
1845 fakewriter_tasks[i]);
Josh Triplettb772e1d2006-10-04 02:17:13 -07001846 }
1847 kfree(fakewriter_tasks);
1848 fakewriter_tasks = NULL;
1849 }
1850
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001851 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
1852 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
1853 pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
1854 cur_ops->name, gp_seq, flags);
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001855 torture_stop_kthread(rcu_torture_stats, stats_task);
1856 torture_stop_kthread(rcu_torture_fqs, fqs_task);
Paul E. McKenney38706bc2014-08-18 21:12:17 -07001857 for (i = 0; i < ncbflooders; i++)
1858 torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07001859 if (rcu_torture_can_boost())
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001860 cpuhp_remove_state(rcutor_hp);
Paul E. McKenneybf66f182010-01-04 15:09:10 -08001861
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07001862 /*
1863 * Wait for all RCU callbacks to fire, then do flavor-specific
1864 * cleanup operations.
1865 */
Paul E. McKenney23269742008-05-12 21:21:05 +02001866 if (cur_ops->cb_barrier != NULL)
1867 cur_ops->cb_barrier();
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07001868 if (cur_ops->cleanup != NULL)
1869 cur_ops->cleanup();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001870
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001871 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07001872
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001873 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001874 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
Paul E. McKenney2e9e8082014-01-28 15:58:22 -08001875 else if (torture_onoff_failures())
Paul E. McKenney091541b2012-01-10 12:51:14 -08001876 rcu_torture_print_module_parms(cur_ops,
1877 "End of test: RCU_HOTPLUG");
Paul E. McKenney95c38322006-03-24 03:15:58 -08001878 else
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001879 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
Davidlohr Buesod36a7a02014-09-11 20:40:21 -07001880 torture_cleanup_end();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001881}
1882
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07001883#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
1884static void rcu_torture_leak_cb(struct rcu_head *rhp)
1885{
1886}
1887
1888static void rcu_torture_err_cb(struct rcu_head *rhp)
1889{
1890 /*
1891 * This -might- happen due to race conditions, but is unlikely.
1892 * The scenario that leads to this happening is that the
1893 * first of the pair of duplicate callbacks is queued,
1894 * someone else starts a grace period that includes that
1895 * callback, then the second of the pair must wait for the
1896 * next grace period. Unlikely, but can happen. If it
1897 * does happen, the debug-objects subsystem won't have splatted.
1898 */
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08001899 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07001900}
1901#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
1902
1903/*
1904 * Verify that double-free causes debug-objects to complain, but only
1905 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test
1906 * cannot be carried out.
1907 */
1908static void rcu_test_debug_objects(void)
1909{
1910#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
1911 struct rcu_head rh1;
1912 struct rcu_head rh2;
1913
1914 init_rcu_head_on_stack(&rh1);
1915 init_rcu_head_on_stack(&rh2);
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08001916 pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07001917
1918 /* Try to queue the rh2 pair of callbacks for the same grace period. */
1919 preempt_disable(); /* Prevent preemption from interrupting test. */
1920 rcu_read_lock(); /* Make it impossible to finish a grace period. */
1921 call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
1922 local_irq_disable(); /* Make it harder to start a new grace period. */
1923 call_rcu(&rh2, rcu_torture_leak_cb);
1924 call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
1925 local_irq_enable();
1926 rcu_read_unlock();
1927 preempt_enable();
1928
1929 /* Wait for them all to get done so we can safely return. */
1930 rcu_barrier();
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08001931 pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07001932 destroy_rcu_head_on_stack(&rh1);
1933 destroy_rcu_head_on_stack(&rh2);
1934#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08001935 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07001936#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
1937}
1938
Josh Triplett6f8bc5002007-05-08 00:25:24 -07001939static int __init
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001940rcu_torture_init(void)
1941{
1942 int i;
1943 int cpu;
1944 int firsterr = 0;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001945 static struct rcu_torture_ops *torture_ops[] = {
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07001946 &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
Paul E. McKenney2397d072018-05-25 07:29:25 -07001947 &busted_srcud_ops, &sched_ops, &tasks_ops,
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001948 };
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001949
Paul E. McKenneya2f25772017-11-21 20:19:17 -08001950 if (!torture_init_begin(torture_type, verbose))
Paul E. McKenney52280842014-04-07 09:14:11 -07001951 return -EBUSY;
Paul E. McKenney343e9092008-12-15 16:13:07 -08001952
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001953 /* Process args and tell the world that the torturer is on the job. */
Josh Triplettade5fb82007-05-08 00:33:22 -07001954 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07001955 cur_ops = torture_ops[i];
Josh Triplettade5fb82007-05-08 00:33:22 -07001956 if (strcmp(torture_type, cur_ops->name) == 0)
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07001957 break;
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07001958 }
Josh Triplettade5fb82007-05-08 00:33:22 -07001959 if (i == ARRAY_SIZE(torture_ops)) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001960 pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
1961 torture_type);
1962 pr_alert("rcu-torture types:");
Paul E. McKenneycf886c42009-10-25 19:03:54 -07001963 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001964 pr_alert(" %s", torture_ops[i]->name);
1965 pr_alert("\n");
Paul E. McKenney889d4872015-08-24 11:37:58 -07001966 firsterr = -EINVAL;
1967 goto unwind;
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07001968 }
Paul E. McKenneybf66f182010-01-04 15:09:10 -08001969 if (cur_ops->fqs == NULL && fqs_duration != 0) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001970 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
Paul E. McKenneybf66f182010-01-04 15:09:10 -08001971 fqs_duration = 0;
1972 }
Josh Triplettc8e5b162007-05-08 00:33:20 -07001973 if (cur_ops->init)
Paul E. McKenney889d4872015-08-24 11:37:58 -07001974 cur_ops->init();
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07001975
Paul E. McKenney64e4b432014-03-12 10:26:35 -07001976 if (nreaders >= 0) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001977 nrealreaders = nreaders;
Paul E. McKenney64e4b432014-03-12 10:26:35 -07001978 } else {
Paul E. McKenney3838cc12015-03-12 13:55:48 -07001979 nrealreaders = num_online_cpus() - 2 - nreaders;
Paul E. McKenney64e4b432014-03-12 10:26:35 -07001980 if (nrealreaders <= 0)
1981 nrealreaders = 1;
1982 }
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001983 rcu_torture_print_module_parms(cur_ops, "Start of test");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001984
1985 /* Set up the freelist. */
1986
1987 INIT_LIST_HEAD(&rcu_torture_freelist);
Ahmed S. Darwish788e7702007-05-08 00:33:14 -07001988 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
Paul E. McKenney996417d2005-11-18 01:10:50 -08001989 rcu_tortures[i].rtort_mbtest = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001990 list_add_tail(&rcu_tortures[i].rtort_free,
1991 &rcu_torture_freelist);
1992 }
1993
1994 /* Initialize the statistics so that each run gets its own numbers. */
1995
1996 rcu_torture_current = NULL;
1997 rcu_torture_current_version = 0;
1998 atomic_set(&n_rcu_torture_alloc, 0);
1999 atomic_set(&n_rcu_torture_alloc_fail, 0);
2000 atomic_set(&n_rcu_torture_free, 0);
Paul E. McKenney996417d2005-11-18 01:10:50 -08002001 atomic_set(&n_rcu_torture_mberror, 0);
2002 atomic_set(&n_rcu_torture_error, 0);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002003 n_rcu_torture_barrier_error = 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002004 n_rcu_torture_boost_ktrerror = 0;
2005 n_rcu_torture_boost_rterror = 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002006 n_rcu_torture_boost_failure = 0;
2007 n_rcu_torture_boosts = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002008 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2009 atomic_set(&rcu_torture_wcount[i], 0);
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08002010 for_each_possible_cpu(cpu) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002011 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2012 per_cpu(rcu_torture_count, cpu)[i] = 0;
2013 per_cpu(rcu_torture_batch, cpu)[i] = 0;
2014 }
2015 }
2016
2017 /* Start up the kthreads. */
2018
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002019 firsterr = torture_create_kthread(rcu_torture_writer, NULL,
2020 writer_task);
2021 if (firsterr)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002022 goto unwind;
Paul E. McKenney4444d852015-05-14 15:42:40 -07002023 if (nfakewriters > 0) {
Paul E. McKenney68a675d2017-12-01 14:26:56 -08002024 fakewriter_tasks = kcalloc(nfakewriters,
Paul E. McKenney4444d852015-05-14 15:42:40 -07002025 sizeof(fakewriter_tasks[0]),
2026 GFP_KERNEL);
2027 if (fakewriter_tasks == NULL) {
2028 VERBOSE_TOROUT_ERRSTRING("out of memory");
2029 firsterr = -ENOMEM;
2030 goto unwind;
2031 }
Josh Triplettb772e1d2006-10-04 02:17:13 -07002032 }
2033 for (i = 0; i < nfakewriters; i++) {
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002034 firsterr = torture_create_kthread(rcu_torture_fakewriter,
2035 NULL, fakewriter_tasks[i]);
2036 if (firsterr)
Josh Triplettb772e1d2006-10-04 02:17:13 -07002037 goto unwind;
Josh Triplettb772e1d2006-10-04 02:17:13 -07002038 }
Paul E. McKenney68a675d2017-12-01 14:26:56 -08002039 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002040 GFP_KERNEL);
2041 if (reader_tasks == NULL) {
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08002042 VERBOSE_TOROUT_ERRSTRING("out of memory");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002043 firsterr = -ENOMEM;
2044 goto unwind;
2045 }
2046 for (i = 0; i < nrealreaders; i++) {
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002047 firsterr = torture_create_kthread(rcu_torture_reader, NULL,
2048 reader_tasks[i]);
2049 if (firsterr)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002050 goto unwind;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002051 }
2052 if (stat_interval > 0) {
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002053 firsterr = torture_create_kthread(rcu_torture_stats, NULL,
2054 stats_task);
2055 if (firsterr)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002056 goto unwind;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002057 }
Paul E. McKenneye8e255f2015-05-14 16:55:45 -07002058 if (test_no_idle_hz && shuffle_interval > 0) {
Paul E. McKenney3808dc92014-01-28 15:29:21 -08002059 firsterr = torture_shuffle_init(shuffle_interval * HZ);
2060 if (firsterr)
Rusty Russell73d0a4b2009-03-30 22:05:16 -06002061 goto unwind;
Srivatsa Vaddagirid84f5202006-01-08 01:03:42 -08002062 }
Paul E. McKenneyd120f652008-06-18 05:21:44 -07002063 if (stutter < 0)
2064 stutter = 0;
2065 if (stutter) {
Paul E. McKenney628edaa2014-01-31 11:57:43 -08002066 firsterr = torture_stutter_init(stutter * HZ);
2067 if (firsterr)
Paul E. McKenneyd120f652008-06-18 05:21:44 -07002068 goto unwind;
Paul E. McKenneyd120f652008-06-18 05:21:44 -07002069 }
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002070 if (fqs_duration < 0)
2071 fqs_duration = 0;
2072 if (fqs_duration) {
Paul E. McKenney628edaa2014-01-31 11:57:43 -08002073 /* Create the fqs thread */
Paul E. McKenneyd0d06062014-03-17 20:56:45 -07002074 firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
2075 fqs_task);
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002076 if (firsterr)
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002077 goto unwind;
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002078 }
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002079 if (test_boost_interval < 1)
2080 test_boost_interval = 1;
2081 if (test_boost_duration < 2)
2082 test_boost_duration = 2;
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002083 if (rcu_torture_can_boost()) {
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002084
2085 boost_starttime = jiffies + test_boost_interval * HZ;
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02002086
2087 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
2088 rcutorture_booster_init,
2089 rcutorture_booster_cleanup);
2090 if (firsterr < 0)
2091 goto unwind;
2092 rcutor_hp = firsterr;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002093 }
Paul E. McKenney01025eb2014-01-31 15:15:02 -08002094 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
2095 if (firsterr)
Paul E. McKenneye991dbc2014-01-31 14:52:13 -08002096 goto unwind;
Paul E. McKenney028be122018-05-08 09:20:34 -07002097 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval);
Paul E. McKenney01025eb2014-01-31 15:15:02 -08002098 if (firsterr)
Paul E. McKenney37e377d2012-02-17 22:12:18 -08002099 goto unwind;
Paul E. McKenney01025eb2014-01-31 15:15:02 -08002100 firsterr = rcu_torture_stall_init();
2101 if (firsterr)
Paul E. McKenney37e377d2012-02-17 22:12:18 -08002102 goto unwind;
Paul E. McKenney01025eb2014-01-31 15:15:02 -08002103 firsterr = rcu_torture_barrier_init();
2104 if (firsterr)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002105 goto unwind;
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002106 if (object_debug)
2107 rcu_test_debug_objects();
Paul E. McKenney38706bc2014-08-18 21:12:17 -07002108 if (cbflood_n_burst > 0) {
2109 /* Create the cbflood threads */
2110 ncbflooders = (num_online_cpus() + 3) / 4;
2111 cbflood_task = kcalloc(ncbflooders, sizeof(*cbflood_task),
2112 GFP_KERNEL);
2113 if (!cbflood_task) {
2114 VERBOSE_TOROUT_ERRSTRING("out of memory");
2115 firsterr = -ENOMEM;
2116 goto unwind;
2117 }
2118 for (i = 0; i < ncbflooders; i++) {
2119 firsterr = torture_create_kthread(rcu_torture_cbflood,
2120 NULL,
2121 cbflood_task[i]);
2122 if (firsterr)
2123 goto unwind;
2124 }
2125 }
Paul E. McKenneyb5daa8f2014-01-30 13:38:09 -08002126 torture_init_end();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002127 return 0;
2128
2129unwind:
Paul E. McKenneyb5daa8f2014-01-30 13:38:09 -08002130 torture_init_end();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002131 rcu_torture_cleanup();
2132 return firsterr;
2133}
2134
2135module_init(rcu_torture_init);
2136module_exit(rcu_torture_cleanup);