/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Josh Triplett <josh@joshtriplett.org>
 *
 * See also: Documentation/RCU/torture.txt
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");


/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT     8      /* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK      ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH        0x01   /* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ       0x02   /*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT   0x04   /*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH       0x08   /*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED     0x10   /*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU       0x20   /*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS     6      /* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND \
        (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
         RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7    /* Maximum reader extensions. */
                                        /* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)

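/*
 * Illustrative note (not part of the original file): the reader state is
 * a single int, with the SRCU index stored above RCUTORTURE_RDR_SHIFT and
 * the extension bits below it.  For example, a reader holding SRCU index 1
 * with bh and preemption also disabled would be encoded as:
 *
 *      int readstate = (1 << RCUTORTURE_RDR_SHIFT) |
 *                      RCUTORTURE_RDR_BH | RCUTORTURE_RDR_PREEMPT;
 *
 * The index is recovered with readstate >> RCUTORTURE_RDR_SHIFT, and the
 * extension bits with readstate & RCUTORTURE_RDR_MASK.
 */
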
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
              "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
              "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
              "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
              "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
              "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
              "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
              "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
              "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
              "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
              "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
              "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
              "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
              "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
              "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

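/*
 * Example invocation (illustrative only; the parameter values are
 * arbitrary, not recommendations from the original file):
 *
 *      modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 *
 * This selects the SRCU ops vector defined below, runs eight reader
 * kthreads, and prints statistics every 30 seconds.
 */
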
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
        struct rcu_head rtort_rcu;
        int rtort_pipe_count;
        struct list_head rtort_free;
        int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static struct list_head rcu_torture_removed;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY        0
#define RTWS_DELAY              1
#define RTWS_REPLACE            2
#define RTWS_DEF_FREE           3
#define RTWS_EXP_SYNC           4
#define RTWS_COND_GET           5
#define RTWS_COND_SYNC          6
#define RTWS_SYNC               7
#define RTWS_STUTTER            8
#define RTWS_STOPPING           9
static const char * const rcu_torture_writer_state_names[] = {
        "RTWS_FIXED_DELAY",
        "RTWS_DELAY",
        "RTWS_REPLACE",
        "RTWS_DEF_FREE",
        "RTWS_EXP_SYNC",
        "RTWS_COND_GET",
        "RTWS_COND_SYNC",
        "RTWS_SYNC",
        "RTWS_STUTTER",
        "RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
        int rt_readstate;
        unsigned long rt_delay_jiffies;
        unsigned long rt_delay_ms;
        unsigned long rt_delay_us;
        bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
        unsigned int i = READ_ONCE(rcu_torture_writer_state);

        if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
                return "???";
        return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
        u64 ts = trace_clock_local();

        (void)do_div(ts, NSEC_PER_USEC);
        return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
        return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

static unsigned long boost_starttime;   /* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);       /* protect setting boost_starttime */
                                        /*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;      /* Barrier callbacks registered. */
static bool barrier_phase;              /* Test phase. */
static atomic_t barrier_cbs_invoked;    /* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;         /* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
        struct list_head *p;

        spin_lock_bh(&rcu_torture_lock);
        if (list_empty(&rcu_torture_freelist)) {
                atomic_inc(&n_rcu_torture_alloc_fail);
                spin_unlock_bh(&rcu_torture_lock);
                return NULL;
        }
        atomic_inc(&n_rcu_torture_alloc);
        p = rcu_torture_freelist.next;
        list_del_init(p);
        spin_unlock_bh(&rcu_torture_lock);
        return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
        atomic_inc(&n_rcu_torture_free);
        spin_lock_bh(&rcu_torture_lock);
        list_add_tail(&p->rtort_free, &rcu_torture_freelist);
        spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
        int ttype;
        void (*init)(void);
        void (*cleanup)(void);
        int (*readlock)(void);
        void (*read_delay)(struct torture_random_state *rrsp,
                           struct rt_read_seg *rtrsp);
        void (*readunlock)(int idx);
        unsigned long (*get_gp_seq)(void);
        unsigned long (*gp_diff)(unsigned long new, unsigned long old);
        void (*deferred_free)(struct rcu_torture *p);
        void (*sync)(void);
        void (*exp_sync)(void);
        unsigned long (*get_state)(void);
        void (*cond_sync)(unsigned long oldstate);
        call_rcu_func_t call;
        void (*cb_barrier)(void);
        void (*fqs)(void);
        void (*stats)(void);
        int (*stall_dur)(void);
        int irq_capable;
        int can_boost;
        int extendables;
        int ext_irq_conflict;
        const char *name;
};

static struct rcu_torture_ops *cur_ops;

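/*
 * Dispatch sketch (not part of the original file): each torture kthread
 * is flavor-agnostic and calls through cur_ops, NULL-checking the
 * optional members first, for example:
 *
 *      if (cur_ops->cb_barrier)
 *              cur_ops->cb_barrier();  (wait for pending callbacks)
 *      cur_ops->sync();                (flavor-specific grace period)
 */
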
321/*
322 * Definitions for rcu torture testing.
323 */
324
Josh Tripletta49a4af2006-09-29 01:59:30 -0700325static int rcu_torture_read_lock(void) __acquires(RCU)
Paul E. McKenney72e9bb52006-06-27 02:54:03 -0700326{
327 rcu_read_lock();
328 return 0;
329}
330
Paul E. McKenneyc116dba2018-07-13 12:09:14 -0700331static void
332rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
Paul E. McKenneyb2896d22006-10-04 02:17:03 -0700333{
Paul E. McKenneyd0af39e2016-10-10 18:26:04 -0700334 unsigned long started;
335 unsigned long completed;
Josh Triplettb8d57a72009-09-08 15:54:35 -0700336 const unsigned long shortdelay_us = 200;
Paul E. McKenney1e696762018-07-20 12:04:12 -0700337 unsigned long longdelay_ms = 300;
Paul E. McKenneyd0af39e2016-10-10 18:26:04 -0700338 unsigned long long ts;
Paul E. McKenneyb2896d22006-10-04 02:17:03 -0700339
Josh Triplettb8d57a72009-09-08 15:54:35 -0700340 /* We want a short delay sometimes to make a reader delay the grace
341 * period, and we want a long delay occasionally to trigger
342 * force_quiescent_state. */
Paul E. McKenneyb2896d22006-10-04 02:17:03 -0700343
Paul E. McKenney48718482018-08-15 15:32:51 -0700344 if (!rcu_fwd_cb_nodelay &&
345 !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
Paul E. McKenney17ef2fe2018-04-27 11:39:34 -0700346 started = cur_ops->get_gp_seq();
Paul E. McKenneyd0af39e2016-10-10 18:26:04 -0700347 ts = rcu_trace_clock_local();
Paul E. McKenney1e696762018-07-20 12:04:12 -0700348 if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
349 longdelay_ms = 5; /* Avoid triggering BH limits. */
Josh Triplettb8d57a72009-09-08 15:54:35 -0700350 mdelay(longdelay_ms);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -0700351 rtrsp->rt_delay_ms = longdelay_ms;
Paul E. McKenney17ef2fe2018-04-27 11:39:34 -0700352 completed = cur_ops->get_gp_seq();
Paul E. McKenneyd0af39e2016-10-10 18:26:04 -0700353 do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
354 started, completed);
355 }
Paul E. McKenneyc116dba2018-07-13 12:09:14 -0700356 if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
Josh Triplettb8d57a72009-09-08 15:54:35 -0700357 udelay(shortdelay_us);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -0700358 rtrsp->rt_delay_us = shortdelay_us;
359 }
Paul E. McKenney51b11302014-01-27 11:49:39 -0800360 if (!preempt_count() &&
Paul E. McKenneyc116dba2018-07-13 12:09:14 -0700361 !(torture_random(rrsp) % (nrealreaders * 500))) {
Paul E. McKenneycc1321c2017-10-16 11:05:03 -0700362 torture_preempt_schedule(); /* QS only if preemptible. */
Paul E. McKenneyc116dba2018-07-13 12:09:14 -0700363 rtrsp->rt_preempted = true;
364 }
Paul E. McKenneyb2896d22006-10-04 02:17:03 -0700365}
366
Josh Tripletta49a4af2006-09-29 01:59:30 -0700367static void rcu_torture_read_unlock(int idx) __releases(RCU)
Paul E. McKenney72e9bb52006-06-27 02:54:03 -0700368{
369 rcu_read_unlock();
370}
371
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -0700372/*
373 * Update callback in the pipe. This should be invoked after a grace period.
374 */
375static bool
376rcu_torture_pipe_update_one(struct rcu_torture *rp)
377{
378 int i;
379
380 i = rp->rtort_pipe_count;
381 if (i > RCU_TORTURE_PIPE_LEN)
382 i = RCU_TORTURE_PIPE_LEN;
383 atomic_inc(&rcu_torture_wcount[i]);
384 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
385 rp->rtort_mbtest = 0;
386 return true;
387 }
388 return false;
389}
390
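/*
 * Illustrative note (not part of the original file): an element entering
 * the pipe starts with rtort_pipe_count == 0 and ages by one per update,
 * being freed once it reaches RCU_TORTURE_PIPE_LEN.  A reader should
 * observe only the youngest ages; a sighting of a well-aged element
 * suggests that a grace period ended while that reader was still running,
 * which is exactly the failure rcutorture hunts for.
 */
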
/*
 * Update all callbacks in the pipe. Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
        struct rcu_torture *rp;
        struct rcu_torture *rp1;

        if (old_rp)
                list_add(&old_rp->rtort_free, &rcu_torture_removed);
        list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
                if (rcu_torture_pipe_update_one(rp)) {
                        list_del(&rp->rtort_free);
                        rcu_torture_free(rp);
                }
        }
}

static void
rcu_torture_cb(struct rcu_head *p)
{
        struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

        if (torture_must_stop_irq()) {
                /* Test is ending, just drop callbacks on the floor. */
                /* The next initialization will pick up the pieces. */
                return;
        }
        if (rcu_torture_pipe_update_one(rp))
                rcu_torture_free(rp);
        else
                cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
        return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
        INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
        .ttype          = RCU_FLAVOR,
        .init           = rcu_sync_torture_init,
        .readlock       = rcu_torture_read_lock,
        .read_delay     = rcu_read_delay,
        .readunlock     = rcu_torture_read_unlock,
        .get_gp_seq     = rcu_get_gp_seq,
        .gp_diff        = rcu_seq_diff,
        .deferred_free  = rcu_torture_deferred_free,
        .sync           = synchronize_rcu,
        .exp_sync       = synchronize_rcu_expedited,
        .get_state      = get_state_synchronize_rcu,
        .cond_sync      = cond_synchronize_rcu,
        .call           = call_rcu,
        .cb_barrier     = rcu_barrier,
        .fqs            = rcu_force_quiescent_state,
        .stats          = NULL,
        .stall_dur      = rcu_jiffies_till_stall_check,
        .irq_capable    = 1,
        .can_boost      = rcu_can_boost(),
        .extendables    = RCUTORTURE_MAX_EXTEND,
        .name           = "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
        /* This is a deliberate bug for testing purposes only! */
        rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
        /* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
        /* This is a deliberate bug for testing purposes only! */
        func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
        .ttype          = INVALID_RCU_FLAVOR,
        .init           = rcu_sync_torture_init,
        .readlock       = rcu_torture_read_lock,
        .read_delay     = rcu_read_delay,       /* just reuse rcu's version. */
        .readunlock     = rcu_torture_read_unlock,
        .get_gp_seq     = rcu_no_completed,
        .deferred_free  = rcu_busted_torture_deferred_free,
        .sync           = synchronize_rcu_busted,
        .exp_sync       = synchronize_rcu_busted,
        .call           = call_rcu_busted,
        .cb_barrier     = NULL,
        .fqs            = NULL,
        .stats          = NULL,
        .irq_capable    = 1,
        .name           = "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
        return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
        long delay;
        const long uspertick = 1000000 / HZ;
        const long longdelay = 10;

        /* We want there to be long-running readers, but not all the time. */

        delay = torture_random(rrsp) %
                (nrealreaders * 2 * longdelay * uspertick);
        if (!delay && in_task()) {
                schedule_timeout_interruptible(longdelay);
                rtrsp->rt_delay_jiffies = longdelay;
        } else {
                rcu_read_delay(rrsp, rtrsp);
        }
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
        srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
        return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
        call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
        synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
                              rcu_callback_t func)
{
        call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
        srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
        srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
        synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
        .ttype          = SRCU_FLAVOR,
        .init           = rcu_sync_torture_init,
        .readlock       = srcu_torture_read_lock,
        .read_delay     = srcu_read_delay,
        .readunlock     = srcu_torture_read_unlock,
        .get_gp_seq     = srcu_torture_completed,
        .deferred_free  = srcu_torture_deferred_free,
        .sync           = srcu_torture_synchronize,
        .exp_sync       = srcu_torture_synchronize_expedited,
        .call           = srcu_torture_call,
        .cb_barrier     = srcu_torture_barrier,
        .stats          = srcu_torture_stats,
        .irq_capable    = 1,
        .name           = "srcu"
};

static void srcu_torture_init(void)
{
        rcu_sync_torture_init();
        WARN_ON(init_srcu_struct(&srcu_ctld));
        srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
        static DEFINE_TORTURE_RANDOM(rand);

        if (torture_random(&rand) & 0x800)
                cleanup_srcu_struct(&srcu_ctld);
        else
                cleanup_srcu_struct_quiesced(&srcu_ctld);
        srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
        .ttype          = SRCU_FLAVOR,
        .init           = srcu_torture_init,
        .cleanup        = srcu_torture_cleanup,
        .readlock       = srcu_torture_read_lock,
        .read_delay     = srcu_read_delay,
        .readunlock     = srcu_torture_read_unlock,
        .get_gp_seq     = srcu_torture_completed,
        .deferred_free  = srcu_torture_deferred_free,
        .sync           = srcu_torture_synchronize,
        .exp_sync       = srcu_torture_synchronize_expedited,
        .call           = srcu_torture_call,
        .cb_barrier     = srcu_torture_barrier,
        .stats          = srcu_torture_stats,
        .irq_capable    = 1,
        .name           = "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
        .ttype          = SRCU_FLAVOR,
        .init           = srcu_torture_init,
        .cleanup        = srcu_torture_cleanup,
        .readlock       = srcu_torture_read_lock,
        .read_delay     = rcu_read_delay,
        .readunlock     = srcu_torture_read_unlock,
        .get_gp_seq     = srcu_torture_completed,
        .deferred_free  = srcu_torture_deferred_free,
        .sync           = srcu_torture_synchronize,
        .exp_sync       = srcu_torture_synchronize_expedited,
        .call           = srcu_torture_call,
        .cb_barrier     = srcu_torture_barrier,
        .stats          = srcu_torture_stats,
        .irq_capable    = 1,
        .extendables    = RCUTORTURE_MAX_EXTEND,
        .name           = "busted_srcud"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
        return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_ops = {
        .ttype          = RCU_TASKS_FLAVOR,
        .init           = rcu_sync_torture_init,
        .readlock       = tasks_torture_read_lock,
        .read_delay     = rcu_read_delay,       /* just reuse rcu's version. */
        .readunlock     = tasks_torture_read_unlock,
        .get_gp_seq     = rcu_no_completed,
        .deferred_free  = rcu_tasks_torture_deferred_free,
        .sync           = synchronize_rcu_tasks,
        .exp_sync       = synchronize_rcu_tasks,
        .call           = call_rcu_tasks,
        .cb_barrier     = rcu_barrier_tasks,
        .fqs            = NULL,
        .stats          = NULL,
        .irq_capable    = 1,
        .name           = "tasks"
};

static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
        if (!cur_ops->gp_diff)
                return new - old;
        return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
        return cur_ops == &tasks_ops;
}

/*
 * RCU torture priority-boost testing. Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked. If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
        struct rcu_head rcu;
        int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
        struct rcu_boost_inflight *rbip =
                container_of(head, struct rcu_boost_inflight, rcu);

        /* Ensure RCU-core accesses precede clearing ->inflight */
        smp_store_release(&rbip->inflight, 0);
}

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
        /*
         * Disable RT throttling so that rcutorture's boost threads don't get
         * throttled. This is only possible when rcutorture is built in;
         * otherwise the user must do it manually, by setting the
         * sched_rt_period_us and sched_rt_runtime sysctls.
         */
        if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
                return;

        old_rt_runtime = sysctl_sched_rt_runtime;
        sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
        if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
                return;

        sysctl_sched_rt_runtime = old_rt_runtime;
        old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
        if (end - start > test_boost_duration * HZ - HZ / 2) {
                VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
                n_rcu_torture_boost_failure++;

                return true; /* failed */
        }

        return false; /* passed */
}

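/*
 * Worked example (illustrative, assuming the default test_boost_duration=4
 * from the module parameters above and HZ=1000): the threshold is
 * 4 * 1000 - 500 = 3500 jiffies, so a callback still outstanding 3.5
 * seconds into a 4-second boost interval is charged as a boost failure.
 */
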
static int rcu_torture_boost(void *arg)
{
        unsigned long call_rcu_time;
        unsigned long endtime;
        unsigned long oldstarttime;
        struct rcu_boost_inflight rbi = { .inflight = 0 };
        struct sched_param sp;

        VERBOSE_TOROUT_STRING("rcu_torture_boost started");

        /* Set real-time priority. */
        sp.sched_priority = 1;
        if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
                VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
                n_rcu_torture_boost_rterror++;
        }

        init_rcu_head_on_stack(&rbi.rcu);
        /* Each pass through the following loop does one boost-test cycle. */
        do {
                /* Track whether the test has already failed in this interval. */
                bool failed = false;

                /* Increment n_rcu_torture_boosts once per boost-test */
                while (!kthread_should_stop()) {
                        if (mutex_trylock(&boost_mutex)) {
                                n_rcu_torture_boosts++;
                                mutex_unlock(&boost_mutex);
                                break;
                        }
                        schedule_timeout_uninterruptible(1);
                }
                if (kthread_should_stop())
                        goto checkwait;

                /* Wait for the next test interval. */
                oldstarttime = boost_starttime;
                while (ULONG_CMP_LT(jiffies, oldstarttime)) {
                        schedule_timeout_interruptible(oldstarttime - jiffies);
                        stutter_wait("rcu_torture_boost");
                        if (torture_must_stop())
                                goto checkwait;
                }

                /* Do one boost-test interval. */
                endtime = oldstarttime + test_boost_duration * HZ;
                call_rcu_time = jiffies;
                while (ULONG_CMP_LT(jiffies, endtime)) {
                        /* If we don't have a callback in flight, post one. */
                        if (!smp_load_acquire(&rbi.inflight)) {
                                /* RCU core before ->inflight = 1. */
                                smp_store_release(&rbi.inflight, 1);
                                call_rcu(&rbi.rcu, rcu_torture_boost_cb);
                                /* Check if the boost test failed */
                                failed = failed ||
                                         rcu_torture_boost_failed(call_rcu_time,
                                                                  jiffies);
                                call_rcu_time = jiffies;
                        }
                        stutter_wait("rcu_torture_boost");
                        if (torture_must_stop())
                                goto checkwait;
                }

                /*
                 * If boosting never succeeded, then inflight will still be 1;
                 * in that case the boost check in the loop above never ran,
                 * so do one more check here.
                 */
                if (!failed && smp_load_acquire(&rbi.inflight))
                        rcu_torture_boost_failed(call_rcu_time, jiffies);

                /*
                 * Set the start time of the next test interval.
                 * Yes, this is vulnerable to long delays, but such
                 * delays simply cause a false negative for the next
                 * interval. Besides, we are running at RT priority,
                 * so delays should be relatively rare.
                 */
                while (oldstarttime == boost_starttime &&
                       !kthread_should_stop()) {
                        if (mutex_trylock(&boost_mutex)) {
                                boost_starttime = jiffies +
                                                  test_boost_interval * HZ;
                                mutex_unlock(&boost_mutex);
                                break;
                        }
                        schedule_timeout_uninterruptible(1);
                }

                /* Go do the stutter. */
checkwait:      stutter_wait("rcu_torture_boost");
        } while (!torture_must_stop());

        /* Clean up and exit. */
        while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
                torture_shutdown_absorb("rcu_torture_boost");
                schedule_timeout_uninterruptible(1);
        }
        destroy_rcu_head_on_stack(&rbi.rcu);
        torture_kthread_stopping("rcu_torture_boost");
        return 0;
}

/*
 * RCU torture force-quiescent-state kthread. Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
        unsigned long fqs_resume_time;
        int fqs_burst_remaining;

        VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
        do {
                fqs_resume_time = jiffies + fqs_stutter * HZ;
                while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
                       !kthread_should_stop()) {
                        schedule_timeout_interruptible(1);
                }
                fqs_burst_remaining = fqs_duration;
                while (fqs_burst_remaining > 0 &&
                       !kthread_should_stop()) {
                        cur_ops->fqs();
                        udelay(fqs_holdoff);
                        fqs_burst_remaining -= fqs_holdoff;
                }
                stutter_wait("rcu_torture_fqs");
        } while (!torture_must_stop());
        torture_kthread_stopping("rcu_torture_fqs");
        return 0;
}

/*
 * RCU torture writer kthread. Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
        bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
        int expediting = 0;
        unsigned long gp_snap;
        bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
        bool gp_sync1 = gp_sync;
        int i;
        struct rcu_torture *rp;
        struct rcu_torture *old_rp;
        static DEFINE_TORTURE_RANDOM(rand);
        int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
                           RTWS_COND_GET, RTWS_SYNC };
        int nsynctypes = 0;

        VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
        if (!can_expedite)
                pr_alert("%s" TORTURE_FLAG
                         " GP expediting controlled from boot/sysfs for %s.\n",
                         torture_type, cur_ops->name);

        /* Initialize synctype[] array. If none set, take default. */
        if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
                gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
        if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
                synctype[nsynctypes++] = RTWS_COND_GET;
                pr_info("%s: Testing conditional GPs.\n", __func__);
        } else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
                pr_alert("%s: gp_cond without primitives.\n", __func__);
        }
        if (gp_exp1 && cur_ops->exp_sync) {
                synctype[nsynctypes++] = RTWS_EXP_SYNC;
                pr_info("%s: Testing expedited GPs.\n", __func__);
        } else if (gp_exp && !cur_ops->exp_sync) {
                pr_alert("%s: gp_exp without primitives.\n", __func__);
        }
        if (gp_normal1 && cur_ops->deferred_free) {
                synctype[nsynctypes++] = RTWS_DEF_FREE;
                pr_info("%s: Testing asynchronous GPs.\n", __func__);
        } else if (gp_normal && !cur_ops->deferred_free) {
                pr_alert("%s: gp_normal without primitives.\n", __func__);
        }
        if (gp_sync1 && cur_ops->sync) {
                synctype[nsynctypes++] = RTWS_SYNC;
                pr_info("%s: Testing normal GPs.\n", __func__);
        } else if (gp_sync && !cur_ops->sync) {
                pr_alert("%s: gp_sync without primitives.\n", __func__);
        }
        if (WARN_ONCE(nsynctypes == 0,
                      "rcu_torture_writer: No update-side primitives.\n")) {
                /*
                 * No update primitives, so don't try updating.
                 * The resulting test won't be testing much, hence the
                 * above WARN_ONCE().
                 */
                rcu_torture_writer_state = RTWS_STOPPING;
                torture_kthread_stopping("rcu_torture_writer");
        }

        do {
                rcu_torture_writer_state = RTWS_FIXED_DELAY;
                schedule_timeout_uninterruptible(1);
                rp = rcu_torture_alloc();
                if (rp == NULL)
                        continue;
                rp->rtort_pipe_count = 0;
                rcu_torture_writer_state = RTWS_DELAY;
                udelay(torture_random(&rand) & 0x3ff);
                rcu_torture_writer_state = RTWS_REPLACE;
                old_rp = rcu_dereference_check(rcu_torture_current,
                                               current == writer_task);
                rp->rtort_mbtest = 1;
                rcu_assign_pointer(rcu_torture_current, rp);
                smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
                if (old_rp) {
                        i = old_rp->rtort_pipe_count;
                        if (i > RCU_TORTURE_PIPE_LEN)
                                i = RCU_TORTURE_PIPE_LEN;
                        atomic_inc(&rcu_torture_wcount[i]);
                        old_rp->rtort_pipe_count++;
                        switch (synctype[torture_random(&rand) % nsynctypes]) {
                        case RTWS_DEF_FREE:
                                rcu_torture_writer_state = RTWS_DEF_FREE;
                                cur_ops->deferred_free(old_rp);
                                break;
                        case RTWS_EXP_SYNC:
                                rcu_torture_writer_state = RTWS_EXP_SYNC;
                                cur_ops->exp_sync();
                                rcu_torture_pipe_update(old_rp);
                                break;
                        case RTWS_COND_GET:
                                rcu_torture_writer_state = RTWS_COND_GET;
                                gp_snap = cur_ops->get_state();
                                i = torture_random(&rand) % 16;
                                if (i != 0)
                                        schedule_timeout_interruptible(i);
                                udelay(torture_random(&rand) % 1000);
                                rcu_torture_writer_state = RTWS_COND_SYNC;
                                cur_ops->cond_sync(gp_snap);
                                rcu_torture_pipe_update(old_rp);
                                break;
                        case RTWS_SYNC:
                                rcu_torture_writer_state = RTWS_SYNC;
                                cur_ops->sync();
                                rcu_torture_pipe_update(old_rp);
                                break;
                        default:
                                WARN_ON_ONCE(1);
                                break;
                        }
                }
                WRITE_ONCE(rcu_torture_current_version,
                           rcu_torture_current_version + 1);
                /* Cycle through nesting levels of rcu_expedite_gp() calls. */
                if (can_expedite &&
                    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
                        WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
                        if (expediting >= 0)
                                rcu_expedite_gp();
                        else
                                rcu_unexpedite_gp();
                        if (++expediting > 3)
                                expediting = -expediting;
                } else if (!can_expedite) { /* Disabled during boot, recheck. */
                        can_expedite = !rcu_gp_is_expedited() &&
                                       !rcu_gp_is_normal();
                }
                rcu_torture_writer_state = RTWS_STUTTER;
                if (stutter_wait("rcu_torture_writer"))
                        for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
                                if (list_empty(&rcu_tortures[i].rtort_free))
                                        WARN_ON_ONCE(1);
        } while (!torture_must_stop());
        /* Reset expediting back to unexpedited. */
        if (expediting > 0)
                expediting = -expediting;
        while (can_expedite && expediting++ < 0)
                rcu_unexpedite_gp();
        WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
        if (!can_expedite)
                pr_alert("%s" TORTURE_FLAG
                         " Dynamic grace-period expediting was disabled.\n",
                         torture_type);
        rcu_torture_writer_state = RTWS_STOPPING;
        torture_kthread_stopping("rcu_torture_writer");
        return 0;
}

/*
 * RCU torture fake writer kthread. Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
        DEFINE_TORTURE_RANDOM(rand);

        VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
        set_user_nice(current, MAX_NICE);

        do {
                schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
                udelay(torture_random(&rand) & 0x3ff);
                if (cur_ops->cb_barrier != NULL &&
                    torture_random(&rand) % (nfakewriters * 8) == 0) {
                        cur_ops->cb_barrier();
                } else if (gp_normal == gp_exp) {
                        if (cur_ops->sync && torture_random(&rand) & 0x80)
                                cur_ops->sync();
                        else if (cur_ops->exp_sync)
                                cur_ops->exp_sync();
                } else if (gp_normal && cur_ops->sync) {
                        cur_ops->sync();
                } else if (cur_ops->exp_sync) {
                        cur_ops->exp_sync();
                }
                stutter_wait("rcu_torture_fakewriter");
        } while (!torture_must_stop());

        torture_kthread_stopping("rcu_torture_fakewriter");
        return 0;
}

static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
        kfree(rhp);
}

Josh Triplettb772e1d2006-10-04 02:17:13 -07001092/*
Paul E. McKenney2397d072018-05-25 07:29:25 -07001093 * Do one extension of an RCU read-side critical section using the
1094 * current reader state in readstate (set to zero for initial entry
1095 * to extended critical section), set the new state as specified by
1096 * newstate (set to zero for final exit from extended critical section),
1097 * and random-number-generator state in trsp. If this is neither the
1098 * beginning nor the end of the critical section and if there was actually a
1099 * change, do a ->read_delay().
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001100 */
Paul E. McKenney2397d072018-05-25 07:29:25 -07001101static void rcutorture_one_extend(int *readstate, int newstate,
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001102 struct torture_random_state *trsp,
1103 struct rt_read_seg *rtrsp)
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001104{
Paul E. McKenney2397d072018-05-25 07:29:25 -07001105 int idxnew = -1;
1106 int idxold = *readstate;
1107 int statesnew = ~*readstate & newstate;
1108 int statesold = *readstate & ~newstate;
1109
1110 WARN_ON_ONCE(idxold < 0);
1111 WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001112 rtrsp->rt_readstate = newstate;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001113
1114 /* First, put new protection in place to avoid critical-section gap. */
1115 if (statesnew & RCUTORTURE_RDR_BH)
1116 local_bh_disable();
1117 if (statesnew & RCUTORTURE_RDR_IRQ)
1118 local_irq_disable();
1119 if (statesnew & RCUTORTURE_RDR_PREEMPT)
1120 preempt_disable();
Paul E. McKenney2ceebc02018-07-06 15:16:12 -07001121 if (statesnew & RCUTORTURE_RDR_RBH)
1122 rcu_read_lock_bh();
1123 if (statesnew & RCUTORTURE_RDR_SCHED)
1124 rcu_read_lock_sched();
Paul E. McKenney2397d072018-05-25 07:29:25 -07001125 if (statesnew & RCUTORTURE_RDR_RCU)
1126 idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
1127
1128 /* Next, remove old protection, irq first due to bh conflict. */
1129 if (statesold & RCUTORTURE_RDR_IRQ)
1130 local_irq_enable();
1131 if (statesold & RCUTORTURE_RDR_BH)
1132 local_bh_enable();
1133 if (statesold & RCUTORTURE_RDR_PREEMPT)
1134 preempt_enable();
Paul E. McKenney2ceebc02018-07-06 15:16:12 -07001135 if (statesold & RCUTORTURE_RDR_RBH)
1136 rcu_read_unlock_bh();
1137 if (statesold & RCUTORTURE_RDR_SCHED)
1138 rcu_read_unlock_sched();
Paul E. McKenney2397d072018-05-25 07:29:25 -07001139 if (statesold & RCUTORTURE_RDR_RCU)
1140 cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
1141
1142 /* Delay if neither beginning nor end and there was a change. */
1143 if ((statesnew || statesold) && *readstate && newstate)
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001144 cur_ops->read_delay(trsp, rtrsp);
Paul E. McKenney2397d072018-05-25 07:29:25 -07001145
1146 /* Update the reader state. */
1147 if (idxnew == -1)
1148 idxnew = idxold & ~RCUTORTURE_RDR_MASK;
1149 WARN_ON_ONCE(idxnew < 0);
1150 WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
1151 *readstate = idxnew | newstate;
1152 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
1153 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
1154}
1155
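/*
 * Note: *readstate packs two things: the RCUTORTURE_RDR_* protection
 * flags below RCUTORTURE_RDR_SHIFT and, at RCUTORTURE_RDR_SHIFT and
 * above, the index returned by ->readlock(), which for the flavors
 * under test is either 0 or 1.  This is why rcutorture_one_extend()
 * bounds the shifted value to the range [0, 1].
 */
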
1156/* Return the biggest extendables mask given current RCU and boot parameters. */
1157static int rcutorture_extend_mask_max(void)
1158{
1159 int mask;
1160
1161 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
1162 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
1163 mask = mask | RCUTORTURE_RDR_RCU;
1164 return mask;
1165}
1166
1167/* Return a random protection state mask, but with at least one bit set. */
1168static int
1169rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
1170{
1171 int mask = rcutorture_extend_mask_max();
Paul E. McKenneybf1bef52018-06-10 08:50:09 -07001172 unsigned long randmask1 = torture_random(trsp) >> 8;
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001173 unsigned long randmask2 = randmask1 >> 3;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001174
1175 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001176	/* Most of the time only one bit, one time in eight lots of bits. */
1177 if (!(randmask1 & 0x7))
Paul E. McKenneybf1bef52018-06-10 08:50:09 -07001178 mask = mask & randmask2;
1179 else
1180 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
Paul E. McKenney2ceebc02018-07-06 15:16:12 -07001181 /* Can't enable bh w/irq disabled. */
Paul E. McKenney2397d072018-05-25 07:29:25 -07001182 if ((mask & RCUTORTURE_RDR_IRQ) &&
Paul E. McKenney2ceebc02018-07-06 15:16:12 -07001183 ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
1184 (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
1185 mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001186 if ((mask & RCUTORTURE_RDR_IRQ) &&
1187 !(mask & cur_ops->ext_irq_conflict) &&
1188 (oldmask & cur_ops->ext_irq_conflict))
1189 mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */
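	/* If the random draw selected nothing, fall back to plain RCU. */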
1190 return mask ?: RCUTORTURE_RDR_RCU;
1191}
1192
1193/*
1194 * Do a randomly selected number of extensions of an existing RCU read-side
1195 * critical section.
1196 */
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001197static struct rt_read_seg *
1198rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
1199 struct rt_read_seg *rtrsp)
Paul E. McKenney2397d072018-05-25 07:29:25 -07001200{
1201 int i;
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001202 int j;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001203 int mask = rcutorture_extend_mask_max();
1204
1205 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
1206 if (!((mask - 1) & mask))
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001207 return rtrsp; /* Current RCU reader not extendable. */
1208 /* Bias towards larger numbers of loops. */
1209 i = (torture_random(trsp) >> 3);
1210 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
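	/* ORing in a down-shifted copy sets extra bits, skewing the masked result upward. */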
1211 for (j = 0; j < i; j++) {
Paul E. McKenney2397d072018-05-25 07:29:25 -07001212 mask = rcutorture_extend_mask(*readstate, trsp);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001213 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
Paul E. McKenney2397d072018-05-25 07:29:25 -07001214 }
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001215 return &rtrsp[j];
Paul E. McKenney2397d072018-05-25 07:29:25 -07001216}
1217
1218/*
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001219 * Do one read-side critical section, returning false if there was
1220 * no data to read. Can be invoked both from process context and
1221 * from a timer handler.
1222 */
1223static bool rcu_torture_one_read(struct torture_random_state *trsp)
1224{
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001225 int i;
Paul E. McKenney917963d2014-11-21 17:10:16 -08001226 unsigned long started;
Paul E. McKenney6b80da42014-11-21 14:19:26 -08001227 unsigned long completed;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001228 int newstate;
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001229 struct rcu_torture *p;
1230 int pipe_count;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001231 int readstate = 0;
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001232 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
1233 struct rt_read_seg *rtrsp = &rtseg[0];
1234 struct rt_read_seg *rtrsp1;
Paul E. McKenney52494532012-11-14 16:26:40 -08001235 unsigned long long ts;
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001236
Paul E. McKenney2397d072018-05-25 07:29:25 -07001237 newstate = rcutorture_extend_mask(readstate, trsp);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001238 rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
Paul E. McKenney17ef2fe2018-04-27 11:39:34 -07001239 started = cur_ops->get_gp_seq();
Steven Rostedte4aa0da2013-02-04 13:36:13 -05001240 ts = rcu_trace_clock_local();
Paul E. McKenney632ee202010-02-22 17:04:45 -08001241 p = rcu_dereference_check(rcu_torture_current,
Paul E. McKenney632ee202010-02-22 17:04:45 -08001242 rcu_read_lock_bh_held() ||
1243 rcu_read_lock_sched_held() ||
Paul E. McKenney5be5d1a2015-06-30 08:57:57 -07001244 srcu_read_lock_held(srcu_ctlp) ||
1245 torturing_tasks());
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001246 if (p == NULL) {
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001247 /* Wait for rcu_torture_writer to get underway */
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001248 rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001249 return false;
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001250 }
1251 if (p->rtort_mbtest == 0)
1252 atomic_inc(&n_rcu_torture_mberror);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001253 rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001254 preempt_disable();
1255 pipe_count = p->rtort_pipe_count;
1256 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1257 /* Should not happen, but... */
1258 pipe_count = RCU_TORTURE_PIPE_LEN;
1259 }
Paul E. McKenney17ef2fe2018-04-27 11:39:34 -07001260 completed = cur_ops->get_gp_seq();
Paul E. McKenney52494532012-11-14 16:26:40 -08001261 if (pipe_count > 1) {
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001262 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1263 ts, started, completed);
Paul E. McKenney274529b2016-03-21 19:46:04 -07001264 rcu_ftrace_dump(DUMP_ALL);
Paul E. McKenney52494532012-11-14 16:26:40 -08001265 }
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001266 __this_cpu_inc(rcu_torture_count[pipe_count]);
Paul E. McKenneyd72193122018-05-15 15:24:41 -07001267 completed = rcutorture_seq_diff(completed, started);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001268 if (completed > RCU_TORTURE_PIPE_LEN) {
1269 /* Should not happen, but... */
1270 completed = RCU_TORTURE_PIPE_LEN;
1271 }
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001272 __this_cpu_inc(rcu_torture_batch[completed]);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001273 preempt_enable();
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001274 rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
Paul E. McKenney2397d072018-05-25 07:29:25 -07001275 WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001276
1277 /* If error or close call, record the sequence of reader protections. */
1278 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
1279 i = 0;
1280 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
1281 err_segs[i++] = *rtrsp1;
1282 rt_read_nsegs = i;
1283 }
1284
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001285 return true;
1286}
1287
Paul E. McKenney3025520e2018-05-22 11:38:47 -07001288static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
1289
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001290/*
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001291 * RCU torture reader from timer handler. Dereferences rcu_torture_current,
1292 * incrementing the corresponding element of the pipeline array. The
1293 * counter in the element should never be greater than 1; otherwise, the
1294 * RCU implementation is broken.
1295 */
1296static void rcu_torture_timer(struct timer_list *unused)
1297{
Paul E. McKenney8da9a592018-05-22 11:17:51 -07001298 atomic_long_inc(&n_rcu_torture_timers);
Paul E. McKenney241b4252018-05-22 11:59:31 -07001299 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));
Paul E. McKenneyf34c85852017-07-20 15:27:32 -07001300
1301 /* Test call_rcu() invocation from interrupt handler. */
1302 if (cur_ops->call) {
1303 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
1304
1305 if (rhp)
1306 cur_ops->call(rhp, rcu_torture_timer_cb);
1307 }
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001308}
1309
1310/*
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001311 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
1312 * incrementing the corresponding element of the pipeline array. The
1313 * counter in the element should never be greater than 1; otherwise, the
1314 * RCU implementation is broken.
1315 */
1316static int
1317rcu_torture_reader(void *arg)
1318{
Paul E. McKenney444da512018-07-04 14:14:42 -07001319 unsigned long lastsleep = jiffies;
Paul E. McKenneyc04dd092018-07-23 14:16:47 -07001320 long myid = (long)arg;
1321 int mynumonline = myid;
Paul E. McKenney51b11302014-01-27 11:49:39 -08001322 DEFINE_TORTURE_RANDOM(rand);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001323 struct timer_list t;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001324
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001325 VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
Linus Torvalds971eae72014-03-31 11:21:19 -07001326 set_user_nice(current, MAX_NICE);
Paul E. McKenney0acc5122009-06-25 09:08:17 -07001327 if (irqreader && cur_ops->irq_capable)
Kees Cookfd30b712017-10-22 17:58:54 -07001328 timer_setup_on_stack(&t, rcu_torture_timer, 0);
Ingo Molnardbdf65b2005-11-13 16:07:22 -08001329
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001330 do {
Paul E. McKenney0acc5122009-06-25 09:08:17 -07001331 if (irqreader && cur_ops->irq_capable) {
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001332 if (!timer_pending(&t))
Paul E. McKenney6155fec2010-02-22 17:05:04 -08001333 mod_timer(&t, jiffies + 1);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001334 }
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001335 if (!rcu_torture_one_read(&rand))
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001336 schedule_timeout_interruptible(HZ);
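		/*
		 * Briefly yield at least once every ten jiffies so that
		 * this tight reader loop cannot monopolize its CPU.
		 */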
Paul E. McKenney444da512018-07-04 14:14:42 -07001337 if (time_after(jiffies, lastsleep)) {
1338 schedule_timeout_interruptible(1);
1339 lastsleep = jiffies + 10;
1340 }
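		/*
		 * Readers whose ID exceeds the current number of online
		 * CPUs park themselves here, so that the number of running
		 * readers roughly tracks CPU-hotplug operations.
		 */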
Paul E. McKenneyc04dd092018-07-23 14:16:47 -07001341 while (num_online_cpus() < mynumonline && !torture_must_stop())
1342 schedule_timeout_interruptible(HZ / 5);
Paul E. McKenney628edaa2014-01-31 11:57:43 -08001343 stutter_wait("rcu_torture_reader");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001344 } while (!torture_must_stop());
Thomas Gleixner424c1b62014-03-23 08:58:27 -07001345 if (irqreader && cur_ops->irq_capable) {
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001346 del_timer_sync(&t);
Thomas Gleixner424c1b62014-03-23 08:58:27 -07001347 destroy_timer_on_stack(&t);
1348 }
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001349 torture_kthread_stopping("rcu_torture_reader");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001350 return 0;
1351}
1352
1353/*
Joe Percheseea203f2014-07-14 09:16:15 -04001354 * Print torture statistics. Caller must ensure that there is only
1355 * one call to this function at a given time!!! This is normally
1356 * accomplished by relying on the module system to only have one copy
1357 * of the module loaded, and then by giving the rcu_torture_stats
1358 * kthread full control (or the init/cleanup functions when rcu_torture_stats
1359 * thread is not running).
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001360 */
Chen Gangd1008952013-11-07 10:30:25 +08001361static void
Joe Percheseea203f2014-07-14 09:16:15 -04001362rcu_torture_stats_print(void)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001363{
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001364 int cpu;
1365 int i;
1366 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1367 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001368 static unsigned long rtcv_snap = ULONG_MAX;
Paul E. McKenney0032f4e2017-08-30 10:40:17 -07001369 static bool splatted;
Paul E. McKenney4ffa6692016-06-30 11:56:38 -07001370 struct task_struct *wtp;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001371
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08001372 for_each_possible_cpu(cpu) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001373 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1374 pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
1375 batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
1376 }
1377 }
1378 for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
1379 if (pipesummary[i] != 0)
1380 break;
1381 }
Joe Percheseea203f2014-07-14 09:16:15 -04001382
1383 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1384 pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
1385 rcu_torture_current,
1386 rcu_torture_current_version,
1387 list_empty(&rcu_torture_freelist),
1388 atomic_read(&n_rcu_torture_alloc),
1389 atomic_read(&n_rcu_torture_alloc_fail),
1390 atomic_read(&n_rcu_torture_free));
SeongJae Park472213a2016-08-13 15:54:35 +09001391 pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
Joe Percheseea203f2014-07-14 09:16:15 -04001392 atomic_read(&n_rcu_torture_mberror),
SeongJae Park472213a2016-08-13 15:54:35 +09001393 n_rcu_torture_barrier_error,
Joe Percheseea203f2014-07-14 09:16:15 -04001394 n_rcu_torture_boost_ktrerror,
1395 n_rcu_torture_boost_rterror);
1396 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
1397 n_rcu_torture_boost_failure,
1398 n_rcu_torture_boosts,
Paul E. McKenney8da9a592018-05-22 11:17:51 -07001399 atomic_long_read(&n_rcu_torture_timers));
Joe Percheseea203f2014-07-14 09:16:15 -04001400 torture_onoff_stats();
Paul E. McKenneyfc6f9c52018-08-27 14:43:05 -07001401 pr_cont("barrier: %ld/%ld:%ld\n",
Joe Percheseea203f2014-07-14 09:16:15 -04001402 n_barrier_successes,
1403 n_barrier_attempts,
1404 n_rcu_torture_barrier_error);
1405
1406 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001407 if (atomic_read(&n_rcu_torture_mberror) != 0 ||
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001408 n_rcu_torture_barrier_error != 0 ||
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001409 n_rcu_torture_boost_ktrerror != 0 ||
1410 n_rcu_torture_boost_rterror != 0 ||
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001411 n_rcu_torture_boost_failure != 0 ||
1412 i > 1) {
Joe Percheseea203f2014-07-14 09:16:15 -04001413 pr_cont("%s", "!!! ");
Paul E. McKenney996417d2005-11-18 01:10:50 -08001414 atomic_inc(&n_rcu_torture_error);
Ingo Molnar5af970a2008-06-18 10:09:48 +02001415 WARN_ON_ONCE(1);
Paul E. McKenney996417d2005-11-18 01:10:50 -08001416 }
Joe Percheseea203f2014-07-14 09:16:15 -04001417 pr_cont("Reader Pipe: ");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001418 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
Joe Percheseea203f2014-07-14 09:16:15 -04001419 pr_cont(" %ld", pipesummary[i]);
1420 pr_cont("\n");
1421
1422 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1423 pr_cont("Reader Batch: ");
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07001424 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
Joe Percheseea203f2014-07-14 09:16:15 -04001425 pr_cont(" %ld", batchsummary[i]);
1426 pr_cont("\n");
1427
1428 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1429 pr_cont("Free-Block Circulation: ");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001430 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
Joe Percheseea203f2014-07-14 09:16:15 -04001431 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001432 }
Joe Percheseea203f2014-07-14 09:16:15 -04001433 pr_cont("\n");
1434
Josh Triplettc8e5b162007-05-08 00:33:20 -07001435 if (cur_ops->stats)
Joe Percheseea203f2014-07-14 09:16:15 -04001436 cur_ops->stats();
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001437 if (rtcv_snap == rcu_torture_current_version &&
1438 rcu_torture_current != NULL) {
Paul E. McKenney7f6733c2017-04-18 17:17:35 -07001439 int __maybe_unused flags = 0;
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001440 unsigned long __maybe_unused gp_seq = 0;
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001441
1442 rcutorture_get_gp_data(cur_ops->ttype,
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001443 &flags, &gp_seq);
Paul E. McKenney7f6733c2017-04-18 17:17:35 -07001444 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001445 &flags, &gp_seq);
Paul E. McKenney4ffa6692016-06-30 11:56:38 -07001446 wtp = READ_ONCE(writer_task);
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001447 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
Paul E. McKenney18aff332015-11-17 13:35:28 -08001448 rcu_torture_writer_state_getname(),
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001449 rcu_torture_writer_state, gp_seq, flags,
Paul E. McKenney808de392017-06-19 10:03:22 -07001450 wtp == NULL ? ~0UL : wtp->state,
1451 wtp == NULL ? -1 : (int)task_cpu(wtp));
Paul E. McKenney0032f4e2017-08-30 10:40:17 -07001452 if (!splatted && wtp) {
1453 sched_show_task(wtp);
1454 splatted = true;
1455 }
Paul E. McKenneyafea2272014-03-12 07:10:41 -07001456 show_rcu_gp_kthreads();
Paul E. McKenney274529b2016-03-21 19:46:04 -07001457 rcu_ftrace_dump(DUMP_ALL);
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001458 }
1459 rtcv_snap = rcu_torture_current_version;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001460}
1461
1462/*
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001463 * Periodically prints torture statistics, if periodic statistics printing
1464 * was specified via the stat_interval module parameter.
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001465 */
1466static int
1467rcu_torture_stats(void *arg)
1468{
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001469 VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001470 do {
1471 schedule_timeout_interruptible(stat_interval * HZ);
1472 rcu_torture_stats_print();
Paul E. McKenneyf67a3352014-01-29 07:40:27 -08001473 torture_shutdown_absorb("rcu_torture_stats");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001474 } while (!torture_must_stop());
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001475 torture_kthread_stopping("rcu_torture_stats");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001476 return 0;
1477}
1478
Paul E. McKenneyeac45e52018-05-17 11:33:17 -07001479static void
Steven Rostedt (Red Hat)e66c33d2013-07-12 16:50:28 -04001480rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
Paul E. McKenney95c38322006-03-24 03:15:58 -08001481{
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001482 pr_alert("%s" TORTURE_FLAG
1483 "--- %s: nreaders=%d nfakewriters=%d "
1484 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1485 "shuffle_interval=%d stutter=%d irqreader=%d "
1486 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1487 "test_boost=%d/%d test_boost_interval=%d "
1488 "test_boost_duration=%d shutdown_secs=%d "
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001489 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
Paul E. McKenney67afeed2012-10-20 12:56:06 -07001490 "n_barrier_cbs=%d "
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001491 "onoff_interval=%d onoff_holdoff=%d\n",
1492 torture_type, tag, nrealreaders, nfakewriters,
1493 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1494 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1495 test_boost, cur_ops->can_boost,
1496 test_boost_interval, test_boost_duration, shutdown_secs,
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001497 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
Paul E. McKenney67afeed2012-10-20 12:56:06 -07001498 n_barrier_cbs,
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001499 onoff_interval, onoff_holdoff);
Paul E. McKenney95c38322006-03-24 03:15:58 -08001500}
1501
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001502static int rcutorture_booster_cleanup(unsigned int cpu)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001503{
1504 struct task_struct *t;
1505
1506 if (boost_tasks[cpu] == NULL)
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001507 return 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001508 mutex_lock(&boost_mutex);
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001509 t = boost_tasks[cpu];
1510 boost_tasks[cpu] = NULL;
Joel Fernandes (Google)450efca2018-06-10 16:45:43 -07001511 rcu_torture_enable_rt_throttle();
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001512 mutex_unlock(&boost_mutex);
1513
1514 /* This must be outside of the mutex, otherwise deadlock! */
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001515 torture_stop_kthread(rcu_torture_boost, t);
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001516 return 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001517}
1518
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001519static int rcutorture_booster_init(unsigned int cpu)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001520{
1521 int retval;
1522
1523 if (boost_tasks[cpu] != NULL)
1524 return 0; /* Already created, nothing more to do. */
1525
1526 /* Don't allow time recalculation while creating a new task. */
1527 mutex_lock(&boost_mutex);
Joel Fernandes (Google)450efca2018-06-10 16:45:43 -07001528 rcu_torture_disable_rt_throttle();
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001529 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
Eric Dumazet1f288092011-06-16 15:53:18 -07001530 boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
1531 cpu_to_node(cpu),
1532 "rcu_torture_boost");
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001533 if (IS_ERR(boost_tasks[cpu])) {
1534 retval = PTR_ERR(boost_tasks[cpu]);
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001535 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001536 n_rcu_torture_boost_ktrerror++;
1537 boost_tasks[cpu] = NULL;
1538 mutex_unlock(&boost_mutex);
1539 return retval;
1540 }
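	/* Bind the kthread before its first wakeup so it starts on its CPU. */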
1541 kthread_bind(boost_tasks[cpu], cpu);
1542 wake_up_process(boost_tasks[cpu]);
1543 mutex_unlock(&boost_mutex);
1544 return 0;
1545}
1546
Paul E. McKenneyd5f546d2011-11-04 11:44:12 -07001547/*
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001548 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
1549 * induces a CPU stall for the time specified by stall_cpu.
1550 */
Paul Gortmaker49fb4c62013-06-19 14:52:21 -04001551static int rcu_torture_stall(void *args)
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001552{
1553 unsigned long stop_at;
1554
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001555 VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001556 if (stall_cpu_holdoff > 0) {
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001557 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001558 schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001559 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001560 }
1561 if (!kthread_should_stop()) {
Arnd Bergmann622be33f2018-06-18 16:47:34 +02001562 stop_at = ktime_get_seconds() + stall_cpu;
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001563 /* RCU CPU stall is expected behavior in following code. */
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001564 rcu_read_lock();
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001565 if (stall_cpu_irqsoff)
1566 local_irq_disable();
1567 else
1568 preempt_disable();
1569 pr_alert("rcu_torture_stall start on CPU %d.\n",
1570 smp_processor_id());
Arnd Bergmann622be33f2018-06-18 16:47:34 +02001571 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
1572 stop_at))
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001573 continue; /* Induce RCU CPU stall warning. */
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001574 if (stall_cpu_irqsoff)
1575 local_irq_enable();
1576 else
1577 preempt_enable();
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001578 rcu_read_unlock();
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001579 pr_alert("rcu_torture_stall end.\n");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001580 }
Paul E. McKenneyf67a3352014-01-29 07:40:27 -08001581 torture_shutdown_absorb("rcu_torture_stall");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001582 while (!kthread_should_stop())
1583 schedule_timeout_interruptible(10 * HZ);
1584 return 0;
1585}
1586
1587/* Spawn CPU-stall kthread, if stall_cpu specified. */
1588static int __init rcu_torture_stall_init(void)
1589{
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001590 if (stall_cpu <= 0)
1591 return 0;
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08001592 return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001593}
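
/*
 * For example (hypothetical values): booting with rcutorture.stall_cpu=22
 * and rcutorture.stall_cpu_holdoff=30 waits 30 seconds after module init,
 * then spins in an RCU read-side critical section for 22 seconds, which
 * exceeds the default 21-second CONFIG_RCU_CPU_STALL_TIMEOUT and should
 * therefore provoke an RCU CPU stall warning.
 */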
1594
Paul E. McKenney9fdcb9a2018-07-19 13:36:00 -07001595/* State structure for forward-progress self-propagating RCU callback. */
1596struct fwd_cb_state {
1597 struct rcu_head rh;
1598 int stop;
1599};
1600
1601/*
1602 * Forward-progress self-propagating RCU callback function. Because
1603 * callbacks run from softirq, this function is an implicit RCU read-side
1604 * critical section.
1605 */
1606static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
1607{
1608 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
1609
1610 if (READ_ONCE(fcsp->stop)) {
1611 WRITE_ONCE(fcsp->stop, 2);
1612 return;
1613 }
1614 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
1615}
1616
Paul E. McKenney48718482018-08-15 15:32:51 -07001617/* State for continuous-flood RCU callbacks. */
1618struct rcu_fwd_cb {
1619 struct rcu_head rh;
1620 struct rcu_fwd_cb *rfc_next;
1621 int rfc_gps;
1622};
1623static DEFINE_SPINLOCK(rcu_fwd_lock);
1624static struct rcu_fwd_cb *rcu_fwd_cb_head;
1625static struct rcu_fwd_cb **rcu_fwd_cb_tail = &rcu_fwd_cb_head;
1626static long n_launders_cb;
1627static unsigned long rcu_fwd_startat;
Paul E. McKenneye0aff972018-10-01 17:40:54 -07001628static bool rcu_fwd_emergency_stop;
Paul E. McKenney48718482018-08-15 15:32:51 -07001629#define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */
1630#define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */
1631#define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */
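/*
 * Histogram of continuous-flood callback invocations: one bucket per
 * second since rcu_fwd_startat, sized at twice MAX_FWD_CB_JIFFIES to
 * leave room for overrun.
 */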
1632static long n_launders_hist[2 * MAX_FWD_CB_JIFFIES / HZ];
1633
1634/* Callback function for continuous-flood RCU callbacks. */
1635static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
1636{
1637 int i;
1638 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
1639 struct rcu_fwd_cb **rfcpp;
1640
1641 rfcp->rfc_next = NULL;
1642 rfcp->rfc_gps++;
1643 spin_lock(&rcu_fwd_lock);
1644 rfcpp = rcu_fwd_cb_tail;
1645 rcu_fwd_cb_tail = &rfcp->rfc_next;
1646 WRITE_ONCE(*rfcpp, rfcp);
1647 WRITE_ONCE(n_launders_cb, n_launders_cb + 1);
1648	i = ((jiffies - READ_ONCE(rcu_fwd_startat)) / HZ);
1649 if (i >= ARRAY_SIZE(n_launders_hist))
1650 i = ARRAY_SIZE(n_launders_hist) - 1;
1651 n_launders_hist[i]++;
1652 spin_unlock(&rcu_fwd_lock);
1653}
1654
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001655/* Carry out need_resched()/cond_resched() forward-progress testing. */
1656static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
Paul E. McKenney1b272912018-07-18 14:32:31 -07001657{
Paul E. McKenney119248b2018-07-18 15:39:37 -07001658 unsigned long cver;
Paul E. McKenneyf4de46e2018-07-24 20:50:40 -07001659 unsigned long dur;
Paul E. McKenney7c590fc2018-08-07 16:42:42 -07001660 struct fwd_cb_state fcs;
Paul E. McKenney119248b2018-07-18 15:39:37 -07001661 unsigned long gps;
Paul E. McKenney1b272912018-07-18 14:32:31 -07001662 int idx;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001663 int sd;
1664 int sd4;
1665 bool selfpropcb = false;
1666 unsigned long stopat;
1667 static DEFINE_TORTURE_RANDOM(trs);
1668
1669 if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
1670 init_rcu_head_on_stack(&fcs.rh);
1671 selfpropcb = true;
1672 }
1673
1674 /* Tight loop containing cond_resched(). */
1675 if (selfpropcb) {
1676 WRITE_ONCE(fcs.stop, 0);
1677 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
1678 }
1679 cver = READ_ONCE(rcu_torture_current_version);
1680 gps = cur_ops->get_gp_seq();
1681 sd = cur_ops->stall_dur() + 1;
1682 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
1683 dur = sd4 + torture_random(&trs) % (sd - sd4);
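	/*
	 * That is, somewhere between 1/fwd_progress_div of an RCU CPU
	 * stall timeout and just short of a full stall timeout.
	 */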
Paul E. McKenney61670ad2018-10-01 13:27:41 -07001684 WRITE_ONCE(rcu_fwd_startat, jiffies);
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001685 stopat = rcu_fwd_startat + dur;
Paul E. McKenneye0aff972018-10-01 17:40:54 -07001686 while (time_before(jiffies, stopat) &&
1687 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001688 idx = cur_ops->readlock();
1689 udelay(10);
1690 cur_ops->readunlock(idx);
1691 if (!fwd_progress_need_resched || need_resched())
1692 cond_resched();
1693 }
1694 (*tested_tries)++;
Paul E. McKenneye0aff972018-10-01 17:40:54 -07001695 if (!time_before(jiffies, stopat) &&
1696 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001697 (*tested)++;
1698 cver = READ_ONCE(rcu_torture_current_version) - cver;
1699 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
1700 WARN_ON(!cver && gps < 2);
1701 pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
1702 }
1703 if (selfpropcb) {
1704 WRITE_ONCE(fcs.stop, 1);
1705 cur_ops->sync(); /* Wait for running CB to complete. */
1706 cur_ops->cb_barrier(); /* Wait for queued callbacks. */
1707 }
1708
1709 if (selfpropcb) {
1710 WARN_ON(READ_ONCE(fcs.stop) != 2);
1711 destroy_rcu_head_on_stack(&fcs.rh);
1712 }
1713}
1714
1715/* Carry out call_rcu() forward-progress testing. */
1716static void rcu_torture_fwd_prog_cr(void)
1717{
1718 unsigned long cver;
1719 unsigned long gps;
1720 int i;
Paul E. McKenney48718482018-08-15 15:32:51 -07001721 int j;
1722 long n_launders;
1723 long n_launders_cb_snap;
1724 long n_launders_sa;
1725 long n_max_cbs;
1726 long n_max_gps;
1727 struct rcu_fwd_cb *rfcp;
1728 struct rcu_fwd_cb *rfcpn;
Paul E. McKenney1b272912018-07-18 14:32:31 -07001729 unsigned long stopat;
Paul E. McKenney48718482018-08-15 15:32:51 -07001730 unsigned long stoppedat;
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001731
1732 /* Loop continuously posting RCU callbacks. */
1733 WRITE_ONCE(rcu_fwd_cb_nodelay, true);
1734 cur_ops->sync(); /* Later readers see above write. */
Paul E. McKenney61670ad2018-10-01 13:27:41 -07001735 WRITE_ONCE(rcu_fwd_startat, jiffies);
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001736 stopat = rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
1737 n_launders = 0;
1738 n_launders_cb = 0;
1739 n_launders_sa = 0;
1740 n_max_cbs = 0;
1741 n_max_gps = 0;
1742 for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++)
1743 n_launders_hist[i] = 0;
1744 cver = READ_ONCE(rcu_torture_current_version);
1745 gps = cur_ops->get_gp_seq();
Paul E. McKenneye0aff972018-10-01 17:40:54 -07001746 while (time_before(jiffies, stopat) &&
1747 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001748 rfcp = READ_ONCE(rcu_fwd_cb_head);
1749 rfcpn = NULL;
1750 if (rfcp)
1751 rfcpn = READ_ONCE(rfcp->rfc_next);
1752 if (rfcpn) {
1753 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
1754 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
1755 break;
1756 rcu_fwd_cb_head = rfcpn;
1757 n_launders++;
1758 n_launders_sa++;
1759 } else {
1760 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
1761 if (WARN_ON_ONCE(!rfcp)) {
1762 schedule_timeout_interruptible(1);
1763 continue;
1764 }
1765 n_max_cbs++;
1766 n_launders_sa = 0;
1767 rfcp->rfc_gps = 0;
1768 }
1769 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
1770 cond_resched();
1771 }
1772 stoppedat = jiffies;
1773 n_launders_cb_snap = READ_ONCE(n_launders_cb);
1774 cver = READ_ONCE(rcu_torture_current_version) - cver;
1775 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
1776 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
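	/* All callbacks have been invoked, so it is now safe to free the list. */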
1777 for (;;) {
1778 rfcp = rcu_fwd_cb_head;
1779 if (!rfcp)
1780 break;
1781 rcu_fwd_cb_head = rfcp->rfc_next;
1782 kfree(rfcp);
1783 }
1784 rcu_fwd_cb_tail = &rcu_fwd_cb_head;
1785 WRITE_ONCE(rcu_fwd_cb_nodelay, false);
1786 if (!torture_must_stop()) {
1787 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
1788 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
1789 __func__,
1790 stoppedat - rcu_fwd_startat, jiffies - stoppedat,
1791 n_launders + n_max_cbs - n_launders_cb_snap,
1792 n_launders, n_launders_sa,
1793 n_max_gps, n_max_cbs, cver, gps);
1794 for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--)
1795 if (n_launders_hist[i] > 0)
1796 break;
1797 pr_alert("Callback-invocation histogram:");
1798 for (j = 0; j <= i; j++)
1799 pr_cont(" %ds: %ld", j + 1, n_launders_hist[j]);
1800 pr_cont("\n");
1801 }
1802}
1803
1805/*
1806 * OOM notifier. Rather than freeing memory, it prints diagnostic
1807 * information for the current forward-progress test, then asks that test to stop.
1808 */
1809static int rcutorture_oom_notify(struct notifier_block *self,
1810 unsigned long notused, void *nfreed)
1811{
1812	rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat)) / 2);
1813 WRITE_ONCE(rcu_fwd_emergency_stop, true);
1814 return NOTIFY_OK;
1815}
1816
1817static struct notifier_block rcutorture_oom_nb = {
1818 .notifier_call = rcutorture_oom_notify
1819};
1820
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001821/* Carry out grace-period forward-progress testing. */
1822static int rcu_torture_fwd_prog(void *args)
1823{
Paul E. McKenneyf4de46e2018-07-24 20:50:40 -07001824 int tested = 0;
Paul E. McKenney152f4af2018-07-19 10:57:58 -07001825 int tested_tries = 0;
Paul E. McKenney1b272912018-07-18 14:32:31 -07001826
1827 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
Paul E. McKenney5ab7ab82018-09-21 18:08:09 -07001828 rcu_bind_current_to_nocb();
Paul E. McKenneyfecad502018-07-20 12:18:11 -07001829 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
1830 set_user_nice(current, MAX_NICE);
Paul E. McKenney1b272912018-07-18 14:32:31 -07001831 do {
1832 schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
Paul E. McKenneye0aff972018-10-01 17:40:54 -07001833 WRITE_ONCE(rcu_fwd_emergency_stop, false);
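		/* Arm OOM-time diagnostics for the duration of the tests below. */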
1834 register_oom_notifier(&rcutorture_oom_nb);
Paul E. McKenney6b3de7a2018-08-28 14:38:43 -07001835 rcu_torture_fwd_prog_nr(&tested, &tested_tries);
1836 rcu_torture_fwd_prog_cr();
Paul E. McKenneye0aff972018-10-01 17:40:54 -07001837 unregister_oom_notifier(&rcutorture_oom_nb);
Paul E. McKenney48718482018-08-15 15:32:51 -07001838
Paul E. McKenney1b272912018-07-18 14:32:31 -07001839 /* Avoid slow periods, better to test when busy. */
1840 stutter_wait("rcu_torture_fwd_prog");
1841 } while (!torture_must_stop());
Paul E. McKenney152f4af2018-07-19 10:57:58 -07001842 /* Short runs might not contain a valid forward-progress attempt. */
1843 WARN_ON(!tested && tested_tries >= 5);
Paul E. McKenneyf4de46e2018-07-24 20:50:40 -07001844 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
Paul E. McKenney1b272912018-07-18 14:32:31 -07001845 torture_kthread_stopping("rcu_torture_fwd_prog");
1846 return 0;
1847}
1848
1849/* If forward-progress checking is requested and feasible, spawn the thread. */
1850static int __init rcu_torture_fwd_prog_init(void)
1851{
1852 if (!fwd_progress)
1853 return 0; /* Not requested, so don't do it. */
1854 if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0) {
1855 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
1856 return 0;
1857 }
1858 if (stall_cpu > 0) {
1859 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
1860		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
1861 return -EINVAL; /* In module, can fail back to user. */
1862 WARN_ON(1); /* Make sure rcutorture notices conflict. */
1863 return 0;
1864 }
1865 if (fwd_progress_holdoff <= 0)
1866 fwd_progress_holdoff = 1;
1867 if (fwd_progress_div <= 0)
1868 fwd_progress_div = 4;
1869 return torture_create_kthread(rcu_torture_fwd_prog,
1870 NULL, fwd_prog_task);
1871}
1872
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001873/* Callback function for RCU barrier testing. */
Rashika Kheriab3b8a4d2014-02-27 17:16:57 +05301874static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001875{
1876 atomic_inc(&barrier_cbs_invoked);
1877}
1878
1879/* kthread function to register callbacks used to test RCU barriers. */
1880static int rcu_torture_barrier_cbs(void *arg)
1881{
1882 long myid = (long)arg;
Paul E. McKenneyc6ebcbb2012-05-28 19:21:41 -07001883	bool lastphase = false;
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07001884 bool newphase;
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001885 struct rcu_head rcu;
1886
1887 init_rcu_head_on_stack(&rcu);
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001888 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
Linus Torvalds971eae72014-03-31 11:21:19 -07001889 set_user_nice(current, MAX_NICE);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001890 do {
1891 wait_event(barrier_cbs_wq[myid],
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07001892 (newphase =
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07001893 smp_load_acquire(&barrier_phase)) != lastphase ||
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001894 torture_must_stop());
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07001895 lastphase = newphase;
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001896 if (torture_must_stop())
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001897 break;
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07001898 /*
1899 * The above smp_load_acquire() ensures barrier_phase load
Paul E. McKenneyaab05732016-05-02 12:20:51 -07001900 * is ordered before the following ->call().
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07001901 */
Paul E. McKenney0aa67e72016-03-30 11:40:44 -07001902 local_irq_disable(); /* Just to test no-irq call_rcu(). */
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001903 cur_ops->call(&rcu, rcu_torture_barrier_cbf);
Paul E. McKenney0aa67e72016-03-30 11:40:44 -07001904 local_irq_enable();
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001905 if (atomic_dec_and_test(&barrier_cbs_count))
1906 wake_up(&barrier_wq);
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001907 } while (!torture_must_stop());
Paul E. McKenney69c60452014-07-01 11:59:36 -07001908 if (cur_ops->cb_barrier != NULL)
1909 cur_ops->cb_barrier();
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001910 destroy_rcu_head_on_stack(&rcu);
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001911 torture_kthread_stopping("rcu_torture_barrier_cbs");
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001912 return 0;
1913}
1914
1915/* kthread function to drive and coordinate RCU barrier testing. */
1916static int rcu_torture_barrier(void *arg)
1917{
1918 int i;
1919
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001920 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001921 do {
1922 atomic_set(&barrier_cbs_invoked, 0);
1923 atomic_set(&barrier_cbs_count, n_barrier_cbs);
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07001924 /* Ensure barrier_phase ordered after prior assignments. */
1925 smp_store_release(&barrier_phase, !barrier_phase);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001926 for (i = 0; i < n_barrier_cbs; i++)
1927 wake_up(&barrier_cbs_wq[i]);
1928 wait_event(barrier_wq,
1929 atomic_read(&barrier_cbs_count) == 0 ||
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001930 torture_must_stop());
1931 if (torture_must_stop())
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001932 break;
1933 n_barrier_attempts++;
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07001934 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001935 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
1936 n_rcu_torture_barrier_error++;
Paul E. McKenney7602de4a2014-12-17 18:39:54 -08001937 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
1938 atomic_read(&barrier_cbs_invoked),
1939 n_barrier_cbs);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001940 WARN_ON_ONCE(1);
Joel Fernandes (Google)bf5b6432018-06-19 15:14:19 -07001941 } else {
1942 n_barrier_successes++;
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001943 }
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001944 schedule_timeout_interruptible(HZ / 10);
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001945 } while (!torture_must_stop());
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001946 torture_kthread_stopping("rcu_torture_barrier");
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001947 return 0;
1948}
1949
1950/* Initialize RCU barrier testing. */
1951static int rcu_torture_barrier_init(void)
1952{
1953 int i;
1954 int ret;
1955
Paul E. McKenneyd9eba7682015-05-14 15:35:43 -07001956 if (n_barrier_cbs <= 0)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001957 return 0;
1958 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001959 pr_alert("%s" TORTURE_FLAG
1960 " Call or barrier ops missing for %s,\n",
1961 torture_type, cur_ops->name);
1962 pr_alert("%s" TORTURE_FLAG
1963 " RCU barrier testing omitted from run.\n",
1964 torture_type);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001965 return 0;
1966 }
1967 atomic_set(&barrier_cbs_count, 0);
1968 atomic_set(&barrier_cbs_invoked, 0);
1969 barrier_cbs_tasks =
Paul E. McKenney68a675d2017-12-01 14:26:56 -08001970 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001971 GFP_KERNEL);
1972 barrier_cbs_wq =
Paul E. McKenney68a675d2017-12-01 14:26:56 -08001973 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
Sasha Levinde5e6432012-12-20 14:11:28 -05001974 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001975 return -ENOMEM;
1976 for (i = 0; i < n_barrier_cbs; i++) {
1977 init_waitqueue_head(&barrier_cbs_wq[i]);
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08001978 ret = torture_create_kthread(rcu_torture_barrier_cbs,
1979 (void *)(long)i,
1980 barrier_cbs_tasks[i]);
1981 if (ret)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001982 return ret;
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001983 }
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08001984 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001985}
1986
1987/* Clean up after RCU barrier testing. */
1988static void rcu_torture_barrier_cleanup(void)
1989{
1990 int i;
1991
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001992 torture_stop_kthread(rcu_torture_barrier, barrier_task);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001993 if (barrier_cbs_tasks != NULL) {
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001994 for (i = 0; i < n_barrier_cbs; i++)
1995 torture_stop_kthread(rcu_torture_barrier_cbs,
1996 barrier_cbs_tasks[i]);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001997 kfree(barrier_cbs_tasks);
1998 barrier_cbs_tasks = NULL;
1999 }
2000 if (barrier_cbs_wq != NULL) {
2001 kfree(barrier_cbs_wq);
2002 barrier_cbs_wq = NULL;
2003 }
2004}
2005
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002006static bool rcu_torture_can_boost(void)
2007{
2008 static int boost_warn_once;
2009 int prio;
2010
2011 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
2012 return false;
2013
2014 prio = rcu_get_gp_kthreads_prio();
2015 if (!prio)
2016 return false;
2017
2018 if (prio < 2) {
2019 if (boost_warn_once == 1)
2020 return false;
2021
Joel Fernandes (Google)bf5b6432018-06-19 15:14:19 -07002022 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002023 boost_warn_once = 1;
2024 return false;
2025 }
2026
2027 return true;
2028}
2029
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02002030static enum cpuhp_state rcutor_hp;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002031
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002032static void
2033rcu_torture_cleanup(void)
2034{
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07002035 int firsttime;
Paul E. McKenney034777d2018-04-19 08:43:11 -07002036 int flags = 0;
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07002037 unsigned long gp_seq = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002038 int i;
2039
Davidlohr Buesod36a7a02014-09-11 20:40:21 -07002040 if (torture_cleanup_begin()) {
Paul E. McKenney343e9092008-12-15 16:13:07 -08002041 if (cur_ops->cb_barrier != NULL)
2042 cur_ops->cb_barrier();
2043 return;
2044 }
Paul E. McKenney3808dc92014-01-28 15:29:21 -08002045
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002046 rcu_torture_barrier_cleanup();
Paul E. McKenney1b272912018-07-18 14:32:31 -07002047 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002048 torture_stop_kthread(rcu_torture_stall, stall_task);
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002049 torture_stop_kthread(rcu_torture_writer, writer_task);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002050
Josh Triplettc8e5b162007-05-08 00:33:20 -07002051 if (reader_tasks) {
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002052 for (i = 0; i < nrealreaders; i++)
2053 torture_stop_kthread(rcu_torture_reader,
2054 reader_tasks[i]);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002055 kfree(reader_tasks);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002056 }
2057 rcu_torture_current = NULL;
2058
Josh Triplettc8e5b162007-05-08 00:33:20 -07002059 if (fakewriter_tasks) {
Josh Triplettb772e1d2006-10-04 02:17:13 -07002060 for (i = 0; i < nfakewriters; i++) {
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002061 torture_stop_kthread(rcu_torture_fakewriter,
2062 fakewriter_tasks[i]);
Josh Triplettb772e1d2006-10-04 02:17:13 -07002063 }
2064 kfree(fakewriter_tasks);
2065 fakewriter_tasks = NULL;
2066 }
2067
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07002068 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
2069 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
2070 pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
2071 cur_ops->name, gp_seq, flags);
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002072 torture_stop_kthread(rcu_torture_stats, stats_task);
2073 torture_stop_kthread(rcu_torture_fqs, fqs_task);
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002074 if (rcu_torture_can_boost())
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02002075 cpuhp_remove_state(rcutor_hp);
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002076
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07002077 /*
Paul E. McKenney62a1a942018-07-07 18:12:26 -07002078 * Wait for all RCU callbacks to fire, then do torture-type-specific
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07002079 * cleanup operations.
2080 */
Paul E. McKenney23269742008-05-12 21:21:05 +02002081 if (cur_ops->cb_barrier != NULL)
2082 cur_ops->cb_barrier();
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07002083 if (cur_ops->cleanup != NULL)
2084 cur_ops->cleanup();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002085
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002086 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002087
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07002088 if (err_segs_recorded) {
2089 pr_alert("Failure/close-call rcutorture reader segments:\n");
2090 if (rt_read_nsegs == 0)
2091 pr_alert("\t: No segments recorded!!!\n");
2092 firsttime = 1;
2093 for (i = 0; i < rt_read_nsegs; i++) {
2094 pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
2095 if (err_segs[i].rt_delay_jiffies != 0) {
2096 pr_cont("%s%ldjiffies", firsttime ? "" : "+",
2097 err_segs[i].rt_delay_jiffies);
2098 firsttime = 0;
2099 }
2100 if (err_segs[i].rt_delay_ms != 0) {
2101 pr_cont("%s%ldms", firsttime ? "" : "+",
2102 err_segs[i].rt_delay_ms);
2103 firsttime = 0;
2104 }
2105 if (err_segs[i].rt_delay_us != 0) {
2106 pr_cont("%s%ldus", firsttime ? "" : "+",
2107 err_segs[i].rt_delay_us);
2108 firsttime = 0;
2109 }
2110 pr_cont("%s\n",
2111 err_segs[i].rt_preempted ? "preempted" : "");
2112
2113 }
2114 }
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002115 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002116 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
Paul E. McKenney2e9e8082014-01-28 15:58:22 -08002117 else if (torture_onoff_failures())
Paul E. McKenney091541b2012-01-10 12:51:14 -08002118 rcu_torture_print_module_parms(cur_ops,
2119 "End of test: RCU_HOTPLUG");
Paul E. McKenney95c38322006-03-24 03:15:58 -08002120 else
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002121 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
Davidlohr Buesod36a7a02014-09-11 20:40:21 -07002122 torture_cleanup_end();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002123}
2124
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002125#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2126static void rcu_torture_leak_cb(struct rcu_head *rhp)
2127{
2128}
2129
2130static void rcu_torture_err_cb(struct rcu_head *rhp)
2131{
2132 /*
2133 * This -might- happen due to race conditions, but is unlikely.
2134 * The scenario that leads to this happening is that the
2135 * first of the pair of duplicate callbacks is queued,
2136 * someone else starts a grace period that includes that
2137 * callback, then the second of the pair must wait for the
2138 * next grace period. Unlikely, but can happen. If it
2139 * does happen, the debug-objects subsystem won't have splatted.
2140 */
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08002141 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002142}
2143#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2144
2145/*
2146 * Verify that double-free causes debug-objects to complain, but only
2147 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test
2148 * cannot be carried out.
2149 */
2150static void rcu_test_debug_objects(void)
2151{
2152#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2153 struct rcu_head rh1;
2154 struct rcu_head rh2;
2155
2156 init_rcu_head_on_stack(&rh1);
2157 init_rcu_head_on_stack(&rh2);
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08002158 pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002159
2160 /* Try to queue the rh2 pair of callbacks for the same grace period. */
2161 preempt_disable(); /* Prevent preemption from interrupting test. */
2162 rcu_read_lock(); /* Make it impossible to finish a grace period. */
2163 call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
2164 local_irq_disable(); /* Make it harder to start a new grace period. */
2165 call_rcu(&rh2, rcu_torture_leak_cb);
2166 call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
2167 local_irq_enable();
2168 rcu_read_unlock();
2169 preempt_enable();
2170
2171 /* Wait for them all to get done so we can safely return. */
2172 rcu_barrier();
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08002173 pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002174 destroy_rcu_head_on_stack(&rh1);
2175 destroy_rcu_head_on_stack(&rh2);
2176#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08002177 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002178#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2179}
2180
Josh Triplett6f8bc5002007-05-08 00:25:24 -07002181static int __init
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002182rcu_torture_init(void)
2183{
Paul E. McKenneyc04dd092018-07-23 14:16:47 -07002184 long i;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002185 int cpu;
2186 int firsterr = 0;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07002187 static struct rcu_torture_ops *torture_ops[] = {
Paul E. McKenneyc770c822018-07-07 10:28:07 -07002188 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
2189 &busted_srcud_ops, &tasks_ops,
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07002190 };
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002191
Paul E. McKenneya2f25772017-11-21 20:19:17 -08002192 if (!torture_init_begin(torture_type, verbose))
Paul E. McKenney52280842014-04-07 09:14:11 -07002193 return -EBUSY;
Paul E. McKenney343e9092008-12-15 16:13:07 -08002194
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002195 /* Process args and tell the world that the torturer is on the job. */
Josh Triplettade5fb82007-05-08 00:33:22 -07002196 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002197 cur_ops = torture_ops[i];
Josh Triplettade5fb82007-05-08 00:33:22 -07002198 if (strcmp(torture_type, cur_ops->name) == 0)
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002199 break;
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002200 }
Josh Triplettade5fb82007-05-08 00:33:22 -07002201 if (i == ARRAY_SIZE(torture_ops)) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07002202 pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
2203 torture_type);
2204 pr_alert("rcu-torture types:");
Paul E. McKenneycf886c42009-10-25 19:03:54 -07002205 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
Joe Perchesa7538352018-05-14 13:27:33 -07002206 pr_cont(" %s", torture_ops[i]->name);
2207 pr_cont("\n");
Paul E. McKenneye746b552018-07-07 17:35:22 -07002208 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
Paul E. McKenney889d4872015-08-24 11:37:58 -07002209 firsterr = -EINVAL;
2210 goto unwind;
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002211 }
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002212 if (cur_ops->fqs == NULL && fqs_duration != 0) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07002213 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002214 fqs_duration = 0;
2215 }
Josh Triplettc8e5b162007-05-08 00:33:20 -07002216 if (cur_ops->init)
Paul E. McKenney889d4872015-08-24 11:37:58 -07002217 cur_ops->init();
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002218
Paul E. McKenney64e4b432014-03-12 10:26:35 -07002219 if (nreaders >= 0) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002220 nrealreaders = nreaders;
Paul E. McKenney64e4b432014-03-12 10:26:35 -07002221 } else {
Paul E. McKenney3838cc12015-03-12 13:55:48 -07002222 nrealreaders = num_online_cpus() - 2 - nreaders;
Paul E. McKenney64e4b432014-03-12 10:26:35 -07002223 if (nrealreaders <= 0)
2224 nrealreaders = 1;
2225 }
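	/*
	 * Worked example, assuming the usual nreaders default of -1: on a
	 * 16-CPU system this yields nrealreaders = 16 - 2 - (-1) = 15,
	 * leaving about one CPU's worth of capacity for the writer and the
	 * other torture kthreads.
	 */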
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002226 rcu_torture_print_module_parms(cur_ops, "Start of test");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002227
2228 /* Set up the freelist. */
2229
2230 INIT_LIST_HEAD(&rcu_torture_freelist);
Ahmed S. Darwish788e7702007-05-08 00:33:14 -07002231 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
Paul E. McKenney996417d2005-11-18 01:10:50 -08002232 rcu_tortures[i].rtort_mbtest = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002233 list_add_tail(&rcu_tortures[i].rtort_free,
2234 &rcu_torture_freelist);
2235 }
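	/*
	 * rtort_mbtest == 0 marks an element as not yet exposed to readers;
	 * the writer sets it to 1 when publishing, so a reader that sees 0
	 * through an RCU-protected pointer records an n_rcu_torture_mberror
	 * event.
	 */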
2236
2237 /* Initialize the statistics so that each run gets its own numbers. */
2238
2239 rcu_torture_current = NULL;
2240 rcu_torture_current_version = 0;
2241 atomic_set(&n_rcu_torture_alloc, 0);
2242 atomic_set(&n_rcu_torture_alloc_fail, 0);
2243 atomic_set(&n_rcu_torture_free, 0);
Paul E. McKenney996417d2005-11-18 01:10:50 -08002244 atomic_set(&n_rcu_torture_mberror, 0);
2245 atomic_set(&n_rcu_torture_error, 0);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002246 n_rcu_torture_barrier_error = 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002247 n_rcu_torture_boost_ktrerror = 0;
2248 n_rcu_torture_boost_rterror = 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002249 n_rcu_torture_boost_failure = 0;
2250 n_rcu_torture_boosts = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002251 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2252 atomic_set(&rcu_torture_wcount[i], 0);
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08002253 for_each_possible_cpu(cpu) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002254 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2255 per_cpu(rcu_torture_count, cpu)[i] = 0;
2256 per_cpu(rcu_torture_batch, cpu)[i] = 0;
2257 }
2258 }
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07002259 err_segs_recorded = 0;
2260 rt_read_nsegs = 0;
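	/*
	 * err_segs_recorded and rt_read_nsegs feed the reader-segment error
	 * reporting; like the statistics above, they are cleared here so
	 * that a failure report from a previous run of this module cannot
	 * leak into this one.
	 */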
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002261
2262 /* Start up the kthreads. */
2263
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002264 firsterr = torture_create_kthread(rcu_torture_writer, NULL,
2265 writer_task);
2266 if (firsterr)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002267 goto unwind;
Paul E. McKenney4444d852015-05-14 15:42:40 -07002268 if (nfakewriters > 0) {
Paul E. McKenney68a675d2017-12-01 14:26:56 -08002269 fakewriter_tasks = kcalloc(nfakewriters,
Paul E. McKenney4444d852015-05-14 15:42:40 -07002270 sizeof(fakewriter_tasks[0]),
2271 GFP_KERNEL);
2272 if (fakewriter_tasks == NULL) {
2273 VERBOSE_TOROUT_ERRSTRING("out of memory");
2274 firsterr = -ENOMEM;
2275 goto unwind;
2276 }
Josh Triplettb772e1d2006-10-04 02:17:13 -07002277 }
2278 for (i = 0; i < nfakewriters; i++) {
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002279 firsterr = torture_create_kthread(rcu_torture_fakewriter,
2280 NULL, fakewriter_tasks[i]);
2281 if (firsterr)
Josh Triplettb772e1d2006-10-04 02:17:13 -07002282 goto unwind;
Josh Triplettb772e1d2006-10-04 02:17:13 -07002283 }
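	/*
	 * The fake writers exist mainly to exercise the update-side
	 * primitives (for example cur_ops->sync()) concurrently with the
	 * real writer, without participating in the pipeline checks.
	 */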
Paul E. McKenney68a675d2017-12-01 14:26:56 -08002284 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002285 GFP_KERNEL);
2286 if (reader_tasks == NULL) {
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08002287 VERBOSE_TOROUT_ERRSTRING("out of memory");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002288 firsterr = -ENOMEM;
2289 goto unwind;
2290 }
2291 for (i = 0; i < nrealreaders; i++) {
Paul E. McKenneyc04dd092018-07-23 14:16:47 -07002292 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002293 reader_tasks[i]);
2294 if (firsterr)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002295 goto unwind;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002296 }
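	/*
	 * The reader index is smuggled through the kthread argument via the
	 * (void *)i cast; i is declared long near the top of this function
	 * precisely so that this cast round-trips cleanly on 64-bit builds.
	 */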
2297 if (stat_interval > 0) {
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002298 firsterr = torture_create_kthread(rcu_torture_stats, NULL,
2299 stats_task);
2300 if (firsterr)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002301 goto unwind;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002302 }
Paul E. McKenneye8e255f2015-05-14 16:55:45 -07002303 if (test_no_idle_hz && shuffle_interval > 0) {
Paul E. McKenney3808dc92014-01-28 15:29:21 -08002304 firsterr = torture_shuffle_init(shuffle_interval * HZ);
2305 if (firsterr)
Rusty Russell73d0a4b2009-03-30 22:05:16 -06002306 goto unwind;
Srivatsa Vaddagirid84f5202006-01-08 01:03:42 -08002307 }
Paul E. McKenneyd120f652008-06-18 05:21:44 -07002308 if (stutter < 0)
2309 stutter = 0;
2310 if (stutter) {
Paul E. McKenney628edaa2014-01-31 11:57:43 -08002311 firsterr = torture_stutter_init(stutter * HZ);
2312 if (firsterr)
Paul E. McKenneyd120f652008-06-18 05:21:44 -07002313 goto unwind;
Paul E. McKenneyd120f652008-06-18 05:21:44 -07002314 }
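	/*
	 * With, say, stutter=5, every torture kthread alternates between
	 * roughly five seconds of full-throttle testing and five seconds of
	 * idleness, which tends to flush out bugs at load transitions.
	 */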
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002315 if (fqs_duration < 0)
2316 fqs_duration = 0;
2317 if (fqs_duration) {
Paul E. McKenney628edaa2014-01-31 11:57:43 -08002318	/* Create the force-quiescent-state (fqs) kthread. */
Paul E. McKenneyd0d06062014-03-17 20:56:45 -07002319 firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
2320 fqs_task);
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002321 if (firsterr)
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002322 goto unwind;
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002323 }
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002324 if (test_boost_interval < 1)
2325 test_boost_interval = 1;
2326 if (test_boost_duration < 2)
2327 test_boost_duration = 2;
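	/*
	 * Example with defaults of test_boost_interval=7 and
	 * test_boost_duration=4 (both assumed from the parameter
	 * definitions earlier in this file): priority boosting is attempted
	 * for roughly four seconds out of every seven, starting one full
	 * interval from now.
	 */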
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002328 if (rcu_torture_can_boost()) {
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002329
2330 boost_starttime = jiffies + test_boost_interval * HZ;
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02002331
2332 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
2333 rcutorture_booster_init,
2334 rcutorture_booster_cleanup);
2335 if (firsterr < 0)
2336 goto unwind;
2337 rcutor_hp = firsterr;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002338 }
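	/*
	 * Note that cpuhp_setup_state() with CPUHP_AP_ONLINE_DYN returns
	 * the dynamically allocated hotplug state number on success, which
	 * is why the non-negative firsterr is stashed in rcutor_hp for the
	 * matching cpuhp_remove_state() at cleanup time.
	 */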
Paul E. McKenney01025eb2014-01-31 15:15:02 -08002339 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
2340 if (firsterr)
Paul E. McKenneye991dbc2014-01-31 14:52:13 -08002341 goto unwind;
Paul E. McKenney028be122018-05-08 09:20:34 -07002342 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval);
Paul E. McKenney01025eb2014-01-31 15:15:02 -08002343 if (firsterr)
Paul E. McKenney37e377d2012-02-17 22:12:18 -08002344 goto unwind;
Paul E. McKenney01025eb2014-01-31 15:15:02 -08002345 firsterr = rcu_torture_stall_init();
2346 if (firsterr)
Paul E. McKenney37e377d2012-02-17 22:12:18 -08002347 goto unwind;
Paul E. McKenney1b272912018-07-18 14:32:31 -07002348 firsterr = rcu_torture_fwd_prog_init();
2349 if (firsterr)
2350 goto unwind;
Paul E. McKenney01025eb2014-01-31 15:15:02 -08002351 firsterr = rcu_torture_barrier_init();
2352 if (firsterr)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002353 goto unwind;
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002354 if (object_debug)
2355 rcu_test_debug_objects();
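	/*
	 * object_debug is a module parameter (assumed from its definition
	 * earlier in this file), so for instance "modprobe rcutorture
	 * object_debug=1" requests the duplicate-call_rcu() check above.
	 */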
Paul E. McKenneyb5daa8f2014-01-30 13:38:09 -08002356 torture_init_end();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002357 return 0;
2358
2359unwind:
Paul E. McKenneyb5daa8f2014-01-30 13:38:09 -08002360 torture_init_end();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002361 rcu_torture_cleanup();
2362 return firsterr;
2363}
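
/*
 * A note on the unwind path: any failure above funnels through
 * rcu_torture_cleanup(), so each cleanup action must tolerate its
 * corresponding facility never having been started.
 */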
2364
2365module_init(rcu_torture_init);
2366module_exit(rcu_torture_cleanup);