/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");


/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)

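/*
 * Taken together, these bits describe a reader's state as a single int:
 * the low-order bits record which of the protections above are currently
 * held, and the bits at and above RCUTORTURE_RDR_SHIFT hold the index
 * returned by ->readlock() (meaningful for SRCU-like flavors).  As a
 * purely illustrative example, the value
 * (1 << RCUTORTURE_RDR_SHIFT) | RCUTORTURE_RDR_RCU | RCUTORTURE_RDR_BH
 * would describe a reader inside ->readlock() with index 1 that also has
 * softirqs (bh) disabled.  See rcutorture_one_extend() below.
 */
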
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

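/*
 * The above are ordinary module/boot parameters.  As a usage sketch (the
 * specific values are only examples), the SRCU flavor could be selected
 * with "modprobe rcutorture torture_type=srcu nreaders=8" when rcutorture
 * is built as a module, or with
 * "rcutorture.torture_type=srcu rcutorture.nreaders=8" on the kernel
 * command line when it is built in.
 */
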
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static struct list_head rcu_torture_removed;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */
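
/*
 * Either way, rcu_trace_clock_local() reports its timestamp in
 * microseconds (and simply reports zero when CONFIG_RCU_TRACE is not set);
 * rcu_read_delay() below uses it to timestamp long reader delays for
 * tracing.
 */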

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int ext_irq_conflict;
	const char *name;
};

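/*
 * Each flavor under test supplies one of these structures; cur_ops (below)
 * points at the instance selected via the torture_type module parameter.
 * Many of the hooks may be left NULL, in which case the corresponding
 * piece of the test is skipped, as can be seen in the flavor-specific
 * instances later in this file (rcu_ops, srcu_ops, and friends).
 */
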
static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!rcu_fwd_cb_nodelay &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

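/*
 * To make the "pipeline" concrete: once the writer replaces an element,
 * each grace period advances that element's rtort_pipe_count by one (via
 * the updates above or via rcu_torture_cb() below), and the element is
 * returned to the freelist only after RCU_TORTURE_PIPE_LEN such periods.
 * Readers record the pipe count of the element they observe in the
 * per-CPU rcu_torture_count[] histogram, so an element seen with a large
 * count is evidence that a grace period ended while a reader could still
 * reference it.
 */
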
static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype		= RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.get_state	= get_state_synchronize_rcu,
	.cond_sync	= cond_synchronize_rcu,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.stall_dur	= rcu_jiffies_till_stall_check,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	static DEFINE_TORTURE_RANDOM(rand);

	if (torture_random(&rand) & 0x800)
		cleanup_srcu_struct(&srcu_ctld);
	else
		cleanup_srcu_struct_quiesced(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks"
};

static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled.  This is only possible if rcutorture is built-in;
	 * otherwise, the user should manually do this by setting the
	 * sched_rt_period_us and sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true; /* failed */
	}

	return false; /* passed */
}
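
/*
 * In concrete terms: with the default test_boost_duration of 4 seconds,
 * a callback that remains uninvoked for more than roughly 3.5 seconds
 * (test_boost_duration * HZ - HZ / 2) is counted as a boost failure.
 */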

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Track if the test failed already in this test interval? */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								  jiffies);
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

826 /*
Joel Fernandes (Google)3b745c82018-06-10 16:45:44 -0700827 * If boost never happened, then inflight will always be 1, in
828 * this case the boost check would never happen in the above
829 * loop so do another one here.
830 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -0700958 rcu_torture_writer_state = RTWS_STOPPING;
959 torture_kthread_stopping("rcu_torture_writer");
960 }
961
Paul E. McKenneya241ec62005-10-30 15:03:12 -0800962 do {
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -0800963 rcu_torture_writer_state = RTWS_FIXED_DELAY;
Paul E. McKenneya241ec62005-10-30 15:03:12 -0800964 schedule_timeout_uninterruptible(1);
Paul E. McKenneya71fca52009-09-18 10:28:19 -0700965 rp = rcu_torture_alloc();
966 if (rp == NULL)
Paul E. McKenneya241ec62005-10-30 15:03:12 -0800967 continue;
968 rp->rtort_pipe_count = 0;
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -0800969 rcu_torture_writer_state = RTWS_DELAY;
Paul E. McKenney51b11302014-01-27 11:49:39 -0800970 udelay(torture_random(&rand) & 0x3ff);
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -0800971 rcu_torture_writer_state = RTWS_REPLACE;
Paul E. McKenney0ddea0e2010-09-19 21:06:14 -0700972 old_rp = rcu_dereference_check(rcu_torture_current,
973 current == writer_task);
Paul E. McKenney996417d2005-11-18 01:10:50 -0800974 rp->rtort_mbtest = 1;
Paul E. McKenneya241ec62005-10-30 15:03:12 -0800975 rcu_assign_pointer(rcu_torture_current, rp);
Paul E. McKenney9b2619a2009-09-23 09:50:43 -0700976 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
Josh Triplettc8e5b162007-05-08 00:33:20 -0700977 if (old_rp) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -0800978 i = old_rp->rtort_pipe_count;
979 if (i > RCU_TORTURE_PIPE_LEN)
980 i = RCU_TORTURE_PIPE_LEN;
981 atomic_inc(&rcu_torture_wcount[i]);
982 old_rp->rtort_pipe_count++;
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -0700983 switch (synctype[torture_random(&rand) % nsynctypes]) {
984 case RTWS_DEF_FREE:
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -0800985 rcu_torture_writer_state = RTWS_DEF_FREE;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -0700986 cur_ops->deferred_free(old_rp);
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -0700987 break;
988 case RTWS_EXP_SYNC:
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -0800989 rcu_torture_writer_state = RTWS_EXP_SYNC;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -0700990 cur_ops->exp_sync();
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -0700991 rcu_torture_pipe_update(old_rp);
992 break;
993 case RTWS_COND_GET:
994 rcu_torture_writer_state = RTWS_COND_GET;
995 gp_snap = cur_ops->get_state();
996 i = torture_random(&rand) % 16;
997 if (i != 0)
998 schedule_timeout_interruptible(i);
999 udelay(torture_random(&rand) % 1000);
1000 rcu_torture_writer_state = RTWS_COND_SYNC;
1001 cur_ops->cond_sync(gp_snap);
1002 rcu_torture_pipe_update(old_rp);
1003 break;
Paul E. McKenneyf0bf8fa2014-03-21 16:17:56 -07001004 case RTWS_SYNC:
1005 rcu_torture_writer_state = RTWS_SYNC;
1006 cur_ops->sync();
1007 rcu_torture_pipe_update(old_rp);
1008 break;
Paul E. McKenneya48f3fa2014-03-18 15:57:41 -07001009 default:
1010 WARN_ON_ONCE(1);
1011 break;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001012 }
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001013 }
Paul E. McKenney1b272912018-07-18 14:32:31 -07001014 WRITE_ONCE(rcu_torture_current_version,
1015 rcu_torture_current_version + 1);
Paul E. McKenney4bb3c5f2015-02-18 16:31:29 -08001016 /* Cycle through nesting levels of rcu_expedite_gp() calls. */
1017 if (can_expedite &&
1018 !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
1019 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
1020 if (expediting >= 0)
1021 rcu_expedite_gp();
1022 else
1023 rcu_unexpedite_gp();
1024 if (++expediting > 3)
1025 expediting = -expediting;
Paul E. McKenneyf7c0e6a2017-12-08 11:37:24 -08001026 } else if (!can_expedite) { /* Disabled during boot, recheck. */
1027 can_expedite = !rcu_gp_is_expedited() &&
1028 !rcu_gp_is_normal();
Paul E. McKenney4bb3c5f2015-02-18 16:31:29 -08001029 }
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001030 rcu_torture_writer_state = RTWS_STUTTER;
Paul E. McKenney474e59b2018-08-07 14:34:44 -07001031 if (stutter_wait("rcu_torture_writer"))
1032 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
1033 if (list_empty(&rcu_tortures[i].rtort_free))
1034 WARN_ON_ONCE(1);
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001035 } while (!torture_must_stop());
Paul E. McKenney4bb3c5f2015-02-18 16:31:29 -08001036 /* Reset expediting back to unexpedited. */
1037 if (expediting > 0)
1038 expediting = -expediting;
1039 while (can_expedite && expediting++ < 0)
1040 rcu_unexpedite_gp();
1041 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
Paul E. McKenneyf7c0e6a2017-12-08 11:37:24 -08001042 if (!can_expedite)
1043 pr_alert("%s" TORTURE_FLAG
1044 " Dynamic grace-period expediting was disabled.\n",
1045 torture_type);
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001046 rcu_torture_writer_state = RTWS_STOPPING;
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001047 torture_kthread_stopping("rcu_torture_writer");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001048 return 0;
1049}
1050
1051/*
Josh Triplettb772e1d2006-10-04 02:17:13 -07001052 * RCU torture fake writer kthread. Repeatedly calls sync, with a random
1053 * delay between calls.
1054 */
1055static int
1056rcu_torture_fakewriter(void *arg)
1057{
Paul E. McKenney51b11302014-01-27 11:49:39 -08001058 DEFINE_TORTURE_RANDOM(rand);
Josh Triplettb772e1d2006-10-04 02:17:13 -07001059
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001060 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
Linus Torvalds971eae72014-03-31 11:21:19 -07001061 set_user_nice(current, MAX_NICE);
Josh Triplettb772e1d2006-10-04 02:17:13 -07001062
1063 do {
Paul E. McKenney51b11302014-01-27 11:49:39 -08001064 schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
1065 udelay(torture_random(&rand) & 0x3ff);
Paul E. McKenney72472a02012-05-29 17:50:51 -07001066 if (cur_ops->cb_barrier != NULL &&
Paul E. McKenney51b11302014-01-27 11:49:39 -08001067 torture_random(&rand) % (nfakewriters * 8) == 0) {
Paul E. McKenney72472a02012-05-29 17:50:51 -07001068 cur_ops->cb_barrier();
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001069 } else if (gp_normal == gp_exp) {
Paul E. McKenneyeb033992017-12-08 10:48:41 -08001070 if (cur_ops->sync && torture_random(&rand) & 0x80)
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001071 cur_ops->sync();
Paul E. McKenneyeb033992017-12-08 10:48:41 -08001072 else if (cur_ops->exp_sync)
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001073 cur_ops->exp_sync();
Paul E. McKenneyeb033992017-12-08 10:48:41 -08001074 } else if (gp_normal && cur_ops->sync) {
Paul E. McKenney72472a02012-05-29 17:50:51 -07001075 cur_ops->sync();
Paul E. McKenneyeb033992017-12-08 10:48:41 -08001076 } else if (cur_ops->exp_sync) {
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07001077 cur_ops->exp_sync();
1078 }
Paul E. McKenney628edaa2014-01-31 11:57:43 -08001079 stutter_wait("rcu_torture_fakewriter");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001080 } while (!torture_must_stop());
Josh Triplettb772e1d2006-10-04 02:17:13 -07001081
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001082 torture_kthread_stopping("rcu_torture_fakewriter");
Josh Triplettb772e1d2006-10-04 02:17:13 -07001083 return 0;
1084}
1085
Paul E. McKenneyf34c85852017-07-20 15:27:32 -07001086static void rcu_torture_timer_cb(struct rcu_head *rhp)
1087{
1088 kfree(rhp);
1089}
1090
Josh Triplettb772e1d2006-10-04 02:17:13 -07001091/*
Paul E. McKenney2397d072018-05-25 07:29:25 -07001092 * Do one extension of an RCU read-side critical section using the
1093 * current reader state in readstate (set to zero for initial entry
1094 * to extended critical section), set the new state as specified by
1095 * newstate (set to zero for final exit from extended critical section),
1096 * and random-number-generator state in trsp. If this is neither the
1097 * beginning or end of the critical section and if there was actually a
1098	 * beginning nor the end of the critical section and if there was actually a
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001099 */
Paul E. McKenney2397d072018-05-25 07:29:25 -07001100static void rcutorture_one_extend(int *readstate, int newstate,
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001101 struct torture_random_state *trsp,
1102 struct rt_read_seg *rtrsp)
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001103{
Paul E. McKenney2397d072018-05-25 07:29:25 -07001104 int idxnew = -1;
1105 int idxold = *readstate;
1106 int statesnew = ~*readstate & newstate;
1107 int statesold = *readstate & ~newstate;
1108
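	/*
	 * The low-order bits of *readstate hold the RCUTORTURE_RDR_* flags
	 * currently in effect, while the bits at and above RCUTORTURE_RDR_SHIFT
	 * hold the index returned by ->readlock() so that it can later be
	 * handed back to ->readunlock().
	 */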
1109 WARN_ON_ONCE(idxold < 0);
1110 WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001111 rtrsp->rt_readstate = newstate;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001112
1113 /* First, put new protection in place to avoid critical-section gap. */
1114 if (statesnew & RCUTORTURE_RDR_BH)
1115 local_bh_disable();
1116 if (statesnew & RCUTORTURE_RDR_IRQ)
1117 local_irq_disable();
1118 if (statesnew & RCUTORTURE_RDR_PREEMPT)
1119 preempt_disable();
Paul E. McKenney2ceebc02018-07-06 15:16:12 -07001120 if (statesnew & RCUTORTURE_RDR_RBH)
1121 rcu_read_lock_bh();
1122 if (statesnew & RCUTORTURE_RDR_SCHED)
1123 rcu_read_lock_sched();
Paul E. McKenney2397d072018-05-25 07:29:25 -07001124 if (statesnew & RCUTORTURE_RDR_RCU)
1125 idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
1126
1127 /* Next, remove old protection, irq first due to bh conflict. */
1128 if (statesold & RCUTORTURE_RDR_IRQ)
1129 local_irq_enable();
1130 if (statesold & RCUTORTURE_RDR_BH)
1131 local_bh_enable();
1132 if (statesold & RCUTORTURE_RDR_PREEMPT)
1133 preempt_enable();
Paul E. McKenney2ceebc02018-07-06 15:16:12 -07001134 if (statesold & RCUTORTURE_RDR_RBH)
1135 rcu_read_unlock_bh();
1136 if (statesold & RCUTORTURE_RDR_SCHED)
1137 rcu_read_unlock_sched();
Paul E. McKenney2397d072018-05-25 07:29:25 -07001138 if (statesold & RCUTORTURE_RDR_RCU)
1139 cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
1140
1141 /* Delay if neither beginning nor end and there was a change. */
1142 if ((statesnew || statesold) && *readstate && newstate)
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001143 cur_ops->read_delay(trsp, rtrsp);
Paul E. McKenney2397d072018-05-25 07:29:25 -07001144
1145 /* Update the reader state. */
1146 if (idxnew == -1)
1147 idxnew = idxold & ~RCUTORTURE_RDR_MASK;
1148 WARN_ON_ONCE(idxnew < 0);
1149 WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
1150 *readstate = idxnew | newstate;
1151 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
1152 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
1153}
1154
1155/* Return the biggest extendables mask given current RCU and boot parameters. */
1156static int rcutorture_extend_mask_max(void)
1157{
1158 int mask;
1159
1160 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
1161 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
1162 mask = mask | RCUTORTURE_RDR_RCU;
1163 return mask;
1164}
1165
1166/* Return a random protection state mask, but with at least one bit set. */
1167static int
1168rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
1169{
1170 int mask = rcutorture_extend_mask_max();
Paul E. McKenneybf1bef52018-06-10 08:50:09 -07001171 unsigned long randmask1 = torture_random(trsp) >> 8;
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001172 unsigned long randmask2 = randmask1 >> 3;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001173
1174 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001175	/* Most of the time only one bit; one time in eight, lots of bits. */
1176 if (!(randmask1 & 0x7))
Paul E. McKenneybf1bef52018-06-10 08:50:09 -07001177 mask = mask & randmask2;
1178 else
1179 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
Paul E. McKenney2ceebc02018-07-06 15:16:12 -07001180 /* Can't enable bh w/irq disabled. */
Paul E. McKenney2397d072018-05-25 07:29:25 -07001181 if ((mask & RCUTORTURE_RDR_IRQ) &&
Paul E. McKenney2ceebc02018-07-06 15:16:12 -07001182 ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
1183 (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
1184 mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001185 if ((mask & RCUTORTURE_RDR_IRQ) &&
1186 !(mask & cur_ops->ext_irq_conflict) &&
1187 (oldmask & cur_ops->ext_irq_conflict))
1188 mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */
1189 return mask ?: RCUTORTURE_RDR_RCU;
1190}
1191
1192/*
1193 * Do a randomly selected number of extensions of an existing RCU read-side
1194 * critical section.
1195 */
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001196static struct rt_read_seg *
1197rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
1198 struct rt_read_seg *rtrsp)
Paul E. McKenney2397d072018-05-25 07:29:25 -07001199{
1200 int i;
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001201 int j;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001202 int mask = rcutorture_extend_mask_max();
1203
1204 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
1205 if (!((mask - 1) & mask))
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001206 return rtrsp; /* Current RCU reader not extendable. */
1207 /* Bias towards larger numbers of loops. */
1208 i = (torture_random(trsp) >> 3);
1209 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
1210 for (j = 0; j < i; j++) {
Paul E. McKenney2397d072018-05-25 07:29:25 -07001211 mask = rcutorture_extend_mask(*readstate, trsp);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001212 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
Paul E. McKenney2397d072018-05-25 07:29:25 -07001213 }
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001214 return &rtrsp[j];
Paul E. McKenney2397d072018-05-25 07:29:25 -07001215}
1216
1217/*
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001218 * Do one read-side critical section, returning false if there was
1219 * no data to read. Can be invoked both from process context and
1220 * from a timer handler.
1221 */
1222static bool rcu_torture_one_read(struct torture_random_state *trsp)
1223{
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001224 int i;
Paul E. McKenney917963d2014-11-21 17:10:16 -08001225 unsigned long started;
Paul E. McKenney6b80da42014-11-21 14:19:26 -08001226 unsigned long completed;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001227 int newstate;
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001228 struct rcu_torture *p;
1229 int pipe_count;
Paul E. McKenney2397d072018-05-25 07:29:25 -07001230 int readstate = 0;
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001231 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
1232 struct rt_read_seg *rtrsp = &rtseg[0];
1233 struct rt_read_seg *rtrsp1;
Paul E. McKenney52494532012-11-14 16:26:40 -08001234 unsigned long long ts;
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001235
Paul E. McKenney2397d072018-05-25 07:29:25 -07001236 newstate = rcutorture_extend_mask(readstate, trsp);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001237 rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
Paul E. McKenney17ef2fe2018-04-27 11:39:34 -07001238 started = cur_ops->get_gp_seq();
Steven Rostedte4aa0da2013-02-04 13:36:13 -05001239 ts = rcu_trace_clock_local();
Paul E. McKenney632ee202010-02-22 17:04:45 -08001240 p = rcu_dereference_check(rcu_torture_current,
Paul E. McKenney632ee202010-02-22 17:04:45 -08001241 rcu_read_lock_bh_held() ||
1242 rcu_read_lock_sched_held() ||
Paul E. McKenney5be5d1a2015-06-30 08:57:57 -07001243 srcu_read_lock_held(srcu_ctlp) ||
1244 torturing_tasks());
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001245 if (p == NULL) {
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001246 /* Wait for rcu_torture_writer to get underway */
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001247 rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001248 return false;
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001249 }
1250 if (p->rtort_mbtest == 0)
1251 atomic_inc(&n_rcu_torture_mberror);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001252 rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001253 preempt_disable();
1254 pipe_count = p->rtort_pipe_count;
1255 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1256 /* Should not happen, but... */
1257 pipe_count = RCU_TORTURE_PIPE_LEN;
1258 }
Paul E. McKenney17ef2fe2018-04-27 11:39:34 -07001259 completed = cur_ops->get_gp_seq();
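	/*
	 * As noted in the reader comments, pipe_count should never exceed 1.
	 * If it does, a grace period completed while this reader still held
	 * a reference, so trace the access and dump ftrace state.
	 */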
Paul E. McKenney52494532012-11-14 16:26:40 -08001260 if (pipe_count > 1) {
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001261 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1262 ts, started, completed);
Paul E. McKenney274529b2016-03-21 19:46:04 -07001263 rcu_ftrace_dump(DUMP_ALL);
Paul E. McKenney52494532012-11-14 16:26:40 -08001264 }
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001265 __this_cpu_inc(rcu_torture_count[pipe_count]);
Paul E. McKenneyd72193122018-05-15 15:24:41 -07001266 completed = rcutorture_seq_diff(completed, started);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001267 if (completed > RCU_TORTURE_PIPE_LEN) {
1268 /* Should not happen, but... */
1269 completed = RCU_TORTURE_PIPE_LEN;
1270 }
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001271 __this_cpu_inc(rcu_torture_batch[completed]);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001272 preempt_enable();
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001273 rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
Paul E. McKenney2397d072018-05-25 07:29:25 -07001274 WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001275
1276 /* If error or close call, record the sequence of reader protections. */
1277 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
1278 i = 0;
1279 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
1280 err_segs[i++] = *rtrsp1;
1281 rt_read_nsegs = i;
1282 }
1283
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001284 return true;
1285}
1286
Paul E. McKenney3025520e2018-05-22 11:38:47 -07001287static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
1288
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001289/*
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001290 * RCU torture reader from timer handler. Dereferences rcu_torture_current,
1291 * incrementing the corresponding element of the pipeline array. The
1292 * counter in the element should never be greater than 1, otherwise, the
1293 * RCU implementation is broken.
1294 */
1295static void rcu_torture_timer(struct timer_list *unused)
1296{
Paul E. McKenney8da9a592018-05-22 11:17:51 -07001297 atomic_long_inc(&n_rcu_torture_timers);
Paul E. McKenney241b4252018-05-22 11:59:31 -07001298 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));
Paul E. McKenneyf34c85852017-07-20 15:27:32 -07001299
1300 /* Test call_rcu() invocation from interrupt handler. */
1301 if (cur_ops->call) {
1302 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
1303
1304 if (rhp)
1305 cur_ops->call(rhp, rcu_torture_timer_cb);
1306 }
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001307}
1308
1309/*
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001310 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
1311 * incrementing the corresponding element of the pipeline array. The
1312	 * counter in the element should never be greater than 1; otherwise, the
1313 * RCU implementation is broken.
1314 */
1315static int
1316rcu_torture_reader(void *arg)
1317{
Paul E. McKenney444da512018-07-04 14:14:42 -07001318 unsigned long lastsleep = jiffies;
Paul E. McKenneyc04dd092018-07-23 14:16:47 -07001319 long myid = (long)arg;
1320 int mynumonline = myid;
Paul E. McKenney51b11302014-01-27 11:49:39 -08001321 DEFINE_TORTURE_RANDOM(rand);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001322 struct timer_list t;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001323
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001324 VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
Linus Torvalds971eae72014-03-31 11:21:19 -07001325 set_user_nice(current, MAX_NICE);
Paul E. McKenney0acc5122009-06-25 09:08:17 -07001326 if (irqreader && cur_ops->irq_capable)
Kees Cookfd30b712017-10-22 17:58:54 -07001327 timer_setup_on_stack(&t, rcu_torture_timer, 0);
Ingo Molnardbdf65b2005-11-13 16:07:22 -08001328
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001329 do {
Paul E. McKenney0acc5122009-06-25 09:08:17 -07001330 if (irqreader && cur_ops->irq_capable) {
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001331 if (!timer_pending(&t))
Paul E. McKenney6155fec2010-02-22 17:05:04 -08001332 mod_timer(&t, jiffies + 1);
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001333 }
Paul E. McKenney6b06aa72018-05-22 10:56:05 -07001334 if (!rcu_torture_one_read(&rand))
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001335 schedule_timeout_interruptible(HZ);
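		/* Sleep for a jiffy roughly every ten jiffies of reader runtime. */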
Paul E. McKenney444da512018-07-04 14:14:42 -07001336 if (time_after(jiffies, lastsleep)) {
1337 schedule_timeout_interruptible(1);
1338 lastsleep = jiffies + 10;
1339 }
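		/* Readers numbered beyond the online-CPU count wait here. */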
Paul E. McKenneyc04dd092018-07-23 14:16:47 -07001340 while (num_online_cpus() < mynumonline && !torture_must_stop())
1341 schedule_timeout_interruptible(HZ / 5);
Paul E. McKenney628edaa2014-01-31 11:57:43 -08001342 stutter_wait("rcu_torture_reader");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001343 } while (!torture_must_stop());
Thomas Gleixner424c1b62014-03-23 08:58:27 -07001344 if (irqreader && cur_ops->irq_capable) {
Paul E. McKenney0729fbf2008-06-25 12:24:52 -07001345 del_timer_sync(&t);
Thomas Gleixner424c1b62014-03-23 08:58:27 -07001346 destroy_timer_on_stack(&t);
1347 }
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001348 torture_kthread_stopping("rcu_torture_reader");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001349 return 0;
1350}
1351
1352/*
Joe Percheseea203f2014-07-14 09:16:15 -04001353 * Print torture statistics. Caller must ensure that there is only
1354 * one call to this function at a given time!!! This is normally
1355 * accomplished by relying on the module system to only have one copy
1356 * of the module loaded, and then by giving the rcu_torture_stats
1357 * kthread full control (or the init/cleanup functions when rcu_torture_stats
1358 * thread is not running).
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001359 */
Chen Gangd1008952013-11-07 10:30:25 +08001360static void
Joe Percheseea203f2014-07-14 09:16:15 -04001361rcu_torture_stats_print(void)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001362{
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001363 int cpu;
1364 int i;
1365 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1366 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001367 static unsigned long rtcv_snap = ULONG_MAX;
Paul E. McKenney0032f4e2017-08-30 10:40:17 -07001368 static bool splatted;
Paul E. McKenney4ffa6692016-06-30 11:56:38 -07001369 struct task_struct *wtp;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001370
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08001371 for_each_possible_cpu(cpu) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001372 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1373 pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
1374 batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
1375 }
1376 }
1377 for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
1378 if (pipesummary[i] != 0)
1379 break;
1380 }
Joe Percheseea203f2014-07-14 09:16:15 -04001381
1382 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1383 pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
1384 rcu_torture_current,
1385 rcu_torture_current_version,
1386 list_empty(&rcu_torture_freelist),
1387 atomic_read(&n_rcu_torture_alloc),
1388 atomic_read(&n_rcu_torture_alloc_fail),
1389 atomic_read(&n_rcu_torture_free));
SeongJae Park472213a2016-08-13 15:54:35 +09001390 pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
Joe Percheseea203f2014-07-14 09:16:15 -04001391 atomic_read(&n_rcu_torture_mberror),
SeongJae Park472213a2016-08-13 15:54:35 +09001392 n_rcu_torture_barrier_error,
Joe Percheseea203f2014-07-14 09:16:15 -04001393 n_rcu_torture_boost_ktrerror,
1394 n_rcu_torture_boost_rterror);
1395 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
1396 n_rcu_torture_boost_failure,
1397 n_rcu_torture_boosts,
Paul E. McKenney8da9a592018-05-22 11:17:51 -07001398 atomic_long_read(&n_rcu_torture_timers));
Joe Percheseea203f2014-07-14 09:16:15 -04001399 torture_onoff_stats();
Paul E. McKenneyfc6f9c52018-08-27 14:43:05 -07001400 pr_cont("barrier: %ld/%ld:%ld\n",
Joe Percheseea203f2014-07-14 09:16:15 -04001401 n_barrier_successes,
1402 n_barrier_attempts,
1403 n_rcu_torture_barrier_error);
1404
1405 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001406 if (atomic_read(&n_rcu_torture_mberror) != 0 ||
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001407 n_rcu_torture_barrier_error != 0 ||
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001408 n_rcu_torture_boost_ktrerror != 0 ||
1409 n_rcu_torture_boost_rterror != 0 ||
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001410 n_rcu_torture_boost_failure != 0 ||
1411 i > 1) {
Joe Percheseea203f2014-07-14 09:16:15 -04001412 pr_cont("%s", "!!! ");
Paul E. McKenney996417d2005-11-18 01:10:50 -08001413 atomic_inc(&n_rcu_torture_error);
Ingo Molnar5af970a2008-06-18 10:09:48 +02001414 WARN_ON_ONCE(1);
Paul E. McKenney996417d2005-11-18 01:10:50 -08001415 }
Joe Percheseea203f2014-07-14 09:16:15 -04001416 pr_cont("Reader Pipe: ");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001417 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
Joe Percheseea203f2014-07-14 09:16:15 -04001418 pr_cont(" %ld", pipesummary[i]);
1419 pr_cont("\n");
1420
1421 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1422 pr_cont("Reader Batch: ");
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07001423 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
Joe Percheseea203f2014-07-14 09:16:15 -04001424 pr_cont(" %ld", batchsummary[i]);
1425 pr_cont("\n");
1426
1427 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1428 pr_cont("Free-Block Circulation: ");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001429 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
Joe Percheseea203f2014-07-14 09:16:15 -04001430 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001431 }
Joe Percheseea203f2014-07-14 09:16:15 -04001432 pr_cont("\n");
1433
Josh Triplettc8e5b162007-05-08 00:33:20 -07001434 if (cur_ops->stats)
Joe Percheseea203f2014-07-14 09:16:15 -04001435 cur_ops->stats();
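	/*
	 * If the writer has not advanced rcu_torture_current_version since
	 * the last statistics interval, report a possible writer stall,
	 * including grace-period state and the writer task's state and CPU.
	 */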
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001436 if (rtcv_snap == rcu_torture_current_version &&
1437 rcu_torture_current != NULL) {
Paul E. McKenney7f6733c2017-04-18 17:17:35 -07001438 int __maybe_unused flags = 0;
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001439 unsigned long __maybe_unused gp_seq = 0;
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001440
1441 rcutorture_get_gp_data(cur_ops->ttype,
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001442 &flags, &gp_seq);
Paul E. McKenney7f6733c2017-04-18 17:17:35 -07001443 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001444 &flags, &gp_seq);
Paul E. McKenney4ffa6692016-06-30 11:56:38 -07001445 wtp = READ_ONCE(writer_task);
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001446 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
Paul E. McKenney18aff332015-11-17 13:35:28 -08001447 rcu_torture_writer_state_getname(),
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001448 rcu_torture_writer_state, gp_seq, flags,
Paul E. McKenney808de392017-06-19 10:03:22 -07001449 wtp == NULL ? ~0UL : wtp->state,
1450 wtp == NULL ? -1 : (int)task_cpu(wtp));
Paul E. McKenney0032f4e2017-08-30 10:40:17 -07001451 if (!splatted && wtp) {
1452 sched_show_task(wtp);
1453 splatted = true;
1454 }
Paul E. McKenneyafea2272014-03-12 07:10:41 -07001455 show_rcu_gp_kthreads();
Paul E. McKenney274529b2016-03-21 19:46:04 -07001456 rcu_ftrace_dump(DUMP_ALL);
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -08001457 }
1458 rtcv_snap = rcu_torture_current_version;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001459}
1460
1461/*
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001462 * Periodically prints torture statistics, if periodic statistics printing
1463 * was specified via the stat_interval module parameter.
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001464 */
1465static int
1466rcu_torture_stats(void *arg)
1467{
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001468 VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001469 do {
1470 schedule_timeout_interruptible(stat_interval * HZ);
1471 rcu_torture_stats_print();
Paul E. McKenneyf67a3352014-01-29 07:40:27 -08001472 torture_shutdown_absorb("rcu_torture_stats");
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001473 } while (!torture_must_stop());
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001474 torture_kthread_stopping("rcu_torture_stats");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001475 return 0;
1476}
1477
Paul E. McKenneyeac45e52018-05-17 11:33:17 -07001478static void
Steven Rostedt (Red Hat)e66c33d2013-07-12 16:50:28 -04001479rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
Paul E. McKenney95c38322006-03-24 03:15:58 -08001480{
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001481 pr_alert("%s" TORTURE_FLAG
1482 "--- %s: nreaders=%d nfakewriters=%d "
1483 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1484 "shuffle_interval=%d stutter=%d irqreader=%d "
1485 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1486 "test_boost=%d/%d test_boost_interval=%d "
1487 "test_boost_duration=%d shutdown_secs=%d "
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001488 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
Paul E. McKenney67afeed2012-10-20 12:56:06 -07001489 "n_barrier_cbs=%d "
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001490 "onoff_interval=%d onoff_holdoff=%d\n",
1491 torture_type, tag, nrealreaders, nfakewriters,
1492 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1493 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1494 test_boost, cur_ops->can_boost,
1495 test_boost_interval, test_boost_duration, shutdown_secs,
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001496 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
Paul E. McKenney67afeed2012-10-20 12:56:06 -07001497 n_barrier_cbs,
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001498 onoff_interval, onoff_holdoff);
Paul E. McKenney95c38322006-03-24 03:15:58 -08001499}
1500
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001501static int rcutorture_booster_cleanup(unsigned int cpu)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001502{
1503 struct task_struct *t;
1504
1505 if (boost_tasks[cpu] == NULL)
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001506 return 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001507 mutex_lock(&boost_mutex);
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001508 t = boost_tasks[cpu];
1509 boost_tasks[cpu] = NULL;
Joel Fernandes (Google)450efca2018-06-10 16:45:43 -07001510 rcu_torture_enable_rt_throttle();
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001511 mutex_unlock(&boost_mutex);
1512
1513 /* This must be outside of the mutex, otherwise deadlock! */
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001514 torture_stop_kthread(rcu_torture_boost, t);
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001515 return 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001516}
1517
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001518static int rcutorture_booster_init(unsigned int cpu)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001519{
1520 int retval;
1521
1522 if (boost_tasks[cpu] != NULL)
1523 return 0; /* Already created, nothing more to do. */
1524
1525 /* Don't allow time recalculation while creating a new task. */
1526 mutex_lock(&boost_mutex);
Joel Fernandes (Google)450efca2018-06-10 16:45:43 -07001527 rcu_torture_disable_rt_throttle();
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001528 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
Eric Dumazet1f288092011-06-16 15:53:18 -07001529 boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
1530 cpu_to_node(cpu),
1531 "rcu_torture_boost");
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001532 if (IS_ERR(boost_tasks[cpu])) {
1533 retval = PTR_ERR(boost_tasks[cpu]);
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001534 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001535 n_rcu_torture_boost_ktrerror++;
1536 boost_tasks[cpu] = NULL;
1537 mutex_unlock(&boost_mutex);
1538 return retval;
1539 }
1540 kthread_bind(boost_tasks[cpu], cpu);
1541 wake_up_process(boost_tasks[cpu]);
1542 mutex_unlock(&boost_mutex);
1543 return 0;
1544}
1545
Paul E. McKenneyd5f546d2011-11-04 11:44:12 -07001546/*
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001547 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
1548 * induces a CPU stall for the time specified by stall_cpu.
1549 */
Paul Gortmaker49fb4c62013-06-19 14:52:21 -04001550static int rcu_torture_stall(void *args)
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001551{
1552 unsigned long stop_at;
1553
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001554 VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001555 if (stall_cpu_holdoff > 0) {
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001556 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001557 schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001558 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001559 }
1560 if (!kthread_should_stop()) {
Arnd Bergmann622be33f2018-06-18 16:47:34 +02001561 stop_at = ktime_get_seconds() + stall_cpu;
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001562 /* RCU CPU stall is expected behavior in following code. */
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001563 rcu_read_lock();
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001564 if (stall_cpu_irqsoff)
1565 local_irq_disable();
1566 else
1567 preempt_disable();
1568 pr_alert("rcu_torture_stall start on CPU %d.\n",
1569 smp_processor_id());
Arnd Bergmann622be33f2018-06-18 16:47:34 +02001570 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
1571 stop_at))
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001572 continue; /* Induce RCU CPU stall warning. */
Paul E. McKenney2b1516e2017-08-18 16:11:37 -07001573 if (stall_cpu_irqsoff)
1574 local_irq_enable();
1575 else
1576 preempt_enable();
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001577 rcu_read_unlock();
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001578 pr_alert("rcu_torture_stall end.\n");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001579 }
Paul E. McKenneyf67a3352014-01-29 07:40:27 -08001580 torture_shutdown_absorb("rcu_torture_stall");
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001581 while (!kthread_should_stop())
1582 schedule_timeout_interruptible(10 * HZ);
1583 return 0;
1584}
1585
1586/* Spawn CPU-stall kthread, if stall_cpu specified. */
1587static int __init rcu_torture_stall_init(void)
1588{
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001589 if (stall_cpu <= 0)
1590 return 0;
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08001591 return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
Paul E. McKenneyc13f3752012-01-20 15:36:33 -08001592}
1593
Paul E. McKenney9fdcb9a2018-07-19 13:36:00 -07001594/* State structure for forward-progress self-propagating RCU callback. */
1595struct fwd_cb_state {
1596 struct rcu_head rh;
1597 int stop;
1598};
1599
1600/*
1601 * Forward-progress self-propagating RCU callback function. Because
1602 * callbacks run from softirq, this function is an implicit RCU read-side
1603 * critical section.
1604 */
1605static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
1606{
1607 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
1608
1609 if (READ_ONCE(fcsp->stop)) {
1610 WRITE_ONCE(fcsp->stop, 2);
1611 return;
1612 }
1613 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
1614}
1615
Paul E. McKenney48718482018-08-15 15:32:51 -07001616/* State for continuous-flood RCU callbacks. */
1617struct rcu_fwd_cb {
1618 struct rcu_head rh;
1619 struct rcu_fwd_cb *rfc_next;
1620 int rfc_gps;
1621};
1622static DEFINE_SPINLOCK(rcu_fwd_lock);
1623static struct rcu_fwd_cb *rcu_fwd_cb_head;
1624static struct rcu_fwd_cb **rcu_fwd_cb_tail = &rcu_fwd_cb_head;
1625static long n_launders_cb;
1626static unsigned long rcu_fwd_startat;
1627#define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */
1628#define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */
1629#define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */
1630static long n_launders_hist[2 * MAX_FWD_CB_JIFFIES / HZ];
1631
1632/* Callback function for continuous-flood RCU callbacks. */
1633static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
1634{
1635 int i;
1636 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
1637 struct rcu_fwd_cb **rfcpp;
1638
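	/*
	 * Queue this callback's structure at the tail of the flood list so
	 * that rcu_torture_fwd_prog() can post it again, then count this
	 * "laundering" in the histogram bucket for the current second.
	 */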
1639 rfcp->rfc_next = NULL;
1640 rfcp->rfc_gps++;
1641 spin_lock(&rcu_fwd_lock);
1642 rfcpp = rcu_fwd_cb_tail;
1643 rcu_fwd_cb_tail = &rfcp->rfc_next;
1644 WRITE_ONCE(*rfcpp, rfcp);
1645 WRITE_ONCE(n_launders_cb, n_launders_cb + 1);
1646 i = ((jiffies - rcu_fwd_startat) / HZ);
1647 if (i >= ARRAY_SIZE(n_launders_hist))
1648 i = ARRAY_SIZE(n_launders_hist) - 1;
1649 n_launders_hist[i]++;
1650 spin_unlock(&rcu_fwd_lock);
1651}
1652
Paul E. McKenney1b272912018-07-18 14:32:31 -07001653/* Carry out grace-period forward-progress testing. */
1654static int rcu_torture_fwd_prog(void *args)
1655{
Paul E. McKenney119248b2018-07-18 15:39:37 -07001656 unsigned long cver;
Paul E. McKenneyf4de46e2018-07-24 20:50:40 -07001657 unsigned long dur;
Paul E. McKenney7c590fc2018-08-07 16:42:42 -07001658 struct fwd_cb_state fcs;
Paul E. McKenney119248b2018-07-18 15:39:37 -07001659 unsigned long gps;
Paul E. McKenney48718482018-08-15 15:32:51 -07001660 int i;
Paul E. McKenney1b272912018-07-18 14:32:31 -07001661 int idx;
Paul E. McKenney48718482018-08-15 15:32:51 -07001662 int j;
1663 long n_launders;
1664 long n_launders_cb_snap;
1665 long n_launders_sa;
1666 long n_max_cbs;
1667 long n_max_gps;
1668 struct rcu_fwd_cb *rfcp;
1669 struct rcu_fwd_cb *rfcpn;
Paul E. McKenney08a7a2e2018-07-19 13:07:20 -07001670 int sd;
1671 int sd4;
Paul E. McKenney9fdcb9a2018-07-19 13:36:00 -07001672 bool selfpropcb = false;
Paul E. McKenney1b272912018-07-18 14:32:31 -07001673 unsigned long stopat;
Paul E. McKenney48718482018-08-15 15:32:51 -07001674 unsigned long stoppedat;
Paul E. McKenneyf4de46e2018-07-24 20:50:40 -07001675 int tested = 0;
Paul E. McKenney152f4af2018-07-19 10:57:58 -07001676 int tested_tries = 0;
Paul E. McKenney08a7a2e2018-07-19 13:07:20 -07001677 static DEFINE_TORTURE_RANDOM(trs);
Paul E. McKenney1b272912018-07-18 14:32:31 -07001678
1679 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
Paul E. McKenneyfecad502018-07-20 12:18:11 -07001680 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
1681 set_user_nice(current, MAX_NICE);
Paul E. McKenney9fdcb9a2018-07-19 13:36:00 -07001682 if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
1683 init_rcu_head_on_stack(&fcs.rh);
Paul E. McKenney9fdcb9a2018-07-19 13:36:00 -07001684 selfpropcb = true;
1685 }
Paul E. McKenney1b272912018-07-18 14:32:31 -07001686 do {
1687 schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
Paul E. McKenney48718482018-08-15 15:32:51 -07001688
1689 /* Tight loop containing cond_resched(). */
Paul E. McKenney7c590fc2018-08-07 16:42:42 -07001690 if (selfpropcb) {
1691 WRITE_ONCE(fcs.stop, 0);
1692 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
1693 }
Paul E. McKenney119248b2018-07-18 15:39:37 -07001694 cver = READ_ONCE(rcu_torture_current_version);
1695 gps = cur_ops->get_gp_seq();
Paul E. McKenney08a7a2e2018-07-19 13:07:20 -07001696 sd = cur_ops->stall_dur() + 1;
1697 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
Paul E. McKenneyf4de46e2018-07-24 20:50:40 -07001698 dur = sd4 + torture_random(&trs) % (sd - sd4);
Paul E. McKenney48718482018-08-15 15:32:51 -07001699 rcu_fwd_startat = jiffies;
1700 stopat = rcu_fwd_startat + dur;
Paul E. McKenney1b272912018-07-18 14:32:31 -07001701 while (time_before(jiffies, stopat) && !torture_must_stop()) {
1702 idx = cur_ops->readlock();
1703 udelay(10);
1704 cur_ops->readunlock(idx);
1705 if (!fwd_progress_need_resched || need_resched())
1706 cond_resched();
1707 }
Paul E. McKenney152f4af2018-07-19 10:57:58 -07001708 tested_tries++;
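		/*
		 * If the tight loop ran for its full duration, forward progress
		 * is demonstrated by either a writer version change or at least
		 * two completed grace periods.
		 */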
Paul E. McKenney1b272912018-07-18 14:32:31 -07001709 if (!time_before(jiffies, stopat) && !torture_must_stop()) {
Paul E. McKenneyf4de46e2018-07-24 20:50:40 -07001710 tested++;
1711 cver = READ_ONCE(rcu_torture_current_version) - cver;
Paul E. McKenney119248b2018-07-18 15:39:37 -07001712 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
Paul E. McKenneyf4de46e2018-07-24 20:50:40 -07001713 WARN_ON(!cver && gps < 2);
1714 pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
Paul E. McKenney1b272912018-07-18 14:32:31 -07001715 }
Paul E. McKenney7c590fc2018-08-07 16:42:42 -07001716 if (selfpropcb) {
1717 WRITE_ONCE(fcs.stop, 1);
1718 cur_ops->sync(); /* Wait for running CB to complete. */
1719 cur_ops->cb_barrier(); /* Wait for queued callbacks. */
1720 }
Paul E. McKenney48718482018-08-15 15:32:51 -07001721
1722 /* Loop continuously posting RCU callbacks. */
1723 WRITE_ONCE(rcu_fwd_cb_nodelay, true);
1724 cur_ops->sync(); /* Later readers see above write. */
1725 rcu_fwd_startat = jiffies;
1726 stopat = rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
1727 n_launders = 0;
1728 n_launders_cb = 0;
1729 n_launders_sa = 0;
1730 n_max_cbs = 0;
1731 n_max_gps = 0;
1732 for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++)
1733 n_launders_hist[i] = 0;
1734 cver = READ_ONCE(rcu_torture_current_version);
1735 gps = cur_ops->get_gp_seq();
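		/*
		 * Flood callbacks, reposting structures from the head of the
		 * list once they have been invoked ("laundered") and allocating
		 * new ones otherwise, until either the time expires or enough
		 * callbacks have each been laundered the minimum number of times.
		 */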
1736 while (time_before(jiffies, stopat) && !torture_must_stop()) {
1737 rfcp = READ_ONCE(rcu_fwd_cb_head);
1738 rfcpn = NULL;
1739 if (rfcp)
1740 rfcpn = READ_ONCE(rfcp->rfc_next);
1741 if (rfcpn) {
1742 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
1743 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
1744 break;
1745 rcu_fwd_cb_head = rfcpn;
1746 n_launders++;
1747 n_launders_sa++;
1748 } else {
1749 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
1750 if (WARN_ON_ONCE(!rfcp)) {
1751 schedule_timeout_interruptible(1);
1752 continue;
1753 }
1754 n_max_cbs++;
1755 n_launders_sa = 0;
1756 rfcp->rfc_gps = 0;
1757 }
1758 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
1759 cond_resched();
1760 }
1761 stoppedat = jiffies;
1762 n_launders_cb_snap = READ_ONCE(n_launders_cb);
1763 cver = READ_ONCE(rcu_torture_current_version) - cver;
1764 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
1765 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
1766 for (;;) {
1767 rfcp = rcu_fwd_cb_head;
1768 if (!rfcp)
1769 break;
1770 rcu_fwd_cb_head = rfcp->rfc_next;
1771 kfree(rfcp);
1772 }
1773 rcu_fwd_cb_tail = &rcu_fwd_cb_head;
1774 WRITE_ONCE(rcu_fwd_cb_nodelay, false);
1775 if (!torture_must_stop()) {
1776 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
1777 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
1778 __func__,
1779 stoppedat - rcu_fwd_startat,
1780 jiffies - stoppedat,
1781 n_launders + n_max_cbs - n_launders_cb_snap,
1782 n_launders, n_launders_sa,
1783 n_max_gps, n_max_cbs, cver, gps);
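			/* Print the histogram only out to its last non-empty bucket. */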
1784 for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--)
1785 if (n_launders_hist[i] > 0)
1786 break;
1787 pr_alert("Callback-invocation histogram:");
1788 for (j = 0; j <= i; j++)
1789 pr_cont(" %ds: %ld", j + 1, n_launders_hist[j]);
1790 pr_cont("\n");
1791 }
1792
Paul E. McKenney1b272912018-07-18 14:32:31 -07001793 /* Avoid slow periods, better to test when busy. */
1794 stutter_wait("rcu_torture_fwd_prog");
1795 } while (!torture_must_stop());
Paul E. McKenney9fdcb9a2018-07-19 13:36:00 -07001796 if (selfpropcb) {
Paul E. McKenney9fdcb9a2018-07-19 13:36:00 -07001797 WARN_ON(READ_ONCE(fcs.stop) != 2);
1798 destroy_rcu_head_on_stack(&fcs.rh);
1799 }
Paul E. McKenney152f4af2018-07-19 10:57:58 -07001800 /* Short runs might not contain a valid forward-progress attempt. */
1801 WARN_ON(!tested && tested_tries >= 5);
Paul E. McKenneyf4de46e2018-07-24 20:50:40 -07001802 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
Paul E. McKenney1b272912018-07-18 14:32:31 -07001803 torture_kthread_stopping("rcu_torture_fwd_prog");
1804 return 0;
1805}
1806
1807/* If forward-progress checking is requested and feasible, spawn the thread. */
1808static int __init rcu_torture_fwd_prog_init(void)
1809{
1810 if (!fwd_progress)
1811 return 0; /* Not requested, so don't do it. */
1812 if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0) {
1813 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
1814 return 0;
1815 }
1816 if (stall_cpu > 0) {
1817 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
1818		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
1819 return -EINVAL; /* In module, can fail back to user. */
1820 WARN_ON(1); /* Make sure rcutorture notices conflict. */
1821 return 0;
1822 }
1823 if (fwd_progress_holdoff <= 0)
1824 fwd_progress_holdoff = 1;
1825 if (fwd_progress_div <= 0)
1826 fwd_progress_div = 4;
1827 return torture_create_kthread(rcu_torture_fwd_prog,
1828 NULL, fwd_prog_task);
1829}
1830
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001831/* Callback function for RCU barrier testing. */
Rashika Kheriab3b8a4d2014-02-27 17:16:57 +05301832static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001833{
1834 atomic_inc(&barrier_cbs_invoked);
1835}
1836
1837/* kthread function to register callbacks used to test RCU barriers. */
1838static int rcu_torture_barrier_cbs(void *arg)
1839{
1840 long myid = (long)arg;
Paul E. McKenneyc6ebcbb2012-05-28 19:21:41 -07001841	bool lastphase = false;
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07001842 bool newphase;
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001843 struct rcu_head rcu;
1844
1845 init_rcu_head_on_stack(&rcu);
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001846 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
Linus Torvalds971eae72014-03-31 11:21:19 -07001847 set_user_nice(current, MAX_NICE);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001848 do {
1849 wait_event(barrier_cbs_wq[myid],
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07001850 (newphase =
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07001851 smp_load_acquire(&barrier_phase)) != lastphase ||
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001852 torture_must_stop());
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07001853 lastphase = newphase;
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001854 if (torture_must_stop())
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001855 break;
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07001856 /*
1857 * The above smp_load_acquire() ensures barrier_phase load
Paul E. McKenneyaab05732016-05-02 12:20:51 -07001858 * is ordered before the following ->call().
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07001859 */
Paul E. McKenney0aa67e72016-03-30 11:40:44 -07001860 local_irq_disable(); /* Just to test no-irq call_rcu(). */
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001861 cur_ops->call(&rcu, rcu_torture_barrier_cbf);
Paul E. McKenney0aa67e72016-03-30 11:40:44 -07001862 local_irq_enable();
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001863 if (atomic_dec_and_test(&barrier_cbs_count))
1864 wake_up(&barrier_wq);
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001865 } while (!torture_must_stop());
Paul E. McKenney69c60452014-07-01 11:59:36 -07001866 if (cur_ops->cb_barrier != NULL)
1867 cur_ops->cb_barrier();
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001868 destroy_rcu_head_on_stack(&rcu);
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001869 torture_kthread_stopping("rcu_torture_barrier_cbs");
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001870 return 0;
1871}
1872
1873/* kthread function to drive and coordinate RCU barrier testing. */
1874static int rcu_torture_barrier(void *arg)
1875{
1876 int i;
1877
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08001878 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
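	/*
	 * Each pass flips barrier_phase to release the rcu_torture_barrier_cbs
	 * kthreads, each of which posts one callback.  Once all of them have
	 * checked in, ->cb_barrier() is invoked and the count of invoked
	 * callbacks is verified against n_barrier_cbs.
	 */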
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001879 do {
1880 atomic_set(&barrier_cbs_invoked, 0);
1881 atomic_set(&barrier_cbs_count, n_barrier_cbs);
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07001882 /* Ensure barrier_phase ordered after prior assignments. */
1883 smp_store_release(&barrier_phase, !barrier_phase);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001884 for (i = 0; i < n_barrier_cbs; i++)
1885 wake_up(&barrier_cbs_wq[i]);
1886 wait_event(barrier_wq,
1887 atomic_read(&barrier_cbs_count) == 0 ||
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001888 torture_must_stop());
1889 if (torture_must_stop())
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001890 break;
1891 n_barrier_attempts++;
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07001892 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001893 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
1894 n_rcu_torture_barrier_error++;
Paul E. McKenney7602de4a2014-12-17 18:39:54 -08001895 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
1896 atomic_read(&barrier_cbs_invoked),
1897 n_barrier_cbs);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001898 WARN_ON_ONCE(1);
Joel Fernandes (Google)bf5b6432018-06-19 15:14:19 -07001899 } else {
1900 n_barrier_successes++;
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001901 }
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001902 schedule_timeout_interruptible(HZ / 10);
Paul E. McKenney36970bb2014-01-30 15:49:29 -08001903 } while (!torture_must_stop());
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08001904 torture_kthread_stopping("rcu_torture_barrier");
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001905 return 0;
1906}
1907
1908/* Initialize RCU barrier testing. */
1909static int rcu_torture_barrier_init(void)
1910{
1911 int i;
1912 int ret;
1913
Paul E. McKenneyd9eba7682015-05-14 15:35:43 -07001914 if (n_barrier_cbs <= 0)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001915 return 0;
1916 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07001917 pr_alert("%s" TORTURE_FLAG
1918 " Call or barrier ops missing for %s,\n",
1919 torture_type, cur_ops->name);
1920 pr_alert("%s" TORTURE_FLAG
1921 " RCU barrier testing omitted from run.\n",
1922 torture_type);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001923 return 0;
1924 }
1925 atomic_set(&barrier_cbs_count, 0);
1926 atomic_set(&barrier_cbs_invoked, 0);
1927 barrier_cbs_tasks =
Paul E. McKenney68a675d2017-12-01 14:26:56 -08001928 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001929 GFP_KERNEL);
1930 barrier_cbs_wq =
Paul E. McKenney68a675d2017-12-01 14:26:56 -08001931 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
Sasha Levinde5e6432012-12-20 14:11:28 -05001932 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001933 return -ENOMEM;
1934 for (i = 0; i < n_barrier_cbs; i++) {
1935 init_waitqueue_head(&barrier_cbs_wq[i]);
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08001936 ret = torture_create_kthread(rcu_torture_barrier_cbs,
1937 (void *)(long)i,
1938 barrier_cbs_tasks[i]);
1939 if (ret)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001940 return ret;
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001941 }
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08001942 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001943}
1944
1945/* Clean up after RCU barrier testing. */
1946static void rcu_torture_barrier_cleanup(void)
1947{
1948 int i;
1949
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001950 torture_stop_kthread(rcu_torture_barrier, barrier_task);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001951 if (barrier_cbs_tasks != NULL) {
Paul E. McKenney9c029b82014-02-04 11:47:08 -08001952 for (i = 0; i < n_barrier_cbs; i++)
1953 torture_stop_kthread(rcu_torture_barrier_cbs,
1954 barrier_cbs_tasks[i]);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08001955 kfree(barrier_cbs_tasks);
1956 barrier_cbs_tasks = NULL;
1957 }
1958 if (barrier_cbs_wq != NULL) {
1959 kfree(barrier_cbs_wq);
1960 barrier_cbs_wq = NULL;
1961 }
1962}
1963
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07001964static bool rcu_torture_can_boost(void)
1965{
1966 static int boost_warn_once;
1967 int prio;
1968
1969 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
1970 return false;
1971
1972 prio = rcu_get_gp_kthreads_prio();
1973 if (!prio)
1974 return false;
1975
1976 if (prio < 2) {
1977 if (boost_warn_once == 1)
1978 return false;
1979
Joel Fernandes (Google)bf5b6432018-06-19 15:14:19 -07001980 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07001981 boost_warn_once = 1;
1982 return false;
1983 }
1984
1985 return true;
1986}
1987
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02001988static enum cpuhp_state rcutor_hp;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07001989
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001990static void
1991rcu_torture_cleanup(void)
1992{
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07001993 int firsttime;
Paul E. McKenney034777d2018-04-19 08:43:11 -07001994 int flags = 0;
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07001995 unsigned long gp_seq = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08001996 int i;
1997
Davidlohr Buesod36a7a02014-09-11 20:40:21 -07001998 if (torture_cleanup_begin()) {
Paul E. McKenney343e9092008-12-15 16:13:07 -08001999 if (cur_ops->cb_barrier != NULL)
2000 cur_ops->cb_barrier();
2001 return;
2002 }
Paul E. McKenney3808dc92014-01-28 15:29:21 -08002003
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002004 rcu_torture_barrier_cleanup();
Paul E. McKenney1b272912018-07-18 14:32:31 -07002005 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002006 torture_stop_kthread(rcu_torture_stall, stall_task);
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002007 torture_stop_kthread(rcu_torture_writer, writer_task);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002008
Josh Triplettc8e5b162007-05-08 00:33:20 -07002009 if (reader_tasks) {
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002010 for (i = 0; i < nrealreaders; i++)
2011 torture_stop_kthread(rcu_torture_reader,
2012 reader_tasks[i]);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002013 kfree(reader_tasks);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002014 }
2015 rcu_torture_current = NULL;
2016
Josh Triplettc8e5b162007-05-08 00:33:20 -07002017 if (fakewriter_tasks) {
Josh Triplettb772e1d2006-10-04 02:17:13 -07002018 for (i = 0; i < nfakewriters; i++) {
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002019 torture_stop_kthread(rcu_torture_fakewriter,
2020 fakewriter_tasks[i]);
Josh Triplettb772e1d2006-10-04 02:17:13 -07002021 }
2022 kfree(fakewriter_tasks);
2023 fakewriter_tasks = NULL;
2024 }
2025
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07002026 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
2027 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
2028 pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
2029 cur_ops->name, gp_seq, flags);
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002030 torture_stop_kthread(rcu_torture_stats, stats_task);
2031 torture_stop_kthread(rcu_torture_fqs, fqs_task);
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002032 if (rcu_torture_can_boost())
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02002033 cpuhp_remove_state(rcutor_hp);
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002034
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07002035 /*
Paul E. McKenney62a1a942018-07-07 18:12:26 -07002036 * Wait for all RCU callbacks to fire, then do torture-type-specific
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07002037 * cleanup operations.
2038 */
Paul E. McKenney23269742008-05-12 21:21:05 +02002039 if (cur_ops->cb_barrier != NULL)
2040 cur_ops->cb_barrier();
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07002041 if (cur_ops->cleanup != NULL)
2042 cur_ops->cleanup();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002043
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002044 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002045
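	/* Dump the reader-segment sequence recorded at the first error or close call. */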
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07002046 if (err_segs_recorded) {
2047 pr_alert("Failure/close-call rcutorture reader segments:\n");
2048 if (rt_read_nsegs == 0)
2049 pr_alert("\t: No segments recorded!!!\n");
2050 firsttime = 1;
2051 for (i = 0; i < rt_read_nsegs; i++) {
2052 pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
2053 if (err_segs[i].rt_delay_jiffies != 0) {
2054 pr_cont("%s%ldjiffies", firsttime ? "" : "+",
2055 err_segs[i].rt_delay_jiffies);
2056 firsttime = 0;
2057 }
2058 if (err_segs[i].rt_delay_ms != 0) {
2059 pr_cont("%s%ldms", firsttime ? "" : "+",
2060 err_segs[i].rt_delay_ms);
2061 firsttime = 0;
2062 }
2063 if (err_segs[i].rt_delay_us != 0) {
2064 pr_cont("%s%ldus", firsttime ? "" : "+",
2065 err_segs[i].rt_delay_us);
2066 firsttime = 0;
2067 }
2068 pr_cont("%s\n",
2069 err_segs[i].rt_preempted ? "preempted" : "");
2070
2071 }
2072 }
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002073 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002074 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
Paul E. McKenney2e9e8082014-01-28 15:58:22 -08002075 else if (torture_onoff_failures())
Paul E. McKenney091541b2012-01-10 12:51:14 -08002076 rcu_torture_print_module_parms(cur_ops,
2077 "End of test: RCU_HOTPLUG");
Paul E. McKenney95c38322006-03-24 03:15:58 -08002078 else
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002079 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
Davidlohr Buesod36a7a02014-09-11 20:40:21 -07002080 torture_cleanup_end();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002081}
2082
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002083#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2084static void rcu_torture_leak_cb(struct rcu_head *rhp)
2085{
2086}
2087
2088static void rcu_torture_err_cb(struct rcu_head *rhp)
2089{
2090 /*
2091 * This -might- happen due to race conditions, but is unlikely.
2092 * The scenario that leads to this happening is that the
2093 * first of the pair of duplicate callbacks is queued,
2094 * someone else starts a grace period that includes that
2095 * callback, then the second of the pair must wait for the
2096 * next grace period. Unlikely, but can happen. If it
2097 * does happen, the debug-objects subsystem won't have splatted.
2098 */
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08002099 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002100}
2101#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2102
2103/*
2104 * Verify that double-free causes debug-objects to complain, but only
2105 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test
2106 * cannot be carried out.
2107 */
2108static void rcu_test_debug_objects(void)
2109{
2110#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2111 struct rcu_head rh1;
2112 struct rcu_head rh2;
2113
2114 init_rcu_head_on_stack(&rh1);
2115 init_rcu_head_on_stack(&rh2);
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08002116 pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002117
2118 /* Try to queue the rh2 pair of callbacks for the same grace period. */
2119 preempt_disable(); /* Prevent preemption from interrupting test. */
2120 rcu_read_lock(); /* Make it impossible to finish a grace period. */
2121 call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
2122 local_irq_disable(); /* Make it harder to start a new grace period. */
2123 call_rcu(&rh2, rcu_torture_leak_cb);
2124 call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
2125 local_irq_enable();
2126 rcu_read_unlock();
2127 preempt_enable();
2128
2129 /* Wait for them all to get done so we can safely return. */
2130 rcu_barrier();
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08002131 pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002132 destroy_rcu_head_on_stack(&rh1);
2133 destroy_rcu_head_on_stack(&rh2);
2134#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08002135 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002136#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2137}
2138
Josh Triplett6f8bc5002007-05-08 00:25:24 -07002139static int __init
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002140rcu_torture_init(void)
2141{
Paul E. McKenneyc04dd092018-07-23 14:16:47 -07002142 long i;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002143 int cpu;
2144 int firsterr = 0;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07002145 static struct rcu_torture_ops *torture_ops[] = {
Paul E. McKenneyc770c822018-07-07 10:28:07 -07002146 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
2147 &busted_srcud_ops, &tasks_ops,
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07002148 };
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002149
Paul E. McKenneya2f25772017-11-21 20:19:17 -08002150 if (!torture_init_begin(torture_type, verbose))
Paul E. McKenney52280842014-04-07 09:14:11 -07002151 return -EBUSY;
Paul E. McKenney343e9092008-12-15 16:13:07 -08002152
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002153 /* Process args and tell the world that the torturer is on the job. */
Josh Triplettade5fb82007-05-08 00:33:22 -07002154 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002155 cur_ops = torture_ops[i];
Josh Triplettade5fb82007-05-08 00:33:22 -07002156 if (strcmp(torture_type, cur_ops->name) == 0)
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002157 break;
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002158 }
Josh Triplettade5fb82007-05-08 00:33:22 -07002159 if (i == ARRAY_SIZE(torture_ops)) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07002160 pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
2161 torture_type);
2162 pr_alert("rcu-torture types:");
Paul E. McKenneycf886c42009-10-25 19:03:54 -07002163 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
Joe Perchesa7538352018-05-14 13:27:33 -07002164 pr_cont(" %s", torture_ops[i]->name);
2165 pr_cont("\n");
Paul E. McKenneye746b552018-07-07 17:35:22 -07002166 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
Paul E. McKenney889d4872015-08-24 11:37:58 -07002167 firsterr = -EINVAL;
2168 goto unwind;
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002169 }
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002170 if (cur_ops->fqs == NULL && fqs_duration != 0) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07002171 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002172 fqs_duration = 0;
2173 }
Josh Triplettc8e5b162007-05-08 00:33:20 -07002174 if (cur_ops->init)
Paul E. McKenney889d4872015-08-24 11:37:58 -07002175 cur_ops->init();
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002176
        if (nreaders >= 0) {
                nrealreaders = nreaders;
        } else {
                nrealreaders = num_online_cpus() - 2 - nreaders;
                if (nrealreaders <= 0)
                        nrealreaders = 1;
        }
        rcu_torture_print_module_parms(cur_ops, "Start of test");

        /* Set up the freelist. */

        INIT_LIST_HEAD(&rcu_torture_freelist);
        for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
                rcu_tortures[i].rtort_mbtest = 0;
                list_add_tail(&rcu_tortures[i].rtort_free,
                              &rcu_torture_freelist);
        }

        /* Initialize the statistics so that each run gets its own numbers. */

        rcu_torture_current = NULL;
        rcu_torture_current_version = 0;
        atomic_set(&n_rcu_torture_alloc, 0);
        atomic_set(&n_rcu_torture_alloc_fail, 0);
        atomic_set(&n_rcu_torture_free, 0);
        atomic_set(&n_rcu_torture_mberror, 0);
        atomic_set(&n_rcu_torture_error, 0);
        n_rcu_torture_barrier_error = 0;
        n_rcu_torture_boost_ktrerror = 0;
        n_rcu_torture_boost_rterror = 0;
        n_rcu_torture_boost_failure = 0;
        n_rcu_torture_boosts = 0;
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                atomic_set(&rcu_torture_wcount[i], 0);
        for_each_possible_cpu(cpu) {
                for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                        per_cpu(rcu_torture_count, cpu)[i] = 0;
                        per_cpu(rcu_torture_batch, cpu)[i] = 0;
                }
        }
        err_segs_recorded = 0;
        rt_read_nsegs = 0;

        /* Start up the kthreads. */

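        /* The single writer kthread drives the main update-side test. */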
        firsterr = torture_create_kthread(rcu_torture_writer, NULL,
                                          writer_task);
        if (firsterr)
                goto unwind;
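        /* Optional fake writers add update-side load by exercising the grace-period primitives. */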
        if (nfakewriters > 0) {
                fakewriter_tasks = kcalloc(nfakewriters,
                                           sizeof(fakewriter_tasks[0]),
                                           GFP_KERNEL);
                if (fakewriter_tasks == NULL) {
                        VERBOSE_TOROUT_ERRSTRING("out of memory");
                        firsterr = -ENOMEM;
                        goto unwind;
                }
        }
        for (i = 0; i < nfakewriters; i++) {
                firsterr = torture_create_kthread(rcu_torture_fakewriter,
                                                  NULL, fakewriter_tasks[i]);
                if (firsterr)
                        goto unwind;
        }
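        /* Create the pool of reader kthreads, one per requested reader. */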
        reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
                               GFP_KERNEL);
        if (reader_tasks == NULL) {
                VERBOSE_TOROUT_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nrealreaders; i++) {
                firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
                                                  reader_tasks[i]);
                if (firsterr)
                        goto unwind;
        }
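        /* Optionally print statistics every stat_interval seconds. */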
        if (stat_interval > 0) {
                firsterr = torture_create_kthread(rcu_torture_stats, NULL,
                                                  stats_task);
                if (firsterr)
                        goto unwind;
        }
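        /* Optionally shuffle the kthreads among CPUs so that some CPUs go idle. */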
        if (test_no_idle_hz && shuffle_interval > 0) {
                firsterr = torture_shuffle_init(shuffle_interval * HZ);
                if (firsterr)
                        goto unwind;
        }
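        /* Optionally stutter the load: full-speed bursts separated by quiet periods. */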
        if (stutter < 0)
                stutter = 0;
        if (stutter) {
                firsterr = torture_stutter_init(stutter * HZ);
                if (firsterr)
                        goto unwind;
        }
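        /* Optionally start a kthread that forces quiescent states in bursts. */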
        if (fqs_duration < 0)
                fqs_duration = 0;
        if (fqs_duration) {
                /* Create the fqs thread */
                firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
                                                  fqs_task);
                if (firsterr)
                        goto unwind;
        }
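        /* Clamp the boost parameters and, if supported, set up RCU priority-boost testing. */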
        if (test_boost_interval < 1)
                test_boost_interval = 1;
        if (test_boost_duration < 2)
                test_boost_duration = 2;
        if (rcu_torture_can_boost()) {

                boost_starttime = jiffies + test_boost_interval * HZ;

                firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
                                             rcutorture_booster_init,
                                             rcutorture_booster_cleanup);
                if (firsterr < 0)
                        goto unwind;
                rcutor_hp = firsterr;
        }
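        /* Register the remaining facilities: timed shutdown, CPU hotplug, stalls, forward progress, and rcu_barrier() testing. */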
        firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
        if (firsterr)
                goto unwind;
        firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval);
        if (firsterr)
                goto unwind;
        firsterr = rcu_torture_stall_init();
        if (firsterr)
                goto unwind;
        firsterr = rcu_torture_fwd_prog_init();
        if (firsterr)
                goto unwind;
        firsterr = rcu_torture_barrier_init();
        if (firsterr)
                goto unwind;
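        /* Optionally verify that debug-objects catches duplicate call_rcu() invocations. */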
        if (object_debug)
                rcu_test_debug_objects();
        torture_init_end();
        return 0;

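        /* A step above failed: tear down whatever was already initialized. */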
unwind:
        torture_init_end();
        rcu_torture_cleanup();
        return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);
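/*
 * Illustrative usage sketch (the parameter values below are arbitrary
 * examples, not recommendations): when built as a module, the test is
 * typically started and stopped from the shell, e.g.
 *
 *      modprobe rcutorture torture_type=srcu nreaders=4 stat_interval=15
 *      ... let the test run for a while ...
 *      rmmod rcutorture
 *
 * Progress reports and the final pass/fail summary appear in the console log.
 */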