// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	  Josh Triplett <josh@joshtriplett.org>
 *
 * See also: Documentation/RCU/torture.txt
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");


/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
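
/*
 * Editorial note (not from the original source): the reader state is a
 * single int.  Bits 0-5 record which protections are currently held,
 * and the index returned by the flavor's readlock() (the SRCU index)
 * is parked above RCUTORTURE_RDR_SHIFT.  For example, SRCU index 1
 * plus bh and preempt disabling would be encoded as:
 *
 *	(1 << RCUTORTURE_RDR_SHIFT) | RCUTORTURE_RDR_BH |
 *		RCUTORTURE_RDR_PREEMPT == 0x105
 */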

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */
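
/*
 * Editorial note (added): trace_clock_local() returns nanoseconds, and
 * the do_div() above divides in place by NSEC_PER_USEC (1000), so
 * rcu_trace_clock_local() reports microseconds: a raw reading of
 * 2,500,000 ns comes back as 2500 us.
 */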

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/* and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
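
/*
 * Editorial note (added): both pool operations take the lock with
 * spin_lock_bh(), presumably because rcu_torture_free() can be reached
 * from rcu_torture_cb(), which may run in softirq context, so
 * process-level callers must keep bh disabled while holding
 * rcu_torture_lock to avoid self-deadlock.
 */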

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	const char *name;
};
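
/*
 * Editorial sketch (not from the original source): a minimal flavor
 * needs only read-side and update-side hooks plus a name; members left
 * NULL are generally checked before use (as in rcu_torture_writer()
 * and rcu_torture_fakewriter() below).  Hypothetical example, with
 * my_read_lock(), my_read_unlock(), and my_synchronize() standing in
 * for a real flavor's primitives:
 *
 *	static struct rcu_torture_ops my_ops = {
 *		.ttype      = INVALID_RCU_FLAVOR,
 *		.init       = rcu_sync_torture_init,
 *		.readlock   = my_read_lock,   // returns an index, 0 if unused
 *		.read_delay = rcu_read_delay,
 *		.readunlock = my_read_unlock,
 *		.get_gp_seq = rcu_no_completed,
 *		.sync       = my_synchronize,
 *		.name       = "my_flavor"
 *	};
 */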

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!rcu_fwd_cb_nodelay &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}
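
/*
 * Editorial note (added) on the probabilities above: with N reader
 * threads (nrealreaders), the long mdelay() fires about once per
 * N * 2000 * 300 calls, the short 200-us delay about once per N * 400
 * calls, and the voluntary preemption about once per N * 500 calls,
 * so most invocations impose no delay at all.
 */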

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe. This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe. Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}
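
/*
 * Editorial note (added): each element thus advances one pipeline
 * stage per grace period and returns to the freelist only after aging
 * RCU_TORTURE_PIPE_LEN (10) grace periods.  A reader that can still
 * see an element late in the pipeline is evidence of a grace period
 * that ended too soon.
 */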

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype = RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.deferred_free = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.get_state = get_state_synchronize_rcu,
	.cond_sync = cond_synchronize_rcu,
	.call = call_rcu,
	.cb_barrier = rcu_barrier,
	.fqs = rcu_force_quiescent_state,
	.stats = NULL,
	.stall_dur = rcu_jiffies_till_stall_check,
	.irq_capable = 1,
	.can_boost = rcu_can_boost(),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
471static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
472{
473 /* This is a deliberate bug for testing purposes only! */
474 rcu_torture_cb(&p->rtort_rcu);
475}
476
477static void synchronize_rcu_busted(void)
478{
479 /* This is a deliberate bug for testing purposes only! */
480}
481
482static void
Boqun Fengb6a4ae72015-07-29 13:29:38 +0800483call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
Paul E. McKenneyff20e252014-02-06 08:45:56 -0800484{
485 /* This is a deliberate bug for testing purposes only! */
486 func(head);
487}
488
489static struct rcu_torture_ops rcu_busted_ops = {
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -0800490 .ttype = INVALID_RCU_FLAVOR,
Paul E. McKenneyff20e252014-02-06 08:45:56 -0800491 .init = rcu_sync_torture_init,
492 .readlock = rcu_torture_read_lock,
493 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
494 .readunlock = rcu_torture_read_unlock,
Paul E. McKenney17ef2fe2018-04-27 11:39:34 -0700495 .get_gp_seq = rcu_no_completed,
Paul E. McKenneyff20e252014-02-06 08:45:56 -0800496 .deferred_free = rcu_busted_torture_deferred_free,
497 .sync = synchronize_rcu_busted,
498 .exp_sync = synchronize_rcu_busted,
499 .call = call_rcu_busted,
500 .cb_barrier = NULL,
501 .fqs = NULL,
502 .stats = NULL,
503 .irq_capable = 1,
Paul E. McKenneyb3c98312017-06-06 16:39:00 -0700504 .name = "busted"
Paul E. McKenneyff20e252014-02-06 08:45:56 -0800505};
506
507/*
Paul E. McKenneyb2896d22006-10-04 02:17:03 -0700508 * Definitions for srcu torture testing.
509 */
510
Lai Jiangshancda4dc82012-10-13 01:14:17 +0800511DEFINE_STATIC_SRCU(srcu_ctl);
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -0700512static struct srcu_struct srcu_ctld;
513static struct srcu_struct *srcu_ctlp = &srcu_ctl;
Paul E. McKenneyb2896d22006-10-04 02:17:03 -0700514
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -0700515static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
Paul E. McKenneyb2896d22006-10-04 02:17:03 -0700516{
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -0700517 return srcu_read_lock(srcu_ctlp);
Paul E. McKenneyb2896d22006-10-04 02:17:03 -0700518}
519
Paul E. McKenneyc116dba2018-07-13 12:09:14 -0700520static void
521srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
Paul E. McKenneyb2896d22006-10-04 02:17:03 -0700522{
523 long delay;
524 const long uspertick = 1000000 / HZ;
525 const long longdelay = 10;
526
527 /* We want there to be long-running readers, but not all the time. */
528
Paul E. McKenney51b11302014-01-27 11:49:39 -0800529 delay = torture_random(rrsp) %
530 (nrealreaders * 2 * longdelay * uspertick);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -0700531 if (!delay && in_task()) {
Paul E. McKenneyb2896d22006-10-04 02:17:03 -0700532 schedule_timeout_interruptible(longdelay);
Paul E. McKenneyc116dba2018-07-13 12:09:14 -0700533 rtrsp->rt_delay_jiffies = longdelay;
534 } else {
535 rcu_read_delay(rrsp, rtrsp);
536 }
Paul E. McKenneyb2896d22006-10-04 02:17:03 -0700537}
538
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -0700539static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
Paul E. McKenneyb2896d22006-10-04 02:17:03 -0700540{
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -0700541 srcu_read_unlock(srcu_ctlp, idx);
Paul E. McKenneyb2896d22006-10-04 02:17:03 -0700542}
543
Paul E. McKenney6b80da42014-11-21 14:19:26 -0800544static unsigned long srcu_torture_completed(void)
Paul E. McKenneyb2896d22006-10-04 02:17:03 -0700545{
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -0700546 return srcu_batches_completed(srcu_ctlp);
Paul E. McKenneyb2896d22006-10-04 02:17:03 -0700547}
548
Lai Jiangshan9059c942012-03-19 16:12:14 +0800549static void srcu_torture_deferred_free(struct rcu_torture *rp)
550{
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -0700551 call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
Lai Jiangshan9059c942012-03-19 16:12:14 +0800552}
553
Josh Triplettb772e1d2006-10-04 02:17:13 -0700554static void srcu_torture_synchronize(void)
555{
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -0700556 synchronize_srcu(srcu_ctlp);
Josh Triplettb772e1d2006-10-04 02:17:13 -0700557}
558
Paul E. McKenneye3f8d372012-05-08 10:21:50 -0700559static void srcu_torture_call(struct rcu_head *head,
Boqun Fengb6a4ae72015-07-29 13:29:38 +0800560 rcu_callback_t func)
Paul E. McKenneye3f8d372012-05-08 10:21:50 -0700561{
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -0700562 call_srcu(srcu_ctlp, head, func);
Paul E. McKenneye3f8d372012-05-08 10:21:50 -0700563}
564
565static void srcu_torture_barrier(void)
566{
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -0700567 srcu_barrier(srcu_ctlp);
Paul E. McKenneye3f8d372012-05-08 10:21:50 -0700568}
569
Joe Percheseea203f2014-07-14 09:16:15 -0400570static void srcu_torture_stats(void)
Paul E. McKenneyb2896d22006-10-04 02:17:03 -0700571{
Paul E. McKenney115a1a52017-05-22 13:31:03 -0700572 srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
Paul E. McKenneyb2896d22006-10-04 02:17:03 -0700573}
574
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -0700575static void srcu_torture_synchronize_expedited(void)
576{
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -0700577 synchronize_srcu_expedited(srcu_ctlp);
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -0700578}
579
Paul E. McKenneyb2896d22006-10-04 02:17:03 -0700580static struct rcu_torture_ops srcu_ops = {
Paul E. McKenneyad0dc7f2014-02-19 10:51:42 -0800581 .ttype = SRCU_FLAVOR,
Lai Jiangshancda4dc82012-10-13 01:14:17 +0800582 .init = rcu_sync_torture_init,
Paul E. McKenney0acc5122009-06-25 09:08:17 -0700583 .readlock = srcu_torture_read_lock,
584 .read_delay = srcu_read_delay,
585 .readunlock = srcu_torture_read_unlock,
Paul E. McKenney17ef2fe2018-04-27 11:39:34 -0700586 .get_gp_seq = srcu_torture_completed,
Lai Jiangshan9059c942012-03-19 16:12:14 +0800587 .deferred_free = srcu_torture_deferred_free,
Paul E. McKenney0acc5122009-06-25 09:08:17 -0700588 .sync = srcu_torture_synchronize,
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -0700589 .exp_sync = srcu_torture_synchronize_expedited,
Paul E. McKenneye3f8d372012-05-08 10:21:50 -0700590 .call = srcu_torture_call,
591 .cb_barrier = srcu_torture_barrier,
Paul E. McKenney0acc5122009-06-25 09:08:17 -0700592 .stats = srcu_torture_stats,
Paul E. McKenney5e741fa2017-06-06 12:52:44 -0700593 .irq_capable = 1,
Paul E. McKenney0acc5122009-06-25 09:08:17 -0700594 .name = "srcu"
Paul E. McKenneyb2896d22006-10-04 02:17:03 -0700595};
596
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -0700597static void srcu_torture_init(void)
598{
599 rcu_sync_torture_init();
600 WARN_ON(init_srcu_struct(&srcu_ctld));
601 srcu_ctlp = &srcu_ctld;
602}
603
604static void srcu_torture_cleanup(void)
605{
Paul E. McKenneyf5ad3992019-02-13 13:54:37 -0800606 cleanup_srcu_struct(&srcu_ctld);
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -0700607 srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
608}
609
610/* As above, but dynamically allocated. */
611static struct rcu_torture_ops srcud_ops = {
612 .ttype = SRCU_FLAVOR,
613 .init = srcu_torture_init,
614 .cleanup = srcu_torture_cleanup,
615 .readlock = srcu_torture_read_lock,
616 .read_delay = srcu_read_delay,
617 .readunlock = srcu_torture_read_unlock,
Paul E. McKenney17ef2fe2018-04-27 11:39:34 -0700618 .get_gp_seq = srcu_torture_completed,
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -0700619 .deferred_free = srcu_torture_deferred_free,
620 .sync = srcu_torture_synchronize,
621 .exp_sync = srcu_torture_synchronize_expedited,
622 .call = srcu_torture_call,
623 .cb_barrier = srcu_torture_barrier,
624 .stats = srcu_torture_stats,
Paul E. McKenney5e741fa2017-06-06 12:52:44 -0700625 .irq_capable = 1,
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -0700626 .name = "srcud"
627};
628
Paul E. McKenney2397d072018-05-25 07:29:25 -0700629/* As above, but broken due to inappropriate reader extension. */
630static struct rcu_torture_ops busted_srcud_ops = {
631 .ttype = SRCU_FLAVOR,
632 .init = srcu_torture_init,
633 .cleanup = srcu_torture_cleanup,
634 .readlock = srcu_torture_read_lock,
635 .read_delay = rcu_read_delay,
636 .readunlock = srcu_torture_read_unlock,
637 .get_gp_seq = srcu_torture_completed,
638 .deferred_free = srcu_torture_deferred_free,
639 .sync = srcu_torture_synchronize,
640 .exp_sync = srcu_torture_synchronize_expedited,
641 .call = srcu_torture_call,
642 .cb_barrier = srcu_torture_barrier,
643 .stats = srcu_torture_stats,
644 .irq_capable = 1,
645 .extendables = RCUTORTURE_MAX_EXTEND,
646 .name = "busted_srcud"
647};
648
Josh Triplett4b6c2cc2006-10-04 02:17:16 -0700649/*
Paul E. McKenney69c60452014-07-01 11:59:36 -0700650 * Definitions for RCU-tasks torture testing.
651 */
652
653static int tasks_torture_read_lock(void)
654{
655 return 0;
656}
657
658static void tasks_torture_read_unlock(int idx)
659{
660}
661
662static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
663{
664 call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
665}
666
667static struct rcu_torture_ops tasks_ops = {
668 .ttype = RCU_TASKS_FLAVOR,
669 .init = rcu_sync_torture_init,
670 .readlock = tasks_torture_read_lock,
671 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
672 .readunlock = tasks_torture_read_unlock,
Paul E. McKenney17ef2fe2018-04-27 11:39:34 -0700673 .get_gp_seq = rcu_no_completed,
Paul E. McKenney69c60452014-07-01 11:59:36 -0700674 .deferred_free = rcu_tasks_torture_deferred_free,
675 .sync = synchronize_rcu_tasks,
676 .exp_sync = synchronize_rcu_tasks,
677 .call = call_rcu_tasks,
678 .cb_barrier = rcu_barrier_tasks,
679 .fqs = NULL,
680 .stats = NULL,
681 .irq_capable = 1,
Paul E. McKenney5eabea52019-04-12 09:02:46 -0700682 .slow_gps = 1,
Paul E. McKenney69c60452014-07-01 11:59:36 -0700683 .name = "tasks"
684};
685
Paul E. McKenneyc682db52019-04-19 07:38:27 -0700686/*
687 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
688 * This implementation does not necessarily work well with CPU hotplug.
689 */
690
691static void synchronize_rcu_trivial(void)
692{
693 int cpu;
694
695 for_each_online_cpu(cpu) {
696 rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
697 WARN_ON_ONCE(raw_smp_processor_id() != cpu);
698 }
699}
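
/*
 * Editorial note (added): the above acts as a grace period under
 * CONFIG_PREEMPT=n because binding to each online CPU in turn forces a
 * context switch on that CPU, and with preemption disabled across the
 * read-side critical sections below, a context switch on every CPU
 * implies that all pre-existing readers have completed.
 */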

static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype = RCU_TRIVIAL_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay, /* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.get_gp_seq = rcu_no_completed,
	.sync = synchronize_rcu_trivial,
	.exp_sync = synchronize_rcu_trivial,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "trivial"
};

static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}

/*
 * RCU torture priority-boost testing. Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked. If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. This is only possible if rcutorture is built-in;
	 * otherwise the user should do this manually by setting the
	 * sched_rt_period_us and sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true; /* failed */
	}

	return false; /* passed */
}
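
/*
 * Editorial arithmetic (added): the pass/fail threshold above is
 * test_boost_duration * HZ - HZ / 2.  With the default
 * test_boost_duration of 4 and HZ=1000, a callback left waiting more
 * than 3500 jiffies (3.5 s) during a boost interval counts as a
 * priority-boosting failure.
 */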

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Track whether the test has already failed in this interval. */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								  jiffies);
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boost never happened, then inflight will always be 1;
		 * in that case the boost check would never happen in the
		 * above loop, so do another one here.
		 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval. Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread. Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}
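
/*
 * Editorial arithmetic (added): fqs_burst_remaining is decremented by
 * fqs_holdoff per call, so one burst issues roughly
 * fqs_duration / fqs_holdoff calls to cur_ops->fqs().  For example,
 * fqs_duration=100 and fqs_holdoff=10 give 10 calls spaced 10 us
 * apart, followed by about fqs_stutter seconds of quiet.
 */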

/*
 * RCU torture writer kthread. Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);

	/* Initialize synctype[] array. If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_state();
				i = torture_random(&rand) % 16;
				if (i != 0)
					schedule_timeout_interruptible(i);
				udelay(torture_random(&rand) % 1000);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		if (stutter_wait("rcu_torture_writer") &&
		    !READ_ONCE(rcu_fwd_cb_nodelay) &&
		    !cur_ops->slow_gps &&
		    !torture_must_stop())
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free) &&
				    rcu_access_pointer(rcu_torture_current) !=
				    &rcu_tortures[i]) {
					rcu_ftrace_dump(DUMP_ALL);
					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
				}
	} while (!torture_must_stop());
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}

/*
 * RCU torture fake writer kthread. Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (cur_ops->sync && torture_random(&rand) & 0x80)
				cur_ops->sync();
			else if (cur_ops->exp_sync)
				cur_ops->exp_sync();
		} else if (gp_normal && cur_ops->sync) {
			cur_ops->sync();
		} else if (cur_ops->exp_sync) {
			cur_ops->exp_sync();
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}
1128
static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}

/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), setting the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and using the random-number-generator state in trsp.  If this is
 * neither the beginning nor the end of the critical section and if there
 * was actually a change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	int idxnew = -1;
	int idxold = *readstate;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold < 0);
	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU)
		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

	/* Next, remove old protection, irq first due to bh conflict. */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_RCU)
		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew == -1)
		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
	WARN_ON_ONCE(idxnew < 0);
	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
	*readstate = idxnew | newstate;
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
}

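/*
 * Worked example (illustrative only, not part of the original source):
 * suppose the old state has only RCUTORTURE_RDR_BH set and newstate
 * requests only RCUTORTURE_RDR_PREEMPT.  Then statesnew is
 * RCUTORTURE_RDR_PREEMPT and statesold is RCUTORTURE_RDR_BH, so
 * preempt_disable() runs *before* local_bh_enable(), and some form of
 * protection covers the whole transition.  Given a scratch rt_read_seg
 * array rtseg[] and torture-random state trsp, a caller might drive
 * such a sequence as:
 *
 *	int rs = 0;
 *
 *	rcutorture_one_extend(&rs, RCUTORTURE_RDR_BH, trsp, &rtseg[0]);
 *	rcutorture_one_extend(&rs, RCUTORTURE_RDR_PREEMPT, trsp, &rtseg[1]);
 *	rcutorture_one_extend(&rs, 0, trsp, &rtseg[2]); /* Final exit. */
 */
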
/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU;
	return mask;
}

/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp) >> 8;
	unsigned long randmask2 = randmask1 >> 3;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
	/* Can't enable bh w/irq disabled. */
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
	     (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
	return mask ?: RCUTORTURE_RDR_RCU;
}

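/*
 * Illustrative note (not part of the original source): the low three bits
 * of randmask1 are nonzero seven times out of eight, so the common case
 * selects a single random protection bit via
 * 1 << (randmask2 % RCUTORTURE_RDR_NBITS); only about one call in eight
 * ANDs the full extendables mask with randmask2 to produce a multi-bit
 * state.  The bh/irq fixup at the end then repairs any combination that
 * would re-enable bh while interrupts remain disabled, and an all-zero
 * result falls back to RCUTORTURE_RDR_RCU.
 */
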
/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp;  /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = (torture_random(trsp) >> 3);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}

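/*
 * Illustrative note (not part of the original source): ORing the random
 * value with a three-bit right shift of itself biases the loop count
 * toward its maximum.  Assuming for illustration that
 * RCUTORTURE_RDR_MAX_LOOPS is 0x7, each retained bit is set with
 * probability 3/4, so i takes its maximum value of 8 in roughly (3/4)^3,
 * or about 42%, of the calls.
 */
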
/*
 * Do one read-side critical section, returning false if there was
 * no data to read.  Can be invoked both from process context and
 * from a timer handler.
 */
static bool rcu_torture_one_read(struct torture_random_state *trsp)
{
	int i;
	unsigned long started;
	unsigned long completed;
	int newstate;
	struct rcu_torture *p;
	int pipe_count;
	int readstate = 0;
	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
	struct rt_read_seg *rtrsp = &rtseg[0];
	struct rt_read_seg *rtrsp1;
	unsigned long long ts;

	newstate = rcutorture_extend_mask(readstate, trsp);
	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(srcu_ctlp) ||
				  torturing_tasks());
	if (p == NULL) {
		/* Wait for rcu_torture_writer to get underway */
		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
		return false;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
					  ts, started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);

	/* If error or close call, record the sequence of reader protections. */
	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
		i = 0;
		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
			err_segs[i++] = *rtrsp1;
		rt_read_nsegs = i;
	}

	return true;
}

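/*
 * Illustrative note (not part of the original source): rtort_pipe_count
 * records how many grace periods have elapsed since the writer retired
 * the sampled element.  A reader observing pipe_count > 1 has therefore
 * held a reference across more than one full grace period, which a
 * correct RCU implementation must never permit; such events trigger the
 * ftrace dump above and feed the "Reader Pipe" line of the statistics.
 */
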
static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	atomic_long_inc(&n_rcu_torture_timers);
	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}

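/*
 * Illustrative note (not part of the original source): because the timer
 * handler runs in atomic (softirq) context, the allocation above must use
 * GFP_NOWAIT rather than GFP_KERNEL, and allocation failure is silently
 * tolerated.  Each successful allocation exercises one full
 * call_rcu()-from-interrupt round trip, with rcu_torture_timer_cb()
 * simply kfree()ing the rcu_head once the grace period ends.
 */
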
/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	unsigned long lastsleep = jiffies;
	long myid = (long)arg;
	int mynumonline = myid;
	DEFINE_TORTURE_RANDOM(rand);
	struct timer_list t;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		timer_setup_on_stack(&t, rcu_torture_timer, 0);
	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		if (!rcu_torture_one_read(&rand) && !torture_must_stop())
			schedule_timeout_interruptible(HZ);
		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
			schedule_timeout_interruptible(1);
			lastsleep = jiffies + 10;
		}
		while (num_online_cpus() < mynumonline && !torture_must_stop())
			schedule_timeout_interruptible(HZ / 5);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}

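/*
 * Illustrative note (not part of the original source): the lastsleep
 * bookkeeping above makes each reader sleep for at least one jiffy
 * roughly every ten jiffies so that tight read-side loops cannot starve
 * housekeeping work, and the num_online_cpus() check parks reader N
 * until at least N CPUs are online, keeping CPU-hotplug runs from
 * overcommitting the surviving CPUs.
 */
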
/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	static unsigned long rtcv_snap = ULONG_MAX;
	static bool splatted;
	struct task_struct *wtp;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		rcu_torture_current,
		rcu_torture_current ? "ver" : "VER",
		rcu_torture_current_version,
		list_empty(&rcu_torture_freelist),
		atomic_read(&n_rcu_torture_alloc),
		atomic_read(&n_rcu_torture_alloc_fail),
		atomic_read(&n_rcu_torture_free));
	pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
		atomic_read(&n_rcu_torture_mberror),
		n_rcu_torture_barrier_error,
		n_rcu_torture_boost_ktrerror,
		n_rcu_torture_boost_rterror);
	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
		n_rcu_torture_boost_failure,
		n_rcu_torture_boosts,
		atomic_long_read(&n_rcu_torture_timers));
	torture_onoff_stats();
	pr_cont("barrier: %ld/%ld:%ld\n",
		n_barrier_successes,
		n_barrier_attempts,
		n_rcu_torture_barrier_error);

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) ||
	    n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
	    n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
	    i > 1) {
		pr_cont("%s", "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
		WARN_ON_ONCE(n_rcu_torture_barrier_error);  // rcu_barrier()
		WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
		WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
		WARN_ON_ONCE(n_rcu_torture_boost_failure); // RCU boost failed
		WARN_ON_ONCE(i > 1); // Too-short grace period
	}
	pr_cont("Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", pipesummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", batchsummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
	}
	pr_cont("\n");

	if (cur_ops->stats)
		cur_ops->stats();
	if (rtcv_snap == rcu_torture_current_version &&
	    rcu_torture_current != NULL) {
		int __maybe_unused flags = 0;
		unsigned long __maybe_unused gp_seq = 0;

		rcutorture_get_gp_data(cur_ops->ttype,
				       &flags, &gp_seq);
		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
					&flags, &gp_seq);
		wtp = READ_ONCE(writer_task);
		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
			 rcu_torture_writer_state_getname(),
			 rcu_torture_writer_state, gp_seq, flags,
			 wtp == NULL ? ~0UL : wtp->state,
			 wtp == NULL ? -1 : (int)task_cpu(wtp));
		if (!splatted && wtp) {
			sched_show_task(wtp);
			splatted = true;
		}
		show_rcu_gp_kthreads();
		rcu_ftrace_dump(DUMP_ALL);
	}
	rtcv_snap = rcu_torture_current_version;
}

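/*
 * Illustrative note (not part of the original source): with hypothetical
 * counter values, the first statistics line assembled above might begin
 *
 *	rcu-torture: rtc: 00000000f0f0f0f0 ver: 2317 tfle: 0 rta: 2318
 *	rtaf: 0 rtf: 2308 rtmbe: 0 rtbe: 0 rtbke: 0 rtbre: 0 rtbf: 0
 *	rtb: 0 nt: 120394
 *
 * with CPU-hotplug statistics from torture_onoff_stats() and the
 * "barrier:" successes/attempts:errors triple appended.  A leading "!!!"
 * on the next line flags any error condition checked by the
 * WARN_ON_ONCE() calls.
 */
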
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}

static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff);
}

static int rcutorture_booster_cleanup(unsigned int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return 0;
	mutex_lock(&boost_mutex);
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	rcu_torture_enable_rt_throttle();
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	torture_stop_kthread(rcu_torture_boost, t);
	return 0;
}

static int rcutorture_booster_init(unsigned int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	rcu_torture_disable_rt_throttle();
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}

/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop()) {
		stop_at = ktime_get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		rcu_read_lock();
		if (stall_cpu_irqsoff)
			local_irq_disable();
		else
			preempt_disable();
		pr_alert("rcu_torture_stall start on CPU %d.\n",
			 smp_processor_id());
		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
				    stop_at))
			continue;  /* Induce RCU CPU stall warning. */
		if (stall_cpu_irqsoff)
			local_irq_enable();
		else
			preempt_enable();
		rcu_read_unlock();
		pr_alert("rcu_torture_stall end.\n");
	}
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}

/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	if (stall_cpu <= 0)
		return 0;
	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}

/* State structure for forward-progress self-propagating RCU callback. */
struct fwd_cb_state {
	struct rcu_head rh;
	int stop;
};

/*
 * Forward-progress self-propagating RCU callback function.  Because
 * callbacks run from softirq, this function is an implicit RCU read-side
 * critical section.
 */
static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
{
	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);

	if (READ_ONCE(fcsp->stop)) {
		WRITE_ONCE(fcsp->stop, 2);
		return;
	}
	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
}

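/*
 * Illustrative note (not part of the original source): the ->stop field
 * forms a small three-state handshake.  The poster initializes it to 0,
 * so each invocation re-posts the callback and it propagates through
 * successive grace periods.  To shut the callback down, the poster
 * stores 1 and waits for a grace period; the next invocation sees the
 * nonzero value, acknowledges by storing 2 instead of re-posting, and
 * the poster then checks for the value 2 to confirm termination.
 */
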
/* State for continuous-flood RCU callbacks. */
struct rcu_fwd_cb {
	struct rcu_head rh;
	struct rcu_fwd_cb *rfc_next;
	struct rcu_fwd *rfc_rfp;
	int rfc_gps;
};

#define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
#define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
#define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
#define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))

struct rcu_launder_hist {
	long n_launders;
	unsigned long launder_gp_seq;
};

struct rcu_fwd {
	spinlock_t rcu_fwd_lock;
	struct rcu_fwd_cb *rcu_fwd_cb_head;
	struct rcu_fwd_cb **rcu_fwd_cb_tail;
	long n_launders_cb;
	unsigned long rcu_fwd_startat;
	struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
	unsigned long rcu_launder_gp_seq_start;
};

struct rcu_fwd *rcu_fwds;
bool rcu_fwd_emergency_stop;

static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
{
	unsigned long gps;
	unsigned long gps_old;
	int i;
	int j;

	for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
		if (rfp->n_launders_hist[i].n_launders > 0)
			break;
	pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
		 __func__, jiffies - rfp->rcu_fwd_startat);
	gps_old = rfp->rcu_launder_gp_seq_start;
	for (j = 0; j <= i; j++) {
		gps = rfp->n_launders_hist[j].launder_gp_seq;
		pr_cont(" %ds/%d: %ld:%ld",
			j + 1, FWD_CBS_HIST_DIV,
			rfp->n_launders_hist[j].n_launders,
			rcutorture_seq_diff(gps, gps_old));
		gps_old = gps;
	}
	pr_cont("\n");
}

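/*
 * Illustrative note (not part of the original source): each bucket spans
 * 1/FWD_CBS_HIST_DIV of a second and prints as
 * "seconds/divisor: invocations:grace-periods".  With hypothetical
 * numbers, one run might emit
 *
 *	rcu_torture_fwd_cb_hist: Callback-invocation histogram
 *	(duration 1896 jiffies): 1s/10: 0:1 2s/10: 4089:10 3s/10: 4029:9
 *
 * showing, for each tenth of a second, how many callbacks were invoked
 * and how many grace periods completed.
 */
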
/* Callback function for continuous-flood RCU callbacks. */
static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
{
	unsigned long flags;
	int i;
	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
	struct rcu_fwd_cb **rfcpp;
	struct rcu_fwd *rfp = rfcp->rfc_rfp;

	rfcp->rfc_next = NULL;
	rfcp->rfc_gps++;
	spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
	rfcpp = rfp->rcu_fwd_cb_tail;
	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
	WRITE_ONCE(*rfcpp, rfcp);
	WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
	i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
	if (i >= ARRAY_SIZE(rfp->n_launders_hist))
		i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
	rfp->n_launders_hist[i].n_launders++;
	rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
	spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
}

// Give the scheduler a chance, even on nohz_full CPUs.
static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
{
	if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
		// Real call_rcu() floods hit userspace, so emulate that.
		if (need_resched() || (iter & 0xfff))
			schedule();
		return;
	}
	// No userspace emulation: CB invocation throttles call_rcu()
	cond_resched();
}

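/*
 * Illustrative note (not part of the original source): on kernels built
 * with both CONFIG_PREEMPTION and CONFIG_NO_HZ_FULL, the helper above
 * calls schedule() whenever need_resched() is set or the low twelve bits
 * of the iteration count are nonzero, emulating the trips to userspace
 * that throttle a real call_rcu() flood.  All other kernels get a plain
 * cond_resched(), relying on callback invocation itself for throttling.
 */
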
/*
 * Free all callbacks on the rcu_fwd_cb_head list, either because the
 * test is over or because we hit an OOM event.
 */
static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
{
	unsigned long flags;
	unsigned long freed = 0;
	struct rcu_fwd_cb *rfcp;

	for (;;) {
		spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
		rfcp = rfp->rcu_fwd_cb_head;
		if (!rfcp) {
			spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
			break;
		}
		rfp->rcu_fwd_cb_head = rfcp->rfc_next;
		if (!rfp->rcu_fwd_cb_head)
			rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
		spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
		kfree(rfcp);
		freed++;
		rcu_torture_fwd_prog_cond_resched(freed);
		if (tick_nohz_full_enabled()) {
			local_irq_save(flags);
			rcu_momentary_dyntick_idle();
			local_irq_restore(flags);
		}
	}
	return freed;
}

/* Carry out need_resched()/cond_resched() forward-progress testing. */
static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
				    int *tested, int *tested_tries)
{
	unsigned long cver;
	unsigned long dur;
	struct fwd_cb_state fcs;
	unsigned long gps;
	int idx;
	int sd;
	int sd4;
	bool selfpropcb = false;
	unsigned long stopat;
	static DEFINE_TORTURE_RANDOM(trs);

	if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
		init_rcu_head_on_stack(&fcs.rh);
		selfpropcb = true;
	}

	/* Tight loop containing cond_resched(). */
	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
	cur_ops->sync(); /* Later readers see above write. */
	if  (selfpropcb) {
		WRITE_ONCE(fcs.stop, 0);
		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
	}
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	sd = cur_ops->stall_dur() + 1;
	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
	dur = sd4 + torture_random(&trs) % (sd - sd4);
	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
	stopat = rfp->rcu_fwd_startat + dur;
	while (time_before(jiffies, stopat) &&
	       !shutdown_time_arrived() &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		idx = cur_ops->readlock();
		udelay(10);
		cur_ops->readunlock(idx);
		if (!fwd_progress_need_resched || need_resched())
			cond_resched();
	}
	(*tested_tries)++;
	if (!time_before(jiffies, stopat) &&
	    !shutdown_time_arrived() &&
	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		(*tested)++;
		cver = READ_ONCE(rcu_torture_current_version) - cver;
		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
		WARN_ON(!cver && gps < 2);
		pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
	}
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 1);
		cur_ops->sync(); /* Wait for running CB to complete. */
		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
	}

	if (selfpropcb) {
		WARN_ON(READ_ONCE(fcs.stop) != 2);
		destroy_rcu_head_on_stack(&fcs.rh);
	}
	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
}

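/*
 * Worked example (illustrative only, not part of the original source):
 * if the flavor under test reported a stall threshold of, say, 21
 * seconds' worth of jiffies and fwd_progress_div retains its default of
 * 4, then sd4 is roughly sd/4 and the tight loop above runs for a random
 * duration between about one quarter of, and just short of, the stall
 * threshold, checking that grace periods progress under read-side load
 * without necessarily provoking an RCU CPU stall warning.
 */
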
/* Carry out call_rcu() forward-progress testing. */
static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
{
	unsigned long cver;
	unsigned long flags;
	unsigned long gps;
	int i;
	long n_launders;
	long n_launders_cb_snap;
	long n_launders_sa;
	long n_max_cbs;
	long n_max_gps;
	struct rcu_fwd_cb *rfcp;
	struct rcu_fwd_cb *rfcpn;
	unsigned long stopat;
	unsigned long stoppedat;

	if (READ_ONCE(rcu_fwd_emergency_stop))
		return; /* Get out of the way quickly, no GP wait! */
	if (!cur_ops->call)
		return; /* Can't do call_rcu() fwd prog without ->call. */

	/* Loop continuously posting RCU callbacks. */
	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
	cur_ops->sync(); /* Later readers see above write. */
	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
	stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
	n_launders = 0;
	rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
	n_launders_sa = 0;
	n_max_cbs = 0;
	n_max_gps = 0;
	for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
		rfp->n_launders_hist[i].n_launders = 0;
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	rfp->rcu_launder_gp_seq_start = gps;
	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
	while (time_before(jiffies, stopat) &&
	       !shutdown_time_arrived() &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
		rfcpn = NULL;
		if (rfcp)
			rfcpn = READ_ONCE(rfcp->rfc_next);
		if (rfcpn) {
			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
				break;
			rfp->rcu_fwd_cb_head = rfcpn;
			n_launders++;
			n_launders_sa++;
		} else {
			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
			if (WARN_ON_ONCE(!rfcp)) {
				schedule_timeout_interruptible(1);
				continue;
			}
			n_max_cbs++;
			n_launders_sa = 0;
			rfcp->rfc_gps = 0;
			rfcp->rfc_rfp = rfp;
		}
		cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
		if (tick_nohz_full_enabled()) {
			local_irq_save(flags);
			rcu_momentary_dyntick_idle();
			local_irq_restore(flags);
		}
	}
	stoppedat = jiffies;
	n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
	cver = READ_ONCE(rcu_torture_current_version) - cver;
	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
	(void)rcu_torture_fwd_prog_cbfree(rfp);

	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
	    !shutdown_time_arrived()) {
		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
			 __func__,
			 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
			 n_launders + n_max_cbs - n_launders_cb_snap,
			 n_launders, n_launders_sa,
			 n_max_gps, n_max_cbs, cver, gps);
		rcu_torture_fwd_cb_hist(rfp);
	}
	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
}

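/*
 * Illustrative note (not part of the original source): a callback is
 * "laundered" each time the grace-period machinery invokes it and the
 * loop above re-posts it.  Re-posting happens only for callbacks that
 * have already been invoked (they sit on the invoked list with a
 * successor), so n_launders counts completed round trips, n_launders_sa
 * counts round trips since the list last ran dry, and n_max_cbs counts
 * the fresh allocations needed when invocation failed to keep pace with
 * posting.
 */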

/*
 * OOM notifier, but this only prints diagnostic information for the
 * current forward-progress test.
 */
static int rcutorture_oom_notify(struct notifier_block *self,
				 unsigned long notused, void *nfreed)
{
	struct rcu_fwd *rfp = rcu_fwds;

	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
	     __func__);
	rcu_torture_fwd_cb_hist(rfp);
	rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2);
	WRITE_ONCE(rcu_fwd_emergency_stop, true);
	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree(rfp));
	rcu_barrier();
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree(rfp));
	rcu_barrier();
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree(rfp));
	smp_mb(); /* Frees before return to avoid redoing OOM. */
	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
	pr_info("%s returning after OOM processing.\n", __func__);
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_oom_nb = {
	.notifier_call = rcutorture_oom_notify
};

/* Carry out grace-period forward-progress testing. */
static int rcu_torture_fwd_prog(void *args)
{
	struct rcu_fwd *rfp = args;
	int tested = 0;
	int tested_tries = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
	rcu_bind_current_to_nocb();
	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
		set_user_nice(current, MAX_NICE);
	do {
		schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
		WRITE_ONCE(rcu_fwd_emergency_stop, false);
		register_oom_notifier(&rcutorture_oom_nb);
		rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
		rcu_torture_fwd_prog_cr(rfp);
		unregister_oom_notifier(&rcutorture_oom_nb);

		/* Avoid slow periods, better to test when busy. */
		stutter_wait("rcu_torture_fwd_prog");
	} while (!torture_must_stop());
	/* Short runs might not contain a valid forward-progress attempt. */
	WARN_ON(!tested && tested_tries >= 5);
	pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
	torture_kthread_stopping("rcu_torture_fwd_prog");
	return 0;
}

/* If forward-progress checking is requested and feasible, spawn the thread. */
static int __init rcu_torture_fwd_prog_init(void)
{
	struct rcu_fwd *rfp;

	if (!fwd_progress)
		return 0; /* Not requested, so don't do it. */
	if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 ||
	    cur_ops == &rcu_busted_ops) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
		return 0;
	}
	if (stall_cpu > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
			return -EINVAL; /* In module, can fail back to user. */
		WARN_ON(1); /* Make sure rcutorture notices conflict. */
		return 0;
	}
	if (fwd_progress_holdoff <= 0)
		fwd_progress_holdoff = 1;
	if (fwd_progress_div <= 0)
		fwd_progress_div = 4;
	rfp = kzalloc(sizeof(*rfp), GFP_KERNEL);
	if (!rfp)
		return -ENOMEM;
	spin_lock_init(&rfp->rcu_fwd_lock);
	rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
	return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
}

/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = 0;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, MAX_NICE);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    smp_load_acquire(&barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		if (torture_must_stop())
			break;
		/*
		 * The above smp_load_acquire() ensures barrier_phase load
		 * is ordered before the following ->call().
		 */
		local_irq_disable(); /* Just to test no-irq call_rcu(). */
		cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		local_irq_enable();
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}

2083/* kthread function to drive and coordinate RCU barrier testing. */
2084static int rcu_torture_barrier(void *arg)
2085{
2086 int i;
2087
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08002088 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002089 do {
2090 atomic_set(&barrier_cbs_invoked, 0);
2091 atomic_set(&barrier_cbs_count, n_barrier_cbs);
Paul E. McKenney6c7ed422015-04-13 11:58:08 -07002092 /* Ensure barrier_phase ordered after prior assignments. */
2093 smp_store_release(&barrier_phase, !barrier_phase);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002094 for (i = 0; i < n_barrier_cbs; i++)
2095 wake_up(&barrier_cbs_wq[i]);
2096 wait_event(barrier_wq,
2097 atomic_read(&barrier_cbs_count) == 0 ||
Paul E. McKenney36970bb2014-01-30 15:49:29 -08002098 torture_must_stop());
2099 if (torture_must_stop())
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002100 break;
2101 n_barrier_attempts++;
Paul E. McKenney78e4bc32013-09-24 15:04:06 -07002102 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002103 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
2104 n_rcu_torture_barrier_error++;
Paul E. McKenney7602de4a2014-12-17 18:39:54 -08002105 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
2106 atomic_read(&barrier_cbs_invoked),
2107 n_barrier_cbs);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002108 WARN_ON_ONCE(1);
Joel Fernandes (Google)bf5b6432018-06-19 15:14:19 -07002109 } else {
2110 n_barrier_successes++;
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002111 }
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002112 schedule_timeout_interruptible(HZ / 10);
Paul E. McKenney36970bb2014-01-30 15:49:29 -08002113 } while (!torture_must_stop());
Paul E. McKenney7fafaac2014-01-31 17:37:28 -08002114 torture_kthread_stopping("rcu_torture_barrier");
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002115 return 0;
2116}
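/*
 * Summary of the per-round handshake between the two kthreads above:
 *
 *	1.  rcu_torture_barrier() zeroes both counters and flips
 *	    barrier_phase.
 *	2.  Each of the n_barrier_cbs rcu_torture_barrier_cbs() kthreads
 *	    wakes, posts one callback via ->call(), and decrements
 *	    barrier_cbs_count.
 *	3.  Once barrier_cbs_count reaches zero, rcu_torture_barrier()
 *	    invokes ->cb_barrier(), which must not return until all
 *	    callbacks posted in this round have been invoked.
 *	4.  If barrier_cbs_invoked then differs from n_barrier_cbs,
 *	    ->cb_barrier() missed a callback and the test records an
 *	    error.
 */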
2117
2118/* Initialize RCU barrier testing. */
2119static int rcu_torture_barrier_init(void)
2120{
2121 int i;
2122 int ret;
2123
Paul E. McKenneyd9eba7682015-05-14 15:35:43 -07002124 if (n_barrier_cbs <= 0)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002125 return 0;
2126 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07002127 pr_alert("%s" TORTURE_FLAG
2128 " Call or barrier ops missing for %s,\n",
2129 torture_type, cur_ops->name);
2130 pr_alert("%s" TORTURE_FLAG
2131 " RCU barrier testing omitted from run.\n",
2132 torture_type);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002133 return 0;
2134 }
2135 atomic_set(&barrier_cbs_count, 0);
2136 atomic_set(&barrier_cbs_invoked, 0);
2137 barrier_cbs_tasks =
Paul E. McKenney68a675d2017-12-01 14:26:56 -08002138 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002139 GFP_KERNEL);
2140 barrier_cbs_wq =
Paul E. McKenney68a675d2017-12-01 14:26:56 -08002141 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
Sasha Levinde5e6432012-12-20 14:11:28 -05002142	if (barrier_cbs_tasks == NULL || barrier_cbs_wq == NULL)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002143 return -ENOMEM;
2144 for (i = 0; i < n_barrier_cbs; i++) {
2145 init_waitqueue_head(&barrier_cbs_wq[i]);
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002146 ret = torture_create_kthread(rcu_torture_barrier_cbs,
2147 (void *)(long)i,
2148 barrier_cbs_tasks[i]);
2149 if (ret)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002150 return ret;
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002151 }
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002152 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002153}
2154
2155/* Clean up after RCU barrier testing. */
2156static void rcu_torture_barrier_cleanup(void)
2157{
2158 int i;
2159
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002160 torture_stop_kthread(rcu_torture_barrier, barrier_task);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002161 if (barrier_cbs_tasks != NULL) {
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002162 for (i = 0; i < n_barrier_cbs; i++)
2163 torture_stop_kthread(rcu_torture_barrier_cbs,
2164 barrier_cbs_tasks[i]);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002165 kfree(barrier_cbs_tasks);
2166 barrier_cbs_tasks = NULL;
2167 }
2168 if (barrier_cbs_wq != NULL) {
2169 kfree(barrier_cbs_wq);
2170 barrier_cbs_wq = NULL;
2171 }
2172}
2173
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002174static bool rcu_torture_can_boost(void)
2175{
2176 static int boost_warn_once;
2177 int prio;
2178
2179 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
2180 return false;
2181
2182 prio = rcu_get_gp_kthreads_prio();
2183 if (!prio)
2184 return false;
2185
2186 if (prio < 2) {
2187 if (boost_warn_once == 1)
2188 return false;
2189
Joel Fernandes (Google)bf5b6432018-06-19 15:14:19 -07002190 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002191 boost_warn_once = 1;
2192 return false;
2193 }
2194
2195 return true;
2196}
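/*
 * Illustrative example: given the checks above, boost testing runs
 * only with something like "rcutorture.test_boost=2
 * rcutree.kthread_prio=2" on the kernel command line, or with
 * test_boost=1 and a torture type whose ops structure sets
 * ->can_boost, again with a kthread priority of at least 2.
 */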
2197
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02002198static enum cpuhp_state rcutor_hp;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002199
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002200static void
2201rcu_torture_cleanup(void)
2202{
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07002203 int firsttime;
Paul E. McKenney034777d2018-04-19 08:43:11 -07002204 int flags = 0;
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07002205 unsigned long gp_seq = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002206 int i;
2207
Davidlohr Buesod36a7a02014-09-11 20:40:21 -07002208 if (torture_cleanup_begin()) {
Paul E. McKenney343e9092008-12-15 16:13:07 -08002209 if (cur_ops->cb_barrier != NULL)
2210 cur_ops->cb_barrier();
2211 return;
2212 }
Paul E. McKenneyb813afa2019-03-21 09:27:28 -07002213 if (!cur_ops) {
2214 torture_cleanup_end();
2215 return;
2216 }
Paul E. McKenney3808dc92014-01-28 15:29:21 -08002217
Paul E. McKenneyf7a81b12019-06-25 13:32:51 -07002218 show_rcu_gp_kthreads();
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002219 rcu_torture_barrier_cleanup();
Paul E. McKenney1b272912018-07-18 14:32:31 -07002220 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002221 torture_stop_kthread(rcu_torture_stall, stall_task);
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002222 torture_stop_kthread(rcu_torture_writer, writer_task);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002223
Josh Triplettc8e5b162007-05-08 00:33:20 -07002224 if (reader_tasks) {
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002225 for (i = 0; i < nrealreaders; i++)
2226 torture_stop_kthread(rcu_torture_reader,
2227 reader_tasks[i]);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002228 kfree(reader_tasks);
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002229 }
2230 rcu_torture_current = NULL;
2231
Josh Triplettc8e5b162007-05-08 00:33:20 -07002232 if (fakewriter_tasks) {
Josh Triplettb772e1d2006-10-04 02:17:13 -07002233 for (i = 0; i < nfakewriters; i++) {
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002234 torture_stop_kthread(rcu_torture_fakewriter,
2235 fakewriter_tasks[i]);
Josh Triplettb772e1d2006-10-04 02:17:13 -07002236 }
2237 kfree(fakewriter_tasks);
2238 fakewriter_tasks = NULL;
2239 }
2240
Paul E. McKenneyaebc8262018-05-01 06:42:51 -07002241 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
2242 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
2243 pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
2244 cur_ops->name, gp_seq, flags);
Paul E. McKenney9c029b82014-02-04 11:47:08 -08002245 torture_stop_kthread(rcu_torture_stats, stats_task);
2246 torture_stop_kthread(rcu_torture_fqs, fqs_task);
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002247 if (rcu_torture_can_boost())
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02002248 cpuhp_remove_state(rcutor_hp);
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002249
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07002250 /*
Paul E. McKenney62a1a942018-07-07 18:12:26 -07002251 * Wait for all RCU callbacks to fire, then do torture-type-specific
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07002252 * cleanup operations.
2253 */
Paul E. McKenney23269742008-05-12 21:21:05 +02002254 if (cur_ops->cb_barrier != NULL)
2255 cur_ops->cb_barrier();
Paul E. McKenneyca1d51e2015-04-14 12:28:22 -07002256 if (cur_ops->cleanup != NULL)
2257 cur_ops->cleanup();
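	/*
	 * The ->cb_barrier() wait above matters because any callback
	 * still queued at module-unload time would otherwise be invoked
	 * after its rcu_head, and the function it references, had been
	 * freed along with the module.
	 */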
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002258
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002259 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002260
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07002261 if (err_segs_recorded) {
2262 pr_alert("Failure/close-call rcutorture reader segments:\n");
2263 if (rt_read_nsegs == 0)
2264 pr_alert("\t: No segments recorded!!!\n");
2265 firsttime = 1;
2266 for (i = 0; i < rt_read_nsegs; i++) {
2267 pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
2268 if (err_segs[i].rt_delay_jiffies != 0) {
2269 pr_cont("%s%ldjiffies", firsttime ? "" : "+",
2270 err_segs[i].rt_delay_jiffies);
2271 firsttime = 0;
2272 }
2273 if (err_segs[i].rt_delay_ms != 0) {
2274 pr_cont("%s%ldms", firsttime ? "" : "+",
2275 err_segs[i].rt_delay_ms);
2276 firsttime = 0;
2277 }
2278 if (err_segs[i].rt_delay_us != 0) {
2279 pr_cont("%s%ldus", firsttime ? "" : "+",
2280 err_segs[i].rt_delay_us);
2281 firsttime = 0;
2282 }
2283 pr_cont("%s\n",
2284 err_segs[i].rt_preempted ? "preempted" : "");
2285
2286 }
2287 }
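	/*
	 * The loop above emits one line per recorded reader segment,
	 * for example (values purely illustrative):
	 *
	 *	0: 0x1 10jiffies+5ms
	 *	1: 0x9 preempted
	 */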
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002288 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002289 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
Paul E. McKenney2e9e8082014-01-28 15:58:22 -08002290 else if (torture_onoff_failures())
Paul E. McKenney091541b2012-01-10 12:51:14 -08002291 rcu_torture_print_module_parms(cur_ops,
2292 "End of test: RCU_HOTPLUG");
Paul E. McKenney95c38322006-03-24 03:15:58 -08002293 else
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002294 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
Davidlohr Buesod36a7a02014-09-11 20:40:21 -07002295 torture_cleanup_end();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002296}
2297
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002298#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2299static void rcu_torture_leak_cb(struct rcu_head *rhp)
2300{
2301}
2302
2303static void rcu_torture_err_cb(struct rcu_head *rhp)
2304{
2305 /*
2306 * This -might- happen due to race conditions, but is unlikely.
2307 * The scenario that leads to this happening is that the
2308 * first of the pair of duplicate callbacks is queued,
2309 * someone else starts a grace period that includes that
2310 * callback, then the second of the pair must wait for the
2311 * next grace period. Unlikely, but can happen. If it
2312 * does happen, the debug-objects subsystem won't have splatted.
2313 */
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08002314 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002315}
2316#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2317
2318/*
2319 * Verify that double-free causes debug-objects to complain, but only
2320 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test
2321 * cannot be carried out.
2322 */
2323static void rcu_test_debug_objects(void)
2324{
2325#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2326 struct rcu_head rh1;
2327 struct rcu_head rh2;
2328
2329 init_rcu_head_on_stack(&rh1);
2330 init_rcu_head_on_stack(&rh2);
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08002331 pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002332
2333 /* Try to queue the rh2 pair of callbacks for the same grace period. */
2334 preempt_disable(); /* Prevent preemption from interrupting test. */
2335 rcu_read_lock(); /* Make it impossible to finish a grace period. */
2336 call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
2337 local_irq_disable(); /* Make it harder to start a new grace period. */
2338 call_rcu(&rh2, rcu_torture_leak_cb);
2339 call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
2340 local_irq_enable();
2341 rcu_read_unlock();
2342 preempt_enable();
2343
2344 /* Wait for them all to get done so we can safely return. */
2345 rcu_barrier();
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08002346 pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002347 destroy_rcu_head_on_stack(&rh1);
2348 destroy_rcu_head_on_stack(&rh2);
2349#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
Paul E. McKenneye0d31a34c2017-12-01 15:22:38 -08002350 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002351#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2352}
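/*
 * Usage note: this test runs only when the module is loaded with the
 * object_debug=1 parameter (see rcu_torture_init() below).  On kernels
 * built with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y, the duplicate call_rcu()
 * above should provoke an ODEBUG complaint from the debug-objects
 * subsystem rather than silent callback-list corruption.
 */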
2353
Paul E. McKenney3a6cb582018-12-10 09:44:52 -08002354static void rcutorture_sync(void)
2355{
2356 static unsigned long n;
2357
2358 if (cur_ops->sync && !(++n & 0xfff))
2359 cur_ops->sync();
2360}
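/*
 * Rate note: the "& 0xfff" above means that ->sync() is invoked on
 * only every 4096th call, which keeps this function cheap enough to
 * hand to torture_onoff_init() for use during CPU-hotplug testing
 * below.
 */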
2361
Josh Triplett6f8bc5002007-05-08 00:25:24 -07002362static int __init
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002363rcu_torture_init(void)
2364{
Paul E. McKenneyc04dd092018-07-23 14:16:47 -07002365 long i;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002366 int cpu;
2367 int firsterr = 0;
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07002368 static struct rcu_torture_ops *torture_ops[] = {
Paul E. McKenneyc770c822018-07-07 10:28:07 -07002369 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
Paul E. McKenneyc682db52019-04-19 07:38:27 -07002370 &busted_srcud_ops, &tasks_ops, &trivial_ops,
Paul E. McKenney2ec1f2d2013-06-12 15:12:21 -07002371 };
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002372
Paul E. McKenneya2f25772017-11-21 20:19:17 -08002373 if (!torture_init_begin(torture_type, verbose))
Paul E. McKenney52280842014-04-07 09:14:11 -07002374 return -EBUSY;
Paul E. McKenney343e9092008-12-15 16:13:07 -08002375
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002376 /* Process args and tell the world that the torturer is on the job. */
Josh Triplettade5fb82007-05-08 00:33:22 -07002377 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002378 cur_ops = torture_ops[i];
Josh Triplettade5fb82007-05-08 00:33:22 -07002379 if (strcmp(torture_type, cur_ops->name) == 0)
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002380 break;
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002381 }
Josh Triplettade5fb82007-05-08 00:33:22 -07002382 if (i == ARRAY_SIZE(torture_ops)) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07002383 pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
2384 torture_type);
2385 pr_alert("rcu-torture types:");
Paul E. McKenneycf886c42009-10-25 19:03:54 -07002386 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
Joe Perchesa7538352018-05-14 13:27:33 -07002387 pr_cont(" %s", torture_ops[i]->name);
2388 pr_cont("\n");
Paul E. McKenneye746b552018-07-07 17:35:22 -07002389 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
Paul E. McKenney889d4872015-08-24 11:37:58 -07002390 firsterr = -EINVAL;
Paul E. McKenneyb813afa2019-03-21 09:27:28 -07002391 cur_ops = NULL;
Paul E. McKenney889d4872015-08-24 11:37:58 -07002392 goto unwind;
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002393 }
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002394 if (cur_ops->fqs == NULL && fqs_duration != 0) {
Paul E. McKenney2caa1e42012-08-09 16:30:45 -07002395 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002396 fqs_duration = 0;
2397 }
Josh Triplettc8e5b162007-05-08 00:33:20 -07002398 if (cur_ops->init)
Paul E. McKenney889d4872015-08-24 11:37:58 -07002399 cur_ops->init();
Paul E. McKenney72e9bb52006-06-27 02:54:03 -07002400
Paul E. McKenney64e4b432014-03-12 10:26:35 -07002401 if (nreaders >= 0) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002402 nrealreaders = nreaders;
Paul E. McKenney64e4b432014-03-12 10:26:35 -07002403 } else {
Paul E. McKenney3838cc12015-03-12 13:55:48 -07002404 nrealreaders = num_online_cpus() - 2 - nreaders;
Paul E. McKenney64e4b432014-03-12 10:26:35 -07002405 if (nrealreaders <= 0)
2406 nrealreaders = 1;
2407 }
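	/*
	 * Worked example, assuming the usual nreaders default of -1:
	 * on an 8-CPU system this gives nrealreaders = 8 - 2 - (-1) = 7,
	 * leaving roughly one CPU's worth of capacity for the writer.
	 */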
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002408 rcu_torture_print_module_parms(cur_ops, "Start of test");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002409
2410 /* Set up the freelist. */
2411
2412 INIT_LIST_HEAD(&rcu_torture_freelist);
Ahmed S. Darwish788e7702007-05-08 00:33:14 -07002413 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
Paul E. McKenney996417d2005-11-18 01:10:50 -08002414 rcu_tortures[i].rtort_mbtest = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002415 list_add_tail(&rcu_tortures[i].rtort_free,
2416 &rcu_torture_freelist);
2417 }
2418
2419 /* Initialize the statistics so that each run gets its own numbers. */
2420
2421 rcu_torture_current = NULL;
2422 rcu_torture_current_version = 0;
2423 atomic_set(&n_rcu_torture_alloc, 0);
2424 atomic_set(&n_rcu_torture_alloc_fail, 0);
2425 atomic_set(&n_rcu_torture_free, 0);
Paul E. McKenney996417d2005-11-18 01:10:50 -08002426 atomic_set(&n_rcu_torture_mberror, 0);
2427 atomic_set(&n_rcu_torture_error, 0);
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002428 n_rcu_torture_barrier_error = 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002429 n_rcu_torture_boost_ktrerror = 0;
2430 n_rcu_torture_boost_rterror = 0;
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002431 n_rcu_torture_boost_failure = 0;
2432 n_rcu_torture_boosts = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002433 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2434 atomic_set(&rcu_torture_wcount[i], 0);
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08002435 for_each_possible_cpu(cpu) {
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002436 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2437 per_cpu(rcu_torture_count, cpu)[i] = 0;
2438 per_cpu(rcu_torture_batch, cpu)[i] = 0;
2439 }
2440 }
Paul E. McKenneyc116dba2018-07-13 12:09:14 -07002441 err_segs_recorded = 0;
2442 rt_read_nsegs = 0;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002443
2444 /* Start up the kthreads. */
2445
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002446 firsterr = torture_create_kthread(rcu_torture_writer, NULL,
2447 writer_task);
2448 if (firsterr)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002449 goto unwind;
Paul E. McKenney4444d852015-05-14 15:42:40 -07002450 if (nfakewriters > 0) {
Paul E. McKenney68a675d2017-12-01 14:26:56 -08002451 fakewriter_tasks = kcalloc(nfakewriters,
Paul E. McKenney4444d852015-05-14 15:42:40 -07002452 sizeof(fakewriter_tasks[0]),
2453 GFP_KERNEL);
2454 if (fakewriter_tasks == NULL) {
2455 VERBOSE_TOROUT_ERRSTRING("out of memory");
2456 firsterr = -ENOMEM;
2457 goto unwind;
2458 }
Josh Triplettb772e1d2006-10-04 02:17:13 -07002459 }
2460 for (i = 0; i < nfakewriters; i++) {
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002461 firsterr = torture_create_kthread(rcu_torture_fakewriter,
2462 NULL, fakewriter_tasks[i]);
2463 if (firsterr)
Josh Triplettb772e1d2006-10-04 02:17:13 -07002464 goto unwind;
Josh Triplettb772e1d2006-10-04 02:17:13 -07002465 }
Paul E. McKenney68a675d2017-12-01 14:26:56 -08002466 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002467 GFP_KERNEL);
2468 if (reader_tasks == NULL) {
Paul E. McKenney5ccf60f2014-01-29 07:25:25 -08002469 VERBOSE_TOROUT_ERRSTRING("out of memory");
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002470 firsterr = -ENOMEM;
2471 goto unwind;
2472 }
2473 for (i = 0; i < nrealreaders; i++) {
Paul E. McKenneyc04dd092018-07-23 14:16:47 -07002474 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002475 reader_tasks[i]);
2476 if (firsterr)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002477 goto unwind;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002478 }
2479 if (stat_interval > 0) {
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002480 firsterr = torture_create_kthread(rcu_torture_stats, NULL,
2481 stats_task);
2482 if (firsterr)
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002483 goto unwind;
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002484 }
Paul E. McKenneye8e255f2015-05-14 16:55:45 -07002485 if (test_no_idle_hz && shuffle_interval > 0) {
Paul E. McKenney3808dc92014-01-28 15:29:21 -08002486 firsterr = torture_shuffle_init(shuffle_interval * HZ);
2487 if (firsterr)
Rusty Russell73d0a4b2009-03-30 22:05:16 -06002488 goto unwind;
Srivatsa Vaddagirid84f5202006-01-08 01:03:42 -08002489 }
Paul E. McKenneyd120f652008-06-18 05:21:44 -07002490 if (stutter < 0)
2491 stutter = 0;
2492 if (stutter) {
Paul E. McKenneyff3bf922019-04-09 14:44:49 -07002493 int t;
2494
2495 t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
2496 firsterr = torture_stutter_init(stutter * HZ, t);
Paul E. McKenney628edaa2014-01-31 11:57:43 -08002497 if (firsterr)
Paul E. McKenneyd120f652008-06-18 05:21:44 -07002498 goto unwind;
Paul E. McKenneyd120f652008-06-18 05:21:44 -07002499 }
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002500 if (fqs_duration < 0)
2501 fqs_duration = 0;
2502 if (fqs_duration) {
Paul E. McKenney628edaa2014-01-31 11:57:43 -08002503 /* Create the fqs thread */
Paul E. McKenneyd0d06062014-03-17 20:56:45 -07002504 firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
2505 fqs_task);
Paul E. McKenney47cf29b2014-02-03 11:52:27 -08002506 if (firsterr)
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002507 goto unwind;
Paul E. McKenneybf66f182010-01-04 15:09:10 -08002508 }
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002509 if (test_boost_interval < 1)
2510 test_boost_interval = 1;
2511 if (test_boost_duration < 2)
2512 test_boost_duration = 2;
Joel Fernandes (Google)4babd852018-06-19 15:14:18 -07002513 if (rcu_torture_can_boost()) {
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002514
2515 boost_starttime = jiffies + test_boost_interval * HZ;
Sebastian Andrzej Siewior0ffd3742016-08-18 14:57:22 +02002516
2517 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
2518 rcutorture_booster_init,
2519 rcutorture_booster_cleanup);
2520 if (firsterr < 0)
2521 goto unwind;
2522 rcutor_hp = firsterr;
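		/*
		 * The value saved above is the dynamically allocated
		 * hotplug state number that cpuhp_setup_state() returns
		 * for CPUHP_AP_ONLINE_DYN; rcu_torture_cleanup() passes
		 * it to cpuhp_remove_state().
		 */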
Paul E. McKenney8e8be452010-09-02 16:16:14 -07002523 }
Paul E. McKenney60013d52019-07-10 08:30:00 -07002524 shutdown_jiffies = jiffies + shutdown_secs * HZ;
Paul E. McKenney01025eb2014-01-31 15:15:02 -08002525 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
2526 if (firsterr)
Paul E. McKenneye991dbc2014-01-31 14:52:13 -08002527 goto unwind;
Paul E. McKenney3a6cb582018-12-10 09:44:52 -08002528 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
2529 rcutorture_sync);
Paul E. McKenney01025eb2014-01-31 15:15:02 -08002530 if (firsterr)
Paul E. McKenney37e377d2012-02-17 22:12:18 -08002531 goto unwind;
Paul E. McKenney01025eb2014-01-31 15:15:02 -08002532 firsterr = rcu_torture_stall_init();
2533 if (firsterr)
Paul E. McKenney37e377d2012-02-17 22:12:18 -08002534 goto unwind;
Paul E. McKenney1b272912018-07-18 14:32:31 -07002535 firsterr = rcu_torture_fwd_prog_init();
2536 if (firsterr)
2537 goto unwind;
Paul E. McKenney01025eb2014-01-31 15:15:02 -08002538 firsterr = rcu_torture_barrier_init();
2539 if (firsterr)
Paul E. McKenneyfae4b542012-02-20 17:51:45 -08002540 goto unwind;
Paul E. McKenneyd2818df2013-04-23 17:05:42 -07002541 if (object_debug)
2542 rcu_test_debug_objects();
Paul E. McKenneyb5daa8f2014-01-30 13:38:09 -08002543 torture_init_end();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002544 return 0;
2545
2546unwind:
Paul E. McKenneyb5daa8f2014-01-30 13:38:09 -08002547 torture_init_end();
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002548 rcu_torture_cleanup();
2549 return firsterr;
2550}
2551
2552module_init(rcu_torture_init);
2553module_exit(rcu_torture_cleanup);