// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *          Josh Triplett <josh@joshtriplett.org>
 *
 * See also: Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT 8		/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH 0x01		/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ 0x02		/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH 0x08		/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU 0x20		/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS 6		/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND \
        (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
         RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
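
/*
 * Illustrative composition (a hedged sketch, not part of the test logic):
 * each reader segment carries a bitmask built from the flags above, with
 * any SRCU read-side index stashed above RCUTORTURE_RDR_SHIFT.  A reader
 * nested inside bh-disabled and preempt-disabled sections would carry:
 *
 *	state = RCUTORTURE_RDR_RCU | RCUTORTURE_RDR_BH |
 *		RCUTORTURE_RDR_PREEMPT;			(== 0x25)
 *	srcu_idx = state >> RCUTORTURE_RDR_SHIFT;	(upper bits)
 *	flags = state & RCUTORTURE_RDR_MASK;		(flag bits only)
 */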

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
              "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
              "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
              "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
              "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
              "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0,
              "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
              "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
              "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, read_exit_delay, 13,
              "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16,
              "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
              "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0,
              "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60,
              "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
              "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
              "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
              "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
              "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
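
/*
 * Illustrative invocation (a hedged sketch; the parameter names are the
 * ones defined above, but the values are arbitrary examples):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 *
 * or, with rcutorture built in, the equivalent boot parameters:
 *
 *	rcutorture.torture_type=srcu rcutorture.nreaders=8
 */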

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
        struct rcu_head rtort_rcu;
        int rtort_pipe_count;
        struct list_head rtort_free;
        int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY 0
#define RTWS_DELAY 1
#define RTWS_REPLACE 2
#define RTWS_DEF_FREE 3
#define RTWS_EXP_SYNC 4
#define RTWS_COND_GET 5
#define RTWS_COND_SYNC 6
#define RTWS_SYNC 7
#define RTWS_STUTTER 8
#define RTWS_STOPPING 9
static const char * const rcu_torture_writer_state_names[] = {
        "RTWS_FIXED_DELAY",
        "RTWS_DELAY",
        "RTWS_REPLACE",
        "RTWS_DEF_FREE",
        "RTWS_EXP_SYNC",
        "RTWS_COND_GET",
        "RTWS_COND_SYNC",
        "RTWS_SYNC",
        "RTWS_STUTTER",
        "RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
        int rt_readstate;
        unsigned long rt_delay_jiffies;
        unsigned long rt_delay_ms;
        unsigned long rt_delay_us;
        bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
        unsigned int i = READ_ONCE(rcu_torture_writer_state);

        if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
                return "???";
        return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
        u64 ts = trace_clock_local();

        (void)do_div(ts, NSEC_PER_USEC);
        return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
        return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
        return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}
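
/*
 * Illustrative use (a hedged sketch; the actual callers appear later in
 * this file, e.g. the forward-progress tests, and "do_cpu_hog_step()" is
 * a hypothetical stand-in for the hog work):
 *
 *	while (!shutdown_time_arrived() && !torture_must_stop())
 *		do_cpu_hog_step();
 */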

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/* and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
        struct list_head *p;

        spin_lock_bh(&rcu_torture_lock);
        if (list_empty(&rcu_torture_freelist)) {
                atomic_inc(&n_rcu_torture_alloc_fail);
                spin_unlock_bh(&rcu_torture_lock);
                return NULL;
        }
        atomic_inc(&n_rcu_torture_alloc);
        p = rcu_torture_freelist.next;
        list_del_init(p);
        spin_unlock_bh(&rcu_torture_lock);
        return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
        atomic_inc(&n_rcu_torture_free);
        spin_lock_bh(&rcu_torture_lock);
        list_add_tail(&p->rtort_free, &rcu_torture_freelist);
        spin_unlock_bh(&rcu_torture_lock);
}
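
/*
 * Illustrative pairing (a hedged sketch of how the writer path uses this
 * pool; "old_p" stands in for the element being retired):
 *
 *	struct rcu_torture *p = rcu_torture_alloc();
 *
 *	if (p) {
 *		... publish p to readers, then, after a grace period
 *		has elapsed for the retired element ...
 *		rcu_torture_free(old_p);
 *	}
 */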

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
        int ttype;
        void (*init)(void);
        void (*cleanup)(void);
        int (*readlock)(void);
        void (*read_delay)(struct torture_random_state *rrsp,
                           struct rt_read_seg *rtrsp);
        void (*readunlock)(int idx);
        unsigned long (*get_gp_seq)(void);
        unsigned long (*gp_diff)(unsigned long new, unsigned long old);
        void (*deferred_free)(struct rcu_torture *p);
        void (*sync)(void);
        void (*exp_sync)(void);
        unsigned long (*get_state)(void);
        void (*cond_sync)(unsigned long oldstate);
        call_rcu_func_t call;
        void (*cb_barrier)(void);
        void (*fqs)(void);
        void (*stats)(void);
        int (*stall_dur)(void);
        int irq_capable;
        int can_boost;
        int extendables;
        int slow_gps;
        const char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
        rcu_read_lock();
        return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
        unsigned long started;
        unsigned long completed;
        const unsigned long shortdelay_us = 200;
        unsigned long longdelay_ms = 300;
        unsigned long long ts;

        /* We want a short delay sometimes to make a reader delay the grace
         * period, and we want a long delay occasionally to trigger
         * force_quiescent_state. */

        if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
            !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
                started = cur_ops->get_gp_seq();
                ts = rcu_trace_clock_local();
                if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
                        longdelay_ms = 5; /* Avoid triggering BH limits. */
                mdelay(longdelay_ms);
                rtrsp->rt_delay_ms = longdelay_ms;
                completed = cur_ops->get_gp_seq();
                do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
                                          started, completed);
        }
        if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
                udelay(shortdelay_us);
                rtrsp->rt_delay_us = shortdelay_us;
        }
        if (!preempt_count() &&
            !(torture_random(rrsp) % (nrealreaders * 500))) {
                torture_preempt_schedule(); /* QS only if preemptible. */
                rtrsp->rt_preempted = true;
        }
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
        rcu_read_unlock();
}

/*
 * Update callback in the pipe. This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
        int i;

        i = READ_ONCE(rp->rtort_pipe_count);
        if (i > RCU_TORTURE_PIPE_LEN)
                i = RCU_TORTURE_PIPE_LEN;
        atomic_inc(&rcu_torture_wcount[i]);
        WRITE_ONCE(rp->rtort_pipe_count, i + 1);
        if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
                rp->rtort_mbtest = 0;
                return true;
        }
        return false;
}

/*
 * Update all callbacks in the pipe. Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
        struct rcu_torture *rp;
        struct rcu_torture *rp1;

        if (old_rp)
                list_add(&old_rp->rtort_free, &rcu_torture_removed);
        list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
                if (rcu_torture_pipe_update_one(rp)) {
                        list_del(&rp->rtort_free);
                        rcu_torture_free(rp);
                }
        }
}
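
/*
 * Illustrative reader-side check (a hedged sketch; the real check lives
 * in the reader code later in this file): because a retired element's
 * ->rtort_pipe_count is aged once per grace period, a reader that still
 * sees a count at or beyond RCU_TORTURE_PIPE_LEN has caught a too-short
 * grace period:
 *
 *	pipe_count = READ_ONCE(p->rtort_pipe_count);
 *	if (pipe_count > RCU_TORTURE_PIPE_LEN)
 *		pipe_count = RCU_TORTURE_PIPE_LEN;	(should not happen)
 *	__this_cpu_inc(rcu_torture_count[pipe_count]);
 */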

static void
rcu_torture_cb(struct rcu_head *p)
{
        struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

        if (torture_must_stop_irq()) {
                /* Test is ending, just drop callbacks on the floor. */
                /* The next initialization will pick up the pieces. */
                return;
        }
        if (rcu_torture_pipe_update_one(rp))
                rcu_torture_free(rp);
        else
                cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
        return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
        INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
        .ttype = RCU_FLAVOR,
        .init = rcu_sync_torture_init,
        .readlock = rcu_torture_read_lock,
        .read_delay = rcu_read_delay,
        .readunlock = rcu_torture_read_unlock,
        .get_gp_seq = rcu_get_gp_seq,
        .gp_diff = rcu_seq_diff,
        .deferred_free = rcu_torture_deferred_free,
        .sync = synchronize_rcu,
        .exp_sync = synchronize_rcu_expedited,
        .get_state = get_state_synchronize_rcu,
        .cond_sync = cond_synchronize_rcu,
        .call = call_rcu,
        .cb_barrier = rcu_barrier,
        .fqs = rcu_force_quiescent_state,
        .stats = NULL,
        .stall_dur = rcu_jiffies_till_stall_check,
        .irq_capable = 1,
        .can_boost = rcu_can_boost(),
        .extendables = RCUTORTURE_MAX_EXTEND,
        .name = "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
        /* This is a deliberate bug for testing purposes only! */
        rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
        /* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
        /* This is a deliberate bug for testing purposes only! */
        func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
        .ttype = INVALID_RCU_FLAVOR,
        .init = rcu_sync_torture_init,
        .readlock = rcu_torture_read_lock,
        .read_delay = rcu_read_delay,	/* just reuse rcu's version. */
        .readunlock = rcu_torture_read_unlock,
        .get_gp_seq = rcu_no_completed,
        .deferred_free = rcu_busted_torture_deferred_free,
        .sync = synchronize_rcu_busted,
        .exp_sync = synchronize_rcu_busted,
        .call = call_rcu_busted,
        .cb_barrier = NULL,
        .fqs = NULL,
        .stats = NULL,
        .irq_capable = 1,
        .name = "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
        return srcu_read_lock(srcu_ctlp);
}
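
/*
 * The lock/unlock pair above is the standard SRCU reader idiom; an
 * illustrative direct use (a hedged sketch) looks like:
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(srcu_ctlp);
 *	... read-side critical section ...
 *	srcu_read_unlock(srcu_ctlp, idx);
 */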

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
        long delay;
        const long uspertick = 1000000 / HZ;
        const long longdelay = 10;

        /* We want there to be long-running readers, but not all the time. */

        delay = torture_random(rrsp) %
                (nrealreaders * 2 * longdelay * uspertick);
        if (!delay && in_task()) {
                schedule_timeout_interruptible(longdelay);
                rtrsp->rt_delay_jiffies = longdelay;
        } else {
                rcu_read_delay(rrsp, rtrsp);
        }
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
        srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
        return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
        call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
        synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
                              rcu_callback_t func)
{
        call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
        srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
        srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
        synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
        .ttype = SRCU_FLAVOR,
        .init = rcu_sync_torture_init,
        .readlock = srcu_torture_read_lock,
        .read_delay = srcu_read_delay,
        .readunlock = srcu_torture_read_unlock,
        .get_gp_seq = srcu_torture_completed,
        .deferred_free = srcu_torture_deferred_free,
        .sync = srcu_torture_synchronize,
        .exp_sync = srcu_torture_synchronize_expedited,
        .call = srcu_torture_call,
        .cb_barrier = srcu_torture_barrier,
        .stats = srcu_torture_stats,
        .irq_capable = 1,
        .name = "srcu"
};

static void srcu_torture_init(void)
{
        rcu_sync_torture_init();
        WARN_ON(init_srcu_struct(&srcu_ctld));
        srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
        cleanup_srcu_struct(&srcu_ctld);
        srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
        .ttype = SRCU_FLAVOR,
        .init = srcu_torture_init,
        .cleanup = srcu_torture_cleanup,
        .readlock = srcu_torture_read_lock,
        .read_delay = srcu_read_delay,
        .readunlock = srcu_torture_read_unlock,
        .get_gp_seq = srcu_torture_completed,
        .deferred_free = srcu_torture_deferred_free,
        .sync = srcu_torture_synchronize,
        .exp_sync = srcu_torture_synchronize_expedited,
        .call = srcu_torture_call,
        .cb_barrier = srcu_torture_barrier,
        .stats = srcu_torture_stats,
        .irq_capable = 1,
        .name = "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
        .ttype = SRCU_FLAVOR,
        .init = srcu_torture_init,
        .cleanup = srcu_torture_cleanup,
        .readlock = srcu_torture_read_lock,
        .read_delay = rcu_read_delay,
        .readunlock = srcu_torture_read_unlock,
        .get_gp_seq = srcu_torture_completed,
        .deferred_free = srcu_torture_deferred_free,
        .sync = srcu_torture_synchronize,
        .exp_sync = srcu_torture_synchronize_expedited,
        .call = srcu_torture_call,
        .cb_barrier = srcu_torture_barrier,
        .stats = srcu_torture_stats,
        .irq_capable = 1,
        .extendables = RCUTORTURE_MAX_EXTEND,
        .name = "busted_srcud"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
        return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
        synchronize_rcu_mult(call_rcu_tasks, call_rcu);
}

static struct rcu_torture_ops tasks_ops = {
        .ttype = RCU_TASKS_FLAVOR,
        .init = rcu_sync_torture_init,
        .readlock = tasks_torture_read_lock,
        .read_delay = rcu_read_delay,	/* just reuse rcu's version. */
        .readunlock = tasks_torture_read_unlock,
        .get_gp_seq = rcu_no_completed,
        .deferred_free = rcu_tasks_torture_deferred_free,
        .sync = synchronize_rcu_tasks,
        .exp_sync = synchronize_rcu_mult_test,
        .call = call_rcu_tasks,
        .cb_barrier = rcu_barrier_tasks,
        .fqs = NULL,
        .stats = NULL,
        .irq_capable = 1,
        .slow_gps = 1,
        .name = "tasks"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
        int cpu;

        for_each_online_cpu(cpu) {
                rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
                WARN_ON_ONCE(raw_smp_processor_id() != cpu);
        }
}

static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
        preempt_disable();
        return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
        preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
        .ttype = RCU_TRIVIAL_FLAVOR,
        .init = rcu_sync_torture_init,
        .readlock = rcu_torture_read_lock_trivial,
        .read_delay = rcu_read_delay,	/* just reuse rcu's version. */
        .readunlock = rcu_torture_read_unlock_trivial,
        .get_gp_seq = rcu_no_completed,
        .sync = synchronize_rcu_trivial,
        .exp_sync = synchronize_rcu_trivial,
        .fqs = NULL,
        .stats = NULL,
        .irq_capable = 1,
        .name = "trivial"
};

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_rude_ops = {
        .ttype = RCU_TASKS_RUDE_FLAVOR,
        .init = rcu_sync_torture_init,
        .readlock = rcu_torture_read_lock_trivial,
        .read_delay = rcu_read_delay,	/* just reuse rcu's version. */
        .readunlock = rcu_torture_read_unlock_trivial,
        .get_gp_seq = rcu_no_completed,
        .deferred_free = rcu_tasks_rude_torture_deferred_free,
        .sync = synchronize_rcu_tasks_rude,
        .exp_sync = synchronize_rcu_tasks_rude,
        .call = call_rcu_tasks_rude,
        .cb_barrier = rcu_barrier_tasks_rude,
        .fqs = NULL,
        .stats = NULL,
        .irq_capable = 1,
        .name = "tasks-rude"
};

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
        rcu_read_lock_trace();
        return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
        rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
        .ttype = RCU_TASKS_TRACING_FLAVOR,
        .init = rcu_sync_torture_init,
        .readlock = tasks_tracing_torture_read_lock,
        .read_delay = srcu_read_delay,	/* just reuse srcu's version. */
        .readunlock = tasks_tracing_torture_read_unlock,
        .get_gp_seq = rcu_no_completed,
        .deferred_free = rcu_tasks_tracing_torture_deferred_free,
        .sync = synchronize_rcu_tasks_trace,
        .exp_sync = synchronize_rcu_tasks_trace,
        .call = call_rcu_tasks_trace,
        .cb_barrier = rcu_barrier_tasks_trace,
        .fqs = NULL,
        .stats = NULL,
        .irq_capable = 1,
        .slow_gps = 1,
        .name = "tasks-tracing"
};

static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
        if (!cur_ops->gp_diff)
                return new - old;
        return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
        return cur_ops == &tasks_ops || cur_ops == &tasks_rude_ops;
}

/*
 * RCU torture priority-boost testing. Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked. If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
        struct rcu_head rcu;
        int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
        struct rcu_boost_inflight *rbip =
                container_of(head, struct rcu_boost_inflight, rcu);

        /* Ensure RCU-core accesses precede clearing ->inflight */
        smp_store_release(&rbip->inflight, 0);
}
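
/*
 * The smp_store_release() above pairs with the smp_load_acquire() in
 * rcu_torture_boost() below, ensuring that the callback's RCU-core
 * accesses complete before the boost kthread observes ->inflight == 0
 * and posts the next callback:
 *
 *	if (!smp_load_acquire(&rbi.inflight)) {
 *		smp_store_release(&rbi.inflight, 1);
 *		call_rcu(&rbi.rcu, rcu_torture_boost_cb);
 *	}
 */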

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
        /*
         * Disable RT throttling so that rcutorture's boost threads don't
         * get throttled. This is only possible if rcutorture is built-in;
         * otherwise, the user should do this manually by setting the
         * sched_rt_period_us and sched_rt_runtime sysctls.
         */
        if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
                return;

        old_rt_runtime = sysctl_sched_rt_runtime;
        sysctl_sched_rt_runtime = -1;
}
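
/*
 * For modular rcutorture, an equivalent manual step (an illustrative
 * sketch using the standard scheduler sysctl mentioned above) is:
 *
 *	echo -1 > /proc/sys/kernel/sched_rt_runtime_us
 *
 * with the saved value restored by hand once testing completes.
 */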

static void rcu_torture_enable_rt_throttle(void)
{
        if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
                return;

        sysctl_sched_rt_runtime = old_rt_runtime;
        old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
        if (end - start > test_boost_duration * HZ - HZ / 2) {
                VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
                n_rcu_torture_boost_failure++;

                return true; /* failed */
        }

        return false; /* passed */
}

static int rcu_torture_boost(void *arg)
{
        unsigned long call_rcu_time;
        unsigned long endtime;
        unsigned long oldstarttime;
        struct rcu_boost_inflight rbi = { .inflight = 0 };

        VERBOSE_TOROUT_STRING("rcu_torture_boost started");

        /* Set real-time priority. */
        sched_set_fifo_low(current);

        init_rcu_head_on_stack(&rbi.rcu);
        /* Each pass through the following loop does one boost-test cycle. */
        do {
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 896 | /* Track whether the test has already failed in this interval. */
| 897 | bool failed = false; |
| 898 | |
| 899 | /* Increment n_rcu_torture_boosts once per boost-test interval. */
| 900 | while (!kthread_should_stop()) { |
| 901 | if (mutex_trylock(&boost_mutex)) { |
| 902 | n_rcu_torture_boosts++; |
| 903 | mutex_unlock(&boost_mutex); |
| 904 | break; |
| 905 | } |
| 906 | schedule_timeout_uninterruptible(1); |
| 907 | } |
| 908 | if (kthread_should_stop()) |
| 909 | goto checkwait; |
| 910 | |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 911 | /* Wait for the next test interval. */ |
| 912 | oldstarttime = boost_starttime; |
Paul E. McKenney | 3c80b40 | 2020-04-10 15:37:12 -0700 | [diff] [blame] | 913 | while (time_before(jiffies, oldstarttime)) { |
Paul E. McKenney | 0e11c8e | 2013-01-10 16:21:07 -0800 | [diff] [blame] | 914 | schedule_timeout_interruptible(oldstarttime - jiffies); |
Paul E. McKenney | 628edaa | 2014-01-31 11:57:43 -0800 | [diff] [blame] | 915 | stutter_wait("rcu_torture_boost"); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 916 | if (torture_must_stop()) |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 917 | goto checkwait; |
| 918 | } |
| 919 | |
| 920 | /* Do one boost-test interval. */ |
| 921 | endtime = oldstarttime + test_boost_duration * HZ; |
| 922 | call_rcu_time = jiffies; |
Paul E. McKenney | 3c80b40 | 2020-04-10 15:37:12 -0700 | [diff] [blame] | 923 | while (time_before(jiffies, endtime)) { |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 924 | /* If we don't have a callback in flight, post one. */ |
Paul E. McKenney | 6c7ed42 | 2015-04-13 11:58:08 -0700 | [diff] [blame] | 925 | if (!smp_load_acquire(&rbi.inflight)) { |
| 926 | /* RCU core before ->inflight = 1. */ |
| 927 | smp_store_release(&rbi.inflight, 1); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 928 | call_rcu(&rbi.rcu, rcu_torture_boost_cb); |
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 929 | /* Check if the boost test failed */ |
| 930 | failed = failed || |
| 931 | rcu_torture_boost_failed(call_rcu_time, |
| 932 | jiffies); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 933 | call_rcu_time = jiffies; |
| 934 | } |
Paul E. McKenney | 628edaa | 2014-01-31 11:57:43 -0800 | [diff] [blame] | 935 | stutter_wait("rcu_torture_boost"); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 936 | if (torture_must_stop()) |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 937 | goto checkwait; |
| 938 | } |
| 939 | |
| 940 | /* |
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 941 | * If boosting never succeeded, ->inflight will still be 1; in that
| 942 | * case the failure check in the loop above never ran, so do one
| 943 | * final check here.
| 944 | */ |
| 945 | if (!failed && smp_load_acquire(&rbi.inflight)) |
| 946 | rcu_torture_boost_failed(call_rcu_time, jiffies); |
| 947 | |
| 948 | /* |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 949 | * Set the start time of the next test interval. |
| 950 | * Yes, this is vulnerable to long delays, but such |
| 951 | * delays simply cause a false negative for the next |
| 952 | * interval. Besides, we are running at RT priority, |
| 953 | * so delays should be relatively rare. |
| 954 | */ |
Paul E. McKenney | ab8f11e | 2011-08-18 09:30:32 -0700 | [diff] [blame] | 955 | while (oldstarttime == boost_starttime && |
| 956 | !kthread_should_stop()) { |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 957 | if (mutex_trylock(&boost_mutex)) { |
| 958 | boost_starttime = jiffies + |
| 959 | test_boost_interval * HZ; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 960 | mutex_unlock(&boost_mutex); |
| 961 | break; |
| 962 | } |
| 963 | schedule_timeout_uninterruptible(1); |
| 964 | } |
| 965 | |
| 966 | /* Go do the stutter. */ |
Paul E. McKenney | 628edaa | 2014-01-31 11:57:43 -0800 | [diff] [blame] | 967 | checkwait: stutter_wait("rcu_torture_boost"); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 968 | } while (!torture_must_stop()); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 969 | |
| 970 | /* Clean up and exit. */ |
Paul E. McKenney | 6c7ed42 | 2015-04-13 11:58:08 -0700 | [diff] [blame] | 971 | while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) { |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 972 | torture_shutdown_absorb("rcu_torture_boost"); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 973 | schedule_timeout_uninterruptible(1); |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 974 | } |
Paul E. McKenney | 9d68197 | 2011-06-21 01:48:03 -0700 | [diff] [blame] | 975 | destroy_rcu_head_on_stack(&rbi.rcu); |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 976 | torture_kthread_stopping("rcu_torture_boost"); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 977 | return 0; |
| 978 | } |
| 979 | |
| 980 | /* |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 981 | * RCU torture force-quiescent-state kthread. Repeatedly induces |
| 982 | * bursts of calls to force_quiescent_state(), increasing the probability |
| 983 | * of occurrence of some important types of race conditions. |
| 984 | */ |
| 985 | static int |
| 986 | rcu_torture_fqs(void *arg) |
| 987 | { |
| 988 | unsigned long fqs_resume_time; |
| 989 | int fqs_burst_remaining; |
| 990 | |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 991 | VERBOSE_TOROUT_STRING("rcu_torture_fqs task started"); |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 992 | do { |
| 993 | fqs_resume_time = jiffies + fqs_stutter * HZ; |
Paul E. McKenney | 3c80b40 | 2020-04-10 15:37:12 -0700 | [diff] [blame] | 994 | while (time_before(jiffies, fqs_resume_time) && |
Paul E. McKenney | 93898fb | 2011-08-17 12:39:34 -0700 | [diff] [blame] | 995 | !kthread_should_stop()) { |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 996 | schedule_timeout_interruptible(1); |
| 997 | } |
| 998 | fqs_burst_remaining = fqs_duration; |
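		/*
		 * Issue one burst: call ->fqs() every fqs_holdoff
		 * microseconds until this burst's fqs_duration budget
		 * has been consumed.
		 */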
Paul E. McKenney | 93898fb | 2011-08-17 12:39:34 -0700 | [diff] [blame] | 999 | while (fqs_burst_remaining > 0 && |
| 1000 | !kthread_should_stop()) { |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 1001 | cur_ops->fqs(); |
| 1002 | udelay(fqs_holdoff); |
| 1003 | fqs_burst_remaining -= fqs_holdoff; |
| 1004 | } |
Paul E. McKenney | 628edaa | 2014-01-31 11:57:43 -0800 | [diff] [blame] | 1005 | stutter_wait("rcu_torture_fqs"); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 1006 | } while (!torture_must_stop()); |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 1007 | torture_kthread_stopping("rcu_torture_fqs"); |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 1008 | return 0; |
| 1009 | } |
| 1010 | |
| 1011 | /* |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1012 | * RCU torture writer kthread. Repeatedly substitutes a new structure |
| 1013 | * for that pointed to by rcu_torture_current, freeing the old structure |
| 1014 | * after a series of grace periods (the "pipeline"). |
| 1015 | */ |
| 1016 | static int |
| 1017 | rcu_torture_writer(void *arg) |
| 1018 | { |
Paul E. McKenney | 9efafb8 | 2015-12-31 18:11:47 -0800 | [diff] [blame] | 1019 | bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); |
Paul E. McKenney | 4bb3c5f | 2015-02-18 16:31:29 -0800 | [diff] [blame] | 1020 | int expediting = 0; |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1021 | unsigned long gp_snap; |
| 1022 | bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal; |
Paul E. McKenney | f0bf8fa | 2014-03-21 16:17:56 -0700 | [diff] [blame] | 1023 | bool gp_sync1 = gp_sync; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1024 | int i; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1025 | struct rcu_torture *rp; |
| 1026 | struct rcu_torture *old_rp; |
Paul E. McKenney | 51b1130 | 2014-01-27 11:49:39 -0800 | [diff] [blame] | 1027 | static DEFINE_TORTURE_RANDOM(rand); |
Paul E. McKenney | f0bf8fa | 2014-03-21 16:17:56 -0700 | [diff] [blame] | 1028 | int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, |
| 1029 | RTWS_COND_GET, RTWS_SYNC }; |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1030 | int nsynctypes = 0; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1031 | |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 1032 | VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); |
Paul E. McKenney | f7c0e6a | 2017-12-08 11:37:24 -0800 | [diff] [blame] | 1033 | if (!can_expedite) |
Paul E. McKenney | aa5a898 | 2015-12-31 16:27:06 -0800 | [diff] [blame] | 1034 | pr_alert("%s" TORTURE_FLAG |
Paul E. McKenney | f7c0e6a | 2017-12-08 11:37:24 -0800 | [diff] [blame] | 1035 | " GP expediting controlled from boot/sysfs for %s.\n", |
Paul E. McKenney | aa5a898 | 2015-12-31 16:27:06 -0800 | [diff] [blame] | 1036 | torture_type, cur_ops->name); |
Ingo Molnar | dbdf65b | 2005-11-13 16:07:22 -0800 | [diff] [blame] | 1037 | |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1038 | /* Initialize synctype[] array. If none set, take default. */ |
Paul E. McKenney | c136f99 | 2015-02-19 12:15:19 -0800 | [diff] [blame] | 1039 | if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1) |
Paul E. McKenney | f0bf8fa | 2014-03-21 16:17:56 -0700 | [diff] [blame] | 1040 | gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true; |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1041 | if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) { |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1042 | synctype[nsynctypes++] = RTWS_COND_GET; |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1043 | pr_info("%s: Testing conditional GPs.\n", __func__); |
| 1044 | } else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) { |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 1045 | pr_alert("%s: gp_cond without primitives.\n", __func__); |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1046 | } |
| 1047 | if (gp_exp1 && cur_ops->exp_sync) { |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1048 | synctype[nsynctypes++] = RTWS_EXP_SYNC; |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1049 | pr_info("%s: Testing expedited GPs.\n", __func__); |
| 1050 | } else if (gp_exp && !cur_ops->exp_sync) { |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 1051 | pr_alert("%s: gp_exp without primitives.\n", __func__); |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1052 | } |
| 1053 | if (gp_normal1 && cur_ops->deferred_free) { |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1054 | synctype[nsynctypes++] = RTWS_DEF_FREE; |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1055 | pr_info("%s: Testing asynchronous GPs.\n", __func__); |
| 1056 | } else if (gp_normal && !cur_ops->deferred_free) { |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 1057 | pr_alert("%s: gp_normal without primitives.\n", __func__); |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1058 | } |
| 1059 | if (gp_sync1 && cur_ops->sync) { |
Paul E. McKenney | f0bf8fa | 2014-03-21 16:17:56 -0700 | [diff] [blame] | 1060 | synctype[nsynctypes++] = RTWS_SYNC; |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1061 | pr_info("%s: Testing normal GPs.\n", __func__); |
| 1062 | } else if (gp_sync && !cur_ops->sync) { |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 1063 | pr_alert("%s: gp_sync without primitives.\n", __func__); |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1064 | } |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1065 | if (WARN_ONCE(nsynctypes == 0, |
| 1066 | "rcu_torture_writer: No update-side primitives.\n")) { |
Paul E. McKenney | f0bf8fa | 2014-03-21 16:17:56 -0700 | [diff] [blame] | 1067 | /* |
| 1068 | * No update-side primitives, so don't try updating.
| 1069 | * The resulting test won't be testing much, hence the |
| 1070 | * above WARN_ONCE(). |
| 1071 | */ |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1072 | rcu_torture_writer_state = RTWS_STOPPING; |
| 1073 | torture_kthread_stopping("rcu_torture_writer"); |
| 1074 | } |
| 1075 | |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1076 | do { |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1077 | rcu_torture_writer_state = RTWS_FIXED_DELAY; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1078 | schedule_timeout_uninterruptible(1); |
Paul E. McKenney | a71fca5 | 2009-09-18 10:28:19 -0700 | [diff] [blame] | 1079 | rp = rcu_torture_alloc(); |
| 1080 | if (rp == NULL) |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1081 | continue; |
| 1082 | rp->rtort_pipe_count = 0; |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1083 | rcu_torture_writer_state = RTWS_DELAY; |
Paul E. McKenney | 51b1130 | 2014-01-27 11:49:39 -0800 | [diff] [blame] | 1084 | udelay(torture_random(&rand) & 0x3ff); |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1085 | rcu_torture_writer_state = RTWS_REPLACE; |
Paul E. McKenney | 0ddea0e | 2010-09-19 21:06:14 -0700 | [diff] [blame] | 1086 | old_rp = rcu_dereference_check(rcu_torture_current, |
| 1087 | current == writer_task); |
Paul E. McKenney | 996417d | 2005-11-18 01:10:50 -0800 | [diff] [blame] | 1088 | rp->rtort_mbtest = 1; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1089 | rcu_assign_pointer(rcu_torture_current, rp); |
Paul E. McKenney | 9b2619a | 2009-09-23 09:50:43 -0700 | [diff] [blame] | 1090 | smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ |
Josh Triplett | c8e5b16 | 2007-05-08 00:33:20 -0700 | [diff] [blame] | 1091 | if (old_rp) { |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1092 | i = old_rp->rtort_pipe_count; |
| 1093 | if (i > RCU_TORTURE_PIPE_LEN) |
| 1094 | i = RCU_TORTURE_PIPE_LEN; |
| 1095 | atomic_inc(&rcu_torture_wcount[i]); |
Paul E. McKenney | 2024891 | 2019-12-21 10:41:48 -0800 | [diff] [blame] | 1096 | WRITE_ONCE(old_rp->rtort_pipe_count, |
| 1097 | old_rp->rtort_pipe_count + 1); |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1098 | switch (synctype[torture_random(&rand) % nsynctypes]) { |
| 1099 | case RTWS_DEF_FREE: |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1100 | rcu_torture_writer_state = RTWS_DEF_FREE; |
Paul E. McKenney | 2ec1f2d | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 1101 | cur_ops->deferred_free(old_rp); |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1102 | break; |
| 1103 | case RTWS_EXP_SYNC: |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1104 | rcu_torture_writer_state = RTWS_EXP_SYNC; |
Paul E. McKenney | 2ec1f2d | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 1105 | cur_ops->exp_sync(); |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1106 | rcu_torture_pipe_update(old_rp); |
| 1107 | break; |
| 1108 | case RTWS_COND_GET: |
| 1109 | rcu_torture_writer_state = RTWS_COND_GET; |
| 1110 | gp_snap = cur_ops->get_state(); |
| 1111 | i = torture_random(&rand) % 16; |
| 1112 | if (i != 0) |
| 1113 | schedule_timeout_interruptible(i); |
| 1114 | udelay(torture_random(&rand) % 1000); |
| 1115 | rcu_torture_writer_state = RTWS_COND_SYNC; |
| 1116 | cur_ops->cond_sync(gp_snap); |
| 1117 | rcu_torture_pipe_update(old_rp); |
| 1118 | break; |
Paul E. McKenney | f0bf8fa | 2014-03-21 16:17:56 -0700 | [diff] [blame] | 1119 | case RTWS_SYNC: |
| 1120 | rcu_torture_writer_state = RTWS_SYNC; |
| 1121 | cur_ops->sync(); |
| 1122 | rcu_torture_pipe_update(old_rp); |
| 1123 | break; |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1124 | default: |
| 1125 | WARN_ON_ONCE(1); |
| 1126 | break; |
Paul E. McKenney | 2ec1f2d | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 1127 | } |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1128 | } |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 1129 | WRITE_ONCE(rcu_torture_current_version, |
| 1130 | rcu_torture_current_version + 1); |
Paul E. McKenney | 4bb3c5f | 2015-02-18 16:31:29 -0800 | [diff] [blame] | 1131 | /* Cycle through nesting levels of rcu_expedite_gp() calls. */ |
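		/*
		 * When not already cycling (expediting == 0), start a cycle
		 * with probability about 1/256; once started, take one step
		 * per pass: a run of nested rcu_expedite_gp() calls, then
		 * the matching rcu_unexpedite_gp() calls to unwind.
		 */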
| 1132 | if (can_expedite && |
| 1133 | !(torture_random(&rand) & 0xff & (!!expediting - 1))) { |
| 1134 | WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited()); |
| 1135 | if (expediting >= 0) |
| 1136 | rcu_expedite_gp(); |
| 1137 | else |
| 1138 | rcu_unexpedite_gp(); |
| 1139 | if (++expediting > 3) |
| 1140 | expediting = -expediting; |
Paul E. McKenney | f7c0e6a | 2017-12-08 11:37:24 -0800 | [diff] [blame] | 1141 | } else if (!can_expedite) { /* Disabled during boot, recheck. */ |
| 1142 | can_expedite = !rcu_gp_is_expedited() && |
| 1143 | !rcu_gp_is_normal(); |
Paul E. McKenney | 4bb3c5f | 2015-02-18 16:31:29 -0800 | [diff] [blame] | 1144 | } |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1145 | rcu_torture_writer_state = RTWS_STUTTER; |
Paul E. McKenney | e8516c6 | 2019-04-09 11:06:32 -0700 | [diff] [blame] | 1146 | if (stutter_wait("rcu_torture_writer") && |
Paul E. McKenney | 5eabea5 | 2019-04-12 09:02:46 -0700 | [diff] [blame] | 1147 | !READ_ONCE(rcu_fwd_cb_nodelay) && |
Paul E. McKenney | 3432d76 | 2019-04-15 14:50:05 -0700 | [diff] [blame] | 1148 | !cur_ops->slow_gps && |
Paul E. McKenney | 59ee032 | 2019-11-28 18:54:06 -0800 | [diff] [blame] | 1149 | !torture_must_stop() && |
| 1150 | rcu_inkernel_boot_has_ended()) |
Paul E. McKenney | 474e59b | 2018-08-07 14:34:44 -0700 | [diff] [blame] | 1151 | for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) |
Paul E. McKenney | e8516c6 | 2019-04-09 11:06:32 -0700 | [diff] [blame] | 1152 | if (list_empty(&rcu_tortures[i].rtort_free) && |
| 1153 | rcu_access_pointer(rcu_torture_current) != |
Paul E. McKenney | 34aa34b | 2019-05-16 16:15:16 -0700 | [diff] [blame] | 1154 | &rcu_tortures[i]) { |
| 1155 | rcu_ftrace_dump(DUMP_ALL); |
Paul E. McKenney | e8516c6 | 2019-04-09 11:06:32 -0700 | [diff] [blame] | 1156 | WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count); |
Paul E. McKenney | 34aa34b | 2019-05-16 16:15:16 -0700 | [diff] [blame] | 1157 | } |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 1158 | } while (!torture_must_stop()); |
Paul E. McKenney | cae7cc6 | 2020-04-26 19:20:37 -0700 | [diff] [blame] | 1159 | rcu_torture_current = NULL; // Let stats task know that we are done. |
Paul E. McKenney | 4bb3c5f | 2015-02-18 16:31:29 -0800 | [diff] [blame] | 1160 | /* Reset expediting back to unexpedited. */ |
| 1161 | if (expediting > 0) |
| 1162 | expediting = -expediting; |
| 1163 | while (can_expedite && expediting++ < 0) |
| 1164 | rcu_unexpedite_gp(); |
| 1165 | WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited()); |
Paul E. McKenney | f7c0e6a | 2017-12-08 11:37:24 -0800 | [diff] [blame] | 1166 | if (!can_expedite) |
| 1167 | pr_alert("%s" TORTURE_FLAG |
| 1168 | " Dynamic grace-period expediting was disabled.\n", |
| 1169 | torture_type); |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1170 | rcu_torture_writer_state = RTWS_STOPPING; |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 1171 | torture_kthread_stopping("rcu_torture_writer"); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1172 | return 0; |
| 1173 | } |
| 1174 | |
| 1175 | /* |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 1176 | * RCU torture fake writer kthread. Repeatedly calls sync, with a random |
| 1177 | * delay between calls. |
| 1178 | */ |
| 1179 | static int |
| 1180 | rcu_torture_fakewriter(void *arg) |
| 1181 | { |
Paul E. McKenney | 51b1130 | 2014-01-27 11:49:39 -0800 | [diff] [blame] | 1182 | DEFINE_TORTURE_RANDOM(rand); |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 1183 | |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 1184 | VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); |
Linus Torvalds | 971eae7 | 2014-03-31 11:21:19 -0700 | [diff] [blame] | 1185 | set_user_nice(current, MAX_NICE); |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 1186 | |
| 1187 | do { |
Paul E. McKenney | 51b1130 | 2014-01-27 11:49:39 -0800 | [diff] [blame] | 1188 | schedule_timeout_uninterruptible(1 + torture_random(&rand)%10); |
| 1189 | udelay(torture_random(&rand) & 0x3ff); |
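		/*
		 * With probability roughly 1/(nfakewriters * 8), exercise
		 * the callback-barrier path instead of a synchronous
		 * grace period.
		 */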
Paul E. McKenney | 72472a0 | 2012-05-29 17:50:51 -0700 | [diff] [blame] | 1190 | if (cur_ops->cb_barrier != NULL && |
Paul E. McKenney | 51b1130 | 2014-01-27 11:49:39 -0800 | [diff] [blame] | 1191 | torture_random(&rand) % (nfakewriters * 8) == 0) { |
Paul E. McKenney | 72472a0 | 2012-05-29 17:50:51 -0700 | [diff] [blame] | 1192 | cur_ops->cb_barrier(); |
Paul E. McKenney | 2ec1f2d | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 1193 | } else if (gp_normal == gp_exp) { |
Paul E. McKenney | eb03399 | 2017-12-08 10:48:41 -0800 | [diff] [blame] | 1194 | if (cur_ops->sync && torture_random(&rand) & 0x80) |
Paul E. McKenney | 2ec1f2d | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 1195 | cur_ops->sync(); |
Paul E. McKenney | eb03399 | 2017-12-08 10:48:41 -0800 | [diff] [blame] | 1196 | else if (cur_ops->exp_sync) |
Paul E. McKenney | 2ec1f2d | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 1197 | cur_ops->exp_sync(); |
Paul E. McKenney | eb03399 | 2017-12-08 10:48:41 -0800 | [diff] [blame] | 1198 | } else if (gp_normal && cur_ops->sync) { |
Paul E. McKenney | 72472a0 | 2012-05-29 17:50:51 -0700 | [diff] [blame] | 1199 | cur_ops->sync(); |
Paul E. McKenney | eb03399 | 2017-12-08 10:48:41 -0800 | [diff] [blame] | 1200 | } else if (cur_ops->exp_sync) { |
Paul E. McKenney | 2ec1f2d | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 1201 | cur_ops->exp_sync(); |
| 1202 | } |
Paul E. McKenney | 628edaa | 2014-01-31 11:57:43 -0800 | [diff] [blame] | 1203 | stutter_wait("rcu_torture_fakewriter"); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 1204 | } while (!torture_must_stop()); |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 1205 | |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 1206 | torture_kthread_stopping("rcu_torture_fakewriter"); |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 1207 | return 0; |
| 1208 | } |
| 1209 | |
Paul E. McKenney | f34c8585 | 2017-07-20 15:27:32 -0700 | [diff] [blame] | 1210 | static void rcu_torture_timer_cb(struct rcu_head *rhp) |
| 1211 | { |
| 1212 | kfree(rhp); |
| 1213 | } |
| 1214 | |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 1215 | /* |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1216 | * Do one extension of an RCU read-side critical section using the |
| 1217 | * current reader state in readstate (set to zero for initial entry |
| 1218 | * to extended critical section), set the new state as specified by |
| 1219 | * newstate (set to zero for final exit from extended critical section), |
| 1220 | * and random-number-generator state in trsp. If this is neither the |
| 1221 | * beginning nor the end of the critical section and if there was actually a
| 1222 | * change, do a ->read_delay(). |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1223 | */ |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1224 | static void rcutorture_one_extend(int *readstate, int newstate, |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1225 | struct torture_random_state *trsp, |
| 1226 | struct rt_read_seg *rtrsp) |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1227 | { |
Paul E. McKenney | 52b1fc3 | 2020-03-28 18:53:25 -0700 | [diff] [blame] | 1228 | unsigned long flags; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1229 | int idxnew = -1; |
| 1230 | int idxold = *readstate; |
| 1231 | int statesnew = ~*readstate & newstate; |
| 1232 | int statesold = *readstate & ~newstate; |
| 1233 | |
| 1234 | WARN_ON_ONCE(idxold < 0); |
| 1235 | WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1236 | rtrsp->rt_readstate = newstate; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1237 | |
| 1238 | /* First, put new protection in place to avoid critical-section gap. */ |
| 1239 | if (statesnew & RCUTORTURE_RDR_BH) |
| 1240 | local_bh_disable(); |
| 1241 | if (statesnew & RCUTORTURE_RDR_IRQ) |
| 1242 | local_irq_disable(); |
| 1243 | if (statesnew & RCUTORTURE_RDR_PREEMPT) |
| 1244 | preempt_disable(); |
Paul E. McKenney | 2ceebc0 | 2018-07-06 15:16:12 -0700 | [diff] [blame] | 1245 | if (statesnew & RCUTORTURE_RDR_RBH) |
| 1246 | rcu_read_lock_bh(); |
| 1247 | if (statesnew & RCUTORTURE_RDR_SCHED) |
| 1248 | rcu_read_lock_sched(); |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1249 | if (statesnew & RCUTORTURE_RDR_RCU) |
| 1250 | idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT; |
| 1251 | |
| 1252 | /* Next, remove old protection, irq first due to bh conflict. */ |
| 1253 | if (statesold & RCUTORTURE_RDR_IRQ) |
| 1254 | local_irq_enable(); |
| 1255 | if (statesold & RCUTORTURE_RDR_BH) |
| 1256 | local_bh_enable(); |
| 1257 | if (statesold & RCUTORTURE_RDR_PREEMPT) |
| 1258 | preempt_enable(); |
Paul E. McKenney | 2ceebc0 | 2018-07-06 15:16:12 -0700 | [diff] [blame] | 1259 | if (statesold & RCUTORTURE_RDR_RBH) |
| 1260 | rcu_read_unlock_bh(); |
| 1261 | if (statesold & RCUTORTURE_RDR_SCHED) |
| 1262 | rcu_read_unlock_sched(); |
Paul E. McKenney | 52b1fc3 | 2020-03-28 18:53:25 -0700 | [diff] [blame] | 1263 | if (statesold & RCUTORTURE_RDR_RCU) { |
| 1264 | bool lockit = !statesnew && !(torture_random(trsp) & 0xffff); |
| 1265 | |
| 1266 | if (lockit) |
| 1267 | raw_spin_lock_irqsave(¤t->pi_lock, flags); |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1268 | cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT); |
Paul E. McKenney | 52b1fc3 | 2020-03-28 18:53:25 -0700 | [diff] [blame] | 1269 | if (lockit) |
| 1270 | raw_spin_unlock_irqrestore(¤t->pi_lock, flags); |
| 1271 | } |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1272 | |
| 1273 | /* Delay if neither beginning nor end and there was a change. */ |
| 1274 | if ((statesnew || statesold) && *readstate && newstate) |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1275 | cur_ops->read_delay(trsp, rtrsp); |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1276 | |
| 1277 | /* Update the reader state. */ |
| 1278 | if (idxnew == -1) |
| 1279 | idxnew = idxold & ~RCUTORTURE_RDR_MASK; |
| 1280 | WARN_ON_ONCE(idxnew < 0); |
| 1281 | WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1); |
| 1282 | *readstate = idxnew | newstate; |
| 1283 | WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0); |
| 1284 | WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1); |
| 1285 | } |
| 1286 | |
| 1287 | /* Return the biggest extendables mask given current RCU and boot parameters. */ |
| 1288 | static int rcutorture_extend_mask_max(void) |
| 1289 | { |
| 1290 | int mask; |
| 1291 | |
| 1292 | WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND); |
| 1293 | mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables; |
| 1294 | mask = mask | RCUTORTURE_RDR_RCU; |
| 1295 | return mask; |
| 1296 | } |
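/*
 * Note that RCUTORTURE_RDR_RCU is ORed in unconditionally, so every
 * reader gets at least the flavor's own ->readlock() protection.
 */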
| 1297 | |
| 1298 | /* Return a random protection state mask, but with at least one bit set. */ |
| 1299 | static int |
| 1300 | rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) |
| 1301 | { |
| 1302 | int mask = rcutorture_extend_mask_max(); |
Paul E. McKenney | bf1bef5 | 2018-06-10 08:50:09 -0700 | [diff] [blame] | 1303 | unsigned long randmask1 = torture_random(trsp) >> 8; |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1304 | unsigned long randmask2 = randmask1 >> 3; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1305 | |
| 1306 | WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT); |
Paul E. McKenney | a3b0e1e5 | 2019-02-28 15:06:13 -0800 | [diff] [blame] | 1307 | /* Mostly only one bit (need preemption!), sometimes lots of bits. */ |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1308 | if (!(randmask1 & 0x7)) |
Paul E. McKenney | bf1bef5 | 2018-06-10 08:50:09 -0700 | [diff] [blame] | 1309 | mask = mask & randmask2; |
| 1310 | else |
| 1311 | mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); |
Paul E. McKenney | 2ceebc0 | 2018-07-06 15:16:12 -0700 | [diff] [blame] | 1312 | /* Can't enable bh w/irq disabled. */ |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1313 | if ((mask & RCUTORTURE_RDR_IRQ) && |
Paul E. McKenney | 2ceebc0 | 2018-07-06 15:16:12 -0700 | [diff] [blame] | 1314 | ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) || |
| 1315 | (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH)))) |
| 1316 | mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1317 | return mask ?: RCUTORTURE_RDR_RCU; |
| 1318 | } |
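/*
 * A note on the bh/irq fixup above: if the new mask disables irqs while
 * dropping a bh protection that the old state held, the exit path in
 * rcutorture_one_extend() would end up enabling bh with irqs still
 * disabled, which is exactly the conflict flagged in the comment above.
 * ORing in both bh bits sidesteps that ordering problem.
 */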
| 1319 | |
| 1320 | /* |
| 1321 | * Do a randomly selected number of extensions of an existing RCU read-side |
| 1322 | * critical section. |
| 1323 | */ |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1324 | static struct rt_read_seg * |
| 1325 | rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, |
| 1326 | struct rt_read_seg *rtrsp) |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1327 | { |
| 1328 | int i; |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1329 | int j; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1330 | int mask = rcutorture_extend_mask_max(); |
| 1331 | |
| 1332 | WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ |
| 1333 | if (!((mask - 1) & mask)) |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1334 | return rtrsp; /* Current RCU reader not extendable. */ |
| 1335 | /* Bias towards larger numbers of loops. */ |
| 1336 | i = (torture_random(trsp) >> 3); |
| 1337 | i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1; |
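	/* ORing i with i >> 3 sets extra low-order bits, skewing the loop count high. */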
| 1338 | for (j = 0; j < i; j++) { |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1339 | mask = rcutorture_extend_mask(*readstate, trsp); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1340 | rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]); |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1341 | } |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1342 | return &rtrsp[j]; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1343 | } |
| 1344 | |
| 1345 | /* |
Paul E. McKenney | 6b06aa7 | 2018-05-22 10:56:05 -0700 | [diff] [blame] | 1346 | * Do one read-side critical section, returning false if there was |
| 1347 | * no data to read. Can be invoked both from process context and |
| 1348 | * from a timer handler. |
| 1349 | */ |
| 1350 | static bool rcu_torture_one_read(struct torture_random_state *trsp) |
| 1351 | { |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1352 | int i; |
Paul E. McKenney | 917963d | 2014-11-21 17:10:16 -0800 | [diff] [blame] | 1353 | unsigned long started; |
Paul E. McKenney | 6b80da4 | 2014-11-21 14:19:26 -0800 | [diff] [blame] | 1354 | unsigned long completed; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1355 | int newstate; |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1356 | struct rcu_torture *p; |
| 1357 | int pipe_count; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1358 | int readstate = 0; |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1359 | struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } }; |
| 1360 | struct rt_read_seg *rtrsp = &rtseg[0]; |
| 1361 | struct rt_read_seg *rtrsp1; |
Paul E. McKenney | 5249453 | 2012-11-14 16:26:40 -0800 | [diff] [blame] | 1362 | unsigned long long ts; |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1363 | |
Paul E. McKenney | 7752275 | 2020-06-11 16:43:14 -0700 | [diff] [blame] | 1364 | WARN_ON_ONCE(!rcu_is_watching()); |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1365 | newstate = rcutorture_extend_mask(readstate, trsp); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1366 | rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++); |
Paul E. McKenney | 17ef2fe | 2018-04-27 11:39:34 -0700 | [diff] [blame] | 1367 | started = cur_ops->get_gp_seq(); |
Steven Rostedt | e4aa0da | 2013-02-04 13:36:13 -0500 | [diff] [blame] | 1368 | ts = rcu_trace_clock_local(); |
Paul E. McKenney | 632ee20 | 2010-02-22 17:04:45 -0800 | [diff] [blame] | 1369 | p = rcu_dereference_check(rcu_torture_current, |
Paul E. McKenney | 632ee20 | 2010-02-22 17:04:45 -0800 | [diff] [blame] | 1370 | rcu_read_lock_bh_held() || |
| 1371 | rcu_read_lock_sched_held() || |
Paul E. McKenney | 5be5d1a | 2015-06-30 08:57:57 -0700 | [diff] [blame] | 1372 | srcu_read_lock_held(srcu_ctlp) || |
Paul E. McKenney | c1a76c0 | 2020-03-10 10:32:30 -0700 | [diff] [blame] | 1373 | rcu_read_lock_trace_held() || |
Paul E. McKenney | 5be5d1a | 2015-06-30 08:57:57 -0700 | [diff] [blame] | 1374 | torturing_tasks()); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1375 | if (p == NULL) { |
Paul E. McKenney | 6b06aa7 | 2018-05-22 10:56:05 -0700 | [diff] [blame] | 1376 | /* Wait for rcu_torture_writer to get underway */ |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1377 | rcutorture_one_extend(&readstate, 0, trsp, rtrsp); |
Paul E. McKenney | 6b06aa7 | 2018-05-22 10:56:05 -0700 | [diff] [blame] | 1378 | return false; |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1379 | } |
| 1380 | if (p->rtort_mbtest == 0) |
| 1381 | atomic_inc(&n_rcu_torture_mberror); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1382 | rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1383 | preempt_disable(); |
Paul E. McKenney | 2024891 | 2019-12-21 10:41:48 -0800 | [diff] [blame] | 1384 | pipe_count = READ_ONCE(p->rtort_pipe_count); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1385 | if (pipe_count > RCU_TORTURE_PIPE_LEN) { |
| 1386 | /* Should not happen, but... */ |
| 1387 | pipe_count = RCU_TORTURE_PIPE_LEN; |
| 1388 | } |
Paul E. McKenney | 17ef2fe | 2018-04-27 11:39:34 -0700 | [diff] [blame] | 1389 | completed = cur_ops->get_gp_seq(); |
Paul E. McKenney | 5249453 | 2012-11-14 16:26:40 -0800 | [diff] [blame] | 1390 | if (pipe_count > 1) { |
Paul E. McKenney | 6b06aa7 | 2018-05-22 10:56:05 -0700 | [diff] [blame] | 1391 | do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, |
| 1392 | ts, started, completed); |
Paul E. McKenney | 274529b | 2016-03-21 19:46:04 -0700 | [diff] [blame] | 1393 | rcu_ftrace_dump(DUMP_ALL); |
Paul E. McKenney | 5249453 | 2012-11-14 16:26:40 -0800 | [diff] [blame] | 1394 | } |
Rusty Russell | dd17c8f | 2009-10-29 22:34:15 +0900 | [diff] [blame] | 1395 | __this_cpu_inc(rcu_torture_count[pipe_count]); |
Paul E. McKenney | d7219312 | 2018-05-15 15:24:41 -0700 | [diff] [blame] | 1396 | completed = rcutorture_seq_diff(completed, started); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1397 | if (completed > RCU_TORTURE_PIPE_LEN) { |
| 1398 | /* Should not happen, but... */ |
| 1399 | completed = RCU_TORTURE_PIPE_LEN; |
| 1400 | } |
Rusty Russell | dd17c8f | 2009-10-29 22:34:15 +0900 | [diff] [blame] | 1401 | __this_cpu_inc(rcu_torture_batch[completed]); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1402 | preempt_enable(); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1403 | rcutorture_one_extend(&readstate, 0, trsp, rtrsp); |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1404 | WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK); |
Paul E. McKenney | d685514 | 2020-08-11 10:33:39 -0700 | [diff] [blame^] | 1405 | // This next splat is expected behavior if leakpointer is set, especially
| 1406 | // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels. |
| 1407 | WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1408 | |
| 1409 | /* If error or close call, record the sequence of reader protections. */ |
| 1410 | if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { |
| 1411 | i = 0; |
| 1412 | for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++) |
| 1413 | err_segs[i++] = *rtrsp1; |
| 1414 | rt_read_nsegs = i; |
| 1415 | } |
| 1416 | |
Paul E. McKenney | 6b06aa7 | 2018-05-22 10:56:05 -0700 | [diff] [blame] | 1417 | return true; |
| 1418 | } |
| 1419 | |
Paul E. McKenney | 3025520e | 2018-05-22 11:38:47 -0700 | [diff] [blame] | 1420 | static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); |
| 1421 | |
Paul E. McKenney | 6b06aa7 | 2018-05-22 10:56:05 -0700 | [diff] [blame] | 1422 | /* |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1423 | * RCU torture reader from timer handler. Dereferences rcu_torture_current, |
| 1424 | * incrementing the corresponding element of the pipeline array. The |
| 1425 | * counter in the element should never be greater than 1; otherwise, the
| 1426 | * RCU implementation is broken. |
| 1427 | */ |
| 1428 | static void rcu_torture_timer(struct timer_list *unused) |
| 1429 | { |
Paul E. McKenney | 8da9a59 | 2018-05-22 11:17:51 -0700 | [diff] [blame] | 1430 | atomic_long_inc(&n_rcu_torture_timers); |
Paul E. McKenney | 241b425 | 2018-05-22 11:59:31 -0700 | [diff] [blame] | 1431 | (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand)); |
Paul E. McKenney | f34c8585 | 2017-07-20 15:27:32 -0700 | [diff] [blame] | 1432 | |
| 1433 | /* Test call_rcu() invocation from interrupt handler. */ |
| 1434 | if (cur_ops->call) { |
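		/* GFP_NOWAIT: this runs in timer (softirq) context, so sleeping is forbidden. */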
| 1435 | struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); |
| 1436 | |
| 1437 | if (rhp) |
| 1438 | cur_ops->call(rhp, rcu_torture_timer_cb); |
| 1439 | } |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1440 | } |
| 1441 | |
| 1442 | /* |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1443 | * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, |
| 1444 | * incrementing the corresponding element of the pipeline array. The |
| 1445 | * counter in the element should never be greater than 1; otherwise, the
| 1446 | * RCU implementation is broken. |
| 1447 | */ |
| 1448 | static int |
| 1449 | rcu_torture_reader(void *arg) |
| 1450 | { |
Paul E. McKenney | 444da51 | 2018-07-04 14:14:42 -0700 | [diff] [blame] | 1451 | unsigned long lastsleep = jiffies; |
Paul E. McKenney | c04dd09 | 2018-07-23 14:16:47 -0700 | [diff] [blame] | 1452 | long myid = (long)arg; |
| 1453 | int mynumonline = myid; |
Paul E. McKenney | 51b1130 | 2014-01-27 11:49:39 -0800 | [diff] [blame] | 1454 | DEFINE_TORTURE_RANDOM(rand); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1455 | struct timer_list t; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1456 | |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 1457 | VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); |
Linus Torvalds | 971eae7 | 2014-03-31 11:21:19 -0700 | [diff] [blame] | 1458 | set_user_nice(current, MAX_NICE); |
Paul E. McKenney | 0acc512 | 2009-06-25 09:08:17 -0700 | [diff] [blame] | 1459 | if (irqreader && cur_ops->irq_capable) |
Kees Cook | fd30b71 | 2017-10-22 17:58:54 -0700 | [diff] [blame] | 1460 | timer_setup_on_stack(&t, rcu_torture_timer, 0); |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 1461 | tick_dep_set_task(current, TICK_DEP_BIT_RCU); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1462 | do { |
Paul E. McKenney | 0acc512 | 2009-06-25 09:08:17 -0700 | [diff] [blame] | 1463 | if (irqreader && cur_ops->irq_capable) { |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1464 | if (!timer_pending(&t)) |
Paul E. McKenney | 6155fec | 2010-02-22 17:05:04 -0800 | [diff] [blame] | 1465 | mod_timer(&t, jiffies + 1); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1466 | } |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 1467 | if (!rcu_torture_one_read(&rand) && !torture_must_stop()) |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1468 | schedule_timeout_interruptible(HZ); |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 1469 | if (time_after(jiffies, lastsleep) && !torture_must_stop()) { |
Paul E. McKenney | 444da51 | 2018-07-04 14:14:42 -0700 | [diff] [blame] | 1470 | schedule_timeout_interruptible(1); |
| 1471 | lastsleep = jiffies + 10; |
| 1472 | } |
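		/* Park surplus readers: reader N spins here until at least N CPUs are online. */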
Paul E. McKenney | c04dd09 | 2018-07-23 14:16:47 -0700 | [diff] [blame] | 1473 | while (num_online_cpus() < mynumonline && !torture_must_stop()) |
| 1474 | schedule_timeout_interruptible(HZ / 5); |
Paul E. McKenney | 628edaa | 2014-01-31 11:57:43 -0800 | [diff] [blame] | 1475 | stutter_wait("rcu_torture_reader"); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 1476 | } while (!torture_must_stop()); |
Thomas Gleixner | 424c1b6 | 2014-03-23 08:58:27 -0700 | [diff] [blame] | 1477 | if (irqreader && cur_ops->irq_capable) { |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1478 | del_timer_sync(&t); |
Thomas Gleixner | 424c1b6 | 2014-03-23 08:58:27 -0700 | [diff] [blame] | 1479 | destroy_timer_on_stack(&t); |
| 1480 | } |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 1481 | tick_dep_clear_task(current, TICK_DEP_BIT_RCU); |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 1482 | torture_kthread_stopping("rcu_torture_reader"); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1483 | return 0; |
| 1484 | } |
| 1485 | |
| 1486 | /* |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1487 | * Print torture statistics. Caller must ensure that there is only |
| 1488 | * one call to this function at a given time!!! This is normally |
| 1489 | * accomplished by relying on the module system to load only one copy
| 1490 | * of the module loaded, and then by giving the rcu_torture_stats |
| 1491 | * kthread full control (or the init/cleanup functions when rcu_torture_stats |
| 1492 | * thread is not running). |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1493 | */ |
Chen Gang | d100895 | 2013-11-07 10:30:25 +0800 | [diff] [blame] | 1494 | static void |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1495 | rcu_torture_stats_print(void) |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1496 | { |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1497 | int cpu; |
| 1498 | int i; |
| 1499 | long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; |
| 1500 | long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; |
Paul E. McKenney | 5396d31 | 2020-01-08 19:58:13 -0800 | [diff] [blame] | 1501 | struct rcu_torture *rtcp; |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1502 | static unsigned long rtcv_snap = ULONG_MAX; |
Paul E. McKenney | 0032f4e | 2017-08-30 10:40:17 -0700 | [diff] [blame] | 1503 | static bool splatted; |
Paul E. McKenney | 4ffa669 | 2016-06-30 11:56:38 -0700 | [diff] [blame] | 1504 | struct task_struct *wtp; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1505 | |
KAMEZAWA Hiroyuki | 0a94502 | 2006-03-28 01:56:37 -0800 | [diff] [blame] | 1506 | for_each_possible_cpu(cpu) { |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1507 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { |
Paul E. McKenney | f042a43 | 2020-01-03 16:27:00 -0800 | [diff] [blame] | 1508 | pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); |
| 1509 | batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1510 | } |
| 1511 | } |
| 1512 | for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) { |
| 1513 | if (pipesummary[i] != 0) |
| 1514 | break; |
| 1515 | } |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1516 | |
| 1517 | pr_alert("%s%s ", torture_type, TORTURE_FLAG); |
Paul E. McKenney | 5396d31 | 2020-01-08 19:58:13 -0800 | [diff] [blame] | 1518 | rtcp = rcu_access_pointer(rcu_torture_current); |
Paul E. McKenney | 354ea05 | 2019-05-25 12:36:53 -0700 | [diff] [blame] | 1519 | pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", |
Paul E. McKenney | 5396d31 | 2020-01-08 19:58:13 -0800 | [diff] [blame] | 1520 | rtcp, |
| 1521 | rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER", |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1522 | rcu_torture_current_version, |
| 1523 | list_empty(&rcu_torture_freelist), |
| 1524 | atomic_read(&n_rcu_torture_alloc), |
| 1525 | atomic_read(&n_rcu_torture_alloc_fail), |
| 1526 | atomic_read(&n_rcu_torture_free)); |
SeongJae Park | 472213a | 2016-08-13 15:54:35 +0900 | [diff] [blame] | 1527 | pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ", |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1528 | atomic_read(&n_rcu_torture_mberror), |
SeongJae Park | 472213a | 2016-08-13 15:54:35 +0900 | [diff] [blame] | 1529 | n_rcu_torture_barrier_error, |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1530 | n_rcu_torture_boost_ktrerror, |
| 1531 | n_rcu_torture_boost_rterror); |
| 1532 | pr_cont("rtbf: %ld rtb: %ld nt: %ld ", |
| 1533 | n_rcu_torture_boost_failure, |
| 1534 | n_rcu_torture_boosts, |
Paul E. McKenney | 8da9a59 | 2018-05-22 11:17:51 -0700 | [diff] [blame] | 1535 | atomic_long_read(&n_rcu_torture_timers)); |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1536 | torture_onoff_stats(); |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 1537 | pr_cont("barrier: %ld/%ld:%ld ", |
Paul E. McKenney | c9527be | 2020-02-18 13:41:02 -0800 | [diff] [blame] | 1538 | data_race(n_barrier_successes), |
| 1539 | data_race(n_barrier_attempts), |
| 1540 | data_race(n_rcu_torture_barrier_error)); |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 1541 | pr_cont("read-exits: %ld\n", data_race(n_read_exits)); |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1542 | |
| 1543 | pr_alert("%s%s ", torture_type, TORTURE_FLAG); |
Paul E. McKenney | 8b5ddf8 | 2019-08-14 12:02:40 -0700 | [diff] [blame] | 1544 | if (atomic_read(&n_rcu_torture_mberror) || |
| 1545 | n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || |
| 1546 | n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure || |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 1547 | i > 1) { |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1548 | pr_cont("%s", "!!! "); |
Paul E. McKenney | 996417d | 2005-11-18 01:10:50 -0800 | [diff] [blame] | 1549 | atomic_inc(&n_rcu_torture_error); |
Paul E. McKenney | 8b5ddf8 | 2019-08-14 12:02:40 -0700 | [diff] [blame] | 1550 | WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); |
| 1551 | WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() |
| 1552 | WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread |
| 1553 | WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio |
| 1554 | WARN_ON_ONCE(n_rcu_torture_boost_failure); // RCU boost failed |
| 1555 | WARN_ON_ONCE(i > 1); // Too-short grace period |
Paul E. McKenney | 996417d | 2005-11-18 01:10:50 -0800 | [diff] [blame] | 1556 | } |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1557 | pr_cont("Reader Pipe: "); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1558 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1559 | pr_cont(" %ld", pipesummary[i]); |
| 1560 | pr_cont("\n"); |
| 1561 | |
| 1562 | pr_alert("%s%s ", torture_type, TORTURE_FLAG); |
| 1563 | pr_cont("Reader Batch: "); |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 1564 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1565 | pr_cont(" %ld", batchsummary[i]); |
| 1566 | pr_cont("\n"); |
| 1567 | |
| 1568 | pr_alert("%s%s ", torture_type, TORTURE_FLAG); |
| 1569 | pr_cont("Free-Block Circulation: "); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1570 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1571 | pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1572 | } |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1573 | pr_cont("\n"); |
| 1574 | |
Josh Triplett | c8e5b16 | 2007-05-08 00:33:20 -0700 | [diff] [blame] | 1575 | if (cur_ops->stats) |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1576 | cur_ops->stats(); |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1577 | if (rtcv_snap == rcu_torture_current_version && |
Paul E. McKenney | 5396d31 | 2020-01-08 19:58:13 -0800 | [diff] [blame] | 1578 | rcu_access_pointer(rcu_torture_current) && |
| 1579 | !rcu_stall_is_suppressed()) { |
Paul E. McKenney | 7f6733c | 2017-04-18 17:17:35 -0700 | [diff] [blame] | 1580 | int __maybe_unused flags = 0; |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 1581 | unsigned long __maybe_unused gp_seq = 0; |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1582 | |
| 1583 | rcutorture_get_gp_data(cur_ops->ttype, |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 1584 | &flags, &gp_seq); |
Paul E. McKenney | 7f6733c | 2017-04-18 17:17:35 -0700 | [diff] [blame] | 1585 | srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 1586 | &flags, &gp_seq); |
Paul E. McKenney | 4ffa669 | 2016-06-30 11:56:38 -0700 | [diff] [blame] | 1587 | wtp = READ_ONCE(writer_task); |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 1588 | pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n", |
Paul E. McKenney | 18aff33 | 2015-11-17 13:35:28 -0800 | [diff] [blame] | 1589 | rcu_torture_writer_state_getname(), |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 1590 | rcu_torture_writer_state, gp_seq, flags, |
Paul E. McKenney | 808de39 | 2017-06-19 10:03:22 -0700 | [diff] [blame] | 1591 | wtp == NULL ? ~0UL : wtp->state, |
| 1592 | wtp == NULL ? -1 : (int)task_cpu(wtp)); |
Paul E. McKenney | 0032f4e | 2017-08-30 10:40:17 -0700 | [diff] [blame] | 1593 | if (!splatted && wtp) { |
| 1594 | sched_show_task(wtp); |
| 1595 | splatted = true; |
| 1596 | } |
Paul E. McKenney | afea227 | 2014-03-12 07:10:41 -0700 | [diff] [blame] | 1597 | show_rcu_gp_kthreads(); |
Paul E. McKenney | 274529b | 2016-03-21 19:46:04 -0700 | [diff] [blame] | 1598 | rcu_ftrace_dump(DUMP_ALL); |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1599 | } |
| 1600 | rtcv_snap = rcu_torture_current_version; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1601 | } |
| 1602 | |
| 1603 | /* |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1604 | * Periodically prints torture statistics, if periodic statistics printing |
| 1605 | * was specified via the stat_interval module parameter. |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1606 | */ |
| 1607 | static int |
| 1608 | rcu_torture_stats(void *arg) |
| 1609 | { |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 1610 | VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1611 | do { |
| 1612 | schedule_timeout_interruptible(stat_interval * HZ); |
| 1613 | rcu_torture_stats_print(); |
Paul E. McKenney | f67a335 | 2014-01-29 07:40:27 -0800 | [diff] [blame] | 1614 | torture_shutdown_absorb("rcu_torture_stats"); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 1615 | } while (!torture_must_stop()); |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 1616 | torture_kthread_stopping("rcu_torture_stats"); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1617 | return 0; |
| 1618 | } |
| 1619 | |
Paul E. McKenney | eac45e5 | 2018-05-17 11:33:17 -0700 | [diff] [blame] | 1620 | static void |
Steven Rostedt (Red Hat) | e66c33d | 2013-07-12 16:50:28 -0400 | [diff] [blame] | 1621 | rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) |
Paul E. McKenney | 95c3832 | 2006-03-24 03:15:58 -0800 | [diff] [blame] | 1622 | { |
Paul E. McKenney | 2caa1e4 | 2012-08-09 16:30:45 -0700 | [diff] [blame] | 1623 | pr_alert("%s" TORTURE_FLAG |
| 1624 | "--- %s: nreaders=%d nfakewriters=%d " |
| 1625 | "stat_interval=%d verbose=%d test_no_idle_hz=%d " |
| 1626 | "shuffle_interval=%d stutter=%d irqreader=%d " |
| 1627 | "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " |
| 1628 | "test_boost=%d/%d test_boost_interval=%d " |
| 1629 | "test_boost_duration=%d shutdown_secs=%d " |
Paul E. McKenney | 2b1516e | 2017-08-18 16:11:37 -0700 | [diff] [blame] | 1630 | "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 1631 | "stall_cpu_block=%d " |
Paul E. McKenney | 67afeed | 2012-10-20 12:56:06 -0700 | [diff] [blame] | 1632 | "n_barrier_cbs=%d " |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 1633 | "onoff_interval=%d onoff_holdoff=%d " |
| 1634 | "read_exit_delay=%d read_exit_burst=%d\n", |
Paul E. McKenney | 2caa1e4 | 2012-08-09 16:30:45 -0700 | [diff] [blame] | 1635 | torture_type, tag, nrealreaders, nfakewriters, |
| 1636 | stat_interval, verbose, test_no_idle_hz, shuffle_interval, |
| 1637 | stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, |
| 1638 | test_boost, cur_ops->can_boost, |
| 1639 | test_boost_interval, test_boost_duration, shutdown_secs, |
Paul E. McKenney | 2b1516e | 2017-08-18 16:11:37 -0700 | [diff] [blame] | 1640 | stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 1641 | stall_cpu_block, |
Paul E. McKenney | 67afeed | 2012-10-20 12:56:06 -0700 | [diff] [blame] | 1642 | n_barrier_cbs, |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 1643 | onoff_interval, onoff_holdoff, |
| 1644 | read_exit_delay, read_exit_burst); |
Paul E. McKenney | 95c3832 | 2006-03-24 03:15:58 -0800 | [diff] [blame] | 1645 | } |
| 1646 | |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 1647 | static int rcutorture_booster_cleanup(unsigned int cpu) |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1648 | { |
| 1649 | struct task_struct *t; |
| 1650 | |
| 1651 | if (boost_tasks[cpu] == NULL) |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 1652 | return 0; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1653 | mutex_lock(&boost_mutex); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1654 | t = boost_tasks[cpu]; |
| 1655 | boost_tasks[cpu] = NULL; |
Joel Fernandes (Google) | 450efca | 2018-06-10 16:45:43 -0700 | [diff] [blame] | 1656 | rcu_torture_enable_rt_throttle(); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1657 | mutex_unlock(&boost_mutex); |
| 1658 | |
| 1659 | 	/* Stop the kthread outside of boost_mutex: rcu_torture_boost() itself acquires boost_mutex, so waiting for it to exit while holding that mutex could deadlock! */
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 1660 | torture_stop_kthread(rcu_torture_boost, t); |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 1661 | return 0; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1662 | } |
| 1663 | |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 1664 | static int rcutorture_booster_init(unsigned int cpu) |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1665 | { |
| 1666 | int retval; |
| 1667 | |
| 1668 | if (boost_tasks[cpu] != NULL) |
| 1669 | return 0; /* Already created, nothing more to do. */ |
| 1670 | |
| 1671 | /* Don't allow time recalculation while creating a new task. */ |
| 1672 | mutex_lock(&boost_mutex); |
Joel Fernandes (Google) | 450efca | 2018-06-10 16:45:43 -0700 | [diff] [blame] | 1673 | rcu_torture_disable_rt_throttle(); |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 1674 | VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); |
Eric Dumazet | 1f28809 | 2011-06-16 15:53:18 -0700 | [diff] [blame] | 1675 | boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL, |
| 1676 | cpu_to_node(cpu), |
| 1677 | "rcu_torture_boost"); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1678 | if (IS_ERR(boost_tasks[cpu])) { |
| 1679 | retval = PTR_ERR(boost_tasks[cpu]); |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 1680 | VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1681 | n_rcu_torture_boost_ktrerror++; |
| 1682 | boost_tasks[cpu] = NULL; |
| 1683 | mutex_unlock(&boost_mutex); |
| 1684 | return retval; |
| 1685 | } |
| 1686 | kthread_bind(boost_tasks[cpu], cpu); |
| 1687 | wake_up_process(boost_tasks[cpu]); |
| 1688 | mutex_unlock(&boost_mutex); |
| 1689 | return 0; |
| 1690 | } |
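/*
 * A hedged sketch of how the init/cleanup pair above is meant to be
 * used (the cpuhp_remove_state(rcutor_hp) call in rcu_torture_cleanup()
 * below implies a matching registration at init time), for example:
 *
 *	firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
 *				     rcutorture_booster_init,
 *				     rcutorture_booster_cleanup);
 *	if (firsterr >= 0)
 *		rcutor_hp = firsterr;
 *
 * With CPUHP_AP_ONLINE_DYN, the init callback also runs immediately on
 * each CPU that is already online at registration time.
 */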
| 1691 | |
Paul E. McKenney | d5f546d | 2011-11-04 11:44:12 -0700 | [diff] [blame] | 1692 | /* |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 1693 |  * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
| 1694 |  * induces a CPU stall (stall_cpu seconds) and/or an RCU grace-period-kthread stall (stall_gp_kthread seconds).
| 1695 | */ |
Paul Gortmaker | 49fb4c6 | 2013-06-19 14:52:21 -0400 | [diff] [blame] | 1696 | static int rcu_torture_stall(void *args) |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 1697 | { |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 1698 | int idx; |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 1699 | unsigned long stop_at; |
| 1700 | |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 1701 | VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 1702 | if (stall_cpu_holdoff > 0) { |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 1703 | VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 1704 | schedule_timeout_interruptible(stall_cpu_holdoff * HZ); |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 1705 | VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 1706 | } |
Paul E. McKenney | 55b2dcf | 2020-04-01 19:57:52 -0700 | [diff] [blame] | 1707 | if (!kthread_should_stop() && stall_gp_kthread > 0) { |
| 1708 | VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall"); |
| 1709 | rcu_gp_set_torture_wait(stall_gp_kthread * HZ); |
| 1710 | for (idx = 0; idx < stall_gp_kthread + 2; idx++) { |
| 1711 | if (kthread_should_stop()) |
| 1712 | break; |
| 1713 | schedule_timeout_uninterruptible(HZ); |
| 1714 | } |
| 1715 | } |
| 1716 | if (!kthread_should_stop() && stall_cpu > 0) { |
| 1717 | VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall"); |
Arnd Bergmann | 622be33f | 2018-06-18 16:47:34 +0200 | [diff] [blame] | 1718 | stop_at = ktime_get_seconds() + stall_cpu; |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 1719 | /* RCU CPU stall is expected behavior in following code. */ |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 1720 | idx = cur_ops->readlock(); |
Paul E. McKenney | 2b1516e | 2017-08-18 16:11:37 -0700 | [diff] [blame] | 1721 | if (stall_cpu_irqsoff) |
| 1722 | local_irq_disable(); |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 1723 | else if (!stall_cpu_block) |
Paul E. McKenney | 2b1516e | 2017-08-18 16:11:37 -0700 | [diff] [blame] | 1724 | preempt_disable(); |
| 1725 | pr_alert("rcu_torture_stall start on CPU %d.\n", |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 1726 | raw_smp_processor_id()); |
Arnd Bergmann | 622be33f | 2018-06-18 16:47:34 +0200 | [diff] [blame] | 1727 | while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), |
| 1728 | stop_at)) |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 1729 | if (stall_cpu_block) |
| 1730 | schedule_timeout_uninterruptible(HZ); |
Paul E. McKenney | 2b1516e | 2017-08-18 16:11:37 -0700 | [diff] [blame] | 1731 | if (stall_cpu_irqsoff) |
| 1732 | local_irq_enable(); |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 1733 | else if (!stall_cpu_block) |
Paul E. McKenney | 2b1516e | 2017-08-18 16:11:37 -0700 | [diff] [blame] | 1734 | preempt_enable(); |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 1735 | cur_ops->readunlock(idx); |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 1736 | } |
Paul E. McKenney | 55b2dcf | 2020-04-01 19:57:52 -0700 | [diff] [blame] | 1737 | pr_alert("rcu_torture_stall end.\n"); |
Paul E. McKenney | f67a335 | 2014-01-29 07:40:27 -0800 | [diff] [blame] | 1738 | torture_shutdown_absorb("rcu_torture_stall"); |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 1739 | while (!kthread_should_stop()) |
| 1740 | schedule_timeout_interruptible(10 * HZ); |
| 1741 | return 0; |
| 1742 | } |
| 1743 | |
| 1744 | /* Spawn CPU-stall kthread, if stall_cpu or stall_gp_kthread specified. */
| 1745 | static int __init rcu_torture_stall_init(void) |
| 1746 | { |
Paul E. McKenney | 55b2dcf | 2020-04-01 19:57:52 -0700 | [diff] [blame] | 1747 | if (stall_cpu <= 0 && stall_gp_kthread <= 0) |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 1748 | return 0; |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 1749 | return torture_create_kthread(rcu_torture_stall, NULL, stall_task); |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 1750 | } |
| 1751 | |
Paul E. McKenney | 9fdcb9a | 2018-07-19 13:36:00 -0700 | [diff] [blame] | 1752 | /* State structure for forward-progress self-propagating RCU callback. */ |
| 1753 | struct fwd_cb_state { |
| 1754 | struct rcu_head rh; |
| 1755 | int stop; |
| 1756 | }; |
| 1757 | |
| 1758 | /* |
| 1759 | * Forward-progress self-propagating RCU callback function. Because |
| 1760 | * callbacks run from softirq, this function is an implicit RCU read-side |
| 1761 | * critical section. |
| 1762 | */ |
| 1763 | static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) |
| 1764 | { |
| 1765 | struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); |
| 1766 | |
| 1767 | if (READ_ONCE(fcsp->stop)) { |
| 1768 | WRITE_ONCE(fcsp->stop, 2); |
| 1769 | return; |
| 1770 | } |
| 1771 | cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); |
| 1772 | } |
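/*
 * Lifecycle sketch for the self-propagating callback above, mirroring
 * its use by rcu_torture_fwd_prog_nr() later in this file: posting the
 * first callback starts the chain, writing 1 to ->stop requests
 * termination, and the callback acknowledges by writing 2 before
 * declining to repost, so a sync plus cb_barrier suffices before the
 * on-stack rcu_head may be reused:
 *
 *	init_rcu_head_on_stack(&fcs.rh);
 *	WRITE_ONCE(fcs.stop, 0);
 *	cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
 *	...				// Let grace periods elapse.
 *	WRITE_ONCE(fcs.stop, 1);
 *	cur_ops->sync();		// Wait for running CB to complete.
 *	cur_ops->cb_barrier();		// Wait for queued callbacks.
 *	WARN_ON(READ_ONCE(fcs.stop) != 2);
 *	destroy_rcu_head_on_stack(&fcs.rh);
 */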
| 1773 | |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 1774 | /* State for continuous-flood RCU callbacks. */ |
| 1775 | struct rcu_fwd_cb { |
| 1776 | struct rcu_head rh; |
| 1777 | struct rcu_fwd_cb *rfc_next; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 1778 | struct rcu_fwd *rfc_rfp; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 1779 | int rfc_gps; |
| 1780 | }; |
Paul E. McKenney | a289e60 | 2019-11-05 08:31:56 -0800 | [diff] [blame] | 1781 | |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 1782 | #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ |
| 1783 | #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ |
| 1784 | #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ |
Paul E. McKenney | 2e57bf9 | 2018-10-05 16:43:09 -0700 | [diff] [blame] | 1785 | #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */ |
Paul E. McKenney | a289e60 | 2019-11-05 08:31:56 -0800 | [diff] [blame] | 1786 | #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) |
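/*
 * Worked example of the bucket arithmetic above: with MAX_FWD_CB_JIFFIES
 * of 8 * HZ and FWD_CBS_HIST_DIV of 10, N_LAUNDERS_HIST comes to
 * 2 * 8 * HZ / (HZ / 10) = 160 buckets of 1/10 second each, that is,
 * twice the maximum test duration's worth of histogram buckets.
 */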
| 1787 | |
Paul E. McKenney | cd618d1 | 2019-01-08 13:41:26 -0800 | [diff] [blame] | 1788 | struct rcu_launder_hist { |
| 1789 | long n_launders; |
| 1790 | unsigned long launder_gp_seq; |
| 1791 | }; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 1792 | |
Paul E. McKenney | a289e60 | 2019-11-05 08:31:56 -0800 | [diff] [blame] | 1793 | struct rcu_fwd { |
| 1794 | spinlock_t rcu_fwd_lock; |
| 1795 | struct rcu_fwd_cb *rcu_fwd_cb_head; |
| 1796 | struct rcu_fwd_cb **rcu_fwd_cb_tail; |
| 1797 | long n_launders_cb; |
| 1798 | unsigned long rcu_fwd_startat; |
| 1799 | struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; |
| 1800 | unsigned long rcu_launder_gp_seq_start; |
| 1801 | }; |
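/*
 * The callback list above uses the classic tail-pointer queue idiom:
 * ->rcu_fwd_cb_tail always points at the ->rfc_next field of the last
 * element (or at ->rcu_fwd_cb_head when the list is empty), which makes
 * enqueue O(1) under ->rcu_fwd_lock.  A minimal enqueue sketch,
 * mirroring rcu_torture_fwd_cb_cr() below:
 *
 *	rfcp->rfc_next = NULL;
 *	spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
 *	*rfp->rcu_fwd_cb_tail = rfcp;
 *	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
 *	spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
 */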
| 1802 | |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 1803 | static DEFINE_MUTEX(rcu_fwd_mutex); |
Jason Yan | afbc157 | 2020-04-09 19:42:38 +0800 | [diff] [blame] | 1804 | static struct rcu_fwd *rcu_fwds; |
| 1805 | static bool rcu_fwd_emergency_stop; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 1806 | |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 1807 | static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) |
Paul E. McKenney | 1a68275 | 2018-10-03 12:33:41 -0700 | [diff] [blame] | 1808 | { |
Paul E. McKenney | cd618d1 | 2019-01-08 13:41:26 -0800 | [diff] [blame] | 1809 | unsigned long gps; |
| 1810 | unsigned long gps_old; |
Paul E. McKenney | 1a68275 | 2018-10-03 12:33:41 -0700 | [diff] [blame] | 1811 | int i; |
| 1812 | int j; |
| 1813 | |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 1814 | for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) |
| 1815 | if (rfp->n_launders_hist[i].n_launders > 0) |
Paul E. McKenney | 1a68275 | 2018-10-03 12:33:41 -0700 | [diff] [blame] | 1816 | break; |
Paul E. McKenney | 73d665b | 2018-10-04 10:54:22 -0700 | [diff] [blame] | 1817 | pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):", |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 1818 | __func__, jiffies - rfp->rcu_fwd_startat); |
| 1819 | gps_old = rfp->rcu_launder_gp_seq_start; |
Paul E. McKenney | cd618d1 | 2019-01-08 13:41:26 -0800 | [diff] [blame] | 1820 | for (j = 0; j <= i; j++) { |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 1821 | gps = rfp->n_launders_hist[j].launder_gp_seq; |
Paul E. McKenney | cd618d1 | 2019-01-08 13:41:26 -0800 | [diff] [blame] | 1822 | pr_cont(" %ds/%d: %ld:%ld", |
Paul E. McKenney | a289e60 | 2019-11-05 08:31:56 -0800 | [diff] [blame] | 1823 | j + 1, FWD_CBS_HIST_DIV, |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 1824 | rfp->n_launders_hist[j].n_launders, |
Paul E. McKenney | cd618d1 | 2019-01-08 13:41:26 -0800 | [diff] [blame] | 1825 | rcutorture_seq_diff(gps, gps_old)); |
| 1826 | gps_old = gps; |
| 1827 | } |
Paul E. McKenney | 1a68275 | 2018-10-03 12:33:41 -0700 | [diff] [blame] | 1828 | pr_cont("\n"); |
| 1829 | } |
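/*
 * Hypothetical example of the histogram line produced above, following
 * the pr_alert()/pr_cont() format strings (the numbers are illustrative
 * only, not captured output):
 *
 *	rcu_torture_fwd_cb_hist: Callback-invocation histogram
 *	    (duration 913 jiffies): 1s/10: 0:1 2s/10: 714:2 3s/10: 713:2
 */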
| 1830 | |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 1831 | /* Callback function for continuous-flood RCU callbacks. */ |
| 1832 | static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) |
| 1833 | { |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 1834 | unsigned long flags; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 1835 | int i; |
| 1836 | struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); |
| 1837 | struct rcu_fwd_cb **rfcpp; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 1838 | struct rcu_fwd *rfp = rfcp->rfc_rfp; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 1839 | |
| 1840 | rfcp->rfc_next = NULL; |
| 1841 | rfcp->rfc_gps++; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 1842 | spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); |
| 1843 | rfcpp = rfp->rcu_fwd_cb_tail; |
| 1844 | rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 1845 | WRITE_ONCE(*rfcpp, rfcp); |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 1846 | WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); |
| 1847 | i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); |
| 1848 | if (i >= ARRAY_SIZE(rfp->n_launders_hist)) |
| 1849 | i = ARRAY_SIZE(rfp->n_launders_hist) - 1; |
| 1850 | rfp->n_launders_hist[i].n_launders++; |
| 1851 | rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); |
| 1852 | spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 1853 | } |
| 1854 | |
Paul E. McKenney | ab21f60 | 2019-04-14 18:30:22 -0700 | [diff] [blame] | 1855 | // Give the scheduler a chance, even on nohz_full CPUs. |
Paul E. McKenney | bd1bfc5 | 2019-06-22 14:35:59 -0700 | [diff] [blame] | 1856 | static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) |
Paul E. McKenney | ab21f60 | 2019-04-14 18:30:22 -0700 | [diff] [blame] | 1857 | { |
Sebastian Andrzej Siewior | 90326f0 | 2019-10-15 21:18:14 +0200 | [diff] [blame] | 1858 | if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { |
Paul E. McKenney | bd1bfc5 | 2019-06-22 14:35:59 -0700 | [diff] [blame] | 1859 | // Real call_rcu() floods hit userspace, so emulate that. |
| 1860 | if (need_resched() || (iter & 0xfff)) |
Paul E. McKenney | ab21f60 | 2019-04-14 18:30:22 -0700 | [diff] [blame] | 1861 | schedule(); |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 1862 | return; |
Paul E. McKenney | ab21f60 | 2019-04-14 18:30:22 -0700 | [diff] [blame] | 1863 | } |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 1864 | // No userspace emulation: CB invocation throttles call_rcu() |
| 1865 | cond_resched(); |
Paul E. McKenney | ab21f60 | 2019-04-14 18:30:22 -0700 | [diff] [blame] | 1866 | } |
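/*
 * Gloss on the iteration check above: on kernels built with both
 * CONFIG_PREEMPTION and CONFIG_NO_HZ_FULL, schedule() is invoked on
 * every iteration except each 4096th (that is, whenever the low 12
 * bits of iter are nonzero, or even then if need_resched() is set),
 * mimicking a callback flood driven from userspace.  Otherwise, plain
 * cond_resched() lets callback invocation throttle the flood.
 */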
| 1867 | |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 1868 | /* |
| 1869 | * Free all callbacks on the rcu_fwd_cb_head list, either because the |
| 1870 | * test is over or because we hit an OOM event. |
| 1871 | */ |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 1872 | static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 1873 | { |
| 1874 | unsigned long flags; |
| 1875 | unsigned long freed = 0; |
| 1876 | struct rcu_fwd_cb *rfcp; |
| 1877 | |
| 1878 | for (;;) { |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 1879 | spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); |
| 1880 | rfcp = rfp->rcu_fwd_cb_head; |
Paul E. McKenney | 140e53f | 2019-04-09 10:08:18 -0700 | [diff] [blame] | 1881 | if (!rfcp) { |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 1882 | spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 1883 | break; |
Paul E. McKenney | 140e53f | 2019-04-09 10:08:18 -0700 | [diff] [blame] | 1884 | } |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 1885 | rfp->rcu_fwd_cb_head = rfcp->rfc_next; |
| 1886 | if (!rfp->rcu_fwd_cb_head) |
| 1887 | rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; |
| 1888 | spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 1889 | kfree(rfcp); |
| 1890 | freed++; |
Paul E. McKenney | bd1bfc5 | 2019-06-22 14:35:59 -0700 | [diff] [blame] | 1891 | rcu_torture_fwd_prog_cond_resched(freed); |
Paul E. McKenney | 79ba7ff | 2019-08-04 13:17:35 -0700 | [diff] [blame] | 1892 | if (tick_nohz_full_enabled()) { |
| 1893 | local_irq_save(flags); |
| 1894 | rcu_momentary_dyntick_idle(); |
| 1895 | local_irq_restore(flags); |
| 1896 | } |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 1897 | } |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 1898 | return freed; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 1899 | } |
| 1900 | |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 1901 | /* Carry out need_resched()/cond_resched() forward-progress testing. */ |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 1902 | static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, |
| 1903 | int *tested, int *tested_tries) |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 1904 | { |
Paul E. McKenney | 119248b | 2018-07-18 15:39:37 -0700 | [diff] [blame] | 1905 | unsigned long cver; |
Paul E. McKenney | f4de46e | 2018-07-24 20:50:40 -0700 | [diff] [blame] | 1906 | unsigned long dur; |
Paul E. McKenney | 7c590fc | 2018-08-07 16:42:42 -0700 | [diff] [blame] | 1907 | struct fwd_cb_state fcs; |
Paul E. McKenney | 119248b | 2018-07-18 15:39:37 -0700 | [diff] [blame] | 1908 | unsigned long gps; |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 1909 | int idx; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 1910 | int sd; |
| 1911 | int sd4; |
| 1912 | bool selfpropcb = false; |
| 1913 | unsigned long stopat; |
| 1914 | static DEFINE_TORTURE_RANDOM(trs); |
| 1915 | |
| 1916 | if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) { |
| 1917 | init_rcu_head_on_stack(&fcs.rh); |
| 1918 | selfpropcb = true; |
| 1919 | } |
| 1920 | |
| 1921 | /* Tight loop containing cond_resched(). */ |
Paul E. McKenney | e8516c6 | 2019-04-09 11:06:32 -0700 | [diff] [blame] | 1922 | WRITE_ONCE(rcu_fwd_cb_nodelay, true); |
| 1923 | cur_ops->sync(); /* Later readers see above write. */ |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 1924 | if (selfpropcb) { |
| 1925 | WRITE_ONCE(fcs.stop, 0); |
| 1926 | cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); |
| 1927 | } |
| 1928 | cver = READ_ONCE(rcu_torture_current_version); |
| 1929 | gps = cur_ops->get_gp_seq(); |
| 1930 | sd = cur_ops->stall_dur() + 1; |
| 1931 | sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; |
| 1932 | dur = sd4 + torture_random(&trs) % (sd - sd4); |
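	/*
	 * So dur is uniform over roughly [sd / fwd_progress_div, sd),
	 * keeping each attempt shorter than the RCU CPU stall timeout.
	 */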
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 1933 | WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); |
| 1934 | stopat = rfp->rcu_fwd_startat + dur; |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 1935 | while (time_before(jiffies, stopat) && |
Paul E. McKenney | 60013d5 | 2019-07-10 08:30:00 -0700 | [diff] [blame] | 1936 | !shutdown_time_arrived() && |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 1937 | !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 1938 | idx = cur_ops->readlock(); |
| 1939 | udelay(10); |
| 1940 | cur_ops->readunlock(idx); |
| 1941 | if (!fwd_progress_need_resched || need_resched()) |
Paul E. McKenney | fbbd5e3 | 2019-08-15 11:43:53 -0700 | [diff] [blame] | 1942 | cond_resched(); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 1943 | } |
| 1944 | (*tested_tries)++; |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 1945 | if (!time_before(jiffies, stopat) && |
Paul E. McKenney | 60013d5 | 2019-07-10 08:30:00 -0700 | [diff] [blame] | 1946 | !shutdown_time_arrived() && |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 1947 | !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 1948 | (*tested)++; |
| 1949 | cver = READ_ONCE(rcu_torture_current_version) - cver; |
| 1950 | gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); |
| 1951 | WARN_ON(!cver && gps < 2); |
| 1952 | pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps); |
| 1953 | } |
| 1954 | if (selfpropcb) { |
| 1955 | WRITE_ONCE(fcs.stop, 1); |
| 1956 | cur_ops->sync(); /* Wait for running CB to complete. */ |
| 1957 | cur_ops->cb_barrier(); /* Wait for queued callbacks. */ |
| 1958 | } |
| 1959 | |
| 1960 | if (selfpropcb) { |
| 1961 | WARN_ON(READ_ONCE(fcs.stop) != 2); |
| 1962 | destroy_rcu_head_on_stack(&fcs.rh); |
| 1963 | } |
Paul E. McKenney | e8516c6 | 2019-04-09 11:06:32 -0700 | [diff] [blame] | 1964 | schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ |
| 1965 | WRITE_ONCE(rcu_fwd_cb_nodelay, false); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 1966 | } |
| 1967 | |
| 1968 | /* Carry out call_rcu() forward-progress testing. */ |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 1969 | static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 1970 | { |
| 1971 | unsigned long cver; |
Paul E. McKenney | 79ba7ff | 2019-08-04 13:17:35 -0700 | [diff] [blame] | 1972 | unsigned long flags; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 1973 | unsigned long gps; |
| 1974 | int i; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 1975 | long n_launders; |
| 1976 | long n_launders_cb_snap; |
| 1977 | long n_launders_sa; |
| 1978 | long n_max_cbs; |
| 1979 | long n_max_gps; |
| 1980 | struct rcu_fwd_cb *rfcp; |
| 1981 | struct rcu_fwd_cb *rfcpn; |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 1982 | unsigned long stopat; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 1983 | unsigned long stoppedat; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 1984 | |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 1985 | if (READ_ONCE(rcu_fwd_emergency_stop)) |
| 1986 | return; /* Get out of the way quickly, no GP wait! */ |
Paul E. McKenney | c682db5 | 2019-04-19 07:38:27 -0700 | [diff] [blame] | 1987 | if (!cur_ops->call) |
| 1988 | return; /* Can't do call_rcu() fwd prog without ->call. */ |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 1989 | |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 1990 | /* Loop continuously posting RCU callbacks. */ |
| 1991 | WRITE_ONCE(rcu_fwd_cb_nodelay, true); |
| 1992 | cur_ops->sync(); /* Later readers see above write. */ |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 1993 | WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); |
| 1994 | stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 1995 | n_launders = 0; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 1996 | rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 1997 | n_launders_sa = 0; |
| 1998 | n_max_cbs = 0; |
| 1999 | n_max_gps = 0; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2000 | for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) |
| 2001 | rfp->n_launders_hist[i].n_launders = 0; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2002 | cver = READ_ONCE(rcu_torture_current_version); |
| 2003 | gps = cur_ops->get_gp_seq(); |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2004 | rfp->rcu_launder_gp_seq_start = gps; |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 2005 | tick_dep_set_task(current, TICK_DEP_BIT_RCU); |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2006 | while (time_before(jiffies, stopat) && |
Paul E. McKenney | 60013d5 | 2019-07-10 08:30:00 -0700 | [diff] [blame] | 2007 | !shutdown_time_arrived() && |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2008 | !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2009 | rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2010 | rfcpn = NULL; |
| 2011 | if (rfcp) |
| 2012 | rfcpn = READ_ONCE(rfcp->rfc_next); |
| 2013 | if (rfcpn) { |
| 2014 | if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && |
| 2015 | ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) |
| 2016 | break; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2017 | rfp->rcu_fwd_cb_head = rfcpn; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2018 | n_launders++; |
| 2019 | n_launders_sa++; |
| 2020 | } else { |
| 2021 | rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); |
| 2022 | if (WARN_ON_ONCE(!rfcp)) { |
| 2023 | schedule_timeout_interruptible(1); |
| 2024 | continue; |
| 2025 | } |
| 2026 | n_max_cbs++; |
| 2027 | n_launders_sa = 0; |
| 2028 | rfcp->rfc_gps = 0; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2029 | rfcp->rfc_rfp = rfp; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2030 | } |
| 2031 | cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); |
Paul E. McKenney | bd1bfc5 | 2019-06-22 14:35:59 -0700 | [diff] [blame] | 2032 | rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); |
Paul E. McKenney | 79ba7ff | 2019-08-04 13:17:35 -0700 | [diff] [blame] | 2033 | if (tick_nohz_full_enabled()) { |
| 2034 | local_irq_save(flags); |
| 2035 | rcu_momentary_dyntick_idle(); |
| 2036 | local_irq_restore(flags); |
| 2037 | } |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2038 | } |
| 2039 | stoppedat = jiffies; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2040 | n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2041 | cver = READ_ONCE(rcu_torture_current_version) - cver; |
| 2042 | gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); |
| 2043 | cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */ |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2044 | (void)rcu_torture_fwd_prog_cbfree(rfp); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2045 | |
Paul E. McKenney | 60013d5 | 2019-07-10 08:30:00 -0700 | [diff] [blame] | 2046 | if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && |
| 2047 | !shutdown_time_arrived()) { |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2048 | WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED); |
| 2049 | pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n", |
| 2050 | __func__, |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2051 | stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2052 | n_launders + n_max_cbs - n_launders_cb_snap, |
| 2053 | n_launders, n_launders_sa, |
| 2054 | n_max_gps, n_max_cbs, cver, gps); |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2055 | rcu_torture_fwd_cb_hist(rfp); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2056 | } |
Paul E. McKenney | e8516c6 | 2019-04-09 11:06:32 -0700 | [diff] [blame] | 2057 | schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 2058 | tick_dep_clear_task(current, TICK_DEP_BIT_RCU); |
Paul E. McKenney | e8516c6 | 2019-04-09 11:06:32 -0700 | [diff] [blame] | 2059 | WRITE_ONCE(rcu_fwd_cb_nodelay, false); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2060 | } |
| 2061 | 
| 2063 | /* |
| 2064 |  * OOM notifier that dumps diagnostics for the current forward-progress
| 2065 |  * test, frees its outstanding callbacks, and asks that test to stop.
| 2066 | */ |
| 2067 | static int rcutorture_oom_notify(struct notifier_block *self, |
| 2068 | unsigned long notused, void *nfreed) |
| 2069 | { |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2070 | struct rcu_fwd *rfp; |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2071 | |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2072 | mutex_lock(&rcu_fwd_mutex); |
| 2073 | rfp = rcu_fwds; |
| 2074 | if (!rfp) { |
| 2075 | mutex_unlock(&rcu_fwd_mutex); |
| 2076 | return NOTIFY_OK; |
| 2077 | } |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2078 | WARN(1, "%s invoked upon OOM during forward-progress testing.\n", |
| 2079 | __func__); |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2080 | rcu_torture_fwd_cb_hist(rfp); |
| 2081 | rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2); |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2082 | WRITE_ONCE(rcu_fwd_emergency_stop, true); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2083 | smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ |
| 2084 | pr_info("%s: Freed %lu RCU callbacks.\n", |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2085 | __func__, rcu_torture_fwd_prog_cbfree(rfp)); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2086 | rcu_barrier(); |
| 2087 | pr_info("%s: Freed %lu RCU callbacks.\n", |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2088 | __func__, rcu_torture_fwd_prog_cbfree(rfp)); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2089 | rcu_barrier(); |
| 2090 | pr_info("%s: Freed %lu RCU callbacks.\n", |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2091 | __func__, rcu_torture_fwd_prog_cbfree(rfp)); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2092 | smp_mb(); /* Frees before return to avoid redoing OOM. */ |
| 2093 | (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ |
| 2094 | pr_info("%s returning after OOM processing.\n", __func__); |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2095 | mutex_unlock(&rcu_fwd_mutex); |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2096 | return NOTIFY_OK; |
| 2097 | } |
| 2098 | |
| 2099 | static struct notifier_block rcutorture_oom_nb = { |
| 2100 | .notifier_call = rcutorture_oom_notify |
| 2101 | }; |
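/*
 * Note that rcu_torture_fwd_prog_init() below registers this notifier
 * via register_oom_notifier(&rcutorture_oom_nb) only after publishing
 * rcu_fwds under rcu_fwd_mutex, and rcu_torture_fwd_prog_cleanup()
 * unregisters it after NULLing rcu_fwds, so the NULL check in
 * rcutorture_oom_notify() covers the unregistration race.
 */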
| 2102 | |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2103 | /* Carry out grace-period forward-progress testing. */ |
| 2104 | static int rcu_torture_fwd_prog(void *args) |
| 2105 | { |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2106 | struct rcu_fwd *rfp = args; |
Paul E. McKenney | f4de46e | 2018-07-24 20:50:40 -0700 | [diff] [blame] | 2107 | int tested = 0; |
Paul E. McKenney | 152f4af | 2018-07-19 10:57:58 -0700 | [diff] [blame] | 2108 | int tested_tries = 0; |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2109 | |
| 2110 | VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); |
Paul E. McKenney | 5ab7ab8 | 2018-09-21 18:08:09 -0700 | [diff] [blame] | 2111 | rcu_bind_current_to_nocb(); |
Paul E. McKenney | fecad50 | 2018-07-20 12:18:11 -0700 | [diff] [blame] | 2112 | if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) |
| 2113 | set_user_nice(current, MAX_NICE); |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2114 | do { |
| 2115 | schedule_timeout_interruptible(fwd_progress_holdoff * HZ); |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2116 | WRITE_ONCE(rcu_fwd_emergency_stop, false); |
Paul E. McKenney | 4355080 | 2019-12-04 15:58:41 -0800 | [diff] [blame] | 2117 | if (!IS_ENABLED(CONFIG_TINY_RCU) || |
| 2118 | rcu_inkernel_boot_has_ended()) |
| 2119 | rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); |
| 2120 | if (rcu_inkernel_boot_has_ended()) |
| 2121 | rcu_torture_fwd_prog_cr(rfp); |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2122 | |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2123 | /* Avoid slow periods, better to test when busy. */ |
| 2124 | stutter_wait("rcu_torture_fwd_prog"); |
| 2125 | } while (!torture_must_stop()); |
Paul E. McKenney | 152f4af | 2018-07-19 10:57:58 -0700 | [diff] [blame] | 2126 | /* Short runs might not contain a valid forward-progress attempt. */ |
| 2127 | WARN_ON(!tested && tested_tries >= 5); |
Paul E. McKenney | f4de46e | 2018-07-24 20:50:40 -0700 | [diff] [blame] | 2128 | pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2129 | torture_kthread_stopping("rcu_torture_fwd_prog"); |
| 2130 | return 0; |
| 2131 | } |
| 2132 | |
| 2133 | /* If forward-progress checking is requested and feasible, spawn the thread. */ |
| 2134 | static int __init rcu_torture_fwd_prog_init(void) |
| 2135 | { |
Paul E. McKenney | 5155be9 | 2019-11-06 08:35:08 -0800 | [diff] [blame] | 2136 | struct rcu_fwd *rfp; |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2137 | |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2138 | if (!fwd_progress) |
| 2139 | return 0; /* Not requested, so don't do it. */ |
Paul E. McKenney | 5ac7cdc | 2018-10-16 05:46:58 -0700 | [diff] [blame] | 2140 | if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || |
| 2141 | cur_ops == &rcu_busted_ops) { |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2142 | VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); |
| 2143 | return 0; |
| 2144 | } |
| 2145 | if (stall_cpu > 0) { |
| 2146 | VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); |
| 2147 | 		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
| 2148 | return -EINVAL; /* In module, can fail back to user. */ |
| 2149 | WARN_ON(1); /* Make sure rcutorture notices conflict. */ |
| 2150 | return 0; |
| 2151 | } |
| 2152 | if (fwd_progress_holdoff <= 0) |
| 2153 | fwd_progress_holdoff = 1; |
| 2154 | if (fwd_progress_div <= 0) |
| 2155 | fwd_progress_div = 4; |
Paul E. McKenney | 5155be9 | 2019-11-06 08:35:08 -0800 | [diff] [blame] | 2156 | rfp = kzalloc(sizeof(*rfp), GFP_KERNEL); |
| 2157 | if (!rfp) |
| 2158 | return -ENOMEM; |
| 2159 | spin_lock_init(&rfp->rcu_fwd_lock); |
| 2160 | rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2161 | mutex_lock(&rcu_fwd_mutex); |
Paul E. McKenney | c8fa637 | 2020-07-19 14:40:31 -0700 | [diff] [blame] | 2162 | rcu_fwds = rfp; |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2163 | mutex_unlock(&rcu_fwd_mutex); |
Paul E. McKenney | 299c7d9 | 2020-07-22 10:45:12 -0700 | [diff] [blame] | 2164 | register_oom_notifier(&rcutorture_oom_nb); |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2165 | return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task); |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2166 | } |
| 2167 | |
Paul E. McKenney | c8fa637 | 2020-07-19 14:40:31 -0700 | [diff] [blame] | 2168 | static void rcu_torture_fwd_prog_cleanup(void) |
| 2169 | { |
| 2170 | struct rcu_fwd *rfp; |
| 2171 | |
| 2172 | torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task); |
| 2173 | rfp = rcu_fwds; |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2174 | mutex_lock(&rcu_fwd_mutex); |
Paul E. McKenney | c8fa637 | 2020-07-19 14:40:31 -0700 | [diff] [blame] | 2175 | rcu_fwds = NULL; |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2176 | mutex_unlock(&rcu_fwd_mutex); |
Paul E. McKenney | 299c7d9 | 2020-07-22 10:45:12 -0700 | [diff] [blame] | 2177 | unregister_oom_notifier(&rcutorture_oom_nb); |
Paul E. McKenney | c8fa637 | 2020-07-19 14:40:31 -0700 | [diff] [blame] | 2178 | kfree(rfp); |
| 2179 | } |
| 2180 | |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2181 | /* Callback function for RCU barrier testing. */ |
Rashika Kheria | b3b8a4d | 2014-02-27 17:16:57 +0530 | [diff] [blame] | 2182 | static void rcu_torture_barrier_cbf(struct rcu_head *rcu) |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2183 | { |
| 2184 | atomic_inc(&barrier_cbs_invoked); |
| 2185 | } |
| 2186 | |
Paul E. McKenney | 50d4b62 | 2020-02-04 15:00:56 -0800 | [diff] [blame] | 2187 | /* IPI handler to get callback posted on desired CPU, if online. */ |
| 2188 | static void rcu_torture_barrier1cb(void *rcu_void) |
| 2189 | { |
| 2190 | struct rcu_head *rhp = rcu_void; |
| 2191 | |
| 2192 | cur_ops->call(rhp, rcu_torture_barrier_cbf); |
| 2193 | } |
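/*
 * Queuing through an IPI makes the callback be posted from the targeted
 * CPU itself, so that each of the n_barrier_cbs kthreads exercises
 * rcu_barrier() against a callback resident on a distinct CPU's queue.
 * Should the IPI fail (for example, because the CPU is offline), the
 * caller falls back to posting the callback from the current CPU.
 */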
| 2194 | |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2195 | /* kthread function to register callbacks used to test RCU barriers. */ |
| 2196 | static int rcu_torture_barrier_cbs(void *arg) |
| 2197 | { |
| 2198 | long myid = (long)arg; |
Jules Irenge | 8f43d59 | 2020-06-01 19:45:48 +0100 | [diff] [blame] | 2199 | bool lastphase = false; |
Paul E. McKenney | 78e4bc3 | 2013-09-24 15:04:06 -0700 | [diff] [blame] | 2200 | bool newphase; |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2201 | struct rcu_head rcu; |
| 2202 | |
| 2203 | init_rcu_head_on_stack(&rcu); |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 2204 | VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); |
Linus Torvalds | 971eae7 | 2014-03-31 11:21:19 -0700 | [diff] [blame] | 2205 | set_user_nice(current, MAX_NICE); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2206 | do { |
| 2207 | wait_event(barrier_cbs_wq[myid], |
Paul E. McKenney | 78e4bc3 | 2013-09-24 15:04:06 -0700 | [diff] [blame] | 2208 | (newphase = |
Paul E. McKenney | 6c7ed42 | 2015-04-13 11:58:08 -0700 | [diff] [blame] | 2209 | smp_load_acquire(&barrier_phase)) != lastphase || |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 2210 | torture_must_stop()); |
Paul E. McKenney | 78e4bc3 | 2013-09-24 15:04:06 -0700 | [diff] [blame] | 2211 | lastphase = newphase; |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 2212 | if (torture_must_stop()) |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2213 | break; |
Paul E. McKenney | 6c7ed42 | 2015-04-13 11:58:08 -0700 | [diff] [blame] | 2214 | /* |
| 2215 | * The above smp_load_acquire() ensures barrier_phase load |
Paul E. McKenney | aab0573 | 2016-05-02 12:20:51 -0700 | [diff] [blame] | 2216 | * is ordered before the following ->call(). |
Paul E. McKenney | 6c7ed42 | 2015-04-13 11:58:08 -0700 | [diff] [blame] | 2217 | */ |
Paul E. McKenney | 50d4b62 | 2020-02-04 15:00:56 -0800 | [diff] [blame] | 2218 | if (smp_call_function_single(myid, rcu_torture_barrier1cb, |
| 2219 | &rcu, 1)) { |
| 2220 | // IPI failed, so use direct call from current CPU. |
| 2221 | cur_ops->call(&rcu, rcu_torture_barrier_cbf); |
| 2222 | } |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2223 | if (atomic_dec_and_test(&barrier_cbs_count)) |
| 2224 | wake_up(&barrier_wq); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 2225 | } while (!torture_must_stop()); |
Paul E. McKenney | 69c6045 | 2014-07-01 11:59:36 -0700 | [diff] [blame] | 2226 | if (cur_ops->cb_barrier != NULL) |
| 2227 | cur_ops->cb_barrier(); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2228 | destroy_rcu_head_on_stack(&rcu); |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 2229 | torture_kthread_stopping("rcu_torture_barrier_cbs"); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2230 | return 0; |
| 2231 | } |
| 2232 | |
| 2233 | /* kthread function to drive and coordinate RCU barrier testing. */ |
| 2234 | static int rcu_torture_barrier(void *arg) |
| 2235 | { |
| 2236 | int i; |
| 2237 | |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 2238 | VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2239 | do { |
| 2240 | atomic_set(&barrier_cbs_invoked, 0); |
| 2241 | atomic_set(&barrier_cbs_count, n_barrier_cbs); |
Paul E. McKenney | 6c7ed42 | 2015-04-13 11:58:08 -0700 | [diff] [blame] | 2242 | /* Ensure barrier_phase ordered after prior assignments. */ |
| 2243 | smp_store_release(&barrier_phase, !barrier_phase); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2244 | for (i = 0; i < n_barrier_cbs; i++) |
| 2245 | wake_up(&barrier_cbs_wq[i]); |
| 2246 | wait_event(barrier_wq, |
| 2247 | atomic_read(&barrier_cbs_count) == 0 || |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 2248 | torture_must_stop()); |
| 2249 | if (torture_must_stop()) |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2250 | break; |
| 2251 | n_barrier_attempts++; |
Paul E. McKenney | 78e4bc3 | 2013-09-24 15:04:06 -0700 | [diff] [blame] | 2252 | cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2253 | if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { |
| 2254 | n_rcu_torture_barrier_error++; |
Paul E. McKenney | 7602de4a | 2014-12-17 18:39:54 -0800 | [diff] [blame] | 2255 | pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", |
| 2256 | atomic_read(&barrier_cbs_invoked), |
| 2257 | n_barrier_cbs); |
Paul E. McKenney | 9470a18 | 2020-02-05 12:54:34 -0800 | [diff] [blame] | 2258 | WARN_ON(1); |
| 2259 | // Wait manually for the remaining callbacks |
| 2260 | i = 0; |
| 2261 | do { |
| 2262 | if (WARN_ON(i++ > HZ)) |
| 2263 | i = INT_MIN; |
| 2264 | schedule_timeout_interruptible(1); |
| 2265 | cur_ops->cb_barrier(); |
| 2266 | } while (atomic_read(&barrier_cbs_invoked) != |
| 2267 | n_barrier_cbs && |
| 2268 | !torture_must_stop()); |
| 2269 | smp_mb(); // Can't trust ordering if broken. |
| 2270 | if (!torture_must_stop()) |
| 2271 | pr_err("Recovered: barrier_cbs_invoked = %d\n", |
| 2272 | atomic_read(&barrier_cbs_invoked)); |
Joel Fernandes (Google) | bf5b643 | 2018-06-19 15:14:19 -0700 | [diff] [blame] | 2273 | } else { |
| 2274 | n_barrier_successes++; |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2275 | } |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2276 | schedule_timeout_interruptible(HZ / 10); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 2277 | } while (!torture_must_stop()); |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 2278 | torture_kthread_stopping("rcu_torture_barrier"); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2279 | return 0; |
| 2280 | } |
| 2281 | |
| 2282 | /* Initialize RCU barrier testing. */ |
| 2283 | static int rcu_torture_barrier_init(void) |
| 2284 | { |
| 2285 | int i; |
| 2286 | int ret; |
| 2287 | |
Paul E. McKenney | d9eba768 | 2015-05-14 15:35:43 -0700 | [diff] [blame] | 2288 | if (n_barrier_cbs <= 0) |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2289 | return 0; |
| 2290 | if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { |
Paul E. McKenney | 2caa1e4 | 2012-08-09 16:30:45 -0700 | [diff] [blame] | 2291 | pr_alert("%s" TORTURE_FLAG |
| 2292 | " Call or barrier ops missing for %s,\n", |
| 2293 | torture_type, cur_ops->name); |
| 2294 | pr_alert("%s" TORTURE_FLAG |
| 2295 | " RCU barrier testing omitted from run.\n", |
| 2296 | torture_type); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2297 | return 0; |
| 2298 | } |
| 2299 | atomic_set(&barrier_cbs_count, 0); |
| 2300 | atomic_set(&barrier_cbs_invoked, 0); |
| 2301 | barrier_cbs_tasks = |
Paul E. McKenney | 68a675d | 2017-12-01 14:26:56 -0800 | [diff] [blame] | 2302 | kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2303 | GFP_KERNEL); |
| 2304 | barrier_cbs_wq = |
Paul E. McKenney | 68a675d | 2017-12-01 14:26:56 -0800 | [diff] [blame] | 2305 | kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); |
Sasha Levin | de5e643 | 2012-12-20 14:11:28 -0500 | [diff] [blame] | 2306 | 	if (barrier_cbs_tasks == NULL || barrier_cbs_wq == NULL)
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2307 | return -ENOMEM; |
| 2308 | for (i = 0; i < n_barrier_cbs; i++) { |
| 2309 | init_waitqueue_head(&barrier_cbs_wq[i]); |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 2310 | ret = torture_create_kthread(rcu_torture_barrier_cbs, |
| 2311 | (void *)(long)i, |
| 2312 | barrier_cbs_tasks[i]); |
| 2313 | if (ret) |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2314 | return ret; |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2315 | } |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 2316 | return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2317 | } |
| 2318 | |
| 2319 | /* Clean up after RCU barrier testing. */ |
| 2320 | static void rcu_torture_barrier_cleanup(void) |
| 2321 | { |
| 2322 | int i; |
| 2323 | |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 2324 | torture_stop_kthread(rcu_torture_barrier, barrier_task); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2325 | if (barrier_cbs_tasks != NULL) { |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 2326 | for (i = 0; i < n_barrier_cbs; i++) |
| 2327 | torture_stop_kthread(rcu_torture_barrier_cbs, |
| 2328 | barrier_cbs_tasks[i]); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2329 | kfree(barrier_cbs_tasks); |
| 2330 | barrier_cbs_tasks = NULL; |
| 2331 | } |
| 2332 | if (barrier_cbs_wq != NULL) { |
| 2333 | kfree(barrier_cbs_wq); |
| 2334 | barrier_cbs_wq = NULL; |
| 2335 | } |
| 2336 | } |
| 2337 | |
Joel Fernandes (Google) | 4babd85 | 2018-06-19 15:14:18 -0700 | [diff] [blame] | 2338 | static bool rcu_torture_can_boost(void) |
| 2339 | { |
| 2340 | static int boost_warn_once; |
| 2341 | int prio; |
| 2342 | |
| 2343 | if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) |
| 2344 | return false; |
| 2345 | |
| 2346 | prio = rcu_get_gp_kthreads_prio(); |
| 2347 | if (!prio) |
| 2348 | return false; |
| 2349 | |
| 2350 | if (prio < 2) { |
| 2351 | if (boost_warn_once == 1) |
| 2352 | return false; |
| 2353 | |
Joel Fernandes (Google) | bf5b643 | 2018-06-19 15:14:19 -0700 | [diff] [blame] | 2354 | pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); |
Joel Fernandes (Google) | 4babd85 | 2018-06-19 15:14:18 -0700 | [diff] [blame] | 2355 | boost_warn_once = 1; |
| 2356 | return false; |
| 2357 | } |
| 2358 | |
| 2359 | return true; |
| 2360 | } |
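/*
 * Hedged example of kernel command-line settings that satisfy the
 * checks above (exact values are illustrative, and defaults may vary
 * by configuration):
 *
 *	rcutorture.test_boost=2 rcutree.kthread_prio=2
 *
 * Here test_boost=2 forces boost testing regardless of ->can_boost,
 * and a kthread priority of at least 2 passes the priority check.
 */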
| 2361 | |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 2362 | static bool read_exit_child_stop; |
| 2363 | static bool read_exit_child_stopped; |
| 2364 | static wait_queue_head_t read_exit_wq; |
| 2365 | |
| 2366 | // Child kthread which just does an rcutorture reader and exits. |
| 2367 | static int rcu_torture_read_exit_child(void *trsp_in) |
| 2368 | { |
| 2369 | struct torture_random_state *trsp = trsp_in; |
| 2370 | |
| 2371 | set_user_nice(current, MAX_NICE); |
| 2372 | // Minimize time between reading and exiting. |
| 2373 | while (!kthread_should_stop()) |
| 2374 | schedule_timeout_uninterruptible(1); |
| 2375 | (void)rcu_torture_one_read(trsp); |
| 2376 | return 0; |
| 2377 | } |
| 2378 | |
| 2379 | // Parent kthread which creates and destroys read-exit child kthreads. |
| 2380 | static int rcu_torture_read_exit(void *unused) |
| 2381 | { |
| 2382 | int count = 0; |
| 2383 | bool errexit = false; |
| 2384 | int i; |
| 2385 | struct task_struct *tsp; |
| 2386 | DEFINE_TORTURE_RANDOM(trs); |
| 2387 | |
| 2388 | // Allocate and initialize. |
| 2389 | set_user_nice(current, MAX_NICE); |
| 2390 | VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); |
| 2391 | |
| 2392 | // Each pass through this loop does one read-exit episode. |
| 2393 | do { |
| 2394 | if (++count > read_exit_burst) { |
| 2395 | VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); |
| 2396 | rcu_barrier(); // Wait for task_struct free, avoid OOM. |
| 2397 | for (i = 0; i < read_exit_delay; i++) { |
| 2398 | schedule_timeout_uninterruptible(HZ); |
| 2399 | if (READ_ONCE(read_exit_child_stop)) |
| 2400 | break; |
| 2401 | } |
| 2402 | if (!READ_ONCE(read_exit_child_stop)) |
| 2403 | VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); |
| 2404 | count = 0; |
| 2405 | } |
| 2406 | if (READ_ONCE(read_exit_child_stop)) |
| 2407 | break; |
| 2408 | // Spawn child. |
| 2409 | tsp = kthread_run(rcu_torture_read_exit_child, |
| 2410 | &trs, "%s", |
| 2411 | "rcu_torture_read_exit_child"); |
| 2412 | if (IS_ERR(tsp)) { |
| 2413 | VERBOSE_TOROUT_ERRSTRING("out of memory"); |
| 2414 | errexit = true; |
| 2415 | tsp = NULL; |
| 2416 | break; |
| 2417 | } |
| 2418 | cond_resched(); |
| 2419 | kthread_stop(tsp); |
| 2420 | 		n_read_exits++;
| 2421 | stutter_wait("rcu_torture_read_exit"); |
| 2422 | } while (!errexit && !READ_ONCE(read_exit_child_stop)); |
| 2423 | |
| 2424 | // Clean up and exit. |
| 2425 | smp_store_release(&read_exit_child_stopped, true); // After reaping. |
| 2426 | smp_mb(); // Store before wakeup. |
| 2427 | wake_up(&read_exit_wq); |
| 2428 | while (!torture_must_stop()) |
| 2429 | schedule_timeout_uninterruptible(1); |
| 2430 | torture_kthread_stopping("rcu_torture_read_exit"); |
| 2431 | return 0; |
| 2432 | } |
| 2433 | |
| 2434 | static int rcu_torture_read_exit_init(void) |
| 2435 | { |
| 2436 | if (read_exit_burst <= 0) |
| 2437 | return -EINVAL; |
| 2438 | init_waitqueue_head(&read_exit_wq); |
| 2439 | read_exit_child_stop = false; |
| 2440 | read_exit_child_stopped = false; |
| 2441 | return torture_create_kthread(rcu_torture_read_exit, NULL, |
| 2442 | read_exit_task); |
| 2443 | } |
| 2444 | |
| 2445 | static void rcu_torture_read_exit_cleanup(void) |
| 2446 | { |
| 2447 | if (!read_exit_task) |
| 2448 | return; |
| 2449 | WRITE_ONCE(read_exit_child_stop, true); |
| 2450 | smp_mb(); // Above write before wait. |
| 2451 | wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped)); |
| 2452 | 	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
| 2453 | } |
| 2454 | |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 2455 | static enum cpuhp_state rcutor_hp; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2456 | |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2457 | static void |
| 2458 | rcu_torture_cleanup(void) |
| 2459 | { |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 2460 | int firsttime; |
Paul E. McKenney | 034777d | 2018-04-19 08:43:11 -0700 | [diff] [blame] | 2461 | int flags = 0; |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 2462 | unsigned long gp_seq = 0; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2463 | int i; |
| 2464 | |
Davidlohr Bueso | d36a7a0 | 2014-09-11 20:40:21 -0700 | [diff] [blame] | 2465 | if (torture_cleanup_begin()) { |
Paul E. McKenney | 343e909 | 2008-12-15 16:13:07 -0800 | [diff] [blame] | 2466 | if (cur_ops->cb_barrier != NULL) |
| 2467 | cur_ops->cb_barrier(); |
| 2468 | return; |
| 2469 | } |
Paul E. McKenney | b813afa | 2019-03-21 09:27:28 -0700 | [diff] [blame] | 2470 | if (!cur_ops) { |
| 2471 | torture_cleanup_end(); |
| 2472 | return; |
| 2473 | } |
Paul E. McKenney | 3808dc9 | 2014-01-28 15:29:21 -0800 | [diff] [blame] | 2474 | |
Paul E. McKenney | f7a81b1 | 2019-06-25 13:32:51 -0700 | [diff] [blame] | 2475 | show_rcu_gp_kthreads(); |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 2476 | rcu_torture_read_exit_cleanup(); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2477 | rcu_torture_barrier_cleanup(); |
Paul E. McKenney | c8fa637 | 2020-07-19 14:40:31 -0700 | [diff] [blame] | 2478 | rcu_torture_fwd_prog_cleanup(); |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 2479 | torture_stop_kthread(rcu_torture_stall, stall_task); |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 2480 | torture_stop_kthread(rcu_torture_writer, writer_task); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2481 | |
Josh Triplett | c8e5b16 | 2007-05-08 00:33:20 -0700 | [diff] [blame] | 2482 | if (reader_tasks) { |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 2483 | for (i = 0; i < nrealreaders; i++) |
| 2484 | torture_stop_kthread(rcu_torture_reader, |
| 2485 | reader_tasks[i]); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2486 | kfree(reader_tasks); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2487 | } |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2488 | |
Josh Triplett | c8e5b16 | 2007-05-08 00:33:20 -0700 | [diff] [blame] | 2489 | if (fakewriter_tasks) { |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 2490 | for (i = 0; i < nfakewriters; i++) { |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 2491 | torture_stop_kthread(rcu_torture_fakewriter, |
| 2492 | fakewriter_tasks[i]); |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 2493 | } |
| 2494 | kfree(fakewriter_tasks); |
| 2495 | fakewriter_tasks = NULL; |
| 2496 | } |
| 2497 | |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 2498 | rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); |
| 2499 | srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); |
Joel Fernandes (Google) | 959954d | 2020-06-18 16:29:55 -0400 | [diff] [blame] | 2500 | pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n", |
| 2501 | cur_ops->name, (long)gp_seq, flags, |
| 2502 | rcutorture_seq_diff(gp_seq, start_gp_seq)); |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 2503 | torture_stop_kthread(rcu_torture_stats, stats_task); |
| 2504 | torture_stop_kthread(rcu_torture_fqs, fqs_task); |
Joel Fernandes (Google) | 4babd85 | 2018-06-19 15:14:18 -0700 | [diff] [blame] | 2505 | if (rcu_torture_can_boost()) |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 2506 | cpuhp_remove_state(rcutor_hp); |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 2507 | |
Paul E. McKenney | ca1d51e | 2015-04-14 12:28:22 -0700 | [diff] [blame] | 2508 | /* |
Paul E. McKenney | 62a1a94 | 2018-07-07 18:12:26 -0700 | [diff] [blame] | 2509 | * Wait for all RCU callbacks to fire, then do torture-type-specific |
Paul E. McKenney | ca1d51e | 2015-04-14 12:28:22 -0700 | [diff] [blame] | 2510 | * cleanup operations. |
| 2511 | */ |
Paul E. McKenney | 2326974 | 2008-05-12 21:21:05 +0200 | [diff] [blame] | 2512 | if (cur_ops->cb_barrier != NULL) |
| 2513 | cur_ops->cb_barrier(); |
Paul E. McKenney | ca1d51e | 2015-04-14 12:28:22 -0700 | [diff] [blame] | 2514 | if (cur_ops->cleanup != NULL) |
| 2515 | cur_ops->cleanup(); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2516 | |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2517 | rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 2518 | |
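 | | /* |
 | | * Dump any reader segments recorded at the time of a reader failure or |
 | | * close call, one line per segment, with each segment's delays printed |
 | | * in accumulated "+"-separated form (for example, "5jiffies+10us"). |
 | | */ |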
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 2519 | if (err_segs_recorded) { |
| 2520 | pr_alert("Failure/close-call rcutorture reader segments:\n"); |
| 2521 | if (rt_read_nsegs == 0) |
| 2522 | pr_alert("\t: No segments recorded!!!\n"); |
| 2523 | firsttime = 1; |
| 2524 | for (i = 0; i < rt_read_nsegs; i++) { |
| 2525 | pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate); |
| 2526 | if (err_segs[i].rt_delay_jiffies != 0) { |
| 2527 | pr_cont("%s%ldjiffies", firsttime ? "" : "+", |
| 2528 | err_segs[i].rt_delay_jiffies); |
| 2529 | firsttime = 0; |
| 2530 | } |
| 2531 | if (err_segs[i].rt_delay_ms != 0) { |
| 2532 | pr_cont("%s%ldms", firsttime ? "" : "+", |
| 2533 | err_segs[i].rt_delay_ms); |
| 2534 | firsttime = 0; |
| 2535 | } |
| 2536 | if (err_segs[i].rt_delay_us != 0) { |
| 2537 | pr_cont("%s%ldus", firsttime ? "" : "+", |
| 2538 | err_segs[i].rt_delay_us); |
| 2539 | firsttime = 0; |
| 2540 | } |
| 2541 | pr_cont("%s\n", |
| 2542 | err_segs[i].rt_preempted ? "preempted" : ""); |
 | 2544 | } |
| 2545 | } |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2546 | if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2547 | rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); |
Paul E. McKenney | 2e9e808 | 2014-01-28 15:58:22 -0800 | [diff] [blame] | 2548 | else if (torture_onoff_failures()) |
Paul E. McKenney | 091541b | 2012-01-10 12:51:14 -0800 | [diff] [blame] | 2549 | rcu_torture_print_module_parms(cur_ops, |
| 2550 | "End of test: RCU_HOTPLUG"); |
Paul E. McKenney | 95c3832 | 2006-03-24 03:15:58 -0800 | [diff] [blame] | 2551 | else |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2552 | rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); |
Davidlohr Bueso | d36a7a0 | 2014-09-11 20:40:21 -0700 | [diff] [blame] | 2553 | torture_cleanup_end(); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2554 | } |
| 2555 | |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 2556 | #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD |
| 2557 | static void rcu_torture_leak_cb(struct rcu_head *rhp) |
| 2558 | { |
| 2559 | } |
| 2560 | |
| 2561 | static void rcu_torture_err_cb(struct rcu_head *rhp) |
| 2562 | { |
 | 2563 | /* |
 | 2564 | * This -might- happen due to race conditions, but is unlikely. |
 | 2565 | * The scenario that leads to it is that the first of the pair |
 | 2566 | * of duplicate callbacks is queued, someone else starts a grace |
 | 2567 | * period that includes that callback, and the second of the pair |
 | 2568 | * then must wait for the next grace period.  In that rare case, |
 | 2569 | * the debug-objects subsystem won't have splatted, so this |
 | 2570 | * message is the only indication of the bug. |
 | 2571 | */ |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 2572 | pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME); |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 2573 | } |
| 2574 | #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
| 2575 | |
 | 2576 | /* |
 | 2577 | * Verify that a duplicate call_rcu() (the callback analog of a |
 | 2578 | * double free) causes debug-objects to complain, but only if |
 | 2579 | * CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, report that the test |
 | 2580 | * cannot be carried out.  Invoked from rcu_torture_init() when the |
 | | * object_debug module parameter is set. |
 | | */ |
| 2581 | static void rcu_test_debug_objects(void) |
| 2582 | { |
| 2583 | #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD |
| 2584 | struct rcu_head rh1; |
| 2585 | struct rcu_head rh2; |
| 2586 | |
| 2587 | init_rcu_head_on_stack(&rh1); |
| 2588 | init_rcu_head_on_stack(&rh2); |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 2589 | pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME); |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 2590 | |
 | 2591 | /* Try to queue the two rh2 callbacks for the same grace period. */ |
| 2592 | preempt_disable(); /* Prevent preemption from interrupting test. */ |
| 2593 | rcu_read_lock(); /* Make it impossible to finish a grace period. */ |
| 2594 | call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */ |
| 2595 | local_irq_disable(); /* Make it harder to start a new grace period. */ |
| 2596 | call_rcu(&rh2, rcu_torture_leak_cb); |
| 2597 | call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ |
| 2598 | local_irq_enable(); |
| 2599 | rcu_read_unlock(); |
| 2600 | preempt_enable(); |
| 2601 | |
| 2602 | /* Wait for them all to get done so we can safely return. */ |
| 2603 | rcu_barrier(); |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 2604 | pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME); |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 2605 | destroy_rcu_head_on_stack(&rh1); |
| 2606 | destroy_rcu_head_on_stack(&rh2); |
| 2607 | #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 2608 | pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME); |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 2609 | #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
| 2610 | } |
| 2611 | |
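 | | /* |
 | | * Invoked from the CPU-hotplug torture path (see the call to |
 | | * torture_onoff_init() in rcu_torture_init() below).  The 0xfff mask |
 | | * throttles this to one synchronous grace period per 4096 calls. |
 | | */ |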
Paul E. McKenney | 3a6cb58 | 2018-12-10 09:44:52 -0800 | [diff] [blame] | 2612 | static void rcutorture_sync(void) |
| 2613 | { |
| 2614 | static unsigned long n; |
| 2615 | |
| 2616 | if (cur_ops->sync && !(++n & 0xfff)) |
| 2617 | cur_ops->sync(); |
| 2618 | } |
| 2619 | |
Josh Triplett | 6f8bc500 | 2007-05-08 00:25:24 -0700 | [diff] [blame] | 2620 | static int __init |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2621 | rcu_torture_init(void) |
| 2622 | { |
Paul E. McKenney | c04dd09 | 2018-07-23 14:16:47 -0700 | [diff] [blame] | 2623 | long i; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2624 | int cpu; |
| 2625 | int firsterr = 0; |
Joel Fernandes (Google) | 959954d | 2020-06-18 16:29:55 -0400 | [diff] [blame] | 2626 | int flags = 0; |
| 2627 | unsigned long gp_seq = 0; |
Paul E. McKenney | 2ec1f2d | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 2628 | static struct rcu_torture_ops *torture_ops[] = { |
Paul E. McKenney | c770c82 | 2018-07-07 10:28:07 -0700 | [diff] [blame] | 2629 | &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, |
Paul E. McKenney | c1a76c0 | 2020-03-10 10:32:30 -0700 | [diff] [blame] | 2630 | &busted_srcud_ops, &tasks_ops, &tasks_rude_ops, |
| 2631 | &tasks_tracing_ops, &trivial_ops, |
Paul E. McKenney | 2ec1f2d | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 2632 | }; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2633 | |
Paul E. McKenney | a2f2577 | 2017-11-21 20:19:17 -0800 | [diff] [blame] | 2634 | if (!torture_init_begin(torture_type, verbose)) |
Paul E. McKenney | 5228084 | 2014-04-07 09:14:11 -0700 | [diff] [blame] | 2635 | return -EBUSY; |
Paul E. McKenney | 343e909 | 2008-12-15 16:13:07 -0800 | [diff] [blame] | 2636 | |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2637 | /* Process args and tell the world that the torturer is on the job. */ |
Josh Triplett | ade5fb8 | 2007-05-08 00:33:22 -0700 | [diff] [blame] | 2638 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 2639 | cur_ops = torture_ops[i]; |
Josh Triplett | ade5fb8 | 2007-05-08 00:33:22 -0700 | [diff] [blame] | 2640 | if (strcmp(torture_type, cur_ops->name) == 0) |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 2641 | break; |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 2642 | } |
Josh Triplett | ade5fb8 | 2007-05-08 00:33:22 -0700 | [diff] [blame] | 2643 | if (i == ARRAY_SIZE(torture_ops)) { |
Paul E. McKenney | 2caa1e4 | 2012-08-09 16:30:45 -0700 | [diff] [blame] | 2644 | pr_alert("rcu-torture: invalid torture type: \"%s\"\n", |
| 2645 | torture_type); |
| 2646 | pr_alert("rcu-torture types:"); |
Paul E. McKenney | cf886c4 | 2009-10-25 19:03:54 -0700 | [diff] [blame] | 2647 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) |
Joe Perches | a753835 | 2018-05-14 13:27:33 -0700 | [diff] [blame] | 2648 | pr_cont(" %s", torture_ops[i]->name); |
| 2649 | pr_cont("\n"); |
Paul E. McKenney | e746b55 | 2018-07-07 17:35:22 -0700 | [diff] [blame] | 2650 | WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); |
Paul E. McKenney | 889d487 | 2015-08-24 11:37:58 -0700 | [diff] [blame] | 2651 | firsterr = -EINVAL; |
Paul E. McKenney | b813afa | 2019-03-21 09:27:28 -0700 | [diff] [blame] | 2652 | cur_ops = NULL; |
Paul E. McKenney | 889d487 | 2015-08-24 11:37:58 -0700 | [diff] [blame] | 2653 | goto unwind; |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 2654 | } |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 2655 | if (cur_ops->fqs == NULL && fqs_duration != 0) { |
Paul E. McKenney | 2caa1e4 | 2012-08-09 16:30:45 -0700 | [diff] [blame] | 2656 | pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 2657 | fqs_duration = 0; |
| 2658 | } |
Josh Triplett | c8e5b16 | 2007-05-08 00:33:20 -0700 | [diff] [blame] | 2659 | if (cur_ops->init) |
Paul E. McKenney | 889d487 | 2015-08-24 11:37:58 -0700 | [diff] [blame] | 2660 | cur_ops->init(); |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 2661 | |
Paul E. McKenney | 64e4b43 | 2014-03-12 10:26:35 -0700 | [diff] [blame] | 2662 | if (nreaders >= 0) { |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2663 | nrealreaders = nreaders; |
Paul E. McKenney | 64e4b43 | 2014-03-12 10:26:35 -0700 | [diff] [blame] | 2664 | } else { |
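 | | /* nreaders defaults to -1, so this typically yields num_online_cpus() - 1 readers. */ |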
Paul E. McKenney | 3838cc1 | 2015-03-12 13:55:48 -0700 | [diff] [blame] | 2665 | nrealreaders = num_online_cpus() - 2 - nreaders; |
Paul E. McKenney | 64e4b43 | 2014-03-12 10:26:35 -0700 | [diff] [blame] | 2666 | if (nrealreaders <= 0) |
| 2667 | nrealreaders = 1; |
| 2668 | } |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2669 | rcu_torture_print_module_parms(cur_ops, "Start of test"); |
Joel Fernandes (Google) | 959954d | 2020-06-18 16:29:55 -0400 | [diff] [blame] | 2670 | rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); |
| 2671 | srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); |
| 2672 | start_gp_seq = gp_seq; |
| 2673 | pr_alert("%s: Start-test grace-period state: g%ld f%#x\n", |
| 2674 | cur_ops->name, (long)gp_seq, flags); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2675 | |
| 2676 | /* Set up the freelist. */ |
| 2677 | |
| 2678 | INIT_LIST_HEAD(&rcu_torture_freelist); |
Ahmed S. Darwish | 788e770 | 2007-05-08 00:33:14 -0700 | [diff] [blame] | 2679 | for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { |
Paul E. McKenney | 996417d | 2005-11-18 01:10:50 -0800 | [diff] [blame] | 2680 | rcu_tortures[i].rtort_mbtest = 0; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2681 | list_add_tail(&rcu_tortures[i].rtort_free, |
| 2682 | &rcu_torture_freelist); |
| 2683 | } |
| 2684 | |
| 2685 | /* Initialize the statistics so that each run gets its own numbers. */ |
| 2686 | |
| 2687 | rcu_torture_current = NULL; |
| 2688 | rcu_torture_current_version = 0; |
| 2689 | atomic_set(&n_rcu_torture_alloc, 0); |
| 2690 | atomic_set(&n_rcu_torture_alloc_fail, 0); |
| 2691 | atomic_set(&n_rcu_torture_free, 0); |
Paul E. McKenney | 996417d | 2005-11-18 01:10:50 -0800 | [diff] [blame] | 2692 | atomic_set(&n_rcu_torture_mberror, 0); |
| 2693 | atomic_set(&n_rcu_torture_error, 0); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2694 | n_rcu_torture_barrier_error = 0; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2695 | n_rcu_torture_boost_ktrerror = 0; |
| 2696 | n_rcu_torture_boost_rterror = 0; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2697 | n_rcu_torture_boost_failure = 0; |
| 2698 | n_rcu_torture_boosts = 0; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2699 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) |
| 2700 | atomic_set(&rcu_torture_wcount[i], 0); |
KAMEZAWA Hiroyuki | 0a94502 | 2006-03-28 01:56:37 -0800 | [diff] [blame] | 2701 | for_each_possible_cpu(cpu) { |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2702 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { |
| 2703 | per_cpu(rcu_torture_count, cpu)[i] = 0; |
| 2704 | per_cpu(rcu_torture_batch, cpu)[i] = 0; |
| 2705 | } |
| 2706 | } |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 2707 | err_segs_recorded = 0; |
| 2708 | rt_read_nsegs = 0; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2709 | |
| 2710 | /* Start up the kthreads. */ |
| 2711 | |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 2712 | firsterr = torture_create_kthread(rcu_torture_writer, NULL, |
| 2713 | writer_task); |
| 2714 | if (firsterr) |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2715 | goto unwind; |
Paul E. McKenney | 4444d85 | 2015-05-14 15:42:40 -0700 | [diff] [blame] | 2716 | if (nfakewriters > 0) { |
Paul E. McKenney | 68a675d | 2017-12-01 14:26:56 -0800 | [diff] [blame] | 2717 | fakewriter_tasks = kcalloc(nfakewriters, |
Paul E. McKenney | 4444d85 | 2015-05-14 15:42:40 -0700 | [diff] [blame] | 2718 | sizeof(fakewriter_tasks[0]), |
| 2719 | GFP_KERNEL); |
| 2720 | if (fakewriter_tasks == NULL) { |
| 2721 | VERBOSE_TOROUT_ERRSTRING("out of memory"); |
| 2722 | firsterr = -ENOMEM; |
| 2723 | goto unwind; |
| 2724 | } |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 2725 | } |
| 2726 | for (i = 0; i < nfakewriters; i++) { |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 2727 | firsterr = torture_create_kthread(rcu_torture_fakewriter, |
| 2728 | NULL, fakewriter_tasks[i]); |
| 2729 | if (firsterr) |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 2730 | goto unwind; |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 2731 | } |
Paul E. McKenney | 68a675d | 2017-12-01 14:26:56 -0800 | [diff] [blame] | 2732 | reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2733 | GFP_KERNEL); |
| 2734 | if (reader_tasks == NULL) { |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 2735 | VERBOSE_TOROUT_ERRSTRING("out of memory"); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2736 | firsterr = -ENOMEM; |
| 2737 | goto unwind; |
| 2738 | } |
| 2739 | for (i = 0; i < nrealreaders; i++) { |
Paul E. McKenney | c04dd09 | 2018-07-23 14:16:47 -0700 | [diff] [blame] | 2740 | firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 2741 | reader_tasks[i]); |
| 2742 | if (firsterr) |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2743 | goto unwind; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2744 | } |
| 2745 | if (stat_interval > 0) { |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 2746 | firsterr = torture_create_kthread(rcu_torture_stats, NULL, |
| 2747 | stats_task); |
| 2748 | if (firsterr) |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2749 | goto unwind; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2750 | } |
Paul E. McKenney | e8e255f | 2015-05-14 16:55:45 -0700 | [diff] [blame] | 2751 | if (test_no_idle_hz && shuffle_interval > 0) { |
Paul E. McKenney | 3808dc9 | 2014-01-28 15:29:21 -0800 | [diff] [blame] | 2752 | firsterr = torture_shuffle_init(shuffle_interval * HZ); |
| 2753 | if (firsterr) |
Rusty Russell | 73d0a4b | 2009-03-30 22:05:16 -0600 | [diff] [blame] | 2754 | goto unwind; |
Srivatsa Vaddagiri | d84f520 | 2006-01-08 01:03:42 -0800 | [diff] [blame] | 2755 | } |
Paul E. McKenney | d120f65 | 2008-06-18 05:21:44 -0700 | [diff] [blame] | 2756 | if (stutter < 0) |
| 2757 | stutter = 0; |
| 2758 | if (stutter) { |
Paul E. McKenney | ff3bf92 | 2019-04-09 14:44:49 -0700 | [diff] [blame] | 2759 | int t; |
| 2760 | |
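 | | /* Use the torture type's stall duration, if any, as the inter-stutter interval. */ |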
| 2761 | t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ; |
| 2762 | firsterr = torture_stutter_init(stutter * HZ, t); |
Paul E. McKenney | 628edaa | 2014-01-31 11:57:43 -0800 | [diff] [blame] | 2763 | if (firsterr) |
Paul E. McKenney | d120f65 | 2008-06-18 05:21:44 -0700 | [diff] [blame] | 2764 | goto unwind; |
Paul E. McKenney | d120f65 | 2008-06-18 05:21:44 -0700 | [diff] [blame] | 2765 | } |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 2766 | if (fqs_duration < 0) |
| 2767 | fqs_duration = 0; |
| 2768 | if (fqs_duration) { |
Paul E. McKenney | 628edaa | 2014-01-31 11:57:43 -0800 | [diff] [blame] | 2769 | /* Create the fqs thread. */ |
Paul E. McKenney | d0d0606 | 2014-03-17 20:56:45 -0700 | [diff] [blame] | 2770 | firsterr = torture_create_kthread(rcu_torture_fqs, NULL, |
| 2771 | fqs_task); |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 2772 | if (firsterr) |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 2773 | goto unwind; |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 2774 | } |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2775 | if (test_boost_interval < 1) |
| 2776 | test_boost_interval = 1; |
| 2777 | if (test_boost_duration < 2) |
| 2778 | test_boost_duration = 2; |
Joel Fernandes (Google) | 4babd85 | 2018-06-19 15:14:18 -0700 | [diff] [blame] | 2779 | if (rcu_torture_can_boost()) { |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2780 | |
| 2781 | boost_starttime = jiffies + test_boost_interval * HZ; |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 2782 | |
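 | | /* |
 | | * CPUHP_AP_ONLINE_DYN allocates a dynamic hotplug state; save the |
 | | * returned state number so that rcu_torture_cleanup() can later |
 | | * pass it to cpuhp_remove_state(). |
 | | */ |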
| 2783 | firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", |
| 2784 | rcutorture_booster_init, |
| 2785 | rcutorture_booster_cleanup); |
| 2786 | if (firsterr < 0) |
| 2787 | goto unwind; |
| 2788 | rcutor_hp = firsterr; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2789 | } |
Paul E. McKenney | 60013d5 | 2019-07-10 08:30:00 -0700 | [diff] [blame] | 2790 | shutdown_jiffies = jiffies + shutdown_secs * HZ; |
Paul E. McKenney | 01025eb | 2014-01-31 15:15:02 -0800 | [diff] [blame] | 2791 | firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); |
| 2792 | if (firsterr) |
Paul E. McKenney | e991dbc | 2014-01-31 14:52:13 -0800 | [diff] [blame] | 2793 | goto unwind; |
Paul E. McKenney | 3a6cb58 | 2018-12-10 09:44:52 -0800 | [diff] [blame] | 2794 | firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, |
| 2795 | rcutorture_sync); |
Paul E. McKenney | 01025eb | 2014-01-31 15:15:02 -0800 | [diff] [blame] | 2796 | if (firsterr) |
Paul E. McKenney | 37e377d | 2012-02-17 22:12:18 -0800 | [diff] [blame] | 2797 | goto unwind; |
Paul E. McKenney | 01025eb | 2014-01-31 15:15:02 -0800 | [diff] [blame] | 2798 | firsterr = rcu_torture_stall_init(); |
| 2799 | if (firsterr) |
Paul E. McKenney | 37e377d | 2012-02-17 22:12:18 -0800 | [diff] [blame] | 2800 | goto unwind; |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2801 | firsterr = rcu_torture_fwd_prog_init(); |
| 2802 | if (firsterr) |
| 2803 | goto unwind; |
Paul E. McKenney | 01025eb | 2014-01-31 15:15:02 -0800 | [diff] [blame] | 2804 | firsterr = rcu_torture_barrier_init(); |
| 2805 | if (firsterr) |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2806 | goto unwind; |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 2807 | firsterr = rcu_torture_read_exit_init(); |
| 2808 | if (firsterr) |
| 2809 | goto unwind; |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 2810 | if (object_debug) |
| 2811 | rcu_test_debug_objects(); |
Paul E. McKenney | b5daa8f | 2014-01-30 13:38:09 -0800 | [diff] [blame] | 2812 | torture_init_end(); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2813 | return 0; |
| 2814 | |
| 2815 | unwind: |
Paul E. McKenney | b5daa8f | 2014-01-30 13:38:09 -0800 | [diff] [blame] | 2816 | torture_init_end(); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2817 | rcu_torture_cleanup(); |
| 2818 | return firsterr; |
| 2819 | } |
| 2820 | |
| 2821 | module_init(rcu_torture_init); |
| 2822 | module_exit(rcu_torture_cleanup); |
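 | | |
 | | /* |
 | | * Illustrative (not exhaustive) invocation once built as a module: |
 | | * |
 | | *	modprobe rcutorture torture_type=srcu nreaders=4 stat_interval=30 |
 | | * |
 | | * Any of the torture_param() values defined above can be set the same way. |
 | | */ |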